CombinedText stringlengths 4 3.42M |
|---|
0a91143a-2e56-11e5-9284-b827eb9e62be
0a965e72-2e56-11e5-9284-b827eb9e62be
0a965e72-2e56-11e5-9284-b827eb9e62be |
fdc25722-2e56-11e5-9284-b827eb9e62be
fdc77cca-2e56-11e5-9284-b827eb9e62be
fdc77cca-2e56-11e5-9284-b827eb9e62be |
9f450452-2e55-11e5-9284-b827eb9e62be
9f4a3508-2e55-11e5-9284-b827eb9e62be
9f4a3508-2e55-11e5-9284-b827eb9e62be |
3ba73086-2e56-11e5-9284-b827eb9e62be
3bac7c08-2e56-11e5-9284-b827eb9e62be
3bac7c08-2e56-11e5-9284-b827eb9e62be |
7a6c99b4-2e56-11e5-9284-b827eb9e62be
7a71bc28-2e56-11e5-9284-b827eb9e62be
7a71bc28-2e56-11e5-9284-b827eb9e62be |
cab7c87c-2e55-11e5-9284-b827eb9e62be
cabce94c-2e55-11e5-9284-b827eb9e62be
cabce94c-2e55-11e5-9284-b827eb9e62be |
c58fb41e-2e54-11e5-9284-b827eb9e62be
c594e380-2e54-11e5-9284-b827eb9e62be
c594e380-2e54-11e5-9284-b827eb9e62be |
266f1a58-2e56-11e5-9284-b827eb9e62be
267441ea-2e56-11e5-9284-b827eb9e62be
267441ea-2e56-11e5-9284-b827eb9e62be |
38b19d02-2e57-11e5-9284-b827eb9e62be
38b6b800-2e57-11e5-9284-b827eb9e62be
38b6b800-2e57-11e5-9284-b827eb9e62be |
98a98c16-2e56-11e5-9284-b827eb9e62be
98aeab74-2e56-11e5-9284-b827eb9e62be
98aeab74-2e56-11e5-9284-b827eb9e62be |
d988ca0e-2e55-11e5-9284-b827eb9e62be
d98de1e2-2e55-11e5-9284-b827eb9e62be
d98de1e2-2e55-11e5-9284-b827eb9e62be |
868f9922-2e55-11e5-9284-b827eb9e62be
8694bc9a-2e55-11e5-9284-b827eb9e62be
8694bc9a-2e55-11e5-9284-b827eb9e62be |
f0f86f64-2e55-11e5-9284-b827eb9e62be
f0fd919c-2e55-11e5-9284-b827eb9e62be
f0fd919c-2e55-11e5-9284-b827eb9e62be |
96b3dbd8-2e55-11e5-9284-b827eb9e62be
96b8f64a-2e55-11e5-9284-b827eb9e62be
96b8f64a-2e55-11e5-9284-b827eb9e62be |
1a674338-2e57-11e5-9284-b827eb9e62be
1a6c75a6-2e57-11e5-9284-b827eb9e62be
1a6c75a6-2e57-11e5-9284-b827eb9e62be |
10d6b56a-2e57-11e5-9284-b827eb9e62be
10dc147e-2e57-11e5-9284-b827eb9e62be
10dc147e-2e57-11e5-9284-b827eb9e62be |
3e5a7e8c-2e56-11e5-9284-b827eb9e62be
3e5fa9ac-2e56-11e5-9284-b827eb9e62be
3e5fa9ac-2e56-11e5-9284-b827eb9e62be |
026b7f10-2e57-11e5-9284-b827eb9e62be
02709f72-2e57-11e5-9284-b827eb9e62be
02709f72-2e57-11e5-9284-b827eb9e62be |
c7daad3c-2e54-11e5-9284-b827eb9e62be
c7dfdbfe-2e54-11e5-9284-b827eb9e62be
c7dfdbfe-2e54-11e5-9284-b827eb9e62be |
395f33b0-2e55-11e5-9284-b827eb9e62be
39647a5a-2e55-11e5-9284-b827eb9e62be
39647a5a-2e55-11e5-9284-b827eb9e62be |
30f04bc4-2e55-11e5-9284-b827eb9e62be
30f5c702-2e55-11e5-9284-b827eb9e62be
30f5c702-2e55-11e5-9284-b827eb9e62be |
449daff8-2e56-11e5-9284-b827eb9e62be
44a2c68c-2e56-11e5-9284-b827eb9e62be
44a2c68c-2e56-11e5-9284-b827eb9e62be |
1c6908ce-2e57-11e5-9284-b827eb9e62be
1c6e35f6-2e57-11e5-9284-b827eb9e62be
1c6e35f6-2e57-11e5-9284-b827eb9e62be |
379c5f4e-2e55-11e5-9284-b827eb9e62be
37a200f2-2e55-11e5-9284-b827eb9e62be
37a200f2-2e55-11e5-9284-b827eb9e62be |
d8e5d4de-2e55-11e5-9284-b827eb9e62be
d8eaf752-2e55-11e5-9284-b827eb9e62be
d8eaf752-2e55-11e5-9284-b827eb9e62be |
26c1d766-2e56-11e5-9284-b827eb9e62be
26c757d6-2e56-11e5-9284-b827eb9e62be
26c757d6-2e56-11e5-9284-b827eb9e62be |
f8afc2bc-2e54-11e5-9284-b827eb9e62be
f8b4e17a-2e54-11e5-9284-b827eb9e62be
f8b4e17a-2e54-11e5-9284-b827eb9e62be |
a345b7f0-2e54-11e5-9284-b827eb9e62be
a34acf9c-2e54-11e5-9284-b827eb9e62be
a34acf9c-2e54-11e5-9284-b827eb9e62be |
58bdc0ea-2e56-11e5-9284-b827eb9e62be
58c2d99a-2e56-11e5-9284-b827eb9e62be
58c2d99a-2e56-11e5-9284-b827eb9e62be |
e82fe28c-2e54-11e5-9284-b827eb9e62be
e834ff9c-2e54-11e5-9284-b827eb9e62be
e834ff9c-2e54-11e5-9284-b827eb9e62be |
38b1d378-2e55-11e5-9284-b827eb9e62be
38b71db0-2e55-11e5-9284-b827eb9e62be
38b71db0-2e55-11e5-9284-b827eb9e62be |
c2e6af5a-2e55-11e5-9284-b827eb9e62be
c2ebc3fa-2e55-11e5-9284-b827eb9e62be
c2ebc3fa-2e55-11e5-9284-b827eb9e62be |
f8a7a2ce-2e56-11e5-9284-b827eb9e62be
f8acd172-2e56-11e5-9284-b827eb9e62be
f8acd172-2e56-11e5-9284-b827eb9e62be |
6afcc6d4-2e56-11e5-9284-b827eb9e62be
6b0215da-2e56-11e5-9284-b827eb9e62be
6b0215da-2e56-11e5-9284-b827eb9e62be |
b1a12026-2e56-11e5-9284-b827eb9e62be
b1a635f2-2e56-11e5-9284-b827eb9e62be
b1a635f2-2e56-11e5-9284-b827eb9e62be |
c3394e4e-2e56-11e5-9284-b827eb9e62be
c33e6be0-2e56-11e5-9284-b827eb9e62be
c33e6be0-2e56-11e5-9284-b827eb9e62be |
ff888d80-2e54-11e5-9284-b827eb9e62be
ff8dbe86-2e54-11e5-9284-b827eb9e62be
ff8dbe86-2e54-11e5-9284-b827eb9e62be |
// Copyright 2017 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
package glog
import (
"bytes"
"github.com/gogf/gf/test/gtest"
"strings"
"testing"
)
// Test_SetConfigWithMap verifies SetConfigWithMap applies path, level and
// stdout settings supplied as a plain map (an extra key such as StStatus
// must not cause an error).
func Test_SetConfigWithMap(t *testing.T) {
	gtest.C(t, func(t *gtest.T) {
		l := New()
		m := map[string]interface{}{
			"path":     "/var/log",
			"level":    "all",
			"stdout":   false,
			"StStatus": 0,
		}
		err := l.SetConfigWithMap(m)
		t.Assert(err, nil)
		t.Assert(l.config.Path, m["path"])
		// "all" is parsed into the numeric LEVEL_ALL constant, so compare
		// against the constant rather than the raw string.
		t.Assert(l.config.Level, LEVEL_ALL)
		t.Assert(l.config.StdoutPrint, m["stdout"])
	})
}
// Test_SetConfigWithMap_LevelStr checks that a string "level" value in the
// config map controls which severities are written: "all" emits both DEBU
// and WARN lines, while "warn" suppresses DEBU and keeps WARN.
func Test_SetConfigWithMap_LevelStr(t *testing.T) {
	gtest.C(t, func(t *gtest.T) {
		out := bytes.NewBuffer(nil)
		logger := New()
		t.Assert(logger.SetConfigWithMap(map[string]interface{}{"level": "all"}), nil)
		logger.SetWriter(out)
		logger.Debug("test")
		logger.Warning("test")
		t.Assert(strings.Contains(out.String(), "DEBU"), true)
		t.Assert(strings.Contains(out.String(), "WARN"), true)
	})
	gtest.C(t, func(t *gtest.T) {
		out := bytes.NewBuffer(nil)
		logger := New()
		t.Assert(logger.SetConfigWithMap(map[string]interface{}{"level": "warn"}), nil)
		logger.SetWriter(out)
		logger.Debug("test")
		logger.Warning("test")
		t.Assert(strings.Contains(out.String(), "DEBU"), false)
		t.Assert(strings.Contains(out.String(), "WARN"), true)
	})
}
improve unit testing case for package glog
// Copyright 2017 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
package glog
import (
"bytes"
"github.com/gogf/gf/test/gtest"
"strings"
"testing"
)
// Test_SetConfigWithMap verifies SetConfigWithMap applies path, level and
// stdout settings supplied as a plain map (an extra key such as StStatus
// must not cause an error).
func Test_SetConfigWithMap(t *testing.T) {
	gtest.C(t, func(t *gtest.T) {
		logger := New()
		conf := map[string]interface{}{
			"path":     "/var/log",
			"level":    "all",
			"stdout":   false,
			"StStatus": 0,
		}
		t.Assert(logger.SetConfigWithMap(conf), nil)
		t.Assert(logger.config.Path, conf["path"])
		// "all" parses into the numeric LEVEL_ALL constant.
		t.Assert(logger.config.Level, LEVEL_ALL)
		t.Assert(logger.config.StdoutPrint, conf["stdout"])
	})
}
// Test_SetConfigWithMap_LevelStr checks that a string "level" value in the
// config map controls which severities are written: "all" emits both DEBU
// and WARN lines, while "warn" suppresses DEBU and keeps WARN.
func Test_SetConfigWithMap_LevelStr(t *testing.T) {
	gtest.C(t, func(t *gtest.T) {
		out := bytes.NewBuffer(nil)
		logger := New()
		t.Assert(logger.SetConfigWithMap(map[string]interface{}{"level": "all"}), nil)
		logger.SetWriter(out)
		logger.Debug("test")
		logger.Warning("test")
		t.Assert(strings.Contains(out.String(), "DEBU"), true)
		t.Assert(strings.Contains(out.String(), "WARN"), true)
	})
	gtest.C(t, func(t *gtest.T) {
		out := bytes.NewBuffer(nil)
		logger := New()
		t.Assert(logger.SetConfigWithMap(map[string]interface{}{"level": "warn"}), nil)
		logger.SetWriter(out)
		logger.Debug("test")
		logger.Warning("test")
		t.Assert(strings.Contains(out.String(), "DEBU"), false)
		t.Assert(strings.Contains(out.String(), "WARN"), true)
	})
}
|
8aea9dd2-2e55-11e5-9284-b827eb9e62be
8aefba2e-2e55-11e5-9284-b827eb9e62be
8aefba2e-2e55-11e5-9284-b827eb9e62be |
28b23e02-2e57-11e5-9284-b827eb9e62be
28b75680-2e57-11e5-9284-b827eb9e62be
28b75680-2e57-11e5-9284-b827eb9e62be |
d80dc08a-2e55-11e5-9284-b827eb9e62be
d812dc1e-2e55-11e5-9284-b827eb9e62be
d812dc1e-2e55-11e5-9284-b827eb9e62be |
63ab3ca4-2e55-11e5-9284-b827eb9e62be
63b06c1a-2e55-11e5-9284-b827eb9e62be
63b06c1a-2e55-11e5-9284-b827eb9e62be |
cd0ff9f0-2e55-11e5-9284-b827eb9e62be
cd267b8a-2e55-11e5-9284-b827eb9e62be
cd267b8a-2e55-11e5-9284-b827eb9e62be |
467f87ca-2e55-11e5-9284-b827eb9e62be
4684d31a-2e55-11e5-9284-b827eb9e62be
4684d31a-2e55-11e5-9284-b827eb9e62be |
8128a888-2e56-11e5-9284-b827eb9e62be
812ddaf6-2e56-11e5-9284-b827eb9e62be
812ddaf6-2e56-11e5-9284-b827eb9e62be |
f8dbf30e-2e55-11e5-9284-b827eb9e62be
f8e12496-2e55-11e5-9284-b827eb9e62be
f8e12496-2e55-11e5-9284-b827eb9e62be |
1a4323b8-2e57-11e5-9284-b827eb9e62be
1a484578-2e57-11e5-9284-b827eb9e62be
1a484578-2e57-11e5-9284-b827eb9e62be |
590aabf8-2e56-11e5-9284-b827eb9e62be
590fcbce-2e56-11e5-9284-b827eb9e62be
590fcbce-2e56-11e5-9284-b827eb9e62be |
9a56f6da-2e55-11e5-9284-b827eb9e62be
9a5c283a-2e55-11e5-9284-b827eb9e62be
9a5c283a-2e55-11e5-9284-b827eb9e62be |
cbad2d8a-2e55-11e5-9284-b827eb9e62be
cbb2489c-2e55-11e5-9284-b827eb9e62be
cbb2489c-2e55-11e5-9284-b827eb9e62be |
c90781e2-2e56-11e5-9284-b827eb9e62be
c90ca046-2e56-11e5-9284-b827eb9e62be
c90ca046-2e56-11e5-9284-b827eb9e62be |
package handlers
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/engine-api/types"
goUUID "github.com/nu7hatch/gouuid"
"github.com/pkg/errors"
"github.com/rancher/agent/core/hostInfo"
"github.com/rancher/agent/model"
"github.com/rancher/agent/utilities/constants"
"github.com/rancher/agent/utilities/docker"
"github.com/rancher/agent/utilities/utils"
revents "github.com/rancher/event-subscriber/events"
"github.com/rancher/go-rancher/v2"
"golang.org/x/net/context"
"os"
"time"
)
// Handler aggregates the per-category event handlers of the Rancher
// agent (compute, storage, config update, ping, delegate). It is built
// once by initializeHandlers and consumed by GetHandlers.
type Handler struct {
compute *ComputeHandler
storage *StorageHandler
configUpdate *ConfigUpdateHandler
ping *PingHandler
delegate *DelegateRequestHandler
}
// GetHandlers returns the mapping from Rancher event name to the
// handler that services it. Every handler except "ping" is wrapped
// with logRequest so each incoming event is logged before dispatch.
func GetHandlers() map[string]revents.EventHandler {
handler := initializeHandlers()
return map[string]revents.EventHandler{
"compute.instance.activate": logRequest(handler.compute.InstanceActivate),
"compute.instance.deactivate": logRequest(handler.compute.InstanceDeactivate),
"compute.instance.force.stop": logRequest(handler.compute.InstanceForceStop),
"compute.instance.inspect": logRequest(handler.compute.InstanceInspect),
"compute.instance.pull": logRequest(handler.compute.InstancePull),
"compute.instance.remove": logRequest(handler.compute.InstanceRemove),
"storage.image.activate": logRequest(handler.storage.ImageActivate),
"storage.volume.activate": logRequest(handler.storage.VolumeActivate),
"storage.volume.deactivate": logRequest(handler.storage.VolumeDeactivate),
"storage.volume.remove": logRequest(handler.storage.VolumeRemove),
"delegate.request": logRequest(handler.delegate.DelegateRequest),
"ping": handler.ping.Ping,
"config.update": logRequest(handler.configUpdate.ConfigUpdate),
}
}
// logRequest wraps an event handler so that every incoming event is
// logged (name, event id, resource id) before being dispatched.
func logRequest(f revents.EventHandler) revents.EventHandler {
	return func(ev *revents.Event, cli *client.RancherClient) error {
		logrus.Infof("Received event: Name: %s, Event Id: %s, Resource Id: %s", ev.Name, ev.ID, ev.ResourceID)
		return f(ev, cli)
	}
}
// reply publishes a reply for the given event back to Rancher. A nil
// replyData is replaced with an empty map so the payload is always an
// object. Non-"agent" replies are additionally logged at info level.
func reply(replyData map[string]interface{}, event *revents.Event, cli *client.RancherClient) error {
	if replyData == nil {
		replyData = make(map[string]interface{})
	}
	uuid, err := getUUID()
	if err != nil {
		// typo fixed: "aasign" -> "assign"
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	reply := &client.Publish{
		ResourceId:   event.ResourceID,
		PreviousIds:  []string{event.ID},
		ResourceType: event.ResourceType,
		Name:         event.ReplyTo,
		Data:         replyData,
		Time:         time.Now().UnixNano() / int64(time.Millisecond),
		Resource:     client.Resource{Id: uuid},
	}
	if reply.ResourceType != "agent" {
		logrus.Infof("Reply: %v, %v, %v:%v", event.ID, event.Name, reply.ResourceId, reply.ResourceType)
	}
	logrus.Debugf("Reply: %+v", reply)
	err = publishReply(reply, cli)
	if err != nil {
		return fmt.Errorf("Error sending reply %v: %v", event.ID, err)
	}
	return nil
}
// initializeHandlers builds the Handler set used by GetHandlers. It
// creates the docker clients, retries (up to 10 attempts, 1s apart) to
// fetch docker info, the server version and the agent system images,
// wires up the host-info collectors, and returns the assembled handlers.
// The process exits if any of the initialization data cannot be obtained.
func initializeHandlers() *Handler {
	client := docker.GetClient(constants.DefaultVersion)
	clientWithTimeout, err := docker.NewEnvClientWithTimeout(time.Duration(2) * time.Second)
	if err != nil {
		// Fatalf (was Errorf): continuing with a nil client would panic on
		// the UpdateClientVersion call below.
		logrus.Fatalf("Err: %v. Can not initialize docker client. Exiting go-agent", err)
	}
	clientWithTimeout.UpdateClientVersion(constants.DefaultVersion)
	info := types.Info{}
	version := types.Version{}
	systemImages := map[string]string{}
	flags := [3]bool{}
	// initialize the info and version so we don't have to call docker API every time a ping request comes
	for i := 0; i < 10; i++ {
		in, err := client.Info(context.Background())
		if err == nil {
			info = in
			flags[0] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	for i := 0; i < 10; i++ {
		v, err := client.ServerVersion(context.Background())
		if err == nil {
			version = v
			flags[1] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	for i := 0; i < 10; i++ {
		ret, err := utils.GetAgentImage(client)
		if err == nil {
			systemImages = ret
			flags[2] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	// if we can't get the initialization data the program should exit
	if !flags[0] || !flags[1] || !flags[2] {
		logrus.Fatalf("Failed to initialize handlers. Exiting go-agent")
		os.Exit(1)
	}
	Collectors := []hostInfo.Collector{
		hostInfo.CPUCollector{},
		hostInfo.DiskCollector{
			Unit: 1048576,
			InfoData: model.InfoData{
				Info:    info,
				Version: version,
			},
		},
		hostInfo.IopsCollector{},
		hostInfo.MemoryCollector{
			Unit: 1024.00,
		},
		hostInfo.OSCollector{
			InfoData: model.InfoData{
				Info:    info,
				Version: version,
			},
		},
	}
	computerHandler := ComputeHandler{
		dockerClient: clientWithTimeout,
		infoData: model.InfoData{
			Info:    info,
			Version: version,
		},
	}
	storageHandler := StorageHandler{
		dockerClient: client,
	}
	delegateHandler := DelegateRequestHandler{
		dockerClient: clientWithTimeout,
	}
	pingHandler := PingHandler{
		dockerClient: clientWithTimeout,
		collectors:   Collectors,
		SystemImage:  systemImages,
	}
	configHandler := ConfigUpdateHandler{}
	handler := Handler{
		compute:      &computerHandler,
		storage:      &storageHandler,
		ping:         &pingHandler,
		configUpdate: &configHandler,
		delegate:     &delegateHandler,
	}
	return &handler
}
// replyWithParent publishes a reply for event nested inside a reply to
// its parent event. If the parent does not expect a reply (empty
// ReplyTo), nothing is published and nil is returned.
func replyWithParent(replyData map[string]interface{}, event *revents.Event, parent *revents.Event, cli *client.RancherClient) error {
	// Check first: previously this was tested only after building the
	// whole payload and generating two UUIDs for nothing.
	if parent.ReplyTo == "" {
		return nil
	}
	childUUID, err := getUUID()
	if err != nil {
		// typo fixed: "aasign" -> "assign"
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	child := map[string]interface{}{
		"resourceId":    event.ResourceID,
		"previousIds":   []string{event.ID},
		"resourceType":  event.ResourceType,
		"name":          event.ReplyTo,
		"data":          replyData,
		"id":            childUUID,
		"time":          time.Now().UnixNano() / int64(time.Millisecond),
		"previousNames": []string{event.Name},
	}
	parentUUID, err := getUUID()
	if err != nil {
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	reply := &client.Publish{
		ResourceId:   parent.ResourceID,
		PreviousIds:  []string{parent.ID},
		ResourceType: parent.ResourceType,
		Name:         parent.ReplyTo,
		Data:         child,
		Time:         time.Now().UnixNano() / int64(time.Millisecond),
		Resource:     client.Resource{Id: parentUUID},
	}
	if reply.ResourceType != "agent" {
		logrus.Infof("Reply: %v, %v, %v:%v", event.ID, event.Name, reply.ResourceId, reply.ResourceType)
	}
	logrus.Debugf("Reply: %+v", reply)
	err = publishReply(reply, cli)
	if err != nil {
		return fmt.Errorf("Error sending reply %v: %v", event.ID, err)
	}
	return nil
}
// getUUID returns a freshly generated V4 UUID string.
func getUUID() (string, error) {
	id, err := goUUID.NewV4()
	if err != nil {
		return "", errors.Wrap(err, "can't generate uuid")
	}
	return id.String(), nil
}
// publishReply sends the reply event through the Rancher API client,
// logging (and still returning) any publish error.
func publishReply(reply *client.Publish, apiClient *client.RancherClient) error {
	if _, err := apiClient.Publish.Create(reply); err != nil {
		logrus.Error(err)
		return err
	}
	return nil
}
clean log
package handlers
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/engine-api/types"
goUUID "github.com/nu7hatch/gouuid"
"github.com/pkg/errors"
"github.com/rancher/agent/core/hostInfo"
"github.com/rancher/agent/model"
"github.com/rancher/agent/utilities/constants"
"github.com/rancher/agent/utilities/docker"
"github.com/rancher/agent/utilities/utils"
revents "github.com/rancher/event-subscriber/events"
"github.com/rancher/go-rancher/v2"
"golang.org/x/net/context"
"os"
"time"
)
// Handler aggregates the per-category event handlers of the Rancher
// agent (compute, storage, config update, ping, delegate). It is built
// once by initializeHandlers and consumed by GetHandlers.
type Handler struct {
compute *ComputeHandler
storage *StorageHandler
configUpdate *ConfigUpdateHandler
ping *PingHandler
delegate *DelegateRequestHandler
}
// GetHandlers returns the mapping from Rancher event name to the
// handler that services it. All handlers are wrapped with cleanLog
// (verbose error at debug level, root cause returned); every handler
// except "ping" is additionally wrapped with logRequest.
func GetHandlers() map[string]revents.EventHandler {
handler := initializeHandlers()
return map[string]revents.EventHandler{
"compute.instance.activate": cleanLog(logRequest(handler.compute.InstanceActivate)),
"compute.instance.deactivate": cleanLog(logRequest(handler.compute.InstanceDeactivate)),
"compute.instance.force.stop": cleanLog(logRequest(handler.compute.InstanceForceStop)),
"compute.instance.inspect": cleanLog(logRequest(handler.compute.InstanceInspect)),
"compute.instance.pull": cleanLog(logRequest(handler.compute.InstancePull)),
"compute.instance.remove": cleanLog(logRequest(handler.compute.InstanceRemove)),
"storage.image.activate": cleanLog(logRequest(handler.storage.ImageActivate)),
"storage.volume.activate": cleanLog(logRequest(handler.storage.VolumeActivate)),
"storage.volume.deactivate": cleanLog(logRequest(handler.storage.VolumeDeactivate)),
"storage.volume.remove": cleanLog(logRequest(handler.storage.VolumeRemove)),
"delegate.request": cleanLog(logRequest(handler.delegate.DelegateRequest)),
"ping": cleanLog(handler.ping.Ping),
"config.update": cleanLog(logRequest(handler.configUpdate.ConfigUpdate)),
}
}
// logRequest wraps an event handler so that every incoming event is
// logged (name, event id, resource id) before being dispatched.
func logRequest(f revents.EventHandler) revents.EventHandler {
	return func(ev *revents.Event, cli *client.RancherClient) error {
		logrus.Infof("Received event: Name: %s, Event Id: %s, Resource Id: %s", ev.Name, ev.ID, ev.ResourceID)
		return f(ev, cli)
	}
}
// cleanLog wraps a handler so the full (wrapped) error is logged at
// debug level while only its root cause is returned to the caller.
func cleanLog(f revents.EventHandler) revents.EventHandler {
	return func(ev *revents.Event, cli *client.RancherClient) error {
		handlerErr := f(ev, cli)
		if handlerErr != nil {
			logrus.WithFields(logrus.Fields{"err": handlerErr}).Debug("Verbose error message")
		}
		return errors.Cause(handlerErr)
	}
}
// reply publishes a reply for the given event back to Rancher. A nil
// replyData is replaced with an empty map so the payload is always an
// object. Non-"agent" replies are additionally logged at info level.
func reply(replyData map[string]interface{}, event *revents.Event, cli *client.RancherClient) error {
	if replyData == nil {
		replyData = make(map[string]interface{})
	}
	uuid, err := getUUID()
	if err != nil {
		// typo fixed: "aasign" -> "assign"
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	reply := &client.Publish{
		ResourceId:   event.ResourceID,
		PreviousIds:  []string{event.ID},
		ResourceType: event.ResourceType,
		Name:         event.ReplyTo,
		Data:         replyData,
		Time:         time.Now().UnixNano() / int64(time.Millisecond),
		Resource:     client.Resource{Id: uuid},
	}
	if reply.ResourceType != "agent" {
		logrus.Infof("Reply: %v, %v, %v:%v", event.ID, event.Name, reply.ResourceId, reply.ResourceType)
	}
	logrus.Debugf("Reply: %+v", reply)
	err = publishReply(reply, cli)
	if err != nil {
		return fmt.Errorf("Error sending reply %v: %v", event.ID, err)
	}
	return nil
}
// initializeHandlers builds the Handler set used by GetHandlers. It
// creates the docker clients, retries (up to 10 attempts, 1s apart) to
// fetch docker info, the server version and the agent system images,
// wires up the host-info collectors, and returns the assembled handlers.
// The process exits if any of the initialization data cannot be obtained.
func initializeHandlers() *Handler {
	client := docker.GetClient(constants.DefaultVersion)
	clientWithTimeout, err := docker.NewEnvClientWithTimeout(time.Duration(2) * time.Second)
	if err != nil {
		// Fatalf (was Errorf): continuing with a nil client would panic on
		// the UpdateClientVersion call below.
		logrus.Fatalf("Err: %v. Can not initialize docker client. Exiting go-agent", err)
	}
	clientWithTimeout.UpdateClientVersion(constants.DefaultVersion)
	info := types.Info{}
	version := types.Version{}
	systemImages := map[string]string{}
	flags := [3]bool{}
	// initialize the info and version so we don't have to call docker API every time a ping request comes
	for i := 0; i < 10; i++ {
		in, err := client.Info(context.Background())
		if err == nil {
			info = in
			flags[0] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	for i := 0; i < 10; i++ {
		v, err := client.ServerVersion(context.Background())
		if err == nil {
			version = v
			flags[1] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	for i := 0; i < 10; i++ {
		ret, err := utils.GetAgentImage(client)
		if err == nil {
			systemImages = ret
			flags[2] = true
			break
		}
		time.Sleep(time.Duration(1) * time.Second)
	}
	// if we can't get the initialization data the program should exit
	if !flags[0] || !flags[1] || !flags[2] {
		logrus.Fatalf("Failed to initialize handlers. Exiting go-agent")
		os.Exit(1)
	}
	Collectors := []hostInfo.Collector{
		hostInfo.CPUCollector{},
		hostInfo.DiskCollector{
			Unit: 1048576,
			InfoData: model.InfoData{
				Info:    info,
				Version: version,
			},
		},
		hostInfo.IopsCollector{},
		hostInfo.MemoryCollector{
			Unit: 1024.00,
		},
		hostInfo.OSCollector{
			InfoData: model.InfoData{
				Info:    info,
				Version: version,
			},
		},
	}
	computerHandler := ComputeHandler{
		dockerClient: clientWithTimeout,
		infoData: model.InfoData{
			Info:    info,
			Version: version,
		},
	}
	storageHandler := StorageHandler{
		dockerClient: client,
	}
	delegateHandler := DelegateRequestHandler{
		dockerClient: clientWithTimeout,
	}
	pingHandler := PingHandler{
		dockerClient: clientWithTimeout,
		collectors:   Collectors,
		SystemImage:  systemImages,
	}
	configHandler := ConfigUpdateHandler{}
	handler := Handler{
		compute:      &computerHandler,
		storage:      &storageHandler,
		ping:         &pingHandler,
		configUpdate: &configHandler,
		delegate:     &delegateHandler,
	}
	return &handler
}
// replyWithParent publishes a reply for event nested inside a reply to
// its parent event. If the parent does not expect a reply (empty
// ReplyTo), nothing is published and nil is returned.
func replyWithParent(replyData map[string]interface{}, event *revents.Event, parent *revents.Event, cli *client.RancherClient) error {
	// Check first: previously this was tested only after building the
	// whole payload and generating two UUIDs for nothing.
	if parent.ReplyTo == "" {
		return nil
	}
	childUUID, err := getUUID()
	if err != nil {
		// typo fixed: "aasign" -> "assign"
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	child := map[string]interface{}{
		"resourceId":    event.ResourceID,
		"previousIds":   []string{event.ID},
		"resourceType":  event.ResourceType,
		"name":          event.ReplyTo,
		"data":          replyData,
		"id":            childUUID,
		"time":          time.Now().UnixNano() / int64(time.Millisecond),
		"previousNames": []string{event.Name},
	}
	parentUUID, err := getUUID()
	if err != nil {
		return errors.Wrap(err, "can not assign uuid to reply event")
	}
	reply := &client.Publish{
		ResourceId:   parent.ResourceID,
		PreviousIds:  []string{parent.ID},
		ResourceType: parent.ResourceType,
		Name:         parent.ReplyTo,
		Data:         child,
		Time:         time.Now().UnixNano() / int64(time.Millisecond),
		Resource:     client.Resource{Id: parentUUID},
	}
	if reply.ResourceType != "agent" {
		logrus.Infof("Reply: %v, %v, %v:%v", event.ID, event.Name, reply.ResourceId, reply.ResourceType)
	}
	logrus.Debugf("Reply: %+v", reply)
	err = publishReply(reply, cli)
	if err != nil {
		return fmt.Errorf("Error sending reply %v: %v", event.ID, err)
	}
	return nil
}
// getUUID returns a freshly generated V4 UUID string.
func getUUID() (string, error) {
	id, err := goUUID.NewV4()
	if err != nil {
		return "", errors.Wrap(err, "can't generate uuid")
	}
	return id.String(), nil
}
// publishReply sends the reply event through the Rancher API client,
// logging (and still returning) any publish error.
func publishReply(reply *client.Publish, apiClient *client.RancherClient) error {
	if _, err := apiClient.Publish.Create(reply); err != nil {
		logrus.Error(err)
		return err
	}
	return nil
}
|
package tcp
import (
"bufio"
"net"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/pkg/errors"
"github.com/schollz/croc/src/comm"
"github.com/schollz/croc/src/logger"
"github.com/schollz/croc/src/models"
)
// roomInfo records a room's receiver connection and when the room was
// opened.
type roomInfo struct {
receiver *comm.Comm
opened time.Time
}
// roomMap is the mutex-guarded table of open rooms; all access to the
// rooms field must hold the embedded mutex.
type roomMap struct {
rooms map[string]roomInfo
sync.Mutex
}
// rooms is the package-level registry of open relay rooms, initialized
// by Run.
var rooms roomMap
// Run starts the TCP relay listener on the given port, blocking until
// the listener fails; errors from the listener are logged. The room
// table is (re)initialized before listening starts.
func Run(debugLevel, port string) {
	logger.SetLogLevel(debugLevel)
	rooms.Lock()
	rooms.rooms = make(map[string]roomInfo)
	rooms.Unlock()
	if err := run(port); err != nil {
		log.Error(err)
	}
	// TODO:
	// delete old rooms
}
// run listens for TCP connections on 0.0.0.0:port and spawns one
// goroutine per client to execute the relay protocol. It returns when
// address resolution, listening, or Accept fails.
func run(port string) (err error) {
	log.Debugf("starting TCP server on " + port)
	rAddr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:"+port)
	if err != nil {
		// Return the error instead of panicking: this function already has
		// an error result and Run logs it.
		return errors.Wrap(err, "Error resolving address for :"+port)
	}
	server, err := net.ListenTCP("tcp", rAddr)
	if err != nil {
		return errors.Wrap(err, "Error listening on :"+port)
	}
	defer server.Close()
	// spawn a new goroutine whenever a client connects
	for {
		connection, err := server.Accept()
		if err != nil {
			return errors.Wrap(err, "problem accepting connection")
		}
		log.Debugf("client %s connected", connection.RemoteAddr().String())
		go func(port string, connection net.Conn) {
			errCommunication := clientCommuncation(port, comm.New(connection))
			if errCommunication != nil {
				log.Warnf("relay-%s: %s", connection.RemoteAddr().String(), errCommunication.Error())
			}
		}(port, connection)
	}
}
// clientCommuncation runs the relay protocol for one client: send "ok",
// read the requested room name, then either register the client as the
// room's receiver (first connection, reply "recipient") or treat it as
// the sender (second connection, reply "sender"), piping bytes
// sender->receiver until the sender side closes, after which the room
// is deleted. NOTE(review): pipe as written forwards one direction only.
func clientCommuncation(port string, c *comm.Comm) (err error) {
// send ok to tell client they are connected
err = c.Send("ok")
if err != nil {
return
}
// wait for client to tell me which room they want
room, err := c.Receive()
if err != nil {
return
}
rooms.Lock()
// first connection is always the receiver
if _, ok := rooms.rooms[room]; !ok {
rooms.rooms[room] = roomInfo{
receiver: c,
opened: time.Now(),
}
rooms.Unlock()
// tell the client that they got the room
err = c.Send("recipient")
if err != nil {
return
}
return nil
}
receiver := rooms.rooms[room].receiver
rooms.Unlock()
// second connection is the sender, time to staple connections
var wg sync.WaitGroup
wg.Add(1)
// start piping
go func(com1, com2 *comm.Comm, wg *sync.WaitGroup) {
log.Debug("starting pipes")
pipe(com1.Connection(), com2.Connection())
wg.Done()
log.Debug("done piping")
}(c, receiver, &wg)
// tell the sender everything is ready
err = c.Send("sender")
if err != nil {
return
}
wg.Wait()
// delete room
rooms.Lock()
log.Debugf("deleting room: %s", room)
delete(rooms.rooms, room)
rooms.Unlock()
return nil
}
// chanFromConn creates a channel from a Conn object, and sends everything
// it Read()s from the socket to the channel. A nil value is sent once the
// connection returns an error (including EOF), then the reader goroutine
// exits.
func chanFromConn(conn net.Conn) chan []byte {
	ch := make(chan []byte)
	r := bufio.NewReader(conn)
	go func() {
		for {
			buf := make([]byte, models.TCP_BUFFER_SIZE)
			n, err := r.Read(buf)
			if n > 0 {
				// Copy out the bytes so the recipient never observes a
				// buffer mutated by a later read.
				chunk := append([]byte(nil), buf[:n]...)
				ch <- chunk
			}
			if err != nil {
				ch <- nil
				break
			}
		}
	}()
	return ch
}
// pipe forwards data read from conn1 to conn2 until conn1 closes or a
// write to conn2 fails. NOTE(review): despite the original comment this
// is one-directional as written; the reverse direction is commented out.
func pipe(conn1 net.Conn, conn2 net.Conn) {
	chan1 := chanFromConn(conn1)
	writer2 := bufio.NewWriter(conn2)
	for {
		b1 := <-chan1
		if b1 == nil {
			return
		}
		// Stop on write/flush failure instead of silently draining conn1
		// into a dead destination forever.
		if _, err := writer2.Write(b1); err != nil {
			return
		}
		if err := writer2.Flush(); err != nil {
			return
		}
	}
}
try fix
package tcp
import (
"bufio"
"net"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/pkg/errors"
"github.com/schollz/croc/src/comm"
"github.com/schollz/croc/src/logger"
"github.com/schollz/croc/src/models"
)
// roomInfo records a room's receiver connection and when the room was
// opened.
type roomInfo struct {
receiver *comm.Comm
opened time.Time
}
// roomMap is the mutex-guarded table of open rooms; all access to the
// rooms field must hold the embedded mutex.
type roomMap struct {
rooms map[string]roomInfo
sync.Mutex
}
// rooms is the package-level registry of open relay rooms, initialized
// by Run.
var rooms roomMap
// Run starts the TCP relay listener on the given port, blocking until
// the listener fails; errors from the listener are logged. The room
// table is (re)initialized before listening starts.
func Run(debugLevel, port string) {
	logger.SetLogLevel(debugLevel)
	rooms.Lock()
	rooms.rooms = make(map[string]roomInfo)
	rooms.Unlock()
	if err := run(port); err != nil {
		log.Error(err)
	}
	// TODO:
	// delete old rooms
}
// run listens for TCP connections on 0.0.0.0:port and spawns one
// goroutine per client to execute the relay protocol. It returns when
// address resolution, listening, or Accept fails.
func run(port string) (err error) {
	log.Debugf("starting TCP server on " + port)
	rAddr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:"+port)
	if err != nil {
		// Return the error instead of panicking: this function already has
		// an error result and Run logs it.
		return errors.Wrap(err, "Error resolving address for :"+port)
	}
	server, err := net.ListenTCP("tcp", rAddr)
	if err != nil {
		return errors.Wrap(err, "Error listening on :"+port)
	}
	defer server.Close()
	// spawn a new goroutine whenever a client connects
	for {
		connection, err := server.Accept()
		if err != nil {
			return errors.Wrap(err, "problem accepting connection")
		}
		log.Debugf("client %s connected", connection.RemoteAddr().String())
		go func(port string, connection net.Conn) {
			errCommunication := clientCommuncation(port, comm.New(connection))
			if errCommunication != nil {
				log.Warnf("relay-%s: %s", connection.RemoteAddr().String(), errCommunication.Error())
			}
		}(port, connection)
	}
}
// clientCommuncation runs the relay protocol for one client: send "ok",
// read the requested room name, then either register the client as the
// room's receiver (first connection, reply "recipient") or treat it as
// the sender (second connection, reply "sender"), piping bytes
// sender->receiver until the sender side closes, after which the room
// is deleted. NOTE(review): pipe as written forwards one direction only.
func clientCommuncation(port string, c *comm.Comm) (err error) {
// send ok to tell client they are connected
err = c.Send("ok")
if err != nil {
return
}
// wait for client to tell me which room they want
room, err := c.Receive()
if err != nil {
return
}
rooms.Lock()
// first connection is always the receiver
if _, ok := rooms.rooms[room]; !ok {
rooms.rooms[room] = roomInfo{
receiver: c,
opened: time.Now(),
}
rooms.Unlock()
// tell the client that they got the room
err = c.Send("recipient")
if err != nil {
return
}
return nil
}
receiver := rooms.rooms[room].receiver
rooms.Unlock()
// second connection is the sender, time to staple connections
var wg sync.WaitGroup
wg.Add(1)
// start piping
go func(com1, com2 *comm.Comm, wg *sync.WaitGroup) {
log.Debug("starting pipes")
pipe(com1.Connection(), com2.Connection())
wg.Done()
log.Debug("done piping")
}(c, receiver, &wg)
// tell the sender everything is ready
err = c.Send("sender")
if err != nil {
return
}
wg.Wait()
// delete room
rooms.Lock()
log.Debugf("deleting room: %s", room)
delete(rooms.rooms, room)
rooms.Unlock()
return nil
}
// chanFromConn creates a channel from a Conn object, and sends everything
// it Read()s from the socket to the channel. A nil value is sent once the
// connection returns an error (including EOF), then the reader goroutine
// exits.
func chanFromConn(conn net.Conn) chan []byte {
	ch := make(chan []byte)
	r := bufio.NewReader(conn)
	go func() {
		for {
			buf := make([]byte, models.TCP_BUFFER_SIZE)
			n, err := r.Read(buf)
			if n > 0 {
				// Copy out the bytes so the recipient never observes a
				// buffer mutated by a later read.
				chunk := append([]byte(nil), buf[:n]...)
				ch <- chunk
			}
			if err != nil {
				ch <- nil
				break
			}
		}
	}()
	return ch
}
// pipe forwards data read from conn1 to conn2 until conn1 closes or a
// write to conn2 fails. NOTE(review): despite the original comment this
// is one-directional as written; the reverse direction was removed.
func pipe(conn1 net.Conn, conn2 net.Conn) {
	chan1 := chanFromConn(conn1)
	for {
		b1 := <-chan1
		if b1 == nil {
			return
		}
		// Stop on write failure instead of silently draining conn1 into a
		// dead destination forever (the error was previously ignored).
		if _, err := conn2.Write(b1); err != nil {
			return
		}
	}
}
|
//
// Package eprinttools is a collection of structures and functions for working with the E-Prints REST API
//
// @author R. S. Doiel, <rsdoiel@caltech.edu>
//
// Copyright (c) 2017, Caltech
// All rights not granted herein are expressly reserved by Caltech.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package harvest
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
"time"
// CaltechLibrary packages
"github.com/caltechlibrary/dataset"
"github.com/caltechlibrary/eprinttools"
)
var (
// EPrintsExportBatchSize sets the summary output frequency when exporting content from E-Prints
EPrintsExportBatchSize = 1000
)
// byURI sorts eprint REST URIs in descending numeric order of their
// basename with the extension stripped (e.g. ".../123.xml" sorts before
// ".../7.xml"). A non-numeric basename is never reported as less, so
// such entries keep whatever position the sort gives them.
type byURI []string

func (s byURI) Len() int { return len(s) }

func (s byURI) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s byURI) Less(i, j int) bool {
	// numeric extracts the integer id embedded in a URI's basename.
	numeric := func(uri string) (int, bool) {
		base := strings.TrimSuffix(path.Base(uri), path.Ext(uri))
		v, err := strconv.Atoi(base)
		return v, err == nil
	}
	a, okA := numeric(s[i])
	if !okA {
		return false
	}
	b, okB := numeric(s[j])
	if !okB {
		return false
	}
	//NOTE: We're creating a descending sort, so a should be larger than b
	return a > b
}
// ExportEPrintsKeyList exports the EPrint records identified by keys
// from the EPrints REST API into the dataset collection configured on
// api (creating or updating each record, and attaching the raw XML).
// If saveKeys is non-empty, the successfully exported keys are written
// to that file, newline separated. verbose enables progress logging
// every EPrintsExportBatchSize URIs.
func ExportEPrintsKeyList(api *eprinttools.EPrintsAPI, keys []string, saveKeys string, verbose bool) error {
var (
exportedKeys []string
err error
src []byte
)
c, err := dataset.Open(api.Dataset)
if err != nil {
return fmt.Errorf("ExportEPrintsKeyList() %s, %s", api.Dataset, err)
}
defer c.Close()
// Build one REST URI per requested key.
uris := []string{}
for _, key := range keys {
uri := fmt.Sprintf("/rest/eprint/%s.xml", strings.TrimSpace(key))
uris = append(uris, uri)
}
uriCount := len(uris)
count := uriCount
j := 0 // success count
k := 0 // error count
if verbose == true {
log.Printf("Exporting %d of %d uris", count, uriCount)
}
for i := 0; i < uriCount && i < count; i++ {
uri := uris[i]
rec, xmlSrc, err := api.GetEPrint(uri)
if err != nil {
log.Printf("Failed, %s\n", err)
k++
} else {
// The record's EPrintID (not the requested key) names the saved item.
key := fmt.Sprintf("%d", rec.EPrintID)
src, err = json.Marshal(rec)
if err != nil {
log.Printf("can't marshal key %s, %s", key, err)
} else {
// NOTE: Check to see if we're doing an update or create
if c.HasKey(key) == true {
err = c.UpdateJSON(key, src)
} else {
err = c.CreateJSON(key, src)
}
}
if err == nil {
if len(saveKeys) > 0 {
exportedKeys = append(exportedKeys, key)
}
// We've exported a record successfully, now update select lists
j++
} else {
if verbose == true {
log.Printf("Failed to save eprint %s (%s) to %s, %s\n", key, uri, api.Dataset, err)
}
k++
}
// Attach the raw EPrint XML alongside the JSON record.
// NOTE(review): the AttachFile error is ignored here — confirm intended.
c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc))
}
if verbose == true && (i%EPrintsExportBatchSize) == 0 {
log.Printf("%d/%d uri processed, %d exported, %d unexported", i+1, count, j, k)
}
}
if verbose == true {
log.Printf("%d/%d uri processed, %d exported, %d unexported", len(uris), count, j, k)
}
if len(saveKeys) > 0 {
if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
}
}
return nil
}
// ExportEPrints from highest ID to lowest for cnt. Saves each record in a DB and indexes published ones
//
// A negative count means "export everything". Records are saved as JSON
// into the dataset collection named by api.Dataset (update when the key
// already exists, create otherwise) and the raw XML is attached as
// "<key>.xml". When saveKeys is non-empty the successfully exported keys
// are written to that file, newline separated. Per-record failures are
// logged and counted; only collection-init, listing, and saveKeys-write
// failures are returned as errors.
func ExportEPrints(api *eprinttools.EPrintsAPI, count int, saveKeys string, verbose bool) error {
	var (
		exportedKeys []string
		err          error
		src          []byte
	)
	c, err := dataset.InitCollection(api.Dataset)
	if err != nil {
		return fmt.Errorf("ExportEPrints() %s, %s", api.Dataset, err)
	}
	defer c.Close()
	uris, err := api.ListEPrintsURI()
	if err != nil {
		return fmt.Errorf("Export %s failed, %s", api.URL.String(), err)
	}
	// NOTE: I am sorting the URI by descending ID number so that the
	// newest articles are exported first
	sort.Sort(byURI(uris))
	uriCount := len(uris)
	if count < 0 {
		count = uriCount
	}
	j := 0 // success count
	k := 0 // error count
	if verbose == true {
		log.Printf("Exporting %d of %d uris", count, uriCount)
	}
	for i := 0; i < uriCount && i < count; i++ {
		uri := uris[i]
		rec, xmlSrc, err := api.GetEPrint(uri)
		if err != nil {
			log.Printf("Failed, %s\n", err)
			k++
		} else {
			// The dataset key is the numeric EPrint ID of the fetched record.
			key := fmt.Sprintf("%d", rec.EPrintID)
			src, err = json.Marshal(rec)
			if err != nil {
				log.Printf("Can't marshal key %s, %s", key, err)
			} else {
				// NOTE: Check to see if we're doing an update or create
				if c.HasKey(key) == true {
					err = c.UpdateJSON(key, src)
				} else {
					err = c.CreateJSON(key, src)
				}
			}
			if err == nil {
				if len(saveKeys) > 0 {
					exportedKeys = append(exportedKeys, key)
				}
				// We've exported a record successfully, now update select lists
				j++
			} else {
				if verbose == true {
					log.Printf("Failed to save eprint %s (%s) to %s, %s\n", key, uri, api.Dataset, err)
				}
				k++
			}
			// NOTE(review): the AttachFile error is silently discarded —
			// confirm whether attach failures should be logged/counted.
			c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc))
		}
		// Periodic progress summary every EPrintsExportBatchSize URIs.
		if verbose == true && (i%EPrintsExportBatchSize) == 0 {
			log.Printf("%d/%d uri processed, %d exported, %d unexported", i+1, count, j, k)
		}
	}
	if verbose == true {
		log.Printf("%d/%d uri processed, %d exported, %d unexported", len(uris), count, j, k)
	}
	if len(saveKeys) > 0 {
		if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
			return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
		}
	}
	return nil
}
// ExportModifiedEPrints returns a list of ids modified in one or between the start, end times
//
// Each modified record is fetched from the REST API, saved as JSON into
// the dataset collection named by api.Dataset (update when the key
// already exists, create otherwise), and the raw XML is attached as
// "<key>.xml". When saveKeys is non-empty the successfully exported keys
// are written to that file, newline separated. verbose enables progress
// logging. Per-record failures are logged and counted; only
// collection-init, listing, and saveKeys-write failures return an error.
func ExportModifiedEPrints(api *eprinttools.EPrintsAPI, start, end time.Time, saveKeys string, verbose bool) error {
	var (
		exportedKeys []string
		err          error
		src          []byte
	)
	c, err := dataset.InitCollection(api.Dataset)
	if err != nil {
		return fmt.Errorf("ExportModifiedEPrints() %s, %s", api.Dataset, err)
	}
	defer c.Close()
	uris, err := api.ListModifiedEPrintURI(start, end, verbose)
	if err != nil {
		return fmt.Errorf("Export modified %s to %s failed, %s", start, end, err)
	}
	// NOTE: I am sorting the URI by descending ID number so that the
	// newest articles are exported first
	sort.Sort(byURI(uris))
	count := len(uris)
	j := 0 // success count
	k := 0 // error count
	if verbose == true {
		log.Printf("Exporting %d uris", count)
	}
	for i := 0; i < count; i++ {
		uri := uris[i]
		rec, xmlSrc, err := api.GetEPrint(uri)
		if err != nil {
			if verbose == true {
				log.Printf("Failed to get %s, %s\n", uri, err)
			}
			k++
		} else {
			// The dataset key is the numeric EPrint ID of the fetched record.
			key := fmt.Sprintf("%d", rec.EPrintID)
			src, err = json.Marshal(rec)
			if err != nil {
				// FIX: message previously read "marshel".
				log.Printf("Can't marshal key %s, %s", key, err)
			} else {
				// NOTE: Check to see if we're doing an update or create
				if c.HasKey(key) == true {
					err = c.UpdateJSON(key, src)
				} else {
					err = c.CreateJSON(key, src)
				}
			}
			if err == nil {
				if len(saveKeys) > 0 {
					exportedKeys = append(exportedKeys, key)
				}
				// We've exported a record successfully, now update select lists
				j++
			} else {
				if verbose == true {
					log.Printf("Failed to save eprint %s (%s) to %s, %s\n", key, uri, api.Dataset, err)
				}
				k++
			}
			// FIX: the attach error was previously discarded; surface it in
			// the log so missing XML attachments are diagnosable.
			if aErr := c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc)); aErr != nil {
				log.Printf("Failed to attach %s.xml for %s, %s", key, uri, aErr)
			}
		}
		// Periodic progress summary every EPrintsExportBatchSize URIs.
		if verbose == true && (i%EPrintsExportBatchSize) == 0 {
			log.Printf("%d/%d uri processed, %d exported, %d unexported", i+1, count, j, k)
		}
	}
	if verbose == true {
		log.Printf("%d/%d uri processed, %d exported, %d unexported", len(uris), count, j, k)
	}
	if len(saveKeys) > 0 {
		if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
			return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
		}
	}
	return nil
}
Added pid to logging in the harvest sub-module.
//
// Package eprinttools is a collection of structures and functions for working with the E-Prints REST API
//
// @author R. S. Doiel, <rsdoiel@caltech.edu>
//
// Copyright (c) 2017, Caltech
// All rights not granted herein are expressly reserved by Caltech.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package harvest
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
// CaltechLibrary packages
"github.com/caltechlibrary/dataset"
"github.com/caltechlibrary/eprinttools"
)
var (
// EPrintsExportBatchSize sets the summary output frequency when exporting content from E-Prints
EPrintsExportBatchSize = 1000
)
// byURI orders REST eprint URIs by the numeric ID embedded in their
// basename (e.g. "/rest/eprint/42.xml" -> 42), largest ID first, so that
// sort.Sort produces a descending, newest-first listing.
type byURI []string

// Len implements sort.Interface.
func (s byURI) Len() int { return len(s) }

// Swap implements sort.Interface.
func (s byURI) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less implements sort.Interface: larger IDs sort first. When either
// basename does not parse as an integer, Less reports false, leaving the
// relative order of unparsable entries up to the sort algorithm.
func (s byURI) Less(i, j int) bool {
	id := func(uri string) (int, error) {
		return strconv.Atoi(strings.TrimSuffix(path.Base(uri), path.Ext(uri)))
	}
	a, errA := id(s[i])
	if errA != nil {
		return false
	}
	b, errB := id(s[j])
	if errB != nil {
		return false
	}
	return a > b
}
// ExportEPrintsKeyList export a list of eprints from a list of keys
//
// Same flow as the non-pid variant: each key's record is fetched from
// the REST API, saved as JSON into the dataset collection named by
// api.Dataset (update when the key exists, create otherwise), and the
// raw XML is attached as "<key>.xml". Log lines carry the process ID so
// concurrent harvest runs can be told apart. Per-record failures are
// logged and counted; only collection-open and saveKeys-write failures
// are returned as errors.
func ExportEPrintsKeyList(api *eprinttools.EPrintsAPI, keys []string, saveKeys string, verbose bool) error {
	var (
		exportedKeys []string
		err          error
		src          []byte
	)
	c, err := dataset.Open(api.Dataset)
	if err != nil {
		return fmt.Errorf("ExportEPrintsKeyList() %s, %s", api.Dataset, err)
	}
	defer c.Close()
	// Build a REST URI for each (whitespace-trimmed) key.
	uris := []string{}
	for _, key := range keys {
		uri := fmt.Sprintf("/rest/eprint/%s.xml", strings.TrimSpace(key))
		uris = append(uris, uri)
	}
	pid := os.Getpid()
	uriCount := len(uris)
	count := uriCount
	j := 0 // success count
	k := 0 // error count
	if verbose == true {
		log.Printf("(pid %d) Exporting %d of %d uris", pid, count, uriCount)
	}
	for i := 0; i < uriCount && i < count; i++ {
		uri := uris[i]
		rec, xmlSrc, err := api.GetEPrint(uri)
		if err != nil {
			log.Printf("(pid %d) Failed, %s\n", pid, err)
			k++
		} else {
			// The dataset key is the numeric EPrint ID of the fetched record.
			key := fmt.Sprintf("%d", rec.EPrintID)
			src, err = json.Marshal(rec)
			if err != nil {
				log.Printf("(pid %d) can't marshal key %s, %s", pid, key, err)
			} else {
				// NOTE: Check to see if we're doing an update or create
				if c.HasKey(key) == true {
					err = c.UpdateJSON(key, src)
				} else {
					err = c.CreateJSON(key, src)
				}
			}
			if err == nil {
				if len(saveKeys) > 0 {
					exportedKeys = append(exportedKeys, key)
				}
				// We've exported a record successfully, now update select lists
				j++
			} else {
				if verbose == true {
					log.Printf("(pid %d) Failed to save eprint %s (%s) to %s, %s\n", pid, key, uri, api.Dataset, err)
				}
				k++
			}
			// NOTE(review): the AttachFile error is silently discarded —
			// confirm whether attach failures should be logged/counted.
			c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc))
		}
		// Periodic progress summary every EPrintsExportBatchSize URIs.
		if verbose == true && (i%EPrintsExportBatchSize) == 0 {
			log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, i+1, count, j, k)
		}
	}
	if verbose == true {
		log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, len(uris), count, j, k)
	}
	if len(saveKeys) > 0 {
		if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
			return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
		}
	}
	return nil
}
// ExportEPrints from highest ID to lowest for cnt. Saves each record in a DB and indexes published ones
//
// A negative count means "export everything". Log lines carry the
// process ID so concurrent harvest runs can be told apart. Per-record
// failures are logged and counted; only collection-init, listing, and
// saveKeys-write failures are returned as errors.
func ExportEPrints(api *eprinttools.EPrintsAPI, count int, saveKeys string, verbose bool) error {
	var (
		exportedKeys []string
		err          error
		src          []byte
	)
	c, err := dataset.InitCollection(api.Dataset)
	if err != nil {
		return fmt.Errorf("ExportEPrints() %s, %s", api.Dataset, err)
	}
	defer c.Close()
	uris, err := api.ListEPrintsURI()
	if err != nil {
		return fmt.Errorf("Export %s failed, %s", api.URL.String(), err)
	}
	// NOTE: I am sorting the URI by descending ID number so that the
	// newest articles are exported first
	sort.Sort(byURI(uris))
	pid := os.Getpid()
	uriCount := len(uris)
	if count < 0 {
		count = uriCount
	}
	j := 0 // success count
	k := 0 // error count
	if verbose == true {
		log.Printf("(pid %d) Exporting %d of %d uris", pid, count, uriCount)
	}
	for i := 0; i < uriCount && i < count; i++ {
		uri := uris[i]
		rec, xmlSrc, err := api.GetEPrint(uri)
		if err != nil {
			log.Printf("(pid %d) Failed, %s\n", pid, err)
			k++
		} else {
			// The dataset key is the numeric EPrint ID of the fetched record.
			key := fmt.Sprintf("%d", rec.EPrintID)
			src, err = json.Marshal(rec)
			if err != nil {
				log.Printf("(pid %d) Can't marshal key %s, %s", pid, key, err)
			} else {
				// NOTE: Check to see if we're doing an update or create
				if c.HasKey(key) == true {
					err = c.UpdateJSON(key, src)
				} else {
					err = c.CreateJSON(key, src)
				}
			}
			if err == nil {
				if len(saveKeys) > 0 {
					exportedKeys = append(exportedKeys, key)
				}
				// We've exported a record successfully, now update select lists
				j++
			} else {
				if verbose == true {
					log.Printf("(pid %d) Failed to save eprint %s (%s) to %s, %s\n", pid, key, uri, api.Dataset, err)
				}
				k++
			}
			// NOTE(review): the AttachFile error is silently discarded —
			// confirm whether attach failures should be logged/counted.
			c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc))
		}
		// Periodic progress summary every EPrintsExportBatchSize URIs.
		if verbose == true && (i%EPrintsExportBatchSize) == 0 {
			log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, i+1, count, j, k)
		}
	}
	if verbose == true {
		log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, len(uris), count, j, k)
	}
	if len(saveKeys) > 0 {
		if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
			return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
		}
	}
	return nil
}
// ExportModifiedEPrints returns a list of ids modified in one or between the start, end times
//
// Each modified record is fetched from the REST API, saved as JSON into
// the dataset collection named by api.Dataset (update when the key
// already exists, create otherwise), and the raw XML is attached as
// "<key>.xml". Log lines carry the process ID so concurrent harvest runs
// can be told apart. Per-record failures are logged and counted; only
// collection-init, listing, and saveKeys-write failures return an error.
func ExportModifiedEPrints(api *eprinttools.EPrintsAPI, start, end time.Time, saveKeys string, verbose bool) error {
	var (
		exportedKeys []string
		err          error
		src          []byte
	)
	c, err := dataset.InitCollection(api.Dataset)
	if err != nil {
		return fmt.Errorf("ExportModifiedEPrints() %s, %s", api.Dataset, err)
	}
	defer c.Close()
	uris, err := api.ListModifiedEPrintURI(start, end, verbose)
	if err != nil {
		return fmt.Errorf("Export modified %s to %s failed, %s", start, end, err)
	}
	// NOTE: I am sorting the URI by descending ID number so that the
	// newest articles are exported first
	sort.Sort(byURI(uris))
	pid := os.Getpid()
	count := len(uris)
	j := 0 // success count
	k := 0 // error count
	if verbose == true {
		log.Printf("(pid %d) Exporting %d uris", pid, count)
	}
	for i := 0; i < count; i++ {
		uri := uris[i]
		rec, xmlSrc, err := api.GetEPrint(uri)
		if err != nil {
			if verbose == true {
				log.Printf("(pid %d) Failed to get %s, %s\n", pid, uri, err)
			}
			k++
		} else {
			// The dataset key is the numeric EPrint ID of the fetched record.
			key := fmt.Sprintf("%d", rec.EPrintID)
			src, err = json.Marshal(rec)
			if err != nil {
				// FIX: message previously read "marshel".
				log.Printf("(pid %d) Can't marshal key %s, %s", pid, key, err)
			} else {
				// NOTE: Check to see if we're doing an update or create
				if c.HasKey(key) == true {
					err = c.UpdateJSON(key, src)
				} else {
					err = c.CreateJSON(key, src)
				}
			}
			if err == nil {
				if len(saveKeys) > 0 {
					exportedKeys = append(exportedKeys, key)
				}
				// We've exported a record successfully, now update select lists
				j++
			} else {
				if verbose == true {
					log.Printf("(pid %d) Failed to save eprint %s (%s) to %s, %s\n", pid, key, uri, api.Dataset, err)
				}
				k++
			}
			// FIX: the attach error was previously discarded; surface it in
			// the log so missing XML attachments are diagnosable.
			if aErr := c.AttachFile(key, key+".xml", bytes.NewReader(xmlSrc)); aErr != nil {
				log.Printf("(pid %d) Failed to attach %s.xml for %s, %s", pid, key, uri, aErr)
			}
		}
		// Periodic progress summary every EPrintsExportBatchSize URIs.
		if verbose == true && (i%EPrintsExportBatchSize) == 0 {
			log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, i+1, count, j, k)
		}
	}
	if verbose == true {
		log.Printf("(pid %d) %d/%d uri processed, %d exported, %d unexported", pid, len(uris), count, j, k)
	}
	if len(saveKeys) > 0 {
		if err := ioutil.WriteFile(saveKeys, []byte(strings.Join(exportedKeys, "\n")), 0664); err != nil {
			return fmt.Errorf("Failed to export %s, %s", saveKeys, err)
		}
	}
	return nil
}
|
package helpers
import (
"git.lukas.moe/sn0w/Karen/cache"
"github.com/bwmarrin/discordgo"
"strings"
"fmt"
"time"
"strconv"
)
// botAdmins lists the Discord user IDs that have global bot-admin
// privileges.
var botAdmins = []string{
	"157834823594016768", // 0xFADED#3237
	"165345731706748929", // Serraniel#8978
}

// IsBotAdmin reports whether id is one of the configured bot admins.
func IsBotAdmin(id string) bool {
	for i := range botAdmins {
		if botAdmins[i] == id {
			return true
		}
	}
	return false
}
// IsAdmin reports whether the message author should be treated as an
// admin of the guild the message was sent in: the guild owner and global
// bot admins always qualify. Lookup failures (channel or guild not
// resolvable) return false.
func IsAdmin(msg *discordgo.Message) bool {
	channel, e := cache.GetSession().Channel(msg.ChannelID)
	if e != nil {
		return false
	}
	guild, e := cache.GetSession().Guild(channel.GuildID)
	if e != nil {
		return false
	}
	if msg.Author.ID == guild.OwnerID || IsBotAdmin(msg.Author.ID) {
		return true
	}
	// Check if role may manage server
	// NOTE(review): this iterates over ALL roles defined in the guild, not
	// the roles held by msg.Author — any guild that merely contains an
	// ADMINISTRATOR (0x8) role makes every member pass this check. Verify
	// whether a member-role lookup was intended here.
	for _, role := range guild.Roles {
		if role.Permissions&8 == 8 {
			return true
		}
	}
	return false
}
// RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission
// Otherwise it replies in the originating channel with the localized
// "admin.no_permission" text and does not invoke cb.
func RequireAdmin(msg *discordgo.Message, cb Callback) {
	if !IsAdmin(msg) {
		cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText("admin.no_permission"))
		return
	}
	cb()
}
// GetAvatarUrl returns the CDN URL of the user's avatar at the default
// size of 1024px.
func GetAvatarUrl(user *discordgo.User) string {
	return GetAvatarUrlWithSize(user, 1024)
}

// GetAvatarUrlWithSize returns the CDN URL of the user's avatar at the
// requested size, or "" when the user has no custom avatar set. Animated
// avatars (hash prefixed with "a_") are served as gif, all others as webp.
func GetAvatarUrlWithSize(user *discordgo.User, size uint16) string {
	if user.Avatar == "" {
		return ""
	}
	ext := "webp"
	if strings.HasPrefix(user.Avatar, "a_") {
		ext = "gif"
	}
	return fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.%s?size=%d", user.ID, user.Avatar, ext, size)
}
// GetTimeFromSnowflake decodes the timestamp embedded in a Discord
// snowflake ID and returns it as a UTC time with second resolution.
// The upper bits of a snowflake hold milliseconds since the Discord
// epoch (2015-01-01T00:00:00Z = Unix ms 1420070400000).
func GetTimeFromSnowflake(id string) time.Time {
	// FIX: parse straight into int64. strconv.Atoi yields a platform int,
	// which overflows for real snowflake IDs on 32-bit builds; the shift
	// and epoch addition must also happen in int64.
	iid, err := strconv.ParseInt(id, 10, 64)
	Relax(err)
	return time.Unix(((iid>>22)+1420070400000)/1000, 0).UTC()
}
Fix integer overflow in the Discord snowflake helper.
package helpers
import (
"git.lukas.moe/sn0w/Karen/cache"
"github.com/bwmarrin/discordgo"
"strings"
"fmt"
"time"
"strconv"
)
// DISCORD_EPOCH is the Discord epoch (2015-01-01T00:00:00Z) expressed in
// Unix milliseconds, used when decoding snowflake timestamps.
const (
	DISCORD_EPOCH int64 = 1420070400000
)

// botAdmins lists the Discord user IDs that have global bot-admin
// privileges.
var botAdmins = []string{
	"157834823594016768", // 0xFADED#3237
	"165345731706748929", // Serraniel#8978
}

// IsBotAdmin checks if $id is in $botAdmins
func IsBotAdmin(id string) bool {
	for _, s := range botAdmins {
		if s == id {
			return true
		}
	}
	return false
}
// IsAdmin reports whether the message author should be treated as an
// admin of the guild the message was sent in: the guild owner and global
// bot admins always qualify. Lookup failures (channel or guild not
// resolvable) return false.
func IsAdmin(msg *discordgo.Message) bool {
	channel, e := cache.GetSession().Channel(msg.ChannelID)
	if e != nil {
		return false
	}
	guild, e := cache.GetSession().Guild(channel.GuildID)
	if e != nil {
		return false
	}
	if msg.Author.ID == guild.OwnerID || IsBotAdmin(msg.Author.ID) {
		return true
	}
	// Check if role may manage server
	// NOTE(review): this iterates over ALL roles defined in the guild, not
	// the roles held by msg.Author — any guild that merely contains an
	// ADMINISTRATOR (0x8) role makes every member pass this check. Verify
	// whether a member-role lookup was intended here.
	for _, role := range guild.Roles {
		if role.Permissions&8 == 8 {
			return true
		}
	}
	return false
}
// RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission
// Otherwise it replies in the originating channel with the localized
// "admin.no_permission" text and does not invoke cb.
func RequireAdmin(msg *discordgo.Message, cb Callback) {
	if !IsAdmin(msg) {
		cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText("admin.no_permission"))
		return
	}
	cb()
}
// GetAvatarUrl returns the CDN URL of the user's avatar at the default
// size of 1024px.
func GetAvatarUrl(user *discordgo.User) string {
	return GetAvatarUrlWithSize(user, 1024)
}

// GetAvatarUrlWithSize returns the CDN URL of the user's avatar at the
// requested size, or "" when the user has no custom avatar set. Animated
// avatars (hash prefixed with "a_") are served as gif, all others as webp.
func GetAvatarUrlWithSize(user *discordgo.User, size uint16) string {
	if user.Avatar == "" {
		return ""
	}
	avatarUrl := "https://cdn.discordapp.com/avatars/%s/%s.%s?size=%d"
	if strings.HasPrefix(user.Avatar, "a_") {
		return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, "gif", size)
	}
	return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, "webp", size)
}
// GetTimeFromSnowflake decodes the timestamp embedded in a Discord
// snowflake ID and returns it as a UTC time with second resolution.
// Converting to int64 before the shift keeps the arithmetic from
// overflowing on 32-bit platforms.
// NOTE(review): strconv.Atoi still parses into a platform-sized int
// first, so the parse itself can fail/overflow on 32-bit builds —
// consider strconv.ParseInt(id, 10, 64).
func GetTimeFromSnowflake(id string) time.Time {
	iid, err := strconv.Atoi(id)
	Relax(err)
	return time.Unix(((int64(iid)>>22)+DISCORD_EPOCH)/1000, 0).UTC()
}
|
package pgghelpers
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"sync"
"text/template"
"github.com/Masterminds/sprig"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
ggdescriptor "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
"github.com/huandu/xstrings"
options "google.golang.org/genproto/googleapis/api/annotations"
)
// jsReservedRe matches JavaScript reserved words so identifiers can be
// suffixed when emitting JS code (see jsSuffixReservedKeyword).
var jsReservedRe = regexp.MustCompile(`(^|[^A-Za-z])(do|if|in|for|let|new|try|var|case|else|enum|eval|false|null|this|true|void|with|break|catch|class|const|super|throw|while|yield|delete|export|import|public|return|static|switch|typeof|default|extends|finally|package|private|continue|debugger|function|arguments|interface|protected|implements|instanceof)($|[^A-Za-z])`)
var (
	registry *ggdescriptor.Registry // some helpers need access to registry
)
// ProtoHelpersFuncMap is the template.FuncMap exposed to code-generation
// templates. It mixes generic string/JSON/arithmetic helpers (defined
// inline) with protobuf-descriptor-aware helpers declared elsewhere in
// this package. FIX: the string helpers that slice s[:1] and the
// first/last helpers now guard against empty input instead of panicking,
// which previously aborted the whole generation run.
var ProtoHelpersFuncMap = template.FuncMap{
	"string": func(i interface {
		String() string
	}) string {
		return i.String()
	},
	// json renders v as compact JSON; on failure the error text is returned
	// so the template still produces output.
	"json": func(v interface{}) string {
		a, err := json.Marshal(v)
		if err != nil {
			return err.Error()
		}
		return string(a)
	},
	"prettyjson": func(v interface{}) string {
		a, err := json.MarshalIndent(v, "", "  ")
		if err != nil {
			return err.Error()
		}
		return string(a)
	},
	// splitArray splits s on sep, dropping empty segments.
	"splitArray": func(sep string, s string) []interface{} {
		var r []interface{}
		t := strings.Split(s, sep)
		for i := range t {
			if t[i] != "" {
				r = append(r, t[i])
			}
		}
		return r
	},
	// first/last return "" on an empty slice instead of panicking.
	"first": func(a []string) string {
		if len(a) == 0 {
			return ""
		}
		return a[0]
	},
	"last": func(a []string) string {
		if len(a) == 0 {
			return ""
		}
		return a[len(a)-1]
	},
	"concat": func(a string, b ...string) string {
		return strings.Join(append([]string{a}, b...), "")
	},
	"join": func(sep string, a ...string) string {
		return strings.Join(a, sep)
	},
	// upperFirst/lowerFirst return "" for empty input instead of panicking.
	"upperFirst": func(s string) string {
		if s == "" {
			return ""
		}
		return strings.ToUpper(s[:1]) + s[1:]
	},
	"lowerFirst": func(s string) string {
		if s == "" {
			return ""
		}
		return strings.ToLower(s[:1]) + s[1:]
	},
	"camelCase": func(s string) string {
		if s == "" {
			return ""
		}
		if len(s) > 1 {
			return xstrings.ToCamelCase(s)
		}
		return strings.ToUpper(s[:1])
	},
	"lowerCamelCase": func(s string) string {
		if s == "" {
			return ""
		}
		if len(s) > 1 {
			s = xstrings.ToCamelCase(s)
		}
		return strings.ToLower(s[:1]) + s[1:]
	},
	"kebabCase": func(s string) string {
		return strings.Replace(xstrings.ToSnakeCase(s), "_", "-", -1)
	},
	"contains": func(sub, s string) bool {
		return strings.Contains(s, sub)
	},
	"trimstr": func(cutset, s string) string {
		return strings.Trim(s, cutset)
	},
	// index panics on misuse — template authors get an immediate,
	// descriptive failure rather than a silent wrong value.
	"index": func(array interface{}, i int) interface{} {
		slice := reflect.ValueOf(array)
		if slice.Kind() != reflect.Slice {
			panic("Error in index(): given a non-slice type")
		}
		if i < 0 || i >= slice.Len() {
			panic("Error in index(): index out of bounds")
		}
		return slice.Index(i).Interface()
	},
	"add": func(a int, b int) int {
		return a + b
	},
	"subtract": func(a int, b int) int {
		return a - b
	},
	"multiply": func(a int, b int) int {
		return a * b
	},
	"divide": func(a int, b int) int {
		if b == 0 {
			panic("psssst ... little help here ... you cannot divide by 0")
		}
		return a / b
	},
	"snakeCase":                    xstrings.ToSnakeCase,
	"getProtoFile":                 getProtoFile,
	"getMessageType":               getMessageType,
	"getEnumValue":                 getEnumValue,
	"isFieldMessage":               isFieldMessage,
	"isFieldMessageTimeStamp":      isFieldMessageTimeStamp,
	"isFieldRepeated":              isFieldRepeated,
	"haskellType":                  haskellType,
	"goType":                       goType,
	"goZeroValue":                  goZeroValue,
	"goTypeWithPackage":            goTypeWithPackage,
	"goTypeWithGoPackage":          goTypeWithGoPackage,
	"jsType":                       jsType,
	"jsSuffixReserved":             jsSuffixReservedKeyword,
	"namespacedFlowType":           namespacedFlowType,
	"httpVerb":                     httpVerb,
	"httpPath":                     httpPath,
	"httpPathsAdditionalBindings":  httpPathsAdditionalBindings,
	"httpBody":                     httpBody,
	"shortType":                    shortType,
	"urlHasVarsFromMessage":        urlHasVarsFromMessage,
	"lowerGoNormalize":             lowerGoNormalize,
	"goNormalize":                  goNormalize,
	"leadingComment":               leadingComment,
	"trailingComment":              trailingComment,
	"leadingDetachedComments":      leadingDetachedComments,
	"stringFileOptionsExtension":   stringFileOptionsExtension,
	"stringMessageExtension":       stringMessageExtension,
	"stringFieldExtension":         stringFieldExtension,
	"int64FieldExtension":          int64FieldExtension,
	"int64MessageExtension":        int64MessageExtension,
	"stringMethodOptionsExtension": stringMethodOptionsExtension,
	"boolMethodOptionsExtension":   boolMethodOptionsExtension,
	"boolMessageExtension":         boolMessageExtension,
	"boolFieldExtension":           boolFieldExtension,
	"isFieldMap":                   isFieldMap,
	"fieldMapKeyType":              fieldMapKeyType,
	"fieldMapValueType":            fieldMapValueType,
	"replaceDict":                  replaceDict,
	"setStore":                     setStore,
	"getStore":                     getStore,
	"goPkg":                        goPkg,
	"goPkgLastElement":             goPkgLastElement,
}
// pathMap indexes AST nodes of the file(s) currently being processed to
// their source location; populated by InitPathMap/InitPathMaps and read
// by the comment helpers (leadingComment etc.).
var pathMap map[interface{}]*descriptor.SourceCodeInfo_Location

// store holds values shared across template invocations via
// setStore/getStore.
var store = newStore()

// Utility to store some vars across multiple scope
type globalStore struct {
	store map[string]interface{}
	mu    sync.Mutex
}

// newStore returns an empty, ready-to-use globalStore.
func newStore() *globalStore {
	return &globalStore{
		store: make(map[string]interface{}),
	}
}

// getData returns the value stored under key, or false when the key is
// absent. NOTE: callers cannot distinguish a stored false from "missing".
func (s *globalStore) getData(key string) interface{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	if v, ok := s.store[key]; ok {
		return v
	}
	return false
}

// setData stores o under key, replacing any previous value.
func (s *globalStore) setData(key string, o interface{}) {
	s.mu.Lock()
	s.store[key] = o
	s.mu.Unlock()
}

// setStore is the template-facing setter; it returns "" so it can be used
// in a template pipeline without emitting output.
func setStore(key string, o interface{}) string {
	store.setData(key, o)
	return ""
}

// getStore is the template-facing getter for values saved with setStore.
func getStore(key string) interface{} {
	return store.getData(key)
}
// SetRegistry installs the grpc-gateway descriptor registry used by
// helpers such as getProtoFile and getMessageType.
func SetRegistry(reg *ggdescriptor.Registry) {
	registry = reg
}

// InitPathMap (re)builds pathMap from a single file's source code info.
func InitPathMap(file *descriptor.FileDescriptorProto) {
	pathMap = make(map[interface{}]*descriptor.SourceCodeInfo_Location)
	addToPathMap(file.GetSourceCodeInfo(), file, []int32{})
}

// InitPathMaps (re)builds pathMap from several files' source code info.
func InitPathMaps(files []*descriptor.FileDescriptorProto) {
	pathMap = make(map[interface{}]*descriptor.SourceCodeInfo_Location)
	for _, file := range files {
		addToPathMap(file.GetSourceCodeInfo(), file, []int32{})
	}
}
// addToPathMap traverses through the AST adding SourceCodeInfo_Location entries to the pathMap.
// Since the AST is a tree, the recursion finishes once it has gone through all the nodes.
// The numeric literals passed to newPath are the proto field numbers of
// the children inside their parent descriptor message (e.g. 4 =
// FileDescriptorProto.message_type).
// NOTE(review): the loop variable `descriptor` shadows the imported
// `descriptor` package inside each loop body — harmless here, but worth
// renaming if the bodies ever need the package.
func addToPathMap(info *descriptor.SourceCodeInfo, i interface{}, path []int32) {
	loc := findLoc(info, path)
	if loc != nil {
		pathMap[i] = loc
	}
	switch d := i.(type) {
	case *descriptor.FileDescriptorProto:
		for index, descriptor := range d.MessageType {
			addToPathMap(info, descriptor, newPath(path, 4, index))
		}
		for index, descriptor := range d.EnumType {
			addToPathMap(info, descriptor, newPath(path, 5, index))
		}
		for index, descriptor := range d.Service {
			addToPathMap(info, descriptor, newPath(path, 6, index))
		}
	case *descriptor.DescriptorProto:
		for index, descriptor := range d.Field {
			addToPathMap(info, descriptor, newPath(path, 2, index))
		}
		for index, descriptor := range d.NestedType {
			addToPathMap(info, descriptor, newPath(path, 3, index))
		}
		for index, descriptor := range d.EnumType {
			addToPathMap(info, descriptor, newPath(path, 4, index))
		}
	case *descriptor.EnumDescriptorProto:
		for index, descriptor := range d.Value {
			addToPathMap(info, descriptor, newPath(path, 2, index))
		}
	case *descriptor.ServiceDescriptorProto:
		for index, descriptor := range d.Method {
			addToPathMap(info, descriptor, newPath(path, 2, index))
		}
	}
}
// newPath returns base extended with (field, index) as a fresh slice,
// leaving base itself untouched.
func newPath(base []int32, field int32, index int) []int32 {
	p := make([]int32, 0, len(base)+2)
	p = append(p, base...)
	return append(p, field, int32(index))
}
// findLoc returns the SourceCodeInfo_Location whose path exactly matches
// path, or nil when no location in info has that path.
func findLoc(info *descriptor.SourceCodeInfo, path []int32) *descriptor.SourceCodeInfo_Location {
	for _, loc := range info.GetLocation() {
		if samePath(loc.Path, path) {
			return loc
		}
	}
	return nil
}
// samePath reports whether a and b have the same length and identical
// elements; two empty (or nil) paths compare equal.
func samePath(a, b []int32) bool {
	if len(a) != len(b) {
		return false
	}
	equal := true
	for i := 0; i < len(a) && equal; i++ {
		equal = a[i] == b[i]
	}
	return equal
}
/*func findSourceInfoLocation(i interface{}) *descriptor.SourceCodeInfo_Location {
if pathMap == nil {
return nil
}
return pathMap[i]
}*/
// leadingComment returns the leading comment recorded for AST node i, or
// "" when the node has no pathMap entry (generated proto getters are
// nil-receiver safe, so the nil loc does not panic — presumably relied
// on here; confirm against the generated descriptor code).
func leadingComment(i interface{}) string {
	loc := pathMap[i]
	return loc.GetLeadingComments()
}

// trailingComment returns the trailing comment recorded for AST node i,
// or "" when the node has no pathMap entry.
func trailingComment(i interface{}) string {
	loc := pathMap[i]
	return loc.GetTrailingComments()
}

// leadingDetachedComments returns the detached comment blocks preceding
// AST node i, or nil when the node has no pathMap entry.
func leadingDetachedComments(i interface{}) []string {
	loc := pathMap[i]
	return loc.GetLeadingDetachedComments()
}
// stringMethodOptionsExtension extracts method options of a string type.
// To define your own extensions see:
// https://developers.google.com/protocol-buffers/docs/proto#customoptions
// Typically the fieldID of private extensions should be in the range:
// 50000-99999
func stringMethodOptionsExtension(fieldID int32, f *descriptor.MethodDescriptorProto) string {
if f == nil {
return ""
}
if f.Options == nil {
return ""
}
var extendedType *descriptor.MethodOptions
var extensionType *string
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("bytes,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return ""
}
str, ok := ext.(*string)
if !ok {
return ""
}
return *str
}
// stringFileOptionsExtension extracts file options of a string type.
// To define your own extensions see:
// https://developers.google.com/protocol-buffers/docs/proto#customoptions
// Typically the fieldID of private extensions should be in the range:
// 50000-99999
func stringFileOptionsExtension(fieldID int32, f *descriptor.FileDescriptorProto) string {
if f == nil {
return ""
}
if f.Options == nil {
return ""
}
var extendedType *descriptor.FileOptions
var extensionType *string
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("bytes,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return ""
}
str, ok := ext.(*string)
if !ok {
return ""
}
return *str
}
func stringFieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) string {
if f == nil {
return ""
}
if f.Options == nil {
return ""
}
var extendedType *descriptor.FieldOptions
var extensionType *string
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("bytes,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return ""
}
str, ok := ext.(*string)
if !ok {
return ""
}
return *str
}
func int64FieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) int64 {
if f == nil {
return 0
}
if f.Options == nil {
return 0
}
var extendedType *descriptor.FieldOptions
var extensionType *int64
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("varint,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return 0
}
i, ok := ext.(*int64)
if !ok {
return 0
}
return *i
}
func int64MessageExtension(fieldID int32, f *descriptor.DescriptorProto) int64 {
if f == nil {
return 0
}
if f.Options == nil {
return 0
}
var extendedType *descriptor.MessageOptions
var extensionType *int64
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("varint,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return 0
}
i, ok := ext.(*int64)
if !ok {
return 0
}
return *i
}
func stringMessageExtension(fieldID int32, f *descriptor.DescriptorProto) string {
if f == nil {
return ""
}
if f.Options == nil {
return ""
}
var extendedType *descriptor.MessageOptions
var extensionType *string
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("bytes,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return ""
}
str, ok := ext.(*string)
if !ok {
return ""
}
return *str
}
func boolMethodOptionsExtension(fieldID int32, f *descriptor.MethodDescriptorProto) bool {
if f == nil {
return false
}
if f.Options == nil {
return false
}
var extendedType *descriptor.MethodOptions
var extensionType *bool
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("varint,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return false
}
b, ok := ext.(*bool)
if !ok {
return false
}
return *b
}
func boolFieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) bool {
if f == nil {
return false
}
if f.Options == nil {
return false
}
var extendedType *descriptor.FieldOptions
var extensionType *bool
eds := proto.RegisteredExtensions(f.Options)
if eds[fieldID] == nil {
ed := &proto.ExtensionDesc{
ExtendedType: extendedType,
ExtensionType: extensionType,
Field: fieldID,
Tag: fmt.Sprintf("varint,%d", fieldID),
}
proto.RegisterExtension(ed)
eds = proto.RegisteredExtensions(f.Options)
}
ext, err := proto.GetExtension(f.Options, eds[fieldID])
if err != nil {
return false
}
b, ok := ext.(*bool)
if !ok {
return false
}
return *b
}
// boolMessageExtension reads the custom bool message-option identified by
// fieldID from f.Options. It returns false when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *bool.
func boolMessageExtension(fieldID int32, f *descriptor.DescriptorProto) bool {
	if f == nil {
		return false
	}
	if f.Options == nil {
		return false
	}
	var extendedType *descriptor.MessageOptions
	var extensionType *bool
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return false
	}
	b, ok := ext.(*bool)
	if !ok {
		return false
	}
	return *b
}
// init merges sprig's text-template helper functions into
// ProtoHelpersFuncMap so templates can use both sets.
func init() {
	for k, v := range sprig.TxtFuncMap() {
		ProtoHelpersFuncMap[k] = v
	}
}
// getProtoFile looks name up in the shared registry. It returns nil when
// no registry was configured (see SetRegistry) and panics when the lookup
// itself fails.
func getProtoFile(name string) *ggdescriptor.File {
	if registry == nil {
		return nil
	}
	file, err := registry.LookupFile(name)
	if err != nil {
		panic(err)
	}
	return file
}
// getMessageType resolves a fully-qualified message name. With a registry
// configured it delegates to LookupMsg (panicking on failure); otherwise it
// falls back to scanning f's top-level messages for the last name component.
func getMessageType(f *descriptor.FileDescriptorProto, name string) *ggdescriptor.Message {
	if registry != nil {
		msg, err := registry.LookupMsg(".", name)
		if err != nil {
			panic(err)
		}
		return msg
	}
	// name is in the form .packageName.MessageTypeName.InnerMessageTypeName...
	// e.g. .article.ProductTag
	splits := strings.Split(name, ".")
	target := splits[len(splits)-1]
	for _, m := range f.MessageType {
		if target == *m.Name {
			return &ggdescriptor.Message{
				DescriptorProto: m,
			}
		}
	}
	// NOTE(review): the fallback only sees top-level messages, so nested
	// types resolve to nil without a registry — confirm callers expect that.
	return nil
}
// getEnumValue returns the values of the first enum in f whose name matches
// name case-insensitively, or nil when no enum matches.
func getEnumValue(f []*descriptor.EnumDescriptorProto, name string) []*descriptor.EnumValueDescriptorProto {
	for _, enum := range f {
		if !strings.EqualFold(*enum.Name, name) {
			continue
		}
		return enum.GetValue()
	}
	return nil
}
// isFieldMessageTimeStamp reports whether f is a message field whose type
// is google.protobuf.Timestamp.
func isFieldMessageTimeStamp(f *descriptor.FieldDescriptorProto) bool {
	if f == nil || f.Type == nil || *f.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE {
		return false
	}
	// Guard TypeName as well: the previous code dereferenced it
	// unconditionally and would panic on a malformed descriptor.
	return f.TypeName != nil && *f.TypeName == ".google.protobuf.Timestamp"
}
// isFieldMessage reports whether f is a message-typed field.
func isFieldMessage(f *descriptor.FieldDescriptorProto) bool {
	return f.Type != nil && *f.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE
}
// isFieldRepeated reports whether f carries the repeated label.
// A nil field, or one without type/label information, is never repeated.
func isFieldRepeated(f *descriptor.FieldDescriptorProto) bool {
	if f == nil || f.Type == nil || f.Label == nil {
		return false
	}
	return *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
}
// isFieldMap reports whether f is a protobuf map field. It locates the
// synthesized nested entry message (named after f's type) inside m and
// checks that its fields are exactly "key" (number 1) and "value" (number 2).
// NOTE(review): any nested message with that exact shape would also match —
// confirm callers always pass the field's own parent message.
func isFieldMap(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) bool {
	if f.TypeName == nil {
		return false
	}
	shortName := shortType(*f.TypeName)
	var nt *descriptor.DescriptorProto
	for _, t := range m.NestedType {
		if *t.Name == shortName {
			nt = t
			break
		}
	}
	if nt == nil {
		return false
	}
	for _, f := range nt.Field {
		switch *f.Name {
		case "key":
			if *f.Number != 1 {
				return false
			}
		case "value":
			if *f.Number != 2 {
				return false
			}
		default:
			// Real map entries carry only key/value; anything else rules
			// this out as a map.
			return false
		}
	}
	return true
}
// fieldMapKeyType returns the "key" field of the map-entry message backing
// f (looked up among m's nested types), or nil when f is not a map field.
func fieldMapKeyType(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) *descriptor.FieldDescriptorProto {
	if f.TypeName == nil {
		return nil
	}
	shortName := shortType(*f.TypeName)
	var nt *descriptor.DescriptorProto
	for _, t := range m.NestedType {
		if *t.Name == shortName {
			nt = t
			break
		}
	}
	if nt == nil {
		return nil
	}
	for _, f := range nt.Field {
		if *f.Name == "key" {
			return f
		}
	}
	return nil
}
// fieldMapValueType returns the "value" field of the map-entry message
// backing f (looked up among m's nested types), or nil when f is not a
// map field.
func fieldMapValueType(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) *descriptor.FieldDescriptorProto {
	if f.TypeName == nil {
		return nil
	}
	shortName := shortType(*f.TypeName)
	var nt *descriptor.DescriptorProto
	for _, t := range m.NestedType {
		if *t.Name == shortName {
			nt = t
			break
		}
	}
	if nt == nil {
		return nil
	}
	for _, f := range nt.Field {
		if *f.Name == "value" {
			return f
		}
	}
	return nil
}
// goTypeWithGoPackage types the field MESSAGE and ENUM with the go_package name.
// This method is an evolution of goTypeWithPackage. It handles message embedded.
//
// example:
// ```proto
// message GetArticleResponse {
// Article article = 1;
// message Storage {
// string code = 1;
// }
// repeated Storage storages = 2;
// }
// ```
// Then the type of `storages` is `GetArticleResponse_Storage` for the go language.
func goTypeWithGoPackage(p *descriptor.FileDescriptorProto, f *descriptor.FieldDescriptorProto) string {
	pkg := ""
	if *f.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE || *f.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
		if isTimestampPackage(*f.TypeName) {
			pkg = "timestamp"
		} else {
			// GetGoPackage is nil-safe; the previous explicit dereference of
			// p.GetOptions().GoPackage panicked when no go_package was set.
			pkg = p.GetOptions().GetGoPackage()
			// "import/path;packagename" form: keep the package name part.
			if strings.Contains(pkg, ";") {
				pkg = strings.Split(pkg, ";")[1]
			}
		}
	}
	return goTypeWithEmbedded(pkg, f, p)
}
// goTypeWithPackage types MESSAGE and ENUM fields with the proto package
// name extracted from the fully-qualified type name.
// Warning does not handle message embedded like goTypeWithGoPackage does.
func goTypeWithPackage(f *descriptor.FieldDescriptorProto) string {
	pkg := ""
	if *f.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE || *f.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
		if isTimestampPackage(*f.TypeName) {
			// Well-known Timestamp lives in its own Go package.
			pkg = "timestamp"
		} else {
			pkg = getPackageTypeName(*f.TypeName)
		}
	}
	return goType(pkg, f)
}
// haskellType maps a protobuf field to its Haskell type name, wrapping
// repeated fields in a list ("[T]"). Two historical quirks are preserved:
// enum fields ignore the repeated label, and unknown types map to "Generic".
func haskellType(pkg string, f *descriptor.FieldDescriptorProto) string {
	repeated := f.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED
	listOf := func(base string) string {
		if repeated {
			return "[" + base + "]"
		}
		return base
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return listOf("Float")
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		return listOf("Int64")
	case descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_UINT32:
		return listOf("Word")
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		return listOf("Int")
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return listOf("Bool")
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return listOf("Text")
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		if pkg != "" {
			pkg = pkg + "."
		}
		return listOf(pkg + shortType(*f.TypeName))
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return listOf("Word8")
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		// Enums never get the list wrapper (kept from the original).
		return pkg + shortType(*f.TypeName)
	default:
		return "Generic"
	}
}
// goTypeWithEmbedded maps a protobuf field to its Go type, prefixing
// message/enum types with pkg and rewriting types nested inside another
// message of file p into protoc-gen-go's Outer_Inner form.
func goTypeWithEmbedded(pkg string, f *descriptor.FieldDescriptorProto, p *descriptor.FileDescriptorProto) string {
	if pkg != "" {
		pkg = pkg + "."
	}
	repeated := f.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED
	sliceOf := func(base string) string {
		if repeated {
			return "[]" + base
		}
		return base
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return sliceOf("float64")
	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return sliceOf("float32")
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		return sliceOf("int64")
	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		return sliceOf("uint64")
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		return sliceOf("int32")
	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		return sliceOf("uint32")
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return sliceOf("bool")
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return sliceOf("string")
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		if repeated {
			// Only the repeated branch resolved embedded names in the
			// original; that asymmetry is preserved.
			return "[]*" + pkg + shortType(embeddedTypeName(*f.TypeName, p))
		}
		return "*" + pkg + shortType(*f.TypeName)
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		// Singular bytes intentionally renders as "byte" (historical).
		return sliceOf("byte")
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		return "*" + pkg + shortType(embeddedTypeName(*f.TypeName, p))
	default:
		return "interface{}"
	}
}

// embeddedTypeName rewrites a fully-qualified type name nested inside
// another message of file p into the Outer_Inner form; other names are
// returned unchanged. (The original's "len(a)+1 > len(b)+1" comparison is
// just len(a) > len(b).)
func embeddedTypeName(typeName string, p *descriptor.FileDescriptorProto) string {
	fieldPackage := strings.Split(typeName, ".")
	filePackage := strings.Split(*p.Package, ".")
	if len(fieldPackage) > 1 && len(fieldPackage) > len(filePackage) {
		return strings.Join(fieldPackage[len(filePackage)+1:], "_")
	}
	return typeName
}
// Deprecated: use goTypeWithEmbedded instead.
// goType maps a protobuf field to its Go type with an optional package
// prefix; repeated fields become slices. Singular bytes renders as "byte"
// and enums ignore the repeated label (both kept from the original).
func goType(pkg string, f *descriptor.FieldDescriptorProto) string {
	if pkg != "" {
		pkg = pkg + "."
	}
	repeated := f.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED
	sliceOf := func(base string) string {
		if repeated {
			return "[]" + base
		}
		return base
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return sliceOf("float64")
	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return sliceOf("float32")
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		return sliceOf("int64")
	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		return sliceOf("uint64")
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		return sliceOf("int32")
	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		return sliceOf("uint32")
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return sliceOf("bool")
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return sliceOf("string")
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		return sliceOf("*" + pkg + shortType(*f.TypeName))
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return sliceOf("byte")
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		return "*" + pkg + shortType(*f.TypeName)
	default:
		return "interface{}"
	}
}
// goZeroValue returns the Go literal used as the zero value for f's type.
// Repeated fields always zero to nil.
func goZeroValue(f *descriptor.FieldDescriptorProto) string {
	const nilString = "nil"
	if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
		return nilString
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return "0.0"
	case descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_BYTES:
		return "0"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return "false"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return "\"\""
	default:
		// MESSAGE, ENUM and anything unrecognized zero to nil.
		return nilString
	}
}
// jsType maps a protobuf field to its Flow/JS type annotation, wrapping
// repeated fields in Array<...>. Unknown types fall back to "any".
func jsType(f *descriptor.FieldDescriptorProto) string {
	template := "%s"
	if isFieldRepeated(f) {
		template = "Array<%s>"
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
		descriptor.FieldDescriptorProto_TYPE_ENUM:
		// Messages/enums use the $-namespaced Flow type name.
		return fmt.Sprintf(template, namespacedFlowType(*f.TypeName))
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT,
		descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_FIXED64,
		descriptor.FieldDescriptorProto_TYPE_FIXED32,
		descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
		descriptor.FieldDescriptorProto_TYPE_SINT32,
		descriptor.FieldDescriptorProto_TYPE_SINT64:
		return fmt.Sprintf(template, "number")
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return fmt.Sprintf(template, "boolean")
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return fmt.Sprintf(template, "Uint8Array")
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return fmt.Sprintf(template, "string")
	default:
		return fmt.Sprintf(template, "any")
	}
}
// jsSuffixReservedKeyword appends "_" to any JavaScript reserved word in s
// (word boundaries are handled by jsReservedRe's capture groups).
func jsSuffixReservedKeyword(s string) string {
	return jsReservedRe.ReplaceAllString(s, "${1}${2}_${3}")
}
// isTimestampPackage reports whether s is the fully-qualified name of the
// well-known google.protobuf.Timestamp type.
func isTimestampPackage(s string) bool {
	return s == ".google.protobuf.Timestamp"
}
// getPackageTypeName extracts the package component from a dotted type name
// such as ".article.ProductTag" (→ "article"); it returns "" when the name
// contains no dot.
func getPackageTypeName(s string) string {
	parts := strings.SplitN(s, ".", 3)
	if len(parts) < 2 {
		return ""
	}
	return parts[1]
}
// shortType returns the last dot-separated component of s (the bare type
// name of a fully-qualified protobuf name).
func shortType(s string) string {
	if i := strings.LastIndex(s, "."); i >= 0 {
		return s[i+1:]
	}
	return s
}
// namespacedFlowType converts a fully-qualified name like ".pkg.Msg" into
// the Flow-style identifier "pkg$Msg".
func namespacedFlowType(s string) string {
	return strings.Replace(strings.TrimLeft(s, "."), ".", "$", -1)
}
// httpPath returns the URL template bound to m via the google.api.http
// method option, or "" when no pattern is set. On extension-decoding
// errors the error text itself is returned — historical quirk, kept
// because templates may render it.
func httpPath(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	switch t := opts.Pattern.(type) {
	default:
		return ""
	case *options.HttpRule_Get:
		return t.Get
	case *options.HttpRule_Post:
		return t.Post
	case *options.HttpRule_Put:
		return t.Put
	case *options.HttpRule_Delete:
		return t.Delete
	case *options.HttpRule_Patch:
		return t.Patch
	case *options.HttpRule_Custom:
		return t.Custom.Path
	}
}
// httpPathsAdditionalBindings returns the URL templates of all
// additional_bindings on m's google.api.http option.
// NOTE(review): unlike httpPath, this panics on extension errors —
// confirm whether that inconsistency is intended.
func httpPathsAdditionalBindings(m *descriptor.MethodDescriptorProto) []string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		panic(err.Error())
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		panic(fmt.Sprintf("extension is %T; want an HttpRule", ext))
	}
	var httpPaths []string
	var optsAdditionalBindings = opts.GetAdditionalBindings()
	for _, optAdditionalBindings := range optsAdditionalBindings {
		switch t := optAdditionalBindings.Pattern.(type) {
		case *options.HttpRule_Get:
			httpPaths = append(httpPaths, t.Get)
		case *options.HttpRule_Post:
			httpPaths = append(httpPaths, t.Post)
		case *options.HttpRule_Put:
			httpPaths = append(httpPaths, t.Put)
		case *options.HttpRule_Delete:
			httpPaths = append(httpPaths, t.Delete)
		case *options.HttpRule_Patch:
			httpPaths = append(httpPaths, t.Patch)
		case *options.HttpRule_Custom:
			httpPaths = append(httpPaths, t.Custom.Path)
		default:
			// nothing
		}
	}
	return httpPaths
}
// httpVerb returns the HTTP method ("GET", "POST", ...) of m's
// google.api.http option, "" when no pattern is set, or the custom kind
// for custom patterns. Extension errors are returned as text (see httpPath).
func httpVerb(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	switch t := opts.Pattern.(type) {
	default:
		return ""
	case *options.HttpRule_Get:
		return "GET"
	case *options.HttpRule_Post:
		return "POST"
	case *options.HttpRule_Put:
		return "PUT"
	case *options.HttpRule_Delete:
		return "DELETE"
	case *options.HttpRule_Patch:
		return "PATCH"
	case *options.HttpRule_Custom:
		return t.Custom.Kind
	}
}
// httpBody returns the body field selector of m's google.api.http option.
// Extension errors are returned as text (see httpPath).
func httpBody(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	return opts.Body
}
// urlHasVarsFromMessage reports whether path contains a "{field}"
// placeholder for any non-message field of d.
func urlHasVarsFromMessage(path string, d *ggdescriptor.Message) bool {
	for _, field := range d.Field {
		if isFieldMessage(field) {
			continue
		}
		if strings.Contains(path, "{"+*field.Name+"}") {
			return true
		}
	}
	return false
}
// lowerGoNormalize takes a string and applies formatting
// rules to conform to Golang convention. It applies a camel
// case filter, lowers the first character and formats fields
// with `id` to `ID`.
func lowerGoNormalize(s string) string {
	fmtd := xstrings.ToCamelCase(s)
	fmtd = xstrings.FirstRuneToLower(fmtd)
	// formatID inspects the raw snake_case input, not the camel-cased form.
	return formatID(s, fmtd)
}
// goNormalize takes a string and applies formatting rules
// to conform to Golang convention. It applies a camel case
// filter and formats fields with `id` to `ID`.
func goNormalize(s string) string {
	fmtd := xstrings.ToCamelCase(s)
	// formatID inspects the raw snake_case input, not the camel-cased form.
	return formatID(s, fmtd)
}
// formatID takes a base string alongside its camel-cased form and rewrites
// any `id` component of the base so the formatted result conforms to Go
// convention ("ID"/"IDs").
func formatID(base string, formatted string) string {
	if formatted == "" {
		return formatted
	}
	switch {
	case base == "id":
		return "ID" // id -> ID
	case strings.HasPrefix(base, "id_"):
		return "ID" + formatted[2:] // id_some -> IDSome
	case strings.HasSuffix(base, "_id"):
		return formatted[:len(formatted)-2] + "ID" // some_id -> SomeID
	case strings.HasSuffix(base, "_ids"):
		return formatted[:len(formatted)-3] + "IDs" // some_ids -> SomeIDs
	default:
		return formatted
	}
}
// replaceDict replaces, in src, every occurrence of each string-valued key
// of dict with its value; entries whose value is not a string are skipped.
// Map iteration order is undefined, exactly as before.
func replaceDict(src string, dict map[string]interface{}) string {
	for from, raw := range dict {
		to, ok := raw.(string)
		if !ok {
			continue
		}
		src = strings.Replace(src, from, to, -1)
	}
	return src
}
// goPkg returns the file's go_package option ("" when unset; the generated
// getter is nil-safe even when f.Options is nil).
func goPkg(f *descriptor.FileDescriptorProto) string {
	return f.Options.GetGoPackage()
}
// goPkgLastElement returns the final "/"-separated element of the file's
// go_package (the Go import path's last component, or the whole value when
// it contains no slash).
func goPkgLastElement(f *descriptor.FileDescriptorProto) string {
	pkg := goPkg(f)
	pkgSplitted := strings.Split(pkg, "/")
	return pkgSplitted[len(pkgSplitted)-1]
}
feat: add cppType and cppTypeWithPackage
Signed-off-by: Manfred Touron <e55e0291083ecbd7f065f55c4af741bf5707ac26@users.noreply.github.com>
package pgghelpers
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"sync"
"text/template"
"github.com/Masterminds/sprig"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
ggdescriptor "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
"github.com/huandu/xstrings"
options "google.golang.org/genproto/googleapis/api/annotations"
)
// jsReservedRe matches JavaScript reserved words (with one character of
// surrounding context captured) so generated identifiers can be suffixed;
// see jsSuffixReservedKeyword.
var jsReservedRe = regexp.MustCompile(`(^|[^A-Za-z])(do|if|in|for|let|new|try|var|case|else|enum|eval|false|null|this|true|void|with|break|catch|class|const|super|throw|while|yield|delete|export|import|public|return|static|switch|typeof|default|extends|finally|package|private|continue|debugger|function|arguments|interface|protected|implements|instanceof)($|[^A-Za-z])`)
var (
registry *ggdescriptor.Registry // some helpers need access to registry
)
// ProtoHelpersFuncMap is the template.FuncMap exposed to generator
// templates. The string/slice helpers now tolerate empty input instead of
// panicking on s[:1] / a[0]; everything else is unchanged.
var ProtoHelpersFuncMap = template.FuncMap{
	"string": func(i interface {
		String() string
	}) string {
		return i.String()
	},
	"json": func(v interface{}) string {
		a, err := json.Marshal(v)
		if err != nil {
			return err.Error()
		}
		return string(a)
	},
	"prettyjson": func(v interface{}) string {
		a, err := json.MarshalIndent(v, "", "  ")
		if err != nil {
			return err.Error()
		}
		return string(a)
	},
	"splitArray": func(sep string, s string) []interface{} {
		var r []interface{}
		t := strings.Split(s, sep)
		for i := range t {
			if t[i] != "" {
				r = append(r, t[i])
			}
		}
		return r
	},
	"first": func(a []string) string {
		if len(a) == 0 {
			return "" // previously panicked on an empty slice
		}
		return a[0]
	},
	"last": func(a []string) string {
		if len(a) == 0 {
			return "" // previously panicked on an empty slice
		}
		return a[len(a)-1]
	},
	"concat": func(a string, b ...string) string {
		return strings.Join(append([]string{a}, b...), "")
	},
	"join": func(sep string, a ...string) string {
		return strings.Join(a, sep)
	},
	"upperFirst": func(s string) string {
		if s == "" {
			return "" // previously panicked on s[:1]
		}
		return strings.ToUpper(s[:1]) + s[1:]
	},
	"lowerFirst": func(s string) string {
		if s == "" {
			return "" // previously panicked on s[:1]
		}
		return strings.ToLower(s[:1]) + s[1:]
	},
	"camelCase": func(s string) string {
		if len(s) > 1 {
			return xstrings.ToCamelCase(s)
		}
		// ToUpper(s) also handles the empty string (the old s[:1] panicked).
		return strings.ToUpper(s)
	},
	"lowerCamelCase": func(s string) string {
		if s == "" {
			return "" // previously panicked on s[:1]
		}
		if len(s) > 1 {
			s = xstrings.ToCamelCase(s)
		}
		return strings.ToLower(s[:1]) + s[1:]
	},
	"kebabCase": func(s string) string {
		return strings.Replace(xstrings.ToSnakeCase(s), "_", "-", -1)
	},
	"contains": func(sub, s string) bool {
		return strings.Contains(s, sub)
	},
	"trimstr": func(cutset, s string) string {
		return strings.Trim(s, cutset)
	},
	"index": func(array interface{}, i int) interface{} {
		slice := reflect.ValueOf(array)
		if slice.Kind() != reflect.Slice {
			panic("Error in index(): given a non-slice type")
		}
		if i < 0 || i >= slice.Len() {
			panic("Error in index(): index out of bounds")
		}
		return slice.Index(i).Interface()
	},
	"add": func(a int, b int) int {
		return a + b
	},
	"subtract": func(a int, b int) int {
		return a - b
	},
	"multiply": func(a int, b int) int {
		return a * b
	},
	"divide": func(a int, b int) int {
		if b == 0 {
			panic("psssst ... little help here ... you cannot divide by 0")
		}
		return a / b
	},
	"snakeCase":                    xstrings.ToSnakeCase,
	"getProtoFile":                 getProtoFile,
	"getMessageType":               getMessageType,
	"getEnumValue":                 getEnumValue,
	"isFieldMessage":               isFieldMessage,
	"isFieldMessageTimeStamp":      isFieldMessageTimeStamp,
	"isFieldRepeated":              isFieldRepeated,
	"haskellType":                  haskellType,
	"goType":                       goType,
	"goZeroValue":                  goZeroValue,
	"goTypeWithPackage":            goTypeWithPackage,
	"goTypeWithGoPackage":          goTypeWithGoPackage,
	"jsType":                       jsType,
	"jsSuffixReserved":             jsSuffixReservedKeyword,
	"namespacedFlowType":           namespacedFlowType,
	"httpVerb":                     httpVerb,
	"httpPath":                     httpPath,
	"httpPathsAdditionalBindings":  httpPathsAdditionalBindings,
	"httpBody":                     httpBody,
	"shortType":                    shortType,
	"urlHasVarsFromMessage":        urlHasVarsFromMessage,
	"lowerGoNormalize":             lowerGoNormalize,
	"goNormalize":                  goNormalize,
	"leadingComment":               leadingComment,
	"trailingComment":              trailingComment,
	"leadingDetachedComments":      leadingDetachedComments,
	"stringFileOptionsExtension":   stringFileOptionsExtension,
	"stringMessageExtension":       stringMessageExtension,
	"stringFieldExtension":         stringFieldExtension,
	"int64FieldExtension":          int64FieldExtension,
	"int64MessageExtension":        int64MessageExtension,
	"stringMethodOptionsExtension": stringMethodOptionsExtension,
	"boolMethodOptionsExtension":   boolMethodOptionsExtension,
	"boolMessageExtension":         boolMessageExtension,
	"boolFieldExtension":           boolFieldExtension,
	"isFieldMap":                   isFieldMap,
	"fieldMapKeyType":              fieldMapKeyType,
	"fieldMapValueType":            fieldMapValueType,
	"replaceDict":                  replaceDict,
	"setStore":                     setStore,
	"getStore":                     getStore,
	"goPkg":                        goPkg,
	"goPkgLastElement":             goPkgLastElement,
	"cppType":                      cppType,
	"cppTypeWithPackage":           cppTypeWithPackage,
}
// pathMap indexes descriptor nodes to their source locations; it is
// populated by InitPathMap/InitPathMaps and read by the comment helpers.
var pathMap map[interface{}]*descriptor.SourceCodeInfo_Location

// store backs the setStore/getStore template helpers.
var store = newStore()
// Utility to store some vars across multiple scope
// globalStore is a mutex-guarded, string-keyed map shared by every
// template invocation in this process.
type globalStore struct {
	store map[string]interface{}
	mu    sync.Mutex
}

// newStore returns an empty, ready-to-use globalStore.
func newStore() *globalStore {
	return &globalStore{
		store: make(map[string]interface{}),
	}
}
// getData returns the value stored under key.
// NOTE(review): a missing key yields false (not nil) — templates appear to
// rely on this for boolean tests; confirm before changing the sentinel.
func (s *globalStore) getData(key string) interface{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	if v, ok := s.store[key]; ok {
		return v
	}
	return false
}
// setData stores o under key, overwriting any previous value.
func (s *globalStore) setData(key string, o interface{}) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.store[key] = o
}
// setStore saves o under key for later retrieval with getStore. It returns
// "" so templates can call it inline without producing output.
func setStore(key string, o interface{}) string {
	store.setData(key, o)
	return ""
}
// getStore fetches the value saved by setStore (false when absent — see
// globalStore.getData).
func getStore(key string) interface{} {
	return store.getData(key)
}
// SetRegistry injects the grpc-gateway descriptor registry used by the
// lookup helpers (getProtoFile, getMessageType, ...).
func SetRegistry(reg *ggdescriptor.Registry) {
	registry = reg
}
// InitPathMap (re)builds pathMap from a single file's source code info,
// discarding any previously collected locations.
func InitPathMap(file *descriptor.FileDescriptorProto) {
	pathMap = make(map[interface{}]*descriptor.SourceCodeInfo_Location)
	addToPathMap(file.GetSourceCodeInfo(), file, []int32{})
}
// InitPathMaps (re)builds pathMap from several files' source code info,
// discarding any previously collected locations.
func InitPathMaps(files []*descriptor.FileDescriptorProto) {
	pathMap = make(map[interface{}]*descriptor.SourceCodeInfo_Location)
	for _, file := range files {
		addToPathMap(file.GetSourceCodeInfo(), file, []int32{})
	}
}
// addToPathMap traverses through the AST adding SourceCodeInfo_Location entries to the pathMap.
// Since the AST is a tree, the recursion finishes once it has gone through all the nodes.
// The numeric literals are the proto field numbers of the parent
// descriptor (e.g. FileDescriptorProto.message_type = 4), as required by
// the SourceCodeInfo path encoding. Loop variables no longer shadow the
// imported `descriptor` package.
func addToPathMap(info *descriptor.SourceCodeInfo, i interface{}, path []int32) {
	if loc := findLoc(info, path); loc != nil {
		pathMap[i] = loc
	}
	switch d := i.(type) {
	case *descriptor.FileDescriptorProto:
		for idx, msg := range d.MessageType {
			addToPathMap(info, msg, newPath(path, 4, idx))
		}
		for idx, enum := range d.EnumType {
			addToPathMap(info, enum, newPath(path, 5, idx))
		}
		for idx, svc := range d.Service {
			addToPathMap(info, svc, newPath(path, 6, idx))
		}
	case *descriptor.DescriptorProto:
		for idx, field := range d.Field {
			addToPathMap(info, field, newPath(path, 2, idx))
		}
		for idx, nested := range d.NestedType {
			addToPathMap(info, nested, newPath(path, 3, idx))
		}
		for idx, enum := range d.EnumType {
			addToPathMap(info, enum, newPath(path, 4, idx))
		}
	case *descriptor.EnumDescriptorProto:
		for idx, val := range d.Value {
			addToPathMap(info, val, newPath(path, 2, idx))
		}
	case *descriptor.ServiceDescriptorProto:
		for idx, method := range d.Method {
			addToPathMap(info, method, newPath(path, 2, idx))
		}
	}
}
// newPath returns a fresh copy of base extended with the (field, index)
// pair; it never aliases base's backing array.
func newPath(base []int32, field int32, index int) []int32 {
	p := make([]int32, 0, len(base)+2)
	p = append(p, base...)
	return append(p, field, int32(index))
}
// findLoc returns the source location whose path matches exactly, or nil
// when info has no such location.
func findLoc(info *descriptor.SourceCodeInfo, path []int32) *descriptor.SourceCodeInfo_Location {
	for _, loc := range info.GetLocation() {
		if samePath(loc.Path, path) {
			return loc
		}
	}
	return nil
}
// samePath reports whether a and b are element-wise equal.
func samePath(a, b []int32) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
/*func findSourceInfoLocation(i interface{}) *descriptor.SourceCodeInfo_Location {
if pathMap == nil {
return nil
}
return pathMap[i]
}*/
// leadingComment returns the comment block directly above i's declaration,
// or "" when i is unknown to pathMap (the getter is nil-safe).
func leadingComment(i interface{}) string {
	loc := pathMap[i]
	return loc.GetLeadingComments()
}
// trailingComment returns the comment following i's declaration, or ""
// when i is unknown to pathMap (the getter is nil-safe).
func trailingComment(i interface{}) string {
	loc := pathMap[i]
	return loc.GetTrailingComments()
}
// leadingDetachedComments returns the detached comment blocks preceding
// i's declaration, or nil when i is unknown to pathMap.
func leadingDetachedComments(i interface{}) []string {
	loc := pathMap[i]
	return loc.GetLeadingDetachedComments()
}
// stringMethodOptionsExtension extracts method options of a string type.
// To define your own extensions see:
// https://developers.google.com/protocol-buffers/docs/proto#customoptions
// Typically the fieldID of private extensions should be in the range:
// 50000-99999
// It returns "" when f or its options are nil, when the extension cannot
// be decoded, or when it is not a *string.
func stringMethodOptionsExtension(fieldID int32, f *descriptor.MethodDescriptorProto) string {
	if f == nil {
		return ""
	}
	if f.Options == nil {
		return ""
	}
	var extendedType *descriptor.MethodOptions
	var extensionType *string
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("bytes,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return ""
	}
	str, ok := ext.(*string)
	if !ok {
		return ""
	}
	return *str
}
// stringFileOptionsExtension extracts file options of a string type.
// To define your own extensions see:
// https://developers.google.com/protocol-buffers/docs/proto#customoptions
// Typically the fieldID of private extensions should be in the range:
// 50000-99999
// It returns "" when f or its options are nil, when the extension cannot
// be decoded, or when it is not a *string.
func stringFileOptionsExtension(fieldID int32, f *descriptor.FileDescriptorProto) string {
	if f == nil {
		return ""
	}
	if f.Options == nil {
		return ""
	}
	var extendedType *descriptor.FileOptions
	var extensionType *string
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("bytes,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return ""
	}
	str, ok := ext.(*string)
	if !ok {
		return ""
	}
	return *str
}
// stringFieldExtension reads the custom string field-option identified by
// fieldID from f.Options. It returns "" when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *string.
func stringFieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) string {
	if f == nil {
		return ""
	}
	if f.Options == nil {
		return ""
	}
	var extendedType *descriptor.FieldOptions
	var extensionType *string
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("bytes,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return ""
	}
	str, ok := ext.(*string)
	if !ok {
		return ""
	}
	return *str
}
// int64FieldExtension reads the custom int64 field-option identified by
// fieldID from f.Options. It returns 0 when f or its options are nil,
// when the extension cannot be decoded, or when it is not an *int64.
func int64FieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) int64 {
	if f == nil {
		return 0
	}
	if f.Options == nil {
		return 0
	}
	var extendedType *descriptor.FieldOptions
	var extensionType *int64
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return 0
	}
	i, ok := ext.(*int64)
	if !ok {
		return 0
	}
	return *i
}
// int64MessageExtension reads the custom int64 message-option identified
// by fieldID from f.Options. It returns 0 when f or its options are nil,
// when the extension cannot be decoded, or when it is not an *int64.
func int64MessageExtension(fieldID int32, f *descriptor.DescriptorProto) int64 {
	if f == nil {
		return 0
	}
	if f.Options == nil {
		return 0
	}
	var extendedType *descriptor.MessageOptions
	var extensionType *int64
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return 0
	}
	i, ok := ext.(*int64)
	if !ok {
		return 0
	}
	return *i
}
// stringMessageExtension reads the custom string message-option identified
// by fieldID from f.Options. It returns "" when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *string.
func stringMessageExtension(fieldID int32, f *descriptor.DescriptorProto) string {
	if f == nil {
		return ""
	}
	if f.Options == nil {
		return ""
	}
	var extendedType *descriptor.MessageOptions
	var extensionType *string
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("bytes,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return ""
	}
	str, ok := ext.(*string)
	if !ok {
		return ""
	}
	return *str
}
// boolMethodOptionsExtension reads the custom bool method-option identified
// by fieldID from f.Options. It returns false when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *bool.
func boolMethodOptionsExtension(fieldID int32, f *descriptor.MethodDescriptorProto) bool {
	if f == nil {
		return false
	}
	if f.Options == nil {
		return false
	}
	var extendedType *descriptor.MethodOptions
	var extensionType *bool
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return false
	}
	b, ok := ext.(*bool)
	if !ok {
		return false
	}
	return *b
}
// boolFieldExtension reads the custom bool field-option identified by
// fieldID from f.Options. It returns false when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *bool.
func boolFieldExtension(fieldID int32, f *descriptor.FieldDescriptorProto) bool {
	if f == nil {
		return false
	}
	if f.Options == nil {
		return false
	}
	var extendedType *descriptor.FieldOptions
	var extensionType *bool
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return false
	}
	b, ok := ext.(*bool)
	if !ok {
		return false
	}
	return *b
}
// boolMessageExtension reads the custom bool message-option identified by
// fieldID from f.Options. It returns false when f or its options are nil,
// when the extension cannot be decoded, or when it is not a *bool.
func boolMessageExtension(fieldID int32, f *descriptor.DescriptorProto) bool {
	if f == nil {
		return false
	}
	if f.Options == nil {
		return false
	}
	var extendedType *descriptor.MessageOptions
	var extensionType *bool
	eds := proto.RegisteredExtensions(f.Options)
	if eds[fieldID] == nil {
		// First use of this field ID: register a synthetic extension
		// descriptor so proto.GetExtension knows how to decode it.
		ed := &proto.ExtensionDesc{
			ExtendedType:  extendedType,
			ExtensionType: extensionType,
			Field:         fieldID,
			Tag:           fmt.Sprintf("varint,%d", fieldID),
		}
		proto.RegisterExtension(ed)
		eds = proto.RegisteredExtensions(f.Options)
	}
	ext, err := proto.GetExtension(f.Options, eds[fieldID])
	if err != nil {
		return false
	}
	b, ok := ext.(*bool)
	if !ok {
		return false
	}
	return *b
}
// init merges the sprig text-template helper functions into
// ProtoHelpersFuncMap so templates can use both sets of helpers.
// Sprig entries overwrite any helper with the same name.
func init() {
	for k, v := range sprig.TxtFuncMap() {
		ProtoHelpersFuncMap[k] = v
	}
}
// getProtoFile resolves a proto file by name through the package-level
// registry. It returns nil when no registry is configured and panics when
// the registry lookup fails (the name is expected to be known).
func getProtoFile(name string) *ggdescriptor.File {
	if registry == nil {
		return nil
	}
	file, err := registry.LookupFile(name)
	if err != nil {
		panic(err)
	}
	return file
}
// getMessageType resolves a fully-qualified message type name to its
// descriptor. With a registry configured it delegates to LookupMsg
// (panicking on failure); otherwise it falls back to scanning f's
// top-level messages for one whose short name matches.
// NOTE(review): the fallback only inspects top-level messages, so nested
// message types cannot be found without a registry — confirm intended.
func getMessageType(f *descriptor.FileDescriptorProto, name string) *ggdescriptor.Message {
	if registry != nil {
		msg, err := registry.LookupMsg(".", name)
		if err != nil {
			panic(err)
		}
		return msg
	}
	// name is in the form .packageName.MessageTypeName.InnerMessageTypeName...
	// e.g. .article.ProductTag
	splits := strings.Split(name, ".")
	target := splits[len(splits)-1]
	for _, m := range f.MessageType {
		if target == *m.Name {
			return &ggdescriptor.Message{
				DescriptorProto: m,
			}
		}
	}
	return nil
}
// getEnumValue returns the values of the first enum in f whose name
// matches name case-insensitively, or nil when no enum matches.
func getEnumValue(f []*descriptor.EnumDescriptorProto, name string) []*descriptor.EnumValueDescriptorProto {
	for _, candidate := range f {
		if !strings.EqualFold(*candidate.Name, name) {
			continue
		}
		return candidate.GetValue()
	}
	return nil
}
// isFieldMessageTimeStamp reports whether f is a message-typed field whose
// type is the well-known google.protobuf.Timestamp message.
func isFieldMessageTimeStamp(f *descriptor.FieldDescriptorProto) bool {
	if f.Type == nil || *f.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE {
		return false
	}
	// Direct == is the idiomatic string-equality test; strings.Compare is
	// meant only for three-way comparison. The nil guard avoids a panic on
	// a malformed descriptor with no TypeName.
	return f.TypeName != nil && *f.TypeName == ".google.protobuf.Timestamp"
}
// isFieldMessage reports whether f is a message-typed field.
func isFieldMessage(f *descriptor.FieldDescriptorProto) bool {
	return f.Type != nil && *f.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE
}
// isFieldRepeated reports whether f is a repeated field. A nil field or a
// field missing its type/label information is reported as not repeated.
func isFieldRepeated(f *descriptor.FieldDescriptorProto) bool {
	if f == nil {
		return false
	}
	return f.Type != nil && f.Label != nil && *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
}
// isFieldMap reports whether f is a proto map field declared on message m.
// The protoc compiler synthesizes a nested XxxEntry message for every map
// field with exactly two fields: "key" (number 1) and "value" (number 2);
// this function looks that nested message up by the field's short type
// name and validates its shape.
func isFieldMap(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) bool {
	if f.TypeName == nil {
		return false
	}
	shortName := shortType(*f.TypeName)
	var nt *descriptor.DescriptorProto
	for _, t := range m.NestedType {
		if t.GetName() == shortName {
			nt = t
			break
		}
	}
	if nt == nil {
		return false
	}
	// Robustness fix: require BOTH the "key" and "value" fields to be
	// present. Previously a nested type with no fields (or only one of
	// the two) fell through the loop and was wrongly reported as a map.
	var hasKey, hasValue bool
	for _, entryField := range nt.Field {
		switch entryField.GetName() {
		case "key":
			if entryField.GetNumber() != 1 {
				return false
			}
			hasKey = true
		case "value":
			if entryField.GetNumber() != 2 {
				return false
			}
			hasValue = true
		default:
			// Any extra field disqualifies the type as a map entry.
			return false
		}
	}
	return hasKey && hasValue
}
// fieldMapKeyType returns the descriptor of the "key" field of the map
// entry message backing map field f, or nil when f is not a map field of m.
func fieldMapKeyType(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) *descriptor.FieldDescriptorProto {
	if f.TypeName == nil {
		return nil
	}
	want := shortType(*f.TypeName)
	var entry *descriptor.DescriptorProto
	for _, nested := range m.NestedType {
		if *nested.Name == want {
			entry = nested
			break
		}
	}
	if entry == nil {
		return nil
	}
	for _, fld := range entry.Field {
		if *fld.Name == "key" {
			return fld
		}
	}
	return nil
}
// fieldMapValueType returns the descriptor of the "value" field of the map
// entry message backing map field f, or nil when f is not a map field of m.
func fieldMapValueType(f *descriptor.FieldDescriptorProto, m *descriptor.DescriptorProto) *descriptor.FieldDescriptorProto {
	if f.TypeName == nil {
		return nil
	}
	want := shortType(*f.TypeName)
	var entry *descriptor.DescriptorProto
	for _, nested := range m.NestedType {
		if *nested.Name == want {
			entry = nested
			break
		}
	}
	if entry == nil {
		return nil
	}
	for _, fld := range entry.Field {
		if *fld.Name == "value" {
			return fld
		}
	}
	return nil
}
// goTypeWithGoPackage types the field MESSAGE and ENUM with the go_package name.
// This method is an evolution of goTypeWithPackage. It handles embedded messages.
//
// example:
// ```proto
// message GetArticleResponse {
// Article article = 1;
// message Storage {
// string code = 1;
// }
// repeated Storage storages = 2;
// }
// ```
// Then the type of `storages` is `GetArticleResponse_Storage` for the go language.
//
func goTypeWithGoPackage(p *descriptor.FileDescriptorProto, f *descriptor.FieldDescriptorProto) string {
	pkg := ""
	if *f.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE || *f.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
		if isTimestampPackage(*f.TypeName) {
			pkg = "timestamp"
		} else {
			// Robustness fix: GetGoPackage() is nil-safe and returns ""
			// when the file has no options or no go_package option, where
			// the previous *p.GetOptions().GoPackage deref would panic.
			pkg = p.GetOptions().GetGoPackage()
			// go_package may be of the form "import/path;package_name":
			// keep only the package name after the semicolon.
			if strings.Contains(pkg, ";") {
				pkg = strings.Split(pkg, ";")[1]
			}
		}
	}
	return goTypeWithEmbedded(pkg, f, p)
}
// goTypeWithPackage maps a field to its Go type, qualifying messages and
// enums with the package derived from their type name.
// Warning: does not handle embedded messages like goTypeWithGoPackage does.
func goTypeWithPackage(f *descriptor.FieldDescriptorProto) string {
	pkg := ""
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM:
		if isTimestampPackage(*f.TypeName) {
			pkg = "timestamp"
		} else {
			pkg = getPackageTypeName(*f.TypeName)
		}
	}
	return goType(pkg, f)
}
// haskellType maps a protobuf field to its Haskell type name. Repeated
// fields are wrapped in a list type ("[T]"); message and enum types are
// qualified with pkg (dot-separated) when pkg is non-empty.
func haskellType(pkg string, f *descriptor.FieldDescriptorProto) string {
	repeated := f.Label != nil && *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
	// wrap puts t in a Haskell list when the field is repeated.
	wrap := func(t string) string {
		if repeated {
			return "[" + t + "]"
		}
		return t
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return wrap("Float")
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		return wrap("Int64")
	case descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_UINT32:
		return wrap("Word")
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		return wrap("Int")
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return wrap("Bool")
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return wrap("Text")
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
		descriptor.FieldDescriptorProto_TYPE_ENUM:
		// Bug fix: the ENUM case previously concatenated pkg and the type
		// name without the "." separator (yielding e.g. "pkgType") and
		// ignored the repeated label; enums now follow the exact same
		// qualification rule as messages.
		if pkg != "" {
			pkg = pkg + "."
		}
		return wrap(pkg + shortType(*f.TypeName))
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return wrap("Word8")
	default:
		return "Generic"
	}
}
// cppTypeWithPackage maps a field to its C++ type, qualifying messages and
// enums with the package derived from their type name.
// Warning: does not handle embedded messages like goTypeWithGoPackage does.
func cppTypeWithPackage(f *descriptor.FieldDescriptorProto) string {
	pkg := ""
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM:
		if isTimestampPackage(*f.TypeName) {
			pkg = "timestamp"
		} else {
			pkg = getPackageTypeName(*f.TypeName)
		}
	}
	return cppType(pkg, f)
}
// cppType maps a protobuf field to its C++ type name; repeated fields are
// wrapped in std::vector<T>. Unknown field types render as "???".
func cppType(pkg string, f *descriptor.FieldDescriptorProto) string {
	typeName := "???"
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		typeName = "double"
	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		typeName = "float"
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		typeName = "int64_t"
	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		typeName = "uint64_t"
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		typeName = "int32_t"
	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		typeName = "uint32_t"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		typeName = "bool"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		typeName = "std::string"
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		if pkg != "" {
			pkg = pkg + "."
		}
		typeName = pkg + shortType(*f.TypeName)
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		typeName = "std::vector<uint8_t>"
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		// NOTE: enums are deliberately left without the "." qualifier to
		// preserve the historical output of this helper.
		typeName = pkg + shortType(*f.TypeName)
	}
	if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
		return "std::vector<" + typeName + ">"
	}
	return typeName
}
// goTypeWithEmbedded maps a protobuf field to its Go type, qualifying
// message/enum types with pkg and rewriting the names of types declared
// inside another message (embedded) to the Parent_Child form emitted by
// the Go protobuf generator.
func goTypeWithEmbedded(pkg string, f *descriptor.FieldDescriptorProto, p *descriptor.FileDescriptorProto) string {
	if pkg != "" {
		pkg = pkg + "."
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]float64"
		}
		return "float64"
	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]float32"
		}
		return "float32"
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]int64"
		}
		return "int64"
	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]uint64"
		}
		return "uint64"
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]int32"
		}
		return "int32"
	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]uint32"
		}
		return "uint32"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]bool"
		}
		return "bool"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]string"
		}
		return "string"
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		name := *f.TypeName
		// NOTE(review): only the repeated branch rewrites embedded names;
		// a singular embedded message keeps its short name. Confirm this
		// asymmetry is intentional.
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			fieldPackage := strings.Split(*f.TypeName, ".")
			filePackage := strings.Split(*p.Package, ".")
			// check if we are working with a message embedded.
			// NOTE(review): "len(fieldPackage)+1 > len(filePackage)+1" is
			// just len(fieldPackage) > len(filePackage) — presumably "the
			// type path is deeper than the file package, so it is nested";
			// verify the +1s are not a leftover from an older condition.
			if len(fieldPackage) > 1 && len(fieldPackage)+1 > len(filePackage)+1 {
				name = strings.Join(fieldPackage[len(filePackage)+1:], "_")
			}
			return fmt.Sprintf("[]*%s%s", pkg, shortType(name))
		}
		return fmt.Sprintf("*%s%s", pkg, shortType(name))
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
			return "[]byte"
		}
		// NOTE(review): generated Go maps a singular proto "bytes" field
		// to []byte; returning "byte" here looks suspicious — confirm.
		return "byte"
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		name := *f.TypeName
		fieldPackage := strings.Split(*f.TypeName, ".")
		filePackage := strings.Split(*p.Package, ".")
		// check if we are working with a message embedded.
		if len(fieldPackage) > 1 && len(fieldPackage)+1 > len(filePackage)+1 {
			name = strings.Join(fieldPackage[len(filePackage)+1:], "_")
		}
		return fmt.Sprintf("*%s%s", pkg, shortType(name))
	default:
		return "interface{}"
	}
}
// goType maps a protobuf field to its Go type name, qualifying messages
// and enums with pkg and prefixing repeated fields with "[]".
//
// Deprecated: use goTypeWithEmbedded instead.
func goType(pkg string, f *descriptor.FieldDescriptorProto) string {
	if pkg != "" {
		pkg = pkg + "."
	}
	prefix := ""
	if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
		prefix = "[]"
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return prefix + "float64"
	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return prefix + "float32"
	case descriptor.FieldDescriptorProto_TYPE_INT64:
		return prefix + "int64"
	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		return prefix + "uint64"
	case descriptor.FieldDescriptorProto_TYPE_INT32:
		return prefix + "int32"
	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		return prefix + "uint32"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return prefix + "bool"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return prefix + "string"
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		return prefix + "*" + pkg + shortType(*f.TypeName)
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return prefix + "byte"
	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		// NOTE: repeated enums are not special-cased; preserved as-is.
		return "*" + pkg + shortType(*f.TypeName)
	default:
		return "interface{}"
	}
}
// goZeroValue returns the Go zero-value literal for the field's type.
// Repeated fields and message/enum pointers zero to "nil".
func goZeroValue(f *descriptor.FieldDescriptorProto) string {
	const nilString = "nil"
	if *f.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED {
		return nilString
	}
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return "0.0"
	case descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_BYTES:
		return "0"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return "false"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return "\"\""
	default:
		// Messages, enums and every unknown type zero to nil.
		return nilString
	}
}
// jsType maps a protobuf field to its JavaScript/Flow type name, wrapping
// repeated fields in Array<...>.
func jsType(f *descriptor.FieldDescriptorProto) string {
	var t string
	switch *f.Type {
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
		descriptor.FieldDescriptorProto_TYPE_ENUM:
		t = namespacedFlowType(*f.TypeName)
	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
		descriptor.FieldDescriptorProto_TYPE_FLOAT,
		descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_FIXED64,
		descriptor.FieldDescriptorProto_TYPE_FIXED32,
		descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
		descriptor.FieldDescriptorProto_TYPE_SINT32,
		descriptor.FieldDescriptorProto_TYPE_SINT64:
		t = "number"
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		t = "boolean"
	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		t = "Uint8Array"
	case descriptor.FieldDescriptorProto_TYPE_STRING:
		t = "string"
	default:
		t = "any"
	}
	if isFieldRepeated(f) {
		return "Array<" + t + ">"
	}
	return t
}
// jsSuffixReservedKeyword appends "_" to identifiers matched by the
// package-level jsReservedRe pattern, preserving the surrounding capture
// groups, so generated names do not collide with JS reserved words.
func jsSuffixReservedKeyword(s string) string {
	return jsReservedRe.ReplaceAllString(s, "${1}${2}_${3}")
}
// isTimestampPackage reports whether s is the fully-qualified name of the
// well-known google.protobuf.Timestamp message (with leading dot).
func isTimestampPackage(s string) bool {
	// Direct == is the idiomatic equality test; strings.Compare is meant
	// only for three-way comparisons.
	return s == ".google.protobuf.Timestamp"
}
// getPackageTypeName extracts the package segment from a fully-qualified
// type name: for ".pkg.Type" it returns "pkg" (the element after the first
// dot). It returns "" when s contains no dot at all.
func getPackageTypeName(s string) string {
	if !strings.Contains(s, ".") {
		return ""
	}
	return strings.Split(s, ".")[1]
}
// shortType returns the last dot-separated element of a qualified type
// name (e.g. ".pkg.Type" -> "Type"); a name with no dots is returned as-is.
func shortType(s string) string {
	if i := strings.LastIndex(s, "."); i >= 0 {
		return s[i+1:]
	}
	return s
}
// namespacedFlowType converts a fully-qualified proto type name into a
// Flow-style identifier: leading dots are stripped and the remaining dots
// become "$" (e.g. ".article.Product" -> "article$Product").
func namespacedFlowType(s string) string {
	return strings.Replace(strings.TrimLeft(s, "."), ".", "$", -1)
}
// httpPath returns the URL path template from the method's google.api.http
// option, whichever HTTP verb carries it. On extension-lookup failure the
// error text itself is returned (so templates surface the problem), and a
// method without a recognized pattern yields "".
func httpPath(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	switch t := opts.Pattern.(type) {
	default:
		return ""
	case *options.HttpRule_Get:
		return t.Get
	case *options.HttpRule_Post:
		return t.Post
	case *options.HttpRule_Put:
		return t.Put
	case *options.HttpRule_Delete:
		return t.Delete
	case *options.HttpRule_Patch:
		return t.Patch
	case *options.HttpRule_Custom:
		return t.Custom.Path
	}
}
// httpPathsAdditionalBindings returns the URL path templates of every
// additional_bindings entry on the method's google.api.http option.
// Unlike httpPath it panics when the extension cannot be read or has an
// unexpected type; bindings with no recognized pattern are skipped.
func httpPathsAdditionalBindings(m *descriptor.MethodDescriptorProto) []string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		panic(err.Error())
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		panic(fmt.Sprintf("extension is %T; want an HttpRule", ext))
	}
	var httpPaths []string
	var optsAdditionalBindings = opts.GetAdditionalBindings()
	for _, optAdditionalBindings := range optsAdditionalBindings {
		switch t := optAdditionalBindings.Pattern.(type) {
		case *options.HttpRule_Get:
			httpPaths = append(httpPaths, t.Get)
		case *options.HttpRule_Post:
			httpPaths = append(httpPaths, t.Post)
		case *options.HttpRule_Put:
			httpPaths = append(httpPaths, t.Put)
		case *options.HttpRule_Delete:
			httpPaths = append(httpPaths, t.Delete)
		case *options.HttpRule_Patch:
			httpPaths = append(httpPaths, t.Patch)
		case *options.HttpRule_Custom:
			httpPaths = append(httpPaths, t.Custom.Path)
		default:
			// nothing
		}
	}
	return httpPaths
}
// httpVerb returns the HTTP method ("GET", "POST", ...) from the method's
// google.api.http option, or the custom rule's kind for custom patterns.
// On extension-lookup failure the error text itself is returned; a method
// without a recognized pattern yields "".
func httpVerb(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	switch t := opts.Pattern.(type) {
	default:
		return ""
	case *options.HttpRule_Get:
		return "GET"
	case *options.HttpRule_Post:
		return "POST"
	case *options.HttpRule_Put:
		return "PUT"
	case *options.HttpRule_Delete:
		return "DELETE"
	case *options.HttpRule_Patch:
		return "PATCH"
	case *options.HttpRule_Custom:
		return t.Custom.Kind
	}
}
// httpBody returns the body field selector of the method's google.api.http
// option. On extension-lookup failure the error text itself is returned.
func httpBody(m *descriptor.MethodDescriptorProto) string {
	ext, err := proto.GetExtension(m.Options, options.E_Http)
	if err != nil {
		return err.Error()
	}
	opts, ok := ext.(*options.HttpRule)
	if !ok {
		return fmt.Sprintf("extension is %T; want an HttpRule", ext)
	}
	return opts.Body
}
// urlHasVarsFromMessage reports whether path contains a "{field}" variable
// for any non-message field of d.
func urlHasVarsFromMessage(path string, d *ggdescriptor.Message) bool {
	for _, field := range d.Field {
		if isFieldMessage(field) {
			continue
		}
		if strings.Contains(path, "{"+*field.Name+"}") {
			return true
		}
	}
	return false
}
// lowerGoNormalize takes a string and applies formatting
// rules to conform to Golang convention. It applies a camel
// case filter, lowers the first character and formats fields
// with `id` to `ID`.
func lowerGoNormalize(s string) string {
	fmtd := xstrings.ToCamelCase(s)
	fmtd = xstrings.FirstRuneToLower(fmtd)
	// formatID is given the ORIGINAL string so its prefix/suffix checks
	// (id, id_, _id, _ids) run against the raw field name.
	return formatID(s, fmtd)
}
// goNormalize takes a string and applies formatting rules
// to conform to Golang convention. It applies a camel case
// filter and formats fields with `id` to `ID`.
func goNormalize(s string) string {
	fmtd := xstrings.ToCamelCase(s)
	// formatID inspects the ORIGINAL string for id/_id/_ids markers.
	return formatID(s, fmtd)
}
// formatID takes a base string alongside its camel-cased counterpart and
// rewrites id markers to Go convention: "id" -> "ID", "id_xxx" -> "IDXxx",
// "xxx_id" -> "XxxID", "xxx_ids" -> "XxxIDs". The base (raw) string drives
// the detection; the formatted string supplies the transformed text.
func formatID(base string, formatted string) string {
	if formatted == "" {
		return formatted
	}
	if base == "id" {
		// id -> ID
		return "ID"
	}
	if strings.HasPrefix(base, "id_") {
		// id_some -> IDSome
		return "ID" + formatted[2:]
	}
	if strings.HasSuffix(base, "_id") {
		// some_id -> SomeID
		return formatted[:len(formatted)-2] + "ID"
	}
	if strings.HasSuffix(base, "_ids") {
		// some_ids -> SomeIDs
		return formatted[:len(formatted)-3] + "IDs"
	}
	return formatted
}
// replaceDict applies every string-valued entry of dict to src as a
// literal search/replace (all occurrences); non-string values are skipped.
// Note: map iteration order is unspecified, so overlapping replacements
// may apply in any order.
func replaceDict(src string, dict map[string]interface{}) string {
	for pattern, value := range dict {
		replacement, isString := value.(string)
		if !isString {
			continue
		}
		src = strings.Replace(src, pattern, replacement, -1)
	}
	return src
}
// goPkg returns the file's go_package option ("" when unset; GetGoPackage
// is nil-safe on missing options).
func goPkg(f *descriptor.FileDescriptorProto) string {
	return f.Options.GetGoPackage()
}
// goPkgLastElement returns the final "/"-separated element of the file's
// go_package option (e.g. "github.com/x/y" -> "y"); a package with no
// slash is returned unchanged.
func goPkgLastElement(f *descriptor.FileDescriptorProto) string {
	pkg := goPkg(f)
	if i := strings.LastIndex(pkg, "/"); i >= 0 {
		return pkg[i+1:]
	}
	return pkg
}
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"
)
// vType is the value-type tag stored in the low 8 bits of an internal key.
type vType int

// String renders the tag for debugging: "d" for deletions, "v" for values
// and "x" for anything unknown.
func (t vType) String() string {
	if t == tDel {
		return "d"
	}
	if t == tVal {
		return "v"
	}
	return "x"
}

// Value types encoded as the last component of internal keys.
// Don't modify; these values are saved to disk.
const (
	tDel vType = iota
	tVal
)
// tSeek defines the vType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
const tSeek = tVal
const (
	// Maximum value possible for a sequence number; the low 8 bits of
	// the packed trailer are used by the value type, so only 56 bits
	// remain for the sequence number itself.
	kMaxSeq uint64 = (uint64(1) << 56) - 1
	// Maximum value possible for packed sequence number and type.
	kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
)
// Maximum number encoded in little-endian bytes (filled by init below).
var kMaxNumBytes = make([]byte, 8)
func init() {
	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
}
// iKey is an internal key: the user key followed by an 8-byte
// little-endian trailer packing (sequence number << 8 | value type).
type iKey []byte
// newIKey builds the internal key for ukey at sequence seq with value
// type t; it panics when either is out of range.
func newIKey(ukey []byte, seq uint64, t vType) iKey {
	if seq > kMaxSeq || t > tVal {
		panic("invalid seq number or value type")
	}
	ik := make(iKey, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(t))
	return ik
}
// assert panics unless p is a structurally valid internal key (non-nil
// and long enough to carry the 8-byte trailer).
func (p iKey) assert() {
	switch {
	case p == nil:
		panic("nil iKey")
	case len(p) < 8:
		panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p)))
	}
}
// ukey strips the 8-byte packed (seq, type) trailer, leaving the user key.
func (p iKey) ukey() []byte {
	p.assert()
	end := len(p) - 8
	return p[:end]
}
// num returns the packed (sequence<<8 | type) trailer of the key.
func (p iKey) num() uint64 {
	p.assert()
	return binary.LittleEndian.Uint64(p[len(p)-8:])
}
// parseNum splits the key's trailer into its sequence number and value
// type. Bug fix: it previously called assert(), which panics on a key
// shorter than 8 bytes — making the ok==false return (and String's
// "<invalid>" rendering, which relies on it) unreachable for malformed
// keys. A too-short key is now reported via ok == false; a nil key still
// panics, matching assert's nil behavior.
func (p iKey) parseNum() (seq uint64, t vType, ok bool) {
	if p == nil {
		panic("nil iKey")
	}
	if len(p) < 8 {
		return
	}
	num := p.num()
	seq, t = uint64(num>>8), vType(num&0xff)
	if t > tVal {
		return 0, 0, false
	}
	ok = true
	return
}
// String renders the key as "ukey:type:seq" for debugging; empty keys
// print "<nil>" and malformed ones "<invalid>".
func (p iKey) String() string {
	if len(p) == 0 {
		return "<nil>"
	}
	seq, t, ok := p.parseNum()
	if !ok {
		return "<invalid>"
	}
	return fmt.Sprintf("%s:%s:%d", shorten(string(p.ukey())), t, seq)
}
leveldb: iKey: don't use assert on iKey.parseNum()
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"
)
// vType is the value-type tag stored in the low 8 bits of an internal key.
type vType int
// String renders the tag for debugging: "d" = deletion, "v" = value,
// "x" = unknown.
func (t vType) String() string {
	switch t {
	case tDel:
		return "d"
	case tVal:
		return "v"
	}
	return "x"
}
// Value types encoded as the last component of internal keys.
// Don't modify; these values are saved to disk.
const (
	tDel vType = iota
	tVal
)
// tSeek defines the vType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
const tSeek = tVal
const (
	// Maximum value possible for a sequence number; the low 8 bits of
	// the packed trailer are used by the value type, so only 56 bits
	// remain for the sequence number itself.
	kMaxSeq uint64 = (uint64(1) << 56) - 1
	// Maximum value possible for packed sequence number and type.
	kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
)
// Maximum number encoded in little-endian bytes (filled by init below).
var kMaxNumBytes = make([]byte, 8)
func init() {
	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
}
// iKey is an internal key: the user key followed by an 8-byte
// little-endian trailer packing (sequence number << 8 | value type).
type iKey []byte
// newIKey builds the internal key for ukey at sequence seq with value
// type t; it panics when either is out of range.
func newIKey(ukey []byte, seq uint64, t vType) iKey {
	if seq > kMaxSeq || t > tVal {
		panic("invalid seq number or value type")
	}
	b := make(iKey, len(ukey)+8)
	copy(b, ukey)
	binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t))
	return b
}
// assert panics unless p is a structurally valid internal key (non-nil
// and long enough to carry the 8-byte trailer).
func (p iKey) assert() {
	if p == nil {
		panic("nil iKey")
	}
	if len(p) < 8 {
		panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p)))
	}
}
// ukey strips the 8-byte packed (seq, type) trailer, leaving the user key.
func (p iKey) ukey() []byte {
	p.assert()
	return p[:len(p)-8]
}
// num returns the packed (sequence<<8 | type) trailer of the key.
func (p iKey) num() uint64 {
	p.assert()
	return binary.LittleEndian.Uint64(p[len(p)-8:])
}
// parseNum splits the key's trailer into its sequence number and value
// type. It deliberately does NOT use assert(): a key shorter than 8 bytes
// is reported via ok == false instead of a panic, so callers like String
// can render "<invalid>". A nil key still panics, matching assert.
func (p iKey) parseNum() (seq uint64, t vType, ok bool) {
	if p == nil {
		panic("nil iKey")
	}
	if len(p) < 8 {
		return
	}
	num := p.num()
	seq, t = uint64(num>>8), vType(num&0xff)
	if t > tVal {
		// Trailer carries an unknown value type: treat as malformed.
		return 0, 0, false
	}
	ok = true
	return
}
// String renders the key as "ukey:type:seq" for debugging; empty keys
// print "<nil>" and malformed ones "<invalid>".
func (p iKey) String() string {
	if len(p) == 0 {
		return "<nil>"
	}
	if seq, t, ok := p.parseNum(); ok {
		return fmt.Sprintf("%s:%s:%d", shorten(string(p.ukey())), t, seq)
	}
	return "<invalid>"
}
|
package layouts
import (
"fmt"
"github.com/skelterjohn/geom"
"github.com/skelterjohn/go.uik"
)
// Flow is a Foundation that lays its child blocks out side by side, left
// to right, in the order they were added. All fields below are owned by
// the HandleEvents goroutine.
type Flow struct {
	uik.Foundation
	// last size hint reported by each child
	childSizeHints map[*uik.Block]uik.SizeHint
	// placement order of each child
	childIndices map[*uik.Block]int
	// number of children currently managed
	count int
	// running extent used while placing blocks
	size geom.Coord
	// sum of all child size hints
	sizeHint uik.SizeHint
	// send a block here to add it to the flow
	Add chan *uik.Block
	// send a block here to remove it from the flow
	Remove chan *uik.Block
}
// NewFlow builds a Flow with the given initial size and starts its event
// loop goroutine.
func NewFlow(size geom.Coord) (f *Flow) {
	f = &Flow{}
	f.Size = size
	f.Initialize()
	go f.HandleEvents()
	return f
}
// Initialize sets up the embedded Foundation and allocates the flow's
// channels and child-tracking maps.
func (f *Flow) Initialize() {
	f.Foundation.Initialize()
	f.Add = make(chan *uik.Block, 10)
	f.Remove = make(chan *uik.Block, 10)
	f.childSizeHints = make(map[*uik.Block]uik.SizeHint)
	f.childIndices = make(map[*uik.Block]int)
}
// places the block immediately to the right of the last block placed
//
// PlaceBlock registers b with the Foundation and assigns it bounds
// starting at the current running width (f.size.X); the running width is
// then advanced by the block's own width.
func (f *Flow) PlaceBlock(b *uik.Block) {
	f.AddBlock(b)
	f.ChildrenBounds[b] = geom.Rect{
		Min: geom.Coord{f.size.X, 0},
		Max: geom.Coord{f.size.X + b.Size.X, b.Size.Y},
	}
	f.size.X += b.Size.X
}
// reflow recomputes child layout. Still a stub at this point: it only
// dumps the aggregated preferred size for debugging.
func (f *Flow) reflow() {
	fmt.Println(f.sizeHint.PreferredSize)
}
// dispense events to children, as appropriate
//
// HandleEvents is the flow's event loop, started once by NewFlow. It owns
// all mutable layout state (childSizeHints, childIndices, count,
// sizeHint), which is only ever touched from this goroutine.
func (f *Flow) HandleEvents() {
	for {
		select {
		case e := <-f.Events:
			switch e := e.(type) {
			case uik.ResizeEvent:
				// The flow itself was resized: record and re-lay-out.
				f.Size = e.Size
				f.reflow()
			default:
				// Everything else goes to the embedded Foundation.
				f.Foundation.HandleEvent(e)
			}
		case e := <-f.Redraw:
			f.DoRedraw(e)
		case e := <-f.CompositeBlockRequests:
			f.DoCompositeBlockRequest(e)
		case bsh := <-f.BlockSizeHints:
			if !f.Children[bsh.Block] {
				// Do I know you?
				break
			}
			// Keep f.sizeHint equal to the sum of all child hints: subtract
			// the child's previous contribution (if any), then add the new.
			if osh, ok := f.childSizeHints[bsh.Block]; ok {
				f.sizeHint.MinSize.X -= osh.MinSize.X
				f.sizeHint.MinSize.Y -= osh.MinSize.Y
				f.sizeHint.PreferredSize.X -= osh.PreferredSize.X
				f.sizeHint.PreferredSize.Y -= osh.PreferredSize.Y
				f.sizeHint.MaxSize.X -= osh.MaxSize.X
				f.sizeHint.MaxSize.Y -= osh.MaxSize.Y
			}
			f.childSizeHints[bsh.Block] = bsh.SizeHint
			f.sizeHint.MinSize.X += bsh.SizeHint.MinSize.X
			f.sizeHint.MinSize.Y += bsh.SizeHint.MinSize.Y
			f.sizeHint.PreferredSize.X += bsh.SizeHint.PreferredSize.X
			f.sizeHint.PreferredSize.Y += bsh.SizeHint.PreferredSize.Y
			f.sizeHint.MaxSize.X += bsh.SizeHint.MaxSize.X
			f.sizeHint.MaxSize.Y += bsh.SizeHint.MaxSize.Y
			// NOTE(review): forwarding the aggregate hint upward is
			// disabled here — confirm whether it should be re-enabled.
			//f.SizeHints <- f.sizeHint
			f.reflow()
			// If the child has already been placed, grow its bounds to its
			// newly preferred size and tell it so.
			var bbs geom.Rect
			var ok bool
			if bbs, ok = f.ChildrenBounds[bsh.Block]; !ok {
				break
			}
			bbs.Max.X = bbs.Min.X + bsh.SizeHint.PreferredSize.X
			bbs.Max.Y = bbs.Min.Y + bsh.SizeHint.PreferredSize.Y
			f.ChildrenBounds[bsh.Block] = bbs
			bsh.Block.EventsIn <- uik.ResizeEvent {
				Size: bsh.SizeHint.PreferredSize,
			}
		case b := <-f.Add:
			f.PlaceBlock(b)
			f.childIndices[b] = f.count
			f.count++
			f.reflow()
		case b := <-f.Remove:
			i, ok := f.childIndices[b]
			if !ok {
				break
			}
			// decrement all following blocks
			for ob, j := range f.childIndices {
				if j > i {
					f.childIndices[ob] = j-1
				}
			}
			delete(f.childIndices, b)
			f.count--
			delete(f.childSizeHints, b)
			f.RemoveBlock(b)
			f.reflow()
		}
	}
}
flow layout working, for some definition of working
package layouts
import (
"math"
"github.com/skelterjohn/geom"
"github.com/skelterjohn/go.uik"
)
// Flow is a Foundation that lays its child blocks out side by side, left
// to right, in the order they were added. All fields below are owned by
// the HandleEvents goroutine.
type Flow struct {
	uik.Foundation
	// last size hint reported by each child
	childSizeHints map[*uik.Block]uik.SizeHint
	// placement order of each child
	childIndices map[*uik.Block]int
	// number of children currently managed
	count int
	// running extent used while placing blocks
	size geom.Coord
	// sum of all child size hints
	sizeHint uik.SizeHint
	// send a block here to add it to the flow
	Add chan *uik.Block
	// send a block here to remove it from the flow
	Remove chan *uik.Block
}
// NewFlow builds a Flow with the given initial size and starts its event
// loop goroutine.
func NewFlow(size geom.Coord) (f *Flow) {
	f = new(Flow)
	f.Size = size
	f.Initialize()
	go f.HandleEvents()
	return
}
// Initialize sets up the embedded Foundation and allocates the flow's
// channels and child-tracking maps.
func (f *Flow) Initialize() {
	f.Foundation.Initialize()
	f.Add = make(chan *uik.Block, 10)
	f.Remove = make(chan *uik.Block, 10)
	f.childSizeHints = map[*uik.Block]uik.SizeHint{}
	f.childIndices = map[*uik.Block]int{}
}
// places the block immediately to the right of the last block placed
//
// PlaceBlock registers b with the Foundation and assigns it bounds
// starting at the current running width (f.size.X); the running width is
// then advanced by the block's own width. reflow later overwrites these
// provisional bounds.
func (f *Flow) PlaceBlock(b *uik.Block) {
	f.AddBlock(b)
	f.ChildrenBounds[b] = geom.Rect{
		Min: geom.Coord{f.size.X, 0},
		Max: geom.Coord{f.size.X + b.Size.X, b.Size.Y},
	}
	f.size.X += b.Size.X
}
// reflow lays the children out left to right in placement order. The
// available size is clamped up to the aggregate minimum, widths are
// scaled down proportionally (ratioX) when the preferred total does not
// fit, and each child's height is its preferred height clamped to the
// render height (but never below its minimum).
func (f *Flow) reflow() {
	// Materialize the children in placement order.
	children := make([]*uik.Block, f.count)
	for child, i := range f.childIndices {
		children[i] = child
	}
	renderSize := f.Size
	renderSize.X = math.Max(f.sizeHint.MinSize.X, renderSize.X)
	// Bug fix: the Y clamp previously used f.sizeHint.MinSize.X (a
	// copy-paste slip), clamping the height against the minimum WIDTH.
	renderSize.Y = math.Max(f.sizeHint.MinSize.Y, renderSize.Y)
	// Shrink widths proportionally when the preferred total overflows.
	ratioX := 1.0
	if renderSize.X < f.sizeHint.PreferredSize.X {
		ratioX = renderSize.X / f.sizeHint.PreferredSize.X
	}
	var left float64
	for i := 0; i < f.count; i++ {
		child := children[i]
		csh, ok := f.childSizeHints[child]
		if !ok {
			// No hint yet: leave this child's bounds untouched.
			continue
		}
		cbounds := geom.Rect{geom.Coord{left, 0}, geom.Coord{}}
		if csh.PreferredSize.Y <= renderSize.Y {
			cbounds.Max.Y = csh.PreferredSize.Y
		} else if csh.MinSize.Y <= renderSize.Y {
			cbounds.Max.Y = renderSize.Y
		} else {
			cbounds.Max.Y = csh.MinSize.Y
		}
		cbounds.Max.X = left + ratioX*csh.PreferredSize.X
		f.ChildrenBounds[child] = cbounds
		left = cbounds.Max.X
	}
}
// dispense events to children, as appropriate
//
// HandleEvents is the flow's event loop, started once by NewFlow. It owns
// all mutable layout state (childSizeHints, childIndices, count,
// sizeHint), which is only ever touched from this goroutine.
func (f *Flow) HandleEvents() {
	for {
		select {
		case e := <-f.Events:
			switch e := e.(type) {
			case uik.ResizeEvent:
				// The flow itself was resized: record and re-lay-out.
				f.Size = e.Size
				f.reflow()
			default:
				// Everything else goes to the embedded Foundation.
				f.Foundation.HandleEvent(e)
			}
		case e := <-f.Redraw:
			f.DoRedraw(e)
		case e := <-f.CompositeBlockRequests:
			f.DoCompositeBlockRequest(e)
		case bsh := <-f.BlockSizeHints:
			if !f.Children[bsh.Block] {
				// Do I know you?
				break
			}
			// Keep f.sizeHint equal to the sum of all child hints: subtract
			// the child's previous contribution (if any), then add the new.
			if osh, ok := f.childSizeHints[bsh.Block]; ok {
				f.sizeHint.MinSize.X -= osh.MinSize.X
				f.sizeHint.MinSize.Y -= osh.MinSize.Y
				f.sizeHint.PreferredSize.X -= osh.PreferredSize.X
				f.sizeHint.PreferredSize.Y -= osh.PreferredSize.Y
				f.sizeHint.MaxSize.X -= osh.MaxSize.X
				f.sizeHint.MaxSize.Y -= osh.MaxSize.Y
			}
			f.childSizeHints[bsh.Block] = bsh.SizeHint
			f.sizeHint.MinSize.X += bsh.SizeHint.MinSize.X
			f.sizeHint.MinSize.Y += bsh.SizeHint.MinSize.Y
			f.sizeHint.PreferredSize.X += bsh.SizeHint.PreferredSize.X
			f.sizeHint.PreferredSize.Y += bsh.SizeHint.PreferredSize.Y
			f.sizeHint.MaxSize.X += bsh.SizeHint.MaxSize.X
			f.sizeHint.MaxSize.Y += bsh.SizeHint.MaxSize.Y
			// Forward the updated aggregate hint to whoever is above us.
			f.SizeHints.Stack(f.sizeHint)
			f.reflow()
			// If the child has already been placed, grow its bounds to its
			// newly preferred size and tell it so.
			var bbs geom.Rect
			var ok bool
			if bbs, ok = f.ChildrenBounds[bsh.Block]; !ok {
				break
			}
			bbs.Max.X = bbs.Min.X + bsh.SizeHint.PreferredSize.X
			bbs.Max.Y = bbs.Min.Y + bsh.SizeHint.PreferredSize.Y
			f.ChildrenBounds[bsh.Block] = bbs
			bsh.Block.EventsIn <- uik.ResizeEvent {
				Size: bsh.SizeHint.PreferredSize,
			}
		case b := <-f.Add:
			f.PlaceBlock(b)
			f.childIndices[b] = f.count
			f.count++
			f.reflow()
		case b := <-f.Remove:
			i, ok := f.childIndices[b]
			if !ok {
				break
			}
			// decrement all following blocks
			for ob, j := range f.childIndices {
				if j > i {
					f.childIndices[ob] = j-1
				}
			}
			delete(f.childIndices, b)
			f.count--
			delete(f.childSizeHints, b)
			f.RemoveBlock(b)
			f.reflow()
		}
	}
}
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Ben Darnell
package storage
import (
"bytes"
"fmt"
"strconv"
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/gogo/protobuf/proto"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/util/bufalloc"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/retry"
"github.com/cockroachdb/cockroach/util/timeutil"
)
// Compile-time assertion that *Replica satisfies raft.Storage.
var _ raft.Storage = (*Replica)(nil)
// All calls to raft.RawNode require that an exclusive lock is held.
// All of the functions exposed via the raft.Storage interface will in
// turn be called from RawNode. So the lock that guards raftGroup must
// be the same as the lock that guards all the inner fields.
//
// Many of the methods defined in this file are wrappers around static
// functions. This is done to facilitate their use from
// Replica.Snapshot(), where it is important that all the data that
// goes into the snapshot comes from a consistent view of the
// database, and not the replica's in-memory state or via a reference
// to Replica.store.Engine().
// InitialState implements the raft.Storage interface.
// InitialState requires that the replica lock be held.
func (r *Replica) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	hs, err := loadHardState(context.Background(), r.store.Engine(), r.RangeID)
	// For uninitialized ranges, membership is unknown at this point.
	if raft.IsEmptyHardState(hs) || err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	// Synthesize the ConfState from the in-memory descriptor; r.mu.state is
	// guarded by the replica lock, which the contract above requires.
	var cs raftpb.ConfState
	for _, rep := range r.mu.state.Desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}
	return hs, cs, nil
}
// Entries implements the raft.Storage interface. Note that maxBytes is advisory
// and this method will always return at least one entry even if it exceeds
// maxBytes. Passing maxBytes equal to zero disables size checking.
// TODO(bdarnell): consider caching for recent entries, if rocksdb's built in
// caching is insufficient.
// Entries requires that the replica lock is held.
func (r *Replica) Entries(lo, hi, maxBytes uint64) ([]raftpb.Entry, error) {
	// Read from a consistent engine snapshot so the log scan is not affected
	// by concurrent writes; the snapshot is released on return.
	snap := r.store.NewSnapshot()
	defer snap.Close()
	return entries(context.Background(), snap, r.RangeID, lo, hi, maxBytes)
}
// entries loads the raft log entries in [lo, hi) for rangeID from the given
// engine reader. maxBytes is advisory (zero disables the check; at least one
// entry is returned even if it exceeds maxBytes). On a short read it
// distinguishes between raft.ErrCompacted, raft.ErrUnavailable, and a real
// gap in the log. Static so it can run against a consistent engine snapshot.
func entries(
	ctx context.Context,
	e engine.Reader,
	rangeID roachpb.RangeID,
	lo, hi, maxBytes uint64,
) ([]raftpb.Entry, error) {
	if lo > hi {
		return nil, errors.Errorf("lo:%d is greater than hi:%d", lo, hi)
	}
	// Scan over the log to find the requested entries in the range [lo, hi),
	// stopping once we have enough.
	ents := make([]raftpb.Entry, 0, hi-lo)
	size := uint64(0)
	var ent raftpb.Entry
	expectedIndex := lo
	exceededMaxBytes := false
	scanFunc := func(kv roachpb.KeyValue) (bool, error) {
		if err := kv.Value.GetProto(&ent); err != nil {
			return false, err
		}
		// Exit early if we have any gaps or it has been compacted.
		if ent.Index != expectedIndex {
			return true, nil
		}
		expectedIndex++
		size += uint64(ent.Size())
		ents = append(ents, ent)
		exceededMaxBytes = maxBytes > 0 && size > maxBytes
		// Returning true terminates the iteration.
		return exceededMaxBytes, nil
	}
	if err := iterateEntries(ctx, e, rangeID, lo, hi, scanFunc); err != nil {
		return nil, err
	}
	// Did the correct number of results come back? If so, we're all good.
	if uint64(len(ents)) == hi-lo {
		return ents, nil
	}
	// Did we hit the size limit? If so, return what we have.
	if exceededMaxBytes {
		return ents, nil
	}
	// Did we get any results at all? Because something went wrong.
	if len(ents) > 0 {
		// Was the lo already truncated?
		if ents[0].Index > lo {
			return nil, raft.ErrCompacted
		}
		// Was the missing index after the last index?
		lastIndex, err := loadLastIndex(ctx, e, rangeID)
		if err != nil {
			return nil, err
		}
		if lastIndex <= expectedIndex {
			return nil, raft.ErrUnavailable
		}
		// We have a gap in the record, if so, return a nasty error.
		return nil, errors.Errorf("there is a gap in the index record between lo:%d and hi:%d at index:%d", lo, hi, expectedIndex)
	}
	// No results, was it due to unavailability or truncation?
	ts, err := loadTruncatedState(ctx, e, rangeID)
	if err != nil {
		return nil, err
	}
	if ts.Index >= lo {
		// The requested lo index has already been truncated.
		return nil, raft.ErrCompacted
	}
	// The requested lo index does not yet exist.
	return nil, raft.ErrUnavailable
}
// iterateEntries scans the raft log keys of rangeID in [lo, hi) and invokes
// scanFunc on each key/value pair. scanFunc returning true (or an error)
// stops the iteration early.
func iterateEntries(
	ctx context.Context,
	e engine.Reader,
	rangeID roachpb.RangeID,
	lo,
	hi uint64,
	scanFunc func(roachpb.KeyValue) (bool, error),
) error {
	_, err := engine.MVCCIterate(
		ctx, e,
		keys.RaftLogKey(rangeID, lo),
		keys.RaftLogKey(rangeID, hi),
		hlc.ZeroTimestamp,
		true, /* consistent */
		nil, /* txn */
		false, /* !reverse */
		scanFunc,
	)
	return err
}
// Term implements the raft.Storage interface.
// Term requires that the replica lock is held.
func (r *Replica) Term(i uint64) (uint64, error) {
	// Use a consistent engine snapshot for the underlying log read.
	snap := r.store.NewSnapshot()
	defer snap.Close()
	return term(context.Background(), snap, r.RangeID, i)
}
// term returns the term of the raft log entry at index i read from eng. If
// the entry has been compacted away but i matches the truncated state's
// index, the truncated term is returned instead of ErrCompacted.
func term(ctx context.Context, eng engine.Reader, rangeID roachpb.RangeID, i uint64) (uint64, error) {
	ents, err := entries(ctx, eng, rangeID, i, i+1, 0)
	if err == raft.ErrCompacted {
		// NOTE: the inner err deliberately shadows the outer one.
		ts, err := loadTruncatedState(ctx, eng, rangeID)
		if err != nil {
			return 0, err
		}
		if i == ts.Index {
			return ts.Term, nil
		}
		return 0, raft.ErrCompacted
	} else if err != nil {
		return 0, err
	}
	if len(ents) == 0 {
		return 0, nil
	}
	return ents[0].Term, nil
}
// LastIndex implements the raft.Storage interface.
// LastIndex requires that the replica lock is held.
func (r *Replica) LastIndex() (uint64, error) {
	// The last index is cached in memory under the replica lock.
	return r.mu.lastIndex, nil
}
// raftTruncatedStateLocked returns metadata about the log that preceded the
// first current entry. This includes both entries that have been compacted away
// and the dummy entries that make up the starting point of an empty log.
// raftTruncatedStateLocked requires that the replica lock be held.
func (r *Replica) raftTruncatedStateLocked(ctx context.Context) (roachpb.RaftTruncatedState, error) {
	// Fast path: serve from the in-memory cache when populated.
	if r.mu.state.TruncatedState != nil {
		return *r.mu.state.TruncatedState, nil
	}
	ts, err := loadTruncatedState(ctx, r.store.Engine(), r.RangeID)
	if err != nil {
		return ts, err
	}
	// Cache the loaded state for subsequent calls; a zero index is treated
	// as "not present" and is not cached.
	if ts.Index != 0 {
		r.mu.state.TruncatedState = &ts
	}
	return ts, nil
}
// FirstIndex implements the raft.Storage interface.
// FirstIndex requires that the replica lock is held.
func (r *Replica) FirstIndex() (uint64, error) {
	ts, err := r.raftTruncatedStateLocked(context.Background())
	if err != nil {
		return 0, err
	}
	// The first available entry immediately follows the truncated prefix.
	return ts.Index + 1, nil
}
// GetFirstIndex is the same function as FirstIndex but it does not require
// that the replica lock is held.
func (r *Replica) GetFirstIndex() (uint64, error) {
	// Acquire the replica lock that FirstIndex's contract requires.
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.FirstIndex()
}
// Snapshot implements the raft.Storage interface.
// Snapshot requires that the replica lock is held.
func (r *Replica) Snapshot() (raftpb.Snapshot, error) {
	// Delegate to the context-aware implementation with a background context.
	return r.SnapshotWithContext(context.Background())
}
// SnapshotWithContext is the main implementation for Snapshot() but it takes
// a context to allow tracing.
//
// Like Snapshot(), it requires that the replica lock is held. Because of
// that, r.mu fields are read directly: acquiring r.mu here would deadlock
// (sync.Mutex is not reentrant). Anything the async worker goroutine needs
// from r.mu is copied into locals before the goroutine is started so the
// worker never touches r.mu without synchronization.
func (r *Replica) SnapshotWithContext(ctx context.Context) (raftpb.Snapshot, error) {
	rangeID := r.RangeID
	// If a snapshot is in progress, see if it's ready.
	if r.mu.snapshotChan != nil {
		select {
		case snapData, ok := <-r.mu.snapshotChan:
			if ok {
				return snapData, nil
			}
			// If the old channel was closed, fall through to start a new task.
		default:
			// If the result is not ready, return immediately.
			log.Trace(ctx, "snapshot not yet ready")
			return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
		}
	}
	if r.exceedsDoubleSplitSizeLocked() {
		// The replica lock is already held (method contract above); locking
		// r.mu here would self-deadlock, so read the fields directly.
		maxBytes := r.mu.maxBytes
		size := r.mu.state.Stats.Total()
		log.Infof(ctx,
			"%s: not generating snapshot because replica is too large: %d > 2 * %d",
			r, size, maxBytes)
		return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
	}
	// See if there is already a snapshot running for this store.
	if !r.store.AcquireRaftSnapshot() {
		log.Trace(ctx, "snapshot already running")
		return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
	}
	// Copy the start key while the replica lock is held; reading
	// r.mu.state from the worker goroutine would be a data race.
	startKey := r.mu.state.Desc.StartKey
	// Use an unbuffered channel so the worker stays alive until someone
	// reads from the channel, and can abandon the snapshot if it gets stale.
	ch := make(chan (raftpb.Snapshot))
	if r.store.Stopper().RunAsyncTask(func() {
		defer close(ch)
		sp := r.store.Tracer().StartSpan(fmt.Sprintf("snapshot async %s", r))
		ctxInner := opentracing.ContextWithSpan(context.Background(), sp)
		defer sp.Finish()
		snap := r.store.NewSnapshot()
		log.Trace(ctxInner, "new engine snapshot")
		defer snap.Close()
		defer r.store.ReleaseRaftSnapshot()
		// Delegate to a static function to make sure that we do not depend
		// on any indirect calls to r.store.Engine() (or other in-memory
		// state of the Replica). Everything must come from the snapshot.
		snapData, err := snapshot(context.Background(), snap, rangeID, startKey)
		if err != nil {
			log.Errorf(ctxInner, "%s: error generating snapshot: %s", r, err)
		} else {
			log.Trace(ctxInner, "snapshot generated")
			r.store.metrics.rangeSnapshotsGenerated.Inc(1)
			select {
			case ch <- snapData:
				log.Trace(ctxInner, "snapshot accepted")
			case <-time.After(r.store.ctx.AsyncSnapshotMaxAge):
				// If raft decides it doesn't need this snapshot any more (or
				// just takes too long to use it), abandon it to save memory.
				log.Infof(ctxInner, "%s: abandoning snapshot after %s", r, r.store.ctx.AsyncSnapshotMaxAge)
			case <-r.store.Stopper().ShouldQuiesce():
			}
		}
	}) == nil {
		r.mu.snapshotChan = ch
	} else {
		// The task was not started (e.g. the stopper is draining); release
		// the store-wide snapshot slot we acquired above.
		r.store.ReleaseRaftSnapshot()
	}
	if r.store.ctx.BlockingSnapshotDuration > 0 {
		select {
		case snap, ok := <-r.mu.snapshotChan:
			if ok {
				return snap, nil
			}
		case <-time.After(r.store.ctx.BlockingSnapshotDuration):
			log.Trace(ctx, "snapshot blocking duration exceeded")
		}
	}
	return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
}
// GetSnapshot wraps Snapshot() but does not require the replica lock
// to be held and it will block instead of returning
// ErrSnapshotTemporaryUnavailable.
func (r *Replica) GetSnapshot(ctx context.Context) (raftpb.Snapshot, error) {
	retryOptions := retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     50 * time.Millisecond,
		Multiplier:     2,
		Closer:         r.store.Stopper().ShouldQuiesce(),
	}
	for retry := retry.Start(retryOptions); retry.Next(); {
		log.Tracef(ctx, "snapshot retry loop pass %d", retry.CurrentAttempt())
		// Take the replica lock only for the Snapshot call itself; the
		// (possibly long) wait on the channel happens unlocked below.
		r.mu.Lock()
		snap, err := r.SnapshotWithContext(ctx)
		snapshotChan := r.mu.snapshotChan
		r.mu.Unlock()
		if err == raft.ErrSnapshotTemporarilyUnavailable {
			if snapshotChan == nil {
				// The call to Snapshot() didn't start an async process due to
				// rate limiting. Try again later.
				continue
			}
			var ok bool
			snap, ok = <-snapshotChan
			if ok {
				return snap, nil
			}
			// Each snapshot worker's output can only be consumed once.
			// We could be racing with raft itself, so if we get a closed
			// channel loop back and try again.
		} else {
			return snap, err
		}
	}
	// The retry loop only exits via its Closer, i.e. on stopper quiescence.
	return raftpb.Snapshot{}, &roachpb.NodeUnavailableError{}
}
// snapshot builds a raft snapshot of rangeID from the data visible in the
// given engine reader (typically a consistent engine snapshot). It bundles
// the range descriptor, all replicated range data, and the raft log tail
// [truncated+1, applied+1) into an encoded roachpb.RaftSnapshotData. Static
// so that it cannot accidentally read the Replica's in-memory state.
func snapshot(
	ctx context.Context,
	snap engine.Reader,
	rangeID roachpb.RangeID,
	startKey roachpb.RKey,
) (raftpb.Snapshot, error) {
	start := timeutil.Now()
	var snapData roachpb.RaftSnapshotData
	truncState, err := loadTruncatedState(ctx, snap, rangeID)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	// The log entries to include start right after the truncated prefix.
	firstIndex := truncState.Index + 1
	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, _, err := loadAppliedIndex(ctx, snap, rangeID)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	var desc roachpb.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(ctx, snap, keys.RangeDescriptorKey(startKey),
		hlc.MaxTimestamp, false /* !consistent */, nil, &desc)
	if err != nil {
		return raftpb.Snapshot{}, errors.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return raftpb.Snapshot{}, errors.Errorf("couldn't find range descriptor")
	}
	// Store RangeDescriptor as metadata, it will be retrieved by ApplySnapshot()
	snapData.RangeDescriptor = desc
	// Iterate over all the data in the range, including local-only data like
	// the sequence cache.
	iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	defer iter.Close()
	// Reuse one allocator across all KV pairs to batch the copies.
	var alloc bufalloc.ByteAllocator
	for ; iter.Valid(); iter.Next() {
		var key engine.MVCCKey
		var value []byte
		alloc, key, value = iter.allocIterKeyValue(alloc)
		snapData.KV = append(snapData.KV,
			roachpb.RaftSnapshotData_KeyValue{
				Key:       key.Key,
				Value:     value,
				Timestamp: key.Timestamp,
			})
	}
	// Include log entries up to and including the applied index.
	endIndex := appliedIndex + 1
	snapData.LogEntries = make([][]byte, 0, endIndex-firstIndex)
	scanFunc := func(kv roachpb.KeyValue) (bool, error) {
		bytes, err := kv.Value.GetBytes()
		if err == nil {
			snapData.LogEntries = append(snapData.LogEntries, bytes)
		}
		return false, err
	}
	if err := iterateEntries(ctx, snap, rangeID, firstIndex, endIndex, scanFunc); err != nil {
		return raftpb.Snapshot{}, err
	}
	data, err := protoutil.Marshal(&snapData)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	// Synthesize our raftpb.ConfState from desc.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}
	term, err := term(ctx, snap, rangeID, appliedIndex)
	if err != nil {
		return raftpb.Snapshot{}, errors.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}
	log.Infof(ctx, "generated snapshot for range %s at index %d in %s. encoded size=%d, %d KV pairs, %d log entries",
		rangeID, appliedIndex, timeutil.Since(start), len(data), len(snapData.KV), len(snapData.LogEntries))
	return raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index:     appliedIndex,
			Term:      term,
			ConfState: cs,
		},
	}, nil
}
// append the given entries to the raft log. Takes the previous values of
// r.mu.lastIndex and r.mu.raftLogSize, and returns new values. We do this
// rather than modifying them directly because these modifications need to be
// atomic with the commit of the batch.
func (r *Replica) append(
	ctx context.Context,
	batch engine.ReadWriter,
	prevLastIndex uint64,
	prevRaftLogSize int64,
	entries []raftpb.Entry,
) (uint64, int64, error) {
	if len(entries) == 0 {
		return prevLastIndex, prevRaftLogSize, nil
	}
	// diff accumulates the MVCC stats delta of all writes/deletes below; its
	// SysBytes component is used to maintain the raft log size.
	var diff enginepb.MVCCStats
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := engine.MVCCPutProto(ctx, batch, &diff, key, hlc.ZeroTimestamp, nil /* txn */, ent); err != nil {
			return 0, 0, err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(ctx, batch, &diff, keys.RaftLogKey(r.RangeID, i),
			hlc.ZeroTimestamp, nil /* txn */)
		if err != nil {
			return 0, 0, err
		}
	}
	// Persist the new last index in the same batch.
	if err := setLastIndex(ctx, batch, r.RangeID, lastIndex); err != nil {
		return 0, 0, err
	}
	raftLogSize := prevRaftLogSize + diff.SysBytes
	return lastIndex, raftLogSize, nil
}
// updateRangeInfo is called whenever a range is updated by ApplySnapshot
// or is created by range splitting to setup the fields which are
// uninitialized or need updating.
func (r *Replica) updateRangeInfo(desc *roachpb.RangeDescriptor) error {
	// RangeMaxBytes should be updated by looking up Zone Config in two cases:
	// 1. After applying a snapshot, if the zone config was not updated for
	// this key range, then maxBytes of this range will not be updated either.
	// 2. After a new range is created by a split, only copying maxBytes from
	// the original range wont work as the original and new ranges might belong
	// to different zones.
	// Load the system config.
	cfg, ok := r.store.Gossip().GetSystemConfig()
	if !ok {
		// This could be before the system config was ever gossiped,
		// or it expired. Let the gossip callback set the info.
		log.Warningf(context.TODO(), "%s: no system config available, cannot determine range MaxBytes", r)
		return nil
	}
	// Find zone config for this range.
	zone, err := cfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("%s: failed to lookup zone config: %s", r, err)
	}
	r.SetMaxBytes(zone.RangeMaxBytes)
	return nil
}
// applySnapshot updates the replica based on the given snapshot and associated
// HardState (which may be empty, as Raft may apply some snapshots which don't
// require an update to the HardState). All snapshots must pass through Raft
// for correctness, i.e. the parameters to this method must be taken from
// a raft.Ready.
//
// The sequence is: clear all existing range data in a batch, write the
// snapshot's KV data and raft log into the same batch, commit it, and only
// then update the in-memory state under the replica lock.
func (r *Replica) applySnapshot(
	ctx context.Context, snap raftpb.Snapshot, hs raftpb.HardState,
) error {
	// We use a separate batch to apply the snapshot since the Replica (and in
	// particular the last index) is updated after the batch commits. Using a
	// separate batch also allows for future optimization (such as using a
	// Distinct() batch).
	batch := r.store.Engine().NewBatch()
	defer batch.Close()
	snapData := roachpb.RaftSnapshotData{}
	err := proto.Unmarshal(snap.Data, &snapData)
	if err != nil {
		return err
	}
	// Extract the updated range descriptor.
	desc := snapData.RangeDescriptor
	// Fill the reservation if there was one for this range, regardless of
	// whether the application succeeded.
	defer r.store.bookie.Fill(desc.RangeID)
	r.mu.Lock()
	replicaID := r.mu.replicaID
	raftLogSize := r.mu.raftLogSize
	r.mu.Unlock()
	isPreemptive := replicaID == 0 // only used for accounting and log format
	replicaIDStr := "[?]"
	snapType := "preemptive"
	if !isPreemptive {
		replicaIDStr = strconv.FormatInt(int64(replicaID), 10)
		snapType = "Raft"
	}
	log.Infof(ctx, "%s: with replicaID %s, applying %s snapshot for range %d at index %d "+
		"(encoded size=%d, %d KV pairs, %d log entries)",
		r, replicaIDStr, snapType, desc.RangeID, snap.Metadata.Index,
		len(snap.Data), len(snapData.KV), len(snapData.LogEntries))
	defer func(start time.Time) {
		log.Infof(ctx, "%s: with replicaID %s, applied %s snapshot for range %d in %s",
			r, replicaIDStr, snapType, desc.RangeID, timeutil.Since(start))
	}(timeutil.Now())
	// Delete everything in the range and recreate it from the snapshot.
	// We need to delete any old Raft log entries here because any log entries
	// that predate the snapshot will be orphaned and never truncated or GC'd.
	iter := NewReplicaDataIterator(&desc, batch, false /* !replicatedOnly */)
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		if err := batch.Clear(iter.Key()); err != nil {
			return err
		}
	}
	// Determine the unreplicated key prefix so we can drop any
	// unreplicated keys from the snapshot.
	unreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(desc.RangeID)
	// Write the snapshot into the range.
	for _, kv := range snapData.KV {
		if bytes.HasPrefix(kv.Key, unreplicatedPrefix) {
			continue
		}
		mvccKey := engine.MVCCKey{
			Key:       kv.Key,
			Timestamp: kv.Timestamp,
		}
		if err := batch.Put(mvccKey, kv.Value); err != nil {
			return err
		}
	}
	// Decode the snapshot's raft log entries from their raw bytes.
	logEntries := make([]raftpb.Entry, len(snapData.LogEntries))
	for i, bytes := range snapData.LogEntries {
		if err := logEntries[i].Unmarshal(bytes); err != nil {
			return err
		}
	}
	// Write the snapshot's Raft log into the range.
	_, raftLogSize, err = r.append(ctx, batch, 0, raftLogSize, logEntries)
	if err != nil {
		return err
	}
	s, err := loadState(ctx, batch, &desc)
	if err != nil {
		return err
	}
	// As outlined above, last and applied index are the same after applying
	// the snapshot (i.e. the snapshot has no uncommitted tail).
	if s.RaftAppliedIndex != snap.Metadata.Index {
		log.Fatalf(ctx, "%s with state loaded from %d: snapshot resulted in appliedIndex=%d, metadataIndex=%d",
			r, s.Desc.RangeID, s.RaftAppliedIndex, snap.Metadata.Index)
	}
	if !raft.IsEmptyHardState(hs) {
		if err := setHardState(ctx, batch, s.Desc.RangeID, hs); err != nil {
			return errors.Wrapf(err, "unable to persist HardState %+v", &hs)
		}
	} else {
		// Note that we don't require that Raft supply us with a nonempty
		// HardState on a snapshot. We don't want to make that assumption
		// because it's not guaranteed by the contract. Raft *must* send us
		// a HardState when it increases the committed index as a result of the
		// snapshot, but who is to say it isn't going to accept a snapshot
		// which is identical to the current state?
	}
	if err := batch.Commit(); err != nil {
		return err
	}
	// The batch is durable; now update the in-memory state under the lock.
	r.mu.Lock()
	// We set the persisted last index to the last applied index. This is
	// not a correctness issue, but means that we may have just transferred
	// some entries we're about to re-request from the leader and overwrite.
	// However, raft.MultiNode currently expects this behaviour, and the
	// performance implications are not likely to be drastic. If our
	// feelings about this ever change, we can add a LastIndex field to
	// raftpb.SnapshotMetadata.
	r.mu.lastIndex = s.RaftAppliedIndex
	r.mu.raftLogSize = raftLogSize
	// Update the range and store stats.
	r.store.metrics.subtractMVCCStats(r.mu.state.Stats)
	r.store.metrics.addMVCCStats(s.Stats)
	r.mu.state = s
	r.assertStateLocked(r.store.Engine())
	r.mu.Unlock()
	// As the last deferred action after committing the batch, update other
	// fields which are uninitialized or need updating. This may not happen
	// if the system config has not yet been loaded. While config update
	// will correctly set the fields, there is no order guarantee in
	// ApplySnapshot.
	// TODO: should go through the standard store lock when adding a replica.
	if err := r.updateRangeInfo(&desc); err != nil {
		panic(err)
	}
	// Update the range descriptor. This is done last as this is the step that
	// makes the Replica visible in the Store.
	if err := r.setDesc(&desc); err != nil {
		panic(err)
	}
	if !isPreemptive {
		r.store.metrics.rangeSnapshotsNormalApplied.Inc(1)
	} else {
		r.store.metrics.rangeSnapshotsPreemptiveApplied.Inc(1)
	}
	return nil
}
// Raft commands are encoded with a 1-byte version (currently 0), an 8-byte ID,
// followed by the payload. This inflexible encoding is used so we can efficiently
// parse the command id while processing the logs.
// TODO(bdarnell): Is this commandID still appropriate for our needs?
const (
	// The prescribed length for each command ID.
	raftCommandIDLen = 8
	// The initial (and currently only) wire-format version byte.
	raftCommandEncodingVersion byte = 0
)
// encodeRaftCommand serializes a command ID and payload into the wire format
// consumed by DecodeRaftCommand: one version byte, the 8-byte ID, then the
// raw command bytes. It fatals on an ID of the wrong length.
func encodeRaftCommand(commandID string, command []byte) []byte {
	if len(commandID) != raftCommandIDLen {
		log.Fatalf(context.TODO(), "invalid command ID length; %d != %d", len(commandID), raftCommandIDLen)
	}
	buf := make([]byte, 0, 1+raftCommandIDLen+len(command))
	buf = append(buf, raftCommandEncodingVersion)
	buf = append(buf, commandID...)
	buf = append(buf, command...)
	return buf
}
// DecodeRaftCommand splits a raftpb.Entry.Data into its commandID and
// command portions. The caller is responsible for checking that the data
// is not empty (which indicates a dummy entry generated by raft rather
// than a real command). Usage is mostly internal to the storage package
// but is exported for use by debugging tools.
func DecodeRaftCommand(data []byte) (commandID string, command []byte) {
	if v := data[0]; v != raftCommandEncodingVersion {
		log.Fatalf(context.TODO(), "unknown command encoding version %v", v)
	}
	id := data[1 : 1+raftCommandIDLen]
	payload := data[1+raftCommandIDLen:]
	return string(id), payload
}
storage: fix data race and deadlock in Replica snapshot generation

SnapshotWithContext acquired r.mu while the replica lock was already held
(deadlock) and read r.mu.state.Desc.StartKey from the async snapshot worker
without synchronization (data race). Read the fields directly under the
already-held lock and copy the start key before starting the worker.

Closes #8381.
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Ben Darnell
package storage
import (
"bytes"
"fmt"
"strconv"
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/gogo/protobuf/proto"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/util/bufalloc"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/retry"
"github.com/cockroachdb/cockroach/util/timeutil"
)
var _ raft.Storage = (*Replica)(nil)
// All calls to raft.RawNode require that an exclusive lock is held.
// All of the functions exposed via the raft.Storage interface will in
// turn be called from RawNode. So the lock that guards raftGroup must
// be the same as the lock that guards all the inner fields.
//
// Many of the methods defined in this file are wrappers around static
// functions. This is done to facilitate their use from
// Replica.Snapshot(), where it is important that all the data that
// goes into the snapshot comes from a consistent view of the
// database, and not the replica's in-memory state or via a reference
// to Replica.store.Engine().
// InitialState implements the raft.Storage interface.
// InitialState requires that the replica lock be held.
func (r *Replica) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	hs, err := loadHardState(context.Background(), r.store.Engine(), r.RangeID)
	// For uninitialized ranges, membership is unknown at this point.
	if raft.IsEmptyHardState(hs) || err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	// Synthesize the ConfState from the in-memory descriptor, which the
	// required replica lock guards.
	var cs raftpb.ConfState
	for _, rep := range r.mu.state.Desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}
	return hs, cs, nil
}
// Entries implements the raft.Storage interface. Note that maxBytes is advisory
// and this method will always return at least one entry even if it exceeds
// maxBytes. Passing maxBytes equal to zero disables size checking.
// TODO(bdarnell): consider caching for recent entries, if rocksdb's built in
// caching is insufficient.
// Entries requires that the replica lock is held.
func (r *Replica) Entries(lo, hi, maxBytes uint64) ([]raftpb.Entry, error) {
	// Scan against a consistent engine snapshot, released on return.
	snap := r.store.NewSnapshot()
	defer snap.Close()
	return entries(context.Background(), snap, r.RangeID, lo, hi, maxBytes)
}
// entries loads the raft log entries in [lo, hi) for rangeID from the given
// engine reader. maxBytes is advisory (zero disables the check; at least one
// entry is returned even if it exceeds maxBytes). On a short read it
// distinguishes raft.ErrCompacted, raft.ErrUnavailable, and a real log gap.
func entries(
	ctx context.Context,
	e engine.Reader,
	rangeID roachpb.RangeID,
	lo, hi, maxBytes uint64,
) ([]raftpb.Entry, error) {
	if lo > hi {
		return nil, errors.Errorf("lo:%d is greater than hi:%d", lo, hi)
	}
	// Scan over the log to find the requested entries in the range [lo, hi),
	// stopping once we have enough.
	ents := make([]raftpb.Entry, 0, hi-lo)
	size := uint64(0)
	var ent raftpb.Entry
	expectedIndex := lo
	exceededMaxBytes := false
	scanFunc := func(kv roachpb.KeyValue) (bool, error) {
		if err := kv.Value.GetProto(&ent); err != nil {
			return false, err
		}
		// Exit early if we have any gaps or it has been compacted.
		if ent.Index != expectedIndex {
			return true, nil
		}
		expectedIndex++
		size += uint64(ent.Size())
		ents = append(ents, ent)
		exceededMaxBytes = maxBytes > 0 && size > maxBytes
		// Returning true terminates the iteration.
		return exceededMaxBytes, nil
	}
	if err := iterateEntries(ctx, e, rangeID, lo, hi, scanFunc); err != nil {
		return nil, err
	}
	// Did the correct number of results come back? If so, we're all good.
	if uint64(len(ents)) == hi-lo {
		return ents, nil
	}
	// Did we hit the size limit? If so, return what we have.
	if exceededMaxBytes {
		return ents, nil
	}
	// Did we get any results at all? Because something went wrong.
	if len(ents) > 0 {
		// Was the lo already truncated?
		if ents[0].Index > lo {
			return nil, raft.ErrCompacted
		}
		// Was the missing index after the last index?
		lastIndex, err := loadLastIndex(ctx, e, rangeID)
		if err != nil {
			return nil, err
		}
		if lastIndex <= expectedIndex {
			return nil, raft.ErrUnavailable
		}
		// We have a gap in the record, if so, return a nasty error.
		return nil, errors.Errorf("there is a gap in the index record between lo:%d and hi:%d at index:%d", lo, hi, expectedIndex)
	}
	// No results, was it due to unavailability or truncation?
	ts, err := loadTruncatedState(ctx, e, rangeID)
	if err != nil {
		return nil, err
	}
	if ts.Index >= lo {
		// The requested lo index has already been truncated.
		return nil, raft.ErrCompacted
	}
	// The requested lo index does not yet exist.
	return nil, raft.ErrUnavailable
}
// iterateEntries scans the raft log keys of rangeID in [lo, hi) and invokes
// scanFunc on each pair; scanFunc returning true or an error stops the scan.
func iterateEntries(
	ctx context.Context,
	e engine.Reader,
	rangeID roachpb.RangeID,
	lo,
	hi uint64,
	scanFunc func(roachpb.KeyValue) (bool, error),
) error {
	_, err := engine.MVCCIterate(
		ctx, e,
		keys.RaftLogKey(rangeID, lo),
		keys.RaftLogKey(rangeID, hi),
		hlc.ZeroTimestamp,
		true, /* consistent */
		nil, /* txn */
		false, /* !reverse */
		scanFunc,
	)
	return err
}
// Term implements the raft.Storage interface.
// Term requires that the replica lock is held.
func (r *Replica) Term(i uint64) (uint64, error) {
	// Read the log via a consistent engine snapshot.
	snap := r.store.NewSnapshot()
	defer snap.Close()
	return term(context.Background(), snap, r.RangeID, i)
}
// term returns the term of the raft log entry at index i read from eng,
// falling back to the truncated state's term when i is exactly the truncated
// index.
func term(ctx context.Context, eng engine.Reader, rangeID roachpb.RangeID, i uint64) (uint64, error) {
	ents, err := entries(ctx, eng, rangeID, i, i+1, 0)
	if err == raft.ErrCompacted {
		// NOTE: the inner err deliberately shadows the outer one.
		ts, err := loadTruncatedState(ctx, eng, rangeID)
		if err != nil {
			return 0, err
		}
		if i == ts.Index {
			return ts.Term, nil
		}
		return 0, raft.ErrCompacted
	} else if err != nil {
		return 0, err
	}
	if len(ents) == 0 {
		return 0, nil
	}
	return ents[0].Term, nil
}
// LastIndex implements the raft.Storage interface.
// LastIndex requires that the replica lock is held.
func (r *Replica) LastIndex() (uint64, error) {
	// Served from the in-memory cache guarded by the replica lock.
	return r.mu.lastIndex, nil
}
// raftTruncatedStateLocked returns metadata about the log that preceded the
// first current entry. This includes both entries that have been compacted away
// and the dummy entries that make up the starting point of an empty log.
// raftTruncatedStateLocked requires that the replica lock be held.
func (r *Replica) raftTruncatedStateLocked(ctx context.Context) (roachpb.RaftTruncatedState, error) {
	// Fast path: serve from the in-memory cache when populated.
	if r.mu.state.TruncatedState != nil {
		return *r.mu.state.TruncatedState, nil
	}
	ts, err := loadTruncatedState(ctx, r.store.Engine(), r.RangeID)
	if err != nil {
		return ts, err
	}
	// Cache for subsequent calls; a zero index is not cached.
	if ts.Index != 0 {
		r.mu.state.TruncatedState = &ts
	}
	return ts, nil
}
// FirstIndex implements the raft.Storage interface.
// FirstIndex requires that the replica lock is held.
func (r *Replica) FirstIndex() (uint64, error) {
	ts, err := r.raftTruncatedStateLocked(context.Background())
	if err != nil {
		return 0, err
	}
	// The first available entry immediately follows the truncated prefix.
	return ts.Index + 1, nil
}
// GetFirstIndex is the same function as FirstIndex but it does not require
// that the replica lock is held.
func (r *Replica) GetFirstIndex() (uint64, error) {
	// Acquire the replica lock that FirstIndex's contract requires.
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.FirstIndex()
}
// Snapshot implements the raft.Storage interface.
// Snapshot requires that the replica lock is held.
func (r *Replica) Snapshot() (raftpb.Snapshot, error) {
	// Delegate to the context-aware implementation with a background context.
	return r.SnapshotWithContext(context.Background())
}
// SnapshotWithContext is main implementation for Snapshot() but it takes a
// context to allow tracing.
//
// Like Snapshot(), it requires that the replica lock is held; r.mu fields
// are therefore read directly (re-locking r.mu here would deadlock), and the
// start key is copied into a local before the async worker starts so the
// goroutine never reads r.mu without synchronization.
func (r *Replica) SnapshotWithContext(ctx context.Context) (raftpb.Snapshot, error) {
	rangeID := r.RangeID
	// If a snapshot is in progress, see if it's ready.
	if r.mu.snapshotChan != nil {
		select {
		case snapData, ok := <-r.mu.snapshotChan:
			if ok {
				return snapData, nil
			}
			// If the old channel was closed, fall through to start a new task.
		default:
			// If the result is not ready, return immediately.
			log.Trace(ctx, "snapshot not yet ready")
			return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
		}
	}
	if r.exceedsDoubleSplitSizeLocked() {
		// Fields read directly: the replica lock is already held.
		maxBytes := r.mu.maxBytes
		size := r.mu.state.Stats.Total()
		log.Infof(ctx,
			"%s: not generating snapshot because replica is too large: %d > 2 * %d",
			r, size, maxBytes)
		return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
	}
	// See if there is already a snapshot running for this store.
	if !r.store.AcquireRaftSnapshot() {
		log.Trace(ctx, "snapshot already running")
		return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
	}
	// Copy the start key while the lock is held so the worker goroutine
	// does not race on r.mu.state.
	startKey := r.mu.state.Desc.StartKey
	// Use an unbuffered channel so the worker stays alive until someone
	// reads from the channel, and can abandon the snapshot if it gets stale.
	ch := make(chan (raftpb.Snapshot))
	if r.store.Stopper().RunAsyncTask(func() {
		defer close(ch)
		sp := r.store.Tracer().StartSpan(fmt.Sprintf("snapshot async %s", r))
		ctxInner := opentracing.ContextWithSpan(context.Background(), sp)
		defer sp.Finish()
		snap := r.store.NewSnapshot()
		log.Trace(ctxInner, "new engine snapshot")
		defer snap.Close()
		defer r.store.ReleaseRaftSnapshot()
		// Delegate to a static function to make sure that we do not depend
		// on any indirect calls to r.store.Engine() (or other in-memory
		// state of the Replica). Everything must come from the snapshot.
		snapData, err := snapshot(context.Background(), snap, rangeID, startKey)
		if err != nil {
			log.Errorf(ctxInner, "%s: error generating snapshot: %s", r, err)
		} else {
			log.Trace(ctxInner, "snapshot generated")
			r.store.metrics.rangeSnapshotsGenerated.Inc(1)
			select {
			case ch <- snapData:
				log.Trace(ctxInner, "snapshot accepted")
			case <-time.After(r.store.ctx.AsyncSnapshotMaxAge):
				// If raft decides it doesn't need this snapshot any more (or
				// just takes too long to use it), abandon it to save memory.
				log.Infof(ctxInner, "%s: abandoning snapshot after %s", r, r.store.ctx.AsyncSnapshotMaxAge)
			case <-r.store.Stopper().ShouldQuiesce():
			}
		}
	}) == nil {
		r.mu.snapshotChan = ch
	} else {
		// Task not started; release the store-wide snapshot slot.
		r.store.ReleaseRaftSnapshot()
	}
	if r.store.ctx.BlockingSnapshotDuration > 0 {
		select {
		case snap, ok := <-r.mu.snapshotChan:
			if ok {
				return snap, nil
			}
		case <-time.After(r.store.ctx.BlockingSnapshotDuration):
			log.Trace(ctx, "snapshot blocking duration exceeded")
		}
	}
	return raftpb.Snapshot{}, raft.ErrSnapshotTemporarilyUnavailable
}
// GetSnapshot wraps Snapshot() but does not require the replica lock
// to be held and it will block instead of returning
// ErrSnapshotTemporarilyUnavailable. It retries with backoff until the
// async worker delivers a snapshot or the stopper quiesces.
func (r *Replica) GetSnapshot(ctx context.Context) (raftpb.Snapshot, error) {
retryOptions := retry.Options{
InitialBackoff: 1 * time.Millisecond,
MaxBackoff: 50 * time.Millisecond,
Multiplier: 2,
Closer: r.store.Stopper().ShouldQuiesce(),
}
for retry := retry.Start(retryOptions); retry.Next(); {
log.Tracef(ctx, "snapshot retry loop pass %d", retry.CurrentAttempt())
// Hold the replica lock only while calling SnapshotWithContext and
// capturing the channel it may have installed; block on the channel
// below without the lock.
r.mu.Lock()
snap, err := r.SnapshotWithContext(ctx)
snapshotChan := r.mu.snapshotChan
r.mu.Unlock()
if err == raft.ErrSnapshotTemporarilyUnavailable {
if snapshotChan == nil {
// The call to Snapshot() didn't start an async process due to
// rate limiting. Try again later.
continue
}
var ok bool
snap, ok = <-snapshotChan
if ok {
return snap, nil
}
// Each snapshot worker's output can only be consumed once.
// We could be racing with raft itself, so if we get a closed
// channel loop back and try again.
} else {
return snap, err
}
}
// The retry loop only exits when the Closer (stopper quiescence) fires.
return raftpb.Snapshot{}, &roachpb.NodeUnavailableError{}
}
// snapshot builds a raft snapshot for the given range, reading everything it
// needs from the supplied engine snapshot rather than from any in-memory
// Replica state (so it is safe to run from an async task). The result holds
// the marshaled replicated key range plus the raft log tail following the
// truncated state, with metadata taken at the applied index.
func snapshot(
ctx context.Context,
snap engine.Reader,
rangeID roachpb.RangeID,
startKey roachpb.RKey,
) (raftpb.Snapshot, error) {
start := timeutil.Now()
var snapData roachpb.RaftSnapshotData
truncState, err := loadTruncatedState(ctx, snap, rangeID)
if err != nil {
return raftpb.Snapshot{}, err
}
// Entries at or below the truncated index are no longer available.
firstIndex := truncState.Index + 1
// Read the range metadata from the snapshot instead of the members
// of the Range struct because they might be changed concurrently.
appliedIndex, _, err := loadAppliedIndex(ctx, snap, rangeID)
if err != nil {
return raftpb.Snapshot{}, err
}
var desc roachpb.RangeDescriptor
// We ignore intents on the range descriptor (consistent=false) because we
// know they cannot be committed yet; operations that modify range
// descriptors resolve their own intents when they commit.
ok, err := engine.MVCCGetProto(ctx, snap, keys.RangeDescriptorKey(startKey),
hlc.MaxTimestamp, false /* !consistent */, nil, &desc)
if err != nil {
return raftpb.Snapshot{}, errors.Errorf("failed to get desc: %s", err)
}
if !ok {
return raftpb.Snapshot{}, errors.Errorf("couldn't find range descriptor")
}
// Store RangeDescriptor as metadata, it will be retrieved by ApplySnapshot()
snapData.RangeDescriptor = desc
// Iterate over all the data in the range, including local-only data like
// the sequence cache.
iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
defer iter.Close()
// Funnel key/value copies through a shared allocator to cut down on
// per-pair allocations while materializing the range contents.
var alloc bufalloc.ByteAllocator
for ; iter.Valid(); iter.Next() {
var key engine.MVCCKey
var value []byte
alloc, key, value = iter.allocIterKeyValue(alloc)
snapData.KV = append(snapData.KV,
roachpb.RaftSnapshotData_KeyValue{
Key: key.Key,
Value: value,
Timestamp: key.Timestamp,
})
}
// Collect the raft log tail, presumably the half-open interval
// [firstIndex, endIndex) — confirm against iterateEntries' bounds.
endIndex := appliedIndex + 1
snapData.LogEntries = make([][]byte, 0, endIndex-firstIndex)
scanFunc := func(kv roachpb.KeyValue) (bool, error) {
bytes, err := kv.Value.GetBytes()
if err == nil {
snapData.LogEntries = append(snapData.LogEntries, bytes)
}
return false, err
}
if err := iterateEntries(ctx, snap, rangeID, firstIndex, endIndex, scanFunc); err != nil {
return raftpb.Snapshot{}, err
}
data, err := protoutil.Marshal(&snapData)
if err != nil {
return raftpb.Snapshot{}, err
}
// Synthesize our raftpb.ConfState from desc.
var cs raftpb.ConfState
for _, rep := range desc.Replicas {
cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
}
term, err := term(ctx, snap, rangeID, appliedIndex)
if err != nil {
return raftpb.Snapshot{}, errors.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
}
log.Infof(ctx, "generated snapshot for range %s at index %d in %s. encoded size=%d, %d KV pairs, %d log entries",
rangeID, appliedIndex, timeutil.Since(start), len(data), len(snapData.KV), len(snapData.LogEntries))
return raftpb.Snapshot{
Data: data,
Metadata: raftpb.SnapshotMetadata{
Index: appliedIndex,
Term: term,
ConfState: cs,
},
}, nil
}
// append writes the given entries to the raft log and deletes any previously
// appended entries that now sit past the new end of the log. It takes the
// previous values of r.mu.lastIndex and r.mu.raftLogSize and returns the new
// values; the caller applies them after the batch commits so the in-memory
// fields stay atomic with the on-disk state.
func (r *Replica) append(
	ctx context.Context,
	batch engine.ReadWriter,
	prevLastIndex uint64,
	prevRaftLogSize int64,
	entries []raftpb.Entry,
) (uint64, int64, error) {
	if len(entries) == 0 {
		return prevLastIndex, prevRaftLogSize, nil
	}
	// Accumulate the MVCC stats delta produced by the puts and deletes so we
	// can adjust the raft log size accordingly.
	var stats enginepb.MVCCStats
	for idx := range entries {
		entry := &entries[idx]
		if err := engine.MVCCPutProto(ctx, batch, &stats, keys.RaftLogKey(r.RangeID, entry.Index),
			hlc.ZeroTimestamp, nil /* txn */, entry); err != nil {
			return 0, 0, err
		}
	}
	newLastIndex := entries[len(entries)-1].Index
	// Remove any previously appended log entries which never committed.
	for idx := newLastIndex + 1; idx <= prevLastIndex; idx++ {
		if err := engine.MVCCDelete(ctx, batch, &stats, keys.RaftLogKey(r.RangeID, idx),
			hlc.ZeroTimestamp, nil /* txn */); err != nil {
			return 0, 0, err
		}
	}
	if err := setLastIndex(ctx, batch, r.RangeID, newLastIndex); err != nil {
		return 0, 0, err
	}
	return newLastIndex, prevRaftLogSize + stats.SysBytes, nil
}
// updateRangeInfo is called whenever a range is updated by ApplySnapshot or
// is created by range splitting to set up the fields which are uninitialized
// or need updating.
//
// RangeMaxBytes must be refreshed from the zone config in both cases:
//  1. After applying a snapshot: if the zone config was not updated for this
//     key range, maxBytes would otherwise remain stale.
//  2. After a split: copying maxBytes from the original range is wrong when
//     the original and new ranges belong to different zones.
func (r *Replica) updateRangeInfo(desc *roachpb.RangeDescriptor) error {
	// Load the system config.
	cfg, ok := r.store.Gossip().GetSystemConfig()
	if !ok {
		// The system config may not have been gossiped yet, or it may have
		// expired; the gossip callback will set the info later.
		log.Warningf(context.TODO(), "%s: no system config available, cannot determine range MaxBytes", r)
		return nil
	}
	// Look up the zone config covering this range's start key.
	zone, err := cfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("%s: failed to lookup zone config: %s", r, err)
	}
	r.SetMaxBytes(zone.RangeMaxBytes)
	return nil
}
// applySnapshot updates the replica based on the given snapshot and associated
// HardState (which may be empty, as Raft may apply some snapshots which don't
// require an update to the HardState). All snapshots must pass through Raft
// for correctness, i.e. the parameters to this method must be taken from
// a raft.Ready.
func (r *Replica) applySnapshot(
ctx context.Context, snap raftpb.Snapshot, hs raftpb.HardState,
) error {
// We use a separate batch to apply the snapshot since the Replica (and in
// particular the last index) is updated after the batch commits. Using a
// separate batch also allows for future optimization (such as using a
// Distinct() batch).
batch := r.store.Engine().NewBatch()
defer batch.Close()
snapData := roachpb.RaftSnapshotData{}
err := proto.Unmarshal(snap.Data, &snapData)
if err != nil {
return err
}
// Extract the updated range descriptor.
desc := snapData.RangeDescriptor
// Fill the reservation if there was one for this range, regardless of
// whether the application succeeded.
defer r.store.bookie.Fill(desc.RangeID)
// Capture the mutable fields we need under the replica lock.
r.mu.Lock()
replicaID := r.mu.replicaID
raftLogSize := r.mu.raftLogSize
r.mu.Unlock()
isPreemptive := replicaID == 0 // only used for accounting and log format
replicaIDStr := "[?]"
snapType := "preemptive"
if !isPreemptive {
replicaIDStr = strconv.FormatInt(int64(replicaID), 10)
snapType = "Raft"
}
log.Infof(ctx, "%s: with replicaID %s, applying %s snapshot for range %d at index %d "+
"(encoded size=%d, %d KV pairs, %d log entries)",
r, replicaIDStr, snapType, desc.RangeID, snap.Metadata.Index,
len(snap.Data), len(snapData.KV), len(snapData.LogEntries))
defer func(start time.Time) {
log.Infof(ctx, "%s: with replicaID %s, applied %s snapshot for range %d in %s",
r, replicaIDStr, snapType, desc.RangeID, timeutil.Since(start))
}(timeutil.Now())
// Delete everything in the range and recreate it from the snapshot.
// We need to delete any old Raft log entries here because any log entries
// that predate the snapshot will be orphaned and never truncated or GC'd.
iter := NewReplicaDataIterator(&desc, batch, false /* !replicatedOnly */)
defer iter.Close()
for ; iter.Valid(); iter.Next() {
if err := batch.Clear(iter.Key()); err != nil {
return err
}
}
// Determine the unreplicated key prefix so we can drop any
// unreplicated keys from the snapshot.
unreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(desc.RangeID)
// Write the snapshot into the range.
for _, kv := range snapData.KV {
if bytes.HasPrefix(kv.Key, unreplicatedPrefix) {
continue
}
mvccKey := engine.MVCCKey{
Key: kv.Key,
Timestamp: kv.Timestamp,
}
if err := batch.Put(mvccKey, kv.Value); err != nil {
return err
}
}
// Decode the raft log entries shipped with the snapshot.
logEntries := make([]raftpb.Entry, len(snapData.LogEntries))
for i, bytes := range snapData.LogEntries {
if err := logEntries[i].Unmarshal(bytes); err != nil {
return err
}
}
// Write the snapshot's Raft log into the range. prevLastIndex is passed
// as 0 since the whole keyspace (log included) was cleared above.
_, raftLogSize, err = r.append(ctx, batch, 0, raftLogSize, logEntries)
if err != nil {
return err
}
s, err := loadState(ctx, batch, &desc)
if err != nil {
return err
}
// As outlined above, last and applied index are the same after applying
// the snapshot (i.e. the snapshot has no uncommitted tail).
if s.RaftAppliedIndex != snap.Metadata.Index {
log.Fatalf(ctx, "%s with state loaded from %d: snapshot resulted in appliedIndex=%d, metadataIndex=%d",
r, s.Desc.RangeID, s.RaftAppliedIndex, snap.Metadata.Index)
}
if !raft.IsEmptyHardState(hs) {
if err := setHardState(ctx, batch, s.Desc.RangeID, hs); err != nil {
return errors.Wrapf(err, "unable to persist HardState %+v", &hs)
}
} else {
// Note that we don't require that Raft supply us with a nonempty
// HardState on a snapshot. We don't want to make that assumption
// because it's not guaranteed by the contract. Raft *must* send us
// a HardState when it increases the committed index as a result of the
// snapshot, but who is to say it isn't going to accept a snapshot
// which is identical to the current state?
}
if err := batch.Commit(); err != nil {
return err
}
// The on-disk state is committed; now update the in-memory fields under
// the replica lock.
r.mu.Lock()
// We set the persisted last index to the last applied index. This is
// not a correctness issue, but means that we may have just transferred
// some entries we're about to re-request from the leader and overwrite.
// However, raft.MultiNode currently expects this behaviour, and the
// performance implications are not likely to be drastic. If our
// feelings about this ever change, we can add a LastIndex field to
// raftpb.SnapshotMetadata.
r.mu.lastIndex = s.RaftAppliedIndex
r.mu.raftLogSize = raftLogSize
// Update the range and store stats.
r.store.metrics.subtractMVCCStats(r.mu.state.Stats)
r.store.metrics.addMVCCStats(s.Stats)
r.mu.state = s
r.assertStateLocked(r.store.Engine())
r.mu.Unlock()
// As the last deferred action after committing the batch, update other
// fields which are uninitialized or need updating. This may not happen
// if the system config has not yet been loaded. While config update
// will correctly set the fields, there is no order guarantee in
// ApplySnapshot.
// TODO: should go through the standard store lock when adding a replica.
if err := r.updateRangeInfo(&desc); err != nil {
panic(err)
}
// Update the range descriptor. This is done last as this is the step that
// makes the Replica visible in the Store.
if err := r.setDesc(&desc); err != nil {
panic(err)
}
if !isPreemptive {
r.store.metrics.rangeSnapshotsNormalApplied.Inc(1)
} else {
r.store.metrics.rangeSnapshotsPreemptiveApplied.Inc(1)
}
return nil
}
// Raft commands are encoded with a 1-byte version (currently 0), an 8-byte ID,
// followed by the payload. This inflexible encoding is used so we can efficiently
// parse the command id while processing the logs.
// TODO(bdarnell): Is this commandID still appropriate for our needs?
const (
// The prescribed length for each command ID.
raftCommandIDLen = 8
// raftCommandEncodingVersion is the byte stored at offset 0 of every
// encoded command; DecodeRaftCommand fatals on any other value.
raftCommandEncodingVersion byte = 0
)
// encodeRaftCommand prefixes command with the encoding version byte and the
// 8-byte command ID, returning the resulting raft log payload. It fatals if
// commandID has the wrong length.
func encodeRaftCommand(commandID string, command []byte) []byte {
	if len(commandID) != raftCommandIDLen {
		log.Fatalf(context.TODO(), "invalid command ID length; %d != %d", len(commandID), raftCommandIDLen)
	}
	// Allocate the exact final size once and fill it in place.
	encoded := make([]byte, 1+raftCommandIDLen+len(command))
	encoded[0] = raftCommandEncodingVersion
	copy(encoded[1:], commandID)
	copy(encoded[1+raftCommandIDLen:], command)
	return encoded
}
// DecodeRaftCommand splits a raftpb.Entry.Data into its commandID and
// command portions. The caller is responsible for checking that the data
// is not empty (which indicates a dummy entry generated by raft rather
// than a real command). Usage is mostly internal to the storage package
// but is exported for use by debugging tools.
func DecodeRaftCommand(data []byte) (commandID string, command []byte) {
	if data[0] != raftCommandEncodingVersion {
		log.Fatalf(context.TODO(), "unknown command encoding version %v", data[0])
	}
	// Layout: [version (1 byte)][command ID][payload].
	const payloadStart = 1 + raftCommandIDLen
	return string(data[1:payloadStart]), data[payloadStart:]
}
|
package main
import (
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
// TestBusyboxTopExecEcho starts a busybox container running "top", execs a
// short-lived "echo" process inside it, and verifies both the emitted event
// sequence (start-container, start-process, exit with status 1) and the
// process stdout.
func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) {
	bundleName := "busybox-top"
	if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
		t.Fatal(err)
	}
	containerID := "top"
	initp, err := cs.StartContainer(containerID, bundleName)
	t.Assert(err, checker.Equals, nil)
	echop, err := cs.AddProcessToContainer(initp, "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Ay Caramba! ; exit 1"}, 0, 0)
	t.Assert(err, checker.Equals, nil)
	expected := []types.Event{
		{Type: "start-container", Id: containerID, Status: 0, Pid: ""},
		{Type: "start-process", Id: containerID, Status: 0, Pid: "echo"},
		{Type: "exit", Id: containerID, Status: 1, Pid: "echo"},
	}
	for _, want := range expected {
		got := <-initp.GetEventsChannel()
		// Timestamps are not predictable; copy the observed one over
		// before comparing the rest of the event.
		want.Timestamp = got.Timestamp
		t.Assert(*got, checker.Equals, want)
	}
	t.Assert(echop.io.stdoutBuffer.String(), checker.Equals, "Ay Caramba!")
}
Add a test for killing a long-lived exec process
Signed-off-by: Kenfe-Mickael Laventure <10e26728841e39109c2809b6029d4b0763bd81d3@gmail.com>
package main
import (
"path/filepath"
"syscall"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
// TestBusyboxTopExecEcho starts a busybox container running "top", execs a
// short-lived "echo" process inside it, and verifies both the emitted event
// sequence (start-container, start-process, exit with status 1) and the
// process stdout.
func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
var (
err error
initp *containerProcess
echop *containerProcess
)
containerId := "top"
initp, err = cs.StartContainer(containerId, bundleName)
t.Assert(err, checker.Equals, nil)
echop, err = cs.AddProcessToContainer(initp, "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Ay Caramba! ; exit 1"}, 0, 0)
t.Assert(err, checker.Equals, nil)
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerId,
Status: 0,
Pid: "",
},
{
Type: "start-process",
Id: containerId,
Status: 0,
Pid: "echo",
},
{
Type: "exit",
Id: containerId,
Status: 1,
Pid: "echo",
},
} {
ch := initp.GetEventsChannel()
e := <-ch
// Timestamps are not predictable; copy the observed one over before
// comparing the rest of the event.
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
}
t.Assert(echop.io.stdoutBuffer.String(), checker.Equals, "Ay Caramba!")
}
// TestBusyboxTopExecTop starts a busybox container running "top", execs a
// second long-lived "top" process inside it, kills the exec process with
// SIGKILL once it has started, and verifies that the exec process exits with
// status 137 while the container itself keeps running.
func (cs *ContainerdSuite) TestBusyboxTopExecTop(t *check.C) {
	bundleName := "busybox-top"
	if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
		t.Fatal(err)
	}
	var (
		err   error
		initp *containerProcess
	)
	containerID := "top"
	initp, err = cs.StartContainer(containerID, bundleName)
	t.Assert(err, checker.Equals, nil)
	execID := "top1"
	_, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
	t.Assert(err, checker.Equals, nil)
	for idx, evt := range []types.Event{
		{
			Type:   "start-container",
			Id:     containerID,
			Status: 0,
			Pid:    "",
		},
		{
			Type:   "start-process",
			Id:     containerID,
			Status: 0,
			Pid:    execID,
		},
		{
			Type:   "exit",
			Id:     containerID,
			Status: 137, // 128 + SIGKILL(9)
			Pid:    execID,
		},
	} {
		ch := initp.GetEventsChannel()
		e := <-ch
		// Timestamps are not predictable; copy the observed one over.
		evt.Timestamp = e.Timestamp
		t.Assert(*e, checker.Equals, evt)
		if idx == 1 {
			// Process started; kill it. Previously the error was silently
			// dropped and the exec ID was hard-coded instead of using execID.
			err = cs.SignalContainerProcess(containerID, execID, uint32(syscall.SIGKILL))
			t.Assert(err, checker.Equals, nil)
		}
	}
	// Container should still be running
	containers, err := cs.ListRunningContainers()
	if err != nil {
		t.Fatal(err)
	}
	t.Assert(len(containers), checker.Equals, 1)
	t.Assert(containers[0].Id, checker.Equals, "top")
	t.Assert(containers[0].Status, checker.Equals, "running")
	t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path))
}
|
package storage
import (
"io"
"time"
"github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/logger"
)
// mockBackend is a no-op implementation of a storage pool backend used by
// the unit tests in this package.
type mockBackend struct {
name string
state *state.State
logger logger.Logger
driver drivers.Driver
}
// ID returns the fixed pool ID used throughout the tests.
func (b *mockBackend) ID() int64 {
return 1 // The tests expect the storage pool ID to be 1.
}
// Name returns the pool name the mock was created with.
func (b *mockBackend) Name() string {
return b.name
}
// Description returns an empty pool description.
func (b *mockBackend) Description() string {
return ""
}
// Status reports the pool status.
// NOTE(review): this returns the *network* unknown-status constant for a
// storage pool; presumably it shares its string value with the storage
// equivalent — confirm.
func (b *mockBackend) Status() string {
return api.NetworkStatusUnknown
}
// LocalStatus reports the pool status on the local member.
// NOTE(review): same network-constant caveat as Status above.
func (b *mockBackend) LocalStatus() string {
return api.NetworkStatusUnknown
}
// Driver returns the storage driver the mock wraps.
func (b *mockBackend) Driver() drivers.Driver {
return b.driver
}
// MigrationTypes advertises a single fallback migration type for the given
// content type with a fixed feature list.
func (b *mockBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {
return []migration.Type{
{
FSType: FallbackMigrationType(contentType),
Features: []string{"xattrs", "delete", "compress", "bidirectional"},
},
}
}
// GetResources returns no resource information.
func (b *mockBackend) GetResources() (*api.ResourcesStoragePool, error) {
return nil, nil
}
// IsUsed always reports the pool as unused.
func (b *mockBackend) IsUsed() (bool, error) {
return false, nil
}
// Delete is a no-op.
func (b *mockBackend) Delete(clientType request.ClientType, op *operations.Operation) error {
return nil
}
// Update is a no-op.
func (b *mockBackend) Update(clientType request.ClientType, newDescription string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
// Create is a no-op.
func (b *mockBackend) Create(clientType request.ClientType, op *operations.Operation) error {
return nil
}
// Mount pretends the pool was mounted.
func (b *mockBackend) Mount() (bool, error) {
return true, nil
}
// Unmount pretends the pool was unmounted.
func (b *mockBackend) Unmount() (bool, error) {
return true, nil
}
// ApplyPatch is a no-op.
func (b *mockBackend) ApplyPatch(name string) error {
return nil
}
// The instance-related methods below are all no-ops: error-returning methods
// succeed, (bool, error) methods report success, the Mount variants return an
// empty MountInfo, and lookups return zero values.
func (b *mockBackend) FillInstanceConfig(inst instance.Instance, config map[string]string) error {
return nil
}
func (b *mockBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {
return nil, nil, nil
}
func (b *mockBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
return nil, nil
}
func (b *mockBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {
return nil, nil
}
func (b *mockBackend) ImportInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RefreshInstance(i instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {
return 0, nil
}
func (b *mockBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
return &MountInfo{}, nil
}
func (b *mockBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error) {
return true, nil
}
func (b *mockBackend) CreateInstanceSnapshot(i instance.Instance, src instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
return &MountInfo{}, nil
}
func (b *mockBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error) {
return true, nil
}
func (b *mockBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
// Image-related methods are all no-ops that report success.
func (b *mockBackend) EnsureImage(fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteImage(fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
// Custom-volume methods are no-ops that report success, with the single
// exception of UpdateCustomVolume, which returns drivers.ErrNotImplemented.
func (b *mockBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName string, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameCustomVolume(projectName string, volName string, newName string, op *operations.Operation) error {
return nil
}
// UpdateCustomVolume deliberately signals "not implemented", unlike the
// other stubs in this file.
func (b *mockBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return drivers.ErrNotImplemented
}
func (b *mockBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) GetCustomVolumeDisk(projectName string, volName string) (string, error) {
return "", nil
}
func (b *mockBackend) GetCustomVolumeUsage(projectName string, volName string) (int64, error) {
return 0, nil
}
func (b *mockBackend) MountCustomVolume(projectName string, volName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error) {
return true, nil
}
func (b *mockBackend) CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, expiryDate time.Time, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameCustomVolumeSnapshot(projectName string, volName string, newName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, expiryDate time.Time, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {
return nil
}
lxd/storage/backend/mock: Add ImportCustomVolume placeholder
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package storage
import (
"io"
"time"
"github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/logger"
)
// mockBackend is a no-op implementation of a storage pool backend used by
// the unit tests in this package.
type mockBackend struct {
name string
state *state.State
logger logger.Logger
driver drivers.Driver
}
// ID returns the fixed pool ID used throughout the tests.
func (b *mockBackend) ID() int64 {
return 1 // The tests expect the storage pool ID to be 1.
}
// Name returns the pool name the mock was created with.
func (b *mockBackend) Name() string {
return b.name
}
// Description returns an empty pool description.
func (b *mockBackend) Description() string {
return ""
}
// Status reports the pool status.
// NOTE(review): this returns the *network* unknown-status constant for a
// storage pool; presumably it shares its string value with the storage
// equivalent — confirm.
func (b *mockBackend) Status() string {
return api.NetworkStatusUnknown
}
// LocalStatus reports the pool status on the local member.
// NOTE(review): same network-constant caveat as Status above.
func (b *mockBackend) LocalStatus() string {
return api.NetworkStatusUnknown
}
// Driver returns the storage driver the mock wraps.
func (b *mockBackend) Driver() drivers.Driver {
return b.driver
}
// MigrationTypes advertises a single fallback migration type for the given
// content type with a fixed feature list.
func (b *mockBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {
return []migration.Type{
{
FSType: FallbackMigrationType(contentType),
Features: []string{"xattrs", "delete", "compress", "bidirectional"},
},
}
}
// GetResources returns no resource information.
func (b *mockBackend) GetResources() (*api.ResourcesStoragePool, error) {
return nil, nil
}
// IsUsed always reports the pool as unused.
func (b *mockBackend) IsUsed() (bool, error) {
return false, nil
}
// Delete is a no-op.
func (b *mockBackend) Delete(clientType request.ClientType, op *operations.Operation) error {
return nil
}
// Update is a no-op.
func (b *mockBackend) Update(clientType request.ClientType, newDescription string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
// Create is a no-op.
func (b *mockBackend) Create(clientType request.ClientType, op *operations.Operation) error {
return nil
}
// Mount pretends the pool was mounted.
func (b *mockBackend) Mount() (bool, error) {
return true, nil
}
// Unmount pretends the pool was unmounted.
func (b *mockBackend) Unmount() (bool, error) {
return true, nil
}
// ApplyPatch is a no-op.
func (b *mockBackend) ApplyPatch(name string) error {
return nil
}
// The instance-related methods below are all no-ops: error-returning methods
// succeed, (bool, error) methods report success, the Mount variants return an
// empty MountInfo, and lookups return zero values.
func (b *mockBackend) FillInstanceConfig(inst instance.Instance, config map[string]string) error {
return nil
}
func (b *mockBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {
return nil, nil, nil
}
func (b *mockBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
return nil, nil
}
func (b *mockBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {
return nil, nil
}
func (b *mockBackend) ImportInstance(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RefreshInstance(i instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
return nil
}
func (b *mockBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {
return 0, nil
}
func (b *mockBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
return &MountInfo{}, nil
}
func (b *mockBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error) {
return true, nil
}
func (b *mockBackend) CreateInstanceSnapshot(i instance.Instance, src instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
return nil
}
func (b *mockBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
return &MountInfo{}, nil
}
func (b *mockBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error) {
return true, nil
}
func (b *mockBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) EnsureImage(fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) DeleteImage(fingerprint string, op *operations.Operation) error {
return nil
}
func (b *mockBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {
return nil
}
// The mockBackend custom volume methods below are no-op stubs used by tests.
// They ignore their arguments and report success, except where noted.

func (b *mockBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName string, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) RenameCustomVolume(projectName string, volName string, newName string, op *operations.Operation) error {
	return nil
}

// UpdateCustomVolume is deliberately unsupported by the mock and always
// returns drivers.ErrNotImplemented.
func (b *mockBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	return drivers.ErrNotImplemented
}

func (b *mockBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
	return nil
}

// GetCustomVolumeDisk reports an empty disk path.
func (b *mockBackend) GetCustomVolumeDisk(projectName string, volName string) (string, error) {
	return "", nil
}

// GetCustomVolumeUsage reports zero disk usage.
func (b *mockBackend) GetCustomVolumeUsage(projectName string, volName string) (int64, error) {
	return 0, nil
}

func (b *mockBackend) MountCustomVolume(projectName string, volName string, op *operations.Operation) error {
	return nil
}

// UnmountCustomVolume reports that an unmount was performed.
func (b *mockBackend) UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error) {
	return true, nil
}

func (b *mockBackend) ImportCustomVolume(projectName string, poolVol backup.Config, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, expiryDate time.Time, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) RenameCustomVolumeSnapshot(projectName string, volName string, newName string, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, expiryDate time.Time, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	return nil
}

func (b *mockBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {
	return nil
}
|
package kite
import (
"code.google.com/p/go.net/websocket"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/golang/groupcache"
"github.com/op/go-logging"
"io"
"koding/messaging/moh"
"koding/newkite/peers"
"koding/newkite/protocol"
"koding/newkite/utils"
"koding/tools/dnode"
stdlog "log"
"net"
"net/http"
"net/rpc"
"os"
"reflect"
"runtime"
"sync"
"time"
)
var (
	// log is the package-level logger shared by all code in this package.
	log = logging.MustGetLogger("Kite")
	// kites is an in-memory hash table of known peer kites of the same types.
	kites = peers.New()
	// balance provides round-robin load balancing helpers over peers.
	balance = NewBalancer()
	// registerInterval is the initial delay between register attempts to Kontrol.
	registerInterval = 700 * time.Millisecond
	// maxRegisterLimit caps the growth: after hitting the limit the register
	// interval is no longer increased.
	maxRegisterLimit = 30
)
// Kite defines a single process that enables distributed service messaging
// amongst the peers it is connected to. A Kite process acts as a Client and
// as a Server: it can receive requests and process them, but it can also
// make requests to other kites. A Kite can be anything — a simple image
// processing kite (which would process data), a Chat kite that enables
// peer-to-peer chat, or a FileSystem kite that exposes the file system to a
// client, which in turn builds the filetree.
type Kite struct {
	protocol.Kite

	// KodingKey is used to authenticate to Kontrol.
	KodingKey string

	// Registered is true if the Kite has registered itself to Kontrol.
	Registered bool

	// Dependencies lists other kites that need to run in order to run this one.
	Dependencies string

	// KontrolEnabled is true by default; if disabled the kite bypasses Kontrol.
	KontrolEnabled bool

	// Methods maps public method names to go's net/rpc method names.
	Methods map[string]string

	// clients tracks connected websocket clients (implements the Clients interface).
	clients *clients

	// GroupCache variables.
	Pool  *groupcache.HTTPPool
	Group *groupcache.Group

	// Server is the underlying net/rpc server.
	Server *rpc.Server

	// registerMutex allows only one register request at the same time.
	registerMutex sync.Mutex

	// kontrolClient is used to talk with the Kontrol server.
	kontrolClient *moh.MessagingClient
}
// New creates, initializes and then returns a new Kite instance. options is
// a config struct that needs to be filled with several pieces of information
// such as Name, Port, IP and so on. When options is nil, the configuration
// is read from the "manifest.json" file instead. New exits the process via
// log.Fatal when the configuration is missing or invalid.
func New(options *protocol.Options) *Kite {
	var err error
	if options == nil {
		options, err = utils.ReadKiteOptions("manifest.json")
		if err != nil {
			log.Fatal("error: could not read config file", err)
		}
	}

	// some simple validations for config
	if options.Kitename == "" {
		log.Fatal("error: options data is not set properly")
	}

	// Hostname resolution is best effort; an empty hostname is tolerated.
	hostname, _ := os.Hostname()
	kiteID := utils.GenerateUUID()

	kodingKey, err := utils.GetKodingKey()
	if err != nil {
		log.Fatal("Couldn't find koding.key. Please run 'kd register'.")
	}

	port := options.Port
	if options.Port == "" {
		port = "0" // OS binds to an automatic port
	}

	if options.KontrolAddr == "" {
		options.KontrolAddr = "127.0.0.1:4000" // local fallback address
	}

	k := &Kite{
		Kite: protocol.Kite{
			Name:     options.Kitename,
			Username: options.Username,
			ID:       kiteID,
			Version:  options.Version,
			Hostname: hostname,
			Port:     port,
			Kind:     options.Kind,
			// PublicIP will be set by Kontrol after registering if it is not set.
			PublicIP: options.PublicIP,
		},
		KodingKey:      kodingKey,
		Server:         rpc.NewServer(),
		KontrolEnabled: true,
		Methods:        make(map[string]string),
		clients:        NewClients(),
	}

	// Subscribe to messages addressed to this kite's ID and to "all".
	k.kontrolClient = moh.NewMessagingClient(options.KontrolAddr, k.handle)
	k.kontrolClient.Subscribe(kiteID)
	k.kontrolClient.Subscribe("all")

	// Register our internal method.
	k.Methods["vm.info"] = "status.Info"
	k.Server.RegisterName("status", new(Status))

	return k
}
// AddMethods is used to add new structs with exposed methods under a
// different name. rcvr is a struct on which your exported methods are
// defined. methods is a map that exposes those methods with alternative
// names to the outside world. It panics when rcvr is nil (a programming
// error) and returns the error from net/rpc registration, if any.
func (k *Kite) AddMethods(rcvr interface{}, methods map[string]string) error {
	if rcvr == nil {
		panic(errors.New("method struct should not be nil"))
	}

	k.createMethodMap(rcvr, methods)
	return k.Server.RegisterName(k.Name, rcvr)
}
// createMethodMap fills k.Methods with a mapping from each alternative
// (public) method name to the "Type.Method" name that go's net/rpc expects.
// It panics when a name in methods does not correspond to an exported method
// on rcvr, since that is a programming error in the caller.
func (k *Kite) createMethodMap(rcvr interface{}, methods map[string]string) {
	kiteStruct := reflect.TypeOf(rcvr)

	for alternativeName, method := range methods {
		m, ok := kiteStruct.MethodByName(method)
		if !ok {
			// The original had an unreachable "continue" after this panic;
			// go vet flags it, so it has been removed.
			panic(fmt.Sprintf("addmethods err: no method with name: %s", method))
		}

		// map alternativeName to go's net/rpc methodname
		k.Methods[alternativeName] = k.Name + "." + m.Name
	}
}
// Start is a blocking method. It runs the kite server and then accepts requests
// asynchronously. It can be started in a goroutine if you wish to use kite as a
// client too. It exits the process via log.Fatal when the server cannot be
// started.
func (k *Kite) Start() {
	k.parseVersionFlag()
	k.setupLogging()

	err := k.listenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}
// setupLogging is used to setup the logging format, destination and level.
// (The comment previously named the function "setLogging".)
func (k *Kite) setupLogging() {
	log.Module = k.Name
	logging.SetFormatter(logging.MustStringFormatter("▶ %{level} %{message}"))

	// Log to stderr (colored) and to syslog at the same time.
	stderrBackend := logging.NewLogBackend(os.Stderr, "", stdlog.LstdFlags|stdlog.Lshortfile)
	stderrBackend.Color = true

	// Best effort: syslog may be unavailable, in which case its backend is nil.
	syslogBackend, _ := logging.NewSyslogBackend(k.Name)
	logging.SetBackend(stderrBackend, syslogBackend)

	// Set logging level. Default level is INFO.
	level := logging.INFO
	if k.hasDebugFlag() {
		level = logging.DEBUG
	}
	logging.SetLevel(level, log.Module)
}
// If the user wants to call flag.Parse() the flag must be defined in advance.
var _ = flag.Bool("version", false, "show version")
var _ = flag.Bool("debug", false, "print debug logs")

// parseVersionFlag prints the version number of the kite and exits with
// status 0 when "-version" appears among the command-line arguments. The
// "flag" package is deliberately not used for parsing because flag.Parse()
// may only be called once and the user may want to call it in their code.
func (k *Kite) parseVersionFlag() {
	for _, arg := range os.Args {
		if arg != "-version" {
			continue
		}
		log.Info(k.Version)
		os.Exit(0)
	}
}
// hasDebugFlag reports whether the "-debug" flag is present in os.Args.
func (k *Kite) hasDebugFlag() bool {
	for _, arg := range os.Args {
		if arg == "-debug" {
			return true
		}
	}

	return false
}
// handle is a method that interprets the incoming message from Kontrol. The
// incoming message must be in form of a JSON-encoded protocol.KontrolMessage;
// anything else is logged and dropped.
func (k *Kite) handle(msg []byte) {
	var r protocol.KontrolMessage
	err := json.Unmarshal(msg, &r)
	if err != nil {
		log.Info(err.Error())
		return
	}
	// log.Debug("INCOMING KONTROL MSG: %#v", r)

	switch r.Type {
	case protocol.KiteRegistered:
		k.AddKite(r)
	case protocol.KiteDisconnected:
		k.RemoveKite(r)
	case protocol.KiteUpdated:
		k.Registered = false //trigger reinitialization
	case protocol.Ping:
		k.Pong()
	default:
		// Unknown message types are silently ignored.
		return
	}
}
// unmarshalKiteArg extracts the "kite" argument of a Kontrol message into a
// protocol.Kite. The field accesses below are type assertions on a generic
// map; when one fails, the resulting runtime.Error panic is recovered and
// converted into an error return. Panics that are not runtime errors are
// re-raised so that genuine bugs are not silently swallowed (the original
// recovered them and returned a nil kite with a nil error).
func unmarshalKiteArg(r *protocol.KontrolMessage) (kite *protocol.Kite, err error) {
	defer func() {
		if rec := recover(); rec != nil {
			if _, ok := rec.(runtime.Error); ok {
				// err will be returned at the end of this func (named returns)
				err = errors.New("Invalid kite argument")
				return
			}
			panic(rec) // not a type-assertion failure; propagate
		}
	}()

	k := r.Args["kite"].(map[string]interface{})

	// Must set all fields manually.
	kite = &protocol.Kite{
		Name:     k["name"].(string),
		Username: k["username"].(string),
		ID:       k["id"].(string),
		Kind:     k["kind"].(string),
		Version:  k["version"].(string),
		Hostname: k["hostname"].(string),
		PublicIP: k["publicIP"].(string),
		Port:     k["port"].(string),
	}

	return
}
// AddKite is executed when a protocol.KiteRegistered message has been
// received through the handler. It stores the new peer in the in-memory
// peers table.
func (k *Kite) AddKite(r protocol.KontrolMessage) {
	kite, err := unmarshalKiteArg(&r)
	if err != nil {
		// Malformed kite argument; nothing to add.
		return
	}

	kites.Add(kite)

	// Groupcache settings, enable when ready
	// k.SetPeers(k.PeersAddr()...)

	log.Info("[%s] -> known peers -> %v", r.Type, k.PeersAddr())
}
// RemoveKite is executed when a protocol.KiteDisconnected message has been
// received through the handler. It drops the peer from the in-memory peers
// table.
func (k *Kite) RemoveKite(r protocol.KontrolMessage) {
	kite, err := unmarshalKiteArg(&r)
	if err != nil {
		// Malformed kite argument; nothing to remove.
		return
	}

	kites.Remove(kite.ID)
	log.Info("[%s] -> known peers -> %v", r.Type, k.PeersAddr())
}
// Pong sends a 'pong' message whenever the kite receives a message from
// Kontrol. This is used for node coordination and notifies Kontrol that the
// Kite is alive. When Kontrol answers with "UPDATE", the kite re-registers
// itself.
func (k *Kite) Pong() {
	m := protocol.KiteToKontrolRequest{
		Kite:      k.Kite,
		Method:    protocol.Pong,
		KodingKey: k.KodingKey,
	}

	msg, _ := json.Marshal(&m)

	// A failed request yields an empty resp, which simply does not match
	// "UPDATE" below, so the error can safely be ignored here.
	resp, _ := k.kontrolClient.Request(msg)
	if string(resp) == "UPDATE" {
		// Take the lock before touching k.Registered: the original wrote
		// k.Registered = false before acquiring registerMutex, which raced
		// with the locked read/write performed by concurrent Pong calls.
		k.registerMutex.Lock()
		defer k.registerMutex.Unlock()

		k.Registered = false

		err := k.registerToKontrol()
		if err != nil {
			log.Fatal(err)
		}

		k.Registered = true
	}
}
// registerToKontrol sends a register message to Kontrol. It returns an error
// when registration is not allowed by Kontrol. If allowed, nil is returned
// and the kite's Username and PublicIP are updated from Kontrol's response.
func (k *Kite) registerToKontrol() error {
	m := protocol.KiteToKontrolRequest{
		Method:    protocol.RegisterKite,
		Kite:      k.Kite,
		KodingKey: k.KodingKey,
	}

	msg, err := json.Marshal(&m)
	if err != nil {
		// go-logging's Info is printf-style; the original passed err as a
		// stray argument without a verb (and misspelled "marshal").
		log.Info("kontrolRequest marshal err: %s", err)
		return err
	}

	result, err := k.kontrolClient.Request(msg)
	if err != nil {
		return err
	}

	var resp protocol.RegisterResponse
	err = json.Unmarshal(result, &resp)
	if err != nil {
		return err
	}

	switch resp.Result {
	case protocol.AllowKite:
		log.Info("registered to kontrol: \n  Addr\t\t: %s\n  Version\t: %s\n  Uuid\t\t: %s\n", k.Addr(), k.Version, k.ID)
		k.Username = resp.Username // we now know which user this kite belongs to

		// Set the correct PublicIP if left empty in options.
		if k.PublicIP == "" {
			k.PublicIP = resp.PublicIP
		}

		return nil
	case protocol.RejectKite:
		return errors.New("no permission to run")
	}

	return errors.New("got a nonstandard response")
}
/******************************************
RPC
******************************************/
// Can connect to RPC service using HTTP CONNECT to rpcPath.
var connected = "200 Connected to Go RPC"

// listenAndServe starts our rpc server on the kite's port and blocks serving
// HTTP until the listener fails.
func (k *Kite) listenAndServe() error {
	listener, err := net.Listen("tcp4", ":"+k.Port)
	if err != nil {
		return err
	}
	log.Info("serve addr is: %s", listener.Addr().String())

	// Port is known here if "0" was used as the port number (the OS picked one).
	_, k.Port, err = net.SplitHostPort(listener.Addr().String())
	if err != nil {
		log.Fatal("Invalid address")
	}

	// We must connect to Kontrol after starting to listen on the port so the
	// advertised address is usable.
	if k.KontrolEnabled {
		// Listen for Kontrol messages.
		k.kontrolClient.Connect()
	}

	// GroupCache settings, enable it when ready
	// k.newPool(k.Addr) // registers to http.DefaultServeMux
	// k.newGroup()

	k.Server.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)
	return http.Serve(listener, k)
}
// ServeHTTP implements the http.Handler interface. WebSocket requests are
// handed to the websocket handler; everything else must be an HTTP CONNECT
// that is hijacked into a raw net/rpc connection.
func (k *Kite) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == protocol.WEBSOCKET_PATH {
		websocket.Handler(k.serveWS).ServeHTTP(w, r)
		return
	}

	// go-logging's Info is printf-style; the original calls passed extra
	// arguments without format verbs and produced garbled log lines.
	log.Info("a new rpc call is done from %s", r.RemoteAddr)

	if r.Method != "CONNECT" {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.WriteHeader(http.StatusMethodNotAllowed)
		io.WriteString(w, "405 must CONNECT\n")
		return
	}

	conn, _, err := w.(http.Hijacker).Hijack()
	if err != nil {
		log.Info("rpc hijacking %s: %s", r.RemoteAddr, err.Error())
		return
	}

	io.WriteString(conn, "HTTP/1.0 "+connected+"\n\n")
	k.Server.ServeCodec(NewKiteServerCodec(k, conn))
}
// serveWS serves content over WebSocket. It is used internally via the
// ServeHTTP method; it registers the connection as a tracked client and then
// blocks serving dnode-encoded RPC on it.
func (k *Kite) serveWS(ws *websocket.Conn) {
	addr := ws.Request().RemoteAddr
	log.Info("[%s] client connected", addr)

	client := NewClient()
	client.Conn = ws
	client.Addr = addr

	k.clients.AddClient(addr, client)

	// k.Server.ServeCodec(NewJsonServerCodec(k, ws))
	k.Server.ServeCodec(NewDnodeServerCodec(k, ws))
}
// OnDisconnect registers the callback f to be invoked when any of the given
// user's currently connected clients disconnects.
func (k *Kite) OnDisconnect(username string, f func()) {
	addrs := k.clients.GetAddresses(username)
	if addrs == nil {
		return
	}

	for _, remoteAddr := range addrs {
		c := k.clients.GetClient(remoteAddr)
		c.onDisconnect = append(c.onDisconnect, f)
		k.clients.AddClient(remoteAddr, c)
	}
}
// broadcast sends messages in dnode protocol to all connected websocket
// clients. arguments is mapped to dnode's arguments field.
//
// NOTE(review): the message's Method is hard-coded to "info" and the method
// parameter is unused — presumably intentional for now; confirm with callers.
func (k *Kite) broadcast(method string, arguments interface{}) {
	// The arguments are identical for every client, so marshal them once
	// instead of once per client (the original re-marshaled inside the loop)
	// and bail out on failure instead of sending a message with a nil payload.
	rawArgs, err := json.Marshal(arguments)
	if err != nil {
		log.Info("broadcast json marshal %+v", err)
		return
	}

	for _, client := range k.clients.List() {
		message := dnode.Message{
			Method:    "info",
			Arguments: &dnode.Partial{Raw: rawArgs},
			Links:     []string{},
			Callbacks: make(map[string][]string),
		}

		websocket.JSON.Send(client.Conn, message)
	}
}
/******************************************
GroupCache
******************************************/
// newPool creates the groupcache HTTP pool for this kite. It registers
// handlers on http.DefaultServeMux.
func (k *Kite) newPool(addr string) {
	k.Pool = groupcache.NewHTTPPool(addr)
}

// newGroup creates a 64 MB groupcache group named after the kite.
// NOTE(review): the getter is a placeholder that always returns "fatih".
func (k *Kite) newGroup() {
	k.Group = groupcache.NewGroup(k.Name, 64<<20, groupcache.GetterFunc(
		func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			dest.SetString("fatih")
			return nil
		}))
}

// GetString fetches the value for key from the cache group as a string. It
// returns the empty string when the group has not been created yet.
func (k *Kite) GetString(name, key string) (result string) {
	if k.Group == nil {
		return
	}
	k.Group.Get(nil, key, groupcache.StringSink(&result))
	return
}

// GetByte fetches the value for key from the cache group as a byte slice. It
// returns nil when the group has not been created yet.
func (k *Kite) GetByte(name, key string) (result []byte) {
	if k.Group == nil {
		return
	}
	k.Group.Get(nil, key, groupcache.AllocatingByteSliceSink(&result))
	return
}

// SetPeers updates the set of peer addresses in the groupcache pool.
func (k *Kite) SetPeers(peers ...string) {
	k.Pool.Set(peers...)
}
// PeersAddr returns the addresses of all currently known peer kites.
func (k *Kite) PeersAddr() []string {
	peerList := kites.List()

	addrs := make([]string, 0, len(peerList))
	for _, peer := range peerList {
		addrs = append(addrs, peer.Addr())
	}

	return addrs
}
kite: fix comment
package kite
import (
"code.google.com/p/go.net/websocket"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/golang/groupcache"
"github.com/op/go-logging"
"io"
"koding/messaging/moh"
"koding/newkite/peers"
"koding/newkite/protocol"
"koding/newkite/utils"
"koding/tools/dnode"
stdlog "log"
"net"
"net/http"
"net/rpc"
"os"
"reflect"
"runtime"
"sync"
"time"
)
var (
	// log is the package-level logger shared by all code in this package.
	log = logging.MustGetLogger("Kite")
	// kites is an in-memory hash table of known peer kites of the same types.
	kites = peers.New()
	// balance provides round-robin load balancing helpers over peers.
	balance = NewBalancer()
	// registerInterval is the initial delay between register attempts to Kontrol.
	registerInterval = 700 * time.Millisecond
	// maxRegisterLimit caps the growth: after hitting the limit the register
	// interval is no longer increased.
	maxRegisterLimit = 30
)
// Kite defines a single process that enables distributed service messaging
// amongst the peers it is connected to. A Kite process acts as a Client and
// as a Server: it can receive requests and process them, but it can also
// make requests to other kites. A Kite can be anything — a simple image
// processing kite (which would process data), a Chat kite that enables
// peer-to-peer chat, or a FileSystem kite that exposes the file system to a
// client, which in turn builds the filetree.
type Kite struct {
	protocol.Kite

	// KodingKey is used to authenticate to Kontrol.
	KodingKey string

	// Registered is true if the Kite has registered itself to Kontrol.
	Registered bool

	// Dependencies lists other kites that need to run in order to run this one.
	Dependencies string

	// KontrolEnabled is true by default; if disabled the kite bypasses Kontrol.
	KontrolEnabled bool

	// Methods maps public method names to go's net/rpc method names.
	Methods map[string]string

	// clients tracks connected websocket clients (implements the Clients interface).
	clients *clients

	// GroupCache variables.
	Pool  *groupcache.HTTPPool
	Group *groupcache.Group

	// Server is the underlying net/rpc server.
	Server *rpc.Server

	// registerMutex allows only one register request at the same time.
	registerMutex sync.Mutex

	// kontrolClient is used to talk with the Kontrol server.
	kontrolClient *moh.MessagingClient
}
// New creates, initializes and then returns a new Kite instance. options is
// a config struct that needs to be filled with several pieces of information
// such as Name, Port, IP and so on. When options is nil, the configuration
// is read from the "manifest.json" file instead. New exits the process via
// log.Fatal when the configuration is missing or invalid.
func New(options *protocol.Options) *Kite {
	var err error
	if options == nil {
		options, err = utils.ReadKiteOptions("manifest.json")
		if err != nil {
			log.Fatal("error: could not read config file", err)
		}
	}

	// some simple validations for config
	if options.Kitename == "" {
		log.Fatal("error: options data is not set properly")
	}

	// Hostname resolution is best effort; an empty hostname is tolerated.
	hostname, _ := os.Hostname()
	kiteID := utils.GenerateUUID()

	kodingKey, err := utils.GetKodingKey()
	if err != nil {
		log.Fatal("Couldn't find koding.key. Please run 'kd register'.")
	}

	port := options.Port
	if options.Port == "" {
		port = "0" // OS binds to an automatic port
	}

	if options.KontrolAddr == "" {
		options.KontrolAddr = "127.0.0.1:4000" // local fallback address
	}

	k := &Kite{
		Kite: protocol.Kite{
			Name:     options.Kitename,
			Username: options.Username,
			ID:       kiteID,
			Version:  options.Version,
			Hostname: hostname,
			Port:     port,
			Kind:     options.Kind,
			// PublicIP will be set by Kontrol after registering if it is not set.
			PublicIP: options.PublicIP,
		},
		KodingKey:      kodingKey,
		Server:         rpc.NewServer(),
		KontrolEnabled: true,
		Methods:        make(map[string]string),
		clients:        NewClients(),
	}

	// Subscribe to messages addressed to this kite's ID and to "all".
	k.kontrolClient = moh.NewMessagingClient(options.KontrolAddr, k.handle)
	k.kontrolClient.Subscribe(kiteID)
	k.kontrolClient.Subscribe("all")

	// Register our internal method.
	k.Methods["vm.info"] = "status.Info"
	k.Server.RegisterName("status", new(Status))

	return k
}
// AddMethods is used to add new structs with exposed methods under a
// different name. rcvr is a struct on which your exported methods are
// defined. methods is a map that exposes those methods with alternative
// names to the outside world. It panics when rcvr is nil (a programming
// error) and returns the error from net/rpc registration, if any.
func (k *Kite) AddMethods(rcvr interface{}, methods map[string]string) error {
	if rcvr == nil {
		panic(errors.New("method struct should not be nil"))
	}

	k.createMethodMap(rcvr, methods)
	return k.Server.RegisterName(k.Name, rcvr)
}
// createMethodMap fills k.Methods with a mapping from each alternative
// (public) method name to the "Type.Method" name that go's net/rpc expects.
// It panics when a name in methods does not correspond to an exported method
// on rcvr, since that is a programming error in the caller.
func (k *Kite) createMethodMap(rcvr interface{}, methods map[string]string) {
	kiteStruct := reflect.TypeOf(rcvr)

	for alternativeName, method := range methods {
		m, ok := kiteStruct.MethodByName(method)
		if !ok {
			// The original had an unreachable "continue" after this panic;
			// go vet flags it, so it has been removed.
			panic(fmt.Sprintf("addmethods err: no method with name: %s", method))
		}

		// map alternativeName to go's net/rpc methodname
		k.Methods[alternativeName] = k.Name + "." + m.Name
	}
}
// Start is a blocking method. It runs the kite server and then accepts requests
// asynchronously. It can be started in a goroutine if you wish to use kite as a
// client too. It exits the process via log.Fatal when the server cannot be
// started.
func (k *Kite) Start() {
	k.parseVersionFlag()
	k.setupLogging()

	err := k.listenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}
// setupLogging is used to setup the logging format, destination and level.
func (k *Kite) setupLogging() {
	log.Module = k.Name
	logging.SetFormatter(logging.MustStringFormatter("▶ %{level} %{message}"))

	// Log to stderr (colored) and to syslog at the same time.
	stderrBackend := logging.NewLogBackend(os.Stderr, "", stdlog.LstdFlags|stdlog.Lshortfile)
	stderrBackend.Color = true

	// Best effort: syslog may be unavailable, in which case its backend is nil.
	syslogBackend, _ := logging.NewSyslogBackend(k.Name)
	logging.SetBackend(stderrBackend, syslogBackend)

	// Set logging level. Default level is INFO.
	level := logging.INFO
	if k.hasDebugFlag() {
		level = logging.DEBUG
	}
	logging.SetLevel(level, log.Module)
}
// If the user wants to call flag.Parse() the flag must be defined in advance.
var _ = flag.Bool("version", false, "show version")
var _ = flag.Bool("debug", false, "print debug logs")

// parseVersionFlag prints the version number of the kite and exits with
// status 0 when "-version" appears among the command-line arguments. The
// "flag" package is deliberately not used for parsing because flag.Parse()
// may only be called once and the user may want to call it in their code.
func (k *Kite) parseVersionFlag() {
	for _, arg := range os.Args {
		if arg != "-version" {
			continue
		}
		log.Info(k.Version)
		os.Exit(0)
	}
}
// hasDebugFlag reports whether the "-debug" flag is present in os.Args.
func (k *Kite) hasDebugFlag() bool {
	for _, arg := range os.Args {
		if arg == "-debug" {
			return true
		}
	}

	return false
}
// handle is a method that interprets the incoming message from Kontrol. The
// incoming message must be in form of a JSON-encoded protocol.KontrolMessage;
// anything else is logged and dropped.
func (k *Kite) handle(msg []byte) {
	var r protocol.KontrolMessage
	err := json.Unmarshal(msg, &r)
	if err != nil {
		log.Info(err.Error())
		return
	}
	// log.Debug("INCOMING KONTROL MSG: %#v", r)

	switch r.Type {
	case protocol.KiteRegistered:
		k.AddKite(r)
	case protocol.KiteDisconnected:
		k.RemoveKite(r)
	case protocol.KiteUpdated:
		k.Registered = false //trigger reinitialization
	case protocol.Ping:
		k.Pong()
	default:
		// Unknown message types are silently ignored.
		return
	}
}
// unmarshalKiteArg extracts the "kite" argument of a Kontrol message into a
// protocol.Kite. The field accesses below are type assertions on a generic
// map; when one fails, the resulting runtime.Error panic is recovered and
// converted into an error return. Panics that are not runtime errors are
// re-raised so that genuine bugs are not silently swallowed (the original
// recovered them and returned a nil kite with a nil error).
func unmarshalKiteArg(r *protocol.KontrolMessage) (kite *protocol.Kite, err error) {
	defer func() {
		if rec := recover(); rec != nil {
			if _, ok := rec.(runtime.Error); ok {
				// err will be returned at the end of this func (named returns)
				err = errors.New("Invalid kite argument")
				return
			}
			panic(rec) // not a type-assertion failure; propagate
		}
	}()

	k := r.Args["kite"].(map[string]interface{})

	// Must set all fields manually.
	kite = &protocol.Kite{
		Name:     k["name"].(string),
		Username: k["username"].(string),
		ID:       k["id"].(string),
		Kind:     k["kind"].(string),
		Version:  k["version"].(string),
		Hostname: k["hostname"].(string),
		PublicIP: k["publicIP"].(string),
		Port:     k["port"].(string),
	}

	return
}
// AddKite is executed when a protocol.KiteRegistered message has been
// received through the handler. It stores the new peer in the in-memory
// peers table.
func (k *Kite) AddKite(r protocol.KontrolMessage) {
	kite, err := unmarshalKiteArg(&r)
	if err != nil {
		// Malformed kite argument; nothing to add.
		return
	}

	kites.Add(kite)

	// Groupcache settings, enable when ready
	// k.SetPeers(k.PeersAddr()...)

	log.Info("[%s] -> known peers -> %v", r.Type, k.PeersAddr())
}
// RemoveKite is executed when a protocol.KiteDisconnected message has been
// received through the handler. It drops the peer from the in-memory peers
// table.
func (k *Kite) RemoveKite(r protocol.KontrolMessage) {
	kite, err := unmarshalKiteArg(&r)
	if err != nil {
		// Malformed kite argument; nothing to remove.
		return
	}

	kites.Remove(kite.ID)
	log.Info("[%s] -> known peers -> %v", r.Type, k.PeersAddr())
}
// Pong sends a 'pong' message whenever the kite receives a message from
// Kontrol. This is used for node coordination and notifies Kontrol that the
// Kite is alive. When Kontrol answers with "UPDATE", the kite re-registers
// itself.
func (k *Kite) Pong() {
	m := protocol.KiteToKontrolRequest{
		Kite:      k.Kite,
		Method:    protocol.Pong,
		KodingKey: k.KodingKey,
	}

	msg, _ := json.Marshal(&m)

	// A failed request yields an empty resp, which simply does not match
	// "UPDATE" below, so the error can safely be ignored here.
	resp, _ := k.kontrolClient.Request(msg)
	if string(resp) == "UPDATE" {
		// Take the lock before touching k.Registered: the original wrote
		// k.Registered = false before acquiring registerMutex, which raced
		// with the locked read/write performed by concurrent Pong calls.
		k.registerMutex.Lock()
		defer k.registerMutex.Unlock()

		k.Registered = false

		err := k.registerToKontrol()
		if err != nil {
			log.Fatal(err)
		}

		k.Registered = true
	}
}
// registerToKontrol sends a register message to Kontrol. It returns an error
// when registration is not allowed by Kontrol. If allowed, nil is returned
// and the kite's Username and PublicIP are updated from Kontrol's response.
func (k *Kite) registerToKontrol() error {
	m := protocol.KiteToKontrolRequest{
		Method:    protocol.RegisterKite,
		Kite:      k.Kite,
		KodingKey: k.KodingKey,
	}

	msg, err := json.Marshal(&m)
	if err != nil {
		// go-logging's Info is printf-style; the original passed err as a
		// stray argument without a verb (and misspelled "marshal").
		log.Info("kontrolRequest marshal err: %s", err)
		return err
	}

	result, err := k.kontrolClient.Request(msg)
	if err != nil {
		return err
	}

	var resp protocol.RegisterResponse
	err = json.Unmarshal(result, &resp)
	if err != nil {
		return err
	}

	switch resp.Result {
	case protocol.AllowKite:
		log.Info("registered to kontrol: \n  Addr\t\t: %s\n  Version\t: %s\n  Uuid\t\t: %s\n", k.Addr(), k.Version, k.ID)
		k.Username = resp.Username // we now know which user this kite belongs to

		// Set the correct PublicIP if left empty in options.
		if k.PublicIP == "" {
			k.PublicIP = resp.PublicIP
		}

		return nil
	case protocol.RejectKite:
		return errors.New("no permission to run")
	}

	return errors.New("got a nonstandard response")
}
/******************************************
RPC
******************************************/
// Can connect to RPC service using HTTP CONNECT to rpcPath.
var connected = "200 Connected to Go RPC"

// listenAndServe starts our rpc server on the kite's port and blocks serving
// HTTP until the listener fails.
func (k *Kite) listenAndServe() error {
	listener, err := net.Listen("tcp4", ":"+k.Port)
	if err != nil {
		return err
	}
	log.Info("serve addr is: %s", listener.Addr().String())

	// Port is known here if "0" was used as the port number (the OS picked one).
	_, k.Port, err = net.SplitHostPort(listener.Addr().String())
	if err != nil {
		log.Fatal("Invalid address")
	}

	// We must connect to Kontrol after starting to listen on the port so the
	// advertised address is usable.
	if k.KontrolEnabled {
		// Listen for Kontrol messages.
		k.kontrolClient.Connect()
	}

	// GroupCache settings, enable it when ready
	// k.newPool(k.Addr) // registers to http.DefaultServeMux
	// k.newGroup()

	k.Server.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)
	return http.Serve(listener, k)
}
// ServeHTTP implements the http.Handler interface. WebSocket requests are
// handed to the websocket handler; everything else must be an HTTP CONNECT
// that is hijacked into a raw net/rpc connection.
func (k *Kite) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == protocol.WEBSOCKET_PATH {
		websocket.Handler(k.serveWS).ServeHTTP(w, r)
		return
	}

	// go-logging's Info is printf-style; the original calls passed extra
	// arguments without format verbs and produced garbled log lines.
	log.Info("a new rpc call is done from %s", r.RemoteAddr)

	if r.Method != "CONNECT" {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.WriteHeader(http.StatusMethodNotAllowed)
		io.WriteString(w, "405 must CONNECT\n")
		return
	}

	conn, _, err := w.(http.Hijacker).Hijack()
	if err != nil {
		log.Info("rpc hijacking %s: %s", r.RemoteAddr, err.Error())
		return
	}

	io.WriteString(conn, "HTTP/1.0 "+connected+"\n\n")
	k.Server.ServeCodec(NewKiteServerCodec(k, conn))
}
// serveWS serves content over WebSocket. It is used internally via the
// ServeHTTP method; it registers the connection as a tracked client and then
// blocks serving dnode-encoded RPC on it.
func (k *Kite) serveWS(ws *websocket.Conn) {
	addr := ws.Request().RemoteAddr
	log.Info("[%s] client connected", addr)

	client := NewClient()
	client.Conn = ws
	client.Addr = addr

	k.clients.AddClient(addr, client)

	// k.Server.ServeCodec(NewJsonServerCodec(k, ws))
	k.Server.ServeCodec(NewDnodeServerCodec(k, ws))
}
// OnDisconnect registers the callback f to be invoked when any of the given
// user's currently connected clients disconnects.
func (k *Kite) OnDisconnect(username string, f func()) {
	addrs := k.clients.GetAddresses(username)
	if addrs == nil {
		return
	}

	for _, remoteAddr := range addrs {
		c := k.clients.GetClient(remoteAddr)
		c.onDisconnect = append(c.onDisconnect, f)
		k.clients.AddClient(remoteAddr, c)
	}
}
// broadcast sends messages in dnode protocol to all connected websocket
// clients. arguments is mapped to dnode's arguments field.
//
// NOTE(review): the message's Method is hard-coded to "info" and the method
// parameter is unused — presumably intentional for now; confirm with callers.
func (k *Kite) broadcast(method string, arguments interface{}) {
	// The arguments are identical for every client, so marshal them once
	// instead of once per client (the original re-marshaled inside the loop)
	// and bail out on failure instead of sending a message with a nil payload.
	rawArgs, err := json.Marshal(arguments)
	if err != nil {
		log.Info("broadcast json marshal %+v", err)
		return
	}

	for _, client := range k.clients.List() {
		message := dnode.Message{
			Method:    "info",
			Arguments: &dnode.Partial{Raw: rawArgs},
			Links:     []string{},
			Callbacks: make(map[string][]string),
		}

		websocket.JSON.Send(client.Conn, message)
	}
}
/******************************************
GroupCache
******************************************/
// newPool creates the groupcache HTTP pool for this kite. It registers
// handlers on http.DefaultServeMux.
func (k *Kite) newPool(addr string) {
	k.Pool = groupcache.NewHTTPPool(addr)
}

// newGroup creates a 64 MB groupcache group named after the kite.
// NOTE(review): the getter is a placeholder that always returns "fatih".
func (k *Kite) newGroup() {
	k.Group = groupcache.NewGroup(k.Name, 64<<20, groupcache.GetterFunc(
		func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			dest.SetString("fatih")
			return nil
		}))
}

// GetString fetches the value for key from the cache group as a string. It
// returns the empty string when the group has not been created yet.
func (k *Kite) GetString(name, key string) (result string) {
	if k.Group == nil {
		return
	}
	k.Group.Get(nil, key, groupcache.StringSink(&result))
	return
}

// GetByte fetches the value for key from the cache group as a byte slice. It
// returns nil when the group has not been created yet.
func (k *Kite) GetByte(name, key string) (result []byte) {
	if k.Group == nil {
		return
	}
	k.Group.Get(nil, key, groupcache.AllocatingByteSliceSink(&result))
	return
}

// SetPeers updates the set of peer addresses in the groupcache pool.
func (k *Kite) SetPeers(peers ...string) {
	k.Pool.Set(peers...)
}
// PeersAddr returns the addresses of all currently known peer kites.
func (k *Kite) PeersAddr() []string {
	peerList := kites.List()

	addrs := make([]string, 0, len(peerList))
	for _, peer := range peerList {
		addrs = append(addrs, peer.Addr())
	}

	return addrs
}
|
package archiver
import (
"context"
"encoding/json"
"os"
"path"
"runtime"
"sort"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// SelectByNameFunc returns true for all items that should be included (files
// and dirs). If false is returned, files are ignored and dirs are not even
// walked.
type SelectByNameFunc func(item string) bool

// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi os.FileInfo) bool

// ErrorFunc is called when an error during archiving occurs. When nil is
// returned, the archiver continues, otherwise it aborts and passes the error
// up the call stack.
type ErrorFunc func(file string, fi os.FileInfo, err error) error

// ItemStats collects some statistics about a particular file or directory.
type ItemStats struct {
	DataBlobs      int    // number of new data blobs added for this item
	DataSize       uint64 // sum of the sizes of all new data blobs
	DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
	TreeBlobs      int    // number of new tree blobs added for this item
	TreeSize       uint64 // sum of the sizes of all new tree blobs
	TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
}

// Add accumulates all counters from other into s.
func (s *ItemStats) Add(other ItemStats) {
	s.TreeBlobs += other.TreeBlobs
	s.TreeSize += other.TreeSize
	s.TreeSizeInRepo += other.TreeSizeInRepo
	s.DataBlobs += other.DataBlobs
	s.DataSize += other.DataSize
	s.DataSizeInRepo += other.DataSizeInRepo
}
// Archiver saves a directory structure to the repo.
type Archiver struct {
	// Repo is the repository the snapshot is written to.
	Repo restic.Repository
	// SelectByName is consulted with the path only, before Lstat.
	SelectByName SelectByNameFunc
	// Select is consulted after Lstat with path and FileInfo.
	Select SelectFunc
	// FS abstracts the filesystem being archived.
	FS fs.FS
	// Options holds the concurrency configuration, see ApplyDefaults.
	Options Options

	// worker pools; only valid between runWorkers and stopWorkers
	blobSaver *BlobSaver
	fileSaver *FileSaver
	treeSaver *TreeSaver

	// Error is called for all errors that occur during backup.
	Error ErrorFunc

	// CompleteItem is called for all files and dirs once they have been
	// processed successfully. The parameter item contains the path as it will
	// be in the snapshot after saving. s contains some statistics about this
	// particular file/dir.
	//
	// CompleteItem may be called asynchronously from several different
	// goroutines!
	CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration)

	// StartFile is called when a file is being processed by a worker.
	StartFile func(filename string)

	// CompleteBlob is called for all saved blobs for files.
	CompleteBlob func(filename string, bytes uint64)

	// WithAtime configures if the access time for files and directories should
	// be saved. Enabling it may result in much metadata, so it's off by
	// default.
	WithAtime bool
	// Flags controlling change detection. See doc/040_backup.rst for details.
	ChangeIgnoreFlags uint
}
// Flags for the ChangeIgnoreFlags bitfield.
const (
	// ChangeIgnoreCtime disables the ctime comparison in change detection.
	ChangeIgnoreCtime = 1 << iota
	// ChangeIgnoreInode disables the inode comparison in change detection.
	ChangeIgnoreInode
)
// Options is used to configure the archiver.
type Options struct {
	// FileReadConcurrency sets how many files are read in concurrently. If
	// it's set to zero, at most two files are read in concurrently (which
	// turned out to be a good default for most situations).
	FileReadConcurrency uint

	// SaveBlobConcurrency sets how many blobs are hashed and saved
	// concurrently. If it's set to zero, the default is the number of CPUs
	// available in the system.
	SaveBlobConcurrency uint

	// SaveTreeConcurrency sets how many trees are marshalled and saved to the
	// repo concurrently.
	SaveTreeConcurrency uint
}

// ApplyDefaults returns a copy of o with the default options set for all unset
// fields.
func (o Options) ApplyDefaults() Options {
	if o.FileReadConcurrency == 0 {
		// two is a sweet spot for almost all situations. We've done some
		// experiments documented here:
		// https://github.com/borgbackup/borg/issues/3500
		o.FileReadConcurrency = 2
	}

	if o.SaveBlobConcurrency == 0 {
		// blob saving is CPU bound due to hash checking and encryption; the
		// actual upload is handled asynchronously by the repository itself, so
		// there is no benefit in running more savers than GOMAXPROCS allows
		// (previously this used runtime.NumCPU(), which ignores a lowered
		// GOMAXPROCS setting).
		o.SaveBlobConcurrency = uint(runtime.GOMAXPROCS(0))
	}

	if o.SaveTreeConcurrency == 0 {
		// use a relatively high concurrency here, having multiple SaveTree
		// workers is cheap
		o.SaveTreeConcurrency = o.SaveBlobConcurrency * 20
	}

	return o
}
// New initializes a new archiver. All callbacks default to no-ops and both
// select functions default to "include everything", so callers only need to
// set what they use.
func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver {
	return &Archiver{
		Repo:         repo,
		SelectByName: func(_ string) bool { return true },
		Select:       func(_ string, _ os.FileInfo) bool { return true },
		FS:           fs,
		Options:      opts.ApplyDefaults(),

		CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
		StartFile:    func(string) {},
		CompleteBlob: func(string, uint64) {},
	}
}
// error calls arch.Error if it is set and the error is different from context.Canceled.
func (arch *Archiver) error(item string, fi os.FileInfo, err error) error {
	// pass through untouched: nothing happened, no handler installed, or the
	// backup is being cancelled (cancellation must always propagate)
	switch {
	case err == nil, arch.Error == nil, err == context.Canceled:
		return err
	}

	filtered := arch.Error(item, fi, err)
	if filtered != err {
		debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, filtered)
	}
	// a nil result means the handler chose to ignore the error
	return filtered
}
// saveTree stores a tree in the repo. It checks the index and the known blobs
// before saving anything.
func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID, ItemStats, error) {
	var s ItemStats
	buf, err := json.Marshal(t)
	if err != nil {
		return restic.ID{}, s, errors.Wrap(err, "MarshalJSON")
	}

	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	buf = append(buf, '\n')

	b := &Buffer{Data: buf}
	res := arch.blobSaver.Save(ctx, restic.TreeBlob, b)
	res.Wait(ctx)
	// only count the blob towards the stats if it was not already in the repo
	if !res.Known() {
		s.TreeBlobs++
		s.TreeSize += uint64(res.Length())
		s.TreeSizeInRepo += uint64(res.SizeInRepo())
	}
	// The context was canceled in the meantime, res.ID() might be invalid
	if ctx.Err() != nil {
		return restic.ID{}, s, ctx.Err()
	}
	return res.ID(), s, nil
}
// nodeFromFileInfo returns the restic node from an os.FileInfo, applying the
// archiver's atime policy. On failure the wrapped error is returned together
// with whatever node NodeFromFileInfo produced (possibly nil).
func (arch *Archiver) nodeFromFileInfo(filename string, fi os.FileInfo) (*restic.Node, error) {
	node, err := restic.NodeFromFileInfo(filename, fi)
	// BUGFIX: guard against a nil node — the previous code dereferenced node
	// before inspecting err, which panics if NodeFromFileInfo ever returns nil.
	if node != nil && !arch.WithAtime {
		// atime tracking is off by default (it inflates metadata), so
		// normalize the access time to the modification time
		node.AccessTime = node.ModTime
	}
	return node, errors.Wrap(err, "NodeFromFileInfo")
}
// loadSubtree tries to load the subtree referenced by node. In case of an
// error, a nil tree is returned together with the wrapped error. If there is
// no node to load, then nil is returned without an error.
func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*restic.Tree, error) {
	// only directory nodes carrying a subtree reference can be loaded
	if node == nil || node.Type != "dir" || node.Subtree == nil {
		return nil, nil
	}

	tree, err := arch.Repo.LoadTree(ctx, *node.Subtree)
	if err != nil {
		debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err)
		// a tree in the repository is not readable -> warn the user
		return nil, arch.wrapLoadTreeError(*node.Subtree, err)
	}

	return tree, nil
}
// wrapLoadTreeError enriches a tree-load failure: if the index knows the tree,
// the stored data must be damaged; otherwise the index itself is incomplete
// and a rebuild-index is suggested.
func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
	if arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) {
		err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err)
	} else {
		err = errors.Errorf("tree %v is not known; the repository could be damaged, run `rebuild-index` to try to repair it", id)
	}
	return err
}
// SaveDir stores a directory in the repo and returns the node. snPath is the
// path within the current snapshot.
func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo, dir string, previous *restic.Tree, complete CompleteFunc) (d FutureTree, err error) {
	debug.Log("%v %v", snPath, dir)

	treeNode, err := arch.nodeFromFileInfo(dir, fi)
	if err != nil {
		return FutureTree{}, err
	}

	names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW)
	if err != nil {
		return FutureTree{}, err
	}
	// sort for a deterministic tree layout
	sort.Strings(names)

	nodes := make([]FutureNode, 0, len(names))

	for _, name := range names {
		// test if context has been cancelled
		if ctx.Err() != nil {
			debug.Log("context has been cancelled, aborting")
			return FutureTree{}, ctx.Err()
		}

		pathname := arch.FS.Join(dir, name)
		oldNode := previous.Find(name)
		snItem := join(snPath, name)
		fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode)

		// return error early if possible
		if err != nil {
			// NOTE(review): fi here is the parent directory's FileInfo, not
			// the child's — confirm the error callback expects that.
			err = arch.error(pathname, fi, err)
			if err == nil {
				// ignore error
				continue
			}

			return FutureTree{}, err
		}

		if excluded {
			continue
		}

		nodes = append(nodes, fn)
	}

	// hand the assembled children to the tree saver; the returned future
	// completes once the tree blob is stored
	ft := arch.treeSaver.Save(ctx, snPath, treeNode, nodes, complete)

	return ft, nil
}
// FutureNode holds a reference to a node, FutureFile, or FutureTree. At most
// one of isFile/isTree is set while a result is pending; after wait() both are
// cleared and node/stats/err carry the outcome.
type FutureNode struct {
	snPath, target string

	// kept to call the error callback function
	absTarget string
	fi        os.FileInfo

	node  *restic.Node // result node; nil if the item was excluded or failed
	stats ItemStats    // statistics collected for this item
	err   error        // error reported by the underlying future

	isFile bool
	file   FutureFile
	isTree bool
	tree   FutureTree
}
// wait blocks until the wrapped future (file or tree) has finished and copies
// its result (node, stats, error) into fn itself.
func (fn *FutureNode) wait(ctx context.Context) {
	switch {
	case fn.isFile:
		// wait for and collect the data for the file
		fn.file.Wait(ctx)
		fn.node = fn.file.Node()
		fn.err = fn.file.Err()
		fn.stats = fn.file.Stats()

		// ensure the other stuff can be garbage-collected
		fn.file = FutureFile{}
		fn.isFile = false

	case fn.isTree:
		// wait for and collect the data for the dir
		fn.tree.Wait(ctx)
		fn.node = fn.tree.Node()
		fn.stats = fn.tree.Stats()
		// NOTE(review): unlike the file branch, no error is collected here —
		// presumably FutureTree has no Err(); confirm tree errors surface
		// through another path.

		// ensure the other stuff can be garbage-collected
		fn.tree = FutureTree{}
		fn.isTree = false
	}
}
// allBlobsPresent reports whether every content blob of previous is already
// stored in the repository index.
func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool {
	// a single missing blob is enough to force re-reading the file
	for _, blobID := range previous.Content {
		handle := restic.BlobHandle{ID: blobID, Type: restic.DataBlob}
		if !arch.Repo.Index().Has(handle) {
			return false
		}
	}
	return true
}
// Save saves a target (file or directory) to the repo. If the item is
// excluded, this function returns a nil node and error, with excluded set to
// true.
//
// Errors and completion needs to be handled by the caller.
//
// snPath is the path within the current snapshot.
func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
	start := time.Now()

	fn = FutureNode{
		snPath: snPath,
		target: target,
	}

	debug.Log("%v target %q, previous %v", snPath, target, previous)
	abstarget, err := arch.FS.Abs(target)
	if err != nil {
		return FutureNode{}, false, err
	}

	fn.absTarget = abstarget

	// exclude files by path before running Lstat to reduce number of lstat calls
	if !arch.SelectByName(abstarget) {
		debug.Log("%v is excluded by path", target)
		return FutureNode{}, true, nil
	}

	// get file info and run remaining select functions that require file information
	fi, err := arch.FS.Lstat(target)
	if err != nil {
		debug.Log("lstat() for %v returned error: %v", target, err)
		err = arch.error(abstarget, fi, err)
		if err != nil {
			return FutureNode{}, false, errors.Wrap(err, "Lstat")
		}
		// the error handler ignored the error: skip the item
		return FutureNode{}, true, nil
	}
	if !arch.Select(abstarget, fi) {
		debug.Log("%v is excluded", target)
		return FutureNode{}, true, nil
	}

	switch {
	case fs.IsRegularFile(fi):
		debug.Log(" %v regular file", target)
		start := time.Now()

		// check if the file has not changed before performing a fopen operation (more expensive, specially
		// in network filesystems)
		if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) {
			if arch.allBlobsPresent(previous) {
				debug.Log("%v hasn't changed, using old list of blobs", target)
				arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start))
				arch.CompleteBlob(snPath, previous.Size)
				fn.node, err = arch.nodeFromFileInfo(target, fi)
				if err != nil {
					return FutureNode{}, false, err
				}

				// copy list of blobs
				fn.node.Content = previous.Content

				return fn, false, nil
			}

			debug.Log("%v hasn't changed, but contents are missing!", target)
			// There are contents missing - inform user!
			err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
			err = arch.error(abstarget, fi, err)
			if err != nil {
				return FutureNode{}, false, err
			}
		}

		// reopen file and do an fstat() on the open file to check it is still
		// a file (and has not been exchanged for e.g. a symlink)
		file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
		if err != nil {
			debug.Log("Openfile() for %v returned error: %v", target, err)
			err = arch.error(abstarget, fi, err)
			if err != nil {
				// BUGFIX: this wrap was mislabeled "Lstat"; the failing call is OpenFile.
				return FutureNode{}, false, errors.Wrap(err, "OpenFile")
			}
			return FutureNode{}, true, nil
		}

		fi, err = file.Stat()
		if err != nil {
			debug.Log("stat() on opened file %v returned error: %v", target, err)
			_ = file.Close()
			err = arch.error(abstarget, fi, err)
			if err != nil {
				// BUGFIX: this wrap was mislabeled "Lstat"; the failing call is Stat.
				return FutureNode{}, false, errors.Wrap(err, "Stat")
			}
			return FutureNode{}, true, nil
		}

		// make sure it's still a file
		if !fs.IsRegularFile(fi) {
			// BUGFIX: the format string had a %v verb but no argument, which
			// printed %!v(MISSING) instead of the file name.
			err = errors.Errorf("file %v changed type, refusing to archive", target)
			_ = file.Close()
			err = arch.error(abstarget, fi, err)
			if err != nil {
				return FutureNode{}, false, err
			}
			return FutureNode{}, true, nil
		}

		fn.isFile = true
		// Save will close the file, we don't need to do that
		fn.file = arch.fileSaver.Save(ctx, snPath, file, fi, func() {
			arch.StartFile(snPath)
		}, func(node *restic.Node, stats ItemStats) {
			arch.CompleteItem(snPath, previous, node, stats, time.Since(start))
		})

	case fi.IsDir():
		debug.Log(" %v dir", target)

		snItem := snPath + "/"
		start := time.Now()
		oldSubtree, err := arch.loadSubtree(ctx, previous)
		if err != nil {
			err = arch.error(abstarget, fi, err)
		}
		if err != nil {
			return FutureNode{}, false, err
		}

		fn.isTree = true
		fn.tree, err = arch.SaveDir(ctx, snPath, fi, target, oldSubtree,
			func(node *restic.Node, stats ItemStats) {
				arch.CompleteItem(snItem, previous, node, stats, time.Since(start))
			})
		if err != nil {
			debug.Log("SaveDir for %v returned error: %v", snPath, err)
			return FutureNode{}, false, err
		}

	case fi.Mode()&os.ModeSocket > 0:
		debug.Log(" %v is a socket, ignoring", target)
		return FutureNode{}, true, nil

	default:
		debug.Log(" %v other", target)
		fn.node, err = arch.nodeFromFileInfo(target, fi)
		if err != nil {
			return FutureNode{}, false, err
		}
	}

	debug.Log("return after %.3f", time.Since(start).Seconds())

	return fn, false, nil
}
// fileChanged tries to detect whether a file's content has changed compared
// to the contents of node, which describes the same path in the parent backup.
// It should only be run for regular files.
func fileChanged(fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool {
	// no record in the parent backup at all
	if node == nil {
		return true
	}
	// We're only called for regular files, so a different type is a change.
	if node.Type != "file" {
		return true
	}
	if uint64(fi.Size()) != node.Size {
		return true
	}
	if !fi.ModTime().Equal(node.ModTime) {
		return true
	}

	// ctime and inode checks can each be disabled via ignoreFlags
	extFI := fs.ExtendedStat(fi)
	if ignoreFlags&ChangeIgnoreCtime == 0 && !extFI.ChangeTime.Equal(node.ChangeTime) {
		return true
	}
	if ignoreFlags&ChangeIgnoreInode == 0 && node.Inode != extFI.Inode {
		return true
	}

	return false
}
// join returns all elements separated with a forward slash, cleaned by
// path.Join (empty elements collapse, "." and ".." are resolved).
func join(elem ...string) string {
	joined := path.Join(elem...)
	return joined
}
// statDir returns the file info for the directory. Symbolic links are
// resolved. If the target directory is not a directory, an error is returned.
func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
	fi, err := arch.FS.Stat(dir)
	if err != nil {
		// BUGFIX: this wrap previously said "Lstat", but the call above is
		// Stat — links are deliberately resolved here.
		return nil, errors.Wrap(err, "Stat")
	}

	// mask out everything except the type bits; ModeCharDevice is ORed in
	// explicitly, presumably for Go versions where it was not yet part of
	// os.ModeType — TODO confirm against the module's minimum Go version
	tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice)
	if tpe != os.ModeDir {
		return fi, errors.Errorf("path is not a directory: %v", dir)
	}

	return fi, nil
}
// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path
// within the current snapshot.
func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree) (*restic.Tree, error) {
	debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)

	nodeNames := atree.NodeNames()
	tree := restic.NewTree(len(nodeNames))

	// pending results for leaf nodes, keyed by node name
	futureNodes := make(map[string]FutureNode)

	// iterate over the nodes of atree in lexicographic (=deterministic) order
	for _, name := range nodeNames {
		subatree := atree.Nodes[name]

		// test if context has been cancelled
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}

		// this is a leaf node
		if subatree.Leaf() {
			fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name))

			if err != nil {
				err = arch.error(subatree.Path, fn.fi, err)
				if err == nil {
					// ignore error
					continue
				}
				return nil, err
			}
			// BUGFIX(cleanup): a second, unreachable `if err != nil` check was
			// removed here — every non-nil err path above already returned or
			// continued.

			if !excluded {
				futureNodes[name] = fn
			}
			continue
		}

		snItem := join(snPath, name) + "/"
		start := time.Now()

		// load the matching subtree of the parent snapshot, if any
		oldNode := previous.Find(name)
		oldSubtree, err := arch.loadSubtree(ctx, oldNode)
		if err != nil {
			err = arch.error(join(snPath, name), nil, err)
		}
		if err != nil {
			return nil, err
		}

		// not a leaf node, archive subtree
		subtree, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree)
		if err != nil {
			return nil, err
		}

		id, nodeStats, err := arch.saveTree(ctx, subtree)
		if err != nil {
			return nil, err
		}

		if subatree.FileInfoPath == "" {
			return nil, errors.Errorf("FileInfoPath for %v/%v is empty", snPath, name)
		}

		debug.Log("%v, saved subtree %v as %v", snPath, subtree, id.Str())

		fi, err := arch.statDir(subatree.FileInfoPath)
		if err != nil {
			return nil, err
		}

		debug.Log("%v, dir node data loaded from %v", snPath, subatree.FileInfoPath)

		node, err := arch.nodeFromFileInfo(subatree.FileInfoPath, fi)
		if err != nil {
			return nil, err
		}

		node.Name = name
		node.Subtree = &id

		err = tree.Insert(node)
		if err != nil {
			return nil, err
		}

		arch.CompleteItem(snItem, oldNode, node, nodeStats, time.Since(start))
	}

	debug.Log("waiting on %d nodes", len(futureNodes))

	// process all futures
	for name, fn := range futureNodes {
		fn.wait(ctx)

		// return the error, or ignore it
		if fn.err != nil {
			fn.err = arch.error(fn.target, fn.fi, fn.err)
			if fn.err == nil {
				// ignore error
				continue
			}

			return nil, fn.err
		}

		// when the error is ignored, the node could not be saved, so ignore it
		if fn.node == nil {
			debug.Log("%v excluded: %v", fn.snPath, fn.target)
			continue
		}

		fn.node.Name = name

		err := tree.Insert(fn.node)
		if err != nil {
			return nil, err
		}
	}

	return tree, nil
}
// readdirnames returns the sorted-on-demand entry names of dir.
// flags are passed to fs.OpenFile. O_RDONLY is implied.
func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) {
	dirFile, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0)
	if err != nil {
		return nil, errors.Wrap(err, "Open")
	}

	names, err := dirFile.Readdirnames(-1)
	if err != nil {
		_ = dirFile.Close()
		return nil, errors.Wrapf(err, "Readdirnames %v failed", dir)
	}

	// a failing Close is reported instead of the entries
	if cerr := dirFile.Close(); cerr != nil {
		return nil, cerr
	}

	return names, nil
}
// resolveRelativeTargets replaces targets that only contain relative
// directories ("." or "../../") with the contents of the directory. Each
// element of target is processed with fs.Clean().
func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
	debug.Log("targets before resolving: %v", targets)
	resolved := make([]string, 0, len(targets))
	for _, t := range targets {
		t = filesys.Clean(t)

		// a target with at least one real path component is kept verbatim
		if pc, _ := pathComponents(filesys, t, false); len(pc) > 0 {
			resolved = append(resolved, t)
			continue
		}

		// purely relative target: replace it with its directory entries
		debug.Log("replacing %q with readdir(%q)", t, t)
		names, err := readdirnames(filesys, t, fs.O_NOFOLLOW)
		if err != nil {
			return nil, err
		}
		sort.Strings(names)

		for _, name := range names {
			resolved = append(resolved, filesys.Join(t, name))
		}
	}

	debug.Log("targets after resolving: %v", resolved)
	return resolved, nil
}
// SnapshotOptions collect attributes for a new snapshot.
type SnapshotOptions struct {
	Tags           restic.TagList // tags to attach to the snapshot
	Hostname       string         // host the snapshot is recorded for
	Excludes       []string       // exclude patterns, stored in the snapshot for reference
	Time           time.Time      // timestamp recorded for the snapshot
	ParentSnapshot restic.ID      // parent snapshot ID; a null ID means no parent
}
// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is
// returned. Any failure results in a nil tree: a backup without a parent is
// always possible, just slower.
func (arch *Archiver) loadParentTree(ctx context.Context, snapshotID restic.ID) *restic.Tree {
	if snapshotID.IsNull() {
		return nil
	}

	debug.Log("load parent snapshot %v", snapshotID)
	sn, err := restic.LoadSnapshot(ctx, arch.Repo, snapshotID)
	if err != nil {
		debug.Log("unable to load snapshot %v: %v", snapshotID, err)
		return nil
	}

	if sn.Tree == nil {
		// BUGFIX: the format string had two %v verbs but only one argument,
		// which printed %!v(MISSING).
		debug.Log("snapshot %v has empty tree", snapshotID)
		return nil
	}

	debug.Log("load parent tree %v", *sn.Tree)
	tree, err := arch.Repo.LoadTree(ctx, *sn.Tree)
	if err != nil {
		debug.Log("unable to load tree %v: %v", *sn.Tree, err)
		// report the damaged tree to the user but continue without a parent
		_ = arch.error("/", nil, arch.wrapLoadTreeError(*sn.Tree, err))
		return nil
	}
	return tree
}
// runWorkers starts the worker pools, which are stopped when the context is
// cancelled. The pools are wired together: the file saver feeds the blob
// saver, and both report progress through the archiver's callbacks.
func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) {
	arch.blobSaver = NewBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency)

	arch.fileSaver = NewFileSaver(ctx, wg,
		arch.blobSaver.Save,
		arch.Repo.Config().ChunkerPolynomial,
		arch.Options.FileReadConcurrency, arch.Options.SaveBlobConcurrency)
	arch.fileSaver.CompleteBlob = arch.CompleteBlob
	arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo

	arch.treeSaver = NewTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.saveTree, arch.Error)
}
// stopWorkers triggers shutdown of all worker pools and clears the references
// so that any use after shutdown fails fast with a nil dereference.
func (arch *Archiver) stopWorkers() {
	arch.blobSaver.TriggerShutdown()
	arch.fileSaver.TriggerShutdown()
	arch.treeSaver.TriggerShutdown()
	arch.blobSaver = nil
	arch.fileSaver = nil
	arch.treeSaver = nil
}
// Snapshot saves several targets and returns a snapshot.
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) {
	// normalize targets: clean each path, expand purely relative entries
	cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
	if err != nil {
		return nil, restic.ID{}, err
	}

	atree, err := NewTree(arch.FS, cleanTargets)
	if err != nil {
		return nil, restic.ID{}, err
	}

	var rootTreeID restic.ID

	// outer group: the pack uploader plus the archiving pipeline below
	wgUp, wgUpCtx := errgroup.WithContext(ctx)
	arch.Repo.StartPackUploader(wgUpCtx, wgUp)

	wgUp.Go(func() error {
		// inner group: blob/file/tree saver workers
		wg, wgCtx := errgroup.WithContext(wgUpCtx)
		start := time.Now()

		var stats ItemStats
		wg.Go(func() error {
			arch.runWorkers(wgCtx, wg)

			debug.Log("starting snapshot")
			tree, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot))
			if err != nil {
				return err
			}

			if len(tree.Nodes) == 0 {
				return errors.New("snapshot is empty")
			}

			// rootTreeID and stats are captured by the enclosing closure
			rootTreeID, stats, err = arch.saveTree(wgCtx, tree)
			arch.stopWorkers()
			return err
		})

		err = wg.Wait()
		debug.Log("err is %v", err)

		if err != nil {
			debug.Log("error while saving tree: %v", err)
			return err
		}

		arch.CompleteItem("/", nil, nil, stats, time.Since(start))

		// NOTE(review): Flush uses the outer ctx rather than wgUpCtx —
		// confirm this is intended so flushing can still happen when the
		// group context has been cancelled.
		return arch.Repo.Flush(ctx)
	})
	err = wgUp.Wait()
	if err != nil {
		return nil, restic.ID{}, err
	}

	// the snapshot metadata records the original (unresolved) targets
	sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
	if err != nil {
		return nil, restic.ID{}, err
	}

	sn.Excludes = opts.Excludes
	if !opts.ParentSnapshot.IsNull() {
		// copy the ID so the snapshot does not alias opts
		id := opts.ParentSnapshot
		sn.Parent = &id
	}
	sn.Tree = &rootTreeID

	id, err := arch.Repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
	if err != nil {
		return nil, restic.ID{}, err
	}

	return sn, id, nil
}
archiver: Limit blob saver count to GOMAXPROCS
Now with the asynchronous uploaders there's no more benefit from using
more blob savers than we have CPUs. Thus use just one blob saver for
each CPU we are allowed to use.
package archiver
import (
"context"
"encoding/json"
"os"
"path"
"runtime"
"sort"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// SelectByNameFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
// It is consulted before Lstat, so it only receives the item's path.
type SelectByNameFunc func(item string) bool

// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
// Unlike SelectByNameFunc it also receives the item's FileInfo.
type SelectFunc func(item string, fi os.FileInfo) bool

// ErrorFunc is called when an error during archiving occurs. When nil is
// returned, the archiver continues, otherwise it aborts and passes the error
// up the call stack.
type ErrorFunc func(file string, fi os.FileInfo, err error) error
// ItemStats collects some statistics about a particular file or directory.
type ItemStats struct {
	DataBlobs      int    // number of new data blobs added for this item
	DataSize       uint64 // sum of the sizes of all new data blobs
	DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
	TreeBlobs      int    // number of new tree blobs added for this item
	TreeSize       uint64 // sum of the sizes of all new tree blobs
	TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
}

// Add accumulates the counters from other into s, field by field.
func (s *ItemStats) Add(other ItemStats) {
	// all additions are independent; grouped by blob kind for readability
	s.TreeBlobs += other.TreeBlobs
	s.TreeSize += other.TreeSize
	s.TreeSizeInRepo += other.TreeSizeInRepo
	s.DataBlobs += other.DataBlobs
	s.DataSize += other.DataSize
	s.DataSizeInRepo += other.DataSizeInRepo
}
// Archiver saves a directory structure to the repo.
type Archiver struct {
	// Repo is the repository the snapshot is written to.
	Repo restic.Repository
	// SelectByName is consulted with the path only, before Lstat.
	SelectByName SelectByNameFunc
	// Select is consulted after Lstat with path and FileInfo.
	Select SelectFunc
	// FS abstracts the filesystem being archived.
	FS fs.FS
	// Options holds the concurrency configuration, see ApplyDefaults.
	Options Options

	// worker pools; only valid between runWorkers and stopWorkers
	blobSaver *BlobSaver
	fileSaver *FileSaver
	treeSaver *TreeSaver

	// Error is called for all errors that occur during backup.
	Error ErrorFunc

	// CompleteItem is called for all files and dirs once they have been
	// processed successfully. The parameter item contains the path as it will
	// be in the snapshot after saving. s contains some statistics about this
	// particular file/dir.
	//
	// CompleteItem may be called asynchronously from several different
	// goroutines!
	CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration)

	// StartFile is called when a file is being processed by a worker.
	StartFile func(filename string)

	// CompleteBlob is called for all saved blobs for files.
	CompleteBlob func(filename string, bytes uint64)

	// WithAtime configures if the access time for files and directories should
	// be saved. Enabling it may result in much metadata, so it's off by
	// default.
	WithAtime bool
	// Flags controlling change detection. See doc/040_backup.rst for details.
	ChangeIgnoreFlags uint
}

// Flags for the ChangeIgnoreFlags bitfield.
const (
	// ChangeIgnoreCtime disables the ctime comparison in change detection.
	ChangeIgnoreCtime = 1 << iota
	// ChangeIgnoreInode disables the inode comparison in change detection.
	ChangeIgnoreInode
)
// Options is used to configure the archiver.
type Options struct {
	// FileReadConcurrency sets how many files are read in concurrently. If
	// it's set to zero, at most two files are read in concurrently (which
	// turned out to be a good default for most situations).
	FileReadConcurrency uint

	// SaveBlobConcurrency sets how many blobs are hashed and saved
	// concurrently. If it's set to zero, the default is the number of CPUs
	// available in the system.
	SaveBlobConcurrency uint

	// SaveTreeConcurrency sets how many trees are marshalled and saved to the
	// repo concurrently.
	SaveTreeConcurrency uint
}

// ApplyDefaults returns a copy of o with the default options set for all unset
// fields.
func (o Options) ApplyDefaults() Options {
	out := o

	if out.FileReadConcurrency == 0 {
		// reading two files at once is a good default; see the experiments at
		// https://github.com/borgbackup/borg/issues/3500
		out.FileReadConcurrency = 2
	}

	if out.SaveBlobConcurrency == 0 {
		// hashing and encrypting blobs is CPU bound; the upload itself is
		// handled asynchronously by the repository, so one worker per
		// available CPU (GOMAXPROCS) is the sweet spot
		out.SaveBlobConcurrency = uint(runtime.GOMAXPROCS(0))
	}

	if out.SaveTreeConcurrency == 0 {
		// SaveTree workers are cheap, so a high multiple is fine
		out.SaveTreeConcurrency = out.SaveBlobConcurrency * 20
	}

	return out
}
// New initializes a new archiver. All callbacks default to no-ops and both
// select functions default to "include everything".
func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver {
	arch := &Archiver{
		Repo:         repo,
		SelectByName: func(item string) bool { return true },
		Select:       func(item string, fi os.FileInfo) bool { return true },
		FS:           fs,
		Options:      opts.ApplyDefaults(),

		// no-op callbacks so callers may leave them unset
		CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
		StartFile:    func(string) {},
		CompleteBlob: func(string, uint64) {},
	}

	return arch
}
// error calls arch.Error if it is set and the error is different from context.Canceled.
func (arch *Archiver) error(item string, fi os.FileInfo, err error) error {
	// nothing to report, or no handler installed: pass through unchanged
	if arch.Error == nil || err == nil {
		return err
	}

	// cancellation must always propagate so the backup actually aborts
	if err == context.Canceled {
		return err
	}

	errf := arch.Error(item, fi, err)
	if err != errf {
		debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf)
	}
	// a nil result means the handler chose to ignore the error
	return errf
}
// saveTree stores a tree in the repo. It checks the index and the known blobs
// before saving anything.
func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID, ItemStats, error) {
	var s ItemStats
	buf, err := json.Marshal(t)
	if err != nil {
		return restic.ID{}, s, errors.Wrap(err, "MarshalJSON")
	}

	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	buf = append(buf, '\n')

	b := &Buffer{Data: buf}
	res := arch.blobSaver.Save(ctx, restic.TreeBlob, b)
	res.Wait(ctx)
	// only count the blob towards the stats if it was not already in the repo
	if !res.Known() {
		s.TreeBlobs++
		s.TreeSize += uint64(res.Length())
		s.TreeSizeInRepo += uint64(res.SizeInRepo())
	}
	// The context was canceled in the meantime, res.ID() might be invalid
	if ctx.Err() != nil {
		return restic.ID{}, s, ctx.Err()
	}
	return res.ID(), s, nil
}
// nodeFromFileInfo returns the restic node from an os.FileInfo, applying the
// archiver's atime policy. On failure the wrapped error is returned together
// with whatever node NodeFromFileInfo produced (possibly nil).
func (arch *Archiver) nodeFromFileInfo(filename string, fi os.FileInfo) (*restic.Node, error) {
	node, err := restic.NodeFromFileInfo(filename, fi)
	// BUGFIX: guard against a nil node — the previous code dereferenced node
	// before inspecting err, which panics if NodeFromFileInfo ever returns nil.
	if node != nil && !arch.WithAtime {
		// atime tracking is off by default (it inflates metadata), so
		// normalize the access time to the modification time
		node.AccessTime = node.ModTime
	}
	return node, errors.Wrap(err, "NodeFromFileInfo")
}
// loadSubtree tries to load the subtree referenced by node. In case of an
// error, a nil tree is returned together with the wrapped error. If there is
// no node to load, then nil is returned without an error.
func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*restic.Tree, error) {
	// only directory nodes carrying a subtree reference can be loaded
	if node == nil || node.Type != "dir" || node.Subtree == nil {
		return nil, nil
	}

	tree, err := arch.Repo.LoadTree(ctx, *node.Subtree)
	if err != nil {
		debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err)
		// a tree in the repository is not readable -> warn the user
		return nil, arch.wrapLoadTreeError(*node.Subtree, err)
	}

	return tree, nil
}
// wrapLoadTreeError enriches a tree-load failure: if the index knows the tree,
// the stored data must be damaged; otherwise the index itself is incomplete
// and a rebuild-index is suggested.
func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
	if arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) {
		err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err)
	} else {
		err = errors.Errorf("tree %v is not known; the repository could be damaged, run `rebuild-index` to try to repair it", id)
	}
	return err
}
// SaveDir stores a directory in the repo and returns the node. snPath is the
// path within the current snapshot.
func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo, dir string, previous *restic.Tree, complete CompleteFunc) (d FutureTree, err error) {
	debug.Log("%v %v", snPath, dir)

	treeNode, err := arch.nodeFromFileInfo(dir, fi)
	if err != nil {
		return FutureTree{}, err
	}

	names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW)
	if err != nil {
		return FutureTree{}, err
	}
	// sort for a deterministic tree layout
	sort.Strings(names)

	nodes := make([]FutureNode, 0, len(names))

	for _, name := range names {
		// test if context has been cancelled
		if ctx.Err() != nil {
			debug.Log("context has been cancelled, aborting")
			return FutureTree{}, ctx.Err()
		}

		pathname := arch.FS.Join(dir, name)
		oldNode := previous.Find(name)
		snItem := join(snPath, name)
		fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode)

		// return error early if possible
		if err != nil {
			// NOTE(review): fi here is the parent directory's FileInfo, not
			// the child's — confirm the error callback expects that.
			err = arch.error(pathname, fi, err)
			if err == nil {
				// ignore error
				continue
			}

			return FutureTree{}, err
		}

		if excluded {
			continue
		}

		nodes = append(nodes, fn)
	}

	// hand the assembled children to the tree saver; the returned future
	// completes once the tree blob is stored
	ft := arch.treeSaver.Save(ctx, snPath, treeNode, nodes, complete)

	return ft, nil
}
// FutureNode holds a reference to a node, FutureFile, or FutureTree. At most
// one of isFile/isTree is set while a result is pending; after wait() both are
// cleared and node/stats/err carry the outcome.
type FutureNode struct {
	snPath, target string

	// kept to call the error callback function
	absTarget string
	fi        os.FileInfo

	node  *restic.Node // result node; nil if the item was excluded or failed
	stats ItemStats    // statistics collected for this item
	err   error        // error reported by the underlying future

	isFile bool
	file   FutureFile
	isTree bool
	tree   FutureTree
}
// wait blocks until the wrapped future (file or tree) has finished and copies
// its result (node, stats, error) into fn itself.
func (fn *FutureNode) wait(ctx context.Context) {
	switch {
	case fn.isFile:
		// wait for and collect the data for the file
		fn.file.Wait(ctx)
		fn.node = fn.file.Node()
		fn.err = fn.file.Err()
		fn.stats = fn.file.Stats()

		// ensure the other stuff can be garbage-collected
		fn.file = FutureFile{}
		fn.isFile = false

	case fn.isTree:
		// wait for and collect the data for the dir
		fn.tree.Wait(ctx)
		fn.node = fn.tree.Node()
		fn.stats = fn.tree.Stats()
		// NOTE(review): unlike the file branch, no error is collected here —
		// presumably FutureTree has no Err(); confirm tree errors surface
		// through another path.

		// ensure the other stuff can be garbage-collected
		fn.tree = FutureTree{}
		fn.isTree = false
	}
}
// allBlobsPresent checks if all blobs (contents) of the given node are
// present in the index. A single missing blob forces re-reading the file.
func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool {
	// check if all blobs are contained in index
	for _, id := range previous.Content {
		if !arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.DataBlob}) {
			return false
		}
	}
	return true
}
// Save saves a target (file or directory) to the repo. If the item is
// excluded, this function returns a nil node and error, with excluded set to
// true.
//
// Errors and completion needs to be handled by the caller.
//
// snPath is the path within the current snapshot.
func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
	start := time.Now()

	fn = FutureNode{
		snPath: snPath,
		target: target,
	}

	debug.Log("%v target %q, previous %v", snPath, target, previous)
	abstarget, err := arch.FS.Abs(target)
	if err != nil {
		return FutureNode{}, false, err
	}

	fn.absTarget = abstarget

	// exclude files by path before running Lstat to reduce number of lstat calls
	if !arch.SelectByName(abstarget) {
		debug.Log("%v is excluded by path", target)
		return FutureNode{}, true, nil
	}

	// get file info and run remaining select functions that require file information
	fi, err := arch.FS.Lstat(target)
	if err != nil {
		debug.Log("lstat() for %v returned error: %v", target, err)
		err = arch.error(abstarget, fi, err)
		if err != nil {
			return FutureNode{}, false, errors.Wrap(err, "Lstat")
		}
		// the error callback chose to ignore the error
		return FutureNode{}, true, nil
	}

	if !arch.Select(abstarget, fi) {
		debug.Log("%v is excluded", target)
		return FutureNode{}, true, nil
	}

	switch {
	case fs.IsRegularFile(fi):
		debug.Log("  %v regular file", target)
		start := time.Now()

		// check if the file has not changed before performing a fopen operation (more expensive, specially
		// in network filesystems)
		if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) {
			if arch.allBlobsPresent(previous) {
				debug.Log("%v hasn't changed, using old list of blobs", target)
				arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start))
				arch.CompleteBlob(snPath, previous.Size)
				fn.node, err = arch.nodeFromFileInfo(target, fi)
				if err != nil {
					return FutureNode{}, false, err
				}

				// copy list of blobs
				fn.node.Content = previous.Content

				return fn, false, nil
			}

			debug.Log("%v hasn't changed, but contents are missing!", target)
			// There are contents missing - inform user!
			err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
			err = arch.error(abstarget, fi, err)
			if err != nil {
				return FutureNode{}, false, err
			}
		}

		// reopen file and do an fstat() on the open file to check it is still
		// a file (and has not been exchanged for e.g. a symlink)
		file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
		if err != nil {
			debug.Log("Openfile() for %v returned error: %v", target, err)
			err = arch.error(abstarget, fi, err)
			if err != nil {
				// fix: wrap with the operation that actually failed (was "Lstat")
				return FutureNode{}, false, errors.Wrap(err, "OpenFile")
			}
			return FutureNode{}, true, nil
		}

		fi, err = file.Stat()
		if err != nil {
			debug.Log("stat() on opened file %v returned error: %v", target, err)
			_ = file.Close()
			err = arch.error(abstarget, fi, err)
			if err != nil {
				// fix: wrap with the operation that actually failed (was "Lstat")
				return FutureNode{}, false, errors.Wrap(err, "Stat")
			}
			return FutureNode{}, true, nil
		}

		// make sure it's still a file
		if !fs.IsRegularFile(fi) {
			// fix: the format string was missing its argument
			err = errors.Errorf("file %v changed type, refusing to archive", target)
			_ = file.Close()
			err = arch.error(abstarget, fi, err)
			if err != nil {
				return FutureNode{}, false, err
			}
			return FutureNode{}, true, nil
		}

		fn.isFile = true
		// Save will close the file, we don't need to do that
		fn.file = arch.fileSaver.Save(ctx, snPath, file, fi, func() {
			arch.StartFile(snPath)
		}, func(node *restic.Node, stats ItemStats) {
			arch.CompleteItem(snPath, previous, node, stats, time.Since(start))
		})

	case fi.IsDir():
		debug.Log("  %v dir", target)

		snItem := snPath + "/"
		start := time.Now()
		oldSubtree, err := arch.loadSubtree(ctx, previous)
		if err != nil {
			err = arch.error(abstarget, fi, err)
		}
		if err != nil {
			return FutureNode{}, false, err
		}

		fn.isTree = true
		fn.tree, err = arch.SaveDir(ctx, snPath, fi, target, oldSubtree,
			func(node *restic.Node, stats ItemStats) {
				arch.CompleteItem(snItem, previous, node, stats, time.Since(start))
			})
		if err != nil {
			debug.Log("SaveDir for %v returned error: %v", snPath, err)
			return FutureNode{}, false, err
		}

	case fi.Mode()&os.ModeSocket > 0:
		debug.Log("  %v is a socket, ignoring", target)
		return FutureNode{}, true, nil

	default:
		// other node types (symlink, device, fifo, ...) are stored as-is
		debug.Log("  %v other", target)

		fn.node, err = arch.nodeFromFileInfo(target, fi)
		if err != nil {
			return FutureNode{}, false, err
		}
	}

	debug.Log("return after %.3f", time.Since(start).Seconds())

	return fn, false, nil
}
// fileChanged tries to detect whether a file's content has changed compared
// to the contents of node, which describes the same path in the parent backup.
// It should only be run for regular files.
func fileChanged(fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool {
	if node == nil {
		// no parent entry, treat as changed
		return true
	}
	if node.Type != "file" {
		// We're only called for regular files, so this is a type change.
		return true
	}
	if uint64(fi.Size()) != node.Size {
		return true
	}
	if !fi.ModTime().Equal(node.ModTime) {
		return true
	}

	extFI := fs.ExtendedStat(fi)
	if ignoreFlags&ChangeIgnoreCtime == 0 && !extFI.ChangeTime.Equal(node.ChangeTime) {
		return true
	}
	if ignoreFlags&ChangeIgnoreInode == 0 && node.Inode != extFI.Inode {
		return true
	}

	return false
}
// join returns all elements separated with a forward slash, using
// path.Join semantics (the result is cleaned).
func join(elem ...string) string {
	joined := path.Join(elem...)
	return joined
}
// statDir returns the file info for the directory. Symbolic links are
// resolved. If the target directory is not a directory, an error is returned
// together with the file info.
func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
	fi, err := arch.FS.Stat(dir)
	if err != nil {
		// fix: the call above is Stat, not Lstat — label the wrap accordingly
		return nil, errors.Wrap(err, "Stat")
	}

	// mask out everything except the type bits; char devices set both
	// ModeDevice and ModeCharDevice, so include the latter in the mask
	tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice)
	if tpe != os.ModeDir {
		return fi, errors.Errorf("path is not a directory: %v", dir)
	}

	return fi, nil
}
// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path
// within the current snapshot.
func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree) (*restic.Tree, error) {
	debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)

	nodeNames := atree.NodeNames()
	tree := restic.NewTree(len(nodeNames))

	futureNodes := make(map[string]FutureNode)

	// iterate over the nodes of atree in lexicographic (=deterministic) order
	for _, name := range nodeNames {
		subatree := atree.Nodes[name]

		// test if context has been cancelled
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}

		// this is a leaf node
		if subatree.Leaf() {
			fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name))

			if err != nil {
				err = arch.error(subatree.Path, fn.fi, err)
				if err == nil {
					// ignore error
					continue
				}
				return nil, err
			}
			// fix: removed an unreachable duplicate `if err != nil` check
			// here — err is always nil at this point.

			if !excluded {
				futureNodes[name] = fn
			}
			continue
		}

		snItem := join(snPath, name) + "/"
		start := time.Now()

		oldNode := previous.Find(name)
		oldSubtree, err := arch.loadSubtree(ctx, oldNode)
		if err != nil {
			// let the error callback decide whether this is fatal
			err = arch.error(join(snPath, name), nil, err)
		}
		if err != nil {
			return nil, err
		}

		// not a leaf node, archive subtree
		subtree, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree)
		if err != nil {
			return nil, err
		}

		id, nodeStats, err := arch.saveTree(ctx, subtree)
		if err != nil {
			return nil, err
		}

		if subatree.FileInfoPath == "" {
			return nil, errors.Errorf("FileInfoPath for %v/%v is empty", snPath, name)
		}

		debug.Log("%v, saved subtree %v as %v", snPath, subtree, id.Str())

		fi, err := arch.statDir(subatree.FileInfoPath)
		if err != nil {
			return nil, err
		}

		debug.Log("%v, dir node data loaded from %v", snPath, subatree.FileInfoPath)

		node, err := arch.nodeFromFileInfo(subatree.FileInfoPath, fi)
		if err != nil {
			return nil, err
		}

		node.Name = name
		node.Subtree = &id

		err = tree.Insert(node)
		if err != nil {
			return nil, err
		}

		arch.CompleteItem(snItem, oldNode, node, nodeStats, time.Since(start))
	}

	debug.Log("waiting on %d nodes", len(futureNodes))

	// process all futures
	for name, fn := range futureNodes {
		fn.wait(ctx)

		// return the error, or ignore it
		if fn.err != nil {
			fn.err = arch.error(fn.target, fn.fi, fn.err)
			if fn.err == nil {
				// ignore error
				continue
			}

			return nil, fn.err
		}

		// when the error is ignored, the node could not be saved, so ignore it
		if fn.node == nil {
			debug.Log("%v excluded: %v", fn.snPath, fn.target)
			continue
		}

		fn.node.Name = name

		err := tree.Insert(fn.node)
		if err != nil {
			return nil, err
		}
	}

	return tree, nil
}
// readdirnames returns the names of all entries of the directory dir.
// flags are passed to fs.OpenFile. O_RDONLY is implied.
func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) {
	f, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0)
	if err != nil {
		return nil, errors.Wrap(err, "Open")
	}

	entries, err := f.Readdirnames(-1)
	if err != nil {
		// prefer reporting the Readdirnames error over a Close error
		_ = f.Close()
		return nil, errors.Wrapf(err, "Readdirnames %v failed", dir)
	}

	if err := f.Close(); err != nil {
		return nil, err
	}

	return entries, nil
}
// resolveRelativeTargets replaces targets that only contain relative
// directories ("." or "../../") with the contents of the directory. Each
// element of target is processed with fs.Clean().
func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
	debug.Log("targets before resolving: %v", targets)
	result := make([]string, 0, len(targets))
	for _, target := range targets {
		target = filesys.Clean(target)

		// a target with at least one path component is kept as-is
		if pc, _ := pathComponents(filesys, target, false); len(pc) > 0 {
			result = append(result, target)
			continue
		}

		// the target is purely relative, replace it with its directory entries
		debug.Log("replacing %q with readdir(%q)", target, target)
		entries, err := readdirnames(filesys, target, fs.O_NOFOLLOW)
		if err != nil {
			return nil, err
		}
		sort.Strings(entries)

		for _, entry := range entries {
			result = append(result, filesys.Join(target, entry))
		}
	}

	debug.Log("targets after resolving: %v", result)
	return result, nil
}
// SnapshotOptions collect attributes for a new snapshot.
type SnapshotOptions struct {
	// Tags is attached to the new snapshot.
	Tags restic.TagList
	// Hostname is recorded as the snapshot's host.
	Hostname string
	// Excludes lists the exclude patterns, stored in the snapshot for reference.
	Excludes []string
	// Time is the snapshot's timestamp.
	Time time.Time
	// ParentSnapshot is the ID of the parent snapshot; a null ID means no parent.
	ParentSnapshot restic.ID
}
// loadParentTree loads the root tree of the snapshot with the given id. If id
// is null, or the snapshot or tree cannot be loaded, nil is returned (load
// errors are reported through the error callback but never returned).
func (arch *Archiver) loadParentTree(ctx context.Context, snapshotID restic.ID) *restic.Tree {
	if snapshotID.IsNull() {
		return nil
	}

	debug.Log("load parent snapshot %v", snapshotID)
	sn, err := restic.LoadSnapshot(ctx, arch.Repo, snapshotID)
	if err != nil {
		debug.Log("unable to load snapshot %v: %v", snapshotID, err)
		return nil
	}

	if sn.Tree == nil {
		// fix: the format string had two verbs but only one argument
		debug.Log("snapshot %v has empty tree", snapshotID)
		return nil
	}

	debug.Log("load parent tree %v", *sn.Tree)

	tree, err := arch.Repo.LoadTree(ctx, *sn.Tree)
	if err != nil {
		debug.Log("unable to load tree %v: %v", *sn.Tree, err)
		// report but tolerate the broken parent tree
		_ = arch.error("/", nil, arch.wrapLoadTreeError(*sn.Tree, err))
		return nil
	}
	return tree
}
// runWorkers starts the worker pools, which are stopped when the context is cancelled.
// The goroutines are registered with wg so that callers can wait for them.
func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) {
	// the blob saver is created first, as the file saver feeds blobs into it
	arch.blobSaver = NewBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency)

	arch.fileSaver = NewFileSaver(ctx, wg,
		arch.blobSaver.Save,
		arch.Repo.Config().ChunkerPolynomial,
		arch.Options.FileReadConcurrency, arch.Options.SaveBlobConcurrency)
	// wire progress/node callbacks into the file saver
	arch.fileSaver.CompleteBlob = arch.CompleteBlob
	arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo

	arch.treeSaver = NewTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.saveTree, arch.Error)
}
// stopWorkers signals all worker pools to shut down and clears the
// references so the savers cannot be used afterwards (and can be
// garbage-collected). It does not wait for the workers to finish; that is
// done via the errgroup passed to runWorkers.
func (arch *Archiver) stopWorkers() {
	arch.blobSaver.TriggerShutdown()
	arch.fileSaver.TriggerShutdown()
	arch.treeSaver.TriggerShutdown()
	arch.blobSaver = nil
	arch.fileSaver = nil
	arch.treeSaver = nil
}
// Snapshot saves several targets and returns a snapshot together with its ID.
// It resolves relative targets, archives the resulting tree with a pool of
// workers, flushes all packs and finally stores the snapshot object.
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) {
	// expand "." / "../.."-style targets into their directory contents
	cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
	if err != nil {
		return nil, restic.ID{}, err
	}

	atree, err := NewTree(arch.FS, cleanTargets)
	if err != nil {
		return nil, restic.ID{}, err
	}

	var rootTreeID restic.ID

	// outer group runs the pack uploader, inner group runs the archiver workers
	wgUp, wgUpCtx := errgroup.WithContext(ctx)
	arch.Repo.StartPackUploader(wgUpCtx, wgUp)

	wgUp.Go(func() error {
		wg, wgCtx := errgroup.WithContext(wgUpCtx)
		start := time.Now()

		var stats ItemStats
		wg.Go(func() error {
			arch.runWorkers(wgCtx, wg)

			debug.Log("starting snapshot")
			tree, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot))
			if err != nil {
				return err
			}

			if len(tree.Nodes) == 0 {
				return errors.New("snapshot is empty")
			}

			rootTreeID, stats, err = arch.saveTree(wgCtx, tree)
			// saving the root tree is the last job, shut down the workers
			arch.stopWorkers()
			return err
		})

		err = wg.Wait()
		debug.Log("err is %v", err)

		if err != nil {
			debug.Log("error while saving tree: %v", err)
			return err
		}

		arch.CompleteItem("/", nil, nil, stats, time.Since(start))

		// write any remaining packs to the repository
		return arch.Repo.Flush(ctx)
	})
	err = wgUp.Wait()
	if err != nil {
		return nil, restic.ID{}, err
	}

	// build the snapshot object referencing the root tree and store it
	sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
	if err != nil {
		return nil, restic.ID{}, err
	}

	sn.Excludes = opts.Excludes
	if !opts.ParentSnapshot.IsNull() {
		id := opts.ParentSnapshot
		sn.Parent = &id
	}
	sn.Tree = &rootTreeID
	id, err := arch.Repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
	if err != nil {
		return nil, restic.ID{}, err
	}

	return sn, id, nil
}
|
package rest
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"

	"golang.org/x/net/context/ctxhttp"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"

	"github.com/cenkalti/backoff/v4"
)
// make sure the rest backend implements restic.Backend
var _ restic.Backend = &Backend{}
// Backend uses the REST protocol to access data stored on a server.
type Backend struct {
	url    *url.URL           // base URL of the REST server
	sem    *backend.Semaphore // limits the number of concurrent requests
	client *http.Client       // shared HTTP client, reused across requests
	backend.Layout             // provides Filename/Dirname for handles
}
// the REST API protocol version is decided by HTTP request headers, these are the constants.
const (
ContentTypeV1 = "application/vnd.x.restic.rest.v1"
ContentTypeV2 = "application/vnd.x.restic.rest.v2"
)
// Open opens the REST backend with the given config. The HTTP client is
// built on top of the supplied round tripper.
func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	sem, err := backend.NewSemaphore(cfg.Connections)
	if err != nil {
		return nil, err
	}

	// the layout wants the URL without a trailing slash
	layoutURL := cfg.URL.String()
	if layoutURL[len(layoutURL)-1] == '/' {
		layoutURL = layoutURL[:len(layoutURL)-1]
	}

	return &Backend{
		url:    cfg.URL,
		client: &http.Client{Transport: rt},
		Layout: &backend.RESTLayout{URL: layoutURL, Join: path.Join},
		sem:    sem,
	}, nil
}
// Create creates a new REST on server configured in config. It fails if the
// repository already contains a config file.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	be, err := Open(cfg, rt)
	if err != nil {
		return nil, err
	}

	_, err = be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
	if err == nil {
		return nil, errors.Fatal("config file already exists")
	}

	url := *cfg.URL
	values := url.Query()
	values.Set("create", "true")
	url.RawQuery = values.Encode()

	resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
	if err != nil {
		return nil, err
	}

	if resp.StatusCode != http.StatusOK {
		// fix: drain and close the body before returning, so the
		// connection can be reused and is not leaked
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		_ = resp.Body.Close()
		return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
	}

	_, err = io.Copy(ioutil.Discard, resp.Body)
	if err != nil {
		return nil, err
	}

	err = resp.Body.Close()
	if err != nil {
		return nil, err
	}

	return be, nil
}
// Location returns this backend's location (the server's URL).
func (b *Backend) Location() string {
	return b.url.String()
}
// Save stores data in the backend at the handle.
func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	if err := h.Valid(); err != nil {
		return backoff.Permanent(err)
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// wrap the reader so that client.Post() cannot close it
	req, err := http.NewRequest(http.MethodPost, b.Filename(h), ioutil.NopCloser(rd))
	if err != nil {
		return errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Accept", ContentTypeV2)

	// setting the content length explicitly prevents chunked encoding and
	// tells the server how much data to expect
	req.ContentLength = rd.Length()

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	// drain and close the body (if any) so the connection can be reused
	var closeErr error
	if resp != nil {
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		closeErr = resp.Body.Close()
	}

	if err != nil {
		return errors.Wrap(err, "client.Post")
	}
	if resp.StatusCode != 200 {
		return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
	}

	return errors.Wrap(closeErr, "Close")
}
// ErrIsNotExist is returned whenever the requested file does not exist on the
// server.
type ErrIsNotExist struct {
	restic.Handle // the handle that was requested
}

// Error implements the error interface.
func (e ErrIsNotExist) Error() string {
	return fmt.Sprintf("%v does not exist", e.Handle)
}
// IsNotExist returns true if the error was caused by a non-existing file.
func (b *Backend) IsNotExist(err error) bool {
	// unwrap a github.com/pkg/errors-style cause chain before the type check
	err = errors.Cause(err)
	_, ok := err.(ErrIsNotExist)
	return ok
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. The reader is closed by Load after fn returns.
func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	r, err := b.openReader(ctx, h, length, offset)
	if err != nil {
		return err
	}

	err = fn(r)
	if err != nil {
		_ = r.Close() // ignore error here
		return err
	}

	// Note: readerat.ReadAt() (the fn) uses io.ReadFull() that doesn't
	// wait for EOF after reading body. Due to HTTP/2 stream multiplexing
	// and goroutine timings the EOF frame arrives from server (eg. rclone)
	// with a delay after reading body. Immediate close might trigger
	// HTTP/2 stream reset resulting in the *stream closed* error on server,
	// so we wait for EOF before closing body.
	var buf [1]byte
	_, err = r.Read(buf[:])
	if err == io.EOF {
		err = nil
	}

	// report a Close error only when no earlier error occurred
	if e := r.Close(); err == nil {
		err = e
	}
	return err
}
// openReader sends a GET request for h, with a Range header covering length
// bytes starting at offset (length == 0 requests everything from offset on),
// and returns the response body. The caller must close the returned reader.
func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v", h, length, offset)
	if err := h.Valid(); err != nil {
		return nil, backoff.Permanent(err)
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	req, err := http.NewRequest("GET", b.Filename(h), nil)
	if err != nil {
		return nil, errors.Wrap(err, "http.NewRequest")
	}

	byteRange := fmt.Sprintf("bytes=%d-", offset)
	if length > 0 {
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
	}
	req.Header.Set("Range", byteRange)
	req.Header.Set("Accept", ContentTypeV2)
	debug.Log("Load(%v) send range %v", h, byteRange)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	if err != nil {
		if resp != nil {
			_, _ = io.Copy(ioutil.Discard, resp.Body)
			_ = resp.Body.Close()
		}
		return nil, errors.Wrap(err, "client.Do")
	}

	if resp.StatusCode == http.StatusNotFound {
		_ = resp.Body.Close()
		return nil, ErrIsNotExist{h}
	}

	if resp.StatusCode != 200 && resp.StatusCode != 206 {
		_ = resp.Body.Close()
		return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
	}

	// fix: workaround https://github.com/golang/go/issues/46071 — the Go
	// HTTP client does not report an error when an HTTP/2 reply announces a
	// non-zero Content-Length but returns no data at all. Restore the
	// announced length from the header and fail instead of returning a
	// truncated (empty) body.
	if resp.ContentLength == 0 && resp.ProtoMajor == 2 && resp.ProtoMinor == 0 {
		if clens := resp.Header["Content-Length"]; len(clens) == 1 {
			// bitSize 63 keeps the value representable as a non-negative int64
			if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
				resp.ContentLength = int64(cl)
			}
			if resp.ContentLength != 0 {
				_ = resp.Body.Close()
				return nil, errors.Errorf("unexpected EOF got 0 instead of %v bytes", resp.ContentLength)
			}
		}
	}

	return resp.Body, nil
}
// Stat returns information about a blob, obtained via an HTTP HEAD request.
func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
	if err := h.Valid(); err != nil {
		return restic.FileInfo{}, backoff.Permanent(err)
	}

	req, err := http.NewRequest(http.MethodHead, b.Filename(h), nil)
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "client.Head")
	}

	// drain and close the (empty) body so the connection can be reused
	_, _ = io.Copy(ioutil.Discard, resp.Body)
	if err = resp.Body.Close(); err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "Close")
	}

	if resp.StatusCode == http.StatusNotFound {
		// fix: removed a redundant second Close — the body was already
		// closed above
		return restic.FileInfo{}, ErrIsNotExist{h}
	}

	if resp.StatusCode != 200 {
		return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
	}

	if resp.ContentLength < 0 {
		return restic.FileInfo{}, errors.New("negative content length")
	}

	bi := restic.FileInfo{
		Size: resp.ContentLength,
		Name: h.Name,
	}

	return bi, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
// Any Stat failure is interpreted as "does not exist"; no error is returned.
func (b *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
	_, err := b.Stat(ctx, h)
	return err == nil, nil
}
// Remove removes the blob with the given name and type.
func (b *Backend) Remove(ctx context.Context, h restic.Handle) error {
	if err := h.Valid(); err != nil {
		return backoff.Permanent(err)
	}

	req, err := http.NewRequest("DELETE", b.Filename(h), nil)
	if err != nil {
		return errors.Wrap(err, "http.NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	if err != nil {
		return errors.Wrap(err, "client.Do")
	}

	if resp.StatusCode == http.StatusNotFound {
		_ = resp.Body.Close()
		return ErrIsNotExist{h}
	}

	if resp.StatusCode != 200 {
		// fix: drain and close the body before returning, so the
		// connection can be reused and is not leaked
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		_ = resp.Body.Close()
		return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
	}

	_, err = io.Copy(ioutil.Discard, resp.Body)
	if err != nil {
		return errors.Wrap(err, "Copy")
	}

	return errors.Wrap(resp.Body.Close(), "Close")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (b *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	url := b.Dirname(restic.Handle{Type: t})
	if !strings.HasSuffix(url, "/") {
		url += "/"
	}

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	if err != nil {
		return errors.Wrap(err, "List")
	}

	if resp.StatusCode != 200 {
		// fix: drain and close the body before returning the error, so
		// the connection can be reused and is not leaked
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		_ = resp.Body.Close()
		return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode)
	}

	// NOTE(review): the listv1/listv2 helpers read from resp.Body but do
	// not appear to close it — verify whether the body should be closed
	// here after they return.
	if resp.Header.Get("Content-Type") == ContentTypeV2 {
		return b.listv2(ctx, t, resp, fn)
	}

	return b.listv1(ctx, t, resp, fn)
}
// listv1 uses the REST protocol v1, where a list HTTP request (e.g. `GET
// /data/`) only returns the names of the files, so we need to issue an HTTP
// HEAD request for each file.
func (b *Backend) listv1(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
	debug.Log("parsing API v1 response")
	dec := json.NewDecoder(resp.Body)
	var list []string
	if err := dec.Decode(&list); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, m := range list {
		// v1 does not report sizes, fetch each via a HEAD request
		fi, err := b.Stat(ctx, restic.Handle{Name: m, Type: t})
		if err != nil {
			return err
		}

		// check for cancellation between network round trips
		if ctx.Err() != nil {
			return ctx.Err()
		}

		fi.Name = m
		err = fn(fi)
		if err != nil {
			return err
		}

		// check again after the (potentially slow) callback
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}
// listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET
// /data/`) returns the names and sizes of all files.
func (b *Backend) listv2(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
	debug.Log("parsing API v2 response")

	var list []struct {
		Name string `json:"name"`
		Size int64  `json:"size"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&list); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, item := range list {
		// check for cancellation before and after each callback
		if err := ctx.Err(); err != nil {
			return err
		}

		if err := fn(restic.FileInfo{Name: item.Name, Size: item.Size}); err != nil {
			return err
		}

		if err := ctx.Err(); err != nil {
			return err
		}
	}

	return ctx.Err()
}
// Close closes all open files. For the REST backend this is a no-op:
// every request closes its own response body within the same function.
func (b *Backend) Close() error {
	// this does not need to do anything, all open files are closed within the
	// same function.
	return nil
}
// removeKeys removes all files of the specified type from the backend by
// listing them and issuing one Remove per file.
func (b *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
	return b.List(ctx, t, func(fi restic.FileInfo) error {
		return b.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
	})
}
// Delete removes all data in the backend, including the config file.
// A missing config file is not treated as an error.
func (b *Backend) Delete(ctx context.Context) error {
	alltypes := []restic.FileType{
		restic.PackFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := b.removeKeys(ctx, t)
		if err != nil {
			// fix: previously `return nil` here, which silently swallowed
			// removal errors
			return err
		}
	}

	err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil && b.IsNotExist(err) {
		// the config file is already gone, nothing left to do
		return nil
	}
	return err
}
rest: workaround for HTTP2 zero-length replies bug
The Go HTTP client does not return an error when an HTTP/2 reply
announces a non-zero content length but then returns no data at all.
This scenario can occur, for example, when using rclone: a file stored
in a backend appears to be accessible but then fails to download.
package rest
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"golang.org/x/net/context/ctxhttp"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/cenkalti/backoff/v4"
)
// make sure the rest backend implements restic.Backend
var _ restic.Backend = &Backend{}
// Backend uses the REST protocol to access data stored on a server.
type Backend struct {
	url    *url.URL           // base URL of the REST server
	sem    *backend.Semaphore // limits the number of concurrent requests
	client *http.Client       // shared HTTP client, reused across requests
	backend.Layout             // provides Filename/Dirname for handles
}
// the REST API protocol version is decided by HTTP request headers, these are the constants.
const (
ContentTypeV1 = "application/vnd.x.restic.rest.v1"
ContentTypeV2 = "application/vnd.x.restic.rest.v2"
)
// Open opens the REST backend with the given config. The HTTP client is
// built on top of the supplied round tripper.
func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	sem, err := backend.NewSemaphore(cfg.Connections)
	if err != nil {
		return nil, err
	}

	// the layout wants the URL without a trailing slash
	layoutURL := cfg.URL.String()
	if layoutURL[len(layoutURL)-1] == '/' {
		layoutURL = layoutURL[:len(layoutURL)-1]
	}

	return &Backend{
		url:    cfg.URL,
		client: &http.Client{Transport: rt},
		Layout: &backend.RESTLayout{URL: layoutURL, Join: path.Join},
		sem:    sem,
	}, nil
}
// Create creates a new REST on server configured in config. It fails if the
// repository already contains a config file.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	be, err := Open(cfg, rt)
	if err != nil {
		return nil, err
	}

	_, err = be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
	if err == nil {
		return nil, errors.Fatal("config file already exists")
	}

	url := *cfg.URL
	values := url.Query()
	values.Set("create", "true")
	url.RawQuery = values.Encode()

	resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
	if err != nil {
		return nil, err
	}

	if resp.StatusCode != http.StatusOK {
		// fix: drain and close the body before returning, so the
		// connection can be reused and is not leaked
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		_ = resp.Body.Close()
		return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
	}

	_, err = io.Copy(ioutil.Discard, resp.Body)
	if err != nil {
		return nil, err
	}

	err = resp.Body.Close()
	if err != nil {
		return nil, err
	}

	return be, nil
}
// Location returns this backend's location (the server's URL).
func (b *Backend) Location() string {
	return b.url.String()
}
// Save stores data in the backend at the handle.
func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	if err := h.Valid(); err != nil {
		return backoff.Permanent(err)
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// wrap the reader so that client.Post() cannot close it
	req, err := http.NewRequest(http.MethodPost, b.Filename(h), ioutil.NopCloser(rd))
	if err != nil {
		return errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Accept", ContentTypeV2)

	// setting the content length explicitly prevents chunked encoding and
	// tells the server how much data to expect
	req.ContentLength = rd.Length()

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	// drain and close the body (if any) so the connection can be reused
	var closeErr error
	if resp != nil {
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		closeErr = resp.Body.Close()
	}

	if err != nil {
		return errors.Wrap(err, "client.Post")
	}
	if resp.StatusCode != 200 {
		return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
	}

	return errors.Wrap(closeErr, "Close")
}
// ErrIsNotExist is returned whenever the requested file does not exist on the
// server.
type ErrIsNotExist struct {
	restic.Handle // the handle that was requested
}

// Error implements the error interface.
func (e ErrIsNotExist) Error() string {
	return fmt.Sprintf("%v does not exist", e.Handle)
}
// IsNotExist returns true if the error was caused by a non-existing file.
func (b *Backend) IsNotExist(err error) bool {
	// unwrap a github.com/pkg/errors-style cause chain before the type check
	err = errors.Cause(err)
	_, ok := err.(ErrIsNotExist)
	return ok
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. The reader is closed by Load after fn returns.
func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	r, err := b.openReader(ctx, h, length, offset)
	if err != nil {
		return err
	}

	err = fn(r)
	if err != nil {
		_ = r.Close() // ignore error here
		return err
	}

	// Note: readerat.ReadAt() (the fn) uses io.ReadFull() that doesn't
	// wait for EOF after reading body. Due to HTTP/2 stream multiplexing
	// and goroutine timings the EOF frame arrives from server (eg. rclone)
	// with a delay after reading body. Immediate close might trigger
	// HTTP/2 stream reset resulting in the *stream closed* error on server,
	// so we wait for EOF before closing body.
	var buf [1]byte
	_, err = r.Read(buf[:])
	if err == io.EOF {
		err = nil
	}

	// report a Close error only when no earlier error occurred
	if e := r.Close(); err == nil {
		err = e
	}
	return err
}
// openReader sends a GET request for h, with a Range header covering length
// bytes starting at offset (length == 0 requests everything from offset on),
// and returns the response body. The caller must close the returned reader.
func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v", h, length, offset)
	if err := h.Valid(); err != nil {
		return nil, backoff.Permanent(err)
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	req, err := http.NewRequest("GET", b.Filename(h), nil)
	if err != nil {
		return nil, errors.Wrap(err, "http.NewRequest")
	}

	byteRange := fmt.Sprintf("bytes=%d-", offset)
	if length > 0 {
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
	}
	req.Header.Set("Range", byteRange)
	req.Header.Set("Accept", ContentTypeV2)
	debug.Log("Load(%v) send range %v", h, byteRange)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()

	if err != nil {
		// drain and close the body (if any) so the connection can be reused
		if resp != nil {
			_, _ = io.Copy(ioutil.Discard, resp.Body)
			_ = resp.Body.Close()
		}
		return nil, errors.Wrap(err, "client.Do")
	}

	if resp.StatusCode == http.StatusNotFound {
		_ = resp.Body.Close()
		return nil, ErrIsNotExist{h}
	}

	// 200 (full content) and 206 (partial content) are both acceptable
	if resp.StatusCode != 200 && resp.StatusCode != 206 {
		_ = resp.Body.Close()
		return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
	}

	// workaround https://github.com/golang/go/issues/46071
	// see also https://forum.restic.net/t/http2-stream-closed-connection-reset-context-canceled/3743/10
	// The Go HTTP client reports ContentLength == 0 on such broken HTTP/2
	// replies; restore the announced length from the raw header and fail
	// instead of returning a truncated (empty) body.
	if resp.ContentLength == 0 && resp.ProtoMajor == 2 && resp.ProtoMinor == 0 {
		if clens := resp.Header["Content-Length"]; len(clens) == 1 {
			// bitSize 63 keeps the value representable as a non-negative int64
			if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
				resp.ContentLength = int64(cl)
			}
			if resp.ContentLength != 0 {
				_ = resp.Body.Close()
				return nil, errors.Errorf("unexpected EOF got 0 instead of %v bytes", resp.ContentLength)
			}
		}
	}

	return resp.Body, nil
}
// Stat returns information about a blob, obtained via an HTTP HEAD request.
func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
	if err := h.Valid(); err != nil {
		return restic.FileInfo{}, backoff.Permanent(err)
	}

	req, err := http.NewRequest(http.MethodHead, b.Filename(h), nil)
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "client.Head")
	}

	// drain and close the (empty) body so the connection can be reused
	_, _ = io.Copy(ioutil.Discard, resp.Body)
	if err = resp.Body.Close(); err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "Close")
	}

	if resp.StatusCode == http.StatusNotFound {
		// fix: removed a redundant second Close — the body was already
		// closed above
		return restic.FileInfo{}, ErrIsNotExist{h}
	}

	if resp.StatusCode != 200 {
		return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
	}

	if resp.ContentLength < 0 {
		return restic.FileInfo{}, errors.New("negative content length")
	}

	bi := restic.FileInfo{
		Size: resp.ContentLength,
		Name: h.Name,
	}

	return bi, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
// Note: any Stat error (not only "does not exist") is reported as absence.
func (b *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
	if _, err := b.Stat(ctx, h); err != nil {
		return false, nil
	}
	return true, nil
}
// Remove removes the blob with the given name and type.
func (b *Backend) Remove(ctx context.Context, h restic.Handle) error {
	if err := h.Valid(); err != nil {
		return backoff.Permanent(err)
	}

	req, err := http.NewRequest("DELETE", b.Filename(h), nil)
	if err != nil {
		return errors.Wrap(err, "http.NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()
	if err != nil {
		return errors.Wrap(err, "client.Do")
	}

	if resp.StatusCode == http.StatusNotFound {
		_ = resp.Body.Close()
		return ErrIsNotExist{h}
	}
	if resp.StatusCode != 200 {
		// BUG fix: the body was leaked on this error path; close it so the
		// connection can be reused.
		_ = resp.Body.Close()
		return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
	}

	// Drain the body so the HTTP connection can be reused.
	_, err = io.Copy(ioutil.Discard, resp.Body)
	if err != nil {
		return errors.Wrap(err, "Copy")
	}
	return errors.Wrap(resp.Body.Close(), "Close")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (b *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	url := b.Dirname(restic.Handle{Type: t})
	if !strings.HasSuffix(url, "/") {
		url += "/"
	}

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return errors.Wrap(err, "NewRequest")
	}
	req.Header.Set("Accept", ContentTypeV2)

	b.sem.GetToken()
	resp, err := ctxhttp.Do(ctx, b.client, req)
	b.sem.ReleaseToken()
	if err != nil {
		return errors.Wrap(err, "List")
	}
	// BUG fix: the response body was never closed on any path. listv1/listv2
	// decode the body synchronously before List returns, so a deferred close
	// covers the success paths as well as the error path below.
	defer func() {
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != 200 {
		return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode)
	}

	if resp.Header.Get("Content-Type") == ContentTypeV2 {
		return b.listv2(ctx, t, resp, fn)
	}
	return b.listv1(ctx, t, resp, fn)
}
// listv1 uses the REST protocol v1, where a list HTTP request (e.g. `GET
// /data/`) only returns the names of the files, so we need to issue an HTTP
// HEAD request for each file to learn its size.
func (b *Backend) listv1(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
	debug.Log("parsing API v1 response")

	var names []string
	if err := json.NewDecoder(resp.Body).Decode(&names); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, name := range names {
		fi, err := b.Stat(ctx, restic.Handle{Name: name, Type: t})
		if err != nil {
			return err
		}
		if err := ctx.Err(); err != nil {
			return err
		}
		fi.Name = name
		if err := fn(fi); err != nil {
			return err
		}
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	return ctx.Err()
}
// listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET
// /data/`) returns the names and sizes of all files, so no extra HEAD
// requests are needed. The t parameter is unused but kept for symmetry with
// listv1.
func (b *Backend) listv2(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
	debug.Log("parsing API v2 response")

	var entries []struct {
		Name string `json:"name"`
		Size int64  `json:"size"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&entries); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, e := range entries {
		if err := ctx.Err(); err != nil {
			return err
		}
		if err := fn(restic.FileInfo{Name: e.Name, Size: e.Size}); err != nil {
			return err
		}
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	return ctx.Err()
}
// Close closes all open files.
func (b *Backend) Close() error {
	// Nothing to do: every file this backend opens is closed within the
	// function that opened it.
	return nil
}
// removeKeys removes all files of the given type from the backend.
func (b *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
	remove := func(fi restic.FileInfo) error {
		h := restic.Handle{Type: t, Name: fi.Name}
		return b.Remove(ctx, h)
	}
	return b.List(ctx, t, remove)
}
// Delete removes all data in the backend.
func (b *Backend) Delete(ctx context.Context) error {
	alltypes := []restic.FileType{
		restic.PackFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := b.removeKeys(ctx, t)
		if err != nil {
			// BUG fix: the original returned nil here, silently swallowing
			// the error and reporting a failed delete as success.
			return err
		}
	}

	err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil && b.IsNotExist(err) {
		// A missing config file means there is nothing left to delete.
		return nil
	}
	return err
}
|
package main
import (
"bufio"
"encoding/binary"
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
)
// Command-line flags controlling the simulation run.
var (
	nevts  = flag.Int("nevts", 100, "number of events to compute")
	ievt   = flag.Int("ievt", 0, "first event of the event loop")
	fname  = flag.String("o", "hpcsim.out", "path to output file to write")
	simpkg = flag.String("sim", "", "fully qualified name of the package to run") // NOTE(review): not referenced anywhere in this file
)
// Result holds the output of simulating a single event.
type Result struct {
	ID   [48]byte // event identifier; NOTE(review): never populated by Simulate
	Data []byte   // binary-encoded counters
}

// Context accumulates Monte-Carlo counters.
// NOTE(review): unused in this file.
type Context struct {
	Total  float64
	Inside float64
}

// Sim holds the event-loop configuration.
type Sim struct {
	NEvts int // number of events to simulate
	IEvt  int // index of the first event
}

// App bundles the pieces of a simulation application.
// NOTE(review): unused in this file.
type App struct {
	nevts int
	ievt  int
	fname string
	procs []Proc
	errc  chan error
	resc  chan Result
}

// Proc is a placeholder interface for simulation processors.
type Proc interface {
}
// main runs NEvts Monte-Carlo events concurrently and writes their binary
// results to the output file named by -o.
func main() {
	flag.Parse()

	sim := Sim{
		NEvts: *nevts,
		IEvt:  *ievt,
	}
	err := sim.Initialize()
	if err != nil {
		log.Fatalf("error initializing simulation: %v\n", err)
	}

	f, err := os.Create(*fname)
	if err != nil {
		log.Fatalf("error creating output file [%s]: %v\n",
			*fname,
			err,
		)
	}
	// NOTE(review): log.Fatalf below bypasses this defer; also `results` is
	// never closed, so writeResults never returns and its deferred Flush
	// never runs — confirm no output is lost.
	defer f.Close()

	errc := make(chan error)
	results := make(chan Result)
	go writeResults(f, results, errc)
	//go sim.Run(results)

	// Fan out: one goroutine per event.
	for i := 0; i < sim.NEvts; i++ {
		go sim.Simulate(i, results)
	}
	// Fan in: writeResults sends one status on errc per result received.
	for i := 0; i < sim.NEvts; i++ {
		err := <-errc
		if err != nil {
			log.Fatalf("error: %v\n", err)
		}
	}
}
// Initialize prepares the simulation. There is currently nothing to set up,
// so it always succeeds.
func (sim *Sim) Initialize() error {
	return nil
}
// Simulate runs one Monte-Carlo pi-estimation event and sends the encoded
// (total, inside) counters on results.
func (sim *Sim) Simulate(ievt int, results chan<- Result) {
	// Seed deterministically from the event number for reproducibility.
	rng := rand.New(rand.NewSource(int64(ievt)))

	const nsamples = 10000
	inside := 0
	for i := 0; i < nsamples; i++ {
		x, y := rng.Float64(), rng.Float64()
		if x*x+y*y < 1.0 {
			inside++
		}
	}

	// Encode the two counters as little-endian uint64s.
	buf := make([]byte, 2*8)
	binary.LittleEndian.PutUint64(buf[:8], uint64(nsamples))
	binary.LittleEndian.PutUint64(buf[8:], uint64(inside))
	results <- Result{Data: buf}
}
// writeResults drains input, writes each result's payload to f through a
// buffered writer, and reports the per-result write status on errc.
func writeResults(f io.Writer, input <-chan Result, errc chan<- error) {
	w := bufio.NewWriter(f)
	defer w.Flush()
	for res := range input {
		// BUG fix: the original wrote to f directly, bypassing the bufio
		// writer it had just created. Write through w and flush per result:
		// main never closes input, so the deferred Flush never runs and
		// buffered data would otherwise be lost.
		_, err := w.Write(res.Data)
		if err == nil {
			err = w.Flush()
		}
		if err != nil {
			err = fmt.Errorf(
				"error writing result-id=%q: %v",
				string(res.ID[:]), err,
			)
		}
		errc <- err
	}
}
hpc-sim-pi: allow cpu-profiling+tracing
package main
import (
"bufio"
"encoding/binary"
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"runtime/pprof"
"runtime/trace"
)
// Command-line flags controlling the simulation run and its instrumentation.
var (
	nthreads = flag.Int("t", 1, "number of concurrent goroutines")
	nevts    = flag.Int("nevts", 100, "number of events to compute")
	ievt     = flag.Int("ievt", 0, "first event of the event loop")
	fname    = flag.String("o", "hpcsim.out", "path to output file to write")
	simpkg   = flag.String("sim", "", "fully qualified name of the package to run") // NOTE(review): not referenced anywhere in this file
	doprof   = flag.Bool("prof", false, "enable CPU profiling")
	dotrace  = flag.Bool("trace", false, "enable tracing")
)
// Result holds the output of simulating a single event.
type Result struct {
	ID   [48]byte // event identifier; NOTE(review): never populated by Simulate
	Data []byte   // binary-encoded counters
}

// Context accumulates Monte-Carlo counters.
// NOTE(review): unused in this file.
type Context struct {
	Total  float64
	Inside float64
}

// Sim holds the event-loop configuration.
type Sim struct {
	NEvts int // number of events to simulate
	IEvt  int // index of the first event
}

// App bundles the pieces of a simulation application.
// NOTE(review): unused in this file.
type App struct {
	nevts int
	ievt  int
	fname string
	procs []Proc
	errc  chan error
	resc  chan Result
}

// Proc is a placeholder interface for simulation processors.
type Proc interface {
}
// main runs NEvts Monte-Carlo events concurrently, optionally under CPU
// profiling (-prof) and execution tracing (-trace), writing binary results
// to the output file named by -o.
func main() {
	flag.Parse()

	if *doprof {
		fprof, err := os.Create("prof.out")
		if err != nil {
			log.Fatalf("error creating pprof output file: %v\n", err)
		}
		defer fprof.Close()
		// BUG fix: the error from StartCPUProfile was silently discarded;
		// it fails e.g. when profiling is already enabled.
		if err := pprof.StartCPUProfile(fprof); err != nil {
			log.Fatalf("error starting CPU profile: %v\n", err)
		}
		defer pprof.StopCPUProfile()
	}

	if *dotrace {
		ftrace, err := os.Create("trace.out")
		if err != nil {
			log.Fatalf("error creating trace output file: %v\n", err)
		}
		defer ftrace.Close()
		err = trace.Start(ftrace)
		if err != nil {
			log.Fatalf("error starting tracer: %v\n", err)
		}
		defer trace.Stop()
	}

	sim := Sim{
		NEvts: *nevts,
		IEvt:  *ievt,
	}
	err := sim.Initialize()
	if err != nil {
		log.Fatalf("error initializing simulation: %v\n", err)
	}

	f, err := os.Create(*fname)
	if err != nil {
		log.Fatalf("error creating output file [%s]: %v\n",
			*fname,
			err,
		)
	}
	// NOTE(review): log.Fatalf calls below bypass the defers above, so the
	// profile/trace files may not be finalized on a fatal error.
	defer f.Close()

	errc := make(chan error, *nthreads)
	results := make(chan Result, *nthreads)
	go writeResults(f, results, errc)
	//go sim.Run(results)

	// Fan out: one goroutine per event; writeResults replies once per
	// result on errc.
	for i := 0; i < sim.NEvts; i++ {
		go sim.Simulate(i, results)
	}
	for i := 0; i < sim.NEvts; i++ {
		err := <-errc
		if err != nil {
			log.Fatalf("error: %v\n", err)
		}
	}
}
// Initialize prepares the simulation. There is currently nothing to set up,
// so it always succeeds.
func (sim *Sim) Initialize() error {
	return nil
}
// Simulate runs one Monte-Carlo pi-estimation event and sends the encoded
// (total, inside) counters on results.
func (sim *Sim) Simulate(ievt int, results chan<- Result) {
	// Seed deterministically from the event number for reproducibility.
	rng := rand.New(rand.NewSource(int64(ievt)))

	const nsamples = 10000
	inside := 0
	for i := 0; i < nsamples; i++ {
		x, y := rng.Float64(), rng.Float64()
		if x*x+y*y < 1.0 {
			inside++
		}
	}

	// Encode the two counters as little-endian uint64s.
	buf := make([]byte, 2*8)
	binary.LittleEndian.PutUint64(buf[:8], uint64(nsamples))
	binary.LittleEndian.PutUint64(buf[8:], uint64(inside))
	results <- Result{Data: buf}
}
// writeResults drains input, writes each result's payload to f through a
// buffered writer, and reports the per-result write status on errc.
func writeResults(f io.Writer, input <-chan Result, errc chan<- error) {
	w := bufio.NewWriter(f)
	defer w.Flush()
	for res := range input {
		// BUG fix: the original wrote to f directly, bypassing the bufio
		// writer it had just created. Write through w and flush per result:
		// main never closes input, so the deferred Flush never runs and
		// buffered data would otherwise be lost.
		_, err := w.Write(res.Data)
		if err == nil {
			err = w.Flush()
		}
		if err != nil {
			err = fmt.Errorf(
				"error writing result-id=%q: %v",
				string(res.ID[:]), err,
			)
		}
		errc <- err
	}
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package database adds some useful functionality to a sql.DB.
// It is independent of the database driver and the
// DB schema.
package database
import (
"context"
"database/sql"
"errors"
"fmt"
"regexp"
"strings"
"sync"
"time"
"github.com/jackc/pgconn"
"github.com/lib/pq"
"golang.org/x/pkgsite/internal/derrors"
"golang.org/x/pkgsite/internal/log"
)
// DB wraps a sql.DB. The methods it exports correspond closely to those of
// sql.DB. They enhance the original by requiring a context argument, and by
// logging the query and any resulting errors.
//
// A DB may represent a transaction. If so, its execution and query methods
// operate within the transaction.
type DB struct {
	db         *sql.DB
	instanceID string     // identifies this instance in query logs
	tx         *sql.Tx    // non-nil when this DB represents a transaction
	mu         sync.Mutex // protects maxRetries
	maxRetries int        // max times a single transaction was retried
}
// Open creates a new DB for the given connection string, verifying that the
// database is reachable before returning.
func Open(driverName, dbinfo, instanceID string) (_ *DB, err error) {
	defer derrors.Wrap(&err, "database.Open(%q, %q)",
		driverName, redactPassword(dbinfo))

	db, err := sql.Open(driverName, dbinfo)
	if err != nil {
		return nil, err
	}

	// Ping with a bounded timeout so a bad connection string fails fast.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if pingErr := db.PingContext(ctx); pingErr != nil {
		return nil, pingErr
	}
	return New(db, instanceID), nil
}
// New creates a new DB from a sql.DB.
func New(db *sql.DB, instanceID string) *DB {
	d := &DB{db: db, instanceID: instanceID}
	return d
}
// Ping verifies that the underlying database connection is alive.
func (db *DB) Ping() error { return db.db.Ping() }

// InTransaction reports whether this DB is bound to an open transaction.
func (db *DB) InTransaction() bool { return db.tx != nil }
// passwordRegexp matches the password key/value pair in a connection string.
var passwordRegexp = regexp.MustCompile(`password=\S+`)

// redactPassword replaces any password in dbinfo with a placeholder so the
// connection string can be logged safely.
func redactPassword(dbinfo string) string {
	const placeholder = "password=REDACTED"
	return passwordRegexp.ReplaceAllLiteralString(dbinfo, placeholder)
}
// Close closes the database connection.
func (db *DB) Close() error { return db.db.Close() }
// Exec executes a SQL statement and returns the number of rows it affected.
func (db *DB) Exec(ctx context.Context, query string, args ...interface{}) (_ int64, err error) {
	defer logQuery(ctx, query, args, db.instanceID)(&err)

	res, err := db.execResult(ctx, query, args...)
	if err != nil {
		return 0, err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("RowsAffected: %v", err)
	}
	return affected, nil
}
// execResult executes a SQL statement and returns a sql.Result, using the
// open transaction if there is one.
func (db *DB) execResult(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) {
	if db.tx == nil {
		return db.db.ExecContext(ctx, query, args...)
	}
	return db.tx.ExecContext(ctx, query, args...)
}
// Query runs the DB query, using the open transaction if there is one.
func (db *DB) Query(ctx context.Context, query string, args ...interface{}) (_ *sql.Rows, err error) {
	defer logQuery(ctx, query, args, db.instanceID)(&err)
	if db.tx == nil {
		return db.db.QueryContext(ctx, query, args...)
	}
	return db.tx.QueryContext(ctx, query, args...)
}
// QueryRow runs the query and returns a single row.
func (db *DB) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
	// logQuery(...) runs now and returns a closure that is deferred; nil is
	// passed because sql.Row reports its error only at Scan time.
	defer logQuery(ctx, query, args, db.instanceID)(nil)
	start := time.Now()
	// Log extra diagnostics when the context was canceled or timed out.
	defer func() {
		if ctx.Err() != nil {
			d, _ := ctx.Deadline()
			msg := fmt.Sprintf("args=%v; elapsed=%q, start=%q, deadline=%q", args, time.Since(start), start, d)
			log.Errorf(ctx, "QueryRow context error: %v "+msg, ctx.Err())
		}
	}()
	if db.tx != nil {
		return db.tx.QueryRowContext(ctx, query, args...)
	}
	return db.db.QueryRowContext(ctx, query, args...)
}
// Prepare creates a prepared statement, using the open transaction if there
// is one.
func (db *DB) Prepare(ctx context.Context, query string) (*sql.Stmt, error) {
	// BUG fix: the original read `defer logQuery(...)`, which deferred the
	// logQuery call itself and discarded the completion closure it returns,
	// so the query-completion log never fired. Invoke the closure with nil
	// (no error slot), as QueryRow does.
	defer logQuery(ctx, "preparing "+query, nil, db.instanceID)(nil)
	if db.tx != nil {
		return db.tx.PrepareContext(ctx, query)
	}
	return db.db.PrepareContext(ctx, query)
}
// RunQuery executes query, then calls f on each resulting row.
func (db *DB) RunQuery(ctx context.Context, query string, f func(*sql.Rows) error, params ...interface{}) error {
	rows, qerr := db.Query(ctx, query, params...)
	if qerr != nil {
		return qerr
	}
	return processRows(rows, f)
}
// processRows applies f to every row, closes rows, and returns the first
// error encountered by f or by iteration.
func processRows(rows *sql.Rows, f func(*sql.Rows) error) (err error) {
	defer rows.Close()
	for err == nil && rows.Next() {
		err = f(rows)
	}
	if err != nil {
		return err
	}
	return rows.Err()
}
// Transact executes the given function in the context of a SQL transaction at
// the given isolation level, rolling back the transaction if the function
// panics or returns an error.
//
// The given function is called with a DB that is associated with a transaction.
// The DB should be used only inside the function; if it is used to access the
// database after the function returns, the calls will return errors.
//
// If the isolation level requires it, Transact will retry the transaction upon
// serialization failure, so txFunc may be called more than once.
func (db *DB) Transact(ctx context.Context, iso sql.IsolationLevel, txFunc func(*DB) error) (err error) {
	defer derrors.Wrap(&err, "Transact(%s)", iso)

	opts := &sql.TxOptions{Isolation: iso}
	switch iso {
	case sql.LevelRepeatableRead, sql.LevelSerializable:
		// These levels can fail with serialization errors and need retry; see
		// https://www.postgresql.org/docs/11/transaction-iso.html.
		return db.transactWithRetry(ctx, opts, txFunc)
	default:
		return db.transact(ctx, opts, txFunc)
	}
}
// serializationFailureCode is the Postgres error code returned when a serializable
// transaction fails because it would violate serializability.
// See https://www.postgresql.org/docs/current/errcodes-appendix.html.
const serializationFailureCode = "40001"

// transactWithRetry runs txFunc in a transaction, retrying up to maxRetries
// times when the attempt fails with a Postgres serialization failure.
func (db *DB) transactWithRetry(ctx context.Context, opts *sql.TxOptions, txFunc func(*DB) error) (err error) {
	defer derrors.Wrap(&err, "transactWithRetry(%v)", opts)
	// Retry on serialization failure, up to some max.
	// See https://www.postgresql.org/docs/11/transaction-iso.html.
	const maxRetries = 30
	for i := 0; i <= maxRetries; i++ {
		err = db.transact(ctx, opts, txFunc)
		if isSerializationFailure(err) {
			// Record the high-water mark of retries (reported by MaxRetries).
			db.mu.Lock()
			if i > db.maxRetries {
				db.maxRetries = i
			}
			db.mu.Unlock()
			continue
		}
		// The error text mentions the serialization-failure code but the
		// driver did not surface it as one: report loudly instead of retrying.
		if err != nil && strings.Contains(err.Error(), serializationFailureCode) {
			return fmt.Errorf("error text has %q but not recognized as serialization failure: type %T, err %v",
				serializationFailureCode, err, err)
		}
		if i > 0 {
			log.Debugf(ctx, "retried serializable transaction %d time(s)", i)
		}
		return err
	}
	return fmt.Errorf("reached max number of tries due to serialization failure (%d)", maxRetries)
}
// isSerializationFailure reports whether err is a Postgres serialization
// failure, regardless of which driver (pq or pgx) produced it.
func isSerializationFailure(err error) bool {
	var pqErr *pq.Error
	if errors.As(err, &pqErr) {
		return pqErr.Code == serializationFailureCode
	}
	var pgxErr *pgconn.PgError
	if errors.As(err, &pgxErr) {
		return pgxErr.Code == serializationFailureCode
	}
	return false
}
// transact runs txFunc inside a single transaction attempt: it rolls back on
// error or panic (re-panicking), and commits otherwise. A commit failure is
// assigned to the named result so the caller sees it.
func (db *DB) transact(ctx context.Context, opts *sql.TxOptions, txFunc func(*DB) error) (err error) {
	if db.InTransaction() {
		// Nested transactions are not supported.
		return errors.New("a DB Transact function was called on a DB already in a transaction")
	}
	tx, err := db.db.BeginTx(ctx, opts)
	if err != nil {
		return fmt.Errorf("db.BeginTx(): %w", err)
	}
	defer func() {
		if p := recover(); p != nil {
			// Roll back and propagate the panic unchanged.
			tx.Rollback()
			panic(p)
		} else if err != nil {
			tx.Rollback()
		} else {
			// Commit only on success; a failed commit becomes the result.
			if txErr := tx.Commit(); txErr != nil {
				err = fmt.Errorf("tx.Commit(): %w", txErr)
			}
		}
	}()
	// Hand txFunc a DB bound to this transaction.
	dbtx := New(db.db, db.instanceID)
	dbtx.tx = tx
	defer dbtx.logTransaction(ctx, opts)(&err)
	if err := txFunc(dbtx); err != nil {
		return fmt.Errorf("txFunc(tx): %w", err)
	}
	return nil
}
// MaxRetries returns the maximum number of times that a serializable
// transaction was retried.
func (db *DB) MaxRetries() int {
	db.mu.Lock()
	n := db.maxRetries
	db.mu.Unlock()
	return n
}
// OnConflictDoNothing is the SQL conflict action that silently skips rows
// that would violate a uniqueness constraint.
const OnConflictDoNothing = "ON CONFLICT DO NOTHING"
// BulkInsert constructs and executes a multi-value insert statement. The
// query is constructed using the format:
// INSERT INTO <table> (<columns>) VALUES (<placeholders-for-each-item-in-values>)
// If conflictAction is not empty, it is appended to the statement.
//
// The query is executed using a PREPARE statement with the provided values.
func (db *DB) BulkInsert(ctx context.Context, table string, columns []string, values []interface{}, conflictAction string) (err error) {
	defer derrors.Wrap(&err, "DB.BulkInsert(ctx, %q, %v, [%d values], %q)",
		table, columns, len(values), conflictAction)
	// No returning columns and no scan function: a plain insert.
	return db.bulkInsert(ctx, table, columns, nil, values, conflictAction, nil)
}
// BulkInsertReturning is like BulkInsert, but supports returning values from the INSERT statement.
// In addition to the arguments of BulkInsert, it takes a list of columns to return and a function
// to scan those columns. To get the returned values, provide a function that scans them as if
// they were the selected columns of a query. See TestBulkInsert for an example.
func (db *DB) BulkInsertReturning(ctx context.Context, table string, columns []string, values []interface{}, conflictAction string, returningColumns []string, scanFunc func(*sql.Rows) error) (err error) {
	defer derrors.Wrap(&err, "DB.BulkInsertReturning(ctx, %q, %v, [%d values], %q, %v, scanFunc)",
		table, columns, len(values), conflictAction, returningColumns)
	// Both pieces of the RETURNING contract are required together.
	if returningColumns == nil || scanFunc == nil {
		return errors.New("need returningColumns and scan function")
	}
	return db.bulkInsert(ctx, table, columns, returningColumns, values, conflictAction, scanFunc)
}
// BulkUpsert is like BulkInsert, but instead of a conflict action, a list of
// conflicting columns is provided. An "ON CONFLICT (conflict_columns) DO
// UPDATE" clause is added to the statement, with assignments "c=excluded.c" for
// every column c.
func (db *DB) BulkUpsert(ctx context.Context, table string, columns []string, values []interface{}, conflictColumns []string) error {
	action := buildUpsertConflictAction(columns, conflictColumns)
	return db.BulkInsert(ctx, table, columns, values, action)
}
// BulkUpsertReturning is like BulkInsertReturning, but performs an upsert like BulkUpsert.
func (db *DB) BulkUpsertReturning(ctx context.Context, table string, columns []string, values []interface{}, conflictColumns, returningColumns []string, scanFunc func(*sql.Rows) error) error {
	action := buildUpsertConflictAction(columns, conflictColumns)
	return db.BulkInsertReturning(ctx, table, columns, values, action, returningColumns, scanFunc)
}
// bulkInsert inserts values into table in batches, preparing at most one
// statement per distinct batch size. If returningColumns is non-nil, each
// batch's returned rows are passed to scanFunc.
func (db *DB) bulkInsert(ctx context.Context, table string, columns, returningColumns []string, values []interface{}, conflictAction string, scanFunc func(*sql.Rows) error) (err error) {
	// values is flat (row-major), so it must hold a whole number of rows.
	if remainder := len(values) % len(columns); remainder != 0 {
		return fmt.Errorf("modulus of len(values) and len(columns) must be 0: got %d", remainder)
	}
	// Postgres supports up to 65535 parameters, but stop well before that
	// so we don't construct humongous queries.
	const maxParameters = 1000
	// stride is the largest multiple of len(columns) within the limit.
	stride := (maxParameters / len(columns)) * len(columns)
	if stride == 0 {
		// This is a pathological case (len(columns) > maxParameters), but we
		// handle it cautiously.
		return fmt.Errorf("too many columns to insert: %d", len(columns))
	}
	prepare := func(n int) (*sql.Stmt, error) {
		return db.Prepare(ctx, buildInsertQuery(table, columns, returningColumns, n, conflictAction))
	}
	var stmt *sql.Stmt
	for leftBound := 0; leftBound < len(values); leftBound += stride {
		rightBound := leftBound + stride
		if rightBound <= len(values) && stmt == nil {
			// Full-size batch: prepare the stride-sized statement once and
			// reuse it for every full batch.
			stmt, err = prepare(stride)
			if err != nil {
				return err
			}
			defer stmt.Close()
		} else if rightBound > len(values) {
			// Final short batch: prepare a statement sized to the remainder.
			// NOTE: these defers accumulate until the function returns.
			rightBound = len(values)
			stmt, err = prepare(rightBound - leftBound)
			if err != nil {
				return err
			}
			defer stmt.Close()
		}
		valueSlice := values[leftBound:rightBound]
		var err error
		if returningColumns == nil {
			_, err = stmt.ExecContext(ctx, valueSlice...)
		} else {
			var rows *sql.Rows
			rows, err = stmt.QueryContext(ctx, valueSlice...)
			if err != nil {
				return err
			}
			// processRows closes rows.
			err = processRows(rows, scanFunc)
		}
		if err != nil {
			return fmt.Errorf("running bulk insert query, values[%d:%d]): %w", leftBound, rightBound, err)
		}
	}
	return nil
}
// buildInsertQuery builds a multi-value insert query, following the format:
// INSERT TO <table> (<columns>) VALUES (<placeholders-for-each-item-in-values>) <conflictAction>
// If returningColumns is not empty, it appends a RETURNING clause to the query.
//
// When calling buildInsertQuery, it must be true that nvalues % len(columns) == 0.
func buildInsertQuery(table string, columns, returningColumns []string, nvalues int, conflictAction string) string {
	var b strings.Builder
	fmt.Fprintf(&b, "INSERT INTO %s", table)
	fmt.Fprintf(&b, "(%s) VALUES", strings.Join(columns, ", "))

	// Emit one parenthesized placeholder group per row, numbered $1..$nvalues.
	ncols := len(columns)
	nrows := nvalues / ncols
	group := make([]string, ncols)
	for row := 0; row < nrows; row++ {
		if row > 0 {
			b.WriteString(", ")
		}
		for j := 0; j < ncols; j++ {
			group[j] = fmt.Sprintf("$%d", row*ncols+j+1)
		}
		fmt.Fprintf(&b, "(%s)", strings.Join(group, ", "))
	}

	if conflictAction != "" {
		b.WriteString(" " + conflictAction)
	}
	if len(returningColumns) > 0 {
		fmt.Fprintf(&b, " RETURNING %s", strings.Join(returningColumns, ", "))
	}
	return b.String()
}
// buildUpsertConflictAction returns an "ON CONFLICT ... DO UPDATE" clause
// that overwrites every column with the incoming (excluded) value.
func buildUpsertConflictAction(columns, conflictColumns []string) string {
	assignments := make([]string, 0, len(columns))
	for _, col := range columns {
		assignments = append(assignments, fmt.Sprintf("%s=excluded.%[1]s", col))
	}
	return fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET %s",
		strings.Join(conflictColumns, ", "),
		strings.Join(assignments, ", "))
}
// maxBulkUpdateArrayLen is the maximum size of an array that BulkUpdate will send to
// Postgres. (Postgres has no size limit on arrays, but we want to keep the statements
// to a reasonable size.)
// It is a variable for testing: tests may override it to exercise chunking.
var maxBulkUpdateArrayLen = 10000
// BulkUpdate executes multiple UPDATE statements in a transaction.
//
// Columns must contain the names of some of table's columns. The first is treated
// as a key; that is, the values to update are matched with existing rows by comparing
// the values of the first column.
//
// Types holds the database type of each column. For example,
// []string{"INT", "TEXT"}
//
// Values contains one slice of values per column. (Note that this is unlike BulkInsert, which
// takes a single slice of interleaved values.)
func (db *DB) BulkUpdate(ctx context.Context, table string, columns, types []string, values [][]interface{}) (err error) {
	defer derrors.Wrap(&err, "DB.BulkUpdate(ctx, tx, %q, %v, [%d values])",
		table, columns, len(values))

	switch {
	case len(columns) < 2:
		return errors.New("need at least two columns")
	case len(columns) != len(values):
		return errors.New("len(values) != len(columns)")
	}

	// All per-column slices must describe the same number of rows.
	nRows := len(values[0])
	for _, col := range values[1:] {
		if len(col) != nRows {
			return errors.New("all values slices must be the same length")
		}
	}

	query := buildBulkUpdateQuery(table, columns, types)
	// Send the rows in chunks so each statement stays a reasonable size.
	for start := 0; start < nRows; start += maxBulkUpdateArrayLen {
		end := start + maxBulkUpdateArrayLen
		if end > nRows {
			end = nRows
		}
		args := make([]interface{}, 0, len(values))
		for _, col := range values {
			args = append(args, pq.Array(col[start:end]))
		}
		if _, err := db.Exec(ctx, query, args...); err != nil {
			return fmt.Errorf("db.Exec(%q, values[%d:%d]): %w", query, start, end, err)
		}
	}
	return nil
}
// buildBulkUpdateQuery constructs an UPDATE statement that joins the target
// table against unnested parameter arrays, keyed on the first column.
func buildBulkUpdateQuery(table string, columns, types []string) string {
	var assignments, unnests []string
	// "c = data.c" for every non-key column.
	for _, col := range columns[1:] {
		assignments = append(assignments, fmt.Sprintf("%s = data.%[1]s", col))
	}
	// "UNNEST($N::TYPE[]) AS c" for every column; the explicit array type is
	// required or Postgres complains that UNNEST is not unique.
	for i, col := range columns {
		unnests = append(unnests, fmt.Sprintf("UNNEST($%d::%s[]) AS %s", i+1, types[i], col))
	}
	return fmt.Sprintf(`
UPDATE %[1]s
SET %[2]s
FROM (SELECT %[3]s) AS data
WHERE %[1]s.%[4]s = data.%[4]s`,
		table,                           // 1
		strings.Join(assignments, ", "), // 2
		strings.Join(unnests, ", "),     // 3
		columns[0],                      // 4
	)
}
// emptyStringScanner is a sql.Scanner that behaves like sql.NullString but
// stores the empty string when the scanned value is NULL.
type emptyStringScanner struct {
	ptr *string
}

// Scan implements sql.Scanner, writing "" to the target for NULL values.
func (e emptyStringScanner) Scan(value interface{}) error {
	var ns sql.NullString
	if err := ns.Scan(value); err != nil {
		return err
	}
	*e.ptr = ns.String // ns.String is "" when value was NULL
	return nil
}

// NullIsEmpty returns a sql.Scanner that writes the empty string to s if the
// sql.Value is NULL.
func NullIsEmpty(s *string) sql.Scanner {
	return emptyStringScanner{ptr: s}
}
internal/database: gather more info about serialization retry
A serialization failure isn't being retried for reasons I
don't understand, so I added logging to gather more information.
Change-Id: Ifa2bc12d4caccde3d48520706692ad8f7bc75cd5
Reviewed-on: https://go-review.googlesource.com/c/pkgsite/+/294951
Trust: Jonathan Amsterdam <e3d3698b2ccd5955e4adf250d0785062d0f9018b@google.com>
Run-TryBot: Jonathan Amsterdam <e3d3698b2ccd5955e4adf250d0785062d0f9018b@google.com>
TryBot-Result: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Reviewed-by: Jamal Carvalho <e0195770807aa8c82b0b128d9c0423b5ad035172@golang.org>
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package database adds some useful functionality to a sql.DB.
// It is independent of the database driver and the
// DB schema.
package database
import (
"context"
"database/sql"
"errors"
"fmt"
"regexp"
"strings"
"sync"
"time"
"github.com/jackc/pgconn"
"github.com/lib/pq"
"golang.org/x/pkgsite/internal/derrors"
"golang.org/x/pkgsite/internal/log"
)
// DB wraps a sql.DB. The methods it exports correspond closely to those of
// sql.DB. They enhance the original by requiring a context argument, and by
// logging the query and any resulting errors.
//
// A DB may represent a transaction. If so, its execution and query methods
// operate within the transaction.
type DB struct {
	db         *sql.DB
	instanceID string     // identifies this instance in query logs
	tx         *sql.Tx    // non-nil when this DB represents a transaction
	mu         sync.Mutex // protects maxRetries
	maxRetries int        // max times a single transaction was retried
}
// Open creates a new DB for the given connection string, verifying that the
// database is reachable before returning.
func Open(driverName, dbinfo, instanceID string) (_ *DB, err error) {
	defer derrors.Wrap(&err, "database.Open(%q, %q)",
		driverName, redactPassword(dbinfo))

	db, err := sql.Open(driverName, dbinfo)
	if err != nil {
		return nil, err
	}

	// Ping with a bounded timeout so a bad connection string fails fast.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if pingErr := db.PingContext(ctx); pingErr != nil {
		return nil, pingErr
	}
	return New(db, instanceID), nil
}
// New creates a new DB from a sql.DB.
func New(db *sql.DB, instanceID string) *DB {
	d := &DB{db: db, instanceID: instanceID}
	return d
}
// Ping verifies that the underlying database connection is alive.
func (db *DB) Ping() error { return db.db.Ping() }

// InTransaction reports whether this DB is bound to an open transaction.
func (db *DB) InTransaction() bool { return db.tx != nil }
// passwordRegexp matches the password key/value pair in a connection string.
var passwordRegexp = regexp.MustCompile(`password=\S+`)

// redactPassword replaces any password in dbinfo with a placeholder so the
// connection string can be logged safely.
func redactPassword(dbinfo string) string {
	const placeholder = "password=REDACTED"
	return passwordRegexp.ReplaceAllLiteralString(dbinfo, placeholder)
}
// Close closes the database connection.
func (db *DB) Close() error { return db.db.Close() }
// Exec executes a SQL statement and returns the number of rows it affected.
func (db *DB) Exec(ctx context.Context, query string, args ...interface{}) (_ int64, err error) {
	defer logQuery(ctx, query, args, db.instanceID)(&err)

	res, err := db.execResult(ctx, query, args...)
	if err != nil {
		return 0, err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("RowsAffected: %v", err)
	}
	return affected, nil
}
// execResult executes a SQL statement and returns a sql.Result, using the
// open transaction if there is one.
func (db *DB) execResult(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) {
	if db.tx == nil {
		return db.db.ExecContext(ctx, query, args...)
	}
	return db.tx.ExecContext(ctx, query, args...)
}
// Query runs the DB query, using the open transaction if there is one.
func (db *DB) Query(ctx context.Context, query string, args ...interface{}) (_ *sql.Rows, err error) {
	defer logQuery(ctx, query, args, db.instanceID)(&err)
	if db.tx == nil {
		return db.db.QueryContext(ctx, query, args...)
	}
	return db.tx.QueryContext(ctx, query, args...)
}
// QueryRow runs the query and returns a single row.
func (db *DB) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
	// logQuery(...) runs now and returns a closure that is deferred; nil is
	// passed because sql.Row reports its error only at Scan time.
	defer logQuery(ctx, query, args, db.instanceID)(nil)
	start := time.Now()
	// Log extra diagnostics when the context was canceled or timed out.
	defer func() {
		if ctx.Err() != nil {
			d, _ := ctx.Deadline()
			msg := fmt.Sprintf("args=%v; elapsed=%q, start=%q, deadline=%q", args, time.Since(start), start, d)
			log.Errorf(ctx, "QueryRow context error: %v "+msg, ctx.Err())
		}
	}()
	if db.tx != nil {
		return db.tx.QueryRowContext(ctx, query, args...)
	}
	return db.db.QueryRowContext(ctx, query, args...)
}
// Prepare creates a prepared statement, using the open transaction if there
// is one.
func (db *DB) Prepare(ctx context.Context, query string) (*sql.Stmt, error) {
	// BUG fix: the original read `defer logQuery(...)`, which deferred the
	// logQuery call itself and discarded the completion closure it returns,
	// so the query-completion log never fired. Invoke the closure with nil
	// (no error slot), as QueryRow does.
	defer logQuery(ctx, "preparing "+query, nil, db.instanceID)(nil)
	if db.tx != nil {
		return db.tx.PrepareContext(ctx, query)
	}
	return db.db.PrepareContext(ctx, query)
}
// RunQuery executes query, then calls f on each resulting row.
func (db *DB) RunQuery(ctx context.Context, query string, f func(*sql.Rows) error, params ...interface{}) error {
	rows, qerr := db.Query(ctx, query, params...)
	if qerr != nil {
		return qerr
	}
	return processRows(rows, f)
}
// processRows applies f to every row, closes rows, and returns the first
// error encountered by f or by iteration.
func processRows(rows *sql.Rows, f func(*sql.Rows) error) (err error) {
	defer rows.Close()
	for err == nil && rows.Next() {
		err = f(rows)
	}
	if err != nil {
		return err
	}
	return rows.Err()
}
// Transact executes the given function in the context of a SQL transaction at
// the given isolation level, rolling back the transaction if the function
// panics or returns an error.
//
// The given function is called with a DB that is associated with a transaction.
// The DB should be used only inside the function; if it is used to access the
// database after the function returns, the calls will return errors.
//
// If the isolation level requires it, Transact will retry the transaction upon
// serialization failure, so txFunc may be called more than once.
func (db *DB) Transact(ctx context.Context, iso sql.IsolationLevel, txFunc func(*DB) error) (err error) {
	defer derrors.Wrap(&err, "Transact(%s)", iso)

	opts := &sql.TxOptions{Isolation: iso}
	switch iso {
	case sql.LevelRepeatableRead, sql.LevelSerializable:
		// These levels can fail with serialization errors and need retry; see
		// https://www.postgresql.org/docs/11/transaction-iso.html.
		return db.transactWithRetry(ctx, opts, txFunc)
	default:
		return db.transact(ctx, opts, txFunc)
	}
}
// serializationFailureCode is the Postgres error code returned when a serializable
// transaction fails because it would violate serializability.
// See https://www.postgresql.org/docs/current/errcodes-appendix.html.
const serializationFailureCode = "40001"

// transactWithRetry runs txFunc in a transaction, retrying up to maxRetries
// times when the attempt fails with a Postgres serialization failure.
func (db *DB) transactWithRetry(ctx context.Context, opts *sql.TxOptions, txFunc func(*DB) error) (err error) {
	defer derrors.Wrap(&err, "transactWithRetry(%v)", opts)
	// Retry on serialization failure, up to some max.
	// See https://www.postgresql.org/docs/11/transaction-iso.html.
	const maxRetries = 30
	for i := 0; i <= maxRetries; i++ {
		err = db.transact(ctx, opts, txFunc)
		if isSerializationFailure(err) {
			// Record the high-water mark of retries (reported by MaxRetries).
			db.mu.Lock()
			if i > db.maxRetries {
				db.maxRetries = i
			}
			db.mu.Unlock()
			continue
		}
		if err != nil {
			// Log the concrete error type to diagnose failures that should
			// have been recognized as serialization failures but were not.
			log.Debugf(ctx, "transactWithRetry: error type %T: %[1]v", err)
			if strings.Contains(err.Error(), serializationFailureCode) {
				return fmt.Errorf("error text has %q but not recognized as serialization failure: type %T, err %v",
					serializationFailureCode, err, err)
			}
		}
		if i > 0 {
			log.Debugf(ctx, "retried serializable transaction %d time(s)", i)
		}
		return err
	}
	return fmt.Errorf("reached max number of tries due to serialization failure (%d)", maxRetries)
}
// isSerializationFailure reports whether err represents a Postgres
// serialization failure (error code 40001).
func isSerializationFailure(err error) bool {
	// The concrete error type depends on which driver produced it, so try
	// both the lib/pq and the pgx representations.
	var pqErr *pq.Error
	ok := errors.As(err, &pqErr) && pqErr.Code == serializationFailureCode
	if !ok {
		var pgxErr *pgconn.PgError
		ok = errors.As(err, &pgxErr) && pgxErr.Code == serializationFailureCode
	}
	return ok
}
// transact runs txFunc inside a single transaction on db, committing on
// success and rolling back on error or panic. Nested transactions are not
// supported.
func (db *DB) transact(ctx context.Context, opts *sql.TxOptions, txFunc func(*DB) error) (err error) {
	if db.InTransaction() {
		return errors.New("a DB Transact function was called on a DB already in a transaction")
	}
	tx, err := db.db.BeginTx(ctx, opts)
	if err != nil {
		return fmt.Errorf("db.BeginTx(): %w", err)
	}
	defer func() {
		if p := recover(); p != nil {
			// Roll back, then re-panic so the caller sees the original panic.
			tx.Rollback()
			panic(p)
		} else if err != nil {
			tx.Rollback()
		} else {
			// Commit errors (e.g. serialization failures detected at commit
			// time) are reported through the named return value.
			if txErr := tx.Commit(); txErr != nil {
				err = fmt.Errorf("tx.Commit(): %w", txErr)
			}
		}
	}()
	// txFunc runs against a fresh DB value bound to this transaction; using
	// that DB after transact returns will fail.
	dbtx := New(db.db, db.instanceID)
	dbtx.tx = tx
	defer dbtx.logTransaction(ctx, opts)(&err)
	if err := txFunc(dbtx); err != nil {
		return fmt.Errorf("txFunc(tx): %w", err)
	}
	return nil
}
// MaxRetries returns the maximum number of times that a serializable
// transaction was retried.
func (db *DB) MaxRetries() int {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.maxRetries
}
// OnConflictDoNothing can be passed as the conflictAction of BulkInsert to
// silently skip rows that would violate a uniqueness constraint.
const OnConflictDoNothing = "ON CONFLICT DO NOTHING"
// BulkInsert constructs and executes a multi-value insert statement. The
// query is constructed using the format:
// INSERT INTO <table> (<columns>) VALUES (<placeholders-for-each-item-in-values>)
// If conflictAction is not empty, it is appended to the statement.
//
// The query is executed using a PREPARE statement with the provided values.
//
// values is a single flat slice of interleaved rows; len(values) must be a
// multiple of len(columns).
func (db *DB) BulkInsert(ctx context.Context, table string, columns []string, values []interface{}, conflictAction string) (err error) {
	defer derrors.Wrap(&err, "DB.BulkInsert(ctx, %q, %v, [%d values], %q)",
		table, columns, len(values), conflictAction)
	return db.bulkInsert(ctx, table, columns, nil, values, conflictAction, nil)
}
// BulkInsertReturning is like BulkInsert, but supports returning values from
// the INSERT statement. In addition to the arguments of BulkInsert, it takes
// a list of columns to return and a function to scan those columns. To get
// the returned values, provide a function that scans them as if they were the
// selected columns of a query. See TestBulkInsert for an example.
func (db *DB) BulkInsertReturning(ctx context.Context, table string, columns []string, values []interface{}, conflictAction string, returningColumns []string, scanFunc func(*sql.Rows) error) (err error) {
	defer derrors.Wrap(&err, "DB.BulkInsertReturning(ctx, %q, %v, [%d values], %q, %v, scanFunc)",
		table, columns, len(values), conflictAction, returningColumns)
	// Both the returning columns and the scanner are required.
	if returningColumns != nil && scanFunc != nil {
		return db.bulkInsert(ctx, table, columns, returningColumns, values, conflictAction, scanFunc)
	}
	return errors.New("need returningColumns and scan function")
}
// BulkUpsert is like BulkInsert, but instead of a conflict action, a list of
// conflicting columns is provided. An "ON CONFLICT (conflict_columns) DO
// UPDATE" clause is added to the statement, with assignments "c=excluded.c"
// for every column c.
func (db *DB) BulkUpsert(ctx context.Context, table string, columns []string, values []interface{}, conflictColumns []string) error {
	return db.BulkInsert(ctx, table, columns, values,
		buildUpsertConflictAction(columns, conflictColumns))
}
// BulkUpsertReturning is like BulkInsertReturning, but performs an upsert
// like BulkUpsert.
func (db *DB) BulkUpsertReturning(ctx context.Context, table string, columns []string, values []interface{}, conflictColumns, returningColumns []string, scanFunc func(*sql.Rows) error) error {
	action := buildUpsertConflictAction(columns, conflictColumns)
	return db.BulkInsertReturning(ctx, table, columns, values, action, returningColumns, scanFunc)
}
// bulkInsert is the common implementation of BulkInsert and
// BulkInsertReturning. It prepares one statement per distinct batch size and
// executes it over successive windows of values. If returningColumns is
// non-nil, each batch's result rows are passed to scanFunc.
func (db *DB) bulkInsert(ctx context.Context, table string, columns, returningColumns []string, values []interface{}, conflictAction string, scanFunc func(*sql.Rows) error) (err error) {
	if remainder := len(values) % len(columns); remainder != 0 {
		return fmt.Errorf("modulus of len(values) and len(columns) must be 0: got %d", remainder)
	}
	// Postgres supports up to 65535 parameters, but stop well before that
	// so we don't construct humongous queries.
	const maxParameters = 1000
	// stride is the number of values per full batch: the largest multiple of
	// len(columns) that fits within maxParameters.
	stride := (maxParameters / len(columns)) * len(columns)
	if stride == 0 {
		// This is a pathological case (len(columns) > maxParameters), but we
		// handle it cautiously.
		return fmt.Errorf("too many columns to insert: %d", len(columns))
	}
	prepare := func(n int) (*sql.Stmt, error) {
		return db.Prepare(ctx, buildInsertQuery(table, columns, returningColumns, n, conflictAction))
	}
	var stmt *sql.Stmt
	for leftBound := 0; leftBound < len(values); leftBound += stride {
		rightBound := leftBound + stride
		if rightBound <= len(values) && stmt == nil {
			// First full-size batch: prepare the full-stride statement once
			// and reuse it for every subsequent full batch.
			stmt, err = prepare(stride)
			if err != nil {
				return err
			}
			defer stmt.Close()
		} else if rightBound > len(values) {
			// Final partial batch: prepare a statement sized to the remainder.
			rightBound = len(values)
			stmt, err = prepare(rightBound - leftBound)
			if err != nil {
				return err
			}
			defer stmt.Close()
		}
		valueSlice := values[leftBound:rightBound]
		var err error
		if returningColumns == nil {
			_, err = stmt.ExecContext(ctx, valueSlice...)
		} else {
			var rows *sql.Rows
			rows, err = stmt.QueryContext(ctx, valueSlice...)
			if err != nil {
				return err
			}
			err = processRows(rows, scanFunc)
		}
		if err != nil {
			return fmt.Errorf("running bulk insert query, values[%d:%d]): %w", leftBound, rightBound, err)
		}
	}
	return nil
}
// buildInsertQuery builds a multi-value insert statement of the form
//
//	INSERT INTO <table> (<columns>) VALUES (<placeholders>), (<placeholders>), ...
//
// If conflictAction is not empty it is appended, and if returningColumns is
// not empty a RETURNING clause is appended as well.
//
// When calling buildInsertQuery, it must be true that nvalues % len(columns) == 0.
func buildInsertQuery(table string, columns, returningColumns []string, nvalues int, conflictAction string) string {
	ncols := len(columns)
	// Build one "($i, $i+1, ...)" placeholder group per row of values.
	groups := make([]string, 0, nvalues/ncols)
	row := make([]string, ncols)
	for start := 0; start < nvalues; start += ncols {
		for j := range row {
			row[j] = fmt.Sprintf("$%d", start+j+1)
		}
		groups = append(groups, "("+strings.Join(row, ", ")+")")
	}
	var b strings.Builder
	fmt.Fprintf(&b, "INSERT INTO %s(%s) VALUES%s",
		table, strings.Join(columns, ", "), strings.Join(groups, ", "))
	if conflictAction != "" {
		b.WriteString(" " + conflictAction)
	}
	if len(returningColumns) > 0 {
		fmt.Fprintf(&b, " RETURNING %s", strings.Join(returningColumns, ", "))
	}
	return b.String()
}
// buildUpsertConflictAction returns an ON CONFLICT clause that, when a row
// conflicts on conflictColumns, updates every column in columns to its
// proposed ("excluded") value.
func buildUpsertConflictAction(columns, conflictColumns []string) string {
	assignments := make([]string, 0, len(columns))
	for _, col := range columns {
		assignments = append(assignments, col+"=excluded."+col)
	}
	return fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET %s",
		strings.Join(conflictColumns, ", "),
		strings.Join(assignments, ", "))
}
// maxBulkUpdateArrayLen is the maximum size of an array that BulkUpdate will send to
// Postgres. (Postgres has no size limit on arrays, but we want to keep the statements
// to a reasonable size.)
// It is a variable (rather than a constant) so tests can lower it to exercise
// the batching path.
var maxBulkUpdateArrayLen = 10000
// BulkUpdate executes multiple UPDATE statements in a transaction.
//
// Columns must contain the names of some of table's columns. The first is treated
// as a key; that is, the values to update are matched with existing rows by comparing
// the values of the first column.
//
// Types holds the database type of each column. For example,
// []string{"INT", "TEXT"}
//
// Values contains one slice of values per column. (Note that this is unlike BulkInsert, which
// takes a single slice of interleaved values.)
func (db *DB) BulkUpdate(ctx context.Context, table string, columns, types []string, values [][]interface{}) (err error) {
	defer derrors.Wrap(&err, "DB.BulkUpdate(ctx, tx, %q, %v, [%d values])",
		table, columns, len(values))
	// Need at least one key column and one column to update.
	if len(columns) < 2 {
		return errors.New("need at least two columns")
	}
	if len(columns) != len(values) {
		return errors.New("len(values) != len(columns)")
	}
	// Every column must supply a value for every row.
	nRows := len(values[0])
	for _, v := range values[1:] {
		if len(v) != nRows {
			return errors.New("all values slices must be the same length")
		}
	}
	query := buildBulkUpdateQuery(table, columns, types)
	// Send the rows in batches of at most maxBulkUpdateArrayLen, passing each
	// column's values as a single Postgres array parameter.
	for left := 0; left < nRows; left += maxBulkUpdateArrayLen {
		right := left + maxBulkUpdateArrayLen
		if right > nRows {
			right = nRows
		}
		var args []interface{}
		for _, vs := range values {
			args = append(args, pq.Array(vs[left:right]))
		}
		if _, err := db.Exec(ctx, query, args...); err != nil {
			return fmt.Errorf("db.Exec(%q, values[%d:%d]): %w", query, left, right, err)
		}
	}
	return nil
}
// buildBulkUpdateQuery returns an UPDATE statement that matches rows on the
// first (key) column and sets every other column from array parameters, one
// UNNEST-ed array per column.
func buildBulkUpdateQuery(table string, columns, types []string) string {
	key := columns[0]
	var sets, unnests []string
	for i, col := range columns {
		// "UNNEST($n::TYPE[]) AS col" for every column; the explicit type is
		// required or Postgres complains that UNNEST is not unique.
		unnests = append(unnests, fmt.Sprintf("UNNEST($%d::%s[]) AS %s", i+1, types[i], col))
		// "col = data.col" for every non-key column.
		if i > 0 {
			sets = append(sets, fmt.Sprintf("%s = data.%[1]s", col))
		}
	}
	return fmt.Sprintf(`
UPDATE %[1]s
SET %[2]s
FROM (SELECT %[3]s) AS data
WHERE %[1]s.%[4]s = data.%[4]s`,
		table,                     // 1
		strings.Join(sets, ", "),  // 2
		strings.Join(unnests, ", "), // 3
		key, // 4
	)
}
// emptyStringScanner is a sql.Scanner that behaves like sql.NullString but
// simply writes the empty string when the scanned value is NULL.
type emptyStringScanner struct {
	ptr *string
}

// Scan stores value into the wrapped string pointer, mapping NULL to "".
func (e emptyStringScanner) Scan(value interface{}) error {
	var nullable sql.NullString
	err := nullable.Scan(value)
	if err == nil {
		// nullable.String is "" when the value was NULL.
		*e.ptr = nullable.String
	}
	return err
}

// NullIsEmpty returns a sql.Scanner that writes the empty string to s if the
// sql.Value is NULL.
func NullIsEmpty(s *string) sql.Scanner {
	return emptyStringScanner{ptr: s}
}
|
package display
import (
"strconv"
"unicode/utf8"
runewidth "github.com/mattn/go-runewidth"
"github.com/zyedidia/micro/internal/buffer"
"github.com/zyedidia/micro/internal/config"
"github.com/zyedidia/micro/internal/screen"
"github.com/zyedidia/micro/internal/util"
"github.com/zyedidia/tcell"
)
// The BufWindow provides a way of displaying a certain section
// of a buffer
type BufWindow struct {
	*View

	// Buffer being shown in this window
	Buf *buffer.Buffer

	active bool // whether this window has focus (controls cursor display)

	sline *StatusLine // status line drawn at the bottom of the window

	gutterOffset int  // columns consumed by the gutter and line numbers
	drawStatus   bool // whether a status line (or divider) occupies the last row
}
// NewBufWindow creates a new window at a location in the screen with a width and height
func NewBufWindow(x, y, width, height int, buf *buffer.Buffer) *BufWindow {
	w := &BufWindow{
		View:   &View{X: x, Y: y, Width: width, Height: height},
		Buf:    buf,
		active: true,
	}
	w.sline = NewStatusLine(w)
	return w
}
// SetBuffer changes the buffer displayed in this window.
func (w *BufWindow) SetBuffer(b *buffer.Buffer) {
	w.Buf = b
}
// GetView returns this window's view (position and size information).
func (w *BufWindow) GetView() *View {
	return w.View
}
// SetView replaces this window's view (position and size information).
func (w *BufWindow) SetView(view *View) {
	w.View = view
}
// Resize resizes this window and relocates it so the cursor stays in view.
func (w *BufWindow) Resize(width, height int) {
	w.Width, w.Height = width, height
	w.Relocate()
}
// SetActive marks this window as focused (or not); only active windows
// display cursors.
func (w *BufWindow) SetActive(b bool) {
	w.active = b
}
// IsActive reports whether this window is focused.
func (w *BufWindow) IsActive() bool {
	return w.active
}
// getStartInfo computes, for buffer line lineN, where drawing should begin
// when the window is horizontally scrolled by n visual columns.
//
// It returns the remaining bytes of the line starting at the first rune that
// is at least partially visible, the number of visual columns of that rune
// already consumed by the scroll (n minus the width skipped so far), the
// character index of that rune, and a pointer to the most recent highlight
// style encountered (nil if none).
func (w *BufWindow) getStartInfo(n, lineN int) ([]byte, int, int, *tcell.Style) {
	tabsize := util.IntOpt(w.Buf.Settings["tabsize"])
	width := 0
	bloc := buffer.Loc{0, lineN}
	b := w.Buf.LineBytes(lineN)
	curStyle := config.DefStyle
	var s *tcell.Style
	for len(b) > 0 {
		r, size := utf8.DecodeRune(b)
		// As in the original, getStyle always receives the unchanged initial
		// style; we only remember a pointer to the most recent matched style.
		cs, found := w.getStyle(curStyle, bloc, r)
		if found {
			s = &cs
		}
		// rw is the visual width of this rune. (The original declared this as
		// `w`, shadowing the receiver; renamed to avoid that.)
		rw := 0
		switch r {
		case '\t':
			// A tab advances to the next tab stop.
			ts := tabsize - (width % tabsize)
			rw = ts
		default:
			rw = runewidth.RuneWidth(r)
		}
		if width+rw > n {
			// This rune straddles or follows visual column n: start here.
			return b, n - width, bloc.X, s
		}
		width += rw
		b = b[size:]
		bloc.X++
	}
	return b, n - width, bloc.X, s
}
// Clear resets all cells in this window to the default style
func (w *BufWindow) Clear() {
	for row := 0; row < w.Height; row++ {
		for col := 0; col < w.Width; col++ {
			screen.SetContent(w.X+col, w.Y+row, ' ', nil, config.DefStyle)
		}
	}
}
// Bottomline returns the line number of the lowest line in the view
// You might think that this is obviously just v.StartLine + v.Height
// but if softwrap is enabled things get complicated since one buffer
// line can take up multiple lines in the view
func (w *BufWindow) Bottomline() int {
	if !w.Buf.Settings["softwrap"].(bool) {
		h := w.StartLine + w.Height - 1
		if w.drawStatus {
			// The last row is occupied by the status line, not buffer text.
			h--
		}
		return h
	}
	// With softwrap, determine which buffer line is drawn at the window's
	// bottom row by simulating the draw.
	l := w.LocFromVisual(buffer.Loc{0, w.Y + w.Height})
	return l.Y
}
// Relocate moves the view window so that the cursor is in view
// This is useful if the user has scrolled far away, and then starts typing
// Returns true if the window location is moved
func (w *BufWindow) Relocate() bool {
	b := w.Buf
	// how many buffer lines are in the view
	height := w.Bottomline() + 1 - w.StartLine
	h := w.Height
	if w.drawStatus {
		h--
	}
	if b.LinesNum() <= h {
		// The whole buffer fits; treat the full window height as the view height.
		height = w.Height
	}
	ret := false
	activeC := w.Buf.GetActiveCursor()
	cy := activeC.Y
	// scrollmargin is the number of context lines to keep visible above and
	// below the cursor.
	scrollmargin := int(b.Settings["scrollmargin"].(float64))
	// Scroll up if the cursor is above the view (respecting the margin).
	if cy < w.StartLine+scrollmargin && cy > scrollmargin-1 {
		w.StartLine = cy - scrollmargin
		ret = true
	} else if cy < w.StartLine {
		w.StartLine = cy
		ret = true
	}
	// Scroll down if the cursor is below the view (respecting the margin).
	if cy > w.StartLine+height-1-scrollmargin && cy < b.LinesNum()-scrollmargin {
		w.StartLine = cy - height + 1 + scrollmargin
		ret = true
	} else if cy >= b.LinesNum()-scrollmargin && cy >= height {
		w.StartLine = b.LinesNum() - height
		ret = true
	}
	// horizontal relocation (scrolling)
	if !b.Settings["softwrap"].(bool) {
		cx := activeC.GetVisualX()
		if cx < w.StartCol {
			w.StartCol = cx
			ret = true
		}
		if cx+w.gutterOffset+1 > w.StartCol+w.Width {
			w.StartCol = cx - w.Width + w.gutterOffset + 1
			ret = true
		}
	}
	return ret
}
// LocFromVisual takes a visual location (x and y position) and returns the
// position in the buffer corresponding to the visual location
// Computing the buffer location requires essentially drawing the entire screen
// to account for complications like softwrap, wide characters, and horizontal scrolling
// If the requested position does not correspond to a buffer location it returns
// the nearest position
func (w *BufWindow) LocFromVisual(svloc buffer.Loc) buffer.Loc {
	b := w.Buf
	hasMessage := len(b.Messages) > 0
	bufHeight := w.Height
	if w.drawStatus {
		bufHeight--
	}
	// Exclude the scrollbar column so that softwrap wrapping here matches
	// displayBuffer, which wraps at bufWidth rather than the full width.
	// (Previously this function wrapped at w.Width, disagreeing with the
	// drawing code when the scrollbar was visible.)
	bufWidth := w.Width
	if w.Buf.Settings["scrollbar"].(bool) && w.Buf.LinesNum() > w.Height {
		bufWidth--
	}
	// We need to know the string length of the largest line number
	// so we can pad appropriately when displaying line numbers
	maxLineNumLength := len(strconv.Itoa(b.LinesNum()))
	tabsize := int(b.Settings["tabsize"].(float64))
	softwrap := b.Settings["softwrap"].(bool)
	// this represents the current draw position
	// within the current window
	vloc := buffer.Loc{X: 0, Y: 0}
	// this represents the current draw position in the buffer (char positions)
	bloc := buffer.Loc{X: -1, Y: w.StartLine}
	for vloc.Y = 0; vloc.Y < bufHeight; vloc.Y++ {
		vloc.X = 0
		// Skip the gutter and line-number columns, mirroring displayBuffer.
		if hasMessage {
			vloc.X += 2
		}
		if b.Settings["ruler"].(bool) {
			vloc.X += maxLineNumLength + 1
		}
		line := b.LineBytes(bloc.Y)
		line, nColsBeforeStart, bslice := util.SliceVisualEnd(line, w.StartCol, tabsize)
		bloc.X = bslice
		draw := func() {
			if nColsBeforeStart <= 0 {
				vloc.X++
			}
			nColsBeforeStart--
		}
		totalwidth := w.StartCol - nColsBeforeStart
		// The requested position is in the gutter/number area of this row.
		if svloc.X <= vloc.X+w.X && vloc.Y+w.Y == svloc.Y {
			return bloc
		}
		for len(line) > 0 {
			if vloc.X+w.X == svloc.X && vloc.Y+w.Y == svloc.Y {
				return bloc
			}
			r, size := utf8.DecodeRune(line)
			draw()
			width := 0
			switch r {
			case '\t':
				ts := tabsize - (totalwidth % tabsize)
				width = ts
			default:
				width = runewidth.RuneWidth(r)
			}
			// Draw any extra characters either spaces for tabs or @ for incomplete wide runes
			if width > 1 {
				for i := 1; i < width; i++ {
					if vloc.X+w.X == svloc.X && vloc.Y+w.Y == svloc.Y {
						return bloc
					}
					draw()
				}
			}
			bloc.X++
			line = line[size:]
			totalwidth += width
			// If we reach the end of the window then we either stop or we wrap for softwrap
			if vloc.X >= bufWidth {
				if !softwrap {
					break
				} else {
					vloc.Y++
					if vloc.Y >= bufHeight {
						break
					}
					vloc.X = 0
					// This will draw an empty line number because the current line is wrapped
					vloc.X += maxLineNumLength + 1
				}
			}
		}
		// The requested row ended before reaching svloc.X: return the line end.
		if vloc.Y+w.Y == svloc.Y {
			return bloc
		}
		if bloc.Y+1 >= b.LinesNum() || vloc.Y+1 >= bufHeight {
			return bloc
		}
		bloc.X = w.StartCol
		bloc.Y++
	}
	return buffer.Loc{}
}
// drawGutter draws the two gutter cells for the buffer line at bloc, marking
// the line with '>' (in the message's style) when a message starts or ends
// there, and advances vloc past the gutter.
func (w *BufWindow) drawGutter(vloc *buffer.Loc, bloc *buffer.Loc) {
	marker := ' '
	style := config.DefStyle
	for _, msg := range w.Buf.Messages {
		if msg.Start.Y == bloc.Y || msg.End.Y == bloc.Y {
			style = msg.Style()
			marker = '>'
			break
		}
	}
	// The gutter is two cells wide.
	for i := 0; i < 2; i++ {
		screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, marker, nil, style)
		vloc.X++
	}
}
// drawLineNum writes the 1-based line number for bloc.Y at the current draw
// position, right-aligned within maxLineNumLength columns and followed by a
// separator space. On softwrapped continuation rows the digits are blanked.
// vloc is advanced past everything written.
func (w *BufWindow) drawLineNum(lineNumStyle tcell.Style, softwrapped bool, maxLineNumLength int, vloc *buffer.Loc, bloc *buffer.Loc) {
	put := func(r rune) {
		screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, r, nil, lineNumStyle)
		vloc.X++
	}
	num := strconv.Itoa(bloc.Y + 1)
	// Left padding to right-align the number.
	for i := len(num); i < maxLineNumLength; i++ {
		put(' ')
	}
	// The digits themselves (blanked on wrapped continuation rows).
	for _, digit := range num {
		if softwrapped {
			put(' ')
		} else {
			put(digit)
		}
	}
	// Separator between the number column and the text.
	put(' ')
}
// getStyle returns the highlight style for the given character position.
// When no highlight group starts there, the passed-in style is returned
// unchanged along with found == false.
func (w *BufWindow) getStyle(style tcell.Style, bloc buffer.Loc, r rune) (tcell.Style, bool) {
	group, ok := w.Buf.Match(bloc.Y)[bloc.X]
	if !ok {
		return style, false
	}
	return config.GetColor(group.String()), true
}
// showCursor displays the terminal cursor (or a fake cursor for secondary
// cursors) at the given screen position — but only when this window is active.
func (w *BufWindow) showCursor(x, y int, main bool) {
	if !w.active {
		return
	}
	if main {
		screen.ShowCursor(x, y)
		return
	}
	screen.ShowFakeCursorMulti(x, y)
}
// displayBuffer draws the buffer being shown in this window on the screen.Screen
func (w *BufWindow) displayBuffer() {
	b := w.Buf
	hasMessage := len(b.Messages) > 0
	// The usable text area excludes the status line row and scrollbar column.
	bufHeight := w.Height
	if w.drawStatus {
		bufHeight--
	}
	bufWidth := w.Width
	if w.Buf.Settings["scrollbar"].(bool) && w.Buf.LinesNum() > w.Height {
		bufWidth--
	}
	if b.Settings["syntax"].(bool) && b.SyntaxDef != nil {
		for _, c := range b.GetCursors() {
			// rehighlight starting from where the cursor is
			start := c.Y
			if start > 0 && b.Rehighlight(start-1) {
				b.Highlighter.ReHighlightLine(b, start-1)
				b.SetRehighlight(start-1, false)
			}
			b.Highlighter.ReHighlightStates(b, start)
			b.Highlighter.HighlightMatches(b, w.StartLine, w.StartLine+bufHeight)
		}
	}
	// Collect the buffer locations of braces to underline: each matching
	// brace plus the brace under (or just before) each cursor.
	var matchingBraces []buffer.Loc
	// bracePairs is defined in buffer.go
	if b.Settings["matchbrace"].(bool) {
		for _, bp := range buffer.BracePairs {
			for _, c := range b.GetCursors() {
				if c.HasSelection() {
					continue
				}
				curX := c.X
				curLoc := c.Loc
				r := c.RuneUnder(curX)
				rl := c.RuneUnder(curX - 1)
				if r == bp[0] || r == bp[1] || rl == bp[0] || rl == bp[1] {
					mb, left := b.FindMatchingBrace(bp, curLoc)
					matchingBraces = append(matchingBraces, mb)
					if !left {
						matchingBraces = append(matchingBraces, curLoc)
					} else {
						matchingBraces = append(matchingBraces, curLoc.Move(-1, b))
					}
				}
			}
		}
	}
	lineNumStyle := config.DefStyle
	if style, ok := config.Colorscheme["line-number"]; ok {
		lineNumStyle = style
	}
	curNumStyle := config.DefStyle
	if style, ok := config.Colorscheme["current-line-number"]; ok {
		curNumStyle = style
	}
	// We need to know the string length of the largest line number
	// so we can pad appropriately when displaying line numbers
	maxLineNumLength := len(strconv.Itoa(b.LinesNum()))
	softwrap := b.Settings["softwrap"].(bool)
	tabsize := util.IntOpt(b.Settings["tabsize"])
	colorcolumn := util.IntOpt(b.Settings["colorcolumn"])
	// this represents the current draw position
	// within the current window
	vloc := buffer.Loc{X: 0, Y: 0}
	// this represents the current draw position in the buffer (char positions)
	bloc := buffer.Loc{X: -1, Y: w.StartLine}
	cursors := b.GetCursors()
	curStyle := config.DefStyle
	for vloc.Y = 0; vloc.Y < bufHeight; vloc.Y++ {
		vloc.X = 0
		if hasMessage {
			w.drawGutter(&vloc, &bloc)
		}
		if b.Settings["ruler"].(bool) {
			// Highlight the line number of any line holding a cursor.
			s := lineNumStyle
			for _, c := range cursors {
				if bloc.Y == c.Y && w.active {
					s = curNumStyle
					break
				}
			}
			w.drawLineNum(s, false, maxLineNumLength, &vloc, &bloc)
		}
		w.gutterOffset = vloc.X
		line, nColsBeforeStart, bslice, startStyle := w.getStartInfo(w.StartCol, bloc.Y)
		if startStyle != nil {
			curStyle = *startStyle
		}
		bloc.X = bslice
		// draw renders one cell at the current draw position, after layering
		// selection, cursor-line, message-underline, indent-char, color-column
		// and brace-match styling on top of the base style. Cells are skipped
		// (but still counted) while nColsBeforeStart > 0, which handles a
		// wide rune straddling the left edge of the horizontal scroll.
		draw := func(r rune, style tcell.Style, showcursor bool) {
			if nColsBeforeStart <= 0 {
				for _, c := range cursors {
					if c.HasSelection() &&
						(bloc.GreaterEqual(c.CurSelection[0]) && bloc.LessThan(c.CurSelection[1]) ||
							bloc.LessThan(c.CurSelection[0]) && bloc.GreaterEqual(c.CurSelection[1])) {
						// The current character is selected
						style = config.DefStyle.Reverse(true)
						if s, ok := config.Colorscheme["selection"]; ok {
							style = s
						}
					}
					if b.Settings["cursorline"].(bool) && w.active &&
						!c.HasSelection() && c.Y == bloc.Y {
						if s, ok := config.Colorscheme["cursor-line"]; ok {
							fg, _, _ := s.Decompose()
							style = style.Background(fg)
						}
					}
				}
				// Underline any character covered by a message's range.
				for _, m := range b.Messages {
					if bloc.GreaterEqual(m.Start) && bloc.LessThan(m.End) ||
						bloc.LessThan(m.End) && bloc.GreaterEqual(m.Start) {
						style = style.Underline(true)
						break
					}
				}
				if r == '\t' {
					indentrunes := []rune(b.Settings["indentchar"].(string))
					// if empty indentchar settings, use space
					if indentrunes == nil || len(indentrunes) == 0 {
						indentrunes = []rune{' '}
					}
					r = indentrunes[0]
					if s, ok := config.Colorscheme["indent-char"]; ok && r != ' ' {
						fg, _, _ := s.Decompose()
						style = style.Foreground(fg)
					}
				}
				if s, ok := config.Colorscheme["color-column"]; ok {
					if colorcolumn != 0 && vloc.X-w.gutterOffset == colorcolumn {
						fg, _, _ := s.Decompose()
						style = style.Background(fg)
					}
				}
				for _, mb := range matchingBraces {
					if mb.X == bloc.X && mb.Y == bloc.Y {
						style = style.Underline(true)
					}
				}
				screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, r, nil, style)
				if showcursor {
					for _, c := range cursors {
						if c.X == bloc.X && c.Y == bloc.Y && !c.HasSelection() {
							w.showCursor(w.X+vloc.X, w.Y+vloc.Y, c.Num == 0)
						}
					}
				}
				vloc.X++
			}
			nColsBeforeStart--
		}
		totalwidth := w.StartCol - nColsBeforeStart
		for len(line) > 0 {
			r, size := utf8.DecodeRune(line)
			curStyle, _ = w.getStyle(curStyle, bloc, r)
			draw(r, curStyle, true)
			width := 0
			char := ' '
			switch r {
			case '\t':
				// A tab advances to the next tab stop.
				ts := tabsize - (totalwidth % tabsize)
				width = ts
			default:
				width = runewidth.RuneWidth(r)
				char = '@'
			}
			// Draw any extra characters either spaces for tabs or @ for incomplete wide runes
			if width > 1 {
				for i := 1; i < width; i++ {
					draw(char, curStyle, false)
				}
			}
			bloc.X++
			line = line[size:]
			totalwidth += width
			// If we reach the end of the window then we either stop or we wrap for softwrap
			if vloc.X >= bufWidth {
				if !softwrap {
					break
				} else {
					vloc.Y++
					if vloc.Y >= bufHeight {
						break
					}
					vloc.X = 0
					// This will draw an empty line number because the current line is wrapped
					w.drawLineNum(lineNumStyle, true, maxLineNumLength, &vloc, &bloc)
				}
			}
		}
		// Fill the rest of the row with blanks, applying cursor-line and
		// color-column styling where relevant.
		style := config.DefStyle
		for _, c := range cursors {
			if b.Settings["cursorline"].(bool) && w.active &&
				!c.HasSelection() && c.Y == bloc.Y {
				if s, ok := config.Colorscheme["cursor-line"]; ok {
					fg, _, _ := s.Decompose()
					style = style.Background(fg)
				}
			}
		}
		for i := vloc.X; i < bufWidth; i++ {
			curStyle := style
			if s, ok := config.Colorscheme["color-column"]; ok {
				if colorcolumn != 0 && i-w.gutterOffset == colorcolumn {
					fg, _, _ := s.Decompose()
					curStyle = style.Background(fg)
				}
			}
			screen.SetContent(i+w.X, vloc.Y+w.Y, ' ', nil, curStyle)
		}
		// One more draw for the end-of-line position, so a cursor sitting
		// past the last character is shown.
		draw(' ', curStyle, true)
		bloc.X = w.StartCol
		bloc.Y++
		if bloc.Y >= b.LinesNum() {
			break
		}
	}
}
// displayStatusLine draws this window's status line, or a plain '-' divider
// row when the statusline option is off but the window does not sit directly
// above the info bar. It records the result in w.drawStatus, which the other
// display methods use to compute the usable height.
func (w *BufWindow) displayStatusLine() {
	_, screenH := screen.Screen.Size()
	infoY := screenH
	if config.GetGlobalOption("infobar").(bool) {
		infoY--
	}

	switch {
	case w.Buf.Settings["statusline"].(bool):
		w.drawStatus = true
		w.sline.Display()
	case w.Y+w.Height != infoY:
		// No statusline, but another window is below: draw a divider row.
		w.drawStatus = true
		divider := config.DefStyle.Reverse(true)
		row := w.Y + w.Height - 1
		for x := w.X; x < w.X+w.Width; x++ {
			screen.SetContent(x, row, '-', nil, divider)
		}
	default:
		w.drawStatus = false
	}
}
// displayScrollBar draws a vertical scrollbar in the window's rightmost
// column when the scrollbar option is on and the buffer is taller than the
// window.
func (w *BufWindow) displayScrollBar() {
	if !w.Buf.Settings["scrollbar"].(bool) || w.Buf.LinesNum() <= w.Height {
		return
	}
	scrollX := w.X + w.Width - 1
	bufHeight := w.Height
	if w.drawStatus {
		bufHeight--
	}
	// Bar size and position are proportional to the visible fraction of the
	// buffer, with a minimum of one cell.
	barsize := int(float64(w.Height) / float64(w.Buf.LinesNum()) * float64(w.Height))
	if barsize < 1 {
		barsize = 1
	}
	barstart := w.Y + int(float64(w.StartLine)/float64(w.Buf.LinesNum())*float64(w.Height))
	barStyle := config.DefStyle.Reverse(true)
	for y := barstart; y < util.Min(barstart+barsize, w.Y+bufHeight); y++ {
		screen.SetContent(scrollX, y, '|', nil, barStyle)
	}
}
// Display displays the buffer and the statusline
func (w *BufWindow) Display() {
	// The status line is drawn first because it sets w.drawStatus, which the
	// scrollbar and buffer drawing use to compute the usable height.
	w.displayStatusLine()
	w.displayScrollBar()
	w.displayBuffer()
}
Minor view fix: LocFromVisual now excludes the scrollbar column (wrapping at bufWidth instead of the full window width), matching displayBuffer's softwrap behavior.
package display
import (
"strconv"
"unicode/utf8"
runewidth "github.com/mattn/go-runewidth"
"github.com/zyedidia/micro/internal/buffer"
"github.com/zyedidia/micro/internal/config"
"github.com/zyedidia/micro/internal/screen"
"github.com/zyedidia/micro/internal/util"
"github.com/zyedidia/tcell"
)
// The BufWindow provides a way of displaying a certain section
// of a buffer
type BufWindow struct {
	*View

	// Buffer being shown in this window
	Buf *buffer.Buffer

	active bool // whether this window has focus (controls cursor display)

	sline *StatusLine // status line drawn at the bottom of the window

	gutterOffset int  // columns consumed by the gutter and line numbers
	drawStatus   bool // whether a status line (or divider) occupies the last row
}
// NewBufWindow creates a new window at a location in the screen with a width and height
func NewBufWindow(x, y, width, height int, buf *buffer.Buffer) *BufWindow {
	w := &BufWindow{
		View:   &View{X: x, Y: y, Width: width, Height: height},
		Buf:    buf,
		active: true,
	}
	w.sline = NewStatusLine(w)
	return w
}
// SetBuffer changes the buffer displayed in this window.
func (w *BufWindow) SetBuffer(b *buffer.Buffer) {
	w.Buf = b
}
// GetView returns this window's view (position and size information).
func (w *BufWindow) GetView() *View {
	return w.View
}
// SetView replaces this window's view (position and size information).
func (w *BufWindow) SetView(view *View) {
	w.View = view
}
// Resize resizes this window and relocates it so the cursor stays in view.
func (w *BufWindow) Resize(width, height int) {
	w.Width, w.Height = width, height
	w.Relocate()
}
// SetActive marks this window as focused (or not); only active windows
// display cursors.
func (w *BufWindow) SetActive(b bool) {
	w.active = b
}
// IsActive reports whether this window is focused.
func (w *BufWindow) IsActive() bool {
	return w.active
}
// getStartInfo computes, for buffer line lineN, where drawing should begin
// when the window is horizontally scrolled by n visual columns.
//
// It returns the remaining bytes of the line starting at the first rune that
// is at least partially visible, the number of visual columns of that rune
// already consumed by the scroll (n minus the width skipped so far), the
// character index of that rune, and a pointer to the most recent highlight
// style encountered (nil if none).
func (w *BufWindow) getStartInfo(n, lineN int) ([]byte, int, int, *tcell.Style) {
	tabsize := util.IntOpt(w.Buf.Settings["tabsize"])
	width := 0
	bloc := buffer.Loc{0, lineN}
	b := w.Buf.LineBytes(lineN)
	curStyle := config.DefStyle
	var s *tcell.Style
	for len(b) > 0 {
		r, size := utf8.DecodeRune(b)
		// As in the original, getStyle always receives the unchanged initial
		// style; we only remember a pointer to the most recent matched style.
		cs, found := w.getStyle(curStyle, bloc, r)
		if found {
			s = &cs
		}
		// rw is the visual width of this rune. (The original declared this as
		// `w`, shadowing the receiver; renamed to avoid that.)
		rw := 0
		switch r {
		case '\t':
			// A tab advances to the next tab stop.
			ts := tabsize - (width % tabsize)
			rw = ts
		default:
			rw = runewidth.RuneWidth(r)
		}
		if width+rw > n {
			// This rune straddles or follows visual column n: start here.
			return b, n - width, bloc.X, s
		}
		width += rw
		b = b[size:]
		bloc.X++
	}
	return b, n - width, bloc.X, s
}
// Clear resets all cells in this window to the default style
func (w *BufWindow) Clear() {
	for row := 0; row < w.Height; row++ {
		for col := 0; col < w.Width; col++ {
			screen.SetContent(w.X+col, w.Y+row, ' ', nil, config.DefStyle)
		}
	}
}
// Bottomline returns the line number of the lowest line in the view
// You might think that this is obviously just v.StartLine + v.Height
// but if softwrap is enabled things get complicated since one buffer
// line can take up multiple lines in the view
func (w *BufWindow) Bottomline() int {
	if !w.Buf.Settings["softwrap"].(bool) {
		h := w.StartLine + w.Height - 1
		if w.drawStatus {
			// The last row is occupied by the status line, not buffer text.
			h--
		}
		return h
	}
	// With softwrap, determine which buffer line is drawn at the window's
	// bottom row by simulating the draw.
	l := w.LocFromVisual(buffer.Loc{0, w.Y + w.Height})
	return l.Y
}
// Relocate moves the view window so that the cursor is in view
// This is useful if the user has scrolled far away, and then starts typing
// Returns true if the window location is moved
func (w *BufWindow) Relocate() bool {
	b := w.Buf
	// how many buffer lines are in the view
	height := w.Bottomline() + 1 - w.StartLine
	h := w.Height
	if w.drawStatus {
		h--
	}
	if b.LinesNum() <= h {
		// The whole buffer fits; treat the full window height as the view height.
		height = w.Height
	}
	ret := false
	activeC := w.Buf.GetActiveCursor()
	cy := activeC.Y
	// scrollmargin is the number of context lines to keep visible above and
	// below the cursor.
	scrollmargin := int(b.Settings["scrollmargin"].(float64))
	// Scroll up if the cursor is above the view (respecting the margin).
	if cy < w.StartLine+scrollmargin && cy > scrollmargin-1 {
		w.StartLine = cy - scrollmargin
		ret = true
	} else if cy < w.StartLine {
		w.StartLine = cy
		ret = true
	}
	// Scroll down if the cursor is below the view (respecting the margin).
	if cy > w.StartLine+height-1-scrollmargin && cy < b.LinesNum()-scrollmargin {
		w.StartLine = cy - height + 1 + scrollmargin
		ret = true
	} else if cy >= b.LinesNum()-scrollmargin && cy >= height {
		w.StartLine = b.LinesNum() - height
		ret = true
	}
	// horizontal relocation (scrolling)
	if !b.Settings["softwrap"].(bool) {
		cx := activeC.GetVisualX()
		if cx < w.StartCol {
			w.StartCol = cx
			ret = true
		}
		if cx+w.gutterOffset+1 > w.StartCol+w.Width {
			w.StartCol = cx - w.Width + w.gutterOffset + 1
			ret = true
		}
	}
	return ret
}
// LocFromVisual takes a visual location (x and y position) and returns the
// position in the buffer corresponding to the visual location
// Computing the buffer location requires essentially drawing the entire screen
// to account for complications like softwrap, wide characters, and horizontal scrolling
// If the requested position does not correspond to a buffer location it returns
// the nearest position
func (w *BufWindow) LocFromVisual(svloc buffer.Loc) buffer.Loc {
	b := w.Buf
	hasMessage := len(b.Messages) > 0
	// The usable text area excludes the status line row and scrollbar column,
	// mirroring displayBuffer so both wrap at the same width.
	bufHeight := w.Height
	if w.drawStatus {
		bufHeight--
	}
	bufWidth := w.Width
	if w.Buf.Settings["scrollbar"].(bool) && w.Buf.LinesNum() > w.Height {
		bufWidth--
	}
	// We need to know the string length of the largest line number
	// so we can pad appropriately when displaying line numbers
	maxLineNumLength := len(strconv.Itoa(b.LinesNum()))
	tabsize := int(b.Settings["tabsize"].(float64))
	softwrap := b.Settings["softwrap"].(bool)
	// this represents the current draw position
	// within the current window
	vloc := buffer.Loc{X: 0, Y: 0}
	// this represents the current draw position in the buffer (char positions)
	bloc := buffer.Loc{X: -1, Y: w.StartLine}
	for vloc.Y = 0; vloc.Y < bufHeight; vloc.Y++ {
		vloc.X = 0
		// Skip the gutter and line-number columns, mirroring displayBuffer.
		if hasMessage {
			vloc.X += 2
		}
		if b.Settings["ruler"].(bool) {
			vloc.X += maxLineNumLength + 1
		}
		line := b.LineBytes(bloc.Y)
		line, nColsBeforeStart, bslice := util.SliceVisualEnd(line, w.StartCol, tabsize)
		bloc.X = bslice
		draw := func() {
			if nColsBeforeStart <= 0 {
				vloc.X++
			}
			nColsBeforeStart--
		}
		totalwidth := w.StartCol - nColsBeforeStart
		// The requested position falls in the gutter/number area of this row.
		if svloc.X <= vloc.X+w.X && vloc.Y+w.Y == svloc.Y {
			return bloc
		}
		for len(line) > 0 {
			if vloc.X+w.X == svloc.X && vloc.Y+w.Y == svloc.Y {
				return bloc
			}
			r, size := utf8.DecodeRune(line)
			draw()
			width := 0
			switch r {
			case '\t':
				// A tab advances to the next tab stop.
				ts := tabsize - (totalwidth % tabsize)
				width = ts
			default:
				width = runewidth.RuneWidth(r)
			}
			// Draw any extra characters either spaces for tabs or @ for incomplete wide runes
			if width > 1 {
				for i := 1; i < width; i++ {
					if vloc.X+w.X == svloc.X && vloc.Y+w.Y == svloc.Y {
						return bloc
					}
					draw()
				}
			}
			bloc.X++
			line = line[size:]
			totalwidth += width
			// If we reach the end of the window then we either stop or we wrap for softwrap
			if vloc.X >= bufWidth {
				if !softwrap {
					break
				} else {
					vloc.Y++
					if vloc.Y >= bufHeight {
						break
					}
					vloc.X = 0
					// This will draw an empty line number because the current line is wrapped
					vloc.X += maxLineNumLength + 1
				}
			}
		}
		// The row ended before reaching svloc.X: return the end of the line.
		if vloc.Y+w.Y == svloc.Y {
			return bloc
		}
		if bloc.Y+1 >= b.LinesNum() || vloc.Y+1 >= bufHeight {
			return bloc
		}
		bloc.X = w.StartCol
		bloc.Y++
	}
	return buffer.Loc{}
}
// drawGutter draws the two gutter cells for the buffer line at bloc, marking
// the line with '>' (in the message's style) when a message starts or ends
// there, and advances vloc past the gutter.
func (w *BufWindow) drawGutter(vloc *buffer.Loc, bloc *buffer.Loc) {
	marker := ' '
	style := config.DefStyle
	for _, msg := range w.Buf.Messages {
		if msg.Start.Y == bloc.Y || msg.End.Y == bloc.Y {
			style = msg.Style()
			marker = '>'
			break
		}
	}
	// The gutter is two cells wide.
	for i := 0; i < 2; i++ {
		screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, marker, nil, style)
		vloc.X++
	}
}
// drawLineNum writes the 1-based line number for bloc.Y at the current draw
// position, right-aligned within maxLineNumLength columns and followed by a
// separator space. On softwrapped continuation rows the digits are blanked.
// vloc is advanced past everything written.
func (w *BufWindow) drawLineNum(lineNumStyle tcell.Style, softwrapped bool, maxLineNumLength int, vloc *buffer.Loc, bloc *buffer.Loc) {
	put := func(r rune) {
		screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, r, nil, lineNumStyle)
		vloc.X++
	}
	num := strconv.Itoa(bloc.Y + 1)
	// Left padding to right-align the number.
	for i := len(num); i < maxLineNumLength; i++ {
		put(' ')
	}
	// The digits themselves (blanked on wrapped continuation rows).
	for _, digit := range num {
		if softwrapped {
			put(' ')
		} else {
			put(digit)
		}
	}
	// Separator between the number column and the text.
	put(' ')
}
// getStyle returns the highlight style for the given character position.
// If there is no change to the current highlight style it just returns that,
// with false indicating no highlight group was found at bloc.
func (w *BufWindow) getStyle(style tcell.Style, bloc buffer.Loc, r rune) (tcell.Style, bool) {
	group, ok := w.Buf.Match(bloc.Y)[bloc.X]
	if !ok {
		return style, false
	}
	return config.GetColor(group.String()), true
}
// showCursor displays a cursor at screen position (x, y) when this window is
// active. The main cursor uses the real terminal cursor; any additional
// cursors are drawn as fake multi-cursors.
func (w *BufWindow) showCursor(x, y int, main bool) {
	if !w.active {
		return
	}
	if main {
		screen.ShowCursor(x, y)
		return
	}
	screen.ShowFakeCursorMulti(x, y)
}
// displayBuffer draws the buffer being shown in this window on the screen.Screen
func (w *BufWindow) displayBuffer() {
	b := w.Buf
	hasMessage := len(b.Messages) > 0
	// Usable height shrinks by one row when a statusline is drawn.
	bufHeight := w.Height
	if w.drawStatus {
		bufHeight--
	}
	// Usable width shrinks by one column when a scrollbar will be drawn.
	bufWidth := w.Width
	if w.Buf.Settings["scrollbar"].(bool) && w.Buf.LinesNum() > w.Height {
		bufWidth--
	}
	// Refresh syntax highlighting around each cursor and the visible region.
	if b.Settings["syntax"].(bool) && b.SyntaxDef != nil {
		for _, c := range b.GetCursors() {
			// rehighlight starting from where the cursor is
			start := c.Y
			if start > 0 && b.Rehighlight(start-1) {
				b.Highlighter.ReHighlightLine(b, start-1)
				b.SetRehighlight(start-1, false)
			}
			b.Highlighter.ReHighlightStates(b, start)
			b.Highlighter.HighlightMatches(b, w.StartLine, w.StartLine+bufHeight)
		}
	}
	// Collect brace positions to underline when matchbrace is enabled.
	var matchingBraces []buffer.Loc
	// bracePairs is defined in buffer.go
	if b.Settings["matchbrace"].(bool) {
		for _, bp := range buffer.BracePairs {
			for _, c := range b.GetCursors() {
				if c.HasSelection() {
					continue
				}
				curX := c.X
				curLoc := c.Loc
				// Check the rune under and immediately left of the cursor.
				r := c.RuneUnder(curX)
				rl := c.RuneUnder(curX - 1)
				if r == bp[0] || r == bp[1] || rl == bp[0] || rl == bp[1] {
					mb, left := b.FindMatchingBrace(bp, curLoc)
					matchingBraces = append(matchingBraces, mb)
					if !left {
						matchingBraces = append(matchingBraces, curLoc)
					} else {
						matchingBraces = append(matchingBraces, curLoc.Move(-1, b))
					}
				}
			}
		}
	}
	lineNumStyle := config.DefStyle
	if style, ok := config.Colorscheme["line-number"]; ok {
		lineNumStyle = style
	}
	curNumStyle := config.DefStyle
	if style, ok := config.Colorscheme["current-line-number"]; ok {
		curNumStyle = style
	}
	// We need to know the string length of the largest line number
	// so we can pad appropriately when displaying line numbers
	maxLineNumLength := len(strconv.Itoa(b.LinesNum()))
	softwrap := b.Settings["softwrap"].(bool)
	tabsize := util.IntOpt(b.Settings["tabsize"])
	colorcolumn := util.IntOpt(b.Settings["colorcolumn"])
	// this represents the current draw position
	// within the current window
	vloc := buffer.Loc{X: 0, Y: 0}
	// this represents the current draw position in the buffer (char positions)
	bloc := buffer.Loc{X: -1, Y: w.StartLine}
	cursors := b.GetCursors()
	curStyle := config.DefStyle
	for vloc.Y = 0; vloc.Y < bufHeight; vloc.Y++ {
		vloc.X = 0
		if hasMessage {
			w.drawGutter(&vloc, &bloc)
		}
		// Draw the line number gutter, highlighting the cursor's own line.
		if b.Settings["ruler"].(bool) {
			s := lineNumStyle
			for _, c := range cursors {
				if bloc.Y == c.Y && w.active {
					s = curNumStyle
					break
				}
			}
			w.drawLineNum(s, false, maxLineNumLength, &vloc, &bloc)
		}
		w.gutterOffset = vloc.X
		line, nColsBeforeStart, bslice, startStyle := w.getStartInfo(w.StartCol, bloc.Y)
		if startStyle != nil {
			curStyle = *startStyle
		}
		bloc.X = bslice
		// draw renders one cell, layering selection, cursorline, message
		// underline, indent-char, colorcolumn, and matching-brace styling on
		// top of the syntax style before writing it to the screen. While
		// nColsBeforeStart is positive the cell is scrolled off to the left
		// of StartCol and nothing is emitted.
		draw := func(r rune, style tcell.Style, showcursor bool) {
			if nColsBeforeStart <= 0 {
				for _, c := range cursors {
					if c.HasSelection() &&
						(bloc.GreaterEqual(c.CurSelection[0]) && bloc.LessThan(c.CurSelection[1]) ||
							bloc.LessThan(c.CurSelection[0]) && bloc.GreaterEqual(c.CurSelection[1])) {
						// The current character is selected
						style = config.DefStyle.Reverse(true)
						if s, ok := config.Colorscheme["selection"]; ok {
							style = s
						}
					}
					if b.Settings["cursorline"].(bool) && w.active &&
						!c.HasSelection() && c.Y == bloc.Y {
						if s, ok := config.Colorscheme["cursor-line"]; ok {
							fg, _, _ := s.Decompose()
							style = style.Background(fg)
						}
					}
				}
				// Underline characters covered by a buffer message span.
				for _, m := range b.Messages {
					if bloc.GreaterEqual(m.Start) && bloc.LessThan(m.End) ||
						bloc.LessThan(m.End) && bloc.GreaterEqual(m.Start) {
						style = style.Underline(true)
						break
					}
				}
				if r == '\t' {
					indentrunes := []rune(b.Settings["indentchar"].(string))
					// if empty indentchar settings, use space
					if indentrunes == nil || len(indentrunes) == 0 {
						indentrunes = []rune{' '}
					}
					r = indentrunes[0]
					if s, ok := config.Colorscheme["indent-char"]; ok && r != ' ' {
						fg, _, _ := s.Decompose()
						style = style.Foreground(fg)
					}
				}
				if s, ok := config.Colorscheme["color-column"]; ok {
					if colorcolumn != 0 && vloc.X-w.gutterOffset == colorcolumn {
						fg, _, _ := s.Decompose()
						style = style.Background(fg)
					}
				}
				for _, mb := range matchingBraces {
					if mb.X == bloc.X && mb.Y == bloc.Y {
						style = style.Underline(true)
					}
				}
				screen.SetContent(w.X+vloc.X, w.Y+vloc.Y, r, nil, style)
				if showcursor {
					for _, c := range cursors {
						if c.X == bloc.X && c.Y == bloc.Y && !c.HasSelection() {
							w.showCursor(w.X+vloc.X, w.Y+vloc.Y, c.Num == 0)
						}
					}
				}
				vloc.X++
			}
			// Consume one of the columns hidden left of StartCol.
			nColsBeforeStart--
		}
		totalwidth := w.StartCol - nColsBeforeStart
		for len(line) > 0 {
			r, size := utf8.DecodeRune(line)
			curStyle, _ = w.getStyle(curStyle, bloc, r)
			draw(r, curStyle, true)
			// width is the number of screen cells this rune occupies.
			width := 0
			char := ' '
			switch r {
			case '\t':
				ts := tabsize - (totalwidth % tabsize)
				width = ts
			default:
				width = runewidth.RuneWidth(r)
				char = '@'
			}
			// Draw any extra characters either spaces for tabs or @ for incomplete wide runes
			if width > 1 {
				for i := 1; i < width; i++ {
					draw(char, curStyle, false)
				}
			}
			bloc.X++
			line = line[size:]
			totalwidth += width
			// If we reach the end of the window then we either stop or we wrap for softwrap
			if vloc.X >= bufWidth {
				if !softwrap {
					break
				} else {
					vloc.Y++
					if vloc.Y >= bufHeight {
						break
					}
					vloc.X = 0
					// This will draw an empty line number because the current line is wrapped
					w.drawLineNum(lineNumStyle, true, maxLineNumLength, &vloc, &bloc)
				}
			}
		}
		// Determine the style for the blank remainder of the row (cursorline
		// background still applies past the end of the text).
		style := config.DefStyle
		for _, c := range cursors {
			if b.Settings["cursorline"].(bool) && w.active &&
				!c.HasSelection() && c.Y == bloc.Y {
				if s, ok := config.Colorscheme["cursor-line"]; ok {
					fg, _, _ := s.Decompose()
					style = style.Background(fg)
				}
			}
		}
		// Clear the rest of the row, still honoring the color column.
		for i := vloc.X; i < bufWidth; i++ {
			curStyle := style
			if s, ok := config.Colorscheme["color-column"]; ok {
				if colorcolumn != 0 && i-w.gutterOffset == colorcolumn {
					fg, _, _ := s.Decompose()
					curStyle = style.Background(fg)
				}
			}
			screen.SetContent(i+w.X, vloc.Y+w.Y, ' ', nil, curStyle)
		}
		// Draw the end-of-line cell so a cursor sitting at line end is shown.
		if vloc.X != bufWidth {
			draw(' ', curStyle, true)
		}
		bloc.X = w.StartCol
		bloc.Y++
		if bloc.Y >= b.LinesNum() {
			break
		}
	}
}
// displayStatusLine draws this window's status line, or a plain '-' divider
// when the statusline option is off but the window does not touch the info
// bar. It records in w.drawStatus whether a bottom row was consumed.
func (w *BufWindow) displayStatusLine() {
	_, screenH := screen.Screen.Size()
	infoY := screenH
	if config.GetGlobalOption("infobar").(bool) {
		infoY--
	}
	switch {
	case w.Buf.Settings["statusline"].(bool):
		w.drawStatus = true
		w.sline.Display()
	case w.Y+w.Height != infoY:
		// No statusline, but another window sits below: draw a divider row.
		w.drawStatus = true
		divider := config.DefStyle.Reverse(true)
		y := w.Y + w.Height - 1
		for x := w.X; x < w.X+w.Width; x++ {
			screen.SetContent(x, y, '-', nil, divider)
		}
	default:
		w.drawStatus = false
	}
}
// displayScrollBar draws a vertical scrollbar in the rightmost column of the
// window when the scrollbar option is on and the buffer is taller than the
// window. The bar's size and position are proportional to the visible
// fraction of the buffer.
func (w *BufWindow) displayScrollBar() {
	if w.Buf.Settings["scrollbar"].(bool) && w.Buf.LinesNum() > w.Height {
		scrollX := w.X + w.Width - 1
		bufHeight := w.Height
		if w.drawStatus {
			bufHeight--
		}
		// NOTE(review): the size/position math below uses w.Height while the
		// clamp in the loop uses bufHeight — confirm the bar cannot be
		// positioned over the statusline row when drawStatus is set.
		barsize := int(float64(w.Height) / float64(w.Buf.LinesNum()) * float64(w.Height))
		if barsize < 1 {
			barsize = 1
		}
		barstart := w.Y + int(float64(w.StartLine)/float64(w.Buf.LinesNum())*float64(w.Height))
		for y := barstart; y < util.Min(barstart+barsize, w.Y+bufHeight); y++ {
			screen.SetContent(scrollX, y, '|', nil, config.DefStyle.Reverse(true))
		}
	}
}
// Display displays the buffer and the statusline
// Order matters: the statusline sets w.drawStatus, which the scrollbar and
// buffer rendering use to compute the drawable height.
func (w *BufWindow) Display() {
	w.displayStatusLine()
	w.displayScrollBar()
	w.displayBuffer()
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package feedback
import (
"encoding/hex"
"fmt"
"log"
"github.com/golang/dep/internal/gps"
)
// Constraint types
const ConsTypeConstraint = "constraint"
const ConsTypeHint = "hint"

// DepTypeDirect represents a direct dependency
const DepTypeDirect = "direct dep"

// DepTypeTransitive represents a transitive dependency,
// or a dependency of a dependency
const DepTypeTransitive = "transitive dep"

// DepTypeImported represents a dependency imported by an external tool
const DepTypeImported = "imported dep"

// ConstraintFeedback holds project constraint feedback data
type ConstraintFeedback struct {
	// All fields are plain display strings used to build log messages.
	Constraint, LockedVersion, Revision, ConstraintType, DependencyType, ProjectPath string
}
// NewConstraintFeedback builds a feedback entry for a constraint in the manifest.
func NewConstraintFeedback(pc gps.ProjectConstraint, depType string) *ConstraintFeedback {
	// A raw revision acts as a hint rather than a true constraint.
	consType := ConsTypeConstraint
	if _, isRev := pc.Constraint.(gps.Revision); isRev {
		consType = ConsTypeHint
	}
	return &ConstraintFeedback{
		Constraint:     pc.Constraint.String(),
		ProjectPath:    string(pc.Ident.ProjectRoot),
		DependencyType: depType,
		ConstraintType: consType,
	}
}
// NewLockedProjectFeedback builds a feedback entry for a project in the lock.
func NewLockedProjectFeedback(lp gps.LockedProject, depType string) *ConstraintFeedback {
	cf := &ConstraintFeedback{
		ProjectPath:    string(lp.Ident().ProjectRoot),
		DependencyType: depType,
	}
	// Record whichever of version/revision the locked project carries.
	switch vt := lp.Version().(type) {
	case gps.PairedVersion:
		cf.LockedVersion = vt.String()
		cf.Revision = vt.Revision().String()
	case gps.UnpairedVersion: // Logically this should never occur, but handle for completeness sake
		cf.LockedVersion = vt.String()
	case gps.Revision:
		cf.Revision = vt.String()
	}
	return cf
}
// LogFeedback logs feedback on changes made to the manifest or lock.
func (cf ConstraintFeedback) LogFeedback(logger *log.Logger) {
	if cf.Constraint != "" {
		msg := GetUsingFeedback(cf.Constraint, cf.ConstraintType, cf.DependencyType, cf.ProjectPath)
		logger.Printf(" %v", msg)
	}
	// Locking feedback needs both halves of the version/revision pair.
	if cf.LockedVersion != "" && cf.Revision != "" {
		msg := GetLockingFeedback(cf.LockedVersion, cf.Revision, cf.DependencyType, cf.ProjectPath)
		logger.Printf(" %v", msg)
	}
}
// GetUsingFeedback returns a dependency "using" feedback message. For example:
//
//	Using ^1.0.0 as constraint for direct dep github.com/foo/bar
//	Using 1b8edb3 as hint for direct dep github.com/bar/baz
func GetUsingFeedback(version, consType, depType, projectPath string) string {
	// Imported dependencies are labelled "initial" because the constraint
	// came from an external tool rather than the user.
	format := "Using %s as %s for %s %s"
	if depType == DepTypeImported {
		format = "Using %s as initial %s for %s %s"
	}
	return fmt.Sprintf(format, version, consType, depType, projectPath)
}
// GetLockingFeedback returns a dependency "locking" feedback message. For
// example:
//
//	Locking in v1.1.4 (bc29b4f) for direct dep github.com/foo/bar
//	Locking in master (436f39d) for transitive dep github.com/baz/qux
func GetLockingFeedback(version, revision, depType, projectPath string) string {
	// Abbreviate full 40-character SHA1 digests to 7 characters.
	if len(revision) == 40 {
		if _, err := hex.DecodeString(revision); err == nil {
			revision = revision[:7]
		}
	}
	if depType == DepTypeImported {
		return fmt.Sprintf("Trying %s (%s) as initial lock for %s %s", version, revision, depType, projectPath)
	}
	return fmt.Sprintf("Locking in %s (%s) for %s %s", version, revision, depType, projectPath)
}
Update feedback to support revision without version
This is done so that we can get feedback for the detached-head
use case that govendor frequently encounters.
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package feedback
import (
"encoding/hex"
"fmt"
"log"
"github.com/golang/dep/internal/gps"
)
// Constraint types
const ConsTypeConstraint = "constraint"
const ConsTypeHint = "hint"

// DepTypeDirect represents a direct dependency
const DepTypeDirect = "direct dep"

// DepTypeTransitive represents a transitive dependency,
// or a dependency of a dependency
const DepTypeTransitive = "transitive dep"

// DepTypeImported represents a dependency imported by an external tool
const DepTypeImported = "imported dep"

// ConstraintFeedback holds project constraint feedback data
type ConstraintFeedback struct {
	// All fields are plain display strings used to build log messages.
	Constraint, LockedVersion, Revision, ConstraintType, DependencyType, ProjectPath string
}
// NewConstraintFeedback builds a feedback entry for a constraint in the manifest.
func NewConstraintFeedback(pc gps.ProjectConstraint, depType string) *ConstraintFeedback {
	cf := &ConstraintFeedback{
		Constraint:     pc.Constraint.String(),
		ProjectPath:    string(pc.Ident.ProjectRoot),
		DependencyType: depType,
	}
	// A bare revision is reported as a hint; anything else is a constraint.
	if _, ok := pc.Constraint.(gps.Revision); ok {
		cf.ConstraintType = ConsTypeHint
	} else {
		cf.ConstraintType = ConsTypeConstraint
	}
	return cf
}
// NewLockedProjectFeedback builds a feedback entry for a project in the lock.
func NewLockedProjectFeedback(lp gps.LockedProject, depType string) *ConstraintFeedback {
	cf := &ConstraintFeedback{
		ProjectPath:    string(lp.Ident().ProjectRoot),
		DependencyType: depType,
	}
	// Record whichever of version/revision the locked project carries.
	switch vt := lp.Version().(type) {
	case gps.PairedVersion:
		cf.LockedVersion = vt.String()
		cf.Revision = vt.Revision().String()
	case gps.UnpairedVersion: // Logically this should never occur, but handle for completeness sake
		cf.LockedVersion = vt.String()
	case gps.Revision:
		cf.Revision = vt.String()
	}
	return cf
}
// LogFeedback logs feedback on changes made to the manifest or lock.
func (cf ConstraintFeedback) LogFeedback(logger *log.Logger) {
	if cf.Constraint != "" {
		logger.Printf(" %v", GetUsingFeedback(cf.Constraint, cf.ConstraintType, cf.DependencyType, cf.ProjectPath))
	}
	// Locking feedback only requires a revision; the locked version may be
	// empty (e.g. a detached-head lock pinned to a bare revision).
	if cf.Revision != "" {
		logger.Printf(" %v", GetLockingFeedback(cf.LockedVersion, cf.Revision, cf.DependencyType, cf.ProjectPath))
	}
}
// GetUsingFeedback returns a dependency "using" feedback message. For example:
//
//	Using ^1.0.0 as constraint for direct dep github.com/foo/bar
//	Using 1b8edb3 as hint for direct dep github.com/bar/baz
func GetUsingFeedback(version, consType, depType, projectPath string) string {
	// Imported dependencies are labelled "initial" because the constraint
	// came from an external tool rather than the user.
	if depType == DepTypeImported {
		return fmt.Sprintf("Using %s as initial %s for %s %s", version, consType, depType, projectPath)
	}
	return fmt.Sprintf("Using %s as %s for %s %s", version, consType, depType, projectPath)
}
// GetLockingFeedback returns a dependency "locking" feedback message. For
// example:
//
//	Locking in v1.1.4 (bc29b4f) for direct dep github.com/foo/bar
//	Locking in master (436f39d) for transitive dep github.com/baz/qux
func GetLockingFeedback(version, revision, depType, projectPath string) string {
	// Check if it's a valid SHA1 digest and trim to 7 characters.
	if len(revision) == 40 {
		if _, err := hex.DecodeString(revision); err == nil {
			revision = revision[:7]
		}
	}
	if depType != DepTypeImported {
		return fmt.Sprintf("Locking in %s (%s) for %s %s", version, revision, depType, projectPath)
	}
	// Imported locks may carry a bare revision with no version; show "*".
	displayVersion := version
	if displayVersion == "" {
		displayVersion = "*"
	}
	return fmt.Sprintf("Trying %s (%s) as initial lock for %s %s", displayVersion, revision, depType, projectPath)
}
|
package sanitize
import "regexp"
// reStripName matches every character that is not a word character
// ([0-9A-Za-z_]), a dot, or a hyphen.
var reStripName = regexp.MustCompile(`[^\w.-]`)

// maxLength caps the length of sanitized names.
const maxLength = 16

// Name returns a name with only allowed characters and a reasonable length
func Name(s string) string {
	name := reStripName.ReplaceAllString(s, "")
	if len(name) > maxLength {
		return name[:maxLength]
	}
	return name
}
// reStripData matches any non-ASCII byte as well as ASCII control characters.
var reStripData = regexp.MustCompile("[^[:ascii:]]|[[:cntrl:]]")

// Data returns a string with only allowed characters for client-provided metadata inputs.
func Data(s string, maxlen int) string {
	// Truncate first (byte-wise), then strip disallowed characters.
	truncated := s
	if len(truncated) > maxlen {
		truncated = truncated[:maxlen]
	}
	return reStripData.ReplaceAllString(truncated, "")
}
sanitize: Move global variables to top of file
package sanitize
import "regexp"
// Package-level patterns, compiled once at init.
var (
	// reStripName matches every character that is not [0-9A-Za-z_], '.' or '-'.
	reStripName = regexp.MustCompile(`[^\w.-]`)
	// reStripData matches non-ASCII bytes and ASCII control characters.
	reStripData = regexp.MustCompile("[^[:ascii:]]|[[:cntrl:]]")
)

// maxLength caps the length of sanitized names.
const maxLength = 16

// Name returns a name with only allowed characters and a reasonable length
func Name(s string) string {
	name := reStripName.ReplaceAllString(s, "")
	if len(name) > maxLength {
		return name[:maxLength]
	}
	return name
}

// Data returns a string with only allowed characters for client-provided metadata inputs.
func Data(s string, maxlen int) string {
	// Truncate first (byte-wise), then strip disallowed characters.
	if len(s) > maxlen {
		s = s[:maxlen]
	}
	return reStripData.ReplaceAllString(s, "")
}
|
package protobuf
import (
pb "github.com/dvyukov/go-fuzz/examples/protobuf/pb"
"github.com/golang/protobuf/proto"
)
// Fuzz is the go-fuzz entry point: it tries to unmarshal the input into each
// generated message type and re-marshals every message that parses. The
// returned score is the number of message types that accepted the input.
func Fuzz(data []byte) int {
	// Suppress one known panic from protobuf's reflection path
	// ("SetMapIndex on zero Value"); every other panic is re-raised so
	// go-fuzz records it as a crash.
	defer func() {
		v := recover()
		if v != nil {
			str := ""
			switch vv := v.(type) {
			case string:
				str = vv
			case error:
				str = vv.Error()
			}
			if str == "reflect: call of reflect.Value.SetMapIndex on zero Value" {
				return
			}
			panic(v)
		}
	}()
	// One instance of every generated test message type.
	vars := []proto.Message{
		new(pb.M0),
		new(pb.M1),
		new(pb.M2),
		new(pb.M3),
		new(pb.M4),
		new(pb.M5),
		new(pb.M6),
		new(pb.M7),
		new(pb.M8),
		new(pb.M9),
		new(pb.M10),
		new(pb.M11),
		new(pb.M12),
		new(pb.M13),
		new(pb.M14),
		new(pb.M15),
		new(pb.M16),
		new(pb.M17),
		new(pb.M18),
		new(pb.M19),
		new(pb.M20),
		new(pb.M21),
		new(pb.M22),
		new(pb.M23),
		new(pb.M24),
		new(pb.M25),
	}
	score := 0
	for _, v := range vars {
		if err := proto.Unmarshal(data, v); err == nil {
			score++
			// Round-trip invariant: anything that unmarshaled must marshal.
			if _, err := proto.Marshal(v); err != nil {
				panic(err)
			}
		}
	}
	return score
}
remove suppression for a fixed bug from protobuf test
package protobuf
import (
pb "github.com/dvyukov/go-fuzz/examples/protobuf/pb"
"github.com/golang/protobuf/proto"
)
// Fuzz is the go-fuzz entry point: it tries to unmarshal the input into each
// generated message type and re-marshals every message that parses. The
// returned score is the number of message types that accepted the input.
// Any panic (e.g. a marshal failure below) is reported by go-fuzz as a crash.
func Fuzz(data []byte) int {
	// One instance of every generated test message type.
	vars := []proto.Message{
		new(pb.M0),
		new(pb.M1),
		new(pb.M2),
		new(pb.M3),
		new(pb.M4),
		new(pb.M5),
		new(pb.M6),
		new(pb.M7),
		new(pb.M8),
		new(pb.M9),
		new(pb.M10),
		new(pb.M11),
		new(pb.M12),
		new(pb.M13),
		new(pb.M14),
		new(pb.M15),
		new(pb.M16),
		new(pb.M17),
		new(pb.M18),
		new(pb.M19),
		new(pb.M20),
		new(pb.M21),
		new(pb.M22),
		new(pb.M23),
		new(pb.M24),
		new(pb.M25),
	}
	score := 0
	for _, v := range vars {
		if err := proto.Unmarshal(data, v); err == nil {
			score++
			// Round-trip invariant: anything that unmarshaled must marshal.
			if _, err := proto.Marshal(v); err != nil {
				panic(err)
			}
		}
	}
	return score
}
|
package usergrid
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"reflect"
"runtime"
"strings"
)
// Package-level configuration and counters shared by all clients.
// NOTE(review): REQUESTS and RESPONSES are incremented from request
// goroutines without synchronization — confirm whether racy counts are
// acceptable here.
var (
	API string
	PAGE_SIZE int
	ORGNAME string
	APPNAME string
	CLIENT_ID string
	CLIENT_SECRET string
	ENDPOINT string
	REQUESTS int
	RESPONSES int
	RESPONSE_SIZE int
	MAX_CONCURRENT_REQUESTS int = runtime.NumCPU()
)
// init routes package logging to stderr and pins GOMAXPROCS to the CPU
// count. NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5 — this
// call is likely redundant on modern toolchains.
func init(){
	log.SetOutput(os.Stderr)
	runtime.GOMAXPROCS(MAX_CONCURRENT_REQUESTS)
}
// Client holds connection info for a single Usergrid org/app endpoint.
// access_token is set by Login/OrgLogin and attached to later requests;
// _client is lazily created on first request.
type Client struct {
	Organization,Application,Uri,access_token string
	_client *http.Client
}

// ResponseHandlerInterface processes a raw response body and reports an error.
type ResponseHandlerInterface func(responseBody []byte) error
// NOOPResponseHandler returns a handler that discards the response body and
// always reports success. The objmap argument is ignored.
func NOOPResponseHandler(objmap *interface{}) ResponseHandlerInterface {
	return func([]byte) error {
		return nil
	}
}
// JSONResponseHandler returns a handler that unmarshals the response body
// into objmap and then checks the decoded payload for a Usergrid error
// object, reporting either failure as an error.
//
// Fixes: removed the unreachable trailing "return nil" and the else-branch
// after a return; behavior is unchanged.
func JSONResponseHandler(objmap *interface{}) ResponseHandlerInterface {
	return func(responseBody []byte) error {
		if err := json.Unmarshal(responseBody, &objmap); err != nil {
			return err
		}
		// The payload parsed; surface any embedded "error" field.
		return CheckForError(objmap)
	}
}
// CheckForError inspects a decoded Usergrid response for an error payload.
// It returns a non-nil error when the response contains an "error" field,
// preferring the more descriptive "error_description" when present.
//
// Fixes: the previous unchecked type assertions panicked on a non-object
// payload (e.g. a JSON array) or a non-string error field; comma-ok
// assertions make those cases safe.
func CheckForError(objmap *interface{}) error {
	omap, ok := (*objmap).(map[string]interface{})
	if !ok {
		// Not a JSON object: nothing to report.
		return nil
	}
	if omap["error"] == nil {
		return nil
	}
	if desc, ok := omap["error_description"].(string); ok {
		return errors.New(desc)
	}
	if msg, ok := omap["error"].(string); ok {
		return errors.New(msg)
	}
	// Fall back to a best-effort rendering of whatever the error field holds.
	return fmt.Errorf("%v", omap["error"])
}
// PrintAll logs each value with its index and dynamic type.
func PrintAll(vals []interface{}) {
	for idx, val := range vals {
		log.Println(idx, reflect.TypeOf(val), val)
	}
}
// AppendQueryParams returns endpoint with params merged into its query
// string. A nil or empty params map leaves the endpoint untouched.
//
// Fixes: the previous version appended "?query" to the raw endpoint string,
// producing a second "?" when the endpoint already had a query (and a bare
// trailing "?" for an empty map), and ignored url.Parse errors.
func AppendQueryParams(endpoint string, params map[string]string) string {
	if len(params) == 0 {
		return endpoint
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		// Unparseable endpoint: return it unchanged rather than corrupting it.
		return endpoint
	}
	q := u.Query()
	for k, v := range params {
		q.Set(k, v)
	}
	u.RawQuery = q.Encode()
	// Rebuild via the URL type so existing query parameters are merged.
	return u.String()
}
func (client *Client) Authenticate(grant_type string, client_id string, client_secret string){}
// Login obtains a user-level access token via the password grant and stores
// it on the client for use by subsequent requests.
func (client *Client) Login(username string, password string) error{
	urlStr := fmt.Sprintf("%s/%s/%s/%s/%s/%s",client.Uri,client.Organization, client.Application, "users", username, "token")
	data := map[string]string{"grant_type":"password","username":username,"password":password}
	var objmap interface{}
	err := client.RequestWithHandler("POST", urlStr, nil, data, func(responseBody []byte) (error){
		err := json.Unmarshal(responseBody, &objmap)
		// NOTE(review): the Unmarshal error is overwritten below without
		// being checked, and the unchecked type assertions panic on an error
		// response that lacks "access_token" — confirm and harden.
		omap:=objmap.(map[string]interface{})
		client.access_token=omap["access_token"].(string)
		err=CheckForError(&objmap)
		return err
	})
	return err
}
// OrgLogin obtains a management (organization-level) access token via the
// client-credentials grant and stores it on the client.
func (client *Client) OrgLogin(client_id string, client_secret string) error {
	urlStr := fmt.Sprintf("%s/%s",client.Uri,"management/token")
	data := map[string]string{"grant_type":"client_credentials","client_id":client_id,"client_secret":client_secret}
	var objmap interface{}
	err := client.RequestWithHandler("POST", urlStr, nil, data, func(responseBody []byte) (error){
		err := json.Unmarshal(responseBody, &objmap)
		// NOTE(review): same hazards as Login — the Unmarshal error is
		// discarded and the assertions panic when "access_token" is absent.
		omap:=objmap.(map[string]interface{})
		client.access_token=omap["access_token"].(string)
		err=CheckForError(&objmap)
		return err
	})
	return err
}
// AddAuthorizationHeaders attaches the bearer-token Authorization header
// when the client holds an access token; otherwise it does nothing.
func (client *Client) AddAuthorizationHeaders(req *http.Request) {
	if client.access_token == "" {
		return
	}
	req.Header.Add("Authorization", "Bearer "+client.access_token)
}
// MakeRequest builds an *http.Request for the given method and endpoint.
// Query params are appended to the endpoint and, for POST/PUT, data is JSON
// encoded into the request body. Unknown methods fall back to GET. The
// client's authorization headers are attached before returning.
//
// Fixes: json.Marshal errors were silently ignored (sending an empty body),
// the POST and PUT branches were duplicated, and the method was uppercased
// twice.
func (client *Client) MakeRequest(method string, endpoint string, params map[string]string, data interface{}) (*http.Request, error) {
	method = strings.ToUpper(method)
	endpoint = AppendQueryParams(endpoint, params)
	var req *http.Request
	var err error
	switch method {
	case "POST", "PUT":
		body, merr := json.Marshal(data)
		if merr != nil {
			return nil, merr
		}
		req, err = http.NewRequest(method, endpoint, strings.NewReader(string(body)))
	case "DELETE":
		req, err = http.NewRequest(method, endpoint, nil)
	default: // GET
		method = "GET"
		req, err = http.NewRequest(method, endpoint, nil)
	}
	if err != nil {
		return nil, err
	}
	client.AddAuthorizationHeaders(req)
	return req, nil
}
// Request performs the HTTP request asynchronously and delivers the raw
// response body on responseChan.
func (client *Client) Request(method string, endpoint string, params map[string]string, data interface{}, responseChan chan []byte){
	//intialize an http client if we don't already have one
	if client._client == nil {
		client._client= &http.Client{}
	}
	// NOTE(review): this deferred recover runs when Request itself returns,
	// but the log.Panic calls below happen inside the goroutine launched
	// further down, so they are NOT caught here — confirm and consider
	// moving the recover into that goroutine.
	defer func() {
		if r := recover(); r != nil {
			responseChan <- []byte(fmt.Sprintf("{\"error\":\"%s\", \"error_description\":\"%s: %v\"}", "network_error", "The request failed at the network level", r))
		}
	}()
	go func(){
		req, err :=client.MakeRequest(method, endpoint, params, data)
		if err != nil {
			log.Panic(err)
		}
		// NOTE(review): unsynchronized write to a package-level counter.
		REQUESTS++
		resp, err :=client._client.Do(req)
		if err != nil {
			log.Panic(err)
		}
		defer resp.Body.Close()
		RESPONSES++
		responseBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Panic(err)
		}
		responseChan <- responseBody
	}()
}
// RequestWithHandler performs the request, blocks for the response body, and
// passes it to handler, returning the handler's result.
func (client *Client) RequestWithHandler(method string, endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
	ch := make(chan []byte)
	client.Request(method, endpoint, params, data, ch)
	return handler(<-ch)
}
// collectionURL builds the org/app-scoped URL for a collection endpoint.
func (client *Client) collectionURL(endpoint string) string {
	return fmt.Sprintf("%s/%s/%s/%s", client.Uri, client.Organization, client.Application, endpoint)
}

// Get issues a GET against an application collection endpoint.
func (client *Client) Get(endpoint string, params map[string]string, handler ResponseHandlerInterface) error {
	return client.RequestWithHandler("GET", client.collectionURL(endpoint), params, nil, handler)
}

// Delete issues a DELETE against an application collection endpoint.
func (client *Client) Delete(endpoint string, params map[string]string, handler ResponseHandlerInterface) error {
	return client.RequestWithHandler("DELETE", client.collectionURL(endpoint), params, nil, handler)
}

// Post issues a POST with a JSON payload against an application collection endpoint.
func (client *Client) Post(endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
	return client.RequestWithHandler("POST", client.collectionURL(endpoint), params, data, handler)
}

// Put issues a PUT with a JSON payload against an application collection endpoint.
func (client *Client) Put(endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
	return client.RequestWithHandler("PUT", client.collectionURL(endpoint), params, data, handler)
}
Added struct definitions for returned data.
package usergrid
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"reflect"
"runtime"
"strings"
)
// Package-level configuration and counters shared by all clients.
// NOTE(review): REQUESTS and RESPONSES are incremented from request
// goroutines without synchronization — confirm whether racy counts are
// acceptable here.
var (
	API string
	PAGE_SIZE int
	ORGNAME string
	APPNAME string
	CLIENT_ID string
	CLIENT_SECRET string
	ENDPOINT string
	REQUESTS int
	RESPONSES int
	RESPONSE_SIZE int
	MAX_CONCURRENT_REQUESTS int = runtime.NumCPU()
)
// init routes package logging to stderr and pins GOMAXPROCS to the CPU
// count. NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5 — this
// call is likely redundant on modern toolchains.
func init() {
	log.SetOutput(os.Stderr)
	runtime.GOMAXPROCS(MAX_CONCURRENT_REQUESTS)
}
type OrgUser struct {
ApplicationId string `json:applicationId`
Username string `json:username`
Name string `json:name`
Email string `json:email`
Activated bool `json:activated`
Confirmed bool `json:confirmed`
Disabled bool `json:disabled`
Properties interface{} `json:properties`
Uuid string `json:uuid`
AdminUser bool `json:adminUser`
DisplayEmailAddress string `json:displayEmailAddress`
HtmlDisplayEmailAddress string `json:htmldisplayEmailAddress`
}
type Organization struct {
Name string `json:name`
Users map[string]OrgUser `json:users`
Applications map[string]string `json:applications`
Uuid string `json:uuid`
Properties interface{} `json:properties`
PasswordHistorySize int `json:passwordHistorySize`
}
type OrgLogin struct {
Response
Access_Token string `json:access_token`
Expires_In int `json:expires_in`
Organization Organization `json:organization`
}
type EntityMetadata interface{}
type Entity struct {
Created int64 `json:created`
Modified int64 `json:modified`
Name string `json:name`
Type string `json:type`
Uuid string `json:uuid`
Metadata EntityMetadata `json:metadata`
}
type Application struct {
Entity
AccessTokenTtl int64 `json:accesstokenttl`
ApigeeMobileConfig string `json:apigeeMobileConfig`
ApplicationName string `json:applicationName`
OrganizationName string `json:organizationName`
Metadata ApplicationMetadata `json:metadata`
}
type Collection struct {
Response
Action string `json:action`
Application string `json:application`
ApplicationName string `json:applicationName`
Duration int64 `json:duration`
Entities []Entity `json:entities`
Organization string `json:organization`
Params map[string]string `json:params`
Timestamp int64 `json:timestamp`
URI string `json:uri`
}
type ApplicationMetadata struct {
Collections map[string]CollectionMetadata `json:collections`
}
type CollectionMetadata struct {
Count int64 `json:count`
Name string `json:name`
Title string `json:title`
Type string `json:type`
}
type ApplicationResponse struct {
Collection
Entities []Application `json:entities`
}
type Response struct {
Error string `json:error`
Error_Description string `json:error_description`
}
type Client struct {
Organization string `json:organization`
Application string `json:application`
Uri string `json:uri`
Access_Token string `json:access_token`
_client *http.Client
}
// ResponseHandlerInterface processes a raw response body and reports an error.
type ResponseHandlerInterface func(responseBody []byte) error

// NOOPResponseHandler returns a handler that discards the response body and
// always reports success. The objmap argument is ignored.
func NOOPResponseHandler(objmap *interface{}) ResponseHandlerInterface {
	return func([]byte) error {
		return nil
	}
}
// JSONResponseHandler returns a handler that unmarshals the response body
// into objmap and then checks the decoded payload for a Usergrid error
// object, reporting either failure as an error.
//
// Fixes: removed the unreachable trailing "return nil" and the else-branch
// after a return; behavior is unchanged.
func JSONResponseHandler(objmap *interface{}) ResponseHandlerInterface {
	return func(responseBody []byte) error {
		if err := json.Unmarshal(responseBody, &objmap); err != nil {
			return err
		}
		// The payload parsed; surface any embedded "error" field.
		return CheckForError(objmap)
	}
}
// CheckForError inspects a decoded Usergrid response for an error payload.
// It returns a non-nil error when the response contains an "error" field,
// preferring the more descriptive "error_description" when present.
//
// Fixes: the previous unchecked type assertions panicked on a non-object
// payload (e.g. a JSON array) or a non-string error field; comma-ok
// assertions make those cases safe.
func CheckForError(objmap *interface{}) error {
	omap, ok := (*objmap).(map[string]interface{})
	if !ok {
		// Not a JSON object: nothing to report.
		return nil
	}
	if omap["error"] == nil {
		return nil
	}
	if desc, ok := omap["error_description"].(string); ok {
		return errors.New(desc)
	}
	if msg, ok := omap["error"].(string); ok {
		return errors.New(msg)
	}
	// Fall back to a best-effort rendering of whatever the error field holds.
	return fmt.Errorf("%v", omap["error"])
}
// PrintAll logs each value with its index and dynamic type.
func PrintAll(vals []interface{}) {
	for idx, val := range vals {
		log.Println(idx, reflect.TypeOf(val), val)
	}
}
// AppendQueryParams returns endpoint with params merged into its query
// string. A nil or empty params map leaves the endpoint untouched.
//
// Fixes: the previous version appended "?query" to the raw endpoint string,
// producing a second "?" when the endpoint already had a query (and a bare
// trailing "?" for an empty map), and ignored url.Parse errors.
func AppendQueryParams(endpoint string, params map[string]string) string {
	if len(params) == 0 {
		return endpoint
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		// Unparseable endpoint: return it unchanged rather than corrupting it.
		return endpoint
	}
	q := u.Query()
	for k, v := range params {
		q.Set(k, v)
	}
	u.RawQuery = q.Encode()
	// Rebuild via the URL type so existing query parameters are merged.
	return u.String()
}
func (client *Client) Authenticate(grant_type string, client_id string, client_secret string) {}
// Login obtains a user-level access token via the password grant and stores
// it on the client for use by subsequent requests.
func (client *Client) Login(username string, password string) error {
	urlStr := fmt.Sprintf("%s/%s/%s/%s/%s/%s", client.Uri, client.Organization, client.Application, "users", username, "token")
	data := map[string]string{"grant_type": "password", "username": username, "password": password}
	var objmap OrgLogin
	err := client.RequestWithHandler("POST", urlStr, nil, data, func(responseBody []byte) error {
		err := json.Unmarshal(responseBody, &objmap)
		if err != nil {
			return err
		} else if objmap.Error != "" {
			// The server replied with a Usergrid error object.
			return errors.New(objmap.Error)
		}
		// Success: persist the token for later requests.
		client.Access_Token = objmap.Access_Token
		return nil
	})
	return err
}
// OrgLogin obtains a management (organization-level) access token via the
// client-credentials grant and stores it on the client.
func (client *Client) OrgLogin(client_id string, client_secret string) error {
	urlStr := fmt.Sprintf("%s/%s", client.Uri, "management/token")
	data := map[string]string{"grant_type": "client_credentials", "client_id": client_id, "client_secret": client_secret}
	var objmap OrgLogin
	err := client.RequestWithHandler("POST", urlStr, nil, data, func(responseBody []byte) error {
		err := json.Unmarshal(responseBody, &objmap)
		if err != nil {
			return err
		} else if objmap.Error != "" {
			// The server replied with a Usergrid error object.
			return errors.New(objmap.Error)
		}
		// Success: persist the token for later requests.
		client.Access_Token = objmap.Access_Token
		return nil
	})
	return err
}
// AddAuthorizationHeaders attaches the bearer-token Authorization header
// when the client holds an access token; otherwise it does nothing.
func (client *Client) AddAuthorizationHeaders(req *http.Request) {
	if client.Access_Token == "" {
		return
	}
	req.Header.Add("Authorization", "Bearer "+client.Access_Token)
}
// MakeRequest builds an *http.Request for the given method and endpoint.
// Query params are appended to the endpoint and, for POST/PUT, data is JSON
// encoded into the request body. Unknown methods fall back to GET. The
// client's authorization headers are attached before returning.
//
// Fixes: json.Marshal errors were silently ignored (sending an empty body),
// the POST and PUT branches were duplicated, and the method was uppercased
// twice.
func (client *Client) MakeRequest(method string, endpoint string, params map[string]string, data interface{}) (*http.Request, error) {
	method = strings.ToUpper(method)
	endpoint = AppendQueryParams(endpoint, params)
	var req *http.Request
	var err error
	switch method {
	case "POST", "PUT":
		body, merr := json.Marshal(data)
		if merr != nil {
			return nil, merr
		}
		req, err = http.NewRequest(method, endpoint, strings.NewReader(string(body)))
	case "DELETE":
		req, err = http.NewRequest(method, endpoint, nil)
	default: // GET
		method = "GET"
		req, err = http.NewRequest(method, endpoint, nil)
	}
	if err != nil {
		return nil, err
	}
	client.AddAuthorizationHeaders(req)
	return req, nil
}
// Request performs the HTTP request asynchronously and delivers the raw
// response body on responseChan.
// NOTE(review): with the recover below commented out, the log.Panic calls in
// the goroutine crash the whole process on any network failure — confirm
// whether that is intended before shipping.
func (client *Client) Request(method string, endpoint string, params map[string]string, data interface{}, responseChan chan []byte) {
	//intialize an http client if we don't already have one
	if client._client == nil {
		client._client = &http.Client{}
	}
	// defer func() {
	// 	if r := recover(); r != nil {
	// 		responseChan <- []byte(fmt.Sprintf("{\"error\":\"%s\", \"error_description\":\"%s: %v\"}", "network_error", "The request failed at the network level", r))
	// 	}
	// }()
	go func() {
		req, err := client.MakeRequest(method, endpoint, params, data)
		if err != nil {
			log.Panic(err)
		}
		// NOTE(review): unsynchronized write to a package-level counter.
		REQUESTS++
		resp, err := client._client.Do(req)
		if err != nil {
			log.Panic(err)
		}
		defer resp.Body.Close()
		RESPONSES++
		responseBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Panic(err)
		}
		responseChan <- responseBody
	}()
}
// RequestWithHandler performs the request, blocks for the response body, and
// passes it to handler, returning the handler's result.
func (client *Client) RequestWithHandler(method string, endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
	ch := make(chan []byte)
	client.Request(method, endpoint, params, data, ch)
	return handler(<-ch)
}
func (client *Client) Get(endpoint string, params map[string]string, handler ResponseHandlerInterface) error {
urlStr := fmt.Sprintf("%s/%s/%s/%s", client.Uri, client.Organization, client.Application, endpoint)
return client.RequestWithHandler("GET", urlStr, params, nil, handler)
}
func (client *Client) Delete(endpoint string, params map[string]string, handler ResponseHandlerInterface) error {
urlStr := fmt.Sprintf("%s/%s/%s/%s", client.Uri, client.Organization, client.Application, endpoint)
return client.RequestWithHandler("DELETE", urlStr, params, nil, handler)
}
func (client *Client) Post(endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
urlStr := fmt.Sprintf("%s/%s/%s/%s", client.Uri, client.Organization, client.Application, endpoint)
return client.RequestWithHandler("POST", urlStr, params, data, handler)
}
// Put performs a PUT with the given payload against the org/app-scoped
// endpoint and feeds the response to handler.
func (client *Client) Put(endpoint string, params map[string]string, data interface{}, handler ResponseHandlerInterface) error {
	target := fmt.Sprintf("%s/%s/%s/%s", client.Uri, client.Organization, client.Application, endpoint)
	return client.RequestWithHandler("PUT", target, params, data, handler)
}
|
package design
import (
"crypto/md5"
"encoding/binary"
"math/rand"
"time"
"github.com/manveru/faker"
)
// RandomGenerator generates consistent random values of different types given a seed.
// The random values are consistent in that given the same seed the same random values get
// generated.
type RandomGenerator struct {
Seed string
faker *faker.Faker
rand *rand.Rand
}
// NewRandomGenerator returns a random value generator seeded from the given
// string value. The seed is hashed with MD5 and the first 8 bytes of the
// digest seed a math/rand source, so identical seeds always yield identical
// pseudo random sequences.
func NewRandomGenerator(seed string) *RandomGenerator {
	hasher := md5.New()
	hasher.Write([]byte(seed))
	sint := int64(binary.BigEndian.Uint64(hasher.Sum(nil)))
	source := rand.NewSource(sint)
	ran := rand.New(source)
	// "en" matches the dictionary actually loaded below; the previous
	// value "end" was a typo and did not name the loaded dictionary.
	faker := &faker.Faker{
		Language: "en",
		Dict:     faker.Dict["en"],
		Rand:     ran,
	}
	return &RandomGenerator{
		Seed:  seed,
		faker: faker,
		rand:  ran,
	}
}
// Int produces a random integer drawn from the seeded generator.
func (r *RandomGenerator) Int() int {
	n := r.rand.Int()
	return n
}
// String produces a random string: a short two-word fake sentence.
func (r *RandomGenerator) String() string {
	const words, supplemental = 2, false
	return r.faker.Sentence(words, supplemental)
}
// DateTime produces a random date.
//
// The upper bound is a fixed instant rather than time.Now() so that, for a
// given seed, the same sequence of dates is generated on every run.
// Using time.Now() made the output depend on when generation happened,
// breaking the type's "same seed, same values" contract.
func (r *RandomGenerator) DateTime() time.Time {
	max := time.Date(2016, time.July, 11, 23, 0, 0, 0, time.UTC).Unix()
	unix := r.rand.Int63n(max)
	return time.Unix(unix, 0)
}
// Bool produces a random boolean: true when the next draw is even.
func (r *RandomGenerator) Bool() bool {
	n := r.rand.Int()
	// n is non-negative, so n&1 == 0 is exactly n%2 == 0.
	return n&1 == 0
}
// Float64 produces a random float64 value in [0.0, 1.0).
func (r *RandomGenerator) Float64() float64 {
	f := r.rand.Float64()
	return f
}
Make DateTime examples consistent across generation
package design
import (
"crypto/md5"
"encoding/binary"
"math/rand"
"time"
"github.com/manveru/faker"
)
// RandomGenerator generates consistent random values of different types given a seed.
// The random values are consistent in that given the same seed the same random values get
// generated.
type RandomGenerator struct {
Seed string
faker *faker.Faker
rand *rand.Rand
}
// NewRandomGenerator returns a random value generator seeded from the given
// string value. The seed is hashed with MD5 and the first 8 bytes of the
// digest seed a math/rand source, so identical seeds always yield identical
// pseudo random sequences.
func NewRandomGenerator(seed string) *RandomGenerator {
	hasher := md5.New()
	hasher.Write([]byte(seed))
	sint := int64(binary.BigEndian.Uint64(hasher.Sum(nil)))
	source := rand.NewSource(sint)
	ran := rand.New(source)
	// "en" matches the dictionary actually loaded below; the previous
	// value "end" was a typo and did not name the loaded dictionary.
	faker := &faker.Faker{
		Language: "en",
		Dict:     faker.Dict["en"],
		Rand:     ran,
	}
	return &RandomGenerator{
		Seed:  seed,
		faker: faker,
		rand:  ran,
	}
}
// Int produces a random integer.
// Drawn from the seeded generator, so sequences repeat for equal seeds.
func (r *RandomGenerator) Int() int {
	return r.rand.Int()
}
// String produces a random string: a two-word fake sentence without
// supplemental words.
func (r *RandomGenerator) String() string {
	return r.faker.Sentence(2, false)
}
// DateTime produces a random date between the Unix epoch and a fixed
// 2016 instant. The bound is constant (not time.Now()) so equal seeds
// produce equal dates across runs.
func (r *RandomGenerator) DateTime() time.Time {
	// Use a constant max value to make sure the same pseudo random
	// values get generated for a given API.
	max := time.Date(2016, time.July, 11, 23, 0, 0, 0, time.UTC).Unix()
	unix := r.rand.Int63n(max)
	return time.Unix(unix, 0)
}
// Bool produces a random boolean: true when the next draw is even.
func (r *RandomGenerator) Bool() bool {
	return r.rand.Int()%2 == 0
}
// Float64 produces a random float64 value in [0.0, 1.0).
func (r *RandomGenerator) Float64() float64 {
	return r.rand.Float64()
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"net/http"
"runtime"
"time"
"github.com/fsnotify/fsnotify"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
"k8s.io/test-infra/boskos/crds"
"k8s.io/test-infra/boskos/handlers"
"k8s.io/test-infra/boskos/metrics"
"k8s.io/test-infra/boskos/ranch"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/interrupts"
"k8s.io/test-infra/prow/logrusutil"
prowmetrics "k8s.io/test-infra/prow/metrics"
"k8s.io/test-infra/prow/pjutil"
)
const (
defaultDynamicResourceUpdatePeriod = 10 * time.Minute
defaultRequestTTL = 30 * time.Second
defaultRequestGCPeriod = time.Minute
)
var (
configPath = flag.String("config", "config.yaml", "Path to init resource file")
dynamicResourceUpdatePeriod = flag.Duration("dynamic-resource-update-period", defaultDynamicResourceUpdatePeriod,
"Period at which to update dynamic resources. Set to 0 to disable.")
storagePath = flag.String("storage", "", "Path to persistent volume to load the state")
requestTTL = flag.Duration("request-ttl", defaultRequestTTL, "request TTL before losing priority in the queue")
kubeClientOptions crds.KubernetesClientOptions
logLevel = flag.String("log-level", "info", fmt.Sprintf("Log level is one of %v.", logrus.AllLevels))
namespace = flag.String("namespace", corev1.NamespaceDefault, "namespace to install on")
)
var (
httpRequestDuration = prowmetrics.HttpRequestDuration("boskos", 0.005, 1200)
httpResponseSize = prowmetrics.HttpResponseSize("boskos", 128, 65536)
traceHandler = prowmetrics.TraceHandler(handlers.NewBoskosSimplifier(), httpRequestDuration, httpResponseSize)
)
// init registers the boskos HTTP metrics with the default Prometheus
// registry; MustRegister panics on duplicate registration.
func init() {
	prometheus.MustRegister(httpRequestDuration, httpResponseSize)
}
// main wires up and runs the boskos server: logging/flags, profiling and
// metrics, the CRD-backed storage and ranch, config hot-reload via viper,
// and finally the HTTP server with graceful shutdown. The ordering below
// (health endpoint before readiness, deferred shutdown first) is
// deliberate — do not reorder casually.
func main() {
	logrusutil.ComponentInit("boskos")
	kubeClientOptions.AddFlags(flag.CommandLine)
	flag.Parse()
	level, err := logrus.ParseLevel(*logLevel)
	if err != nil {
		logrus.WithError(err).Fatal("invalid log level specified")
	}
	logrus.SetLevel(level)
	kubeClientOptions.Validate()
	// collect data on mutex holders and blocking profiles
	runtime.SetBlockProfileRate(1)
	runtime.SetMutexProfileFraction(1)
	// Deferred first so it runs last: block main until interrupt-managed
	// servers/goroutines have shut down.
	defer interrupts.WaitForGracefulShutdown()
	pjutil.ServePProf()
	prowmetrics.ExposeMetrics("boskos", config.PushGateway{})
	// signal to the world that we are healthy
	// this needs to be in a separate port as we don't start the
	// main server with the main mux until we're ready
	health := pjutil.NewHealth()
	client, err := kubeClientOptions.CacheBackedClient(*namespace, &crds.ResourceObject{}, &crds.DRLCObject{})
	if err != nil {
		logrus.WithError(err).Fatal("unable to get client")
	}
	storage, err := ranch.NewStorage(interrupts.Context(), client, *namespace, *storagePath)
	if err != nil {
		logrus.WithError(err).Fatal("failed to create storage")
	}
	r, err := ranch.NewRanch(*configPath, storage, *requestTTL)
	if err != nil {
		logrus.WithError(err).Fatalf("failed to create ranch! Config: %v", *configPath)
	}
	boskos := &http.Server{
		Handler: traceHandler(handlers.NewBoskosHandler(r)),
		Addr:    ":8080",
	}
	// Watch the config file and re-sync the ranch whenever it changes.
	v := viper.New()
	v.SetConfigFile(*configPath)
	v.SetConfigType("yaml")
	v.WatchConfig()
	v.OnConfigChange(func(in fsnotify.Event) {
		logrus.Infof("Updating Boskos Config")
		if err := r.SyncConfig(*configPath); err != nil {
			logrus.WithError(err).Errorf("Failed to update config")
		} else {
			logrus.Infof("Updated Boskos Config successfully")
		}
	})
	prometheus.MustRegister(metrics.NewResourcesCollector(r))
	r.StartDynamicResourceUpdater(*dynamicResourceUpdatePeriod)
	r.StartRequestGC(defaultRequestGCPeriod)
	logrus.Info("Start Service")
	// NOTE(review): interrupts.ListenAndServe appears non-blocking, since
	// ServeReady below must be reached — confirm against the interrupts
	// package.
	interrupts.ListenAndServe(boskos, 5*time.Second)
	// signal to the world that we're ready
	health.ServeReady()
}
Allow running boskos without a config file
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"net/http"
"runtime"
"time"
"github.com/fsnotify/fsnotify"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
"k8s.io/test-infra/boskos/crds"
"k8s.io/test-infra/boskos/handlers"
"k8s.io/test-infra/boskos/metrics"
"k8s.io/test-infra/boskos/ranch"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/interrupts"
"k8s.io/test-infra/prow/logrusutil"
prowmetrics "k8s.io/test-infra/prow/metrics"
"k8s.io/test-infra/prow/pjutil"
)
const (
defaultDynamicResourceUpdatePeriod = 10 * time.Minute
defaultRequestTTL = 30 * time.Second
defaultRequestGCPeriod = time.Minute
)
var (
configPath = flag.String("config", "config.yaml", "Path to init resource file")
dynamicResourceUpdatePeriod = flag.Duration("dynamic-resource-update-period", defaultDynamicResourceUpdatePeriod,
"Period at which to update dynamic resources. Set to 0 to disable.")
storagePath = flag.String("storage", "", "Path to persistent volume to load the state")
requestTTL = flag.Duration("request-ttl", defaultRequestTTL, "request TTL before losing priority in the queue")
kubeClientOptions crds.KubernetesClientOptions
logLevel = flag.String("log-level", "info", fmt.Sprintf("Log level is one of %v.", logrus.AllLevels))
namespace = flag.String("namespace", corev1.NamespaceDefault, "namespace to install on")
)
var (
httpRequestDuration = prowmetrics.HttpRequestDuration("boskos", 0.005, 1200)
httpResponseSize = prowmetrics.HttpResponseSize("boskos", 128, 65536)
traceHandler = prowmetrics.TraceHandler(handlers.NewBoskosSimplifier(), httpRequestDuration, httpResponseSize)
)
// init registers the boskos HTTP metrics with the default Prometheus
// registry; MustRegister panics on duplicate registration.
func init() {
	prometheus.MustRegister(httpRequestDuration, httpResponseSize)
}
// main wires up and runs the boskos server: logging/flags, profiling and
// metrics, the CRD-backed storage and ranch, optional config hot-reload
// via viper, and finally the HTTP server with graceful shutdown. The
// ordering below (health endpoint before readiness, deferred shutdown
// first) is deliberate — do not reorder casually.
func main() {
	logrusutil.ComponentInit("boskos")
	kubeClientOptions.AddFlags(flag.CommandLine)
	flag.Parse()
	level, err := logrus.ParseLevel(*logLevel)
	if err != nil {
		logrus.WithError(err).Fatal("invalid log level specified")
	}
	logrus.SetLevel(level)
	kubeClientOptions.Validate()
	// collect data on mutex holders and blocking profiles
	runtime.SetBlockProfileRate(1)
	runtime.SetMutexProfileFraction(1)
	// Deferred first so it runs last: block main until interrupt-managed
	// servers/goroutines have shut down.
	defer interrupts.WaitForGracefulShutdown()
	pjutil.ServePProf()
	prowmetrics.ExposeMetrics("boskos", config.PushGateway{})
	// signal to the world that we are healthy
	// this needs to be in a separate port as we don't start the
	// main server with the main mux until we're ready
	health := pjutil.NewHealth()
	client, err := kubeClientOptions.CacheBackedClient(*namespace, &crds.ResourceObject{}, &crds.DRLCObject{})
	if err != nil {
		logrus.WithError(err).Fatal("unable to get client")
	}
	storage, err := ranch.NewStorage(interrupts.Context(), client, *namespace, *storagePath)
	if err != nil {
		logrus.WithError(err).Fatal("failed to create storage")
	}
	r, err := ranch.NewRanch(*configPath, storage, *requestTTL)
	if err != nil {
		logrus.WithError(err).Fatalf("failed to create ranch! Config: %v", *configPath)
	}
	boskos := &http.Server{
		Handler: traceHandler(handlers.NewBoskosHandler(r)),
		Addr:    ":8080",
	}
	// Viper defaults the configfile name to `config` and `SetConfigFile` only
	// has an effect when the configfile name is not an empty string, so we
	// just disable it entirely if there is no config.
	if *configPath != "" {
		v := viper.New()
		v.SetConfigFile(*configPath)
		v.SetConfigType("yaml")
		v.WatchConfig()
		v.OnConfigChange(func(in fsnotify.Event) {
			logrus.Infof("Updating Boskos Config")
			if err := r.SyncConfig(*configPath); err != nil {
				logrus.WithError(err).Errorf("Failed to update config")
			} else {
				logrus.Infof("Updated Boskos Config successfully")
			}
		})
	}
	prometheus.MustRegister(metrics.NewResourcesCollector(r))
	r.StartDynamicResourceUpdater(*dynamicResourceUpdatePeriod)
	r.StartRequestGC(defaultRequestGCPeriod)
	logrus.Info("Start Service")
	// NOTE(review): interrupts.ListenAndServe appears non-blocking, since
	// ServeReady below must be reached — confirm against the interrupts
	// package.
	interrupts.ListenAndServe(boskos, 5*time.Second)
	// signal to the world that we're ready
	health.ServeReady()
}
|
package http
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"github.com/pilosa/pilosa"
)
// Ensure implementation implements interface.
var _ pilosa.TranslateStore = (*TranslateStore)(nil)
// TranslateStore represents an implementation of TranslateStore that
// communicates over HTTP. This is used with the TranslateHandler.
type TranslateStore struct {
URL string
}
// NewTranslateStore returns a new instance of TranslateStore.
func NewTranslateStore(rawurl string) *TranslateStore {
	s := &TranslateStore{}
	s.URL = rawurl
	return s
}
// TranslateColumnsToUint64 is not currently implemented.
// Only Reader is implemented for this HTTP-backed store; every translate
// method is a stub returning pilosa.ErrNotImplemented.
func (s *TranslateStore) TranslateColumnsToUint64(index string, values []string) ([]uint64, error) {
	return nil, pilosa.ErrNotImplemented
}
// TranslateColumnToString is not currently implemented.
// NOTE(review): the parameter is named "values" but holds a single uint64;
// consider renaming to "value" when this is implemented.
func (s *TranslateStore) TranslateColumnToString(index string, values uint64) (string, error) {
	return "", pilosa.ErrNotImplemented
}
// TranslateRowsToUint64 is not currently implemented.
// Stub: always returns pilosa.ErrNotImplemented.
func (s *TranslateStore) TranslateRowsToUint64(index, frame string, values []string) ([]uint64, error) {
	return nil, pilosa.ErrNotImplemented
}
// TranslateRowToString is not currently implemented.
// Stub: always returns pilosa.ErrNotImplemented. NOTE(review): "values"
// holds a single uint64; consider renaming to "value" when implemented.
func (s *TranslateStore) TranslateRowToString(index, frame string, values uint64) (string, error) {
	return "", pilosa.ErrNotImplemented
}
// Reader returns a reader that can stream data from a remote store.
// On success the returned ReadCloser is the open response body: the caller
// owns it and must close it. Cancellation is wired through ctx.
func (s *TranslateStore) Reader(ctx context.Context, off int64) (io.ReadCloser, error) {
	// Generate remote URL.
	u, err := url.Parse(s.URL)
	if err != nil {
		return nil, err
	}
	u.Path = "/internal/translate/data"
	u.RawQuery = (url.Values{
		"offset": {strconv.FormatInt(off, 10)},
	}).Encode()
	// Build the streaming request.
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	// Connect a stream to the remote server.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("http: cannot connect to translate store endpoint: %s", err)
	}
	// Handle error codes or return body as stream.
	switch resp.StatusCode {
	case http.StatusOK:
		// Success: hand the still-open body to the caller for streaming.
		return resp.Body, nil
	case http.StatusNotImplemented:
		resp.Body.Close()
		return nil, pilosa.ErrNotImplemented
	default:
		// Include the (trimmed) body in the error to aid debugging.
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("http: invalid translate store endpoint status: code=%d url=%s body=%q", resp.StatusCode, u.String(), bytes.TrimSpace(body))
	}
}
Unexport http.TranslateStore
package http
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"github.com/pilosa/pilosa"
)
// Ensure implementation implements interface.
var _ pilosa.TranslateStore = (*translateStore)(nil)
// translateStore represents an implementation of translateStore that
// communicates over HTTP. This is used with the TranslateHandler.
type translateStore struct {
URL string
}
// NewTranslateStore returns a new instance of TranslateStore.
// NOTE(review): this exported constructor returns the unexported
// *translateStore; external callers can only use the value through the
// pilosa.TranslateStore interface. Consider returning that interface type.
func NewTranslateStore(rawurl string) *translateStore {
	return &translateStore{URL: rawurl}
}
// TranslateColumnsToUint64 is not currently implemented.
func (s *translateStore) TranslateColumnsToUint64(index string, values []string) ([]uint64, error) {
return nil, pilosa.ErrNotImplemented
}
// TranslateColumnToString is not currently implemented.
func (s *translateStore) TranslateColumnToString(index string, values uint64) (string, error) {
return "", pilosa.ErrNotImplemented
}
// TranslateRowsToUint64 is not currently implemented.
func (s *translateStore) TranslateRowsToUint64(index, frame string, values []string) ([]uint64, error) {
return nil, pilosa.ErrNotImplemented
}
// TranslateRowToString is not currently implemented.
func (s *translateStore) TranslateRowToString(index, frame string, values uint64) (string, error) {
return "", pilosa.ErrNotImplemented
}
// Reader returns a reader that can stream data from a remote store.
// On success the returned ReadCloser is the open response body: the caller
// owns it and must close it. Cancellation is wired through ctx.
func (s *translateStore) Reader(ctx context.Context, off int64) (io.ReadCloser, error) {
	// Generate remote URL.
	u, err := url.Parse(s.URL)
	if err != nil {
		return nil, err
	}
	u.Path = "/internal/translate/data"
	u.RawQuery = (url.Values{
		"offset": {strconv.FormatInt(off, 10)},
	}).Encode()
	// Build the streaming request.
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	// Connect a stream to the remote server.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("http: cannot connect to translate store endpoint: %s", err)
	}
	// Handle error codes or return body as stream.
	switch resp.StatusCode {
	case http.StatusOK:
		// Success: hand the still-open body to the caller for streaming.
		return resp.Body, nil
	case http.StatusNotImplemented:
		resp.Body.Close()
		return nil, pilosa.ErrNotImplemented
	default:
		// Include the (trimmed) body in the error to aid debugging.
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("http: invalid translate store endpoint status: code=%d url=%s body=%q", resp.StatusCode, u.String(), bytes.TrimSpace(body))
	}
}
|
package httpseverywhere
import (
"bytes"
"encoding/gob"
"encoding/xml"
"regexp"
"strings"
"github.com/getlantern/golog"
"github.com/getlantern/tldextract"
)
var (
log = golog.LoggerFor("httpseverywhere")
extract = tldextract.New()
)
// rewrite changes an HTTP URL to rewrite.
type rewrite func(url string) (string, bool)
// Rewrite exports the rewrite method for users of this library.
var Rewrite = new()
type https struct {
log golog.Logger
targets map[string]*Rules
}
// A rule maps the regular expression to match and the string to change it to.
// It also stores the compiled regular expression for efficiency.
type rule struct {
from *regexp.Regexp
From string
To string
}
// An exclusion just contains the compiled regular expression exclusion pattern.
type exclusion struct {
Pattern string
pattern *regexp.Regexp
}
// Rules is a struct containing rules and exclusions for a given rule set. This
// is public so that we can encode and decode it from GOB format.
type Rules struct {
Rules []*rule
Exclusions []*exclusion
}
// new creates a new rewrite instance from embedded GOB data.
// The GOB asset stores only the textual patterns (From/Pattern), so the
// compiled regexps must be rebuilt after decoding.
func new() rewrite {
	data := MustAsset("targets.gob")
	buf := bytes.NewBuffer(data)
	dec := gob.NewDecoder(buf)
	targets := make(map[string]*Rules)
	err := dec.Decode(&targets)
	if err != nil {
		// NOTE(review): a decode failure returns a nil rewrite func, and
		// invoking the package-level Rewrite would then panic — confirm
		// this is acceptable for a corrupt embedded asset.
		log.Errorf("Could not decode: %v", err)
		return nil
	}
	// The compiled regular expressions aren't serialized, so we have to manually
	// compile them.
	// NOTE(review): compile errors are discarded, so a bad pattern leaves a
	// nil regexp that would panic at match time. Presumably the patterns
	// were validated before being embedded — verify.
	for _, v := range targets {
		for _, r := range v.Rules {
			r.from, _ = regexp.Compile(r.From)
		}
		for _, e := range v.Exclusions {
			e.pattern, _ = regexp.Compile(e.Pattern)
		}
	}
	return newRewrite(targets)
}
// AddRuleSet adds the specified rule set to the map of targets. Returns
// whether or not the rule set processed correctly and the number of
// duplicate targets encountered. Duplicates are ignored but are considered
// to have processed correctly.
func AddRuleSet(rules []byte, targets map[string]*Rules) (bool, int) {
	var r Ruleset
	if err := xml.Unmarshal(rules, &r); err != nil {
		// Malformed XML would otherwise be silently processed as a
		// zero-valued Ruleset.
		log.Debugf("Could not unmarshal rule set: %v", err)
		return false, 0
	}
	// If the rule is turned off, ignore it.
	if len(r.Off) > 0 {
		return false, 0
	}
	// We don't run on any platforms (aka Tor) that support mixed content, so
	// ignore any rule that is mixedcontent-only.
	if r.Platform == "mixedcontent" {
		return false, 0
	}
	rs, err := ruleSetToRules(r)
	if err != nil {
		return false, 0
	}
	duplicates := 0
	for _, target := range r.Target {
		if strings.HasPrefix(target.Host, "*") {
			// This artificially turns the target into a valid URL for processing
			// by TLD extract.
			urlStr := "http://" + strings.Replace(target.Host, "*", "sub", 1)
			e := extract.Extract(urlStr)
			if strings.Contains(e.Sub, ".") {
				// Skip only this wildcard target. Returning here (as the
				// code previously did) silently dropped every remaining
				// target in the rule set.
				log.Debugf("Ignoring wildcard rule with multiple subdomains: %+v;%s\n", e, target.Host)
				continue
			}
			duplicates += addRules(targets, target.Host, rs)
		} else {
			duplicates += addRules(targets, target.Host, rs)
		}
	}
	return true, duplicates
}
// addRules inserts rules for host unless the host is already present.
// Returns 1 when the host was a duplicate (and thus skipped), 0 otherwise.
func addRules(targets map[string]*Rules, host string, rules *Rules) int {
	_, exists := targets[host]
	if exists {
		return 1
	}
	targets[host] = rules
	return 0
}
// rewrite converts the given URL to HTTPS if there is an associated rule
// for it. Exclusions are consulted first; the first matching rule wins.
func (r *Rules) rewrite(url string) (string, bool) {
	for _, ex := range r.Exclusions {
		if ex.pattern.MatchString(url) {
			return url, false
		}
	}
	for _, ru := range r.Rules {
		if !ru.from.MatchString(url) {
			continue
		}
		return ru.from.ReplaceAllString(url, ru.To), true
	}
	return url, false
}
// newHTTPS creates a new rewrite instance from a single rule set string. In
// practice this is used for testing.
func newHTTPS(rules string) (rewrite, map[string]*Rules) {
	t := make(map[string]*Rules)
	AddRuleSet([]byte(rules), t)
	rw := newRewrite(t)
	return rw, t
}
// newRewrite wraps the targets map in an https instance and exposes its
// rewrite method as the rewrite func type.
func newRewrite(targets map[string]*Rules) rewrite {
	h := &https{log: log, targets: targets}
	return h.rewrite
}
// ruleSetToRules compiles a parsed Ruleset into Rules with ready-to-use
// regular expressions, rejecting the whole set if any pattern fails to
// compile.
func ruleSetToRules(set Ruleset) (*Rules, error) {
	mod := make([]*rule, 0)
	for _, r := range set.Rule {
		// We ignore any rules that attempt to redirect to HTTP, as they would
		// trigger mixed content in most cases (all cases in browsers that don't
		// allow mixed content)?
		if r.To == "http:" {
			continue
		}
		compiled, err := regexp.Compile(r.From)
		if err != nil {
			log.Debugf("Could not compile regex: %v", err)
			return nil, err
		}
		mod = append(mod, &rule{From: r.From, from: compiled, To: r.To})
	}
	exclude := make([]*exclusion, 0)
	for _, e := range set.Exclusion {
		compiled, err := regexp.Compile(e.Pattern)
		if err != nil {
			log.Debugf("Could not compile regex for exclusion: %v", err)
			return nil, err
		}
		exclude = append(exclude, &exclusion{Pattern: e.Pattern, pattern: compiled})
	}
	return &Rules{Rules: mod, Exclusions: exclude}, nil
}
// rewrite looks up rules for the URL's domain — exact root domain first,
// then the sub.root.* wildcard-suffix key, then the "*." prefix key — and
// applies the first matching rule set.
func (h *https) rewrite(urlStr string) (string, bool) {
	result := extract.Extract(urlStr)
	domain := result.Root + "." + result.Tld
	keys := []string{domain, wildcardSuffix(result), "*." + domain}
	for _, key := range keys {
		if rules, ok := h.targets[key]; ok {
			return rules.rewrite(urlStr)
		}
	}
	return urlStr, false
}
// wildcardSuffix builds the "sub.root.*" lookup key used for rule sets
// whose target wildcard sits in the TLD position.
func wildcardSuffix(result *tldextract.Result) string {
	prefix := ""
	if len(result.Sub) > 0 {
		prefix = result.Sub + "."
	}
	return prefix + result.Root + ".*"
}
fix for premature return from loop
package httpseverywhere
import (
"bytes"
"encoding/gob"
"encoding/xml"
"regexp"
"strings"
"github.com/getlantern/golog"
"github.com/getlantern/tldextract"
)
var (
log = golog.LoggerFor("httpseverywhere")
extract = tldextract.New()
)
// rewrite changes an HTTP URL to rewrite.
type rewrite func(url string) (string, bool)
// Rewrite exports the rewrite method for users of this library.
var Rewrite = new()
type https struct {
log golog.Logger
targets map[string]*Rules
}
// A rule maps the regular expression to match and the string to change it to.
// It also stores the compiled regular expression for efficiency.
type rule struct {
from *regexp.Regexp
From string
To string
}
// An exclusion just contains the compiled regular expression exclusion pattern.
type exclusion struct {
Pattern string
pattern *regexp.Regexp
}
// Rules is a struct containing rules and exclusions for a given rule set. This
// is public so that we can encode and decode it from GOB format.
type Rules struct {
Rules []*rule
Exclusions []*exclusion
}
// new creates a new rewrite instance from embedded GOB data.
func new() rewrite {
data := MustAsset("targets.gob")
buf := bytes.NewBuffer(data)
dec := gob.NewDecoder(buf)
targets := make(map[string]*Rules)
err := dec.Decode(&targets)
if err != nil {
log.Errorf("Could not decode: %v", err)
return nil
}
// The compiled regular expressions aren't serialized, so we have to manually
// compile them.
for _, v := range targets {
for _, r := range v.Rules {
r.from, _ = regexp.Compile(r.From)
}
for _, e := range v.Exclusions {
e.pattern, _ = regexp.Compile(e.Pattern)
}
}
return newRewrite(targets)
}
// AddRuleSet adds the specified rule set to the map of targets. Returns
// whether or not the rule set processed correctly and the number of
// duplicate targets encountered. Duplicates are ignored but are considered
// to have processed correctly.
func AddRuleSet(rules []byte, targets map[string]*Rules) (bool, int) {
	var r Ruleset
	if err := xml.Unmarshal(rules, &r); err != nil {
		// Malformed XML would otherwise be silently processed as a
		// zero-valued Ruleset.
		log.Debugf("Could not unmarshal rule set: %v", err)
		return false, 0
	}
	// If the rule is turned off, ignore it.
	if len(r.Off) > 0 {
		return false, 0
	}
	// We don't run on any platforms (aka Tor) that support mixed content, so
	// ignore any rule that is mixedcontent-only.
	if r.Platform == "mixedcontent" {
		return false, 0
	}
	rs, err := ruleSetToRules(r)
	if err != nil {
		return false, 0
	}
	duplicates := 0
	for _, target := range r.Target {
		if strings.HasPrefix(target.Host, "*") {
			// This artificially turns the target into a valid URL for processing
			// by TLD extract.
			urlStr := "http://" + strings.Replace(target.Host, "*", "pre", 1)
			e := extract.Extract(urlStr)
			if strings.Contains(e.Sub, ".") {
				// Skip only this wildcard target and keep processing the rest.
				log.Debugf("Ignoring wildcard rule with multiple subdomains: %+v;%s\n", e, target.Host)
				continue
			}
			duplicates += addRules(targets, target.Host, rs)
		} else {
			duplicates += addRules(targets, target.Host, rs)
		}
	}
	return true, duplicates
}
func addRules(targets map[string]*Rules, host string, rules *Rules) int {
if _, ok := targets[host]; ok {
// Ignoring duplicate.
return 1
}
targets[host] = rules
return 0
}
// rewrite converts the given URL to HTTPS if there is an associated rule for
// it.
func (r *Rules) rewrite(url string) (string, bool) {
for _, exclude := range r.Exclusions {
if exclude.pattern.MatchString(url) {
return url, false
}
}
for _, rule := range r.Rules {
if rule.from.MatchString(url) {
return rule.from.ReplaceAllString(url, rule.To), true
}
}
return url, false
}
// newHTTPS creates a new rewrite instance from a single rule set string. In
// practice this is used for testing.
func newHTTPS(rules string) (rewrite, map[string]*Rules) {
targets := make(map[string]*Rules)
AddRuleSet([]byte(rules), targets)
return newRewrite(targets), targets
}
func newRewrite(targets map[string]*Rules) rewrite {
return (&https{log: log, targets: targets}).rewrite
}
func ruleSetToRules(set Ruleset) (*Rules, error) {
mod := make([]*rule, 0)
for _, r := range set.Rule {
// We ignore any rules that attempt to redirect to HTTP, as they would
// trigger mixed content in most cases (all cases in browsers that don't
// allow mixed content)?
if r.To == "http:" {
continue
}
f, err := regexp.Compile(r.From)
if err != nil {
log.Debugf("Could not compile regex: %v", err)
return nil, err
}
mod = append(mod, &rule{From: r.From, from: f, To: r.To})
}
exclude := make([]*exclusion, 0)
for _, e := range set.Exclusion {
p, err := regexp.Compile(e.Pattern)
if err != nil {
log.Debugf("Could not compile regex for exclusion: %v", err)
return nil, err
}
exclude = append(exclude, &exclusion{Pattern: e.Pattern, pattern: p})
}
return &Rules{Rules: mod, Exclusions: exclude}, nil
}
func (h *https) rewrite(urlStr string) (string, bool) {
result := extract.Extract(urlStr)
domain := result.Root + "." + result.Tld
if rules, ok := h.targets[domain]; ok {
//h.log.Debugf("Got rules: %+v", rules)
return rules.rewrite(urlStr)
}
if rules, ok := h.targets[wildcardSuffix(result)]; ok {
//h.log.Debugf("Got suffix rules: %+v", rules)
return rules.rewrite(urlStr)
}
if rules, ok := h.targets["*."+domain]; ok {
//h.log.Debugf("Got prefix rules: %+v", rules)
return rules.rewrite(urlStr)
}
return urlStr, false
}
func wildcardSuffix(result *tldextract.Result) string {
var base string
if len(result.Sub) > 0 {
base = result.Sub + "."
}
return base + result.Root + ".*"
}
|
package hueupnp
import (
"bytes"
"errors"
"fmt"
"log"
"net"
"strings"
"text/template"
"golang.org/x/net/ipv4"
)
/**
stolen from amazon-echo-ha-bridge
String discoveryTemplate = "HTTP/1.1 200 OK\r\n" +
"CACHE-CONTROL: max-age=86400\r\n" +
"EXT:\r\n" +
"LOCATION: http://%s:%s/upnp/%s/setup.xml\r\n" +
"OPT: \"http://schemas.upnp.org/upnp/1/0/\"; ns=01\r\n" +
"01-NLS: %s\r\n" +
"ST: urn:schemas-upnp-org:device:basic:1\r\n" +
"USN: uuid:Socket-1_0-221438K0100073::urn:Belkin:device:**\r\n\r\n";
**/
// upnpTemplate renders the SSDP discovery response sent to M-SEARCH probes.
// NOTE(review): the raw string emits bare "\n" line endings, while the
// Java template quoted above uses "\r\n" as SSDP/HTTP responses normally
// require. Some clients are lenient — confirm the targeted devices accept
// LF-only headers before changing this.
var upnpTemplate = template.Must(template.New("upnp").Parse(`HTTP/1.1 200 OK
CACHE-CONTROL: max-age=86400
EXT:
LOCATION: {{.location}}
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:{{.uuid}}::urn:Belkin:device:**
`))
// createSocket opens the SSDP multicast listener: it binds UDP
// 0.0.0.0:1900 and joins the 239.255.255.250 group on every interface
// that has at least one real (specified) address.
func createSocket() (*ipv4.PacketConn, net.PacketConn, error) {
	group := net.IPv4(239, 255, 255, 250)
	interfaces, err := net.Interfaces()
	if err != nil {
		// log.Printf, not log.Fatalf: Fatalf exits the process and made
		// the error return below unreachable dead code.
		log.Printf("net.Interfaces error: %s", err)
		return nil, nil, err
	}
	con, err := net.ListenPacket("udp4", "0.0.0.0:1900")
	if err != nil {
		log.Printf("net.ListenPacket error: %s", err)
		return nil, nil, err
	}
	p := ipv4.NewPacketConn(con)
	p.SetMulticastLoopback(true)
	didFindInterface := false
	for i, v := range interfaces {
		addrs, err := v.Addrs()
		if err != nil {
			continue
		}
		hasRealAddress := false
		for _, a := range addrs {
			// Interface addresses are typically *net.IPNet, whose String()
			// is CIDR notation ("ip/mask") — not valid input for
			// net.ParseIP, which previously returned nil here and made the
			// unspecified-address check meaningless. Extract the IP from
			// the concrete address type instead.
			var ip net.IP
			switch t := a.(type) {
			case *net.IPNet:
				ip = t.IP
			case *net.IPAddr:
				ip = t.IP
			}
			if ip == nil || ip.IsUnspecified() {
				continue
			}
			hasRealAddress = true
			break
		}
		if !hasRealAddress {
			continue
		}
		err = p.JoinGroup(&v, &net.UDPAddr{IP: group})
		if err != nil {
			log.Printf("join group %d %s", i, err)
			continue
		}
		didFindInterface = true
	}
	if !didFindInterface {
		return nil, nil, errors.New("Unable to find a compatible network interface!")
	}
	return p, con, nil
}
// CreateUPNPResponder takes in the setupLocation http://[IP]:[POST]/upnp/setup.xml
// and answers SSDP M-SEARCH discovery probes on UDP 1900 with the rendered
// UPnP template. Runs forever; panics if the multicast socket cannot be
// set up.
func CreateUPNPResponder(setupLocation string, uuid string, upnpAddr string) {
	sock, rawCon, err := createSocket()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	defer rawCon.Close()
	for {
		// 2048 bytes: discovery requests from some clients exceed the
		// previous 1024-byte buffer and were truncated.
		b := make([]byte, 2048)
		n, src, err := rawCon.ReadFrom(b)
		if err != nil {
			log.Fatal("[UPNP] ReadFromUDP failed:", err)
		}
		if strings.HasPrefix(string(b[:n]), "M-SEARCH * HTTP/1.1") && strings.Contains(string(b[:n]), "MAN: \"ssdp:discover\"") {
			addr, err := net.ResolveUDPAddr("udp4", src.String())
			if err != nil {
				log.Fatal("[UPNP] DialUDP failed:", err)
			}
			log.Println("[UPNP] discovery request from", src)
			// buf (not b) avoids shadowing the read buffer above.
			buf := &bytes.Buffer{}
			err = upnpTemplate.Execute(buf, map[string]string{"location": setupLocation, "uuid": uuid})
			if err != nil {
				log.Fatal("[UPNP] execute template failed:", err)
			}
			fmt.Printf("[UPNP] Sending\n%s\nto %s\n", buf.Bytes(), src)
			// A failed unicast reply is not fatal; log and keep serving.
			if _, err := rawCon.WriteTo(buf.Bytes(), addr); err != nil {
				log.Println("[UPNP] WriteTo failed:", err)
			}
		}
	}
}
Increase buffer size
package hueupnp
import (
"bytes"
"errors"
"fmt"
"log"
"net"
"strings"
"text/template"
"golang.org/x/net/ipv4"
)
/**
stolen from amazon-echo-ha-bridge
String discoveryTemplate = "HTTP/1.1 200 OK\r\n" +
"CACHE-CONTROL: max-age=86400\r\n" +
"EXT:\r\n" +
"LOCATION: http://%s:%s/upnp/%s/setup.xml\r\n" +
"OPT: \"http://schemas.upnp.org/upnp/1/0/\"; ns=01\r\n" +
"01-NLS: %s\r\n" +
"ST: urn:schemas-upnp-org:device:basic:1\r\n" +
"USN: uuid:Socket-1_0-221438K0100073::urn:Belkin:device:**\r\n\r\n";
**/
var upnpTemplate = template.Must(template.New("upnp").Parse(`HTTP/1.1 200 OK
CACHE-CONTROL: max-age=86400
EXT:
LOCATION: {{.location}}
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:{{.uuid}}::urn:Belkin:device:**
`))
// createSocket opens the SSDP multicast listener: it binds UDP
// 0.0.0.0:1900 and joins the 239.255.255.250 group on every interface
// that has at least one real (specified) address.
func createSocket() (*ipv4.PacketConn, net.PacketConn, error) {
	group := net.IPv4(239, 255, 255, 250)
	interfaces, err := net.Interfaces()
	if err != nil {
		// log.Printf, not log.Fatalf: Fatalf exits the process and made
		// the error return below unreachable dead code.
		log.Printf("net.Interfaces error: %s", err)
		return nil, nil, err
	}
	con, err := net.ListenPacket("udp4", "0.0.0.0:1900")
	if err != nil {
		log.Printf("net.ListenPacket error: %s", err)
		return nil, nil, err
	}
	p := ipv4.NewPacketConn(con)
	p.SetMulticastLoopback(true)
	didFindInterface := false
	for i, v := range interfaces {
		addrs, err := v.Addrs()
		if err != nil {
			continue
		}
		hasRealAddress := false
		for _, a := range addrs {
			// Interface addresses are typically *net.IPNet, whose String()
			// is CIDR notation ("ip/mask") — not valid input for
			// net.ParseIP, which previously returned nil here and made the
			// unspecified-address check meaningless. Extract the IP from
			// the concrete address type instead.
			var ip net.IP
			switch t := a.(type) {
			case *net.IPNet:
				ip = t.IP
			case *net.IPAddr:
				ip = t.IP
			}
			if ip == nil || ip.IsUnspecified() {
				continue
			}
			hasRealAddress = true
			break
		}
		if !hasRealAddress {
			continue
		}
		err = p.JoinGroup(&v, &net.UDPAddr{IP: group})
		if err != nil {
			log.Printf("join group %d %s", i, err)
			continue
		}
		didFindInterface = true
	}
	if !didFindInterface {
		return nil, nil, errors.New("Unable to find a compatible network interface!")
	}
	return p, con, nil
}
// CreateUPNPResponder listens for SSDP M-SEARCH discovery requests and
// answers each with a rendered upnpTemplate response. setupLocation is the
// URL of the setup XML (http://[IP]:[PORT]/upnp/setup.xml) and uuid is the
// device UUID advertised in the USN header. upnpAddr is currently unused.
//
// This function blocks forever servicing discovery requests; it panics if
// the multicast socket cannot be created.
func CreateUPNPResponder(setupLocation string, uuid string, upnpAddr string) {
	sock, rawCon, err := createSocket()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	defer rawCon.Close()
	// Reuse one read buffer across iterations instead of allocating 2K
	// per received datagram.
	buf := make([]byte, 2048)
	for {
		n, src, err := rawCon.ReadFrom(buf)
		if err != nil {
			log.Fatal("[UPNP] ReadFromUDP failed:", err)
		}
		msg := string(buf[:n])
		// Only respond to SSDP discovery probes.
		if !strings.HasPrefix(msg, "M-SEARCH * HTTP/1.1") || !strings.Contains(msg, "MAN: \"ssdp:discover\"") {
			continue
		}
		addr, err := net.ResolveUDPAddr("udp4", src.String())
		if err != nil {
			// A malformed source address only affects this request; do not
			// kill the responder (and the log message now names the call
			// that actually failed).
			log.Println("[UPNP] ResolveUDPAddr failed:", err)
			continue
		}
		log.Println("[UPNP] discovery request from", src)
		b := &bytes.Buffer{}
		if err := upnpTemplate.Execute(b, map[string]string{"location": setupLocation, "uuid": uuid}); err != nil {
			// Template failure is a programming error: fail hard.
			log.Fatal("[UPNP] execute template failed:", err)
		}
		fmt.Printf("[UPNP] Sending\n%s\nto %s\n", b.Bytes(), src)
		// b.Bytes() avoids the extra string copy of []byte(b.String()).
		if _, err := rawCon.WriteTo(b.Bytes(), addr); err != nil {
			log.Println("[UPNP] WriteTo failed:", err)
		}
	}
}
|
package device
import (
"bufio"
"fmt"
"log"
"path/filepath"
"sync"
"time"
"github.com/jochenvg/go-udev"
"github.com/tarm/serial"
)
// Manager manages devices that are plugged into the system. It supports auto
// detection of devices.
//
// Serial ports are opened each for a device, and a clean API for communicating
// is provided via Read, Write and Flush methods.
//
// The devices are monitored via udev, and any changes that requires reloading
// of the ports are handled by reloading the ports to the devices.
//
// This is safe to use concurrently in multiple goroutines
type Manager struct {
	devices map[string]serial.Config // serial config per /dev path, guarded by mu
	conn    []*Conn                  // open connections, rebuilt by reload
	mu      sync.RWMutex             // guards devices
	monitor *udev.Monitor            // udev monitor delivering device events
	done    chan struct{}            // cancellation channel handed to the udev monitor
	stop    chan struct{}            // signals Init's watch goroutine to exit
}
// New returns a new Manager instance with an empty device table and its
// control channels allocated; call Init to start watching udev events.
func New() *Manager {
	m := new(Manager)
	m.devices = make(map[string]serial.Config)
	m.done = make(chan struct{})
	m.stop = make(chan struct{})
	return m
}
// Init initializes the manager. This involves creating a new goroutine to watch
// over the changes detected by udev for any device interaction with the system.
//
// The only interesting device actions are add and remove, for adding and
// removing devices respectively.
func (m *Manager) Init() {
	u := udev.Udev{}
	monitor := u.NewMonitorFromNetlink("udev")
	// Only devices tagged "systemd" are observed.
	monitor.FilterAddMatchTag("systemd")
	devCh, err := monitor.DeviceChan(m.done)
	if err != nil {
		panic(err)
	}
	m.monitor = monitor
	go func() {
	stop:
		for {
			select {
			case d := <-devCh:
				switch d.Action() {
				case "add":
					// Map the udev devpath onto its /dev node name.
					dpath := filepath.Join("/dev", filepath.Base(d.Devpath()))
					m.AddDevice(dpath)
					fmt.Printf(" new device added %s\n", dpath)
					m.reload()
				case "remove":
					dpath := filepath.Join("/dev", filepath.Base(d.Devpath()))
					fmt.Printf(" %s was removed\n", dpath)
					m.RemoveDevice(dpath)
					m.reload()
				default:
					// Other actions (change, bind, ...) are only logged.
					fmt.Println(d.Action())
				}
			case quit := <-m.stop:
				// Forward the stop signal to the monitor's done channel,
				// then leave the watch loop.
				m.done <- quit
				break stop
			}
		}
	}()
}
// AddDevice adds device name to the manager, registering a default serial
// configuration (9600 baud, one-second read timeout) for it.
func (m *Manager) AddDevice(name string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.devices[name] = serial.Config{
		Name:        name,
		Baud:        9600,
		ReadTimeout: time.Second,
	}
	return nil
}
// RemoveDevice removes device name from the manager.
func (m *Manager) RemoveDevice(name string) error {
	// delete mutates the map, so a write lock is required; the original
	// RLock/RUnlock allowed a data race with concurrent AddDevice/reload.
	m.mu.Lock()
	delete(m.devices, name)
	m.mu.Unlock()
	return nil
}
// close all ports that are open for the devices
func (m *Manager) releaseAllPorts() {
	for _, conn := range m.conn {
		if err := conn.Close(); err != nil {
			log.Printf("[ERR] closing port %s %v\n", conn.device.Name, err)
		}
	}
}
// reload closes every open port and re-opens a connection for each known
// device, probing it with the IMEI query (AT+GSN) to verify the port is
// usable. Devices that fail the probe are skipped.
func (m *Manager) reload() {
	m.releaseAllPorts()
	var conns []*Conn
	// Hold a read lock while ranging: AddDevice/RemoveDevice may mutate
	// the map concurrently (the type is documented as concurrency-safe).
	m.mu.RLock()
	defer m.mu.RUnlock()
	for _, v := range m.devices {
		conn := &Conn{device: v}
		imei, err := conn.Exec("AT+GSN \r")
		if err != nil {
			log.Printf("[ERR] closing port %s %v\n", v.Name, err)
			_ = conn.Close()
			continue
		}
		// "IMEI" (was misspelled "EMEI" in the log output).
		fmt.Printf(" IMEI %s \n", string(imei))
		conns = append(conns, conn)
	}
	m.conn = conns
}
// Close shuts down the device manager. This makes sure the udev monitor is
// closed and all goroutines are properly exited.
func (m *Manager) Close() {
	// Init's watch goroutine receives this and forwards it to the udev
	// monitor's done channel before exiting its loop.
	m.stop <- struct{}{}
}
// Conn is a device serial connection.
type Conn struct {
	device serial.Config // serial settings used to (re)open the port
	port   *serial.Port  // open port handle; valid only while isOpen
	isOpen bool          // whether port currently holds an open handle
}
// Open opens a serial port to the underlying device and marks the
// connection as open on success.
func (c *Conn) Open() error {
	port, err := serial.OpenPort(&c.device)
	if err != nil {
		return err
	}
	c.port, c.isOpen = port, true
	return nil
}
// Close closes the port held by *Conn. It is a no-op when the port was
// never opened. After Close the connection is marked closed so a later
// Exec reopens the port instead of writing to a dead handle (the original
// left isOpen true forever once set).
func (c *Conn) Close() error {
	if !c.isOpen {
		return nil
	}
	c.isOpen = false
	return c.port.Close()
}
// Write writes b to the serial port.
func (c *Conn) Write(b []byte) (int, error) {
	return c.port.Write(b)
}
// Read reads from the serial port into b.
func (c *Conn) Read(b []byte) (int, error) {
	return c.port.Read(b)
}
// Exec sends the command over serial port and returns the response. If the
// port is closed it is opened before sending the command.
func (c *Conn) Exec(cmd string) ([]byte, error) {
	if !c.isOpen {
		fmt.Println("Opening port")
		err := c.Open()
		if err != nil {
			return nil, err
		}
	}
	// Drop any bytes still buffered once the exchange completes.
	defer func() { _ = c.port.Flush() }()
	_, err := c.Write([]byte(cmd))
	if err != nil {
		return nil, err
	}
	buf := bufio.NewReader(c)
	// The first line is discarded and the second returned.
	// NOTE(review): this assumes the modem echoes the command back (ATE1)
	// so line one is the echo and line two the response — confirm.
	line, err := buf.ReadString('\n')
	if err != nil {
		return nil, err
	}
	line, err = buf.ReadString('\n')
	if err != nil {
		return nil, err
	}
	return []byte(line), nil
}
Add modemCommands
This holds the string representations of the various modem commands
package device
import (
"bufio"
"fmt"
"log"
"path/filepath"
"sync"
"time"
"github.com/jochenvg/go-udev"
"github.com/tarm/serial"
)
// modemCommands holds the string representations of the AT commands used
// to query a modem: IMEI (AT+GSN) and IMSI (AT+CIMI).
var modemCommands = struct {
	IMEI, IMSI string
}{
	"AT+GSN", "AT+CIMI",
}
// Manager manages devices that are plugged into the system. It supports auto
// detection of devices.
//
// Serial ports are opened each for a device, and a clean API for communicating
// is provided via Read, Write and Flush methods.
//
// The devices are monitored via udev, and any changes that requires reloading
// of the ports are handled by reloading the ports to the devices.
//
// This is safe to use concurrently in multiple goroutines
type Manager struct {
	devices map[string]serial.Config // serial config per /dev path, guarded by mu
	conn    []*Conn                  // open connections, rebuilt by reload
	mu      sync.RWMutex             // guards devices
	monitor *udev.Monitor            // udev monitor delivering device events
	done    chan struct{}            // cancellation channel handed to the udev monitor
	stop    chan struct{}            // signals Init's watch goroutine to exit
}
// New returns a new Manager instance with an empty device table and its
// control channels allocated; call Init to start watching udev events.
func New() *Manager {
	m := new(Manager)
	m.devices = make(map[string]serial.Config)
	m.done = make(chan struct{})
	m.stop = make(chan struct{})
	return m
}
// Init initializes the manager. This involves creating a new goroutine to watch
// over the changes detected by udev for any device interaction with the system.
//
// The only interesting device actions are add and remove, for adding and
// removing devices respectively.
func (m *Manager) Init() {
	u := udev.Udev{}
	monitor := u.NewMonitorFromNetlink("udev")
	// Only devices tagged "systemd" are observed.
	monitor.FilterAddMatchTag("systemd")
	devCh, err := monitor.DeviceChan(m.done)
	if err != nil {
		panic(err)
	}
	m.monitor = monitor
	go func() {
	stop:
		for {
			select {
			case d := <-devCh:
				switch d.Action() {
				case "add":
					// Map the udev devpath onto its /dev node name.
					dpath := filepath.Join("/dev", filepath.Base(d.Devpath()))
					m.AddDevice(dpath)
					fmt.Printf(" new device added %s\n", dpath)
					m.reload()
				case "remove":
					dpath := filepath.Join("/dev", filepath.Base(d.Devpath()))
					fmt.Printf(" %s was removed\n", dpath)
					m.RemoveDevice(dpath)
					m.reload()
				default:
					// Other actions (change, bind, ...) are only logged.
					fmt.Println(d.Action())
				}
			case quit := <-m.stop:
				// Forward the stop signal to the monitor's done channel,
				// then leave the watch loop.
				m.done <- quit
				break stop
			}
		}
	}()
}
// AddDevice adds device name to the manager, registering a default serial
// configuration (9600 baud, one-second read timeout) for it.
func (m *Manager) AddDevice(name string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.devices[name] = serial.Config{
		Name:        name,
		Baud:        9600,
		ReadTimeout: time.Second,
	}
	return nil
}
// RemoveDevice removes device name from the manager.
func (m *Manager) RemoveDevice(name string) error {
	// delete mutates the map, so a write lock is required; the original
	// RLock/RUnlock allowed a data race with concurrent AddDevice/reload.
	m.mu.Lock()
	delete(m.devices, name)
	m.mu.Unlock()
	return nil
}
// close all ports that are open for the devices
func (m *Manager) releaseAllPorts() {
	for _, conn := range m.conn {
		if err := conn.Close(); err != nil {
			log.Printf("[ERR] closing port %s %v\n", conn.device.Name, err)
		}
	}
}
// reload closes every open port and re-opens a connection for each known
// device, probing it with the IMEI query to verify the port is usable.
// Devices that fail the probe are skipped.
func (m *Manager) reload() {
	m.releaseAllPorts()
	var conns []*Conn
	// Hold a read lock while ranging: AddDevice/RemoveDevice may mutate
	// the map concurrently (the type is documented as concurrency-safe).
	m.mu.RLock()
	defer m.mu.RUnlock()
	for _, v := range m.devices {
		conn := &Conn{device: v}
		// Use the shared command table instead of re-duplicating the
		// "AT+GSN" literal it was introduced to replace.
		imei, err := conn.Exec(modemCommands.IMEI + " \r")
		if err != nil {
			log.Printf("[ERR] closing port %s %v\n", v.Name, err)
			_ = conn.Close()
			continue
		}
		// "IMEI" (was misspelled "EMEI" in the log output).
		fmt.Printf(" IMEI %s \n", string(imei))
		conns = append(conns, conn)
	}
	m.conn = conns
}
// Close shuts down the device manager. This makes sure the udev monitor is
// closed and all goroutines are properly exited.
func (m *Manager) Close() {
	// Init's watch goroutine receives this and forwards it to the udev
	// monitor's done channel before exiting its loop.
	m.stop <- struct{}{}
}
// Conn is a device serial connection.
type Conn struct {
	device serial.Config // serial settings used to (re)open the port
	port   *serial.Port  // open port handle; valid only while isOpen
	isOpen bool          // whether port currently holds an open handle
}
// Open opens a serial port to the underlying device and marks the
// connection as open on success.
func (c *Conn) Open() error {
	port, err := serial.OpenPort(&c.device)
	if err != nil {
		return err
	}
	c.port, c.isOpen = port, true
	return nil
}
// Close closes the port held by *Conn. It is a no-op when the port was
// never opened. After Close the connection is marked closed so a later
// Exec reopens the port instead of writing to a dead handle (the original
// left isOpen true forever once set).
func (c *Conn) Close() error {
	if !c.isOpen {
		return nil
	}
	c.isOpen = false
	return c.port.Close()
}
// Write writes b to the serial port.
func (c *Conn) Write(b []byte) (int, error) {
	return c.port.Write(b)
}
// Read reads from the serial port into b.
func (c *Conn) Read(b []byte) (int, error) {
	return c.port.Read(b)
}
// Exec sends the command over serial port and returns the response. If the
// port is closed it is opened before sending the command.
func (c *Conn) Exec(cmd string) ([]byte, error) {
	if !c.isOpen {
		fmt.Println("Opening port")
		err := c.Open()
		if err != nil {
			return nil, err
		}
	}
	// Drop any bytes still buffered once the exchange completes.
	defer func() { _ = c.port.Flush() }()
	_, err := c.Write([]byte(cmd))
	if err != nil {
		return nil, err
	}
	buf := bufio.NewReader(c)
	// The first line is discarded and the second returned.
	// NOTE(review): this assumes the modem echoes the command back (ATE1)
	// so line one is the echo and line two the response — confirm.
	line, err := buf.ReadString('\n')
	if err != nil {
		return nil, err
	}
	line, err = buf.ReadString('\n')
	if err != nil {
		return nil, err
	}
	return []byte(line), nil
}
|
package proxy
import (
"bufio"
"context"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
"github.com/getlantern/errors"
"github.com/getlantern/lampshade"
"github.com/getlantern/netx"
"github.com/getlantern/proxy/filters"
"github.com/getlantern/reconn"
)
const (
	// connectRequest is the CONNECT request line synthesized by Connect.
	connectRequest = "CONNECT %v HTTP/1.1\r\nHost: %v\r\n\r\n"
	// maxHTTPSize caps how much of a MITM'd stream is buffered while
	// peeking for an HTTP request (2 << 15 = 65536 bytes).
	maxHTTPSize = 2 << 15 // 64K
)
// BufferSource is a source for buffers used in reading/writing.
type BufferSource interface {
	// Get returns a buffer to use for copying.
	Get() []byte
	// Put returns a no-longer-needed buffer to the source.
	Put(buf []byte)
}
// applyCONNECTDefaults fills in defaults for CONNECT handling: a fresh
// per-call buffer source when none is configured, and a ShouldMITM check
// that always also enforces the proxy's built-in MITM conditions on top of
// any user-supplied predicate.
func (proxy *proxy) applyCONNECTDefaults() {
	if proxy.BufferSource == nil {
		proxy.BufferSource = &defaultBufferSource{}
	}
	if proxy.ShouldMITM == nil {
		proxy.ShouldMITM = proxy.defaultShouldMITM
		return
	}
	custom := proxy.ShouldMITM
	proxy.ShouldMITM = func(req *http.Request, upstreamAddr string) bool {
		// Both the custom predicate and the default must agree.
		return custom(req, upstreamAddr) && proxy.defaultShouldMITM(req, upstreamAddr)
	}
}
// connectInterceptor configures an Interceptor for CONNECT tunneling.
// NOTE(review): this type appears unused within this file — confirm callers.
type connectInterceptor struct {
	idleTimeout        time.Duration // how long an idle tunnel may linger
	bufferSource       BufferSource  // source of copy buffers
	dial               DialFunc      // dials the upstream origin
	okWaitsForUpstream bool          // delay the 200 OK until upstream is dialed
}
// nextCONNECT returns the terminal filter for CONNECT requests: it either
// responds 200 OK immediately (OKWaitsForUpstream == false) or dials the
// upstream first and only then responds, failing with 502 on dial errors.
func (proxy *proxy) nextCONNECT(downstream net.Conn) filters.Next {
	return func(ctx filters.Context, modifiedReq *http.Request) (*http.Response, filters.Context, error) {
		var resp *http.Response
		upstreamAddr := modifiedReq.URL.Host
		nextCtx := ctx.WithValue(ctxKeyUpstreamAddr, upstreamAddr)
		if !proxy.OKWaitsForUpstream {
			// We preemptively respond with an OK on the client. Some user agents like
			// Chrome consider any non-200 OK response from the proxy to indicate that
			// there's a problem with the proxy rather than the origin, causing the user
			// agent to mark the proxy itself as bad and avoid using it in the future.
			// By immediately responding 200 OK irrespective of what happens with the
			// origin, we are signaling to the user agent that the proxy itself is good.
			// If there is a subsequent problem dialing the origin, the user agent will
			// (mostly correctly) attribute that to a problem with the origin rather
			// than the proxy and continue to consider the proxy good. See the extensive
			// discussion here: https://github.com/getlantern/lantern/issues/5514.
			resp, nextCtx = respondOK(resp, modifiedReq, nextCtx)
			return resp, nextCtx, nil
		}
		// Note - for CONNECT requests, we use the Host from the request URL, not the
		// Host header. See discussion here:
		// https://ask.wireshark.org/questions/22988/http-host-header-with-and-without-port-number
		upstream, err := proxy.Dial(ctx, true, "tcp", upstreamAddr)
		if err != nil {
			// OKWaitsForUpstream is necessarily true here (the early return
			// above handles the false case), so the original's inner
			// re-check was redundant; always answer 502. The error is
			// returned to the caller, so it is not also logged here.
			return badGateway(ctx, modifiedReq, err)
		}
		// In this case, waited to successfully dial upstream before responding
		// OK. Lantern uses this logic on server-side proxies so that the Lantern
		// client retains the opportunity to fail over to a different proxy server
		// just in case that one is able to reach the origin. This is relevant,
		// for example, if some proxy servers reside in jurisdictions where an
		// origin site is blocked but other proxy servers don't.
		resp, nextCtx = respondOK(resp, modifiedReq, nextCtx)
		nextCtx = nextCtx.WithValue(ctxKeyUpstream, upstream)
		return resp, nextCtx, nil
	}
}
// respondOK short-circuits the filter chain with a 200 OK response unless
// the context carries the suppress-OK marker (set by Connect).
func respondOK(resp *http.Response, req *http.Request, ctx filters.Context) (*http.Response, filters.Context) {
	if ctx.Value(ctxKeyNoRespondOkay) != nil {
		return resp, ctx
	}
	okResp := &http.Response{StatusCode: http.StatusOK}
	resp, ctx, _ = filters.ShortCircuit(ctx, req, okResp)
	return resp, ctx
}
// Connect synthesizes a CONNECT request for origin, prepends it to the data
// already available from in, and hands the combined stream to Handle. The
// no-respond-OK marker in the context suppresses the usual 200 OK reply
// (checked by respondOK).
func (proxy *proxy) Connect(ctx context.Context, in io.Reader, conn net.Conn, origin string) error {
	pin := io.MultiReader(strings.NewReader(fmt.Sprintf(connectRequest, origin, origin)), in)
	return proxy.Handle(context.WithValue(ctx, ctxKeyNoRespondOkay, "true"), pin, conn)
}
// proceedWithConnect tunnels a CONNECT request: it dials the upstream if
// one was not already established, optionally man-in-the-middles the
// connection, and otherwise pipes bytes between downstream and upstream.
func (proxy *proxy) proceedWithConnect(ctx filters.Context, req *http.Request, upstreamAddr string, upstream net.Conn, downstream net.Conn) error {
	if upstream == nil {
		var dialErr error
		upstream, dialErr = proxy.Dial(ctx, true, "tcp", upstreamAddr)
		if dialErr != nil {
			return dialErr
		}
	}
	defer func() {
		if closeErr := upstream.Close(); closeErr != nil {
			log.Tracef("Error closing upstream connection: %s", closeErr)
		}
	}()
	// rr is non-nil when a failed MITM attempt read data that must be
	// replayed to upstream before plain piping starts.
	var rr io.Reader
	if proxy.ShouldMITM(req, upstreamAddr) {
		// Try to MITM the connection
		downstreamMITM, upstreamMITM, mitming, err := proxy.mitmIC.MITM(downstream, upstream)
		if err != nil {
			log.Errorf("Unable to MITM %v: %v", upstreamAddr, err)
			return errors.New("Unable to MITM connection: %v", err)
		}
		downstream = downstreamMITM
		upstream = upstreamMITM
		if mitming {
			// Try to read HTTP request and process as HTTP assuming that requests
			// (not including body) are always smaller than 65K. If this assumption is
			// violated, we won't be able to process the data on this connection.
			downstreamRR := reconn.Wrap(downstream, maxHTTPSize)
			_, peekReqErr := http.ReadRequest(bufio.NewReader(downstreamRR))
			var rrErr error
			rr, rrErr = downstreamRR.Rereader()
			if rrErr != nil {
				// Reading request overflowed, abort
				return errors.New("Unable to re-read data: %v", rrErr)
			}
			if peekReqErr == nil {
				// Handle as HTTP, prepend already read HTTP request
				fullDownstream := io.MultiReader(rr, downstream)
				// Remove upstream info from context so that handle doesn't try to
				// process this as a CONNECT
				ctx = ctx.WithValue(ctxKeyUpstream, nil).WithValue(ctxKeyUpstreamAddr, nil)
				ctx = ctx.WithMITMing()
				return proxy.handle(ctx, fullDownstream, downstream, upstream)
			}
			// We couldn't read the first HTTP Request, fall back to piping data
		}
	}
	// Prepare to pipe data between the client and the proxy.
	bufOut := proxy.BufferSource.Get()
	bufIn := proxy.BufferSource.Get()
	defer proxy.BufferSource.Put(bufOut)
	defer proxy.BufferSource.Put(bufIn)
	if rr != nil {
		// We tried and failed to MITM. First copy already read data to upstream
		// before we start piping as usual
		_, copyErr := io.CopyBuffer(upstream, rr, bufOut)
		if copyErr != nil {
			return errors.New("Error copying initial data to upstream: %v", copyErr)
		}
	}
	// Pipe data between the client and the proxy.
	writeErr, readErr := netx.BidiCopy(upstream, downstream, bufOut, bufIn)
	if isUnexpected(readErr) {
		return errors.New("Error piping data to downstream: %v", readErr)
	} else if isUnexpected(writeErr) {
		return errors.New("Error piping data to upstream: %v", writeErr)
	}
	return nil
}
// badGateway fails the request with a 502 Bad Gateway carrying err.
func badGateway(ctx filters.Context, req *http.Request, err error) (*http.Response, filters.Context, error) {
	log.Debugf("Responding BadGateway: %v", err)
	return filters.Fail(ctx, req, http.StatusBadGateway, err)
}
// defaultBufferSource allocates a fresh buffer on every Get and lets the
// garbage collector reclaim it on Put (no pooling).
type defaultBufferSource struct{}

func (dbs *defaultBufferSource) Get() []byte {
	// We limit ourselves to lampshade.MaxDataLen to ensure compatibility with it
	return make([]byte, lampshade.MaxDataLen)
}

func (dbs *defaultBufferSource) Put(buf []byte) {
	// do nothing — buffers are not reused
}
// defaultShouldMITM reports whether the request to upstreamAddr should be
// man-in-the-middled: a MITM interceptor must be configured and the host
// part of the address must match one of the configured MITM domains.
func (proxy *proxy) defaultShouldMITM(req *http.Request, upstreamAddr string) bool {
	if proxy.mitmIC == nil {
		return false
	}
	host, _, splitErr := net.SplitHostPort(upstreamAddr)
	if splitErr != nil {
		return false
	}
	matched := false
	for _, domain := range proxy.mitmDomains {
		if domain.MatchString(host) {
			matched = true
			break
		}
	}
	return matched
}
Remove redundant error log (the error is already returned to the caller)
package proxy
import (
"bufio"
"context"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
"github.com/getlantern/errors"
"github.com/getlantern/lampshade"
"github.com/getlantern/netx"
"github.com/getlantern/proxy/filters"
"github.com/getlantern/reconn"
)
const (
	// connectRequest is the CONNECT request line synthesized by Connect.
	connectRequest = "CONNECT %v HTTP/1.1\r\nHost: %v\r\n\r\n"
	// maxHTTPSize caps how much of a MITM'd stream is buffered while
	// peeking for an HTTP request (2 << 15 = 65536 bytes).
	maxHTTPSize = 2 << 15 // 64K
)
// BufferSource is a source for buffers used in reading/writing.
type BufferSource interface {
	// Get returns a buffer to use for copying.
	Get() []byte
	// Put returns a no-longer-needed buffer to the source.
	Put(buf []byte)
}
// applyCONNECTDefaults fills in defaults for CONNECT handling: a fresh
// per-call buffer source when none is configured, and a ShouldMITM check
// that always also enforces the proxy's built-in MITM conditions on top of
// any user-supplied predicate.
func (proxy *proxy) applyCONNECTDefaults() {
	if proxy.BufferSource == nil {
		proxy.BufferSource = &defaultBufferSource{}
	}
	if proxy.ShouldMITM == nil {
		proxy.ShouldMITM = proxy.defaultShouldMITM
		return
	}
	custom := proxy.ShouldMITM
	proxy.ShouldMITM = func(req *http.Request, upstreamAddr string) bool {
		// Both the custom predicate and the default must agree.
		return custom(req, upstreamAddr) && proxy.defaultShouldMITM(req, upstreamAddr)
	}
}
// connectInterceptor configures an Interceptor for CONNECT tunneling.
// NOTE(review): this type appears unused within this file — confirm callers.
type connectInterceptor struct {
	idleTimeout        time.Duration // how long an idle tunnel may linger
	bufferSource       BufferSource  // source of copy buffers
	dial               DialFunc      // dials the upstream origin
	okWaitsForUpstream bool          // delay the 200 OK until upstream is dialed
}
// nextCONNECT returns the terminal filter for CONNECT requests: it either
// responds 200 OK immediately (OKWaitsForUpstream == false) or dials the
// upstream first and only then responds, failing with 502 on dial errors.
func (proxy *proxy) nextCONNECT(downstream net.Conn) filters.Next {
	return func(ctx filters.Context, modifiedReq *http.Request) (*http.Response, filters.Context, error) {
		var resp *http.Response
		upstreamAddr := modifiedReq.URL.Host
		nextCtx := ctx.WithValue(ctxKeyUpstreamAddr, upstreamAddr)
		if !proxy.OKWaitsForUpstream {
			// We preemptively respond with an OK on the client. Some user agents like
			// Chrome consider any non-200 OK response from the proxy to indicate that
			// there's a problem with the proxy rather than the origin, causing the user
			// agent to mark the proxy itself as bad and avoid using it in the future.
			// By immediately responding 200 OK irrespective of what happens with the
			// origin, we are signaling to the user agent that the proxy itself is good.
			// If there is a subsequent problem dialing the origin, the user agent will
			// (mostly correctly) attribute that to a problem with the origin rather
			// than the proxy and continue to consider the proxy good. See the extensive
			// discussion here: https://github.com/getlantern/lantern/issues/5514.
			resp, nextCtx = respondOK(resp, modifiedReq, nextCtx)
			return resp, nextCtx, nil
		}
		// Note - for CONNECT requests, we use the Host from the request URL, not the
		// Host header. See discussion here:
		// https://ask.wireshark.org/questions/22988/http-host-header-with-and-without-port-number
		upstream, err := proxy.Dial(ctx, true, "tcp", upstreamAddr)
		if err != nil {
			// OKWaitsForUpstream is necessarily true here (the early return
			// above handles the false case), so the original's inner
			// re-check was redundant; always answer 502.
			return badGateway(ctx, modifiedReq, err)
		}
		// In this case, waited to successfully dial upstream before responding
		// OK. Lantern uses this logic on server-side proxies so that the Lantern
		// client retains the opportunity to fail over to a different proxy server
		// just in case that one is able to reach the origin. This is relevant,
		// for example, if some proxy servers reside in jurisdictions where an
		// origin site is blocked but other proxy servers don't.
		resp, nextCtx = respondOK(resp, modifiedReq, nextCtx)
		nextCtx = nextCtx.WithValue(ctxKeyUpstream, upstream)
		return resp, nextCtx, nil
	}
}
// respondOK short-circuits the filter chain with a 200 OK response unless
// the context carries the suppress-OK marker (set by Connect).
func respondOK(resp *http.Response, req *http.Request, ctx filters.Context) (*http.Response, filters.Context) {
	if ctx.Value(ctxKeyNoRespondOkay) != nil {
		return resp, ctx
	}
	okResp := &http.Response{StatusCode: http.StatusOK}
	resp, ctx, _ = filters.ShortCircuit(ctx, req, okResp)
	return resp, ctx
}
// Connect synthesizes a CONNECT request for origin, prepends it to the data
// already available from in, and hands the combined stream to Handle. The
// no-respond-OK marker in the context suppresses the usual 200 OK reply
// (checked by respondOK).
func (proxy *proxy) Connect(ctx context.Context, in io.Reader, conn net.Conn, origin string) error {
	pin := io.MultiReader(strings.NewReader(fmt.Sprintf(connectRequest, origin, origin)), in)
	return proxy.Handle(context.WithValue(ctx, ctxKeyNoRespondOkay, "true"), pin, conn)
}
// proceedWithConnect tunnels a CONNECT request: it dials the upstream if
// one was not already established, optionally man-in-the-middles the
// connection, and otherwise pipes bytes between downstream and upstream.
func (proxy *proxy) proceedWithConnect(ctx filters.Context, req *http.Request, upstreamAddr string, upstream net.Conn, downstream net.Conn) error {
	if upstream == nil {
		var dialErr error
		upstream, dialErr = proxy.Dial(ctx, true, "tcp", upstreamAddr)
		if dialErr != nil {
			return dialErr
		}
	}
	defer func() {
		if closeErr := upstream.Close(); closeErr != nil {
			log.Tracef("Error closing upstream connection: %s", closeErr)
		}
	}()
	// rr is non-nil when a failed MITM attempt read data that must be
	// replayed to upstream before plain piping starts.
	var rr io.Reader
	if proxy.ShouldMITM(req, upstreamAddr) {
		// Try to MITM the connection
		downstreamMITM, upstreamMITM, mitming, err := proxy.mitmIC.MITM(downstream, upstream)
		if err != nil {
			log.Errorf("Unable to MITM %v: %v", upstreamAddr, err)
			return errors.New("Unable to MITM connection: %v", err)
		}
		downstream = downstreamMITM
		upstream = upstreamMITM
		if mitming {
			// Try to read HTTP request and process as HTTP assuming that requests
			// (not including body) are always smaller than 65K. If this assumption is
			// violated, we won't be able to process the data on this connection.
			downstreamRR := reconn.Wrap(downstream, maxHTTPSize)
			_, peekReqErr := http.ReadRequest(bufio.NewReader(downstreamRR))
			var rrErr error
			rr, rrErr = downstreamRR.Rereader()
			if rrErr != nil {
				// Reading request overflowed, abort
				return errors.New("Unable to re-read data: %v", rrErr)
			}
			if peekReqErr == nil {
				// Handle as HTTP, prepend already read HTTP request
				fullDownstream := io.MultiReader(rr, downstream)
				// Remove upstream info from context so that handle doesn't try to
				// process this as a CONNECT
				ctx = ctx.WithValue(ctxKeyUpstream, nil).WithValue(ctxKeyUpstreamAddr, nil)
				ctx = ctx.WithMITMing()
				return proxy.handle(ctx, fullDownstream, downstream, upstream)
			}
			// We couldn't read the first HTTP Request, fall back to piping data
		}
	}
	// Prepare to pipe data between the client and the proxy.
	bufOut := proxy.BufferSource.Get()
	bufIn := proxy.BufferSource.Get()
	defer proxy.BufferSource.Put(bufOut)
	defer proxy.BufferSource.Put(bufIn)
	if rr != nil {
		// We tried and failed to MITM. First copy already read data to upstream
		// before we start piping as usual
		_, copyErr := io.CopyBuffer(upstream, rr, bufOut)
		if copyErr != nil {
			return errors.New("Error copying initial data to upstream: %v", copyErr)
		}
	}
	// Pipe data between the client and the proxy.
	writeErr, readErr := netx.BidiCopy(upstream, downstream, bufOut, bufIn)
	if isUnexpected(readErr) {
		return errors.New("Error piping data to downstream: %v", readErr)
	} else if isUnexpected(writeErr) {
		return errors.New("Error piping data to upstream: %v", writeErr)
	}
	return nil
}
// badGateway fails the request with a 502 Bad Gateway carrying err.
func badGateway(ctx filters.Context, req *http.Request, err error) (*http.Response, filters.Context, error) {
	log.Debugf("Responding BadGateway: %v", err)
	return filters.Fail(ctx, req, http.StatusBadGateway, err)
}
// defaultBufferSource allocates a fresh buffer on every Get and lets the
// garbage collector reclaim it on Put (no pooling).
type defaultBufferSource struct{}

func (dbs *defaultBufferSource) Get() []byte {
	// We limit ourselves to lampshade.MaxDataLen to ensure compatibility with it
	return make([]byte, lampshade.MaxDataLen)
}

func (dbs *defaultBufferSource) Put(buf []byte) {
	// do nothing — buffers are not reused
}
// defaultShouldMITM reports whether the request to upstreamAddr should be
// man-in-the-middled: a MITM interceptor must be configured and the host
// part of the address must match one of the configured MITM domains.
func (proxy *proxy) defaultShouldMITM(req *http.Request, upstreamAddr string) bool {
	if proxy.mitmIC == nil {
		return false
	}
	host, _, splitErr := net.SplitHostPort(upstreamAddr)
	if splitErr != nil {
		return false
	}
	matched := false
	for _, domain := range proxy.mitmDomains {
		if domain.MatchString(host) {
			matched = true
			break
		}
	}
	return matched
}
|
package slackbot
import (
"fmt"
"github.com/nlopes/slack"
"github.com/jguyomard/slackbot-links/src/links"
)
// Message pairs the message a user posted with the link-extended version
// slack delivers once link unfurling information is available.
type Message struct {
	originalMsg     *slack.Msg // Message posted by user
	extendedLinkMsg *slack.Msg // Message with extended link information
}
// NewMessageFromEvent builds a Message from a slack message event, keeping
// both the user's original message and the link-extended sub-message.
func NewMessageFromEvent(eventData *slack.MessageEvent) *Message {
	return &Message{
		originalMsg:     &eventData.Msg,
		extendedLinkMsg: eventData.SubMessage,
	}
}
// Analyse inspects the message and reports whether any links were handled.
// Bot messages and link-free messages are ignored. Each link is checked for
// earlier duplicates — announced in the channel — and otherwise saved.
func (m *Message) Analyse() bool {
	// Ignore bot messages!
	if len(m.originalMsg.BotID) > 0 {
		return false
	}
	// There is one link?
	links := m.GetLinks()
	if len(links) == 0 {
		return false
	}
	// Save all links!
	for _, link := range links {
		// link already posted?
		duplicates := link.FindDuplicates()
		if duplicates.GetTotal() > 0 {
			duplicateLink := duplicates.GetLinks()[0]
			duplicateAuthor := duplicateLink.SharedBy.Name
			if duplicateAuthor == "" {
				duplicateAuthor = "Someone"
			}
			// TODO add duplicate date?
			duplicateMsg := fmt.Sprintf("Pssst! %s already posted this link!", duplicateAuthor)
			rtm.SendMessage(rtm.NewOutgoingMessage(duplicateMsg, m.originalMsg.Channel))
			// NOTE(review): duplicates are announced but never re-saved —
			// confirm this is intended.
			continue
		}
		//rtm.SendMessage(rtm.NewOutgoingMessage("Link saved, Thank you!", m.originalMsg.Channel))
		link.Save()
	}
	return true
}
// GetLinks extracts every titled attachment link from the extended
// sub-message and returns them enriched with sharing metadata (author,
// channel, title, excerpt, image).
func (m *Message) GetLinks() []*links.Link {
	var messagelinks []*links.Link
	// No SubMessage or no attachments means no links to extract.
	if m.extendedLinkMsg == nil || len(m.extendedLinkMsg.Attachments) == 0 {
		return messagelinks
	}
	for _, attachment := range m.extendedLinkMsg.Attachments {
		if len(attachment.TitleLink) == 0 {
			continue
		}
		link := links.NewLink(attachment.TitleLink)
		link.SetTitle(attachment.Title)
		link.SetExcerpt(attachment.Text)
		link.SetImageURL(attachment.ImageURL)
		link.SetSharedBy(m.extendedLinkMsg.User, getUserName(m.extendedLinkMsg.User))
		link.SetSharedOn(m.originalMsg.Channel, getChannelName(m.originalMsg.Channel))
		messagelinks = append(messagelinks, link)
	}
	return messagelinks
}
// getUserName resolves a slack user ID to its display name; it returns the
// empty string when the lookup fails.
func getUserName(userID string) string {
	info, lookupErr := rtm.GetUserInfo(userID)
	if lookupErr != nil {
		return ""
	}
	return info.Name
}
// getChannelName resolves a slack channel ID to its name; it returns the
// empty string when the lookup fails.
func getChannelName(channelID string) string {
	info, lookupErr := rtm.GetChannelInfo(channelID)
	if lookupErr != nil {
		return ""
	}
	return info.Name
}
Improve duplicate message
package slackbot
import (
"fmt"
"time"
"github.com/dustin/go-humanize"
"github.com/nlopes/slack"
"github.com/jguyomard/slackbot-links/src/links"
)
// Message pairs the message a user posted with the link-extended version
// slack delivers once link unfurling information is available.
type Message struct {
	originalMsg     *slack.Msg // Message posted by user
	extendedLinkMsg *slack.Msg // Message with extended link information
}
// NewMessageFromEvent builds a Message from a slack message event, keeping
// both the user's original message and the link-extended sub-message.
func NewMessageFromEvent(eventData *slack.MessageEvent) *Message {
	return &Message{
		originalMsg:     &eventData.Msg,
		extendedLinkMsg: eventData.SubMessage,
	}
}
// Analyse inspects the message and reports whether any links were handled.
// Invalid messages (bots, non-edit events, attachment-less) are ignored.
// Each link is checked for earlier duplicates — announced in the channel
// unless the same author re-posted within six hours — and otherwise saved.
func (m *Message) Analyse() bool {
	// Ignore some messages
	if !m.isValidMessage() {
		return false
	}
	// There is one link?
	links := m.GetLinks()
	if len(links) == 0 {
		return false
	}
	// Save all links!
	for _, link := range links {
		// link already posted?
		duplicates := link.FindDuplicates()
		if duplicates.GetTotal() > 0 {
			duplicateLink := duplicates.GetLinks()[0]
			// The original dereferenced *duplicateLink.SharedAt in this
			// guard before the nil check below — guard the pointer first.
			sameAuthor := duplicateLink.SharedBy.ID == link.SharedBy.ID
			postedRecently := duplicateLink.SharedAt != nil && time.Since(*duplicateLink.SharedAt) <= 6*time.Hour
			// If the same author re-posted the link recently, stay quiet.
			if !(sameAuthor && postedRecently) {
				duplicateAuthor := "Someone"
				if sameAuthor {
					duplicateAuthor = "You"
				} else if duplicateLink.SharedBy.Name != "" {
					duplicateAuthor = duplicateLink.SharedBy.Name
				}
				duplicateHumanDate := "a long while ago"
				if duplicateLink.SharedAt != nil {
					duplicateHumanDate = humanize.Time(*duplicateLink.SharedAt)
				}
				duplicateMsg := fmt.Sprintf("@%s Pssst! %s already posted this link %s!", link.SharedBy.Name, duplicateAuthor, duplicateHumanDate)
				rtm.SendMessage(rtm.NewOutgoingMessage(duplicateMsg, m.originalMsg.Channel))
			}
			continue
		}
		//rtm.SendMessage(rtm.NewOutgoingMessage("Link saved, Thank you!", m.originalMsg.Channel))
		link.Save()
	}
	return true
}
// GetLinks extracts every titled attachment link from the extended
// sub-message and returns them enriched with sharing metadata (author,
// channel, title, excerpt, image).
func (m *Message) GetLinks() []*links.Link {
	var messagelinks []*links.Link
	// No SubMessage or no attachments means no links to extract.
	if m.extendedLinkMsg == nil || len(m.extendedLinkMsg.Attachments) == 0 {
		return messagelinks
	}
	for _, attachment := range m.extendedLinkMsg.Attachments {
		if len(attachment.TitleLink) == 0 {
			continue
		}
		link := links.NewLink(attachment.TitleLink)
		link.SetTitle(attachment.Title)
		link.SetExcerpt(attachment.Text)
		link.SetImageURL(attachment.ImageURL)
		link.SetSharedBy(m.extendedLinkMsg.User, getUserName(m.extendedLinkMsg.User))
		link.SetSharedOn(m.originalMsg.Channel, getChannelName(m.originalMsg.Channel))
		messagelinks = append(messagelinks, link)
	}
	return messagelinks
}
// isValidMessage reports whether this message should be analysed for links.
func (m *Message) isValidMessage() bool {
	// Ignore bot messages!
	if len(m.originalMsg.BotID) > 0 {
		return false
	}
	// Only "message_changed" events are processed; replies and plain new
	// messages are ignored. NOTE(review): presumably slack delivers link
	// unfurl attachments by editing the original message — confirm.
	if m.originalMsg.SubType != "message_changed" {
		return false
	}
	// Ignore messages without attachments
	if m.extendedLinkMsg == nil || len(m.extendedLinkMsg.Attachments) == 0 {
		return false
	}
	// This is probably an edit (a non-nil Edited field means a human edit
	// rather than an unfurl). NOTE(review): confirm this distinction holds.
	if m.extendedLinkMsg.Edited != nil {
		return false
	}
	return true
}
// getUserName resolves a slack user ID to its display name; it returns the
// empty string when the lookup fails.
func getUserName(userID string) string {
	info, lookupErr := rtm.GetUserInfo(userID)
	if lookupErr != nil {
		return ""
	}
	return info.Name
}
// getChannelName resolves a slack channel ID to its name; it returns the
// empty string when the lookup fails.
func getChannelName(channelID string) string {
	info, lookupErr := rtm.GetChannelInfo(channelID)
	if lookupErr != nil {
		return ""
	}
	return info.Name
}
|
package main
import (
"flag"
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/naoina/kocha"
)
// newCommand implements `command` interface for `new` command.
type newCommand struct {
	flag *flag.FlagSet // flag set supplied via DefineFlags; Run reads its args
}
// Name returns name of `new` command.
func (c *newCommand) Name() string {
	return "new"
}

// Alias returns alias of `new` command; `new` has none.
func (c *newCommand) Alias() string {
	return ""
}

// Short returns short description for help.
func (c *newCommand) Short() string {
	return "create a new application"
}

// Usage returns usage of `new` command.
func (c *newCommand) Usage() string {
	return fmt.Sprintf("%s APP_PATH", c.Name())
}

// DefineFlags records the flag set so Run can read positional arguments.
func (c *newCommand) DefineFlags(fs *flag.FlagSet) {
	c.flag = fs
}
// Run executes the process for `new` command: it copies the skeleton tree
// into GOPATH/src/APP_PATH, rendering .template files with freshly
// generated secret/signed keys. It aborts when no APP_PATH is given or an
// application already exists at the destination.
func (c *newCommand) Run() {
	appPath := c.flag.Arg(0)
	if appPath == "" {
		kocha.PanicOnError(c, "abort: no APP_PATH given")
	}
	dstBasePath := filepath.Join(filepath.SplitList(build.Default.GOPATH)[0], "src", appPath)
	_, filename, _, _ := runtime.Caller(0)
	baseDir := filepath.Dir(filename)
	skeletonDir := filepath.Join(baseDir, "skeleton", "new")
	if _, err := os.Stat(filepath.Join(dstBasePath, "config", "app.go")); err == nil {
		kocha.PanicOnError(c, "abort: Kocha application is already exists")
	}
	data := map[string]interface{}{
		"appName":   filepath.Base(appPath),
		"appPath":   appPath,
		"secretKey": strings.Trim(fmt.Sprintf("%q", string(kocha.GenerateRandomKey(32))), `"`), // AES-256
		"signedKey": strings.Trim(fmt.Sprintf("%q", string(kocha.GenerateRandomKey(16))), `"`),
	}
	// Propagate walk errors instead of panicking mid-walk and instead of
	// silently discarding filepath.Walk's return value (e.g. when
	// skeletonDir itself cannot be read, the original did nothing at all).
	walkErr := filepath.Walk(skeletonDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		dstPath := filepath.Join(dstBasePath, strings.TrimSuffix(strings.TrimPrefix(path, skeletonDir), ".template"))
		dstDir := filepath.Dir(dstPath)
		dirCreated, err := mkdirAllIfNotExists(dstDir)
		if err != nil {
			kocha.PanicOnError(c, "abort: failed to create directory: %v", err)
		}
		if dirCreated {
			kocha.PrintCreateDirectory(dstDir)
		} else {
			kocha.PrintExist(dstDir)
		}
		kocha.CopyTemplate(c, path, dstPath, data)
		return nil
	})
	if walkErr != nil {
		kocha.PanicOnError(c, "abort: failed to walk skeleton directory: %v", walkErr)
	}
}
func mkdirAllIfNotExists(dstDir string) (created bool, err error) {
if _, err := os.Stat(dstDir); os.IsNotExist(err) {
if err := os.MkdirAll(dstDir, 0755); err != nil {
return false, err
}
return true, nil
}
return false, nil
}
Modify 'kocha new' to not display already-existing directories
package main
import (
"flag"
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/naoina/kocha"
)
// newCommand implements `command` interface for `new` command.
type newCommand struct {
	flag *flag.FlagSet // flag set supplied via DefineFlags; Run reads its args
}
// Name returns name of `new` command.
func (c *newCommand) Name() string {
	return "new"
}

// Alias returns alias of `new` command; `new` has none.
func (c *newCommand) Alias() string {
	return ""
}

// Short returns short description for help.
func (c *newCommand) Short() string {
	return "create a new application"
}

// Usage returns usage of `new` command.
func (c *newCommand) Usage() string {
	return fmt.Sprintf("%s APP_PATH", c.Name())
}

// DefineFlags records the flag set so Run can read positional arguments.
func (c *newCommand) DefineFlags(fs *flag.FlagSet) {
	c.flag = fs
}
// Run execute the process for `new` command.
// It expands the bundled "skeleton/new" templates into a new Kocha
// application directory under the first GOPATH entry.
func (c *newCommand) Run() {
	appPath := c.flag.Arg(0)
	if appPath == "" {
		kocha.PanicOnError(c, "abort: no APP_PATH given")
	}
	// Destination is $GOPATH[0]/src/<appPath>.
	dstBasePath := filepath.Join(filepath.SplitList(build.Default.GOPATH)[0], "src", appPath)
	// The skeleton directory lives next to this source file.
	_, filename, _, _ := runtime.Caller(0)
	baseDir := filepath.Dir(filename)
	skeletonDir := filepath.Join(baseDir, "skeleton", "new")
	// config/app.go is used as the marker of an already generated application.
	if _, err := os.Stat(filepath.Join(dstBasePath, "config", "app.go")); err == nil {
		kocha.PanicOnError(c, "abort: Kocha application is already exists")
	}
	// Template context shared by every skeleton file.
	data := map[string]interface{}{
		"appName":   filepath.Base(appPath),
		"appPath":   appPath,
		"secretKey": strings.Trim(fmt.Sprintf("%q", string(kocha.GenerateRandomKey(32))), `"`), // AES-256
		"signedKey": strings.Trim(fmt.Sprintf("%q", string(kocha.GenerateRandomKey(16))), `"`),
	}
	filepath.Walk(skeletonDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			panic(err)
		}
		if info.IsDir() {
			return nil
		}
		// Map the skeleton file to its destination, dropping the ".template" suffix.
		dstPath := filepath.Join(dstBasePath, strings.TrimSuffix(strings.TrimPrefix(path, skeletonDir), ".template"))
		dstDir := filepath.Dir(dstPath)
		dirCreated, err := mkdirAllIfNotExists(dstDir)
		if err != nil {
			// NOTE(review): PanicOnError presumably panics, aborting the walk — confirm.
			kocha.PanicOnError(c, "abort: failed to create directory: %v", err)
		}
		// Only report directories that were actually created; existing ones
		// stay silent (the point of this revision).
		if dirCreated {
			kocha.PrintCreateDirectory(dstDir)
		}
		kocha.CopyTemplate(c, path, dstPath, data)
		return nil
	})
}
func mkdirAllIfNotExists(dstDir string) (created bool, err error) {
if _, err := os.Stat(dstDir); os.IsNotExist(err) {
if err := os.MkdirAll(dstDir, 0755); err != nil {
return false, err
}
return true, nil
}
return false, nil
}
|
package pubsub
import (
"context"
"strings"
"sync"
"time"
"cloud.google.com/go/pubsub"
ctxNet "golang.org/x/net/context"
"github.com/golang/protobuf/proto"
"github.com/jpillora/backoff"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/sirupsen/logrus"
)
// mutex serializes access to the GoogleCloud topic cache (see Publish/getTopic).
var mutex = &sync.Mutex{}

// pubsubTag marks every span produced here as coming from the pubsub component.
var pubsubTag = opentracing.Tag{string(ext.Component), "pubsub"}
// GoogleCloud is a Google Cloud Pub/Sub backed message bus.
type GoogleCloud struct {
	subName string                   // base name for subscriptions created by subscribe
	client  *pubsub.Client
	topics  map[string]*pubsub.Topic // topic handle cache, guarded by the package mutex
}
// NewGoogleCloud builds a GoogleCloud pub/sub wrapper for the given GCP
// project, using subName as the base name for subscriptions it creates.
func NewGoogleCloud(project_id string, subName string) (*GoogleCloud, error) {
	client, err := pubsub.NewClient(ctxNet.Background(), project_id)
	if err != nil {
		return nil, err
	}
	g := &GoogleCloud{
		subName: subName,
		client:  client,
		topics:  make(map[string]*pubsub.Topic),
	}
	return g, nil
}
// Publish marshals msg and publishes it on the named topic, creating the
// topic on first use (see getTopic). The send is traced with an OpenTracing
// producer span whose context is injected into the message attributes so
// subscribers can continue the trace. It blocks until the server confirms
// the publish and returns any marshal, topic-lookup or publish error.
func (g *GoogleCloud) Publish(ctx context.Context, topic string, msg proto.Message) error {
	b, err := proto.Marshal(msg)
	if err != nil {
		logrus.Errorf("Cant marshal msg for topic %s, err: %v", topic, err)
		// Fix: previously the error was only logged and an empty payload was
		// then published; abort instead.
		return err
	}
	// The topics cache is shared, so guard lookup/creation with the package mutex.
	mutex.Lock()
	t, err := g.getTopic(topic)
	mutex.Unlock()
	if err != nil {
		return err
	}
	attrs := map[string]string{}
	var parentCtx opentracing.SpanContext
	if parent := opentracing.SpanFromContext(ctx); parent != nil {
		parentCtx = parent.Context()
	}
	tracer := opentracing.GlobalTracer()
	clientSpan := tracer.StartSpan(
		topic,
		opentracing.ChildOf(parentCtx), // ChildOf(nil) is a no-op, so a missing parent is fine
		ext.SpanKindProducer,
		pubsubTag,
	)
	defer clientSpan.Finish()
	// Propagate the span context to consumers via message attributes.
	tracer.Inject(
		clientSpan.Context(),
		opentracing.TextMap,
		opentracing.TextMapCarrier(attrs))
	res := t.Publish(ctxNet.Background(), &pubsub.Message{
		Data:       b,
		Attributes: attrs,
	})
	// Get blocks until the publish is confirmed by the server.
	_, err = res.Get(ctxNet.Background())
	return err
}
// Subscribe starts consuming the named topic with handler h, using the given
// ack deadline. When autoAck is true, messages are acked after the handler
// succeeds. The ready channel is buffered and internal, so this returns
// immediately while the worker goroutine keeps receiving.
func (g *GoogleCloud) Subscribe(topic string, h MsgHandler, deadline time.Duration, autoAck bool) {
	g.subscribe(topic, h, deadline, autoAck, make(chan bool, 1))
}
// subscribe creates (or attaches to) the subscription "<subName>--<topic>"
// and starts a goroutine that receives messages forever, invoking h for each
// one. A single true is sent on ready once the subscription exists. When
// autoAck is set, the message is acked after h returns without error.
func (g *GoogleCloud) subscribe(topic string, h MsgHandler, deadline time.Duration, autoAck bool, ready chan<- bool) {
	go func() {
		var sub *pubsub.Subscription
		var err error
		b := &backoff.Backoff{
			Min: 500 * time.Millisecond,
			Max: 10 * time.Second,
		}
		// Subscribe with backoff for failure (i.e topic doesn't exist yet)
		for {
			t := g.client.Topic(topic)
			subName := g.subName + "--" + topic
			sub, err = g.client.CreateSubscription(ctxNet.Background(), subName, t, deadline, nil)
			if err != nil {
				if !strings.Contains(err.Error(), "AlreadyExists") {
					d := b.Duration()
					logrus.Errorf("Can't subscribe to topic: %s. Subscribing again in %s", err.Error(), d)
					time.Sleep(d)
					continue
				}
				// Fix: CreateSubscription returns a nil *Subscription together
				// with an AlreadyExists error, so grab a handle to the existing
				// subscription instead of calling Receive on nil.
				sub = g.client.Subscription(subName)
			}
			b.Reset()
			logrus.Infof("Subscribed to topic %s with name %s", topic, subName)
			break
		}
		ready <- true
		// Listen to messages and call the MsgHandler
		for {
			err = sub.Receive(ctxNet.Background(), func(ctx ctxNet.Context, m *pubsub.Message) {
				logrus.Infof("Received on topic %s, id: %s", topic, m.ID)
				// Resume the trace propagated through the message attributes.
				tracer := opentracing.GlobalTracer()
				spanContext, err := tracer.Extract(
					opentracing.TextMap,
					opentracing.TextMapCarrier(m.Attributes))
				if err != nil {
					logrus.Error(err)
					return
				}
				handlerSpan := tracer.StartSpan(
					g.subName,
					consumerOption{clientContext: spanContext},
					pubsubTag,
				)
				defer handlerSpan.Finish()
				ctx = opentracing.ContextWithSpan(ctx, handlerSpan)
				msg := Msg{
					ID:       m.ID,
					Metadata: m.Attributes,
					Data:     m.Data,
					Ack: func() {
						m.Ack()
					},
					Nack: func() {
						m.Nack()
					},
				}
				err = h(ctx, msg)
				if err != nil {
					logrus.Error(err)
					return
				}
				if autoAck {
					m.Ack()
				}
			})
			if err != nil {
				logrus.Error(err)
			}
		}
	}()
}
// consumerOption is an opentracing StartSpanOption that links a consumer
// span to the producer span context extracted from message attributes.
type consumerOption struct {
	clientContext opentracing.SpanContext
}

// Apply marks the span as a consumer span and, when a producer context is
// available, declares the span a child of it.
func (c consumerOption) Apply(o *opentracing.StartSpanOptions) {
	if c.clientContext != nil {
		opentracing.ChildOf(c.clientContext).Apply(o)
	}
	ext.SpanKindConsumer.Apply(o)
}
// getTopic returns the pubsub topic named name, creating it on the server if
// it does not exist yet. Handles are cached in g.topics so repeated publishes
// skip the Exists/Create round trips; callers must hold the package mutex
// (see Publish) since the cache map is not synchronized.
func (g *GoogleCloud) getTopic(name string) (*pubsub.Topic, error) {
	if g.topics[name] != nil {
		return g.topics[name], nil
	}
	ctx := ctxNet.Background()
	topic := g.client.Topic(name)
	ok, err := topic.Exists(ctx)
	if err != nil {
		return nil, err
	}
	if ok {
		// Fix: cache the handle — previously the cache was consulted but never
		// populated, so every call paid the Exists round trip.
		g.topics[name] = topic
		return topic, nil
	}
	t, err := g.client.CreateTopic(ctx, name)
	if err != nil {
		return nil, err
	}
	g.topics[name] = t
	return t, nil
}
// deleteTopic removes the named topic on the server and drops any cached
// handle so a later getTopic recreates the topic instead of returning a
// stale handle to the deleted one.
func (g *GoogleCloud) deleteTopic(name string) error {
	t, err := g.getTopic(name)
	if err != nil {
		return err
	}
	if err := t.Delete(context.Background()); err != nil {
		return err
	}
	delete(g.topics, name)
	return nil
}
Cache topic and add tracing for Google pub sub performance
package pubsub
import (
"context"
"strings"
"sync"
"time"
"cloud.google.com/go/pubsub"
ctxNet "golang.org/x/net/context"
"github.com/golang/protobuf/proto"
"github.com/jpillora/backoff"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/sirupsen/logrus"
)
// mutex serializes access to the GoogleCloud topic cache (see Publish/getTopic).
var mutex = &sync.Mutex{}

// pubsubTag marks every span produced here as coming from the pubsub component.
var pubsubTag = opentracing.Tag{string(ext.Component), "pubsub"}
// GoogleCloud is a Google Cloud Pub/Sub backed message bus.
type GoogleCloud struct {
	subName string                   // base name for subscriptions created by subscribe
	client  *pubsub.Client
	topics  map[string]*pubsub.Topic // topic handle cache, guarded by the package mutex
}
// NewGoogleCloud builds a GoogleCloud pub/sub wrapper for the given GCP
// project, using subName as the base name for subscriptions it creates.
func NewGoogleCloud(project_id string, subName string) (*GoogleCloud, error) {
	client, err := pubsub.NewClient(ctxNet.Background(), project_id)
	if err != nil {
		return nil, err
	}
	g := &GoogleCloud{
		subName: subName,
		client:  client,
		topics:  make(map[string]*pubsub.Topic),
	}
	return g, nil
}
// Publish marshals msg and publishes it on the named topic, creating the
// topic on first use (see getTopic). The whole send is wrapped in an
// OpenTracing producer span with LogEvent checkpoints, and the span context
// is injected into the message attributes so subscribers can continue the
// trace. It blocks until the server confirms the publish.
func (g *GoogleCloud) Publish(ctx context.Context, topic string, msg proto.Message) error {
	var parentCtx opentracing.SpanContext
	if parent := opentracing.SpanFromContext(ctx); parent != nil {
		parentCtx = parent.Context()
	}
	tracer := opentracing.GlobalTracer()
	clientSpan := tracer.StartSpan(
		topic,
		opentracing.ChildOf(parentCtx), // ChildOf(nil) is a no-op, so a missing parent is fine
		ext.SpanKindProducer,
		pubsubTag,
	)
	defer clientSpan.Finish()
	b, err := proto.Marshal(msg)
	if err != nil {
		logrus.Errorf("Cant marshal msg for topic %s, err: %v", topic, err)
		// Fix: previously the error was only logged and an empty payload was
		// then published; abort instead.
		return err
	}
	clientSpan.LogEvent("get topic")
	// The topics cache is shared, so guard lookup/creation with the package mutex.
	mutex.Lock()
	t, err := g.getTopic(topic)
	mutex.Unlock()
	clientSpan.LogEvent("topic received")
	if err != nil {
		return err
	}
	attrs := map[string]string{}
	// Propagate the span context to consumers via message attributes.
	tracer.Inject(
		clientSpan.Context(),
		opentracing.TextMap,
		opentracing.TextMapCarrier(attrs))
	clientSpan.LogEvent("publish")
	res := t.Publish(ctxNet.Background(), &pubsub.Message{
		Data:       b,
		Attributes: attrs,
	})
	// Get blocks until the publish is confirmed by the server.
	_, err = res.Get(ctxNet.Background())
	clientSpan.LogEvent("publish confirmed")
	return err
}
// Subscribe starts consuming the named topic with handler h, using the given
// ack deadline. When autoAck is true, messages are acked after the handler
// succeeds. The ready channel is buffered and internal, so this returns
// immediately while the worker goroutine keeps receiving.
func (g *GoogleCloud) Subscribe(topic string, h MsgHandler, deadline time.Duration, autoAck bool) {
	g.subscribe(topic, h, deadline, autoAck, make(chan bool, 1))
}
// subscribe creates (or attaches to) the subscription "<subName>--<topic>"
// and starts a goroutine that receives messages forever, invoking h for each
// one. A single true is sent on ready once the subscription exists. When
// autoAck is set, the message is acked after h returns without error.
func (g *GoogleCloud) subscribe(topic string, h MsgHandler, deadline time.Duration, autoAck bool, ready chan<- bool) {
	go func() {
		var sub *pubsub.Subscription
		var err error
		b := &backoff.Backoff{
			Min: 500 * time.Millisecond,
			Max: 10 * time.Second,
		}
		// Subscribe with backoff for failure (i.e topic doesn't exist yet)
		for {
			t := g.client.Topic(topic)
			subName := g.subName + "--" + topic
			sub, err = g.client.CreateSubscription(ctxNet.Background(), subName, t, deadline, nil)
			if err != nil {
				if !strings.Contains(err.Error(), "AlreadyExists") {
					d := b.Duration()
					logrus.Errorf("Can't subscribe to topic: %s. Subscribing again in %s", err.Error(), d)
					time.Sleep(d)
					continue
				}
				// Fix: CreateSubscription returns a nil *Subscription together
				// with an AlreadyExists error, so grab a handle to the existing
				// subscription instead of calling Receive on nil.
				sub = g.client.Subscription(subName)
			}
			b.Reset()
			logrus.Infof("Subscribed to topic %s with name %s", topic, subName)
			break
		}
		ready <- true
		// Listen to messages and call the MsgHandler
		for {
			err = sub.Receive(ctxNet.Background(), func(ctx ctxNet.Context, m *pubsub.Message) {
				logrus.Infof("Received on topic %s, id: %s", topic, m.ID)
				// Resume the trace propagated through the message attributes.
				tracer := opentracing.GlobalTracer()
				spanContext, err := tracer.Extract(
					opentracing.TextMap,
					opentracing.TextMapCarrier(m.Attributes))
				if err != nil {
					logrus.Error(err)
					return
				}
				handlerSpan := tracer.StartSpan(
					g.subName,
					consumerOption{clientContext: spanContext},
					pubsubTag,
				)
				defer handlerSpan.Finish()
				ctx = opentracing.ContextWithSpan(ctx, handlerSpan)
				msg := Msg{
					ID:       m.ID,
					Metadata: m.Attributes,
					Data:     m.Data,
					Ack: func() {
						m.Ack()
					},
					Nack: func() {
						m.Nack()
					},
				}
				err = h(ctx, msg)
				if err != nil {
					logrus.Error(err)
					return
				}
				if autoAck {
					m.Ack()
				}
			})
			if err != nil {
				logrus.Error(err)
			}
		}
	}()
}
// consumerOption is an opentracing StartSpanOption that links a consumer
// span to the producer span context extracted from message attributes.
type consumerOption struct {
	clientContext opentracing.SpanContext
}

// Apply marks the span as a consumer span and, when a producer context is
// available, declares the span a child of it.
func (c consumerOption) Apply(o *opentracing.StartSpanOptions) {
	if c.clientContext != nil {
		opentracing.ChildOf(c.clientContext).Apply(o)
	}
	ext.SpanKindConsumer.Apply(o)
}
// getTopic returns the pubsub topic named name, creating it on the server on
// first use. Handles are cached in g.topics; callers must hold the package
// mutex (see Publish) since the cache map is not synchronized.
func (g *GoogleCloud) getTopic(name string) (*pubsub.Topic, error) {
	if g.topics[name] != nil {
		return g.topics[name], nil
	}
	ctx := ctxNet.Background()
	t, err := g.client.CreateTopic(ctx, name)
	if err != nil {
		if !strings.Contains(err.Error(), "exists") {
			return nil, err
		}
		// Fix: CreateTopic returns a nil *Topic with an AlreadyExists error;
		// previously that nil was cached and returned with a nil error, so
		// publishing to a pre-existing topic dereferenced a nil handle.
		t = g.client.Topic(name)
	}
	g.topics[name] = t
	return t, nil
}
// deleteTopic removes the named topic on the server and drops any cached
// handle so a later getTopic recreates the topic instead of returning a
// stale handle to the deleted one.
func (g *GoogleCloud) deleteTopic(name string) error {
	t, err := g.getTopic(name)
	if err != nil {
		return err
	}
	if err := t.Delete(context.Background()); err != nil {
		return err
	}
	delete(g.topics, name)
	return nil
}
|
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ssntp
import (
"fmt"
"github.com/docker/distribution/uuid"
"time"
"github.com/01org/ciao/payloads"
)
// TraceConfig is the SSNTP tracing configuration to be used
// when calling into the client SendTraced* APIs.
type TraceConfig struct {
	// Label places a label in the SSNTP frame sent
	// using this config.
	Label []byte
	// Start is defined by the API caller to specify when
	// operations related to that frame actually started.
	// Together with SetEndStamp, this allows for an
	// end-to-end timestamping.
	Start time.Time
	// PathTrace turns frame timestamping on or off.
	PathTrace bool
}
// Node represents an SSNTP networking node on a traced frame's path,
// recording when the frame was transmitted (Tx) and received (Rx) by it.
type Node struct {
	UUID        []byte // raw node UUID bytes; 16 bytes are consumed when formatting
	Role        uint32
	TxTimestamp time.Time
	RxTimestamp time.Time
}
// FrameTrace gathers all SSNTP frame tracing information,
// including frame labelling, per Node timestamping and both
// start and end timestamps as provided by the frame API callers.
type FrameTrace struct {
	Label          []byte
	StartTimestamp time.Time // provided by the sender via TraceConfig.Start
	EndTimestamp   time.Time // set by the final receiver via SetEndStamp
	PathLength     uint8     // number of entries recorded in Path
	Path           []Node
}
// Frame represents an SSNTP frame structure.
// The top bit of Major doubles as the path-tracing flag (see
// pathTraceEnabled); use major() for the actual version number.
type Frame struct {
	Major         uint8
	Minor         uint8
	Type          Type
	Operand       uint8
	PayloadLength uint32
	Trace         *FrameTrace // nil when the frame carries no tracing data
	Payload       []byte
}
// ConnectFrame is the SSNTP connection frame structure.
type ConnectFrame struct {
	Major       uint8
	Minor       uint8
	Type        Type
	Operand     uint8
	Role        uint32
	Source      []byte // raw UUID bytes of the connecting node; 16 bytes are read when formatting
	Destination []byte // raw UUID bytes of the peer; 16 bytes are read when formatting
}
// The low 7 bits of Frame.Major carry the version number; the top bit
// flags a frame that carries path tracing data.
const majorMask = 0x7f
const pathTraceEnabled = 1 << 7
// PathTrace tells if an SSNTP frame contains tracing information, i.e. it
// has trace data attached and the trace bit of Major is set.
func (f Frame) PathTrace() bool {
	return f.Trace != nil && f.Major&pathTraceEnabled == pathTraceEnabled
}
// setTrace attaches tracing state to the frame according to trace.
// With a nil config (or one carrying neither a label nor path tracing),
// the trace bit in Major is cleared and any existing f.Trace left alone.
// Otherwise a fresh FrameTrace is installed with the label, and when path
// tracing is requested the trace bit is set and the caller-provided start
// time recorded.
func (f *Frame) setTrace(trace *TraceConfig) {
	if trace == nil || (len(trace.Label) == 0 && trace.PathTrace == false) {
		f.Major = f.Major &^ pathTraceEnabled
		return
	}
	f.Trace = &FrameTrace{Label: trace.Label}
	if trace.PathTrace == true {
		f.Major |= pathTraceEnabled
		f.Trace.StartTimestamp = trace.Start
	}
}
// major returns the SSNTP major version number, i.e. Major with the
// high path-trace flag bit masked off.
func (f Frame) major() uint8 {
	return f.Major & majorMask
}
// String implements fmt.Stringer for Frame. It renders version, type,
// operand and payload length, plus — for path-traced frames — one entry
// per path node with its UUID and any Rx/Tx timestamps.
func (f Frame) String() string {
	var node uuid.UUID
	var op string
	t := f.Type
	// Decode the operand according to the frame type; ERROR operands are
	// printed numerically.
	switch t {
	case COMMAND:
		op = (Command)(f.Operand).String()
	case STATUS:
		op = (Status)(f.Operand).String()
	case EVENT:
		op = (Event)(f.Operand).String()
	case ERROR:
		op = fmt.Sprintf("%d", f.Operand)
	}
	if f.PathTrace() == true {
		path := ""
		for i, n := range f.Trace.Path {
			ts := ""
			// Node UUIDs are raw bytes; assumes at least 16 bytes — TODO confirm.
			copy(node[:], n.UUID[:16])
			if n.RxTimestamp.IsZero() == false {
				ts = ts + fmt.Sprintf("\t\tRx %q\n", n.RxTimestamp.Format(time.StampNano))
			}
			if n.TxTimestamp.IsZero() == false {
				ts = ts + fmt.Sprintf("\t\tTx %q\n", n.TxTimestamp.Format(time.StampNano))
			}
			path = path + fmt.Sprintf("\n\t\tNode #%d\n\t\tUUID %s\n", i, node) + ts
		}
		return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n\tPath %s\n",
			f.major(), f.Minor, t, op, f.PayloadLength, path)
	}
	return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n",
		f.major(), f.Minor, t, op, f.PayloadLength)
}
// String implements fmt.Stringer for ConnectFrame, rendering version,
// type, operand, role, and the source/destination UUIDs.
func (f ConnectFrame) String() string {
	var dest, src uuid.UUID
	var op string
	t := f.Type
	// Only COMMAND, STATUS and ERROR operands are decoded here (no EVENT case).
	switch t {
	case COMMAND:
		op = (Command)(f.Operand).String()
	case STATUS:
		op = (Status)(f.Operand).String()
	case ERROR:
		op = fmt.Sprintf("%d", f.Operand)
	}
	// Source/Destination hold raw UUID bytes; assumes at least 16 bytes each — TODO confirm.
	copy(src[:], f.Source[:16])
	copy(dest[:], f.Destination[:16])
	return fmt.Sprintf("\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tRole %s\n\tSource %s\n\tDestination %s\n",
		f.Major, f.Minor, (Type)(f.Type), op, (*Role)(&f.Role), src, dest)
}
// addPathNode appends the session's source node (UUID and role) to the
// frame's trace path and bumps the recorded path length. It is a no-op
// for frames that are not path-traced.
func (f *Frame) addPathNode(session *session) {
	if !f.PathTrace() {
		return
	}
	n := Node{
		UUID: session.src[:],
		Role: session.srcRole,
	}
	f.Trace.Path = append(f.Trace.Path, n)
	f.Trace.PathLength++
}
// Duration returns the time spent between the first frame transmission
// and its last reception. It returns an error when the frame carries no
// tracing information or its path is empty (indexing would otherwise
// panic: PathLength-1 underflows on a uint8 when PathLength is 0).
func (f Frame) Duration() (time.Duration, error) {
	if f.PathTrace() != true || f.Trace.PathLength == 0 {
		return 0, fmt.Errorf("Timestamps not available")
	}
	return f.Trace.Path[f.Trace.PathLength-1].RxTimestamp.Sub(f.Trace.Path[0].TxTimestamp), nil
}
// SetEndStamp adds the final timestamp to an SSNTP frame.
// The SSNTP node that believes it is the last receiver of a frame calls
// this so the frame records the complete duration of the operation it
// relates to. It is a no-op for untraced frames.
func (f *Frame) SetEndStamp() {
	if !f.PathTrace() {
		return
	}
	f.Trace.EndTimestamp = time.Now()
}
// DumpTrace builds SSNTP frame tracing data into a FrameTrace
// payload. Callers typically marshall this structure into a
// TraceReport YAML payload. It fails for frames carrying no
// path tracing information.
func (f Frame) DumpTrace() (*payloads.FrameTrace, error) {
	var s payloads.FrameTrace
	var node uuid.UUID
	if f.PathTrace() != true {
		return nil, fmt.Errorf("Traces not available")
	}
	s.Label = string(f.Trace.Label)
	s.StartTimestamp = f.Trace.StartTimestamp.Format(time.RFC3339Nano)
	s.EndTimestamp = f.Trace.EndTimestamp.Format(time.RFC3339Nano)
	s.Type = f.Type.String()
	// Operand rendering mirrors Frame.String: typed String() when available,
	// numeric for ERROR frames.
	switch f.Type {
	case COMMAND:
		s.Operand = (Command)(f.Operand).String()
	case STATUS:
		s.Operand = (Status)(f.Operand).String()
	case EVENT:
		s.Operand = (Event)(f.Operand).String()
	case ERROR:
		s.Operand = fmt.Sprintf("%d", f.Operand)
	}
	// Emit one SSNTPNode per path hop, with RFC3339Nano timestamps when set.
	for _, n := range f.Trace.Path {
		copy(node[:], n.UUID[:16])
		sNode := payloads.SSNTPNode{
			SSNTPUUID: node.String(),
			SSNTPRole: (*Role)(&n.Role).String(),
		}
		if n.TxTimestamp.IsZero() == false {
			sNode.TxTimestamp = n.TxTimestamp.Format(time.RFC3339Nano)
		}
		if n.RxTimestamp.IsZero() == false {
			sNode.RxTimestamp = n.RxTimestamp.Format(time.RFC3339Nano)
		}
		s.Nodes = append(s.Nodes, sNode)
	}
	return &s, nil
}
ssntp: Fix typo in ConnectFrame comment
SSNTP, not SSPNT.
Signed-off-by: Samuel Ortiz <0ba86cb3f08bbb861958e54bd3438887adb4263c@linux.intel.com>
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ssntp
import (
"fmt"
"github.com/docker/distribution/uuid"
"time"
"github.com/01org/ciao/payloads"
)
// TraceConfig is the SSNTP tracing configuration to be used
// when calling into the client SendTraced* APIs.
type TraceConfig struct {
	// Label places a label in the SSNTP frame sent
	// using this config.
	Label []byte
	// Start is defined by the API caller to specify when
	// operations related to that frame actually started.
	// Together with SetEndStamp, this allows for an
	// end-to-end timestamping.
	Start time.Time
	// PathTrace turns frame timestamping on or off.
	PathTrace bool
}
// Node represents an SSNTP networking node on a traced frame's path,
// recording when the frame was transmitted (Tx) and received (Rx) by it.
type Node struct {
	UUID        []byte // raw node UUID bytes; 16 bytes are consumed when formatting
	Role        uint32
	TxTimestamp time.Time
	RxTimestamp time.Time
}
// FrameTrace gathers all SSNTP frame tracing information,
// including frame labelling, per Node timestamping and both
// start and end timestamps as provided by the frame API callers.
type FrameTrace struct {
	Label          []byte
	StartTimestamp time.Time // provided by the sender via TraceConfig.Start
	EndTimestamp   time.Time // set by the final receiver via SetEndStamp
	PathLength     uint8     // number of entries recorded in Path
	Path           []Node
}
// Frame represents an SSNTP frame structure.
// The top bit of Major doubles as the path-tracing flag (see
// pathTraceEnabled); use major() for the actual version number.
type Frame struct {
	Major         uint8
	Minor         uint8
	Type          Type
	Operand       uint8
	PayloadLength uint32
	Trace         *FrameTrace // nil when the frame carries no tracing data
	Payload       []byte
}
// ConnectFrame is the SSNTP connection frame structure.
type ConnectFrame struct {
	Major       uint8
	Minor       uint8
	Type        Type
	Operand     uint8
	Role        uint32
	Source      []byte // raw UUID bytes of the connecting node; 16 bytes are read when formatting
	Destination []byte // raw UUID bytes of the peer; 16 bytes are read when formatting
}
// The low 7 bits of Frame.Major carry the version number; the top bit
// flags a frame that carries path tracing data.
const majorMask = 0x7f
const pathTraceEnabled = 1 << 7
// PathTrace tells if an SSNTP frame contains tracing information, i.e. it
// has trace data attached and the trace bit of Major is set.
func (f Frame) PathTrace() bool {
	return f.Trace != nil && f.Major&pathTraceEnabled == pathTraceEnabled
}
// setTrace attaches tracing state to the frame according to trace.
// With a nil config (or one carrying neither a label nor path tracing),
// the trace bit in Major is cleared and any existing f.Trace left alone.
// Otherwise a fresh FrameTrace is installed with the label, and when path
// tracing is requested the trace bit is set and the caller-provided start
// time recorded.
func (f *Frame) setTrace(trace *TraceConfig) {
	if trace == nil || (len(trace.Label) == 0 && trace.PathTrace == false) {
		f.Major = f.Major &^ pathTraceEnabled
		return
	}
	f.Trace = &FrameTrace{Label: trace.Label}
	if trace.PathTrace == true {
		f.Major |= pathTraceEnabled
		f.Trace.StartTimestamp = trace.Start
	}
}
// major returns the SSNTP major version number, i.e. Major with the
// high path-trace flag bit masked off.
func (f Frame) major() uint8 {
	return f.Major & majorMask
}
// String implements fmt.Stringer for Frame. It renders version, type,
// operand and payload length, plus — for path-traced frames — one entry
// per path node with its UUID and any Rx/Tx timestamps.
func (f Frame) String() string {
	var node uuid.UUID
	var op string
	t := f.Type
	// Decode the operand according to the frame type; ERROR operands are
	// printed numerically.
	switch t {
	case COMMAND:
		op = (Command)(f.Operand).String()
	case STATUS:
		op = (Status)(f.Operand).String()
	case EVENT:
		op = (Event)(f.Operand).String()
	case ERROR:
		op = fmt.Sprintf("%d", f.Operand)
	}
	if f.PathTrace() == true {
		path := ""
		for i, n := range f.Trace.Path {
			ts := ""
			// Node UUIDs are raw bytes; assumes at least 16 bytes — TODO confirm.
			copy(node[:], n.UUID[:16])
			if n.RxTimestamp.IsZero() == false {
				ts = ts + fmt.Sprintf("\t\tRx %q\n", n.RxTimestamp.Format(time.StampNano))
			}
			if n.TxTimestamp.IsZero() == false {
				ts = ts + fmt.Sprintf("\t\tTx %q\n", n.TxTimestamp.Format(time.StampNano))
			}
			path = path + fmt.Sprintf("\n\t\tNode #%d\n\t\tUUID %s\n", i, node) + ts
		}
		return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n\tPath %s\n",
			f.major(), f.Minor, t, op, f.PayloadLength, path)
	}
	return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n",
		f.major(), f.Minor, t, op, f.PayloadLength)
}
// String implements fmt.Stringer for ConnectFrame, rendering version,
// type, operand, role, and the source/destination UUIDs.
func (f ConnectFrame) String() string {
	var dest, src uuid.UUID
	var op string
	t := f.Type
	// Only COMMAND, STATUS and ERROR operands are decoded here (no EVENT case).
	switch t {
	case COMMAND:
		op = (Command)(f.Operand).String()
	case STATUS:
		op = (Status)(f.Operand).String()
	case ERROR:
		op = fmt.Sprintf("%d", f.Operand)
	}
	// Source/Destination hold raw UUID bytes; assumes at least 16 bytes each — TODO confirm.
	copy(src[:], f.Source[:16])
	copy(dest[:], f.Destination[:16])
	return fmt.Sprintf("\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tRole %s\n\tSource %s\n\tDestination %s\n",
		f.Major, f.Minor, (Type)(f.Type), op, (*Role)(&f.Role), src, dest)
}
// addPathNode appends the session's source node (UUID and role) to the
// frame's trace path and bumps the recorded path length. It is a no-op
// for frames that are not path-traced.
func (f *Frame) addPathNode(session *session) {
	if !f.PathTrace() {
		return
	}
	n := Node{
		UUID: session.src[:],
		Role: session.srcRole,
	}
	f.Trace.Path = append(f.Trace.Path, n)
	f.Trace.PathLength++
}
// Duration returns the time spent between the first frame transmission
// and its last reception. It returns an error when the frame carries no
// tracing information or its path is empty (indexing would otherwise
// panic: PathLength-1 underflows on a uint8 when PathLength is 0).
func (f Frame) Duration() (time.Duration, error) {
	if f.PathTrace() != true || f.Trace.PathLength == 0 {
		return 0, fmt.Errorf("Timestamps not available")
	}
	return f.Trace.Path[f.Trace.PathLength-1].RxTimestamp.Sub(f.Trace.Path[0].TxTimestamp), nil
}
// SetEndStamp adds the final timestamp to an SSNTP frame.
// The SSNTP node that believes it is the last receiver of a frame calls
// this so the frame records the complete duration of the operation it
// relates to. It is a no-op for untraced frames.
func (f *Frame) SetEndStamp() {
	if !f.PathTrace() {
		return
	}
	f.Trace.EndTimestamp = time.Now()
}
// DumpTrace builds SSNTP frame tracing data into a FrameTrace
// payload. Callers typically marshall this structure into a
// TraceReport YAML payload. It fails for frames carrying no
// path tracing information.
func (f Frame) DumpTrace() (*payloads.FrameTrace, error) {
	var s payloads.FrameTrace
	var node uuid.UUID
	if f.PathTrace() != true {
		return nil, fmt.Errorf("Traces not available")
	}
	s.Label = string(f.Trace.Label)
	s.StartTimestamp = f.Trace.StartTimestamp.Format(time.RFC3339Nano)
	s.EndTimestamp = f.Trace.EndTimestamp.Format(time.RFC3339Nano)
	s.Type = f.Type.String()
	// Operand rendering mirrors Frame.String: typed String() when available,
	// numeric for ERROR frames.
	switch f.Type {
	case COMMAND:
		s.Operand = (Command)(f.Operand).String()
	case STATUS:
		s.Operand = (Status)(f.Operand).String()
	case EVENT:
		s.Operand = (Event)(f.Operand).String()
	case ERROR:
		s.Operand = fmt.Sprintf("%d", f.Operand)
	}
	// Emit one SSNTPNode per path hop, with RFC3339Nano timestamps when set.
	for _, n := range f.Trace.Path {
		copy(node[:], n.UUID[:16])
		sNode := payloads.SSNTPNode{
			SSNTPUUID: node.String(),
			SSNTPRole: (*Role)(&n.Role).String(),
		}
		if n.TxTimestamp.IsZero() == false {
			sNode.TxTimestamp = n.TxTimestamp.Format(time.RFC3339Nano)
		}
		if n.RxTimestamp.IsZero() == false {
			sNode.RxTimestamp = n.RxTimestamp.Format(time.RFC3339Nano)
		}
		s.Nodes = append(s.Nodes, sNode)
	}
	return &s, nil
}
|
package main
import (
"fmt"
"github.com/google/go-github/github"
"github.com/pkg/errors"
)
// PullRequests aggregates pull request statistics for one repository.
type PullRequests struct {
	Repository   *Repository   `json:"repository" bson:"repository"`
	Total        int           `json:"total" bson:"total"`
	TotalMerged  int           `json:"total_merged" bson:"total_merged"`
	SentVsMerged []*PRPerMonth `json:"sent_vs_merged" bson:"sent_vs_merged"`
}
// PRPerMonth holds the number of pull requests sent and merged in one
// month, keyed by a "month/year" string.
type PRPerMonth struct {
	MonthYear string `json:"month_year" bson:"month_year"`
	Sent      int    `json:"sent" bson:"sent"`
	Merged    int    `json:"merged" bson:"merged"`
}
// prStats is an internal sent/merged counter pair used while aggregating
// pull requests per month.
type prStats struct {
	sent   int
	merged int
}
// getPullRequests fills prs with pull request statistics for its repository:
// the total number of PRs in any state, how many of them were merged, and
// per-month sent/merged counts.
func getPullRequests(prs *PullRequests) error {
	ctx, client := newGithubClient(false)
	repoOwner := prs.Repository.Owner
	repoName := prs.Repository.Name
	pullOpts := &github.PullRequestListOptions{
		ListOptions: github.ListOptions{PerPage: 100},
		State:       "all",
	}
	pulls := []*github.PullRequest{}
	// Walk every page of results.
	for {
		paginatedPulls, resp, err := client.PullRequests.List(ctx, repoOwner, repoName, pullOpts)
		if err != nil {
			return errors.Wrap(err, "error getting repo pull requests")
		}
		pulls = append(pulls, paginatedPulls...)
		if resp.NextPage == 0 {
			break
		}
		pullOpts.Page = resp.NextPage
	}
	prs.Total = len(pulls)
	prs.TotalMerged = 0
	// Fix: the map must be initialized — the previous declaration
	// (`var monthYearPRStats map[string]*prStats`) left it nil, so the
	// first write below panicked.
	monthYearPRStats := map[string]*prStats{}
	for _, pull := range pulls {
		pullCreatedAt := pull.GetCreatedAt()
		merged := false
		month := pullCreatedAt.Month()
		year := pullCreatedAt.Year()
		monthyear := fmt.Sprintf("%d/%d", int(month), int(year))
		// MergedAt is non-nil only for merged PRs.
		if pull.MergedAt != nil {
			prs.TotalMerged++
			merged = true
		}
		if monthYearPRStats[monthyear] == nil {
			monthYearPRStats[monthyear] = &prStats{merged: 0, sent: 0}
		}
		if merged {
			monthYearPRStats[monthyear].merged++
		}
		monthYearPRStats[monthyear].sent++
	}
	return nil
}
Comment unnecessary lines
package main
import (
"github.com/google/go-github/github"
"github.com/pkg/errors"
)
// PullRequests aggregates pull request statistics for one repository.
type PullRequests struct {
	Repository   *Repository   `json:"repository" bson:"repository"`
	Total        int           `json:"total" bson:"total"`
	TotalMerged  int           `json:"total_merged" bson:"total_merged"`
	SentVsMerged []*PRPerMonth `json:"sent_vs_merged" bson:"sent_vs_merged"`
}
// PRPerMonth holds the number of pull requests sent and merged in one
// month, keyed by a "month/year" string.
type PRPerMonth struct {
	MonthYear string `json:"month_year" bson:"month_year"`
	Sent      int    `json:"sent" bson:"sent"`
	Merged    int    `json:"merged" bson:"merged"`
}
// prStats is an internal sent/merged counter pair used while aggregating
// pull requests per month.
type prStats struct {
	sent   int
	merged int
}
// getPullRequests fills prs with pull request counts for its repository:
// the total number of PRs in any state and how many of them were merged.
// (Per-month aggregation into SentVsMerged is not performed here.)
func getPullRequests(prs *PullRequests) error {
	ctx, client := newGithubClient(false)
	repoOwner := prs.Repository.Owner
	repoName := prs.Repository.Name
	pullOpts := &github.PullRequestListOptions{
		ListOptions: github.ListOptions{PerPage: 100},
		State:       "all",
	}
	pulls := []*github.PullRequest{}
	// Walk every page of results.
	for {
		paginatedPulls, resp, err := client.PullRequests.List(ctx, repoOwner, repoName, pullOpts)
		if err != nil {
			return errors.Wrap(err, "error getting repo pull requests")
		}
		pulls = append(pulls, paginatedPulls...)
		if resp.NextPage == 0 {
			break
		}
		pullOpts.Page = resp.NextPage
	}
	prs.Total = len(pulls)
	prs.TotalMerged = 0
	for _, pull := range pulls {
		// MergedAt is non-nil only for merged PRs.
		if pull.MergedAt != nil {
			prs.TotalMerged++
		}
	}
	return nil
}
|
package resource_test
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"github.com/google/go-github/github"
"github.com/concourse/github-release-resource"
"github.com/concourse/github-release-resource/fakes"
)
var _ = Describe("In Command", func() {
var (
command *resource.InCommand
githubClient *fakes.FakeGitHub
githubServer *ghttp.Server
inRequest resource.InRequest
inResponse resource.InResponse
inErr error
tmpDir string
destDir string
)
BeforeEach(func() {
var err error
githubClient = &fakes.FakeGitHub{}
githubServer = ghttp.NewServer()
command = resource.NewInCommand(githubClient, ioutil.Discard)
tmpDir, err = ioutil.TempDir("", "github-release")
Ω(err).ShouldNot(HaveOccurred())
destDir = filepath.Join(tmpDir, "destination")
githubClient.DownloadReleaseAssetReturns(ioutil.NopCloser(bytes.NewBufferString("some-content")), nil)
inRequest = resource.InRequest{}
})
AfterEach(func() {
Ω(os.RemoveAll(tmpDir)).Should(Succeed())
})
buildRelease := func(id int, tag string, draft bool) *github.RepositoryRelease {
return &github.RepositoryRelease{
ID: github.Int(id),
TagName: github.String(tag),
HTMLURL: github.String("http://google.com"),
Name: github.String("release-name"),
Body: github.String("*markdown*"),
Draft: github.Bool(draft),
Prerelease: github.Bool(false),
}
}
buildNilTagRelease := func(id int) *github.RepositoryRelease {
return &github.RepositoryRelease{
ID: github.Int(id),
HTMLURL: github.String("http://google.com"),
Name: github.String("release-name"),
Body: github.String("*markdown*"),
Draft: github.Bool(true),
Prerelease: github.Bool(false),
}
}
buildAsset := func(id int, name string) *github.ReleaseAsset {
return &github.ReleaseAsset{
ID: github.Int(id),
Name: &name,
}
}
buildTagRef := func(tagRef, commitSHA string) *github.Reference {
return &github.Reference{
Ref: github.String(tagRef),
URL: github.String("https://example.com"),
Object: &github.GitObject{
Type: github.String("commit"),
SHA: github.String(commitSHA),
URL: github.String("https://example.com"),
},
}
}
Context("when there is a tagged release", func() {
Context("when a present version is specified", func() {
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(buildRelease(1, "v0.35.0", false), nil)
githubClient.GetRefReturns(buildTagRef("v0.35.0", "f28085a4a8f744da83411f5e09fd7b1709149eee"), nil)
githubClient.ListReleaseAssetsReturns([]*github.ReleaseAsset{
buildAsset(0, "example.txt"),
buildAsset(1, "example.rtf"),
buildAsset(2, "example.wtf"),
}, nil)
inRequest.Version = &resource.Version{
Tag: "v0.35.0",
}
})
Context("when valid asset filename globs are given", func() {
BeforeEach(func() {
inRequest.Params = resource.InParams{
Globs: []string{"*.txt", "*.rtf"},
}
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inResponse.Version).Should(Equal(resource.Version{Tag: "v0.35.0"}))
})
It("has some sweet metadata", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "commit_sha", Value: "f28085a4a8f744da83411f5e09fd7b1709149eee"},
))
})
It("calls #GetReleastByTag with the correct arguments", func() {
command.Run(destDir, inRequest)
Ω(githubClient.GetReleaseByTagArgsForCall(0)).Should(Equal("v0.35.0"))
})
It("downloads only the files that match the globs", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubClient.DownloadReleaseAssetCallCount()).To(Equal(2))
Ω(githubClient.DownloadReleaseAssetArgsForCall(0)).Should(Equal(*buildAsset(0, "example.txt")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(1)).Should(Equal(*buildAsset(1, "example.rtf")))
})
It("does create the body, tag and version files", func() {
inResponse, inErr = command.Run(destDir, inRequest)
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("v0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "commit_sha"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("f28085a4a8f744da83411f5e09fd7b1709149eee"))
contents, err = ioutil.ReadFile(path.Join(destDir, "body"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("*markdown*"))
})
Context("when there is a custom tag filter", func() {
BeforeEach(func() {
inRequest.Source = resource.Source{
TagFilter: "package-(.*)",
}
githubClient.GetReleaseByTagReturns(buildRelease(1, "package-0.35.0", false), nil)
githubClient.GetRefReturns(buildTagRef("package-0.35.0", "f28085a4a8f744da83411f5e09fd7b1709149eee"), nil)
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("does create the body, tag and version files", func() {
inResponse, inErr = command.Run(destDir, inRequest)
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("package-0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
})
})
Context("when include_source_tarball is true", func() {
var tarballUrl *url.URL
BeforeEach(func() {
inRequest.Params.IncludeSourceTarball = true
tarballUrl, _ = url.Parse(githubServer.URL())
tarballUrl.Path = "/gimme-a-tarball/"
})
Context("when getting the tarball link succeeds", func() {
BeforeEach(func() {
githubClient.GetTarballLinkReturns(tarballUrl, nil)
})
Context("when downloading the tarball succeeds", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", tarballUrl.Path),
ghttp.RespondWith(http.StatusOK, "source-tar-file-contents"),
),
)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("downloads the source tarball", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubServer.ReceivedRequests()).To(HaveLen(1))
})
It("saves the source tarball in the destination directory", func() {
inResponse, inErr = command.Run(destDir, inRequest)
fileContents, err := ioutil.ReadFile(filepath.Join(destDir, "source.tar.gz"))
fContents := string(fileContents)
Expect(err).NotTo(HaveOccurred())
Expect(fContents).To(Equal("source-tar-file-contents"))
})
})
Context("when downloading the tarball fails", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", tarballUrl.Path),
ghttp.RespondWith(http.StatusInternalServerError, ""),
),
)
})
It("returns an appropriate error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(MatchError("failed to download file `source.tar.gz`: HTTP status 500"))
})
})
})
Context("when getting the tarball link fails", func() {
disaster := errors.New("oh my")
BeforeEach(func() {
githubClient.GetTarballLinkReturns(nil, disaster)
})
It("returns the error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(Equal(disaster))
})
})
})
Context("when include_source_zip is true", func() {
var zipUrl *url.URL
BeforeEach(func() {
inRequest.Params.IncludeSourceZip = true
zipUrl, _ = url.Parse(githubServer.URL())
zipUrl.Path = "/gimme-a-zip/"
})
Context("when getting the zip link succeeds", func() {
BeforeEach(func() {
githubClient.GetZipballLinkReturns(zipUrl, nil)
})
Context("when downloading the zip succeeds", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", zipUrl.Path),
ghttp.RespondWith(http.StatusOK, "source-zip-file-contents"),
),
)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("downloads the source zip", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubServer.ReceivedRequests()).To(HaveLen(1))
})
It("saves the source zip in the destination directory", func() {
inResponse, inErr = command.Run(destDir, inRequest)
fileContents, err := ioutil.ReadFile(filepath.Join(destDir, "source.zip"))
fContents := string(fileContents)
Expect(err).NotTo(HaveOccurred())
Expect(fContents).To(Equal("source-zip-file-contents"))
})
})
Context("when downloading the zip fails", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", zipUrl.Path),
ghttp.RespondWith(http.StatusInternalServerError, ""),
),
)
})
It("returns an appropriate error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(MatchError("failed to download file `source.zip`: HTTP status 500"))
})
})
})
Context("when getting the zip link fails", func() {
disaster := errors.New("oh my")
BeforeEach(func() {
githubClient.GetZipballLinkReturns(nil, disaster)
})
It("returns the error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(Equal(disaster))
})
})
})
})
Context("when no globs are specified", func() {
BeforeEach(func() {
inRequest.Source = resource.Source{}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{Tag: "v0.35.0"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "commit_sha", Value: "f28085a4a8f744da83411f5e09fd7b1709149eee"},
))
})
It("downloads all of the files", func() {
Ω(githubClient.DownloadReleaseAssetArgsForCall(0)).Should(Equal(*buildAsset(0, "example.txt")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(1)).Should(Equal(*buildAsset(1, "example.rtf")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(2)).Should(Equal(*buildAsset(2, "example.wtf")))
})
})
Context("when downloading an asset fails", func() {
BeforeEach(func() {
githubClient.DownloadReleaseAssetReturns(nil, errors.New("not this time"))
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns an error", func() {
Ω(inErr).Should(HaveOccurred())
})
})
Context("when listing release assets fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
githubClient.ListReleaseAssetsReturns(nil, disaster)
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns the error", func() {
Ω(inErr).Should(Equal(disaster))
})
})
})
})
Context("when no tagged release is present", func() {
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(nil, nil)
inRequest.Version = &resource.Version{
Tag: "v0.40.0",
}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns an error", func() {
Ω(inErr).Should(MatchError("no releases"))
})
})
Context("when getting a tagged release fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(nil, disaster)
inRequest.Version = &resource.Version{
Tag: "some-tag",
}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns the error", func() {
Ω(inErr).Should(Equal(disaster))
})
})
Context("when there is a draft release", func() {
Context("which has a tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildRelease(1, "v0.35.0", true), nil)
githubClient.GetRefReturns(buildTagRef("v0.35.0", "f28085a4a8f744da83411f5e09fd7b1709149eee"), nil)
inRequest.Version = &resource.Version{ID: "1"}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "commit_sha", Value: "f28085a4a8f744da83411f5e09fd7b1709149eee"},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does create the tag and version files", func() {
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("v0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "commit_sha"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("f28085a4a8f744da83411f5e09fd7b1709149eee"))
})
})
Context("which has an empty tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildRelease(1, "", true), nil)
inRequest.Version = &resource.Version{ID: "1"}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: ""},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does not create the tag and version files", func() {
Ω(path.Join(destDir, "tag")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "version")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "commit_sha")).ShouldNot(BeAnExistingFile())
})
})
Context("which has a nil tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildNilTagRelease(1), nil)
inRequest.Version = &resource.Version{ID: "1"}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does not create the tag and version files", func() {
Ω(path.Join(destDir, "tag")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "version")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "commit_sha")).ShouldNot(BeAnExistingFile())
})
})
})
})
Update in_command tests
package resource_test
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"github.com/google/go-github/github"
"github.com/concourse/github-release-resource"
"github.com/concourse/github-release-resource/fakes"
)
var _ = Describe("In Command", func() {
var (
command *resource.InCommand
githubClient *fakes.FakeGitHub
githubServer *ghttp.Server
inRequest resource.InRequest
inResponse resource.InResponse
inErr error
tmpDir string
destDir string
)
BeforeEach(func() {
var err error
githubClient = &fakes.FakeGitHub{}
githubServer = ghttp.NewServer()
command = resource.NewInCommand(githubClient, ioutil.Discard)
tmpDir, err = ioutil.TempDir("", "github-release")
Ω(err).ShouldNot(HaveOccurred())
destDir = filepath.Join(tmpDir, "destination")
githubClient.DownloadReleaseAssetReturns(ioutil.NopCloser(bytes.NewBufferString("some-content")), nil)
inRequest = resource.InRequest{}
})
AfterEach(func() {
Ω(os.RemoveAll(tmpDir)).Should(Succeed())
})
buildRelease := func(id int, tag string, draft bool) *github.RepositoryRelease {
return &github.RepositoryRelease{
ID: github.Int(id),
TagName: github.String(tag),
HTMLURL: github.String("http://google.com"),
Name: github.String("release-name"),
Body: github.String("*markdown*"),
Draft: github.Bool(draft),
Prerelease: github.Bool(false),
}
}
buildNilTagRelease := func(id int) *github.RepositoryRelease {
return &github.RepositoryRelease{
ID: github.Int(id),
HTMLURL: github.String("http://google.com"),
Name: github.String("release-name"),
Body: github.String("*markdown*"),
Draft: github.Bool(true),
Prerelease: github.Bool(false),
}
}
buildAsset := func(id int, name string) *github.ReleaseAsset {
return &github.ReleaseAsset{
ID: github.Int(id),
Name: &name,
}
}
buildTagRef := func(tagRef, commitSHA string) *github.Reference {
return &github.Reference{
Ref: github.String(tagRef),
URL: github.String("https://example.com"),
Object: &github.GitObject{
Type: github.String("commit"),
SHA: github.String(commitSHA),
URL: github.String("https://example.com"),
},
}
}
Context("when there is a tagged release", func() {
Context("when a present version is specified", func() {
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(buildRelease(1, "v0.35.0", false), nil)
githubClient.GetRefReturns(buildTagRef("v0.35.0", "f28085a4a8f744da83411f5e09fd7b1709149eee"), nil)
githubClient.ListReleaseAssetsReturns([]*github.ReleaseAsset{
buildAsset(0, "example.txt"),
buildAsset(1, "example.rtf"),
buildAsset(2, "example.wtf"),
}, nil)
inRequest.Version = &resource.Version{
Tag: "v0.35.0",
}
})
Context("when valid asset filename globs are given", func() {
BeforeEach(func() {
inRequest.Params = resource.InParams{
Globs: []string{"*.txt", "*.rtf"},
}
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inResponse.Version).Should(Equal(resource.Version{Tag: "v0.35.0"}))
})
It("has some sweet metadata", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "commit_sha", Value: "f28085a4a8f744da83411f5e09fd7b1709149eee"},
))
})
It("calls #GetReleastByTag with the correct arguments", func() {
command.Run(destDir, inRequest)
Ω(githubClient.GetReleaseByTagArgsForCall(0)).Should(Equal("v0.35.0"))
})
It("downloads only the files that match the globs", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubClient.DownloadReleaseAssetCallCount()).To(Equal(2))
Ω(githubClient.DownloadReleaseAssetArgsForCall(0)).Should(Equal(*buildAsset(0, "example.txt")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(1)).Should(Equal(*buildAsset(1, "example.rtf")))
})
It("does create the body, tag and version files", func() {
inResponse, inErr = command.Run(destDir, inRequest)
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("v0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "commit_sha"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("f28085a4a8f744da83411f5e09fd7b1709149eee"))
contents, err = ioutil.ReadFile(path.Join(destDir, "body"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("*markdown*"))
})
Context("when there is a custom tag filter", func() {
BeforeEach(func() {
inRequest.Source = resource.Source{
TagFilter: "package-(.*)",
}
githubClient.GetReleaseByTagReturns(buildRelease(1, "package-0.35.0", false), nil)
githubClient.GetRefReturns(buildTagRef("package-0.35.0", "f28085a4a8f744da83411f5e09fd7b1709149eee"), nil)
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("does create the body, tag and version files", func() {
inResponse, inErr = command.Run(destDir, inRequest)
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("package-0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
})
})
Context("when include_source_tarball is true", func() {
var tarballUrl *url.URL
BeforeEach(func() {
inRequest.Params.IncludeSourceTarball = true
tarballUrl, _ = url.Parse(githubServer.URL())
tarballUrl.Path = "/gimme-a-tarball/"
})
Context("when getting the tarball link succeeds", func() {
BeforeEach(func() {
githubClient.GetTarballLinkReturns(tarballUrl, nil)
})
Context("when downloading the tarball succeeds", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", tarballUrl.Path),
ghttp.RespondWith(http.StatusOK, "source-tar-file-contents"),
),
)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("downloads the source tarball", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubServer.ReceivedRequests()).To(HaveLen(1))
})
It("saves the source tarball in the destination directory", func() {
inResponse, inErr = command.Run(destDir, inRequest)
fileContents, err := ioutil.ReadFile(filepath.Join(destDir, "source.tar.gz"))
fContents := string(fileContents)
Expect(err).NotTo(HaveOccurred())
Expect(fContents).To(Equal("source-tar-file-contents"))
})
})
Context("when downloading the tarball fails", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", tarballUrl.Path),
ghttp.RespondWith(http.StatusInternalServerError, ""),
),
)
})
It("returns an appropriate error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(MatchError("failed to download file `source.tar.gz`: HTTP status 500"))
})
})
})
Context("when getting the tarball link fails", func() {
disaster := errors.New("oh my")
BeforeEach(func() {
githubClient.GetTarballLinkReturns(nil, disaster)
})
It("returns the error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(Equal(disaster))
})
})
})
Context("when include_source_zip is true", func() {
var zipUrl *url.URL
BeforeEach(func() {
inRequest.Params.IncludeSourceZip = true
zipUrl, _ = url.Parse(githubServer.URL())
zipUrl.Path = "/gimme-a-zip/"
})
Context("when getting the zip link succeeds", func() {
BeforeEach(func() {
githubClient.GetZipballLinkReturns(zipUrl, nil)
})
Context("when downloading the zip succeeds", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", zipUrl.Path),
ghttp.RespondWith(http.StatusOK, "source-zip-file-contents"),
),
)
})
It("succeeds", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).ToNot(HaveOccurred())
})
It("downloads the source zip", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(githubServer.ReceivedRequests()).To(HaveLen(1))
})
It("saves the source zip in the destination directory", func() {
inResponse, inErr = command.Run(destDir, inRequest)
fileContents, err := ioutil.ReadFile(filepath.Join(destDir, "source.zip"))
fContents := string(fileContents)
Expect(err).NotTo(HaveOccurred())
Expect(fContents).To(Equal("source-zip-file-contents"))
})
})
Context("when downloading the zip fails", func() {
BeforeEach(func() {
githubServer.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", zipUrl.Path),
ghttp.RespondWith(http.StatusInternalServerError, ""),
),
)
})
It("returns an appropriate error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(MatchError("failed to download file `source.zip`: HTTP status 500"))
})
})
})
Context("when getting the zip link fails", func() {
disaster := errors.New("oh my")
BeforeEach(func() {
githubClient.GetZipballLinkReturns(nil, disaster)
})
It("returns the error", func() {
inResponse, inErr = command.Run(destDir, inRequest)
Expect(inErr).To(Equal(disaster))
})
})
})
})
Context("when no globs are specified", func() {
BeforeEach(func() {
inRequest.Source = resource.Source{}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{Tag: "v0.35.0"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "commit_sha", Value: "f28085a4a8f744da83411f5e09fd7b1709149eee"},
))
})
It("downloads all of the files", func() {
Ω(githubClient.DownloadReleaseAssetArgsForCall(0)).Should(Equal(*buildAsset(0, "example.txt")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(1)).Should(Equal(*buildAsset(1, "example.rtf")))
Ω(githubClient.DownloadReleaseAssetArgsForCall(2)).Should(Equal(*buildAsset(2, "example.wtf")))
})
})
Context("when downloading an asset fails", func() {
BeforeEach(func() {
githubClient.DownloadReleaseAssetReturns(nil, errors.New("not this time"))
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns an error", func() {
Ω(inErr).Should(HaveOccurred())
})
})
Context("when listing release assets fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
githubClient.ListReleaseAssetsReturns(nil, disaster)
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns the error", func() {
Ω(inErr).Should(Equal(disaster))
})
})
})
})
Context("when no tagged release is present", func() {
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(nil, nil)
inRequest.Version = &resource.Version{
Tag: "v0.40.0",
}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns an error", func() {
Ω(inErr).Should(MatchError("no releases"))
})
})
Context("when getting a tagged release fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
githubClient.GetReleaseByTagReturns(nil, disaster)
inRequest.Version = &resource.Version{
Tag: "some-tag",
}
inResponse, inErr = command.Run(destDir, inRequest)
})
It("returns the error", func() {
Ω(inErr).Should(Equal(disaster))
})
})
Context("when there is a draft release", func() {
Context("which has a tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildRelease(1, "v0.35.0", true), nil)
inRequest.Version = &resource.Version{ID: "1"}
inRequest.Source.Drafts = true
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: "v0.35.0"},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does create the tag and version files", func() {
contents, err := ioutil.ReadFile(path.Join(destDir, "tag"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("v0.35.0"))
contents, err = ioutil.ReadFile(path.Join(destDir, "version"))
Ω(err).ShouldNot(HaveOccurred())
Ω(string(contents)).Should(Equal("0.35.0"))
})
})
Context("which has an empty tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildRelease(1, "", true), nil)
inRequest.Version = &resource.Version{ID: "1"}
inRequest.Source.Drafts = true
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "tag", Value: ""},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does not create the tag and version files", func() {
Ω(path.Join(destDir, "tag")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "version")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "commit_sha")).ShouldNot(BeAnExistingFile())
})
})
Context("which has a nil tag", func() {
BeforeEach(func() {
githubClient.GetReleaseReturns(buildNilTagRelease(1), nil)
inRequest.Version = &resource.Version{ID: "1"}
inRequest.Source.Drafts = true
inResponse, inErr = command.Run(destDir, inRequest)
})
It("succeeds", func() {
Ω(inErr).ShouldNot(HaveOccurred())
})
It("returns the fetched version", func() {
Ω(inResponse.Version).Should(Equal(resource.Version{ID: "1"}))
})
It("has some sweet metadata", func() {
Ω(inResponse.Metadata).Should(ConsistOf(
resource.MetadataPair{Name: "url", Value: "http://google.com"},
resource.MetadataPair{Name: "name", Value: "release-name", URL: "http://google.com"},
resource.MetadataPair{Name: "body", Value: "*markdown*", Markdown: true},
resource.MetadataPair{Name: "draft", Value: "true"},
))
})
It("does not create the tag and version files", func() {
Ω(path.Join(destDir, "tag")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "version")).ShouldNot(BeAnExistingFile())
Ω(path.Join(destDir, "commit_sha")).ShouldNot(BeAnExistingFile())
})
})
})
})
|
package terraform
import (
"bytes"
"errors"
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
// TestContext2Plan_basic plans the "plan-good" fixture and verifies the
// planned attribute values for both instances, that the provider SHA256s
// are carried into the plan, and that planning leaves the state empty.
func TestContext2Plan_basic(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		ProviderSHA256s: map[string][]byte{
			"aws": []byte("placeholder"),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// The fixture declares two resources, so the plan must contain at
	// least two changes. (Message fixed: the old text said "want fewer
	// than two", contradicting the l < 2 guard.)
	if l := len(plan.Changes.Resources); l < 2 {
		t.Fatalf("wrong number of resources %d; want at least two\n%s", l, spew.Sdump(plan.Changes.Resources))
	}

	if !reflect.DeepEqual(plan.ProviderSHA256s, ctx.providerSHA256s) {
		t.Errorf("wrong ProviderSHA256s %#v; want %#v", plan.ProviderSHA256s, ctx.providerSHA256s)
	}

	// Planning must not mutate state.
	if !ctx.State().Empty() {
		t.Fatalf("expected empty state, got %#v\n", ctx.State())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// Check the planned "after" values for each known instance address.
	for _, r := range plan.Changes.Resources {
		ric, err := r.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			foo := ric.After.GetAttr("foo").AsString()
			if foo != "2" {
				t.Fatalf("incorrect plan for 'bar': %#v", ric.After)
			}
		case "aws_instance.foo":
			num, _ := ric.After.GetAttr("num").AsBigFloat().Int64()
			if num != 2 {
				t.Fatalf("incorrect plan for 'foo': %#v", ric.After)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_createBefore_deposed plans against a state containing a
// current object plus one deposed object (from a previous create_before_destroy
// replacement) and verifies the plan keeps the current object (NoOp) while
// scheduling the deposed one for deletion.
func TestContext2Plan_createBefore_deposed(t *testing.T) {
	m := testModule(t, "plan-cbd")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Legacy-format state: aws_instance.foo has primary "baz" and a
	// deposed instance "foo".
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
							Attributes: map[string]string{
								"id": "baz",
							},
						},
						Deposed: []*InstanceState{
							&InstanceState{
								ID: "foo",
								Attributes: map[string]string{
									"id": "foo",
								},
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// the state should still show one deposed
	expectedState := strings.TrimSpace(`
 aws_instance.foo: (1 deposed)
  ID = baz
  provider = provider["registry.terraform.io/-/aws"]
  Deposed ID 1 = foo`)

	if ctx.State().String() != expectedState {
		t.Fatalf("\nexpected: %q\ngot:      %q\n", expectedState, ctx.State().String())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// InstanceGen identifies a specific generation (current or deposed)
	// of a resource instance within the plan.
	type InstanceGen struct {
		Addr       string
		DeposedKey states.DeposedKey
	}

	// Expect exactly two changes: the current object and the deposed one.
	want := map[InstanceGen]bool{
		{
			Addr: "aws_instance.foo",
		}: true,
		{
			Addr:       "aws_instance.foo",
			DeposedKey: states.DeposedKey("00000001"),
		}: true,
	}
	got := make(map[InstanceGen]bool)
	changes := make(map[InstanceGen]*plans.ResourceInstanceChangeSrc)

	for _, change := range plan.Changes.Resources {
		k := InstanceGen{
			Addr:       change.Addr.String(),
			DeposedKey: change.DeposedKey,
		}
		got[k] = true
		changes[k] = change
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("wrong resource instance object changes in plan\ngot: %s\nwant: %s", spew.Sdump(got), spew.Sdump(want))
	}

	// The current object is unchanged by the plan.
	{
		ric, err := changes[InstanceGen{Addr: "aws_instance.foo"}].Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		if got, want := ric.Action, plans.NoOp; got != want {
			t.Errorf("current object change action is %s; want %s", got, want)
		}

		// the existing instance should only have an unchanged id
		expected, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("baz")}))
		if err != nil {
			t.Fatal(err)
		}

		checkVals(t, expected, ric.After)
	}

	// The deposed object must be planned for deletion.
	{
		ric, err := changes[InstanceGen{Addr: "aws_instance.foo", DeposedKey: states.DeposedKey("00000001")}].Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		if got, want := ric.Action, plans.Delete; got != want {
			t.Errorf("deposed object change action is %s; want %s", got, want)
		}
	}
}
// TestContext2Plan_createBefore_maintainRoot plans the
// "plan-cbd-maintain-root" fixture and checks that all four planned
// changes are creates and that the state remains empty.
func TestContext2Plan_createBefore_maintainRoot(t *testing.T) {
	cfg := testModule(t, "plan-cbd-maintain-root")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn
	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Planning must not touch the (initially empty) state.
	if empty := tfCtx.State().Empty(); !empty {
		t.Fatal("expected empty state, got:", tfCtx.State())
	}

	if count := len(plan.Changes.Resources); count != 4 {
		t.Error("expected 4 resource in plan, got", count)
	}

	// these should all be creates
	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("unexpected action %s for %s", change.Action, change.Addr.String())
		}
	}
}
// TestContext2Plan_emptyDiff plans with a provider whose diff function
// always reports no diff, and checks that both resources in the
// "plan-empty" fixture are nonetheless planned as creates.
func TestContext2Plan_emptyDiff(t *testing.T) {
	mod := testModule(t, "plan-empty")
	prov := testProvider("aws")

	// Diff function that reports no changes for every resource.
	prov.DiffFn = func(
		info *InstanceInfo,
		s *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		return nil, nil
	}

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if state := tfCtx.State(); !state.Empty() {
		t.Fatal("expected empty state, got:", state)
	}

	if count := len(plan.Changes.Resources); count != 2 {
		t.Error("expected 2 resource in plan, got", count)
	}

	// Collect the planned action per address and compare wholesale.
	actions := map[string]plans.Action{}
	for _, change := range plan.Changes.Resources {
		actions[change.Addr.String()] = change.Action
	}

	expected := map[string]plans.Action{
		"aws_instance.foo": plans.Create,
		"aws_instance.bar": plans.Create,
	}
	if !cmp.Equal(expected, actions) {
		t.Fatal(cmp.Diff(expected, actions))
	}
}
// TestContext2Plan_escapedVar verifies that an escaped interpolation
// sequence ($${baz}) in configuration is planned as the literal string
// "bar-${baz}" rather than being evaluated.
func TestContext2Plan_escapedVar(t *testing.T) {
	m := testModule(t, "plan-escaped-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Changes.Resources) != 1 {
		t.Error("expected 1 resource in plan, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	if res.Action != plans.Create {
		t.Fatalf("expected resource creation, got %s", res.Action)
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// "foo" must contain the literal, unexpanded ${baz}.
	expected := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"foo":  cty.StringVal("bar-${baz}"),
		"type": cty.StringVal("aws_instance")},
	)

	checkVals(t, expected, ric.After)
}
// TestContext2Plan_minimal plans the minimal "plan-empty" fixture with the
// standard test DiffFn and expects two Create actions and no prior state.
func TestContext2Plan_minimal(t *testing.T) {
	module := testModule(t, "plan-empty")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !ctx.State().Empty() {
		t.Fatal("expected empty state, got:", ctx.State())
	}
	if got := len(plan.Changes.Resources); got != 2 {
		t.Error("expected 2 resource in plan, got", got)
	}

	got := make(map[string]plans.Action)
	for _, change := range plan.Changes.Resources {
		got[change.Addr.String()] = change.Action
	}
	want := map[string]plans.Action{
		"aws_instance.foo": plans.Create,
		"aws_instance.bar": plans.Create,
	}
	if !cmp.Equal(want, got) {
		t.Fatal(cmp.Diff(want, got))
	}
}
// TestContext2Plan_modules plans a configuration with a root module and a
// child module, and verifies that all three instances are planned for
// creation with the expected attribute values.
func TestContext2Plan_modules(t *testing.T) {
	m := testModule(t, "plan-modules")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// Two root instances plus one in module.child.
	if len(plan.Changes.Resources) != 3 {
		t.Error("expected 3 resource in plan, got", len(plan.Changes.Resources))
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// expectFoo: the instance that interpolates foo = "2".
	expectFoo := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"foo":  cty.StringVal("2"),
		"type": cty.StringVal("aws_instance")},
	)
	// expectNum: the instances that set num = 2 directly.
	expectNum := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"num":  cty.NumberIntVal(2),
		"type": cty.StringVal("aws_instance")},
	)
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = expectFoo
		case "aws_instance.foo":
			expected = expectNum
		case "module.child.aws_instance.foo":
			expected = expectNum
		default:
			t.Fatal("unknown instance:", i)
		}
		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleExpand tests a smattering of plan expansion
// behavior: for_each on a root resource, count on module calls, and count on
// resources inside those expanded modules. Every expected instance address
// must appear exactly once in the plan.
func TestContext2Plan_moduleExpand(t *testing.T) {
	m := testModule(t, "plan-modules-expand")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// The set of instance addresses we still expect to see; entries are
	// removed as they are found so leftovers indicate missing instances.
	// (Composite-literal values elided per gofmt -s.)
	expected := map[string]struct{}{
		`aws_instance.foo["a"]`:                          {},
		`module.count_child[1].aws_instance.foo[0]`:      {},
		`module.count_child[1].aws_instance.foo[1]`:      {},
		`module.count_child[0].aws_instance.foo[0]`:      {},
		`module.count_child[0].aws_instance.foo[1]`:      {},
		`module.for_each_child["a"].aws_instance.foo[1]`: {},
		`module.for_each_child["a"].aws_instance.foo[0]`: {},
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		_, ok := expected[ric.Addr.String()]
		if !ok {
			t.Fatal("unexpected resource:", ric.Addr.String())
		}
		delete(expected, ric.Addr.String())
	}

	// Anything left over was never planned.
	for addr := range expected {
		t.Error("missing resource", addr)
	}
}
// GH-1475
//
// TestContext2Plan_moduleCycle covers a configuration that previously caused
// a module-level dependency cycle; the plan must succeed and create both
// root instances.
func TestContext2Plan_moduleCycle(t *testing.T) {
	m := testModule(t, "plan-module-cycle")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Computed: true},
					"some_input": {Type: cty.String, Optional: true},
					"type":       {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.b":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			})
		case "aws_instance.c":
			// some_input comes from the (unknown) value of aws_instance.b.
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"some_input": cty.UnknownVal(cty.String),
				"type":       cty.StringVal("aws_instance"),
			})
		default:
			t.Fatal("unknown instance:", i)
		}
		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleDeadlock plans a fixture that previously deadlocked
// during module processing; testCheckDeadlock fails the test if the plan
// never completes. All three counted child instances must plan as creates.
func TestContext2Plan_moduleDeadlock(t *testing.T) {
	testCheckDeadlock(t, func() {
		m := testModule(t, "plan-module-deadlock")
		p := testProvider("aws")
		p.DiffFn = testDiffFn

		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: providers.ResolverFixed(
				map[addrs.Provider]providers.Factory{
					addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
				},
			),
		})

		// Plan returns tfdiags.Diagnostics, not a plain error; check
		// HasErrors so warnings alone don't fail the test. This matches
		// the convention used by the other tests in this file.
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
		ty := schema.ImpliedType()

		for _, res := range plan.Changes.Resources {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}
			expected := objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			})
			// Only the address membership matters here; the empty cases
			// simply accept the three known instances.
			switch i := ric.Addr.String(); i {
			case "module.child.aws_instance.foo[0]":
			case "module.child.aws_instance.foo[1]":
			case "module.child.aws_instance.foo[2]":
			default:
				t.Fatal("unknown instance:", i)
			}
			checkVals(t, expected, ric.After)
		}
	})
}
// TestContext2Plan_moduleInput verifies that a value passed into a child
// module via a module input variable ("42") is visible on the child's
// resource, while the root resource keeps its own value ("2").
func TestContext2Plan_moduleInput(t *testing.T) {
	m := testModule(t, "plan-module-input")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			})
		case "module.child.aws_instance.foo":
			// "42" was passed in through the module input variable.
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("42"),
				"type": cty.StringVal("aws_instance"),
			})
		default:
			t.Fatal("unknown instance:", i)
		}
		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleInputComputed verifies that a computed value passed
// into a child module as an input variable shows up as unknown on the
// child's resource during plan.
func TestContext2Plan_moduleInputComputed(t *testing.T) {
	m := testModule(t, "plan-module-input-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// The root resource declares "foo" as computed via "compute".
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":      cty.UnknownVal(cty.String),
				"foo":     cty.UnknownVal(cty.String),
				"type":    cty.StringVal("aws_instance"),
				"compute": cty.StringVal("foo"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// The unknown value propagated through the module input.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleInputFromVar verifies that a root input variable
// ("foo" = "52") supplied via ContextOpts.Variables flows through a module
// input and appears on the child module's resource.
func TestContext2Plan_moduleInputFromVar(t *testing.T) {
	m := testModule(t, "plan-module-input-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("52"),
				SourceType: ValueFromCaller,
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// The caller-supplied "52" reached the child module.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("52"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleMultiVar verifies that splat-style references to a
// counted resource work across a module boundary: the child's
// aws_instance.foo joins the two "baz" values into "baz,baz".
func TestContext2Plan_moduleMultiVar(t *testing.T) {
	m := testModule(t, "plan-module-multi-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
					"baz": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// Two parent instances, two child bar instances, one child foo.
	if len(plan.Changes.Resources) != 5 {
		t.Fatal("expected 5 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.parent[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.parent[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.child.aws_instance.bar[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"baz": cty.StringVal("baz"),
			}), ric.After)
		case "module.child.aws_instance.bar[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"baz": cty.StringVal("baz"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// Joined from the splat of bar[*].baz.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"foo": cty.StringVal("baz,baz"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleOrphans verifies that a resource tracked in state
// under a module that no longer exists in configuration is planned for
// deletion, while the still-configured root resource is planned for
// creation. The plan must not modify the state itself.
func TestContext2Plan_moduleOrphans(t *testing.T) {
	m := testModule(t, "plan-modules-remove")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: module.child.aws_instance.foo exists, but the child
	// module has been removed from the configuration.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
						Provider: "provider.aws",
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// Orphaned by the removed module: must be deleted.
			if res.Action != plans.Delete {
				t.Fatalf("expected resource delete, got %s", res.Action)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
	// Planning must leave the prior state untouched.
	expectedState := `<no state>
module.child:
  aws_instance.foo:
    ID = baz
    provider = provider["registry.terraform.io/-/aws"]`
	if ctx.State().String() != expectedState {
		t.Fatalf("\nexpected state: %q\n\ngot: %q", expectedState, ctx.State().String())
	}
}
// https://github.com/hashicorp/terraform/issues/3114
//
// TestContext2Plan_moduleOrphansWithProvisioner verifies that orphaned
// resources in nested modules (removed from config but still in state) are
// planned for deletion even when provisioners are configured, and that the
// remaining root resource plans as a no-op.
func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) {
	m := testModule(t, "plan-modules-remove-provisioners")
	p := testProvider("aws")
	pr := testProvisioner()
	p.DiffFn = testDiffFn
	// Prior state has the root instance plus two instances under nested
	// modules (parent.childone, parent.childtwo) that no longer exist in
	// the configuration.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.top": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "top",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childone"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
						Provider: "provider.aws",
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childtwo"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
						Provider: "provider.aws",
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Provisioners: map[string]ProvisionerFactory{
			"shell": testProvisionerFuncFixed(pr),
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 3 {
		t.Error("expected 3 planned resources, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.parent.module.childone.aws_instance.foo":
			if res.Action != plans.Delete {
				t.Fatalf("expected resource Delete, got %s", res.Action)
			}
		case "module.parent.module.childtwo.aws_instance.foo":
			if res.Action != plans.Delete {
				t.Fatalf("expected resource Delete, got %s", res.Action)
			}
		case "aws_instance.top":
			// Still configured and unchanged: must be a no-op.
			if res.Action != plans.NoOp {
				t.Fatal("expected no changes, got", res.Action)
			}
		default:
			t.Fatalf("unknown instance: %s\nafter: %#v", i, hcl2shim.ConfigValueFromHCL2(ric.After))
		}
	}
	// Planning must leave the prior state untouched.
	expectedState := `aws_instance.top:
  ID = top
  provider = provider["registry.terraform.io/-/aws"]
module.parent.childone:
  aws_instance.foo:
    ID = baz
    provider = provider["registry.terraform.io/-/aws"]
module.parent.childtwo:
  aws_instance.foo:
    ID = baz
    provider = provider["registry.terraform.io/-/aws"]`
	if expectedState != ctx.State().String() {
		t.Fatalf("\nexpect state: %q\ngot state: %q\n", expectedState, ctx.State().String())
	}
}
// TestContext2Plan_moduleProviderInherit verifies that a child module
// inherits its parent's provider configuration: the DiffFn records the
// "from" attribute seen for each resource, and both "child" and "root"
// values must be observed.
func TestContext2Plan_moduleProviderInherit(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-inherit")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Only the root-level provider block is configured;
						// the child inherits it.
						if v, ok := c.Get("from"); !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						v, _ := c.Get("from")

						// Record which resource-level "from" was diffed.
						l.Lock()
						defer l.Unlock()
						calls = append(calls, v.(string))
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns tfdiags.Diagnostics; check HasErrors rather than a nil
	// comparison so that warnings alone cannot fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	actual := calls
	sort.Strings(actual)
	expected := []string{"child", "root"}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
// This tests (for GH-11282) that deeply nested modules properly inherit
// configuration.
func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) {
	var l sync.Mutex

	m := testModule(t, "plan-module-provider-inherit-deep")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					// "from" is captured by ConfigureFn and later checked
					// by DiffFn to prove configuration reached the
					// deeply-nested resources.
					var from string
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						v, ok := c.Get("from")
						if !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						from = v.(string)
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						if from != "root" {
							return nil, fmt.Errorf("bad resource")
						}
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns tfdiags.Diagnostics; check HasErrors rather than a nil
	// comparison so that warnings alone cannot fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}
}
// TestContext2Plan_moduleProviderDefaultsVar verifies that provider
// configuration blocks can interpolate input variables: the root provider
// sees "root" (from the supplied variable) and the child provider sees
// its own "child" values.
func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-defaults-var")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"to":   {Type: cty.String, Optional: true},
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Record the configured "from"/"to" values, one
						// newline-terminated entry per attribute present.
						var buf bytes.Buffer
						if v, ok := c.Get("from"); ok {
							buf.WriteString(v.(string) + "\n")
						}
						if v, ok := c.Get("to"); ok {
							buf.WriteString(v.(string) + "\n")
						}

						l.Lock()
						defer l.Unlock()
						calls = append(calls, buf.String())
						return nil
					}
					p.DiffFn = testDiffFn
					return p, nil
				},
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("root"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Plan returns tfdiags.Diagnostics; check HasErrors rather than a nil
	// comparison so that warnings alone cannot fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	expected := []string{
		"child\nchild\n",
		"root\n",
	}
	sort.Strings(calls)
	if !reflect.DeepEqual(calls, expected) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls)
	}
}
// TestContext2Plan_moduleProviderVar verifies that a provider configured
// inside a child module can interpolate that module's variables, and that
// the child's resource plans with the resulting value.
func TestContext2Plan_moduleProviderVar(t *testing.T) {
	m := testModule(t, "plan-module-provider-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"value": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.test":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"value": cty.StringVal("hello"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleVar verifies that a child module's output is usable
// by the root module: aws_instance.bar consumes the child's output ("2")
// while the child's own resource uses num = 2.
func TestContext2Plan_moduleVar(t *testing.T) {
	m := testModule(t, "plan-module-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			})
		case "module.child.aws_instance.foo":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			})
		default:
			t.Fatal("unknown instance:", i)
		}
		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleVarWrongTypeBasic verifies that passing a value of
// the wrong type to a module input variable makes planning fail.
func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) {
	module := testModule(t, "plan-module-wrong-var-type")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	if _, diags := ctx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_moduleVarWrongTypeNested verifies that a type mismatch on
// a variable passed into a nested module also makes planning fail.
func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) {
	module := testModule(t, "plan-module-wrong-var-type-nested")
	provider := testProvider("null")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("null"): testProviderFuncFixed(provider),
			},
		),
	})

	if _, diags := ctx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_moduleVarWithDefaultValue verifies that a module variable
// with a declared default value plans cleanly when no value is supplied.
func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) {
	module := testModule(t, "plan-module-var-with-default-value")
	provider := testProvider("null")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("null"): testProviderFuncFixed(provider),
			},
		),
	})

	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_moduleVarComputed verifies that a child module output
// derived from a computed (unknown) value propagates as unknown to the root
// resource that consumes it.
func TestContext2Plan_moduleVarComputed(t *testing.T) {
	m := testModule(t, "plan-module-var-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// Consumes the child's computed output: unknown at plan time.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// The child resource declares "foo" computed via "compute".
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":      cty.UnknownVal(cty.String),
				"foo":     cty.UnknownVal(cty.String),
				"type":    cty.StringVal("aws_instance"),
				"compute": cty.StringVal("foo"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_preventDestroy_bad verifies that planning a change that
// would destroy a resource with lifecycle.prevent_destroy set fails with an
// error naming the resource.
func TestContext2Plan_preventDestroy_bad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
	})

	// The returned tfdiags.Diagnostics must mention the protected resource.
	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// t.Log, not t.Logf: the diff string is not a format string
			// (go vet's printf check rejects non-constant formats).
			t.Log(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
// TestContext2Plan_preventDestroy_good verifies that a resource with
// lifecycle.prevent_destroy plans cleanly (with no changes) when the
// configuration does not require destroying it.
func TestContext2Plan_preventDestroy_good(t *testing.T) {
	module := testModule(t, "plan-prevent-destroy-good")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	priorState := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "i-abc123",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !plan.Changes.Empty() {
		t.Fatalf("expected no changes, got %#v\n", plan.Changes)
	}
}
// TestContext2Plan_preventDestroy_countBad verifies that when a count
// reduction would destroy an instance protected by
// lifecycle.prevent_destroy, planning fails naming the indexed instance.
func TestContext2Plan_preventDestroy_countBad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		}),
	})

	// The returned tfdiags.Diagnostics must mention the protected instance.
	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// t.Log, not t.Logf: the diff string is not a format string
			// (go vet's printf check rejects non-constant formats).
			t.Log(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
// TestContext2Plan_preventDestroy_countGood verifies that a counted resource
// with lifecycle.prevent_destroy can still be planned when the plan does not
// destroy any protected instance; the plan is non-empty.
func TestContext2Plan_preventDestroy_countGood(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Prior state holds two indexed instances of aws_instance.foo.
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		}),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if plan.Changes.Empty() {
		t.Fatalf("Expected non-empty plan, got %s", legacyDiffComparisonString(plan.Changes))
	}
}
// TestContext2Plan_preventDestroy_countGoodNoChange verifies that when the
// existing instance already matches configuration, a counted resource with
// lifecycle.prevent_destroy produces an empty plan.
func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Prior state: the instance attributes already match the config, so no
	// change should be planned.
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"current": "0",
									"type":    "aws_instance",
								},
							},
						},
					},
				},
			},
		}),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !plan.Changes.Empty() {
		t.Fatalf("Expected empty plan, got %s", legacyDiffComparisonString(plan.Changes))
	}
}
// TestContext2Plan_preventDestroy_destroyPlan verifies that an explicit
// destroy plan (Destroy: true) fails when a resource in state has
// lifecycle.prevent_destroy set.
func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// t.Log, not t.Logf: the diff string is not a format string
			// (go vet's printf check rejects non-constant formats).
			t.Log(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
// TestContext2Plan_provisionerCycle verifies that a configuration whose
// provisioner references form a dependency cycle fails to plan.
func TestContext2Plan_provisionerCycle(t *testing.T) {
	module := testModule(t, "plan-provisioner-cycle")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	provisioner := testProvisioner()

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
		Provisioners: map[string]ProvisionerFactory{
			"local-exec": testProvisionerFuncFixed(provisioner),
		},
	})

	// The cycle should be detected and reported as an error.
	if _, diags := ctx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_computed verifies that attributes derived from computed
// values are planned as unknown, while statically-known values are concrete.
func TestContext2Plan_computed(t *testing.T) {
	module := testModule(t, "plan-computed")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	// Expected planned values, keyed by instance address.
	expected := map[string]cty.Value{
		"aws_instance.bar": objectVal(t, schema, map[string]cty.Value{
			"id":   cty.UnknownVal(cty.String),
			"foo":  cty.UnknownVal(cty.String),
			"type": cty.StringVal("aws_instance"),
		}),
		"aws_instance.foo": objectVal(t, schema, map[string]cty.Value{
			"id":      cty.UnknownVal(cty.String),
			"foo":     cty.UnknownVal(cty.String),
			"num":     cty.NumberIntVal(2),
			"type":    cty.StringVal("aws_instance"),
			"compute": cty.StringVal("foo"),
		}),
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		addr := inst.Addr.String()
		want, ok := expected[addr]
		if !ok {
			t.Fatal("unknown instance:", addr)
		}
		checkVals(t, want, inst.After)
	}
}
// TestContext2Plan_blockNestingGroup verifies that a nested block declared
// with NestingGroup is always presented to the provider as a non-null object
// (with null attributes) even when the block is absent from configuration.
func TestContext2Plan_blockNestingGroup(t *testing.T) {
	m := testModule(t, "plan-block-nesting-group")
	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test": {
				BlockTypes: map[string]*configschema.NestedBlock{
					"blah": {
						Nesting: configschema.NestingGroup,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"baz": {Type: cty.String, Required: true},
							},
						},
					},
				},
			},
		},
	}
	// Echo the proposed state back as the planned state.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// NOTE: got is the observed count; the original had got/want swapped,
	// which made the failure message report the values backwards.
	if got, want := len(plan.Changes.Resources), 1; got != want {
		t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources))
	}
	if !p.PlanResourceChangeCalled {
		t.Fatalf("PlanResourceChange was not called at all")
	}
	got := p.PlanResourceChangeRequest
	want := providers.PlanResourceChangeRequest{
		TypeName: "test",

		// Because block type "blah" is defined as NestingGroup, we get a non-null
		// value for it with null nested attributes, rather than the "blah" object
		// itself being null, when there's no "blah" block in the config at all.
		//
		// This represents the situation where the remote service _always_ creates
		// a single "blah", regardless of whether the block is present, but when
		// the block _is_ present the user can override some aspects of it. The
		// absence of the block means "use the defaults", in that case.
		Config: cty.ObjectVal(map[string]cty.Value{
			"blah": cty.ObjectVal(map[string]cty.Value{
				"baz": cty.NullVal(cty.String),
			}),
		}),
		ProposedNewState: cty.ObjectVal(map[string]cty.Value{
			"blah": cty.ObjectVal(map[string]cty.Value{
				"baz": cty.NullVal(cty.String),
			}),
		}),
	}
	if !cmp.Equal(got, want, valueTrans) {
		t.Errorf("wrong PlanResourceChange request\n%s", cmp.Diff(got, want, valueTrans))
	}
}
// TestContext2Plan_computedDataResource verifies that a data resource whose
// configuration depends on a computed managed-resource attribute is planned
// with an unknown value rather than read during plan.
func TestContext2Plan_computedDataResource(t *testing.T) {
	module := testModule(t, "plan-computed-data-resource")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.DataSources["aws_vpc"]
	ty := schema.ImpliedType()

	// The managed resource must have a planned change.
	instAddr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "foo",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)
	if plan.Changes.ResourceInstance(instAddr) == nil {
		t.Fatalf("missing diff for aws_instance.foo")
	}

	// The data resource must also appear in the plan, deferred to apply time.
	dataAddr := addrs.Resource{
		Mode: addrs.DataResourceMode,
		Type: "aws_vpc",
		Name: "bar",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)
	rcs := plan.Changes.ResourceInstance(dataAddr)
	if rcs == nil {
		t.Fatalf("missing diff for data.aws_vpc.bar")
	}

	rc, err := rcs.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// "foo" comes from a computed attribute, so it must still be unknown.
	checkVals(t,
		cty.ObjectVal(map[string]cty.Value{
			"foo": cty.UnknownVal(cty.String),
		}),
		rc.After,
	)
}
// TestContext2Plan_computedInFunction verifies that a data source read during
// refresh is not read again during plan once its result is already known.
func TestContext2Plan_computedInFunction(t *testing.T) {
	module := testModule(t, "plan-computed-in-function")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"attr": {Type: cty.Number, Optional: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_data_source": {
				Attributes: map[string]*configschema.Attribute{
					"computed": {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn
	provider.ReadDataSourceResponse = providers.ReadDataSourceResponse{
		State: cty.ObjectVal(map[string]cty.Value{
			"computed": cty.ListVal([]cty.Value{cty.StringVal("foo")}),
		}),
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	assertNoErrors(t, ctx.Validate())

	// The data resource is read during refresh...
	state, refreshDiags := ctx.Refresh()
	assertNoErrors(t, refreshDiags)
	if !provider.ReadDataSourceCalled {
		t.Fatalf("ReadDataSource was not called on provider during refresh; should've been called")
	}
	provider.ReadDataSourceCalled = false // reset for next call
	t.Logf("state after refresh:\n%s", state)

	// ...so plan should have no reason to read it again.
	_, planDiags := ctx.Plan()
	assertNoErrors(t, planDiags)
	if provider.ReadDataSourceCalled {
		t.Fatalf("ReadDataSource was called on provider during plan; should not have been called")
	}
}
// TestContext2Plan_computedDataCountResource verifies that a counted data
// resource yields one planned change per index.
func TestContext2Plan_computedDataCountResource(t *testing.T) {
	module := testModule(t, "plan-computed-data-count")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// make sure we created 3 "bar"s
	for index := 0; index < 3; index++ {
		addr := addrs.Resource{
			Mode: addrs.DataResourceMode,
			Type: "aws_vpc",
			Name: "bar",
		}.Instance(addrs.IntKey(index)).Absolute(addrs.RootModuleInstance)
		if plan.Changes.ResourceInstance(addr) == nil {
			t.Fatalf("missing changes for %s", addr)
		}
	}
}
// TestContext2Plan_localValueCount verifies that count can be driven by a
// local value, producing one planned change per index.
func TestContext2Plan_localValueCount(t *testing.T) {
	module := testModule(t, "plan-local-value-count")
	provider := testProvider("test")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// make sure we created 3 "foo"s
	for index := 0; index < 3; index++ {
		addr := addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_resource",
			Name: "foo",
		}.Instance(addrs.IntKey(index)).Absolute(addrs.RootModuleInstance)
		if plan.Changes.ResourceInstance(addr) == nil {
			t.Fatalf("missing changes for %s", addr)
		}
	}
}
// TestContext2Plan_dataResourceBecomesComputed verifies that when a data
// resource's configuration becomes unknown (it depends on a computed managed
// resource attribute), the read is deferred: ReadDataSource must not be
// called during plan, and the planned "foo" attribute is unknown.
func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) {
	m := testModule(t, "plan-data-resource-becomes-computed")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_data_source": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	// Pass "foo" through and force "computed" to be unknown at plan time.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		fooVal := req.ProposedNewState.GetAttr("foo")
		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(map[string]cty.Value{
				"foo":      fooVal,
				"computed": cty.UnknownVal(cty.String),
			}),
			PlannedPrivate: req.PriorPrivate,
		}
	}
	schema := p.GetSchemaReturn.DataSources["aws_data_source"]
	ty := schema.ImpliedType()
	p.ReadDataSourceResponse = providers.ReadDataSourceResponse{
		// This should not be called, because the configuration for the
		// data resource contains an unknown value for "foo".
		Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")),
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"data.aws_data_source.foo": &ResourceState{
							Type: "aws_data_source",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"id":  "i-abc123",
									"foo": "baz",
								},
							},
						},
					},
				},
			},
		}),
	})
	_, diags := ctx.Refresh()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors during refresh: %s", diags.Err())
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors during plan: %s", diags.Err())
	}
	rcs := plan.Changes.ResourceInstance(addrs.Resource{
		Mode: addrs.DataResourceMode,
		Type: "aws_data_source",
		Name: "foo",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	if rcs == nil {
		t.Logf("full changeset: %s", spew.Sdump(plan.Changes))
		// The type under test is aws_data_source; the original message
		// misspelled it as "aws_data_resource".
		t.Fatalf("missing diff for data.aws_data_source.foo")
	}
	rc, err := rcs.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}
	// foo should now be unknown
	foo := rc.After.GetAttr("foo")
	if foo.IsKnown() {
		t.Fatalf("foo should be unknown, got %#v", foo)
	}
}
// TestContext2Plan_computedList verifies planning of a resource with a
// computed list attribute, using a hand-written legacy-style DiffFn that
// emulates how old flatmap providers reported computed values.
func TestContext2Plan_computedList(t *testing.T) {
	m := testModule(t, "plan-computed-list")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Optional: true},
					"num":     {Type: cty.String, Optional: true},
					"list":    {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	// Legacy diff behavior: the attribute named by "compute" is flagged
	// NewComputed; "foo" and "num" are diffed against prior state, honoring
	// the config's ComputedKeys; and the flatmap list count key "list.#" is
	// marked computed whenever it is not yet present in state.
	p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		diff := &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{},
		}
		// Set of attribute keys the config reports as unknown at plan time.
		computedKeys := map[string]bool{}
		for _, k := range c.ComputedKeys {
			computedKeys[k] = true
		}
		// "compute" names another attribute to mark as computed in the diff.
		compute, _ := c.Raw["compute"].(string)
		if compute != "" {
			diff.Attributes[compute] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
			diff.Attributes["compute"] = &ResourceAttrDiff{
				Old: "",
				New: compute,
			}
		}
		// Diff "foo" against prior state; computed iff the config says so.
		fooOld := s.Attributes["foo"]
		fooNew, _ := c.Raw["foo"].(string)
		if fooOld != fooNew {
			diff.Attributes["foo"] = &ResourceAttrDiff{
				Old:         fooOld,
				New:         fooNew,
				NewComputed: computedKeys["foo"],
			}
		}
		// Same treatment for "num".
		numOld := s.Attributes["num"]
		numNew, _ := c.Raw["num"].(string)
		if numOld != numNew {
			diff.Attributes["num"] = &ResourceAttrDiff{
				Old:         numOld,
				New:         numNew,
				NewComputed: computedKeys["num"],
			}
		}
		// The list's flatmap count is computed until it first appears in state.
		listOld := s.Attributes["list.#"]
		if listOld == "" {
			diff.Attributes["list.#"] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
		}
		return diff, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	// Both instances should be creates with their lists still unknown.
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"list": cty.UnknownVal(cty.List(cty.String)),
				"foo":  cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"list":    cty.UnknownVal(cty.List(cty.String)),
				"num":     cty.NumberIntVal(2),
				"compute": cty.StringVal("list.#"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// GH-8695. This tests that you can index into a computed list on a
// splatted resource.
func TestContext2Plan_computedMultiIndex(t *testing.T) {
	m := testModule(t, "plan-computed-multi-index")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.List(cty.String), Optional: true},
					"ip":      {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	// NOTE: the original also assigned testDiffFn here first, but that
	// assignment was dead — it was immediately overwritten by this custom
	// DiffFn, which emulates legacy computed-list diff behavior.
	p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		diff := &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{},
		}
		// "compute" names another attribute to mark as computed in the diff.
		compute, _ := c.Raw["compute"].(string)
		if compute != "" {
			diff.Attributes[compute] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
			diff.Attributes["compute"] = &ResourceAttrDiff{
				Old: "",
				New: compute,
			}
		}
		// Diff "foo" against prior state, honoring the config's ComputedKeys.
		fooOld := s.Attributes["foo"]
		fooNew, _ := c.Raw["foo"].(string)
		fooComputed := false
		for _, k := range c.ComputedKeys {
			if k == "foo" {
				fooComputed = true
			}
		}
		if fooNew != "" {
			diff.Attributes["foo"] = &ResourceAttrDiff{
				Old:         fooOld,
				New:         fooNew,
				NewComputed: fooComputed,
			}
		}
		// "ip" stays computed until it first appears in state.
		ipOld := s.Attributes["ip"]
		ipComputed := ipOld == ""
		diff.Attributes["ip"] = &ResourceAttrDiff{
			Old:         ipOld,
			New:         "",
			NewComputed: ipComputed,
		}
		return diff, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 3 {
		t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":      cty.UnknownVal(cty.List(cty.String)),
				"foo":     cty.NullVal(cty.List(cty.String)),
				"compute": cty.StringVal("ip.#"),
			}), ric.After)
		case "aws_instance.foo[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":      cty.UnknownVal(cty.List(cty.String)),
				"foo":     cty.NullVal(cty.List(cty.String)),
				"compute": cty.StringVal("ip.#"),
			}), ric.After)
		case "aws_instance.bar[0]":
			// bar indexes into foo's computed "ip" list, so its own "foo"
			// is unknown at plan time.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":  cty.UnknownVal(cty.List(cty.String)),
				"foo": cty.UnknownVal(cty.List(cty.String)),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_count verifies that a count of 5 produces five identical
// create changes plus a dependent resource that joins all five values.
func TestContext2Plan_count(t *testing.T) {
	module := testModule(t, "plan-count")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 6 {
		t.Fatal("expected 6 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch addr := inst.Addr.String(); addr {
		case "aws_instance.bar":
			// bar interpolates all five foo values joined with commas.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo,foo,foo,foo,foo"),
				"type": cty.StringVal("aws_instance"),
			}), inst.After)
		case "aws_instance.foo[0]", "aws_instance.foo[1]", "aws_instance.foo[2]",
			"aws_instance.foo[3]", "aws_instance.foo[4]":
			// All five counted instances are identical.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), inst.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countComputed verifies that using a computed resource
// attribute as a "count" value is rejected at plan time.
func TestContext2Plan_countComputed(t *testing.T) {
	m := testModule(t, "plan-count-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// ctx.Plan returns diagnostics, not a plain error; check HasErrors
	// rather than comparing to nil, since diagnostics can be non-nil while
	// containing only warnings. (The original named this "err" and
	// nil-checked it.)
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_countComputedModule verifies that a computed resource
// attribute used as "count" inside a child module is rejected with an
// explanatory diagnostic.
func TestContext2Plan_countComputedModule(t *testing.T) {
	m := testModule(t, "plan-count-computed-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// ctx.Plan returns diagnostics rather than a plain error; render them
	// via Err() as the other tests in this file do. (The original named the
	// diagnostics "err" and formatted the raw slice.)
	_, diags := ctx.Plan()
	expectedErr := `The "count" value depends on resource attributes`
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\n",
			expectedErr, diags.Err())
	}
}
// TestContext2Plan_countModuleStatic verifies that a static count inside a
// child module yields one create change per index.
func TestContext2Plan_countModuleStatic(t *testing.T) {
	module := testModule(t, "plan-count-module-static")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 3 {
		t.Fatal("expected 3 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch addr := inst.Addr.String(); addr {
		case "module.child.aws_instance.foo[0]",
			"module.child.aws_instance.foo[1]",
			"module.child.aws_instance.foo[2]":
			// Each instance has only its computed id at plan time.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), inst.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countModuleStaticGrandchild verifies that a static count
// two module levels deep yields one create change per index.
func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) {
	module := testModule(t, "plan-count-module-static-grandchild")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 3 {
		t.Fatal("expected 3 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch addr := inst.Addr.String(); addr {
		case "module.child.module.child.aws_instance.foo[0]",
			"module.child.module.child.aws_instance.foo[1]",
			"module.child.module.child.aws_instance.foo[2]":
			// Each instance has only its computed id at plan time.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), inst.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countIndex verifies that each counted instance sees its
// own count.index value interpolated into its configuration.
func TestContext2Plan_countIndex(t *testing.T) {
	module := testModule(t, "plan-count-index")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	// Each instance's "foo" is its own count.index rendered as a string.
	wantFoo := map[string]string{
		"aws_instance.foo[0]": "0",
		"aws_instance.foo[1]": "1",
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		addr := inst.Addr.String()
		foo, ok := wantFoo[addr]
		if !ok {
			t.Fatal("unknown instance:", addr)
		}
		checkVals(t, objectVal(t, schema, map[string]cty.Value{
			"id":   cty.UnknownVal(cty.String),
			"foo":  cty.StringVal(foo),
			"type": cty.StringVal("aws_instance"),
		}), inst.After)
	}
}
// TestContext2Plan_countVar verifies that count can be supplied via an input
// variable ("3"), creating one instance per index plus a dependent resource
// that joins all of their values.
func TestContext2Plan_countVar(t *testing.T) {
	module := testModule(t, "plan-count-var")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
		Variables: InputValues{
			"instance_count": &InputValue{
				Value:      cty.StringVal("3"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 4 {
		t.Fatal("expected 4 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch addr := inst.Addr.String(); addr {
		case "aws_instance.bar":
			// bar joins the three foo values with commas.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo,foo,foo"),
				"type": cty.StringVal("aws_instance"),
			}), inst.After)
		case "aws_instance.foo[0]", "aws_instance.foo[1]", "aws_instance.foo[2]":
			// All three counted instances are identical.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), inst.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countZero verifies planning when a resource's count is
// zero: a reference to its splat yields an empty tuple.
func TestContext2Plan_countZero(t *testing.T) {
	module := testModule(t, "plan-count-zero")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.DynamicPseudoType, Optional: true},
				},
			},
		},
	}

	// This schema contains a DynamicPseudoType, and therefore can't go
	// through any shim functions; plan by echoing the proposed state back.
	provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		resp.PlannedPrivate = req.PriorPrivate
		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 1 {
		t.Fatal("expected 1 changes, got", n)
	}

	change := plan.Changes.Resources[0]
	if change.Action != plans.Create {
		t.Fatalf("expected resource creation, got %s", change.Action)
	}
	inst, err := change.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// The splat of a zero-count resource is an empty tuple.
	want := cty.TupleVal(nil)
	got := inst.After.GetAttr("foo")
	if !cmp.Equal(want, got, valueComparer) {
		t.Fatal(cmp.Diff(want, got, valueComparer))
	}
}
// TestContext2Plan_countOneIndex verifies that a count of one still produces
// an indexed instance ([0]) and that a reference to it resolves.
func TestContext2Plan_countOneIndex(t *testing.T) {
	module := testModule(t, "plan-count-one-index")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := provider.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		inst, err := change.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch addr := inst.Addr.String(); addr {
		case "aws_instance.bar", "aws_instance.foo[0]":
			// Both instances plan to identical values.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), inst.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countDecreaseToOne verifies planning when count shrinks
// from 3 to 1: instance 0 is kept (no-op), instances 1 and 2 are planned for
// deletion, and the prior state is left untouched by planning.
func TestContext2Plan_countDecreaseToOne(t *testing.T) {
	m := testModule(t, "plan-count-dec")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.foo.2": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// The sole surviving instance loses its index and is unchanged.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Delete {
				t.Fatalf("expected resource delete, got %s", res.Action)
			}
		case "aws_instance.foo[2]":
			if res.Action != plans.Delete {
				t.Fatalf("expected resource delete, got %s", res.Action)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}

	// Planning must not modify the prior state.
	expectedState := `aws_instance.foo.0:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]
  foo = foo
  type = aws_instance
aws_instance.foo.1:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]
aws_instance.foo.2:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]`
	if ctx.State().String() != expectedState {
		// Fixed typo in the failure message ("epected" -> "expected").
		t.Fatalf("expected state:\n%q\n\ngot state:\n%q\n", expectedState, ctx.State().String())
	}
}
// TestContext2Plan_countIncreaseFromNotSet plans the "plan-count-inc" fixture
// against prior state written before "count" was set: the state holds one
// instance under the un-indexed key "aws_instance.foo". The existing instance
// should be treated as foo[0] with no changes, and the new indexed instances
// should be planned for creation.
func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Legacy-shaped state: a single instance stored without an index suffix.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// One change for aws_instance.bar plus three foo instances.
	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[0]":
			// The previously un-indexed instance becomes index 0 untouched.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_countIncreaseFromOne plans the "plan-count-inc" fixture
// against state that already has exactly one indexed instance
// ("aws_instance.foo.0"): that instance should be a no-op while foo[1] and
// foo[2] (plus aws_instance.bar) are planned for creation.
func TestContext2Plan_countIncreaseFromOne(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: a single instance already stored with index 0.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// aws_instance.bar plus the three foo instances.
	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[0]":
			// Already in state; the count increase must not disturb it.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// https://github.com/PeoplePerHour/terraform/pull/11
//
// This tests a case where both a "resource" and "resource.0" are in
// the state file, which apparently is a reasonable backwards compatibility
// concern found in the above 3rd party repo.
//
// The duplicate un-indexed entry ("aws_instance.foo") should be planned for
// deletion, while "aws_instance.foo.0" is kept as foo[0] unchanged and the
// remaining instances are created.
func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Corrupted prior state: the same instance recorded twice, once without
	// an index and once as index 0.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// bar create + foo delete + foo[0..2].
	if len(plan.Changes.Resources) != 5 {
		t.Fatal("expected 5 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// The stray un-indexed duplicate must be removed.
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		case "aws_instance.foo[0]":
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// A common pattern in TF configs is to have a set of resources with the same
// count and to use count.index to create correspondences between them:
//
//	foo_id = "${foo.bar.*.id[count.index]}"
//
// This test is for the situation where some instances already exist and the
// count is increased. In that case, we should see only the create diffs
// for the new instances and not any update diffs for the existing ones.
func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) {
	m := testModule(t, "plan-count-splat-reference")
	p := testProvider("aws")
	// Minimal schema: "name" on the referenced resources, "foo_name" on the
	// referencing ones.
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"name":     {Type: cty.String, Optional: true},
					"foo_name": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Two existing foo instances and the two bar instances that reference
	// them via the splat expression.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 1",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// Three foo and three bar instances in the plan.
	if len(plan.Changes.Resources) != 6 {
		t.Fatal("expected 6 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar[0]", "aws_instance.bar[1]", "aws_instance.foo[0]", "aws_instance.foo[1]":
			// Existing instances must not receive spurious update diffs.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.bar[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo_name": cty.StringVal("foo 2"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"name": cty.StringVal("foo 2"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
func TestContext2Plan_forEach(t *testing.T) {
m := testModule(t, "plan-for-each")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 8 {
t.Fatal("expected 8 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
_, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
}
}
// TestContext2Plan_forEachUnknownValue verifies that an unknown for_each
// value is reported as a diagnostic rather than causing a panic.
func TestContext2Plan_forEachUnknownValue(t *testing.T) {
	// This module has a variable defined, but its value is unknown. We
	// expect this to produce an error, but not to panic.
	m := testModule(t, "plan-for-each-unknown-value")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Simulate -var foo=<unknown> on the CLI.
		Variables: InputValues{
			"foo": {
				Value:      cty.UnknownVal(cty.String),
				SourceType: ValueFromCLIArg,
			},
		},
	})
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Invalid for_each argument: The "for_each" value depends on resource attributes that cannot be determined until apply...
		t.Fatal("succeeded; want errors")
	}
	gotErrStr := diags.Err().Error()
	wantErrStr := "Invalid for_each argument"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_destroy verifies that a destroy plan (Destroy: true)
// schedules every instance in state for deletion.
func TestContext2Plan_destroy(t *testing.T) {
	m := testModule(t, "plan-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Two existing instances to be destroyed.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.one": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.two": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.one", "aws_instance.two":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleDestroy verifies that a destroy plan removes
// instances in both the root module and a child module.
func TestContext2Plan_moduleDestroy(t *testing.T) {
	m := testModule(t, "plan-module-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// One instance in root and one in module.child.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo", "module.child.aws_instance.foo":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// GH-1835: a destroy plan across two sibling modules must not produce a
// dependency cycle; both module instances should simply be deleted.
func TestContext2Plan_moduleDestroyCycle(t *testing.T) {
	m := testModule(t, "plan-module-destroy-gh-1835")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// One instance in each of two sibling modules.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "a_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.a": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "a",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "b_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.b": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "b",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.a_module.aws_instance.a", "module.b_module.aws_instance.b":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleDestroyMultivar verifies that a destroy plan removes
// all counted instances of a resource inside a child module.
func TestContext2Plan_moduleDestroyMultivar(t *testing.T) {
	m := testModule(t, "plan-module-destroy-multivar")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Empty root module plus two counted instances in module.child.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path:      rootModulePath,
				Resources: map[string]*ResourceState{},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar0",
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar1",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.foo[0]", "module.child.aws_instance.foo[1]":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_pathVar verifies that the path.cwd, path.module and
// path.root values are interpolated into planned attributes as expected by
// the "plan-path-var" fixture.
func TestContext2Plan_pathVar(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	m := testModule(t, "plan-path-var")
	p := testProvider("aws")
	// One string attribute per path value under test.
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"cwd":    {Type: cty.String, Optional: true},
					"module": {Type: cty.String, Optional: true},
					"root":   {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			// The fixture appends "barpath"/"foopath" suffixes to each path.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"cwd":    cty.StringVal(cwd + "/barpath"),
				"module": cty.StringVal(m.Module.SourceDir + "/foopath"),
				"root":   cty.StringVal(m.Module.SourceDir + "/barpath"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_diffVar uses a custom DiffFn that reports num 2 -> 3 for
// the existing instance (ID "bar"), and verifies that the dependent resource
// aws_instance.bar picks up the new value (num = 3) in its create plan while
// aws_instance.foo is planned as an update.
func TestContext2Plan_diffVar(t *testing.T) {
	m := testModule(t, "plan-diffvar")
	p := testProvider("aws")
	// Existing instance with num = 2.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"num": "2",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	// Custom diff: force num from "2" to "3" on the existing instance only;
	// everything else falls through to the standard test diff function.
	p.DiffFn = func(
		info *InstanceInfo,
		s *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if s.ID != "bar" {
			return testDiffFn(info, s, c)
		}
		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"num": &ResourceAttrDiff{
					Old: "2",
					New: "3",
				},
			},
		}, nil
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			// bar depends on foo's num, so it must see the diffed value 3.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(3),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			if res.Action != plans.Update {
				t.Fatalf("resource %s should be updated", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.StringVal("bar"),
				"num": cty.NumberIntVal(2),
			}), ric.Before)
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.StringVal("bar"),
				"num": cty.NumberIntVal(3),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
func TestContext2Plan_hook(t *testing.T) {
m := testModule(t, "plan-good")
h := new(MockHook)
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
Hooks: []Hook{h},
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if !h.PreDiffCalled {
t.Fatal("should be called")
}
if !h.PostDiffCalled {
t.Fatal("should be called")
}
}
// TestContext2Plan_closeProvider verifies that the provider is closed after
// planning, even when it lives at a non-root module address.
func TestContext2Plan_closeProvider(t *testing.T) {
	// this fixture only has an aliased provider located in the module, to make
	// sure that the provider name contains a path more complex than
	// "provider.aws".
	m := testModule(t, "plan-close-module-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !p.CloseCalled {
		t.Fatal("provider not closed")
	}
}
// TestContext2Plan_orphan verifies that an instance present in state but not
// in configuration (aws_instance.baz) is planned for deletion, while the
// configured resource (aws_instance.foo) is planned for creation.
func TestContext2Plan_orphan(t *testing.T) {
	m := testModule(t, "plan-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// State contains only the orphaned instance.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.baz": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.baz":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// This tests that configurations with UUIDs don't produce errors.
// For shadows, this would produce errors since a UUID changes every time.
func TestContext2Plan_shadowUuid(t *testing.T) {
m := testModule(t, "plan-shadow-uuid")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
}
// TestContext2Plan_state plans the "plan-good" fixture against existing state
// holding aws_instance.foo (ID "bar", no attributes): aws_instance.bar should
// be planned for creation and aws_instance.foo for an in-place update that
// fills in its num/type attributes.
func TestContext2Plan_state(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: foo exists but records no attributes.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// Exactly one create (bar) and one update (foo). (The previous version
	// also checked len < 2 first, which this strict check subsumes.)
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			if res.Action != plans.Update {
				t.Fatalf("resource %s should be updated", i)
			}
			// Before: attributes absent in state decode as null.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("bar"),
				"num":  cty.NullVal(cty.Number),
				"type": cty.NullVal(cty.String),
			}), ric.Before)
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("bar"),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_taint verifies that a tainted instance (aws_instance.bar)
// is planned as DeleteThenCreate while the healthy instance is left alone.
func TestContext2Plan_taint(t *testing.T) {
	m := testModule(t, "plan-taint")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// foo is healthy; bar is marked tainted.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"num": "2"},
						},
					},
					"aws_instance.bar": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "baz",
							Tainted: true,
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// Taint forces a replace.
			if res.Action != plans.DeleteThenCreate {
				t.Fatalf("resource %s should be replaced", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should not be changed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_taintIgnoreChanges verifies that a tainted instance is
// still planned for replacement (DeleteThenCreate) when the configuration
// uses ignore_changes (fixture "plan-taint-ignore-changes"), and that the
// ignored attribute keeps its prior value in the replacement plan.
func TestContext2Plan_taintIgnoreChanges(t *testing.T) {
	m := testModule(t, "plan-taint-ignore-changes")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":   {Type: cty.String, Computed: true},
					"vars": {Type: cty.String, Optional: true},
					"type": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn
	// A single tainted instance with vars already set.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "foo",
							Attributes: map[string]string{
								"vars": "foo",
								"type": "aws_instance",
							},
							Tainted: true,
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			// ignore_changes must not suppress the taint-driven replace.
			if res.Action != plans.DeleteThenCreate {
				t.Fatalf("resource %s should be replaced", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("foo"),
				"vars": cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.Before)
			// After: new id is unknown; ignored attribute keeps "foo".
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"vars": cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// Fails about 50% of the time before the fix for GH-4982, covers the fix.
//
// The plan is built 100 times against the same state to give the race a
// chance to manifest: foo[0] is tainted and must always be planned as a
// replace while foo[1] and foo[2] stay unchanged.
func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
	m := testModule(t, "plan-taint-interpolated-count")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Three instances; only index 0 is tainted.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "bar",
							Tainted: true,
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.foo.2": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	})
	// Repeat to exercise the (formerly flaky) race.
	for i := 0; i < 100; i++ {
		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: providers.ResolverFixed(
				map[addrs.Provider]providers.Factory{
					addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
				},
			),
			State: s,
		})
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("unexpected errors: %s", diags.Err())
		}
		schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
		ty := schema.ImpliedType()
		if len(plan.Changes.Resources) != 3 {
			t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
		}
		for _, res := range plan.Changes.Resources {
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}
			// NOTE: this `i` shadows the outer loop counter.
			switch i := ric.Addr.String(); i {
			case "aws_instance.foo[0]":
				if res.Action != plans.DeleteThenCreate {
					t.Fatalf("resource %s should be replaced", i)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"id": cty.StringVal("bar"),
				}), ric.Before)
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"id": cty.UnknownVal(cty.String),
				}), ric.After)
			case "aws_instance.foo[1]", "aws_instance.foo[2]":
				if res.Action != plans.NoOp {
					t.Fatalf("resource %s should not be changed", i)
				}
			default:
				t.Fatal("unknown instance:", i)
			}
		}
	}
}
// TestContext2Plan_targeted verifies that targeting aws_instance.foo limits
// the plan to exactly that one resource.
func TestContext2Plan_targeted(t *testing.T) {
	m := testModule(t, "plan-targeted")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Equivalent of -target=aws_instance.foo.
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "foo",
			),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// Test that targeting a module properly plans any inputs that depend
// on another module.
//
// Targeting module.B must also pull in module.A's resource, since module.B's
// input depends on it; both planned changes are expected to be creates.
func TestContext2Plan_targetedCrossModule(t *testing.T) {
	m := testModule(t, "plan-targeted-cross-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	// Both module.A's and module.B's instances must be planned, because
	// module.B depends on module.A.
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "module.A.aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.B.aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_targetedModuleWithProvider verifies that targeting a
// module whose resources use a provider configured inside a sibling module
// still plans the targeted module's resource.
//
// Fix: corrected the misspelled failure message "unexpcetd resource".
func TestContext2Plan_targetedModuleWithProvider(t *testing.T) {
	m := testModule(t, "plan-targeted-module-with-provider")
	p := testProvider("null")
	p.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"key": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"null_resource": {
				Attributes: map[string]*configschema.Attribute{},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("null"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child2", addrs.NoKey),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["null_resource"]
	ty := schema.ImpliedType()
	// Only the resource in the targeted child2 module should be planned.
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}
	if ric.Addr.String() != "module.child2.null_resource.foo" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}
}
// TestContext2Plan_targetedOrphan verifies that a targeted destroy plan over
// a state containing multiple orphaned resources only plans the destruction
// of the targeted one.
func TestContext2Plan_targetedOrphan(t *testing.T) {
	m := testModule(t, "plan-targeted-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Two orphans in state; only "orphan" is targeted below, so
		// "nottargeted" must not appear in the plan.
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.orphan": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-789xyz",
							},
						},
						"aws_instance.nottargeted": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.orphan":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be destroyed", ric.Addr)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_targetedModuleOrphan verifies that targeting an orphaned
// resource inside a child module plans only that resource's destruction.
// Regression test for https://github.com/hashicorp/terraform/issues/2538
//
// Fix: corrected the malformed failure message "unexpected resource :%s".
func TestContext2Plan_targetedModuleOrphan(t *testing.T) {
	m := testModule(t, "plan-targeted-module-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Two orphans in module.child; only "orphan" is targeted below.
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: []string{"root", "child"},
					Resources: map[string]*ResourceState{
						"aws_instance.orphan": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-789xyz",
							},
							Provider: "provider.aws",
						},
						"aws_instance.nottargeted": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
							Provider: "provider.aws",
						},
					},
				},
			},
		}),
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}
	if ric.Addr.String() != "module.child.aws_instance.orphan" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}
	if res.Action != plans.Delete {
		t.Fatalf("resource %s should be deleted", ric.Addr)
	}
}
// TestContext2Plan_targetedModuleUntargetedVariable verifies that targeting
// a resource and a module still allows the targeted module's input variable
// to be evaluated even when its source resource is not itself targeted.
func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) {
	m := testModule(t, "plan-targeted-module-untargeted-variable")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "blue",
			),
			addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.blue":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.blue_mod.aws_instance.mod":
			// "value" is unknown because it flows from the untargeted
			// (and therefore unplanned) source resource.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":    cty.UnknownVal(cty.String),
				"value": cty.UnknownVal(cty.String),
				"type":  cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// Ensure that outputs missing references due to targeting are removed from
// the graph.
// TestContext2Plan_outputContainsTargetedResource verifies that planning
// with a target produces exactly one warning diagnostic announcing that
// resource targeting is in effect, and no errors.
//
// Fix: the diagnostic-count failure message passed the diagnostics slice to
// the %d verb (a go vet printf error); it now passes len(diags).
func TestContext2Plan_outputContainsTargetedResource(t *testing.T) {
	m := testModule(t, "plan-untargeted-resource-output")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "a",
			),
		},
	})
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags)
	}
	// Exactly one diagnostic is expected: the targeting-in-effect warning.
	if len(diags) != 1 {
		t.Fatalf("got %d diagnostics; want 1", len(diags))
	}
	if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
		t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
	}
	if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want {
		t.Errorf("wrong diagnostic summary %#v; want %#v", got, want)
	}
}
// TestContext2Plan_targetedOverTen verifies that targeting a single instance
// index among more than ten instances does not mis-match other indexes that
// share the same prefix (e.g. foo[1] vs foo[11]); all planned changes must
// be no-ops. Regression test for
// https://github.com/hashicorp/terraform/issues/4515
//
// Fix: removed the expectedState slice, which was built up in the loop but
// never read afterwards (dead code).
func TestContext2Plan_targetedOverTen(t *testing.T) {
	m := testModule(t, "plan-targeted-over-ten")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Seed state with 13 instances so indexes reach double digits.
	resources := make(map[string]*ResourceState)
	for i := 0; i < 13; i++ {
		key := fmt.Sprintf("aws_instance.foo.%d", i)
		resources[key] = &ResourceState{
			Type:    "aws_instance",
			Primary: &InstanceState{ID: fmt.Sprintf("i-abc%d", i)},
		}
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path:      rootModulePath,
					Resources: resources,
				},
			},
		}),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.ResourceInstance(
				addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1),
			),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.NoOp {
			t.Fatalf("unexpected action %s for %s", res.Action, ric.Addr)
		}
	}
}
// TestContext2Plan_provider verifies that a variable passed into the
// provider configuration reaches the provider's ConfigureFn during plan.
//
// Fix: handle the Plan return value as diagnostics (diags.HasErrors) for
// consistency with the surrounding tests, instead of a bare nil check.
func TestContext2Plan_provider(t *testing.T) {
	m := testModule(t, "plan-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Capture the "foo" provider argument when the provider is configured.
	var value interface{}
	p.ConfigureFn = func(c *ResourceConfig) error {
		value, _ = c.Get("foo")
		return nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("bar"),
				SourceType: ValueFromCaller,
			},
		},
	})
	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if value != "bar" {
		t.Fatalf("bad: %#v", value)
	}
}
// TestContext2Plan_varListErr verifies that the plan-var-list-err fixture
// fails to plan (it exercises an invalid list variable usage).
//
// Fix: check the returned diagnostics with HasErrors for consistency with
// the surrounding tests, instead of a bare nil comparison.
func TestContext2Plan_varListErr(t *testing.T) {
	m := testModule(t, "plan-var-list-err")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_ignoreChanges verifies that an attribute listed in
// ignore_changes keeps its prior state value in the planned update even
// though the configuration supplies a different value.
func TestContext2Plan_ignoreChanges(t *testing.T) {
	m := testModule(t, "plan-ignore-changes")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state has ami-abcd1234; config (via var foo) wants ami-1234abcd.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"ami": "ami-abcd1234"},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}
	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action)
	}
	if ric.Addr.String() != "aws_instance.foo" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}
	// "ami" must retain its state value, demonstrating ignore_changes.
	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"id":   cty.StringVal("bar"),
		"ami":  cty.StringVal("ami-abcd1234"),
		"type": cty.StringVal("aws_instance"),
	}), ric.After)
}
// TestContext2Plan_ignoreChangesWildcard verifies that a wildcard
// ignore_changes ("*") suppresses all attribute diffs, producing only
// no-op changes even though the configuration supplies new values.
func TestContext2Plan_ignoreChangesWildcard(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-wildcard")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"ami":      "ami-abcd1234",
								"instance": "t2.micro",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Both variables differ from state, but the wildcard should
		// prevent any resulting diff.
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
			"bar": &InputValue{
				Value:      cty.StringVal("t2.small"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.NoOp {
			t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Changes.Resources))
		}
	}
}
// TestContext2Plan_ignoreChangesInMap verifies that ignore_changes can
// address an individual key inside a map attribute: the ignored key keeps
// its state value while other keys still update from configuration.
func TestContext2Plan_ignoreChangesInMap(t *testing.T) {
	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_ignore_changes_map": {
				Attributes: map[string]*configschema.Attribute{
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	// Pass the proposed new state through unchanged so the test observes
	// exactly what the ignore_changes processing produced.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	p.DiffFn = testDiffFn
	s := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_ignore_changes_map",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"tags":{"ignored":"from state","other":"from state"}}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewLegacyProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})
	m := testModule(t, "plan-ignore-changes-in-map")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["test_ignore_changes_map"]
	ty := schema.ImpliedType()
	if got, want := len(plan.Changes.Resources), 1; got != want {
		t.Fatalf("wrong number of changes %d; want %d", got, want)
	}
	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}
	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action)
	}
	if got, want := ric.Addr.String(), "test_ignore_changes_map.foo"; got != want {
		t.Fatalf("unexpected resource address %s; want %s", got, want)
	}
	// "ignored" keeps the state value; "other" takes the config value.
	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"tags": cty.MapVal(map[string]cty.Value{
			"ignored": cty.StringVal("from state"),
			"other":   cty.StringVal("from config"),
		}),
	}), ric.After)
}
// TestContext2Plan_moduleMapLiteral verifies that both a populated and an
// empty map literal passed into a module arrive intact in the resource
// configuration seen by the provider's DiffFn.
func TestContext2Plan_moduleMapLiteral(t *testing.T) {
	m := testModule(t, "plan-module-map-literal")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"meta": {Type: cty.Map(cty.String), Optional: true},
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// Here we verify that both the populated and empty map literals made it
		// through to the resource attributes
		val, _ := c.Get("tags")
		m, ok := val.(map[string]interface{})
		if !ok {
			t.Fatalf("Tags attr not map: %#v", val)
		}
		if m["foo"] != "bar" {
			t.Fatalf("Bad value in tags attr: %#v", m)
		}
		{
			// The empty literal must still decode as an (empty) map.
			val, _ := c.Get("meta")
			m, ok := val.(map[string]interface{})
			if !ok {
				t.Fatalf("Meta attr not map: %#v", val)
			}
			if len(m) != 0 {
				t.Fatalf("Meta attr not empty: %#v", val)
			}
		}
		return nil, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_computedValueInMap verifies that a computed value used
// inside a map that feeds a module input propagates as unknown into the
// module's resource plan.
func TestContext2Plan_computedValueInMap(t *testing.T) {
	m := testModule(t, "plan-computed-value-in-map")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"looked_up": {Type: cty.String, Optional: true},
				},
			},
			"aws_computed_source": {
				Attributes: map[string]*configschema.Attribute{
					"computed_read_only": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		switch info.Type {
		case "aws_computed_source":
			// Force the computed attribute to be unknown during plan.
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"computed_read_only": &ResourceAttrDiff{
						NewComputed: true,
					},
				},
			}, nil
		}
		return testDiffFn(info, state, c)
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		// Two different resource types are in play; decode each change
		// with its own schema.
		schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]
		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "aws_computed_source.intermediates":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"computed_read_only": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.test_mod.aws_instance.inner2":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"looked_up": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleVariableFromSplat verifies that a module variable
// populated from a splat expression (resource.*.attr) plans correctly for
// every instance across multiple module calls.
func TestContext2Plan_moduleVariableFromSplat(t *testing.T) {
	m := testModule(t, "plan-module-variable-from-splat")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"thing": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// Two modules with two instances each.
	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]
		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "module.mod1.aws_instance.test[0]",
			"module.mod1.aws_instance.test[1]",
			"module.mod2.aws_instance.test[0]",
			"module.mod2.aws_instance.test[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"thing": cty.StringVal("doesnt"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_createBeforeDestroy_depends_datasource verifies that when
// refresh is skipped (as with "terraform plan -refresh=false"), data sources
// that create_before_destroy resources depend on are deferred to a Read
// action during plan rather than read eagerly, and the dependent managed
// resources plan with the computed attribute unknown.
func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) {
	m := testModule(t, "plan-cbd-depends-datasource")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Optional: true, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.Number, Optional: true},
				},
			},
		},
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		// Mark "computed" as unknown when unset, passing everything else
		// through from the proposed state.
		computedVal := req.ProposedNewState.GetAttr("computed")
		if computedVal.IsNull() {
			computedVal = cty.UnknownVal(cty.String)
		}
		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(map[string]cty.Value{
				"num":      req.ProposedNewState.GetAttr("num"),
				"computed": computedVal,
			}),
		}
	}
	// Reading the data source at any point would be a bug in this scenario,
	// so fail loudly if the provider is asked to do so.
	p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
		return providers.ReadDataSourceResponse{
			Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")),
		}
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	// We're skipping ctx.Refresh here, which simulates what happens when
	// running "terraform plan -refresh=false". As a result, we don't get our
	// usual opportunity to read the data source during the refresh step and
	// thus the plan call below is forced to produce a deferred read action.
	plan, diags := ctx.Plan()
	if p.ReadDataSourceCalled {
		t.Errorf("ReadDataSource was called on the provider, but should not have been because we didn't refresh")
	}
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	seenAddrs := make(map[string]struct{})
	for _, res := range plan.Changes.Resources {
		// Managed resources and data sources use different schema maps.
		var schema *configschema.Block
		switch res.Addr.Resource.Resource.Mode {
		case addrs.DataResourceMode:
			schema = p.GetSchemaReturn.DataSources[res.Addr.Resource.Resource.Type]
		case addrs.ManagedResourceMode:
			schema = p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]
		}
		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}
		seenAddrs[ric.Addr.String()] = struct{}{}
		t.Run(ric.Addr.String(), func(t *testing.T) {
			switch i := ric.Addr.String(); i {
			case "aws_instance.foo[0]":
				if res.Action != plans.Create {
					t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"num":      cty.StringVal("2"),
					"computed": cty.UnknownVal(cty.String),
				}), ric.After)
			case "aws_instance.foo[1]":
				if res.Action != plans.Create {
					t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"num":      cty.StringVal("2"),
					"computed": cty.UnknownVal(cty.String),
				}), ric.After)
			case "data.aws_vpc.bar[0]":
				if res.Action != plans.Read {
					t.Fatalf("resource %s should be read, got %s", ric.Addr, ric.Action)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					// In a normal flow we would've read an exact value in
					// ReadDataSource, but because this test doesn't run
					// ctx.Refresh we have no opportunity to do that lookup
					// and a deferred read is forced.
					"id":  cty.UnknownVal(cty.String),
					"foo": cty.StringVal("0"),
				}), ric.After)
			case "data.aws_vpc.bar[1]":
				if res.Action != plans.Read {
					t.Fatalf("resource %s should be read, got %s", ric.Addr, ric.Action)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					// In a normal flow we would've read an exact value in
					// ReadDataSource, but because this test doesn't run
					// ctx.Refresh we have no opportunity to do that lookup
					// and a deferred read is forced.
					"id":  cty.UnknownVal(cty.String),
					"foo": cty.StringVal("1"),
				}), ric.After)
			default:
				t.Fatal("unknown instance:", i)
			}
		})
	}
	// Verify the changeset covers exactly the expected addresses.
	wantAddrs := map[string]struct{}{
		"aws_instance.foo[0]": struct{}{},
		"aws_instance.foo[1]": struct{}{},
		"data.aws_vpc.bar[0]": struct{}{},
		"data.aws_vpc.bar[1]": struct{}{},
	}
	if !cmp.Equal(seenAddrs, wantAddrs) {
		t.Errorf("incorrect addresses in changeset:\n%s", cmp.Diff(wantAddrs, seenAddrs))
	}
}
// interpolated lists need to be stored in the original order.
//
// Two resources get the same list value (one directly, one interpolated
// from the other); their planned After values must be identical, proving
// element order was preserved through interpolation.
func TestContext2Plan_listOrder(t *testing.T) {
	m := testModule(t, "plan-list-order")
	p := testProvider("aws")
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	changes := plan.Changes
	rDiffA := changes.ResourceInstance(addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "a",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	rDiffB := changes.ResourceInstance(addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "b",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	if !cmp.Equal(rDiffA.After, rDiffB.After, valueComparer) {
		t.Fatal(cmp.Diff(rDiffA.After, rDiffB.After, valueComparer))
	}
}
// Make sure ignore-changes doesn't interfere with set/list/map diffs.
// If a resource was being replaced by a RequiresNew attribute that gets
// ignored, we need to filter the diff properly to properly update rather than
// replace.
func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-with-flatmaps")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"user_data":   {Type: cty.String, Optional: true},
					"require_new": {Type: cty.String, Optional: true},
					// This test predates the 0.12 work to integrate cty and
					// HCL, and so it was ported as-is where its expected
					// test output was clearly expecting a list of maps here
					// even though it is named "set".
					"set": {Type: cty.List(cty.Map(cty.String)), Optional: true},
					"lst": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}
	// Prior state uses legacy flatmap encoding for the collection attrs.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"user_data":   "x",
								"require_new": "",
								"set.#":       "1",
								"set.0.%":     "1",
								"set.0.a":     "1",
								"lst.#":       "1",
								"lst.0":       "j",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}
	res := plan.Changes.Resources[0]
	schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]
	ric, err := res.Decode(schema.ImpliedType())
	if err != nil {
		t.Fatal(err)
	}
	// With the RequiresNew attribute ignored, this must be a plain Update,
	// not a replace.
	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, ric.Action)
	}
	if ric.Addr.String() != "aws_instance.foo" {
		t.Fatalf("unknown resource: %s", ric.Addr)
	}
	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"lst": cty.ListVal([]cty.Value{
			cty.StringVal("j"),
			cty.StringVal("k"),
		}),
		"require_new": cty.StringVal(""),
		"user_data":   cty.StringVal("x"),
		"set": cty.ListVal([]cty.Value{cty.MapVal(map[string]cty.Value{
			"a": cty.StringVal("1"),
			"b": cty.StringVal("2"),
		})}),
	}), ric.After)
}
// TestContext2Plan_resourceNestedCount ensures resource sets that depend on
// the count of another resource set (ie: count of a data source that depends
// on another data source's instance count - data.x.foo.*.id) get properly
// normalized to the indexes they should be. This case comes up when there is
// an existing state (after an initial apply).
//
// Fix: corrected the failure message, which read "should now change" where
// the assertion is that the resource should NOT change (plans.NoOp).
func TestContext2Plan_resourceNestedCount(t *testing.T) {
	m := testModule(t, "nested-resource-count-plan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Refresh must not alter anything; echo the prior state back.
	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
		return providers.ReadResourceResponse{
			NewState: req.PriorState,
		}
	}
	// Three chained resource sets (foo -> bar -> baz), two instances each,
	// already fully applied in state.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo0",
							Attributes: map[string]string{
								"id": "foo0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo1",
							Attributes: map[string]string{
								"id": "foo1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo"},
						Primary: &InstanceState{
							ID: "bar0",
							Attributes: map[string]string{
								"id": "bar0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo"},
						Primary: &InstanceState{
							ID: "bar1",
							Attributes: map[string]string{
								"id": "bar1",
							},
						},
					},
					"aws_instance.baz.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar"},
						Primary: &InstanceState{
							ID: "baz0",
							Attributes: map[string]string{
								"id": "baz0",
							},
						},
					},
					"aws_instance.baz.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar"},
						Primary: &InstanceState{
							ID: "baz1",
							Attributes: map[string]string{
								"id": "baz1",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	diags := ctx.Validate()
	if diags.HasErrors() {
		t.Fatalf("validate errors: %s", diags.Err())
	}
	_, diags = ctx.Refresh()
	if diags.HasErrors() {
		t.Fatalf("refresh errors: %s", diags.Err())
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("plan errors: %s", diags.Err())
	}
	// Everything is already applied, so the plan must be all no-ops.
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.NoOp {
			t.Fatalf("resource %s should not change, plan returned %s", res.Addr, res.Action)
		}
	}
}
// Higher level test at TestResource_dataSourceListApplyPanic
//
// TestContext2Plan_computedAttrRefTypeMismatch verifies that referencing a
// computed list attribute ("ids") where a string is expected ("ami") is
// rejected during plan with a type-mismatch error.
//
// Fix: removed the dead `p.DiffFn = testDiffFn` assignment, which was
// unconditionally overwritten by the custom DiffFn below before any use.
func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) {
	m := testModule(t, "plan-computed-attr-ref-type-mismatch")
	p := testProvider("aws")
	p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse {
		var diags tfdiags.Diagnostics
		if req.TypeName == "aws_instance" {
			amiVal := req.Config.GetAttr("ami")
			if amiVal.Type() != cty.String {
				diags = diags.Append(fmt.Errorf("Expected ami to be cty.String, got %#v", amiVal))
			}
		}
		return providers.ValidateResourceTypeConfigResponse{
			Diagnostics: diags,
		}
	}
	p.DiffFn = func(
		info *InstanceInfo,
		state *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		switch info.Type {
		case "aws_ami_list":
			// Emulate a diff that says "we'll create this list and ids will be populated"
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"ids.#": &ResourceAttrDiff{NewComputed: true},
				},
			}, nil
		case "aws_instance":
			// If we get to the diff for instance, we should be able to assume types
			ami, _ := c.Get("ami")
			_ = ami.(string)
		}
		return nil, nil
	}
	p.ApplyFn = func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) {
		if info.Type != "aws_ami_list" {
			t.Fatalf("Reached apply for unexpected resource type! %s", info.Type)
		}
		// Pretend like we make a thing and the computed list "ids" is populated
		return &InstanceState{
			ID: "someid",
			Attributes: map[string]string{
				"ids.#": "2",
				"ids.0": "ami-abc123",
				"ids.1": "ami-bcd345",
			},
		}, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("Succeeded; want type mismatch error for 'ami' argument")
	}
	expected := `Inappropriate value for attribute "ami"`
	if errStr := diags.Err().Error(); !strings.Contains(errStr, expected) {
		t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, expected)
	}
}
// TestContext2Plan_selfRef verifies that a resource referring to itself
// is rejected during plan with a "Self-referential block" diagnostic.
func TestContext2Plan_selfRef(t *testing.T) {
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: testModule(t, "plan-self-ref"),
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// Validation itself should succeed; the self-reference is only
	// detected when we attempt to plan.
	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	got := diags.Err().Error()
	want := "Self-referential block"
	if !strings.Contains(got, want) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", got, want)
	}
}
// TestContext2Plan_selfRefMulti verifies that a counted resource instance
// referring to itself is rejected during plan.
func TestContext2Plan_selfRefMulti(t *testing.T) {
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: testModule(t, "plan-self-ref-multi"),
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// Validation should pass; the self-reference surfaces at plan time.
	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	got := diags.Err().Error()
	want := "Self-referential block"
	if !strings.Contains(got, want) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", got, want)
	}
}
// TestContext2Plan_selfRefMultiAll verifies that a counted resource that
// refers to all of its own instances produces a graph cycle error.
func TestContext2Plan_selfRefMultiAll(t *testing.T) {
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: testModule(t, "plan-self-ref-multi-all"),
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	got := diags.Err().Error()

	// The graph is checked for cycles before we can walk it, so we don't
	// encounter the self-reference check.
	//wantErrStr := "Self-referential block"
	want := "Cycle"
	if !strings.Contains(got, want) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", got, want)
	}
}
// TestContext2Plan_invalidOutput verifies that an output referencing a
// nonexistent attribute of a data source fails the plan with an
// "Unsupported attribute" diagnostic.
func TestContext2Plan_invalidOutput(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
data "aws_data_source" "name" {}
output "out" {
  value = "${data.aws_data_source.name.missing}"
}`,
	})

	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Unsupported attribute: This object does not have an attribute named "missing"
		t.Fatal("succeeded; want errors")
	}

	gotErrStr := diags.Err().Error()
	wantErrStr := "Unsupported attribute"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_invalidModuleOutput is like TestContext2Plan_invalidOutput
// but with the invalid data source reference inside a child module's output,
// consumed by a root resource. The plan must still fail with
// "Unsupported attribute".
func TestContext2Plan_invalidModuleOutput(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"child/main.tf": `
data "aws_data_source" "name" {}
output "out" {
  value = "${data.aws_data_source.name.missing}"
}`,
		"main.tf": `
module "child" {
  source = "./child"
}
resource "aws_instance" "foo" {
  foo = "${module.child.out}"
}`,
	})

	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Unsupported attribute: This object does not have an attribute named "missing"
		t.Fatal("succeeded; want errors")
	}

	gotErrStr := diags.Err().Error()
	wantErrStr := "Unsupported attribute"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_variableValidation verifies that a diagnostic returned by
// the provider's ValidateResourceTypeConfig (triggered here by the variable's
// default value "bar") causes the plan to fail.
func TestContext2Plan_variableValidation(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
variable "x" {
  default = "bar"
}

resource "aws_instance" "foo" {
  foo = var.x
}`,
	})

	p := testProvider("aws")
	// Reject exactly the value that var.x defaults to, so validation of
	// aws_instance.foo must produce an error.
	p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
		foo := req.Config.GetAttr("foo").AsString()
		if foo == "bar" {
			resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo cannot be bar"))
		}
		return
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// foo cannot be bar
		// (the validate hook above rejects the default value of var.x)
		t.Fatal("succeeded; want errors")
	}
}
// checkVals fails the test with a diff when got does not equal expected,
// using the package's shared cty comparers.
func checkVals(t *testing.T, expected, got cty.Value) {
	t.Helper()
	if cmp.Equal(expected, got, valueComparer, typeComparer, equateEmpty) {
		return
	}
	t.Fatal(cmp.Diff(expected, got, valueTrans, equateEmpty))
}
// objectVal builds a cty object from m and coerces it to the given schema,
// failing the test if the coercion is not possible.
func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) cty.Value {
	t.Helper()
	coerced, err := schema.CoerceValue(cty.ObjectVal(m))
	if err != nil {
		t.Fatal(err)
	}
	return coerced
}
// TestContext2Plan_requiredModuleOutput plans a config where a root resource's
// required argument comes from a child module output, and checks the planned
// values of both instances.
func TestContext2Plan_requiredModuleOutput(t *testing.T) {
	m := testModule(t, "plan-required-output")
	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_resource": {
				Attributes: map[string]*configschema.Attribute{
					"id":       {Type: cty.String, Computed: true},
					"required": {Type: cty.String, Required: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["test_resource"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	// Both instances must be creates; compare the planned "after" values
	// per address.
	for _, res := range plan.Changes.Resources {
		t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			var expected cty.Value
			switch i := ric.Addr.String(); i {
			case "test_resource.root":
				// "required" flows from the module output, which is not
				// known until apply, so it is unknown in the plan.
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.UnknownVal(cty.String),
				})
			case "module.mod.test_resource.for_output":
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.StringVal("val"),
				})
			default:
				t.Fatal("unknown instance:", i)
			}

			checkVals(t, expected, ric.After)
		})
	}
}
// TestContext2Plan_requiredModuleObject is like
// TestContext2Plan_requiredModuleOutput, but the root resource consumes the
// whole child module object rather than a single named output.
func TestContext2Plan_requiredModuleObject(t *testing.T) {
	m := testModule(t, "plan-required-whole-mod")
	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_resource": {
				Attributes: map[string]*configschema.Attribute{
					"id":       {Type: cty.String, Computed: true},
					"required": {Type: cty.String, Required: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["test_resource"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	// Both instances must be creates; compare the planned "after" values
	// per address.
	for _, res := range plan.Changes.Resources {
		t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			var expected cty.Value
			switch i := ric.Addr.String(); i {
			case "test_resource.root":
				// The value pulled through the module object is not known
				// until apply, so "required" is unknown in the plan.
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.UnknownVal(cty.String),
				})
			case "module.mod.test_resource.for_output":
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.StringVal("val"),
				})
			default:
				t.Fatal("unknown instance:", i)
			}

			checkVals(t, expected, ric.After)
		})
	}
}
// TestContext2Plan_expandOrphan verifies that when a counted module shrinks
// (state has mod[0] and mod[1], config has count = 1), the instance that is
// no longer in configuration is planned for deletion while the surviving one
// is a no-op.
func TestContext2Plan_expandOrphan(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
module "mod" {
  count = 1
  source = "./mod"
}
`,
		"mod/main.tf": `
resource "aws_instance" "foo" {
}
`,
	})

	// Seed state with two module instances so mod[1] becomes an orphan.
	state := states.NewState()
	state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(0))).SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"child"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/-/aws"]`),
	)
	state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(1))).SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"child"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/-/aws"]`),
	)

	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	expected := map[string]plans.Action{
		`module.mod[1].aws_instance.foo`: plans.Delete,
		`module.mod[0].aws_instance.foo`: plans.NoOp,
	}

	// NOTE(review): an address missing from the map yields the zero Action;
	// presumably that zero value is NoOp, which would let an unexpected
	// no-op change slip through unchecked — verify if this test is extended.
	for _, res := range plan.Changes.Resources {
		want := expected[res.Addr.String()]
		if res.Action != want {
			t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action)
		}
	}
}
// add missing action check in orphan test
package terraform
import (
"bytes"
"errors"
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
// TestContext2Plan_basic plans the "plan-good" fixture and checks the planned
// values of both resources, that the provider hashes are carried into the
// plan, and that planning does not mutate state.
func TestContext2Plan_basic(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		ProviderSHA256s: map[string][]byte{
			"aws": []byte("placeholder"),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// The condition rejects l < 2, so the message must ask for *at least*
	// two resources; the previous text said "fewer than two", which
	// contradicted the check.
	if l := len(plan.Changes.Resources); l < 2 {
		t.Fatalf("wrong number of resources %d; want at least two\n%s", l, spew.Sdump(plan.Changes.Resources))
	}

	if !reflect.DeepEqual(plan.ProviderSHA256s, ctx.providerSHA256s) {
		t.Errorf("wrong ProviderSHA256s %#v; want %#v", plan.ProviderSHA256s, ctx.providerSHA256s)
	}

	if !ctx.State().Empty() {
		t.Fatalf("expected empty state, got %#v\n", ctx.State())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	for _, r := range plan.Changes.Resources {
		ric, err := r.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			foo := ric.After.GetAttr("foo").AsString()
			if foo != "2" {
				t.Fatalf("incorrect plan for 'bar': %#v", ric.After)
			}
		case "aws_instance.foo":
			num, _ := ric.After.GetAttr("num").AsBigFloat().Int64()
			if num != 2 {
				t.Fatalf("incorrect plan for 'foo': %#v", ric.After)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_createBefore_deposed verifies planning with a deposed
// object left over from a create-before-destroy replacement: the current
// object should plan as NoOp while the deposed object plans for deletion,
// and the deposed entry must survive in state after planning.
func TestContext2Plan_createBefore_deposed(t *testing.T) {
	m := testModule(t, "plan-cbd")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Legacy-shimmed state: one current instance ("baz") plus one deposed
	// instance ("foo") for the same resource.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
							Attributes: map[string]string{
								"id": "baz",
							},
						},
						Deposed: []*InstanceState{
							&InstanceState{
								ID: "foo",
								Attributes: map[string]string{
									"id": "foo",
								},
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// the state should still show one deposed
	expectedState := strings.TrimSpace(`
aws_instance.foo: (1 deposed)
  ID = baz
  provider = provider["registry.terraform.io/-/aws"]
  Deposed ID 1 = foo`)

	if ctx.State().String() != expectedState {
		t.Fatalf("\nexpected: %q\ngot:      %q\n", expectedState, ctx.State().String())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// InstanceGen identifies a change by address plus deposed key, so the
	// current and deposed objects of the same resource are distinguishable.
	type InstanceGen struct {
		Addr       string
		DeposedKey states.DeposedKey
	}
	want := map[InstanceGen]bool{
		{
			Addr: "aws_instance.foo",
		}: true,
		{
			Addr:       "aws_instance.foo",
			DeposedKey: states.DeposedKey("00000001"),
		}: true,
	}
	got := make(map[InstanceGen]bool)
	changes := make(map[InstanceGen]*plans.ResourceInstanceChangeSrc)

	for _, change := range plan.Changes.Resources {
		k := InstanceGen{
			Addr:       change.Addr.String(),
			DeposedKey: change.DeposedKey,
		}
		got[k] = true
		changes[k] = change
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("wrong resource instance object changes in plan\ngot: %s\nwant: %s", spew.Sdump(got), spew.Sdump(want))
	}

	// Current object: no changes expected.
	{
		ric, err := changes[InstanceGen{Addr: "aws_instance.foo"}].Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		if got, want := ric.Action, plans.NoOp; got != want {
			t.Errorf("current object change action is %s; want %s", got, want)
		}

		// the existing instance should only have an unchanged id
		expected, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("baz")}))
		if err != nil {
			t.Fatal(err)
		}

		checkVals(t, expected, ric.After)
	}

	// Deposed object: must be planned for deletion.
	{
		ric, err := changes[InstanceGen{Addr: "aws_instance.foo", DeposedKey: states.DeposedKey("00000001")}].Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		if got, want := ric.Action, plans.Delete; got != want {
			t.Errorf("deposed object change action is %s; want %s", got, want)
		}
	}
}
func TestContext2Plan_createBefore_maintainRoot(t *testing.T) {
m := testModule(t, "plan-cbd-maintain-root")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if !ctx.State().Empty() {
t.Fatal("expected empty state, got:", ctx.State())
}
if len(plan.Changes.Resources) != 4 {
t.Error("expected 4 resource in plan, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
// these should all be creates
if res.Action != plans.Create {
t.Fatalf("unexpected action %s for %s", res.Action, res.Addr.String())
}
}
}
// TestContext2Plan_emptyDiff plans with a provider whose DiffFn reports no
// changes at all; both resources in the config should still be planned as
// creates (they do not exist yet), and state must remain empty.
func TestContext2Plan_emptyDiff(t *testing.T) {
	m := testModule(t, "plan-empty")
	p := testProvider("aws")
	// A DiffFn that returns (nil, nil) simulates a provider proposing no diff.
	p.DiffFn = func(_ *InstanceInfo, _ *InstanceState, _ *ResourceConfig) (*InstanceDiff, error) {
		return nil, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !ctx.State().Empty() {
		t.Fatal("expected empty state, got:", ctx.State())
	}

	if got := len(plan.Changes.Resources); got != 2 {
		t.Error("expected 2 resource in plan, got", got)
	}

	got := map[string]plans.Action{}
	for _, res := range plan.Changes.Resources {
		got[res.Addr.String()] = res.Action
	}
	want := map[string]plans.Action{
		"aws_instance.foo": plans.Create,
		"aws_instance.bar": plans.Create,
	}
	if !cmp.Equal(want, got) {
		t.Fatal(cmp.Diff(want, got))
	}
}
// TestContext2Plan_escapedVar verifies that an escaped interpolation
// ($${baz}) is planned as the literal string "bar-${baz}" rather than being
// evaluated.
func TestContext2Plan_escapedVar(t *testing.T) {
	m := testModule(t, "plan-escaped-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Must be Fatalf, not Error: the code below indexes Resources[0], which
	// would panic if the plan were empty.
	if len(plan.Changes.Resources) != 1 {
		t.Fatalf("expected 1 resource in plan, got %d", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	if res.Action != plans.Create {
		t.Fatalf("expected resource creation, got %s", res.Action)
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	expected := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"foo":  cty.StringVal("bar-${baz}"),
		"type": cty.StringVal("aws_instance")},
	)

	checkVals(t, expected, ric.After)
}
func TestContext2Plan_minimal(t *testing.T) {
m := testModule(t, "plan-empty")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if !ctx.State().Empty() {
t.Fatal("expected empty state, got:", ctx.State())
}
if len(plan.Changes.Resources) != 2 {
t.Error("expected 2 resource in plan, got", len(plan.Changes.Resources))
}
actions := map[string]plans.Action{}
for _, res := range plan.Changes.Resources {
actions[res.Addr.String()] = res.Action
}
expected := map[string]plans.Action{
"aws_instance.foo": plans.Create,
"aws_instance.bar": plans.Create,
}
if !cmp.Equal(expected, actions) {
t.Fatal(cmp.Diff(expected, actions))
}
}
// TestContext2Plan_modules plans a config with a child module and checks the
// planned values of the two root resources and the module resource.
func TestContext2Plan_modules(t *testing.T) {
	m := testModule(t, "plan-modules")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Changes.Resources) != 3 {
		t.Error("expected 3 resource in plan, got", len(plan.Changes.Resources))
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// Two expected value shapes: one keyed on "foo", one on "num".
	expectFoo := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"foo":  cty.StringVal("2"),
		"type": cty.StringVal("aws_instance")},
	)

	expectNum := objectVal(t, schema, map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"num":  cty.NumberIntVal(2),
		"type": cty.StringVal("aws_instance")},
	)

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = expectFoo
		case "aws_instance.foo":
			expected = expectNum
		case "module.child.aws_instance.foo":
			expected = expectNum
		default:
			t.Fatal("unknown instance:", i)
		}

		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleExpand exercises plan-time expansion: resource
// for_each at the root, module count with resource count inside, and module
// for_each with resource count inside. Every expected instance address must
// appear exactly once in the plan, as a create.
func TestContext2Plan_moduleExpand(t *testing.T) {
	// Test a smattering of plan expansion behavior
	m := testModule(t, "plan-modules-expand")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// The full set of instance addresses the expansion should produce;
	// entries are deleted as they are seen, so leftovers are missing ones.
	expected := map[string]struct{}{
		`aws_instance.foo["a"]`:                          struct{}{},
		`module.count_child[1].aws_instance.foo[0]`:      struct{}{},
		`module.count_child[1].aws_instance.foo[1]`:      struct{}{},
		`module.count_child[0].aws_instance.foo[0]`:      struct{}{},
		`module.count_child[0].aws_instance.foo[1]`:      struct{}{},
		`module.for_each_child["a"].aws_instance.foo[1]`: struct{}{},
		`module.for_each_child["a"].aws_instance.foo[0]`: struct{}{},
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		_, ok := expected[ric.Addr.String()]
		if !ok {
			t.Fatal("unexpected resource:", ric.Addr.String())
		}
		delete(expected, ric.Addr.String())
	}

	for addr := range expected {
		t.Error("missing resource", addr)
	}
}
// GH-1475
// TestContext2Plan_moduleCycle plans a config whose module references form a
// dependency pattern that previously produced a false cycle; both resources
// should plan cleanly as creates.
func TestContext2Plan_moduleCycle(t *testing.T) {
	m := testModule(t, "plan-module-cycle")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Computed: true},
					"some_input": {Type: cty.String, Optional: true},
					"type":       {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.b":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			})
		case "aws_instance.c":
			// some_input flows from the not-yet-created aws_instance.b,
			// so it is unknown during plan.
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"some_input": cty.UnknownVal(cty.String),
				"type":       cty.StringVal("aws_instance"),
			})
		default:
			t.Fatal("unknown instance:", i)
		}

		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleDeadlock runs a plan of a module with three counted
// instances under testCheckDeadlock, ensuring the plan completes (no
// deadlock) and produces the expected creates.
func TestContext2Plan_moduleDeadlock(t *testing.T) {
	testCheckDeadlock(t, func() {
		m := testModule(t, "plan-module-deadlock")
		p := testProvider("aws")
		p.DiffFn = testDiffFn

		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: providers.ResolverFixed(
				map[addrs.Provider]providers.Factory{
					addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
				},
			),
		})

		// Plan returns tfdiags.Diagnostics, not an error; checking the
		// slice against nil would treat warning-only diagnostics as
		// failure, so use HasErrors like the rest of this file.
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
		ty := schema.ImpliedType()

		for _, res := range plan.Changes.Resources {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			expected := objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			})

			// Only these three expanded instances are acceptable.
			switch i := ric.Addr.String(); i {
			case "module.child.aws_instance.foo[0]":
			case "module.child.aws_instance.foo[1]":
			case "module.child.aws_instance.foo[2]":
			default:
				t.Fatal("unknown instance:", i)
			}

			checkVals(t, expected, ric.After)
		}
	})
}
// TestContext2Plan_moduleInput verifies that a literal value passed into a
// child module as an input variable shows up in the planned value of the
// module's resource.
func TestContext2Plan_moduleInput(t *testing.T) {
	m := testModule(t, "plan-module-input")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		var expected cty.Value
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			})
		case "module.child.aws_instance.foo":
			// "42" is the input value handed to the child module.
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("42"),
				"type": cty.StringVal("aws_instance"),
			})
		default:
			t.Fatal("unknown instance:", i)
		}

		checkVals(t, expected, ric.After)
	}
}
// TestContext2Plan_moduleInputComputed verifies that a computed (unknown)
// value fed into a child module input propagates as unknown into the planned
// values on both sides of the module boundary.
func TestContext2Plan_moduleInputComputed(t *testing.T) {
	m := testModule(t, "plan-module-input-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":      cty.UnknownVal(cty.String),
				"foo":     cty.UnknownVal(cty.String),
				"type":    cty.StringVal("aws_instance"),
				"compute": cty.StringVal("foo"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleInputFromVar verifies that a root input variable
// supplied via ContextOpts.Variables flows through to a child module's
// resource in the plan.
func TestContext2Plan_moduleInputFromVar(t *testing.T) {
	m := testModule(t, "plan-module-input-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Override var.foo with "52", supplied as if by the caller.
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("52"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// The caller-supplied "52" should reach the module resource.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("52"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleMultiVar verifies splat-style references across a
// module boundary: counted instances' attributes are collected (joined as
// "baz,baz") into a sibling resource's planned value.
func TestContext2Plan_moduleMultiVar(t *testing.T) {
	m := testModule(t, "plan-module-multi-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
					"baz": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 5 {
		t.Fatal("expected 5 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}

		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.parent[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.parent[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.child.aws_instance.bar[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"baz": cty.StringVal("baz"),
			}), ric.After)
		case "module.child.aws_instance.bar[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"baz": cty.StringVal("baz"),
			}), ric.After)
		case "module.child.aws_instance.foo":
			// "foo" is the joined splat of the two bar instances' baz values.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"foo": cty.StringVal("baz,baz"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleOrphans plans a configuration that no longer
// declares module "child" while the prior state still records a
// resource inside it: the root resource must be planned for creation
// and the orphaned module resource for deletion, and planning itself
// must leave the state untouched.
func TestContext2Plan_moduleOrphans(t *testing.T) {
m := testModule(t, "plan-modules-remove")
p := testProvider("aws")
p.DiffFn = testDiffFn
s := MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: []string{"root", "child"},
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "baz",
},
Provider: "provider.aws",
},
},
},
},
})
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
State: s,
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 2 {
t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "aws_instance.foo":
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"num": cty.NumberIntVal(2),
"type": cty.StringVal("aws_instance"),
}), ric.After)
case "module.child.aws_instance.foo":
// The orphaned module resource is planned for destruction.
if res.Action != plans.Delete {
t.Fatalf("expected resource delete, got %s", res.Action)
}
default:
t.Fatal("unknown instance:", i)
}
}
// Plan must not mutate state: the orphan remains until apply.
expectedState := `<no state>
module.child:
aws_instance.foo:
ID = baz
provider = provider["registry.terraform.io/-/aws"]`
if ctx.State().String() != expectedState {
t.Fatalf("\nexpected state: %q\n\ngot: %q", expectedState, ctx.State().String())
}
}
// https://github.com/hashicorp/terraform/issues/3114
//
// TestContext2Plan_moduleOrphansWithProvisioner verifies that orphaned
// resources inside removed nested modules are planned for deletion even
// when the configuration uses provisioners, and that the root resource
// with no config change is a no-op.
func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) {
m := testModule(t, "plan-modules-remove-provisioners")
p := testProvider("aws")
pr := testProvisioner()
p.DiffFn = testDiffFn
s := MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: []string{"root"},
Resources: map[string]*ResourceState{
"aws_instance.top": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "top",
},
},
},
},
&ModuleState{
Path: []string{"root", "parent", "childone"},
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "baz",
},
Provider: "provider.aws",
},
},
},
&ModuleState{
Path: []string{"root", "parent", "childtwo"},
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "baz",
},
Provider: "provider.aws",
},
},
},
},
})
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
Provisioners: map[string]ProvisionerFactory{
"shell": testProvisionerFuncFixed(pr),
},
State: s,
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 3 {
t.Error("expected 3 planned resources, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "module.parent.module.childone.aws_instance.foo":
if res.Action != plans.Delete {
t.Fatalf("expected resource Delete, got %s", res.Action)
}
case "module.parent.module.childtwo.aws_instance.foo":
if res.Action != plans.Delete {
t.Fatalf("expected resource Delete, got %s", res.Action)
}
case "aws_instance.top":
if res.Action != plans.NoOp {
t.Fatal("expected no changes, got", res.Action)
}
default:
t.Fatalf("unknown instance: %s\nafter: %#v", i, hcl2shim.ConfigValueFromHCL2(ric.After))
}
}
// Plan must not mutate state; all three resources remain recorded.
expectedState := `aws_instance.top:
ID = top
provider = provider["registry.terraform.io/-/aws"]
module.parent.childone:
aws_instance.foo:
ID = baz
provider = provider["registry.terraform.io/-/aws"]
module.parent.childtwo:
aws_instance.foo:
ID = baz
provider = provider["registry.terraform.io/-/aws"]`
if expectedState != ctx.State().String() {
t.Fatalf("\nexpect state: %q\ngot state: %q\n", expectedState, ctx.State().String())
}
}
// TestContext2Plan_moduleProviderInherit verifies that a child module
// without its own provider configuration inherits the root module's
// "aws" provider configuration: both provider instances must be
// configured with from="root", and resources in both modules must be
// diffed (recorded in "calls" as "root" and "child").
func TestContext2Plan_moduleProviderInherit(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-inherit")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				// The factory runs once per provider instance; the shared
				// "calls" slice is guarded by the mutex.
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Both root and child instances must receive the
						// root module's configuration value.
						if v, ok := c.Get("from"); !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						v, _ := c.Get("from")

						l.Lock()
						defer l.Unlock()
						calls = append(calls, v.(string))
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns tfdiags.Diagnostics, not a plain error; check it with
	// HasErrors rather than comparing the slice to nil so a warning-only
	// result doesn't spuriously fail the test. This matches the
	// convention used by the other tests in this file.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	actual := calls
	sort.Strings(actual)
	expected := []string{"child", "root"}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
// This tests (for GH-11282) that deeply nested modules properly inherit
// configuration.
func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) {
	var l sync.Mutex

	m := testModule(t, "plan-module-provider-inherit-deep")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					// "from" records that ConfigureFn ran with the root
					// value before DiffFn is called.
					var from string
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						v, ok := c.Get("from")
						if !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						from = v.(string)
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						if from != "root" {
							return nil, fmt.Errorf("bad resource")
						}
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns tfdiags.Diagnostics; use HasErrors rather than a nil
	// comparison so a warning-only result doesn't fail the test,
	// matching the rest of this file.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_moduleProviderDefaultsVar verifies that provider
// configurations built from input variables are evaluated per module:
// the root provider sees only "root\n" and the child provider sees
// "child\nchild\n", as recorded by each instance's ConfigureFn.
func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-defaults-var")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): func() (providers.Interface, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"to":   {Type: cty.String, Optional: true},
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Record whichever of "from"/"to" this provider
						// instance was configured with.
						var buf bytes.Buffer
						if v, ok := c.Get("from"); ok {
							buf.WriteString(v.(string) + "\n")
						}
						if v, ok := c.Get("to"); ok {
							buf.WriteString(v.(string) + "\n")
						}

						// "calls" is shared between provider instances;
						// guard it with the mutex.
						l.Lock()
						defer l.Unlock()
						calls = append(calls, buf.String())
						return nil
					}
					p.DiffFn = testDiffFn
					return p, nil
				},
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("root"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Plan returns tfdiags.Diagnostics; use HasErrors rather than a nil
	// comparison so a warning-only result doesn't fail the test,
	// matching the rest of this file.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	expected := []string{
		"child\nchild\n",
		"root\n",
	}
	sort.Strings(calls)
	if !reflect.DeepEqual(calls, expected) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls)
	}
}
// TestContext2Plan_moduleProviderVar plans the "plan-module-provider-var"
// fixture and checks that the single child-module resource is planned
// for creation with value="hello".
func TestContext2Plan_moduleProviderVar(t *testing.T) {
m := testModule(t, "plan-module-provider-var")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
Provider: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"value": {Type: cty.String, Optional: true},
},
},
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"value": {Type: cty.String, Optional: true},
},
},
},
}
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 1 {
t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "module.child.aws_instance.test":
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"value": cty.StringVal("hello"),
}), ric.After)
default:
t.Fatal("unknown instance:", i)
}
}
}
// TestContext2Plan_moduleVar plans the "plan-module-var" fixture and
// checks the planned values for the root "bar" instance and the child
// module's "foo" instance, both expected to be creations.
func TestContext2Plan_moduleVar(t *testing.T) {
m := testModule(t, "plan-module-var")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 2 {
t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
// Build the expected value per instance, then compare once below.
var expected cty.Value
switch i := ric.Addr.String(); i {
case "aws_instance.bar":
expected = objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"foo": cty.StringVal("2"),
"type": cty.StringVal("aws_instance"),
})
case "module.child.aws_instance.foo":
expected = objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"num": cty.NumberIntVal(2),
"type": cty.StringVal("aws_instance"),
})
default:
t.Fatal("unknown instance:", i)
}
checkVals(t, expected, ric.After)
}
}
func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) {
m := testModule(t, "plan-module-wrong-var-type")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) {
m := testModule(t, "plan-module-wrong-var-type-nested")
p := testProvider("null")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("null"): testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) {
m := testModule(t, "plan-module-var-with-default-value")
p := testProvider("null")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("null"): testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
}
// TestContext2Plan_moduleVarComputed plans the "plan-module-var-computed"
// fixture, where a module variable is derived from a computed attribute,
// and checks that "foo" is planned as unknown in both instances.
func TestContext2Plan_moduleVarComputed(t *testing.T) {
m := testModule(t, "plan-module-var-computed")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 2 {
t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "aws_instance.bar":
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"foo": cty.UnknownVal(cty.String),
"type": cty.StringVal("aws_instance"),
}), ric.After)
case "module.child.aws_instance.foo":
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"foo": cty.UnknownVal(cty.String),
"type": cty.StringVal("aws_instance"),
"compute": cty.StringVal("foo"),
}), ric.After)
default:
t.Fatal("unknown instance:", i)
}
}
}
// TestContext2Plan_preventDestroy_bad verifies that planning a change
// which would destroy a resource marked lifecycle.prevent_destroy
// fails with an error naming the offending resource.
func TestContext2Plan_preventDestroy_bad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
	})

	plan, diags := ctx.Plan()

	// Format the diagnostics via Err() (as the destroyPlan variant of
	// this test does) rather than printing the raw Diagnostics slice.
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// Constant format string keeps go vet's printf check happy.
			t.Logf("%s", legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
// TestContext2Plan_preventDestroy_good verifies that a configuration
// with lifecycle.prevent_destroy plans cleanly (an empty plan) when no
// destruction is required.
func TestContext2Plan_preventDestroy_good(t *testing.T) {
m := testModule(t, "plan-prevent-destroy-good")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc123",
},
},
},
},
},
}),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
// Nothing changed, so the plan must be empty.
if !plan.Changes.Empty() {
t.Fatalf("expected no changes, got %#v\n", plan.Changes)
}
}
// TestContext2Plan_preventDestroy_countBad verifies that shrinking a
// counted resource fails when the instance that would be destroyed
// (index 1) has lifecycle.prevent_destroy set.
func TestContext2Plan_preventDestroy_countBad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		}),
	})

	plan, diags := ctx.Plan()

	// Format the diagnostics via Err() (as the destroyPlan variant of
	// this test does) rather than printing the raw Diagnostics slice.
	expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// Constant format string keeps go vet's printf check happy.
			t.Logf("%s", legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
// TestContext2Plan_preventDestroy_countGood verifies that a counted
// resource with lifecycle.prevent_destroy can still be planned (a
// non-empty plan) when the change does not destroy anything protected.
func TestContext2Plan_preventDestroy_countGood(t *testing.T) {
m := testModule(t, "plan-prevent-destroy-count-good")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"current": {Type: cty.String, Optional: true},
},
},
},
}
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.foo.0": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc123",
},
},
"aws_instance.foo.1": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc345",
},
},
},
},
},
}),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if plan.Changes.Empty() {
t.Fatalf("Expected non-empty plan, got %s", legacyDiffComparisonString(plan.Changes))
}
}
// TestContext2Plan_preventDestroy_countGoodNoChange is the no-op
// variant of the countGood test: the single existing instance already
// matches the configuration, so the plan must be empty.
func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) {
m := testModule(t, "plan-prevent-destroy-count-good")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"current": {Type: cty.String, Optional: true},
},
},
},
}
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.foo.0": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "i-abc123",
// Attributes already match the config, so no diff results.
Attributes: map[string]string{
"current": "0",
"type": "aws_instance",
},
},
},
},
},
},
}),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if !plan.Changes.Empty() {
t.Fatalf("Expected empty plan, got %s", legacyDiffComparisonString(plan.Changes))
}
}
// TestContext2Plan_preventDestroy_destroyPlan verifies that a destroy
// plan (ContextOpts.Destroy) refuses to destroy a resource marked
// lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
		Destroy: true,
	})

	plan, diags := ctx.Plan()

	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			// Constant format string keeps go vet's printf check happy.
			t.Logf("%s", legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, diags.Err())
	}
}
func TestContext2Plan_provisionerCycle(t *testing.T) {
m := testModule(t, "plan-provisioner-cycle")
p := testProvider("aws")
p.DiffFn = testDiffFn
pr := testProvisioner()
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
Provisioners: map[string]ProvisionerFactory{
"local-exec": testProvisionerFuncFixed(pr),
},
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
// TestContext2Plan_computed plans the "plan-computed" fixture and
// checks that computed attributes appear as unknown values in the
// planned creation of both instances.
func TestContext2Plan_computed(t *testing.T) {
m := testModule(t, "plan-computed")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
ty := schema.ImpliedType()
if len(plan.Changes.Resources) != 2 {
t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
}
for _, res := range plan.Changes.Resources {
if res.Action != plans.Create {
t.Fatalf("expected resource creation, got %s", res.Action)
}
ric, err := res.Decode(ty)
if err != nil {
t.Fatal(err)
}
switch i := ric.Addr.String(); i {
case "aws_instance.bar":
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"foo": cty.UnknownVal(cty.String),
"type": cty.StringVal("aws_instance"),
}), ric.After)
case "aws_instance.foo":
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"foo": cty.UnknownVal(cty.String),
"num": cty.NumberIntVal(2),
"type": cty.StringVal("aws_instance"),
"compute": cty.StringVal("foo"),
}), ric.After)
default:
t.Fatal("unknown instance:", i)
}
}
}
// TestContext2Plan_blockNestingGroup verifies the NestingGroup block
// mode: even when the "blah" block is absent from the configuration,
// the provider receives a non-null "blah" object whose nested
// attributes are null.
func TestContext2Plan_blockNestingGroup(t *testing.T) {
m := testModule(t, "plan-block-nesting-group")
p := testProvider("test")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"test": {
BlockTypes: map[string]*configschema.NestedBlock{
"blah": {
Nesting: configschema.NestingGroup,
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"baz": {Type: cty.String, Required: true},
},
},
},
},
},
},
}
// Echo the proposed state back as the planned state.
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
return providers.PlanResourceChangeResponse{
PlannedState: req.ProposedNewState,
}
}
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if got, want := 1, len(plan.Changes.Resources); got != want {
t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources))
}
if !p.PlanResourceChangeCalled {
t.Fatalf("PlanResourceChange was not called at all")
}
got := p.PlanResourceChangeRequest
want := providers.PlanResourceChangeRequest{
TypeName: "test",
// Because block type "blah" is defined as NestingGroup, we get a non-null
// value for it with null nested attributes, rather than the "blah" object
// itself being null, when there's no "blah" block in the config at all.
//
// This represents the situation where the remote service _always_ creates
// a single "blah", regardless of whether the block is present, but when
// the block _is_ present the user can override some aspects of it. The
// absense of the block means "use the defaults", in that case.
Config: cty.ObjectVal(map[string]cty.Value{
"blah": cty.ObjectVal(map[string]cty.Value{
"baz": cty.NullVal(cty.String),
}),
}),
ProposedNewState: cty.ObjectVal(map[string]cty.Value{
"blah": cty.ObjectVal(map[string]cty.Value{
"baz": cty.NullVal(cty.String),
}),
}),
}
if !cmp.Equal(got, want, valueTrans) {
t.Errorf("wrong PlanResourceChange request\n%s", cmp.Diff(got, want, valueTrans))
}
}
// TestContext2Plan_computedDataResource verifies that a data resource
// whose configuration depends on a computed managed-resource attribute
// is planned with an unknown "foo" value (the read is deferred to
// apply).
func TestContext2Plan_computedDataResource(t *testing.T) {
m := testModule(t, "plan-computed-data-resource")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"num": {Type: cty.String, Optional: true},
"compute": {Type: cty.String, Optional: true},
"foo": {Type: cty.String, Computed: true},
},
},
},
DataSources: map[string]*configschema.Block{
"aws_vpc": {
Attributes: map[string]*configschema.Attribute{
"foo": {Type: cty.String, Optional: true},
},
},
},
}
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
schema := p.GetSchemaReturn.DataSources["aws_vpc"]
ty := schema.ImpliedType()
if rc := plan.Changes.ResourceInstance(addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)); rc == nil {
t.Fatalf("missing diff for aws_instance.foo")
}
rcs := plan.Changes.ResourceInstance(addrs.Resource{
Mode: addrs.DataResourceMode,
Type: "aws_vpc",
Name: "bar",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
if rcs == nil {
t.Fatalf("missing diff for data.aws_vpc.bar")
}
rc, err := rcs.Decode(ty)
if err != nil {
t.Fatal(err)
}
// The data resource's "foo" is unknown until apply.
checkVals(t,
cty.ObjectVal(map[string]cty.Value{
"foo": cty.UnknownVal(cty.String),
}),
rc.After,
)
}
// TestContext2Plan_computedInFunction verifies that a data source is
// read exactly once, during refresh, and not read again during plan.
func TestContext2Plan_computedInFunction(t *testing.T) {
m := testModule(t, "plan-computed-in-function")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"attr": {Type: cty.Number, Optional: true},
},
},
},
DataSources: map[string]*configschema.Block{
"aws_data_source": {
Attributes: map[string]*configschema.Attribute{
"computed": {Type: cty.List(cty.String), Computed: true},
},
},
},
}
p.DiffFn = testDiffFn
p.ReadDataSourceResponse = providers.ReadDataSourceResponse{
State: cty.ObjectVal(map[string]cty.Value{
"computed": cty.ListVal([]cty.Value{
cty.StringVal("foo"),
}),
}),
}
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
diags := ctx.Validate()
assertNoErrors(t, diags)
state, diags := ctx.Refresh() // data resource is read in this step
assertNoErrors(t, diags)
if !p.ReadDataSourceCalled {
t.Fatalf("ReadDataSource was not called on provider during refresh; should've been called")
}
p.ReadDataSourceCalled = false // reset for next call
t.Logf("state after refresh:\n%s", state)
_, diags = ctx.Plan() // should do nothing with data resource in this step, since it was already read
assertNoErrors(t, diags)
if p.ReadDataSourceCalled {
t.Fatalf("ReadDataSource was called on provider during plan; should not have been called")
}
}
// TestContext2Plan_computedDataCountResource verifies that a counted
// data resource produces a planned change for each of its three
// instances.
func TestContext2Plan_computedDataCountResource(t *testing.T) {
m := testModule(t, "plan-computed-data-count")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"num": {Type: cty.String, Optional: true},
"compute": {Type: cty.String, Optional: true},
"foo": {Type: cty.String, Computed: true},
},
},
},
DataSources: map[string]*configschema.Block{
"aws_vpc": {
Attributes: map[string]*configschema.Attribute{
"foo": {Type: cty.String, Optional: true},
},
},
},
}
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
// make sure we created 3 "bar"s
for i := 0; i < 3; i++ {
addr := addrs.Resource{
Mode: addrs.DataResourceMode,
Type: "aws_vpc",
Name: "bar",
}.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance)
if rcs := plan.Changes.ResourceInstance(addr); rcs == nil {
t.Fatalf("missing changes for %s", addr)
}
}
}
func TestContext2Plan_localValueCount(t *testing.T) {
m := testModule(t, "plan-local-value-count")
p := testProvider("test")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
// make sure we created 3 "foo"s
for i := 0; i < 3; i++ {
addr := addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_resource",
Name: "foo",
}.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance)
if rcs := plan.Changes.ResourceInstance(addr); rcs == nil {
t.Fatalf("missing changes for %s", addr)
}
}
}
// TestContext2Plan_dataResourceBecomesComputed verifies that a data
// resource already present in state is re-planned with an unknown
// value (and NOT re-read) when its configuration becomes dependent on
// an unknown managed-resource attribute.
func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) {
m := testModule(t, "plan-data-resource-becomes-computed")
p := testProvider("aws")
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"aws_instance": {
Attributes: map[string]*configschema.Attribute{
"foo": {Type: cty.String, Optional: true},
"computed": {Type: cty.String, Computed: true},
},
},
},
DataSources: map[string]*configschema.Block{
"aws_data_source": {
Attributes: map[string]*configschema.Attribute{
"foo": {Type: cty.String, Optional: true},
},
},
},
}
// Pass "foo" through and mark "computed" as unknown in the plan.
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
fooVal := req.ProposedNewState.GetAttr("foo")
return providers.PlanResourceChangeResponse{
PlannedState: cty.ObjectVal(map[string]cty.Value{
"foo": fooVal,
"computed": cty.UnknownVal(cty.String),
}),
PlannedPrivate: req.PriorPrivate,
}
}
schema := p.GetSchemaReturn.DataSources["aws_data_source"]
ty := schema.ImpliedType()
p.ReadDataSourceResponse = providers.ReadDataSourceResponse{
// This should not be called, because the configuration for the
// data resource contains an unknown value for "foo".
Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")),
}
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: providers.ResolverFixed(
map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
},
),
State: MustShimLegacyState(&State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"data.aws_data_source.foo": &ResourceState{
Type: "aws_data_source",
Primary: &InstanceState{
ID: "i-abc123",
Attributes: map[string]string{
"id": "i-abc123",
"foo": "baz",
},
},
},
},
},
},
}),
})
_, diags := ctx.Refresh()
if diags.HasErrors() {
t.Fatalf("unexpected errors during refresh: %s", diags.Err())
}
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors during plan: %s", diags.Err())
}
rcs := plan.Changes.ResourceInstance(addrs.Resource{
Mode: addrs.DataResourceMode,
Type: "aws_data_source",
Name: "foo",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
if rcs == nil {
t.Logf("full changeset: %s", spew.Sdump(plan.Changes))
t.Fatalf("missing diff for data.aws_data_resource.foo")
}
rc, err := rcs.Decode(ty)
if err != nil {
t.Fatal(err)
}
// foo should now be unknown
foo := rc.After.GetAttr("foo")
if foo.IsKnown() {
t.Fatalf("foo should be unknown, got %#v", foo)
}
}
// TestContext2Plan_computedList verifies that a legacy flatmap DiffFn which
// reports "list.#" as NewComputed causes the planned "list" attribute — and
// any attribute derived from it on another resource — to appear as unknown.
func TestContext2Plan_computedList(t *testing.T) {
	m := testModule(t, "plan-computed-list")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Optional: true},
					"num":     {Type: cty.String, Optional: true},
					"list":    {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		diff := &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{},
		}
		// Index the computed keys for the lookups below.
		computedKeys := map[string]bool{}
		for _, k := range c.ComputedKeys {
			computedKeys[k] = true
		}
		// "compute" names another attribute that should be marked computed.
		compute, _ := c.Raw["compute"].(string)
		if compute != "" {
			diff.Attributes[compute] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
			diff.Attributes["compute"] = &ResourceAttrDiff{
				Old: "",
				New: compute,
			}
		}
		// Emit a diff entry for "foo" only when the value changed.
		fooOld := s.Attributes["foo"]
		fooNew, _ := c.Raw["foo"].(string)
		if fooOld != fooNew {
			diff.Attributes["foo"] = &ResourceAttrDiff{
				Old:         fooOld,
				New:         fooNew,
				NewComputed: computedKeys["foo"],
			}
		}
		// Likewise for "num".
		numOld := s.Attributes["num"]
		numNew, _ := c.Raw["num"].(string)
		if numOld != numNew {
			diff.Attributes["num"] = &ResourceAttrDiff{
				Old:         numOld,
				New:         numNew,
				NewComputed: computedKeys["num"],
			}
		}
		// A missing "list.#" count in prior state means the whole list is
		// not yet known; flag the count itself as computed.
		listOld := s.Attributes["list.#"]
		if listOld == "" {
			diff.Attributes["list.#"] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
		}
		return diff, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// "foo" here references the computed list, so it is unknown too.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"list": cty.UnknownVal(cty.List(cty.String)),
				"foo":  cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"list":    cty.UnknownVal(cty.List(cty.String)),
				"num":     cty.NumberIntVal(2),
				"compute": cty.StringVal("list.#"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// GH-8695. This tests that you can index into a computed list on a
// splatted resource.
// TestContext2Plan_computedMultiIndex verifies that indexing into a computed
// list on a splatted resource yields unknown values in the plan (GH-8695).
func TestContext2Plan_computedMultiIndex(t *testing.T) {
	m := testModule(t, "plan-computed-multi-index")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.List(cty.String), Optional: true},
					"ip":      {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	// NOTE: the redundant `p.DiffFn = testDiffFn` assignment that was
	// immediately overwritten by this custom implementation has been removed.
	p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		diff := &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{},
		}
		// "compute" names another attribute that should be marked computed.
		compute, _ := c.Raw["compute"].(string)
		if compute != "" {
			diff.Attributes[compute] = &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			}
			diff.Attributes["compute"] = &ResourceAttrDiff{
				Old: "",
				New: compute,
			}
		}
		fooOld := s.Attributes["foo"]
		fooNew, _ := c.Raw["foo"].(string)
		fooComputed := false
		for _, k := range c.ComputedKeys {
			if k == "foo" {
				fooComputed = true
			}
		}
		if fooNew != "" {
			diff.Attributes["foo"] = &ResourceAttrDiff{
				Old:         fooOld,
				New:         fooNew,
				NewComputed: fooComputed,
			}
		}
		// "ip" becomes computed only when there is no prior value for it.
		ipOld := s.Attributes["ip"]
		ipComputed := ipOld == ""
		diff.Attributes["ip"] = &ResourceAttrDiff{
			Old:         ipOld,
			New:         "",
			NewComputed: ipComputed,
		}
		return diff, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()
	if len(plan.Changes.Resources) != 3 {
		t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
	}
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		// Both counted foo instances expect the same planned value.
		case "aws_instance.foo[0]", "aws_instance.foo[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":      cty.UnknownVal(cty.List(cty.String)),
				"foo":     cty.NullVal(cty.List(cty.String)),
				"compute": cty.StringVal("ip.#"),
			}), ric.After)
		case "aws_instance.bar[0]":
			// bar indexes into foo's computed "ip", so its "foo" is unknown.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":  cty.UnknownVal(cty.List(cty.String)),
				"foo": cty.UnknownVal(cty.List(cty.String)),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_count checks that count expansion produces one create
// change per instance and that aws_instance.bar sees the joined "foo" values.
func TestContext2Plan_count(t *testing.T) {
	m := testModule(t, "plan-count")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 6 {
		t.Fatal("expected 6 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		// Every counted instance plans the same "foo"; only bar, which joins
		// all five of them, expects a different value.
		wantFoo := "foo"
		switch addr := ric.Addr.String(); addr {
		case "aws_instance.bar":
			wantFoo = "foo,foo,foo,foo,foo"
		case "aws_instance.foo[0]", "aws_instance.foo[1]", "aws_instance.foo[2]",
			"aws_instance.foo[3]", "aws_instance.foo[4]":
			// wantFoo stays "foo"
		default:
			t.Fatal("unknown instance:", addr)
		}
		checkVals(t, objectVal(t, schema, map[string]cty.Value{
			"id":   cty.UnknownVal(cty.String),
			"foo":  cty.StringVal(wantFoo),
			"type": cty.StringVal("aws_instance"),
		}), ric.After)
	}
}
// TestContext2Plan_countComputed verifies that a "count" value which cannot
// be determined at plan time is rejected with an error diagnostic.
func TestContext2Plan_countComputed(t *testing.T) {
	m := testModule(t, "plan-count-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// Plan returns diagnostics rather than a plain error; the original
	// compared the diagnostics against nil, which would also be satisfied by
	// warning-only diagnostics. HasErrors asserts a real error occurred.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_countComputedModule verifies that a computed "count" on a
// resource inside a module fails the plan with the expected diagnostic text.
func TestContext2Plan_countComputedModule(t *testing.T) {
	m := testModule(t, "plan-count-computed-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// The original named the diagnostics "err" and formatted the raw
	// diagnostics slice with %s; check HasErrors explicitly (guarding
	// against a nil diags.Err()) and match against the combined error.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("succeeded; want error")
	}
	expectedErr := `The "count" value depends on resource attributes`
	if gotErr := diags.Err().Error(); !strings.Contains(gotErr, expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\n",
			expectedErr, gotErr)
	}
}
// TestContext2Plan_countModuleStatic checks that a static count inside a
// child module expands into one create change per instance.
func TestContext2Plan_countModuleStatic(t *testing.T) {
	m := testModule(t, "plan-count-module-static")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 3 {
		t.Fatal("expected 3 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		// All three instances expect the same planned value.
		switch addr := ric.Addr.String(); addr {
		case "module.child.aws_instance.foo[0]",
			"module.child.aws_instance.foo[1]",
			"module.child.aws_instance.foo[2]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countModuleStaticGrandchild checks static count expansion
// for a resource nested two module levels deep.
func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) {
	m := testModule(t, "plan-count-module-static-grandchild")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 3 {
		t.Fatal("expected 3 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		// All three grandchild instances expect the same planned value.
		switch addr := ric.Addr.String(); addr {
		case "module.child.module.child.aws_instance.foo[0]",
			"module.child.module.child.aws_instance.foo[1]",
			"module.child.module.child.aws_instance.foo[2]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countIndex checks that each counted instance receives its
// own count.index via the "foo" attribute.
func TestContext2Plan_countIndex(t *testing.T) {
	m := testModule(t, "plan-count-index")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	// Map each expected address to the count.index it should plan for "foo".
	wantFoo := map[string]string{
		"aws_instance.foo[0]": "0",
		"aws_instance.foo[1]": "1",
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		addr := ric.Addr.String()
		foo, known := wantFoo[addr]
		if !known {
			t.Fatal("unknown instance:", addr)
		}
		checkVals(t, objectVal(t, schema, map[string]cty.Value{
			"id":   cty.UnknownVal(cty.String),
			"foo":  cty.StringVal(foo),
			"type": cty.StringVal("aws_instance"),
		}), ric.After)
	}
}
// TestContext2Plan_countVar checks count expansion when the count comes from
// an input variable ("instance_count" = "3").
func TestContext2Plan_countVar(t *testing.T) {
	m := testModule(t, "plan-count-var")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"instance_count": &InputValue{
				Value:      cty.StringVal("3"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 4 {
		t.Fatal("expected 4 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		var wantFoo string
		switch addr := ric.Addr.String(); addr {
		case "aws_instance.bar":
			// bar joins the "foo" values of the three counted instances.
			wantFoo = "foo,foo,foo"
		case "aws_instance.foo[0]", "aws_instance.foo[1]", "aws_instance.foo[2]":
			wantFoo = "foo"
		default:
			t.Fatal("unknown instance:", addr)
		}
		checkVals(t, objectVal(t, schema, map[string]cty.Value{
			"id":   cty.UnknownVal(cty.String),
			"foo":  cty.StringVal(wantFoo),
			"type": cty.StringVal("aws_instance"),
		}), ric.After)
	}
}
// TestContext2Plan_countZero plans a configuration referencing a zero-count
// resource; the resulting "foo" value is expected to be an empty tuple
// rather than an error.
func TestContext2Plan_countZero(t *testing.T) {
	m := testModule(t, "plan-count-zero")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.DynamicPseudoType, Optional: true},
				},
			},
		},
	}

	// This schema contains a DynamicPseudoType, and therefore can't go through any shim functions
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		// Pass the proposed state through unchanged.
		resp.PlannedState = req.ProposedNewState
		resp.PlannedPrivate = req.PriorPrivate
		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]

	if res.Action != plans.Create {
		t.Fatalf("expected resource creation, got %s", res.Action)
	}
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// The zero-count reference should plan as an empty tuple.
	expected := cty.TupleVal(nil)

	foo := ric.After.GetAttr("foo")

	if !cmp.Equal(expected, foo, valueComparer) {
		t.Fatal(cmp.Diff(expected, foo, valueComparer))
	}
}
// TestContext2Plan_countOneIndex checks indexing into a count = 1 resource;
// both the indexed instance and the referencing resource plan identically.
func TestContext2Plan_countOneIndex(t *testing.T) {
	m := testModule(t, "plan-count-one-index")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		// Both instances expect the same planned object.
		switch addr := ric.Addr.String(); addr {
		case "aws_instance.bar", "aws_instance.foo[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countDecreaseToOne starts with three indexed instances in
// state and plans with a lower count, expecting deletes for the surplus
// indexes while the surviving instance is left unchanged.
func TestContext2Plan_countDecreaseToOne(t *testing.T) {
	m := testModule(t, "plan-count-dec")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: indexes 0..2 exist; only index 0 has full attributes.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.foo.2": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// The surviving instance loses its index but is otherwise untouched.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]", "aws_instance.foo[2]":
			if res.Action != plans.Delete {
				t.Fatalf("expected resource delete, got %s", res.Action)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}

	// Planning must not modify the prior state itself.
	expectedState := `aws_instance.foo.0:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]
  foo = foo
  type = aws_instance
aws_instance.foo.1:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]
aws_instance.foo.2:
  ID = bar
  provider = provider["registry.terraform.io/-/aws"]`

	// Fixed the "epected state" typo in the failure message, and render the
	// state string once rather than twice.
	gotState := ctx.State().String()
	if gotState != expectedState {
		t.Fatalf("expected state:\n%q\n\ngot state:\n%q\n", expectedState, gotState)
	}
}
// TestContext2Plan_countIncreaseFromNotSet starts with an unindexed instance
// ("aws_instance.foo") in state and plans with a count, expecting the
// existing instance to become index 0 and creates for the new indexes.
func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	priorState := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 4 {
		t.Fatal("expected 4 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}

		switch addr := ric.Addr.String(); addr {
		case "aws_instance.bar":
			if change.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", change.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[0]":
			// The pre-existing instance moves to index 0 unchanged.
			if change.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", addr)
			}
		case "aws_instance.foo[1]", "aws_instance.foo[2]":
			if change.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", change.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", addr)
		}
	}
}
// TestContext2Plan_countIncreaseFromOne begins with a single indexed
// instance ("aws_instance.foo.0") in state and plans with a larger count,
// expecting create diffs only for the new indexes and no change for index 0.
func TestContext2Plan_countIncreaseFromOne(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: only index 0 exists.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[0]":
			// The pre-existing instance should be untouched.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// https://github.com/PeoplePerHour/terraform/pull/11
//
// This tests a case where both a "resource" and "resource.0" are in
// the state file, which apparently is a reasonable backwards compatibility
// concern found in the above 3rd party repo.
// TestContext2Plan_countIncreaseFromOneCorrupted handles the "corrupted"
// legacy-state case where both "aws_instance.foo" and "aws_instance.foo.0"
// exist: the unindexed duplicate should be planned for deletion while index
// 0 is kept, and the new indexes are created.
func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: the same instance appears both unindexed and at index 0.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 5 {
		t.Fatal("expected 5 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// The unindexed duplicate must be removed.
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		case "aws_instance.foo[0]":
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// A common pattern in TF configs is to have a set of resources with the same
// count and to use count.index to create correspondences between them:
//
// foo_id = "${foo.bar.*.id[count.index]}"
//
// This test is for the situation where some instances already exist and the
// count is increased. In that case, we should see only the create diffs
// for the new instances and not any update diffs for the existing ones.
// TestContext2Plan_countIncreaseWithSplatReference increases the count of
// two resources linked by a splat-index reference; existing instances should
// plan as NoOp and only the new index pair should be created.
func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) {
	m := testModule(t, "plan-count-splat-reference")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"name":     {Type: cty.String, Optional: true},
					"foo_name": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Prior state: two foo instances and two bar instances whose "foo_name"
	// values already correspond to foo's "name" values.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 1",
							},
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 6 {
		t.Fatal("expected 6 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar[0]", "aws_instance.bar[1]", "aws_instance.foo[0]", "aws_instance.foo[1]":
			// Existing instances must not see update diffs.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.bar[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo_name": cty.StringVal("foo 2"),
			}), ric.After)
		case "aws_instance.foo[2]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"name": cty.StringVal("foo 2"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_forEach confirms that a for_each resource expands into one
// create change per key and that every change decodes cleanly.
func TestContext2Plan_forEach(t *testing.T) {
	m := testModule(t, "plan-for-each")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 8 {
		t.Fatal("expected 8 changes, got", n)
	}

	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", change.Action)
		}
		if _, decodeErr := change.Decode(ty); decodeErr != nil {
			t.Fatal(decodeErr)
		}
	}
}
// TestContext2Plan_forEachUnknownValue verifies that an unknown for_each
// value is reported as an error diagnostic rather than causing a panic.
func TestContext2Plan_forEachUnknownValue(t *testing.T) {
	// This module has a variable defined, but its value is unknown. We
	// expect this to produce an error, but not to panic.
	m := testModule(t, "plan-for-each-unknown-value")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": {
				Value:      cty.UnknownVal(cty.String),
				SourceType: ValueFromCLIArg,
			},
		},
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Invalid for_each argument: The "for_each" value depends on resource attributes that cannot be determined until apply...
		t.Fatal("succeeded; want errors")
	}

	gotErrStr := diags.Err().Error()
	wantErrStr := "Invalid for_each argument"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_destroy checks that a destroy plan produces a delete
// change for every instance present in state.
func TestContext2Plan_destroy(t *testing.T) {
	m := testModule(t, "plan-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	priorState := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.one": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.two": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	})
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   priorState,
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if n := len(plan.Changes.Resources); n != 2 {
		t.Fatal("expected 2 changes, got", n)
	}

	// Both instances from state must be planned for deletion.
	wantDeleted := map[string]bool{
		"aws_instance.one": true,
		"aws_instance.two": true,
	}
	for _, change := range plan.Changes.Resources {
		ric, decodeErr := change.Decode(ty)
		if decodeErr != nil {
			t.Fatal(decodeErr)
		}
		addr := ric.Addr.String()
		if !wantDeleted[addr] {
			t.Fatal("unknown instance:", addr)
		}
		if change.Action != plans.Delete {
			t.Fatalf("resource %s should be removed", addr)
		}
	}
}
// TestContext2Plan_moduleDestroy verifies that a destroy-mode plan removes
// resources both in the root module and in a child module.
func TestContext2Plan_moduleDestroy(t *testing.T) {
	m := testModule(t, "plan-module-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: one aws_instance.foo in root, and one in module.child.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	// Both the root and child module instances must be planned for deletion.
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo", "module.child.aws_instance.foo":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleDestroyCycle is a regression test for GH-1835:
// destroying two sibling modules must not produce a dependency cycle; both
// modules' resources should simply be planned for deletion.
func TestContext2Plan_moduleDestroyCycle(t *testing.T) {
	m := testModule(t, "plan-module-destroy-gh-1835")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: one instance in each of two sibling child modules.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "a_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.a": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "a",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "b_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.b": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "b",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	// Both module instances must be planned for deletion.
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.a_module.aws_instance.a", "module.b_module.aws_instance.b":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleDestroyMultivar verifies that a destroy-mode plan
// removes all count-indexed instances of a resource inside a child module.
func TestContext2Plan_moduleDestroyMultivar(t *testing.T) {
	m := testModule(t, "plan-module-destroy-multivar")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: an empty root module and two count instances of
	// aws_instance.foo inside module.child.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path:      rootModulePath,
				Resources: map[string]*ResourceState{},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar0",
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar1",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	// Both indexed instances must be planned for deletion.
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.foo[0]", "module.child.aws_instance.foo[1]":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_pathVar verifies that the path.cwd, path.module, and
// path.root expressions resolve to the expected directories in planned
// attribute values.
func TestContext2Plan_pathVar(t *testing.T) {
	// path.cwd should be based on the test process's working directory.
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	m := testModule(t, "plan-path-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"cwd":    {Type: cty.String, Optional: true},
					"module": {Type: cty.String, Optional: true},
					"root":   {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			// The fixture appends "barpath"/"foopath" suffixes to each of
			// the three path expressions; check all of them in the plan.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"cwd":    cty.StringVal(cwd + "/barpath"),
				"module": cty.StringVal(m.Module.SourceDir + "/foopath"),
				"root":   cty.StringVal(m.Module.SourceDir + "/barpath"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_diffVar verifies that one resource can reference a
// diffed (planned) attribute of another: aws_instance.foo is planned for an
// update (num 2 -> 3), and aws_instance.bar picks up the new value.
func TestContext2Plan_diffVar(t *testing.T) {
	m := testModule(t, "plan-diffvar")
	p := testProvider("aws")

	// Prior state: aws_instance.foo exists with num=2.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"num": "2",
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	// Note: DiffFn is assigned after testContext2; the provider is not
	// invoked until Plan, so this is safe. The custom DiffFn forces a
	// num 2 -> 3 update for the instance with ID "bar" and delegates to
	// testDiffFn for everything else.
	p.DiffFn = func(
		info *InstanceInfo,
		s *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if s.ID != "bar" {
			return testDiffFn(info, s, c)
		}

		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"num": &ResourceAttrDiff{
					Old: "2",
					New: "3",
				},
			},
		}, nil
	}

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// New resource referencing foo's planned num value.
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(3),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// Existing resource updated in place from num=2 to num=3.
			if res.Action != plans.Update {
				t.Fatalf("resource %s should be updated", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.StringVal("bar"),
				"num": cty.NumberIntVal(2),
			}), ric.Before)
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":  cty.StringVal("bar"),
				"num": cty.NumberIntVal(3),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_hook verifies that planning invokes the PreDiff and
// PostDiff hooks on a registered Hook implementation.
func TestContext2Plan_hook(t *testing.T) {
	mod := testModule(t, "plan-good")
	hook := new(MockHook)
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	planCtx := testContext2(t, &ContextOpts{
		Config: mod,
		Hooks:  []Hook{hook},
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	if _, diags := planCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Both diff lifecycle hooks must have fired during the plan.
	switch {
	case !hook.PreDiffCalled:
		t.Fatal("should be called")
	case !hook.PostDiffCalled:
		t.Fatal("should be called")
	}
}
// TestContext2Plan_closeProvider verifies that providers are closed after a
// plan completes, even for provider configurations nested inside modules.
func TestContext2Plan_closeProvider(t *testing.T) {
	// this fixture only has an aliased provider located in the module, to make
	// sure that the provider name contains a path more complex than
	// "provider.aws".
	m := testModule(t, "plan-close-module-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !p.CloseCalled {
		t.Fatal("provider not closed")
	}
}
// TestContext2Plan_orphan verifies that a resource present in state but
// absent from configuration (an orphan) is planned for deletion, while the
// configured resource is planned for creation.
func TestContext2Plan_orphan(t *testing.T) {
	m := testModule(t, "plan-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: aws_instance.baz exists, but the config only declares
	// aws_instance.foo — so baz is an orphan.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.baz": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.baz":
			// The orphan must be destroyed.
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}
		case "aws_instance.foo":
			// The configured resource must be created.
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// This tests that configurations with UUIDs don't produce errors.
// For shadows, this would produce errors since a UUID changes every time.
func TestContext2Plan_shadowUuid(t *testing.T) {
	mod := testModule(t, "plan-shadow-uuid")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	planCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	// The plan itself is the assertion: it must complete without errors.
	if _, diags := planCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_state verifies planning against existing state: the
// resource already in state is planned for an in-place update, and the new
// resource is planned for creation.
func TestContext2Plan_state(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: aws_instance.foo exists with no attributes recorded.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// Exactly two changes are expected; this single check also subsumes the
	// old redundant "< 2" pre-check.
	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			if res.Action != plans.Update {
				t.Fatalf("resource %s should be updated", i)
			}
			// Before: only the id was in state; num and type are null.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("bar"),
				"num":  cty.NullVal(cty.Number),
				"type": cty.NullVal(cty.String),
			}), ric.Before)
			// After: the config fills in num and type.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("bar"),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_taint verifies that a tainted instance is planned for
// replacement (DeleteThenCreate) while an untainted, unchanged instance is a
// no-op.
func TestContext2Plan_taint(t *testing.T) {
	m := testModule(t, "plan-taint")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: foo is healthy; bar is marked tainted.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"num": "2"},
						},
					},
					"aws_instance.bar": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "baz",
							Tainted: true,
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			// Tainted instance must be replaced.
			if res.Action != plans.DeleteThenCreate {
				t.Fatalf("resource %s should be replaced", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "aws_instance.foo":
			// Healthy instance with no config drift must be untouched.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should not be changed", i)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_taintIgnoreChanges verifies that ignore_changes does not
// prevent replacement of a tainted instance: the instance is still planned
// as DeleteThenCreate, with the ignored attribute carried over.
func TestContext2Plan_taintIgnoreChanges(t *testing.T) {
	m := testModule(t, "plan-taint-ignore-changes")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":   {Type: cty.String, Computed: true},
					"vars": {Type: cty.String, Optional: true},
					"type": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn

	// Prior state: a single tainted instance with vars set.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "foo",
							Attributes: map[string]string{
								"vars": "foo",
								"type": "aws_instance",
							},
							Tainted: true,
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.DeleteThenCreate {
				t.Fatalf("resource %s should be replaced", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.StringVal("foo"),
				"vars": cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.Before)
			// vars stays "foo" after replacement because of ignore_changes.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"vars": cty.StringVal("foo"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// Fails about 50% of the time before the fix for GH-4982, covers the fix.
// The plan is repeated many times because the original failure was a race
// and only reproduced intermittently.
func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
	m := testModule(t, "plan-taint-interpolated-count")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: three count instances; only index 0 is tainted.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "bar",
							Tainted: true,
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.foo.2": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	})

	// Re-plan repeatedly to give the (formerly racy) behavior a chance to
	// manifest; every iteration must produce the same result.
	for i := 0; i < 100; i++ {
		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: providers.ResolverFixed(
				map[addrs.Provider]providers.Factory{
					addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
				},
			),
			State: s,
		})

		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("unexpected errors: %s", diags.Err())
		}

		schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
		ty := schema.ImpliedType()

		if len(plan.Changes.Resources) != 3 {
			t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
		}

		for _, res := range plan.Changes.Resources {
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}
			switch i := ric.Addr.String(); i {
			case "aws_instance.foo[0]":
				// The tainted instance must be replaced...
				if res.Action != plans.DeleteThenCreate {
					t.Fatalf("resource %s should be replaced", i)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"id": cty.StringVal("bar"),
				}), ric.Before)
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"id": cty.UnknownVal(cty.String),
				}), ric.After)
			case "aws_instance.foo[1]", "aws_instance.foo[2]":
				// ...while its healthy siblings are untouched.
				if res.Action != plans.NoOp {
					t.Fatalf("resource %s should not be changed", i)
				}
			default:
				t.Fatal("unknown instance:", i)
			}
		}
	}
}
// TestContext2Plan_targeted verifies that targeting a single resource limits
// the plan to just that resource.
func TestContext2Plan_targeted(t *testing.T) {
	m := testModule(t, "plan-targeted")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Target only aws_instance.foo; everything else should be excluded.
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "foo",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			if res.Action != plans.Create {
				t.Fatalf("resource %s should be created", i)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// Test that targeting a module properly plans any inputs that depend
// on another module: targeting module.B must also pull in module.A, which
// B's input references.
func TestContext2Plan_targetedCrossModule(t *testing.T) {
	m := testModule(t, "plan-targeted-cross-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Only module.B is targeted; module.A is expected as a dependency.
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "module.A.aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		case "module.B.aws_instance.bar":
			// foo is unknown here because it depends on module.A's output.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_targetedModuleWithProvider verifies that targeting a
// module whose resources use a provider configured in that module still
// plans correctly, producing a change only for the targeted module.
func TestContext2Plan_targetedModuleWithProvider(t *testing.T) {
	m := testModule(t, "plan-targeted-module-with-provider")
	p := testProvider("null")
	p.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"key": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"null_resource": {
				Attributes: map[string]*configschema.Attribute{},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("null"): testProviderFuncFixed(p),
			},
		),
		// Only module.child2 is targeted; module.child1 must be excluded.
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child2", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["null_resource"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// Fixed typo in the failure message ("unexpcetd" -> "unexpected").
	if ric.Addr.String() != "module.child2.null_resource.foo" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}
}
// TestContext2Plan_targetedOrphan verifies that a destroy plan targeting a
// single orphaned resource deletes only that resource, leaving other state
// entries untouched.
func TestContext2Plan_targetedOrphan(t *testing.T) {
	m := testModule(t, "plan-targeted-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Prior state: two resources not present in config; only "orphan"
		// will be targeted for destruction below.
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.orphan": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-789xyz",
							},
						},
						"aws_instance.nottargeted": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		}),
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.orphan":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be destroyed", ric.Addr)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_targetedModuleOrphan is a regression test for
// https://github.com/hashicorp/terraform/issues/2538: a destroy plan that
// targets a single orphaned resource inside a module should delete only that
// resource.
func TestContext2Plan_targetedModuleOrphan(t *testing.T) {
	m := testModule(t, "plan-targeted-module-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Prior state: two resources inside module.child; only "orphan"
		// is targeted below.
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: []string{"root", "child"},
					Resources: map[string]*ResourceState{
						"aws_instance.orphan": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-789xyz",
							},
							Provider: "provider.aws",
						},
						"aws_instance.nottargeted": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
							Provider: "provider.aws",
						},
					},
				},
			},
		}),
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	// Fixed the malformed failure message (was "unexpected resource :%s").
	if ric.Addr.String() != "module.child.aws_instance.orphan" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}
	if res.Action != plans.Delete {
		t.Fatalf("resource %s should be deleted", ric.Addr)
	}
}
// TestContext2Plan_targetedModuleUntargetedVariable verifies that targeting
// a resource and a module still plans variables referenced by the targeted
// module even when their source resource is not itself targeted.
func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) {
	m := testModule(t, "plan-targeted-module-untargeted-variable")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "blue",
			),
			addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}
		switch i := ric.Addr.String(); i {
		case "aws_instance.blue":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.blue_mod.aws_instance.mod":
			// value is unknown because it flows from aws_instance.blue.
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":    cty.UnknownVal(cty.String),
				"value": cty.UnknownVal(cty.String),
				"type":  cty.StringVal("aws_instance"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// ensure that outputs missing references due to targeting are removed from
// the graph, producing a single targeting warning rather than an error.
func TestContext2Plan_outputContainsTargetedResource(t *testing.T) {
	m := testModule(t, "plan-untargeted-resource-output")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "a",
			),
		},
	})

	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags)
	}
	// Fixed format bug: the %d verb previously received the diags slice
	// itself rather than its length, which go vet flags and which printed
	// garbage in the failure message.
	if len(diags) != 1 {
		t.Fatalf("got %d diagnostics; want 1", len(diags))
	}

	// The single diagnostic must be the standard targeting warning.
	if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
		t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
	}
	if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want {
		t.Errorf("wrong diagnostic summary %#v; want %#v", got, want)
	}
}
// TestContext2Plan_targetedOverTen is a regression test for
// https://github.com/hashicorp/terraform/issues/4515: targeting an instance
// with index >= 10 must not accidentally match other indexes via prefix
// matching (e.g. foo[1] matching foo[10]..foo[12]); all instances should
// plan as NoOp.
func TestContext2Plan_targetedOverTen(t *testing.T) {
	m := testModule(t, "plan-targeted-over-ten")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Seed state with 13 count instances so both one- and two-digit
	// indexes exist. (A previously-built "expectedState" slice was dead
	// code and has been removed.)
	resources := make(map[string]*ResourceState)
	for i := 0; i < 13; i++ {
		key := fmt.Sprintf("aws_instance.foo.%d", i)
		id := fmt.Sprintf("i-abc%d", i)
		resources[key] = &ResourceState{
			Type:    "aws_instance",
			Primary: &InstanceState{ID: id},
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: MustShimLegacyState(&State{
			Modules: []*ModuleState{
				&ModuleState{
					Path:      rootModulePath,
					Resources: resources,
				},
			},
		}),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.ResourceInstance(
				addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1),
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	// Nothing in config differs from state, so every change is a NoOp.
	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}
		if res.Action != plans.NoOp {
			t.Fatalf("unexpected action %s for %s", res.Action, ric.Addr)
		}
	}
}
// TestContext2Plan_provider verifies that a variable value flows into the
// provider configuration: ConfigureFn must observe foo="bar" during Plan.
func TestContext2Plan_provider(t *testing.T) {
	m := testModule(t, "plan-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Capture the "foo" value the provider is configured with.
	var value interface{}
	p.ConfigureFn = func(c *ResourceConfig) error {
		value, _ = c.Get("foo")
		return nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("bar"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Use the diags.HasErrors pattern for consistency with the other plan
	// tests (the old "err != nil" check also tripped on mere warnings).
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	if value != "bar" {
		t.Fatalf("bad: %#v", value)
	}
}
// TestContext2Plan_varListErr verifies that the plan-var-list-err fixture
// fails to plan: an error diagnostic is required.
func TestContext2Plan_varListErr(t *testing.T) {
	m := testModule(t, "plan-var-list-err")
	p := testProvider("aws")

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// Use diags.HasErrors for consistency with the other plan tests; the
	// old "err == nil" check would also have passed on warnings alone.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_ignoreChanges verifies that an attribute listed in
// ignore_changes keeps its state value (ami stays "ami-abcd1234") even when
// the configuration would set a different value.
func TestContext2Plan_ignoreChanges(t *testing.T) {
	m := testModule(t, "plan-ignore-changes")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state: ami is "ami-abcd1234"; the config (via var.foo) would
	// change it to "ami-1234abcd" were it not ignored.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"ami": "ami-abcd1234"},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["aws_instance"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action)
	}

	if ric.Addr.String() != "aws_instance.foo" {
		t.Fatalf("unexpected resource: %s", ric.Addr)
	}

	// ami must keep its state value despite the differing config value.
	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"id":   cty.StringVal("bar"),
		"ami":  cty.StringVal("ami-abcd1234"),
		"type": cty.StringVal("aws_instance"),
	}), ric.After)
}
// TestContext2Plan_ignoreChangesWildcard plans the
// "plan-ignore-changes-wildcard" fixture with prior state and two
// variables that differ from the stored attributes. With every change
// ignored, the plan must contain only NoOp actions.
func TestContext2Plan_ignoreChangesWildcard(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-wildcard")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Prior state with ami and instance attributes already set.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"ami":      "ami-abcd1234",
								"instance": "t2.micro",
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		// Both variables differ from state, so any non-ignored change
		// would surface as a diff.
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
			"bar": &InputValue{
				Value:      cty.StringVal("t2.small"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// No resource may have a real action planned.
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.NoOp {
			t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Changes.Resources))
		}
	}
}
// TestContext2Plan_ignoreChangesInMap verifies that ignoring an
// individual key inside a map attribute works: state holds
// tags.ignored="from state" and tags.other="from state"; after planning,
// "ignored" must keep its state value while "other" is updated to the
// configured value.
func TestContext2Plan_ignoreChangesInMap(t *testing.T) {
	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_ignore_changes_map": {
				Attributes: map[string]*configschema.Attribute{
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	// Plan simply accepts the proposed new state unchanged.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	p.DiffFn = testDiffFn

	// Prior state: both map keys populated "from state".
	s := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_ignore_changes_map",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"tags":{"ignored":"from state","other":"from state"}}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewLegacyProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})

	m := testModule(t, "plan-ignore-changes-in-map")

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["test_ignore_changes_map"]
	ty := schema.ImpliedType()

	if got, want := len(plan.Changes.Resources), 1; got != want {
		t.Fatalf("wrong number of changes %d; want %d", got, want)
	}

	res := plan.Changes.Resources[0]
	ric, err := res.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action)
	}

	if got, want := ric.Addr.String(), "test_ignore_changes_map.foo"; got != want {
		t.Fatalf("unexpected resource address %s; want %s", got, want)
	}

	// "ignored" key keeps its state value; "other" adopts the config value.
	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"tags": cty.MapVal(map[string]cty.Value{
			"ignored": cty.StringVal("from state"),
			"other":   cty.StringVal("from config"),
		}),
	}), ric.After)
}
// TestContext2Plan_moduleMapLiteral plans the "plan-module-map-literal"
// fixture and checks, inside a custom DiffFn, that both a populated map
// literal ("tags" with foo=bar) and an empty map literal ("meta") arrive
// intact in the resource configuration.
func TestContext2Plan_moduleMapLiteral(t *testing.T) {
	m := testModule(t, "plan-module-map-literal")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"meta": {Type: cty.Map(cty.String), Optional: true},
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// Here we verify that both the populated and empty map literals made it
		// through to the resource attributes
		val, _ := c.Get("tags")
		m, ok := val.(map[string]interface{})
		if !ok {
			t.Fatalf("Tags attr not map: %#v", val)
		}
		if m["foo"] != "bar" {
			t.Fatalf("Bad value in tags attr: %#v", m)
		}
		{
			// The empty literal must still decode to a map, just with
			// no entries.
			val, _ := c.Get("meta")
			m, ok := val.(map[string]interface{})
			if !ok {
				t.Fatalf("Meta attr not map: %#v", val)
			}
			if len(m) != 0 {
				t.Fatalf("Meta attr not empty: %#v", val)
			}
		}
		return nil, nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// The assertions live in DiffFn above; the plan itself just needs
	// to complete without error.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_computedValueInMap plans the
// "plan-computed-value-in-map" fixture, where a computed attribute of
// aws_computed_source feeds a map value consumed in a child module. Both
// resources must be planned for creation with the dependent values
// unknown.
func TestContext2Plan_computedValueInMap(t *testing.T) {
	m := testModule(t, "plan-computed-value-in-map")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"looked_up": {Type: cty.String, Optional: true},
				},
			},
			"aws_computed_source": {
				Attributes: map[string]*configschema.Attribute{
					"computed_read_only": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		switch info.Type {
		case "aws_computed_source":
			// Mark the read-only attribute as computed so it remains
			// unknown during plan.
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"computed_read_only": &ResourceAttrDiff{
						NewComputed: true,
					},
				},
			}, nil
		}

		// All other resource types use the standard test diff.
		return testDiffFn(info, state, c)
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]

		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}

		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}

		// Both instances carry unknown values for the computed chain.
		switch i := ric.Addr.String(); i {
		case "aws_computed_source.intermediates":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"computed_read_only": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.test_mod.aws_instance.inner2":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"looked_up": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_moduleVariableFromSplat plans the
// "plan-module-variable-from-splat" fixture, expecting four created
// instances across two modules, each with "thing" resolved to "doesnt"
// via a splat expression.
func TestContext2Plan_moduleVariableFromSplat(t *testing.T) {
	m := testModule(t, "plan-module-variable-from-splat")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"thing": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]

		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}

		if res.Action != plans.Create {
			t.Fatalf("resource %s should be created", ric.Addr)
		}

		// All four instances across mod1/mod2 get the same value.
		switch i := ric.Addr.String(); i {
		case "module.mod1.aws_instance.test[0]",
			"module.mod1.aws_instance.test[1]",
			"module.mod2.aws_instance.test[0]",
			"module.mod2.aws_instance.test[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"thing": cty.StringVal("doesnt"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}
// TestContext2Plan_createBeforeDestroy_depends_datasource plans the
// "plan-cbd-depends-datasource" fixture without refreshing first
// (simulating "terraform plan -refresh=false"). Because the data source
// is never read, the plan must include deferred Read actions for both
// data.aws_vpc.bar instances alongside the two Create actions, and the
// provider's ReadDataSource must never be invoked.
func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) {
	m := testModule(t, "plan-cbd-depends-datasource")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Optional: true, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.Number, Optional: true},
				},
			},
		},
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		// Fill a null "computed" with an unknown, as a real provider
		// would for an as-yet-unknown computed attribute.
		computedVal := req.ProposedNewState.GetAttr("computed")
		if computedVal.IsNull() {
			computedVal = cty.UnknownVal(cty.String)
		}
		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(map[string]cty.Value{
				"num":      req.ProposedNewState.GetAttr("num"),
				"computed": computedVal,
			}),
		}
	}
	// Reading the data source during this test is a failure condition.
	p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
		return providers.ReadDataSourceResponse{
			Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")),
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	// We're skipping ctx.Refresh here, which simulates what happens when
	// running "terraform plan -refresh=false". As a result, we don't get our
	// usual opportunity to read the data source during the refresh step and
	// thus the plan call below is forced to produce a deferred read action.

	plan, diags := ctx.Plan()
	if p.ReadDataSourceCalled {
		t.Errorf("ReadDataSource was called on the provider, but should not have been because we didn't refresh")
	}
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	seenAddrs := make(map[string]struct{})
	for _, res := range plan.Changes.Resources {
		// Pick the schema matching the resource mode (data vs managed).
		var schema *configschema.Block
		switch res.Addr.Resource.Resource.Mode {
		case addrs.DataResourceMode:
			schema = p.GetSchemaReturn.DataSources[res.Addr.Resource.Resource.Type]
		case addrs.ManagedResourceMode:
			schema = p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]
		}

		ric, err := res.Decode(schema.ImpliedType())
		if err != nil {
			t.Fatal(err)
		}

		seenAddrs[ric.Addr.String()] = struct{}{}

		t.Run(ric.Addr.String(), func(t *testing.T) {
			switch i := ric.Addr.String(); i {
			case "aws_instance.foo[0]":
				if res.Action != plans.Create {
					t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action)
				}

				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"num":      cty.StringVal("2"),
					"computed": cty.UnknownVal(cty.String),
				}), ric.After)
			case "aws_instance.foo[1]":
				if res.Action != plans.Create {
					t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action)
				}

				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"num":      cty.StringVal("2"),
					"computed": cty.UnknownVal(cty.String),
				}), ric.After)
			case "data.aws_vpc.bar[0]":
				if res.Action != plans.Read {
					t.Fatalf("resource %s should be read, got %s", ric.Addr, ric.Action)
				}

				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					// In a normal flow we would've read an exact value in
					// ReadDataSource, but because this test doesn't run
					// ctx.Refresh we have no opportunity to do that lookup
					// and a deferred read is forced.
					"id":  cty.UnknownVal(cty.String),
					"foo": cty.StringVal("0"),
				}), ric.After)
			case "data.aws_vpc.bar[1]":
				if res.Action != plans.Read {
					t.Fatalf("resource %s should be read, got %s", ric.Addr, ric.Action)
				}

				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					// In a normal flow we would've read an exact value in
					// ReadDataSource, but because this test doesn't run
					// ctx.Refresh we have no opportunity to do that lookup
					// and a deferred read is forced.
					"id":  cty.UnknownVal(cty.String),
					"foo": cty.StringVal("1"),
				}), ric.After)
			default:
				t.Fatal("unknown instance:", i)
			}
		})
	}

	// The changeset must contain exactly these four addresses.
	wantAddrs := map[string]struct{}{
		"aws_instance.foo[0]": struct{}{},
		"aws_instance.foo[1]": struct{}{},
		"data.aws_vpc.bar[0]": struct{}{},
		"data.aws_vpc.bar[1]": struct{}{},
	}
	if !cmp.Equal(seenAddrs, wantAddrs) {
		t.Errorf("incorrect addresses in changeset:\n%s", cmp.Diff(wantAddrs, seenAddrs))
	}
}
// interpolated lists need to be stored in the original order.
//
// TestContext2Plan_listOrder plans the "plan-list-order" fixture and
// asserts that aws_instance.a and aws_instance.b produce identical
// planned "After" values, i.e. the interpolated list kept its order.
func TestContext2Plan_listOrder(t *testing.T) {
	m := testModule(t, "plan-list-order")
	p := testProvider("aws")
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	changes := plan.Changes
	rDiffA := changes.ResourceInstance(addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "a",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	rDiffB := changes.ResourceInstance(addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "b",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))

	// Both instances interpolate the same list, so their planned values
	// must be equal (same elements, same order).
	if !cmp.Equal(rDiffA.After, rDiffB.After, valueComparer) {
		t.Fatal(cmp.Diff(rDiffA.After, rDiffB.After, valueComparer))
	}
}
// Make sure ignore-changes doesn't interfere with set/list/map diffs.
// If a resource was being replaced by a RequiresNew attribute that gets
// ignored, we need to filter the diff properly to properly update rather than
// replace.
func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-with-flatmaps")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"user_data":   {Type: cty.String, Optional: true},
					"require_new": {Type: cty.String, Optional: true},

					// This test predates the 0.12 work to integrate cty and
					// HCL, and so it was ported as-is where its expected
					// test output was clearly expecting a list of maps here
					// even though it is named "set".
					"set": {Type: cty.List(cty.Map(cty.String)), Optional: true},
					"lst": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	// Prior state uses legacy flatmap encoding for the collections.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"user_data":   "x",
								"require_new": "",
								"set.#":       "1",
								"set.0.%":     "1",
								"set.0.a":     "1",
								"lst.#":       "1",
								"lst.0":       "j",
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	res := plan.Changes.Resources[0]
	schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type]

	ric, err := res.Decode(schema.ImpliedType())
	if err != nil {
		t.Fatal(err)
	}

	// An Update (not Replace) proves the ignored RequiresNew attribute
	// was filtered out of the diff.
	if res.Action != plans.Update {
		t.Fatalf("resource %s should be updated, got %s", ric.Addr, ric.Action)
	}

	if ric.Addr.String() != "aws_instance.foo" {
		t.Fatalf("unknown resource: %s", ric.Addr)
	}

	checkVals(t, objectVal(t, schema, map[string]cty.Value{
		"lst": cty.ListVal([]cty.Value{
			cty.StringVal("j"),
			cty.StringVal("k"),
		}),
		"require_new": cty.StringVal(""),
		"user_data":   cty.StringVal("x"),
		"set": cty.ListVal([]cty.Value{cty.MapVal(map[string]cty.Value{
			"a": cty.StringVal("1"),
			"b": cty.StringVal("2"),
		})}),
	}), ric.After)
}
// TestContext2Plan_resourceNestedCount ensures resource sets that depend on
// the count of another resource set (ie: count of a data source that depends
// on another data source's instance count - data.x.foo.*.id) get properly
// normalized to the indexes they should be. This case comes up when there is
// an existing state (after an initial apply).
func TestContext2Plan_resourceNestedCount(t *testing.T) {
	m := testModule(t, "nested-resource-count-plan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Refresh echoes the prior state back unchanged, so no drift is
	// introduced before planning.
	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
		return providers.ReadResourceResponse{
			NewState: req.PriorState,
		}
	}

	// Prior state: two instances each of foo, bar and baz, where bar
	// depends on foo and baz depends on bar.
	s := MustShimLegacyState(&State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo0",
							Attributes: map[string]string{
								"id": "foo0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo1",
							Attributes: map[string]string{
								"id": "foo1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo"},
						Primary: &InstanceState{
							ID: "bar0",
							Attributes: map[string]string{
								"id": "bar0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo"},
						Primary: &InstanceState{
							ID: "bar1",
							Attributes: map[string]string{
								"id": "bar1",
							},
						},
					},
					"aws_instance.baz.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar"},
						Primary: &InstanceState{
							ID: "baz0",
							Attributes: map[string]string{
								"id": "baz0",
							},
						},
					},
					"aws_instance.baz.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar"},
						Primary: &InstanceState{
							ID: "baz1",
							Attributes: map[string]string{
								"id": "baz1",
							},
						},
					},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	diags := ctx.Validate()
	if diags.HasErrors() {
		t.Fatalf("validate errors: %s", diags.Err())
	}

	_, diags = ctx.Refresh()
	if diags.HasErrors() {
		t.Fatalf("refresh errors: %s", diags.Err())
	}

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("plan errors: %s", diags.Err())
	}

	// Config and state agree, so every planned change must be a NoOp.
	// (Message fixed: it previously said "should now change", the
	// opposite of what this assertion checks.)
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.NoOp {
			t.Fatalf("resource %s should not change, plan returned %s", res.Addr, res.Action)
		}
	}
}
// Higher level test at TestResource_dataSourceListApplyPanic
//
// TestContext2Plan_computedAttrRefTypeMismatch plans the
// "plan-computed-attr-ref-type-mismatch" fixture and expects a type
// mismatch error for the "ami" argument (a list value assigned where a
// string is required).
func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) {
	m := testModule(t, "plan-computed-attr-ref-type-mismatch")
	p := testProvider("aws")
	// NOTE: the original code assigned testDiffFn here and then
	// immediately replaced it with the custom DiffFn below; the dead
	// assignment has been removed.
	p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse {
		var diags tfdiags.Diagnostics
		if req.TypeName == "aws_instance" {
			amiVal := req.Config.GetAttr("ami")
			if amiVal.Type() != cty.String {
				diags = diags.Append(fmt.Errorf("Expected ami to be cty.String, got %#v", amiVal))
			}
		}
		return providers.ValidateResourceTypeConfigResponse{
			Diagnostics: diags,
		}
	}
	p.DiffFn = func(
		info *InstanceInfo,
		state *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		switch info.Type {
		case "aws_ami_list":
			// Emulate a diff that says "we'll create this list and ids will be populated"
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"ids.#": &ResourceAttrDiff{NewComputed: true},
				},
			}, nil
		case "aws_instance":
			// If we get to the diff for instance, we should be able to assume types
			ami, _ := c.Get("ami")
			_ = ami.(string)
		}

		return nil, nil
	}
	p.ApplyFn = func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) {
		if info.Type != "aws_ami_list" {
			t.Fatalf("Reached apply for unexpected resource type! %s", info.Type)
		}
		// Pretend like we make a thing and the computed list "ids" is populated
		return &InstanceState{
			ID: "someid",
			Attributes: map[string]string{
				"ids.#": "2",
				"ids.0": "ami-abc123",
				"ids.1": "ami-bcd345",
			},
		}, nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("Succeeded; want type mismatch error for 'ami' argument")
	}

	expected := `Inappropriate value for attribute "ami"`
	if errStr := diags.Err().Error(); !strings.Contains(errStr, expected) {
		t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, expected)
	}
}
// TestContext2Plan_selfRef verifies that a resource referencing one of
// its own attributes passes Validate but fails Plan with a
// "Self-referential block" error.
func TestContext2Plan_selfRef(t *testing.T) {
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	cfg := testModule(t, "plan-self-ref")
	ctx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	// Validation alone does not catch the self-reference.
	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	const wantErrStr = "Self-referential block"
	gotErrStr := diags.Err().Error()
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_selfRefMulti is like TestContext2Plan_selfRef but
// uses the "plan-self-ref-multi" fixture (a counted resource referring
// to its own attribute); Validate passes and Plan fails with a
// "Self-referential block" error.
func TestContext2Plan_selfRefMulti(t *testing.T) {
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	cfg := testModule(t, "plan-self-ref-multi")
	ctx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	// Validation alone does not catch the self-reference.
	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	const wantErrStr = "Self-referential block"
	gotErrStr := diags.Err().Error()
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_selfRefMultiAll uses the "plan-self-ref-multi-all"
// fixture (a splat self-reference across all instances). Validate
// passes, but Plan fails — here with a "Cycle" error, because the graph
// cycle is detected before the self-reference check runs.
func TestContext2Plan_selfRefMultiAll(t *testing.T) {
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	m := testModule(t, "plan-self-ref-multi-all")
	c := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	diags := c.Validate()
	if diags.HasErrors() {
		t.Fatalf("unexpected validation failure: %s", diags.Err())
	}

	_, diags = c.Plan()
	if !diags.HasErrors() {
		t.Fatalf("plan succeeded; want error")
	}

	gotErrStr := diags.Err().Error()

	// The graph is checked for cycles before we can walk it, so we don't
	// encounter the self-reference check.
	//wantErrStr := "Self-referential block"
	wantErrStr := "Cycle"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_invalidOutput verifies that an output referencing a
// nonexistent data source attribute fails the plan with an
// "Unsupported attribute" error.
func TestContext2Plan_invalidOutput(t *testing.T) {
	cfg := testModuleInline(t, map[string]string{
		"main.tf": `
data "aws_data_source" "name" {}
output "out" {
value = "${data.aws_data_source.name.missing}"
}`,
	})

	prov := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(prov),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Unsupported attribute: This object does not have an attribute named "missing"
		t.Fatal("succeeded; want errors")
	}

	const wantErrStr = "Unsupported attribute"
	gotErrStr := diags.Err().Error()
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_invalidModuleOutput is the module variant of
// TestContext2Plan_invalidOutput: a child module's output references a
// nonexistent data source attribute, and the root module consumes that
// output. The plan must fail with an "Unsupported attribute" error.
func TestContext2Plan_invalidModuleOutput(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"child/main.tf": `
data "aws_data_source" "name" {}
output "out" {
value = "${data.aws_data_source.name.missing}"
}`,
		"main.tf": `
module "child" {
source = "./child"
}
resource "aws_instance" "foo" {
foo = "${module.child.out}"
}`,
	})

	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get this error:
		// Unsupported attribute: This object does not have an attribute named "missing"
		t.Fatal("succeeded; want errors")
	}

	gotErrStr := diags.Err().Error()
	wantErrStr := "Unsupported attribute"
	if !strings.Contains(gotErrStr, wantErrStr) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr)
	}
}
// TestContext2Plan_variableValidation verifies that a provider's
// ValidateResourceTypeConfig diagnostics fail the plan: the config sets
// foo = var.x (default "bar"), and the validate hook rejects exactly
// that value.
func TestContext2Plan_variableValidation(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
variable "x" {
default = "bar"
}
resource "aws_instance" "foo" {
foo = var.x
}`,
	})

	p := testProvider("aws")
	// Reject the default value of var.x during resource validation.
	p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
		foo := req.Config.GetAttr("foo").AsString()
		if foo == "bar" {
			resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo cannot be bar"))
		}
		return
	}

	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		// Should get the "foo cannot be bar" diagnostic appended by the
		// ValidateResourceTypeConfigFn above.
		t.Fatal("succeeded; want errors")
	}
}
// checkVals fails the test when got does not match expected, comparing
// with the shared value/type comparers and treating empty collections
// as equal.
func checkVals(t *testing.T, expected, got cty.Value) {
	t.Helper()
	if cmp.Equal(expected, got, valueComparer, typeComparer, equateEmpty) {
		return
	}
	t.Fatal(cmp.Diff(expected, got, valueTrans, equateEmpty))
}
// objectVal coerces m into an object value conforming to schema,
// failing the test on any coercion error.
func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) cty.Value {
	t.Helper()
	coerced, err := schema.CoerceValue(cty.ObjectVal(m))
	if err != nil {
		t.Fatal(err)
	}
	return coerced
}
// TestContext2Plan_requiredModuleOutput plans the "plan-required-output"
// fixture: test_resource.root's required attribute comes from a module
// output, so it plans as unknown, while the module's own resource has
// the concrete value "val".
func TestContext2Plan_requiredModuleOutput(t *testing.T) {
	m := testModule(t, "plan-required-output")

	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_resource": {
				Attributes: map[string]*configschema.Attribute{
					"id":       {Type: cty.String, Computed: true},
					"required": {Type: cty.String, Required: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["test_resource"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			var expected cty.Value
			switch i := ric.Addr.String(); i {
			case "test_resource.root":
				// "required" comes from the module output, which is not
				// known until apply.
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.UnknownVal(cty.String),
				})
			case "module.mod.test_resource.for_output":
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.StringVal("val"),
				})
			default:
				t.Fatal("unknown instance:", i)
			}

			checkVals(t, expected, ric.After)
		})
	}
}
// TestContext2Plan_requiredModuleObject is the whole-module variant of
// TestContext2Plan_requiredModuleOutput, using the
// "plan-required-whole-mod" fixture; the expected plan shape is the
// same: two Create actions, with the root resource's required value
// unknown and the module resource's value "val".
func TestContext2Plan_requiredModuleObject(t *testing.T) {
	m := testModule(t, "plan-required-whole-mod")

	p := testProvider("test")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_resource": {
				Attributes: map[string]*configschema.Attribute{
					"id":       {Type: cty.String, Computed: true},
					"required": {Type: cty.String, Required: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("test"): testProviderFuncFixed(p),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetSchemaReturn.ResourceTypes["test_resource"]
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) {
			if res.Action != plans.Create {
				t.Fatalf("expected resource creation, got %s", res.Action)
			}
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			var expected cty.Value
			switch i := ric.Addr.String(); i {
			case "test_resource.root":
				// "required" flows through the module object and is not
				// known until apply.
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.UnknownVal(cty.String),
				})
			case "module.mod.test_resource.for_output":
				expected = objectVal(t, schema, map[string]cty.Value{
					"id":       cty.UnknownVal(cty.String),
					"required": cty.StringVal("val"),
				})
			default:
				t.Fatal("unknown instance:", i)
			}

			checkVals(t, expected, ric.After)
		})
	}
}
// TestContext2Plan_expandOrphan plans an inline config with
// module "mod" count = 1 against state that holds instances for
// module.mod[0] and module.mod[1]. The plan must delete the orphaned
// module.mod[1] instance and leave module.mod[0] unchanged.
func TestContext2Plan_expandOrphan(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
module "mod" {
count = 1
source = "./mod"
}
`,
		"mod/main.tf": `
resource "aws_instance" "foo" {
}
`,
	})

	// State has two module instances, but config only declares one.
	state := states.NewState()
	state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(0))).SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"child"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/-/aws"]`),
	)
	state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(1))).SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"child"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/-/aws"]`),
	)

	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: providers.ResolverFixed(
			map[addrs.Provider]providers.Factory{
				addrs.NewLegacyProvider("aws"): testProviderFuncFixed(p),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// Exactly these actions, and nothing else, must be planned.
	expected := map[string]plans.Action{
		`module.mod[1].aws_instance.foo`: plans.Delete,
		`module.mod[0].aws_instance.foo`: plans.NoOp,
	}

	for _, res := range plan.Changes.Resources {
		want := expected[res.Addr.String()]
		if res.Action != want {
			t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action)
		}
		delete(expected, res.Addr.String())
	}

	// Anything left in the map never appeared in the changeset.
	for res, action := range expected {
		t.Errorf("missing %s change for %s", action, res)
	}
}
|
package terraform
import (
"bytes"
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
)
// TestContext2Plan_basic plans the "plan-good" fixture and checks the diff
// size, the recorded provider SHA256 digests, and the rendered plan string
// against the golden value.
func TestContext2Plan_basic(t *testing.T) {
	module := testModule(t, "plan-good")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		ProviderSHA256s: map[string][]byte{
			"aws": []byte("placeholder"),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if resources := plan.Diff.RootModule().Resources; len(resources) < 2 {
		t.Fatalf("bad: %#v", resources)
	}
	// The digests given in ContextOpts must be carried through to the plan.
	if !reflect.DeepEqual(plan.ProviderSHA256s, ctx.providerSHA256s) {
		t.Errorf("wrong ProviderSHA256s %#v; want %#v", plan.ProviderSHA256s, ctx.providerSHA256s)
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_createBefore_deposed plans against state holding a deposed
// object alongside the current one; the plan must destroy only the deposed
// instance ("deposed only") and leave the current object untouched.
func TestContext2Plan_createBefore_deposed(t *testing.T) {
	m := testModule(t, "plan-cbd")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// State: aws_instance.foo has current ID "baz" plus one deposed object
	// (ID "foo") left behind by a create_before_destroy apply.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
						Deposed: []*InstanceState{
							&InstanceState{ID: "foo"},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
DESTROY: aws_instance.foo (deposed only)
STATE:
aws_instance.foo: (1 deposed)
ID = baz
Deposed ID 1 = foo
`)
	if actual != expected {
		t.Fatalf("expected:\n%s, got:\n%s", expected, actual)
	}
}
// TestContext2Plan_createBefore_maintainRoot plans the
// "plan-cbd-maintain-root" fixture with an input variable and checks that
// all four instances are created from an empty state.
func TestContext2Plan_createBefore_maintainRoot(t *testing.T) {
	m := testModule(t, "plan-cbd-maintain-root")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		// The fixture expects a comma-separated value for var "in".
		Variables: InputValues{
			"in": &InputValue{
				Value:      cty.StringVal("a,b,c"),
				SourceType: ValueFromCaller,
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.bar.0
CREATE: aws_instance.bar.1
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
STATE:
<no state>
`)
	if actual != expected {
		t.Fatalf("expected:\n%s, got:\n%s", expected, actual)
	}
}
func TestContext2Plan_emptyDiff(t *testing.T) {
m := testModule(t, "plan-empty")
p := testProvider("aws")
p.DiffFn = func(
info *InstanceInfo,
s *InstanceState,
c *ResourceConfig) (*InstanceDiff, error) {
return nil, nil
}
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanEmptyStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_escapedVar(t *testing.T) {
m := testModule(t, "plan-escaped-var")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanEscapedVarStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_minimal(t *testing.T) {
m := testModule(t, "plan-empty")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanEmptyStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_modules(t *testing.T) {
m := testModule(t, "plan-modules")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModulesStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
// GH-1475: planning a configuration whose modules reference each other's
// values must not produce a graph cycle; the plan should succeed and match
// the golden rendering.
func TestContext2Plan_moduleCycle(t *testing.T) {
	m := testModule(t, "plan-module-cycle")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Computed: true},
					"some_input": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleCycleStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_moduleDeadlock guards against a regression where planning
// a counted resource inside a module deadlocked; the whole run is wrapped in
// testCheckDeadlock so a hang fails fast instead of blocking the suite.
func TestContext2Plan_moduleDeadlock(t *testing.T) {
	testCheckDeadlock(t, func() {
		m := testModule(t, "plan-module-deadlock")
		p := testProvider("aws")
		p.DiffFn = testDiffFn

		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: ResourceProviderResolverFixed(
				map[string]ResourceProviderFactory{
					"aws": testProviderFuncFixed(p),
				},
			),
		})

		// Plan returns diagnostics, not a plain error; check HasErrors (as
		// the other tests in this file do) so warnings alone don't fail.
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		actual := strings.TrimSpace(plan.String())
		expected := strings.TrimSpace(`
DIFF:
module.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
		if actual != expected {
			t.Fatalf("expected:\n%sgot:\n%s", expected, actual)
		}
	})
}
func TestContext2Plan_moduleInput(t *testing.T) {
m := testModule(t, "plan-module-input")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModuleInputStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_moduleInputComputed(t *testing.T) {
m := testModule(t, "plan-module-input-computed")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModuleInputComputedStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_moduleInputFromVar(t *testing.T) {
m := testModule(t, "plan-module-input-var")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
Variables: InputValues{
"foo": &InputValue{
Value: cty.StringVal("52"),
SourceType: ValueFromCaller,
},
},
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModuleInputVarStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
// TestContext2Plan_moduleMultiVar plans the "plan-module-multi-var" fixture
// (splat/multi-value references between resources) against a custom schema
// and checks the rendering against the golden string.
func TestContext2Plan_moduleMultiVar(t *testing.T) {
	m := testModule(t, "plan-module-multi-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
					"baz": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleMultiVarStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_moduleOrphans plans a config from which a child module was
// removed while state still tracks its resource; the orphan must be planned
// for destruction, matching the golden rendering.
func TestContext2Plan_moduleOrphans(t *testing.T) {
	m := testModule(t, "plan-modules-remove")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// State tracks module.child, which no longer exists in the config.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleOrphansStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// https://github.com/hashicorp/terraform/issues/3114
// Removing nested modules whose resources carry provisioners must still plan
// the orphans for destruction without errors from the provisioner graph.
func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) {
	m := testModule(t, "plan-modules-remove-provisioners")
	p := testProvider("aws")
	pr := testProvisioner()
	p.DiffFn = testDiffFn
	// State: one root resource that stays, plus two nested child modules
	// (parent.childone / parent.childtwo) that are gone from the config.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.top": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "top",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childone"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childtwo"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Provisioners: map[string]ResourceProvisionerFactory{
			"shell": testProvisionerFuncFixed(pr),
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
module.parent.childone:
DESTROY: aws_instance.foo
module.parent.childtwo:
DESTROY: aws_instance.foo
STATE:
aws_instance.top:
ID = top
module.parent.childone:
aws_instance.foo:
ID = baz
module.parent.childtwo:
aws_instance.foo:
ID = baz
`)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_moduleProviderInherit verifies that a provider configured
// only at the root is inherited by a child module: ConfigureFn must see
// from="root", and DiffFn must run for resources in both root and child
// (recorded in calls and compared at the end).
func TestContext2Plan_moduleProviderInherit(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-inherit")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					// The factory may be invoked concurrently; guard the
					// shared calls slice.
					l.Lock()
					defer l.Unlock()
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Inherited provider config must carry the root value.
						if v, ok := c.Get("from"); !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						v, _ := c.Get("from")
						l.Lock()
						defer l.Unlock()
						calls = append(calls, v.(string))
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns diagnostics; use HasErrors (like the rest of this file)
	// so warnings alone don't fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	actual := calls
	sort.Strings(actual)
	expected := []string{"child", "root"}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
// This tests (for GH-11282) that deeply nested modules properly inherit
// configuration.
func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) {
	var l sync.Mutex
	m := testModule(t, "plan-module-provider-inherit-deep")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					// Factory may be invoked concurrently; serialize setup.
					l.Lock()
					defer l.Unlock()

					// Captured by ConfigureFn/DiffFn: DiffFn asserts that
					// Configure ran first with the root value.
					var from string
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						v, ok := c.Get("from")
						if !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}
						from = v.(string)
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						if from != "root" {
							return nil, fmt.Errorf("bad resource")
						}
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Plan returns diagnostics; check HasErrors for consistency with the
	// rest of this file so warnings alone don't fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}
}
// TestContext2Plan_moduleProviderDefaultsVar verifies provider configuration
// built from variables: ConfigureFn records the from/to values it receives
// for the root and child provider configs, and the recorded set is compared
// at the end.
func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-defaults-var")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					// Factory may be invoked concurrently; serialize setup.
					l.Lock()
					defer l.Unlock()
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"to":   {Type: cty.String, Optional: true},
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Record whichever of from/to are set, one line each.
						var buf bytes.Buffer
						if v, ok := c.Get("from"); ok {
							buf.WriteString(v.(string) + "\n")
						}
						if v, ok := c.Get("to"); ok {
							buf.WriteString(v.(string) + "\n")
						}
						l.Lock()
						defer l.Unlock()
						calls = append(calls, buf.String())
						return nil
					}
					p.DiffFn = testDiffFn
					return p, nil
				},
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("root"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Plan returns diagnostics; use HasErrors (like the rest of this file)
	// so warnings alone don't fail the test.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	expected := []string{
		"child\nchild\n",
		"root\n",
	}
	sort.Strings(calls)
	if !reflect.DeepEqual(calls, expected) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls)
	}
}
// TestContext2Plan_moduleProviderVar plans a config where a module-level
// provider block is configured from a variable, and checks the rendering
// against the golden string.
func TestContext2Plan_moduleProviderVar(t *testing.T) {
	m := testModule(t, "plan-module-provider-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"value": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleProviderVarStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
func TestContext2Plan_moduleVar(t *testing.T) {
m := testModule(t, "plan-module-var")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModuleVarStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) {
m := testModule(t, "plan-module-wrong-var-type")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) {
m := testModule(t, "plan-module-wrong-var-type-nested")
p := testProvider("null")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"null": testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) {
m := testModule(t, "plan-module-var-with-default-value")
p := testProvider("null")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"null": testProviderFuncFixed(p),
},
),
})
_, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
}
func TestContext2Plan_moduleVarComputed(t *testing.T) {
m := testModule(t, "plan-module-var-computed")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanModuleVarComputedStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
// TestContext2Plan_nil plans the "plan-nil" fixture against state that
// already matches it, expecting an empty diff (no resource changes).
func TestContext2Plan_nil(t *testing.T) {
	m := testModule(t, "plan-nil")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"nil": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		// Pre-existing state for the resource so planning finds nothing to do.
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "bar",
							},
						},
					},
				},
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Diff.RootModule().Resources) != 0 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}
}
// TestContext2Plan_preventDestroy_bad expects planning to fail when a change
// would destroy a resource marked lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_bad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
	})
	// "err" is the diagnostics value; rendering it via %s is how this test
	// performs the substring check across everything Plan reported.
	plan, err := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, err, plan)
	}
}
// TestContext2Plan_preventDestroy_good verifies that prevent_destroy causes
// no failure when the plan does not destroy anything: the diff stays empty.
func TestContext2Plan_preventDestroy_good(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !plan.Diff.Empty() {
		t.Fatalf("Expected empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_countBad expects planning to fail when
// reducing count would destroy a counted instance (index 1) that is marked
// lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_countBad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		},
	})
	// "err" is the diagnostics value; rendering it via %s performs the
	// substring check across everything Plan reported.
	plan, err := ctx.Plan()
	expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, err, plan)
	}
}
// TestContext2Plan_preventDestroy_countGood verifies that a counted resource
// with prevent_destroy still plans successfully (non-empty diff) when no
// instance needs to be destroyed.
func TestContext2Plan_preventDestroy_countGood(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if plan.Diff.Empty() {
		t.Fatalf("Expected non-empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_countGoodNoChange verifies that when state
// already matches the config (attributes included), a counted resource with
// prevent_destroy yields an empty plan.
func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
								// Attributes mirror the config so the diff is a no-op.
								Attributes: map[string]string{
									"current": "0",
									"type":    "aws_instance",
								},
							},
						},
					},
				},
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if !plan.Diff.Empty() {
		t.Fatalf("Expected empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_destroyPlan expects a destroy-mode plan
// (Destroy: true) to fail for a resource marked lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
		// Request a full destroy so prevent_destroy must trip.
		Destroy: true,
	})
	// "err" is the diagnostics value; rendering it via %s performs the
	// substring check across everything Plan reported.
	plan, err := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, err, plan)
	}
}
func TestContext2Plan_provisionerCycle(t *testing.T) {
m := testModule(t, "plan-provisioner-cycle")
p := testProvider("aws")
p.DiffFn = testDiffFn
pr := testProvisioner()
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
Provisioners: map[string]ResourceProvisionerFactory{
"local-exec": testProvisionerFuncFixed(pr),
},
})
_, diags := ctx.Plan()
if !diags.HasErrors() {
t.Fatalf("succeeded; want errors")
}
}
func TestContext2Plan_computed(t *testing.T) {
m := testModule(t, "plan-computed")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanComputedStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
// TestContext2Plan_computedDataResource verifies that a data resource whose
// config depends on a computed value is deferred to apply: its diff must be
// exactly a NewComputed "id" output attribute.
func TestContext2Plan_computedDataResource(t *testing.T) {
	m := testModule(t, "plan-computed-data-resource")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}
	moduleDiff := plan.Diff.Modules[0]
	if _, ok := moduleDiff.Resources["aws_instance.foo"]; !ok {
		t.Fatalf("missing diff for aws_instance.foo")
	}
	iDiff, ok := moduleDiff.Resources["data.aws_vpc.bar"]
	if !ok {
		t.Fatalf("missing diff for data.aws_vpc.bar")
	}
	// The deferred data read shows up as a computed "id" only.
	expectedDiff := &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"id": {
				NewComputed: true,
				RequiresNew: true,
				Type:        DiffAttrOutput,
			},
		},
	}
	if same, _ := expectedDiff.Same(iDiff); !same {
		t.Fatalf(
			"incorrect diff for data.aws_vpc.bar\ngot: %#v\nwant: %#v",
			iDiff, expectedDiff,
		)
	}
}
// TestContext2Plan_computedDataCountResource verifies that a counted data
// resource depending on a computed value yields one deferred diff entry per
// count index.
func TestContext2Plan_computedDataCountResource(t *testing.T) {
	m := testModule(t, "plan-computed-data-count")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}
	moduleDiff := plan.Diff.Modules[0]
	// make sure we created 3 "bar"s
	for i := 0; i < 3; i++ {
		resource := fmt.Sprintf("data.aws_vpc.bar.%d", i)
		if _, ok := moduleDiff.Resources[resource]; !ok {
			t.Fatalf("missing diff for %s", resource)
		}
	}
}
// TestContext2Plan_localValueCount verifies that a resource count driven by
// a local value produces the expected number of planned instances.
func TestContext2Plan_localValueCount(t *testing.T) {
	m := testModule(t, "plan-local-value-count")
	p := testProvider("test")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"test": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}
	moduleDiff := plan.Diff.Modules[0]
	// make sure we created 3 instances of test_resource.foo
	for i := 0; i < 3; i++ {
		resource := fmt.Sprintf("test_resource.foo.%d", i)
		if _, ok := moduleDiff.Resources[resource]; !ok {
			t.Fatalf("missing diff for %s", resource)
		}
	}
}
// Higher level test at TestResource_dataSourceListPlanPanic
// Assigning a list-typed data source attribute to a string-typed resource
// argument must fail validation with a type error rather than panicking.
func TestContext2Plan_dataSourceTypeMismatch(t *testing.T) {
	m := testModule(t, "plan-data-source-type-mismatch")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami": {Type: cty.String, Optional: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_availability_zones": {
				Attributes: map[string]*configschema.Attribute{
					"names": {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	p.ValidateResourceFn = func(t string, c *ResourceConfig) (ws []string, es []error) {
		// Emulate the type checking behavior of helper/schema based validation
		if t == "aws_instance" {
			ami, _ := c.Get("ami")
			switch a := ami.(type) {
			case string:
				// ok
			default:
				es = append(es, fmt.Errorf("Expected ami to be string, got %T", a))
			}
		}
		return
	}
	p.DiffFn = func(
		info *InstanceInfo,
		state *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if info.Type == "aws_instance" {
			// If we get to the diff, we should be able to assume types
			ami, _ := c.Get("ami")
			_ = ami.(string)
		}
		return nil, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		// Pretend like we ran a Refresh and the AZs data source was populated.
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"data.aws_availability_zones.azs": &ResourceState{
							Type: "aws_availability_zones",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"names.#": "2",
									"names.0": "us-east-1a",
									"names.1": "us-east-1b",
								},
							},
						},
					},
				},
			},
		},
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("Expected err, got none!")
	}
	expected := `Inappropriate value for attribute "ami": incorrect type; string required`
	if errStr := diags.Err().Error(); !strings.Contains(errStr, expected) {
		t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, expected)
	}
}
// TestContext2Plan_dataResourceBecomesComputed verifies that a data resource
// already present in state is re-read (ReadDataDiff is called) when its
// result becomes computed during plan, and that the resulting diff matches
// the provider's ReadDataDiffReturn.
func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) {
	m := testModule(t, "plan-data-resource-becomes-computed")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_data_resource": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	// NOTE(review): DiffFn may be invoked during the graph walk, possibly off
	// the test goroutine; t.Fatalf there would only stop that goroutine —
	// confirm whether t.Error plus return would be safer here.
	p.DiffFn = func(info *InstanceInfo, state *InstanceState, config *ResourceConfig) (*InstanceDiff, error) {
		if info.Type != "aws_instance" {
			t.Fatalf("don't know how to diff %s", info.Id)
			return nil, nil
		}
		// Force "computed" to be unknown so the data resource that depends on
		// it must itself become computed.
		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"computed": &ResourceAttrDiff{
					Old:         "",
					New:         "",
					NewComputed: true,
				},
			},
		}, nil
	}
	// Canned diff the provider returns for the data read; the test later
	// compares the planned diff against this.
	p.ReadDataDiffReturn = &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"foo": &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		// Prior state: the data resource was previously read successfully.
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"data.aws_data_resource.foo": &ResourceState{
							Type: "aws_data_resource",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"id":    "i-abc123",
									"value": "baz",
								},
							},
						},
					},
				},
			},
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}
	if !p.ReadDataDiffCalled {
		t.Fatal("ReadDataDiff wasn't called, but should've been")
	}
	if got, want := p.ReadDataDiffInfo.Id, "data.aws_data_resource.foo"; got != want {
		t.Fatalf("ReadDataDiff info id is %s; want %s", got, want)
	}
	moduleDiff := plan.Diff.Modules[0]
	iDiff, ok := moduleDiff.Resources["data.aws_data_resource.foo"]
	if !ok {
		t.Fatalf("missing diff for data.aws_data_resource.foo")
	}
	// This is added by the diff but we want to verify that we got
	// the same diff as above minus the dynamic stuff.
	delete(iDiff.Attributes, "id")
	if same, _ := p.ReadDataDiffReturn.Same(iDiff); !same {
		t.Fatalf(
			"incorrect diff for data.data_resource.foo\ngot:  %#v\nwant: %#v",
			iDiff, p.ReadDataDiffReturn,
		)
	}
}
// TestContext2Plan_computedList verifies planning of a resource whose schema
// declares a computed list attribute, comparing the rendered plan against
// the canned expectation.
func TestContext2Plan_computedList(t *testing.T) {
	mod := testModule(t, "plan-computed-list")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Optional: true},
					"num":     {Type: cty.String, Optional: true},
					"list":    {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanComputedListStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// GH-8695. This tests that you can index into a computed list on a
// splatted resource.
func TestContext2Plan_computedMultiIndex(t *testing.T) {
	mod := testModule(t, "plan-computed-multi-index")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanComputedMultiIndexStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_count plans a configuration using count and checks both
// the number of planned resources and the rendered plan text.
func TestContext2Plan_count(t *testing.T) {
	mod := testModule(t, "plan-count")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if n := len(plan.Diff.RootModule().Resources); n < 6 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countComputed verifies that a computed value used for
// "count" is rejected at plan time with an error diagnostic.
func TestContext2Plan_countComputed(t *testing.T) {
	m := testModule(t, "plan-count-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	// Plan returns diagnostics, not a plain error; check HasErrors rather
	// than comparing to nil so a warning-only (non-empty but error-free)
	// diagnostic set can't satisfy this test. This also matches how every
	// sibling test in this file inspects Plan's result.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_countComputedModule verifies that a computed count inside
// a module produces the expected "a number is required" error.
func TestContext2Plan_countComputedModule(t *testing.T) {
	m := testModule(t, "plan-count-computed-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	// Plan returns diagnostics; guard with HasErrors before formatting so we
	// never call Error() on a nil error, and use diags.Err() instead of
	// fmt.Sprintf("%s", diags) for consistency with the rest of this file.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("expected errors, got none")
	}
	expectedErr := "a number is required"
	if errStr := diags.Err().Error(); !strings.Contains(errStr, expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\n",
			expectedErr, errStr)
	}
}
// TestContext2Plan_countModuleStatic plans a module with a statically-known
// count and checks the rendered plan for the three expected creates.
func TestContext2Plan_countModuleStatic(t *testing.T) {
	mod := testModule(t, "plan-count-module-static")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
module.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countModuleStaticGrandchild is the same as the static
// module count test, but with the counted resources two module levels deep.
func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) {
	mod := testModule(t, "plan-count-module-static-grandchild")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
module.child.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countIndex plans a configuration that interpolates
// count.index and compares against the canned plan string.
func TestContext2Plan_countIndex(t *testing.T) {
	mod := testModule(t, "plan-count-index")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountIndexStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countVar plans a configuration whose count comes from an
// input variable supplied by the caller.
func TestContext2Plan_countVar(t *testing.T) {
	mod := testModule(t, "plan-count-var")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Variables: InputValues{
			"count": &InputValue{
				Value:      cty.StringVal("3"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountVarStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countZero plans a resource with count = 0 and verifies
// the resulting (empty) plan rendering.
func TestContext2Plan_countZero(t *testing.T) {
	mod := testModule(t, "plan-count-zero")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.DynamicPseudoType, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountZeroStr)
	if got != want {
		t.Logf("expected:\n%s", want)
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countOneIndex plans a configuration that indexes into a
// count-of-one resource and compares against the canned plan string.
func TestContext2Plan_countOneIndex(t *testing.T) {
	mod := testModule(t, "plan-count-one-index")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountOneIndexStr)
	if got != want {
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countDecreaseToOne verifies the plan produced when the
// configured count shrinks below the number of instances in state: the
// surplus indexed instances should be planned for destruction.
func TestContext2Plan_countDecreaseToOne(t *testing.T) {
	m := testModule(t, "plan-count-dec")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state has three indexed instances (foo.0 .. foo.2).
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.foo.2": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanCountDecreaseStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_countIncreaseFromNotSet verifies the plan produced when a
// resource previously stored without an index ("aws_instance.foo") gains a
// count in configuration.
func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: a single un-indexed instance.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanCountIncreaseStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_countIncreaseFromOne verifies the plan produced when the
// count grows and the prior state holds a single indexed instance (foo.0).
func TestContext2Plan_countIncreaseFromOne(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: one indexed instance only.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// https://github.com/PeoplePerHour/terraform/pull/11
//
// This tests a case where both a "resource" and "resource.0" are in
// the state file, which apparently is a reasonable backwards compatibility
// concern found in the above 3rd party repo.
func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// "Corrupted" prior state: the same logical resource is recorded both
	// un-indexed and as index 0.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneCorruptedStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// A common pattern in TF configs is to have a set of resources with the same
// count and to use count.index to create correspondences between them:
//
//	foo_id = "${foo.bar.*.id[count.index]}"
//
// This test is for the situation where some instances already exist and the
// count is increased. In that case, we should see only the create diffs
// for the new instances and not any update diffs for the existing ones.
func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) {
	m := testModule(t, "plan-count-splat-reference")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"name":     {Type: cty.String, Optional: true},
					"foo_name": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Prior state: two "foo" instances and two corresponding "bar" instances
	// whose foo_name mirrors foo's name via the splat reference.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"name": "foo 1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo_name": "foo 1",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// Only index 2 of each resource should be created; existing instances
	// must show no update diffs.
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.bar.2
foo_name: "" => "foo 2"
type: "" => "aws_instance"
CREATE: aws_instance.foo.2
name: "" => "foo 2"
type: "" => "aws_instance"
STATE:
aws_instance.bar.0:
ID = bar
foo_name = foo 0
aws_instance.bar.1:
ID = bar
foo_name = foo 1
aws_instance.foo.0:
ID = bar
name = foo 0
aws_instance.foo.1:
ID = bar
name = foo 1
`)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_destroy verifies that a destroy plan schedules every
// resource in state for destruction.
func TestContext2Plan_destroy(t *testing.T) {
	m := testModule(t, "plan-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: two instances to be destroyed.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.one": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
					"aws_instance.two": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Diff.RootModule().Resources) != 2 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanDestroyStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_moduleDestroy verifies a destroy plan that spans both the
// root module and a child module.
func TestContext2Plan_moduleDestroy(t *testing.T) {
	m := testModule(t, "plan-module-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: one instance in root and one in module "child".
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyStr)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected)
	}
}
// GH-1835
//
// TestContext2Plan_moduleDestroyCycle covers a regression where destroying
// two sibling modules could produce a dependency cycle.
func TestContext2Plan_moduleDestroyCycle(t *testing.T) {
	m := testModule(t, "plan-module-destroy-gh-1835")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: one instance in each of two sibling modules.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "a_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.a": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "a",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "b_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.b": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "b",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyCycleStr)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected)
	}
}
// TestContext2Plan_moduleDestroyMultivar verifies destroying counted
// (multi-instance) resources that live inside a child module.
func TestContext2Plan_moduleDestroyMultivar(t *testing.T) {
	m := testModule(t, "plan-module-destroy-multivar")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: empty root module plus two indexed instances in "child".
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path:      rootModulePath,
				Resources: map[string]*ResourceState{},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar0",
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar1",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyMultivarStr)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected)
	}
}
// TestContext2Plan_pathVar verifies that path.cwd, path.module and path.root
// interpolate into the plan as the expected filesystem paths.
func TestContext2Plan_pathVar(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	mod := testModule(t, "plan-path-var")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"cwd":    {Type: cty.String, Optional: true},
					"module": {Type: cty.String, Optional: true},
					"root":   {Type: cty.String, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	// Warning: this ordering REALLY matters for this test. The
	// order is: cwd, module, root.
	want := fmt.Sprintf(
		strings.TrimSpace(testTerraformPlanPathVarStr),
		cwd,
		mod.Module.SourceDir,
		mod.Module.SourceDir,
	)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", got, want)
	}
}
// TestContext2Plan_diffVar verifies that a diff returned by the provider for
// an existing instance (num 2 -> 3) is reflected in the plan, while other
// instances fall through to the standard test diff function.
func TestContext2Plan_diffVar(t *testing.T) {
	m := testModule(t, "plan-diffvar")
	p := testProvider("aws")
	// Prior state: one instance with num=2 that the custom DiffFn below will
	// propose changing to 3.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"num": "2",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	// DiffFn is assigned after context creation but before Plan; only the
	// instance with ID "bar" gets the custom 2 -> 3 diff.
	p.DiffFn = func(
		info *InstanceInfo,
		s *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if s.ID != "bar" {
			return testDiffFn(info, s, c)
		}
		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"num": &ResourceAttrDiff{
					Old: "2",
					New: "3",
				},
			},
		}, nil
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanDiffVarStr)
	if actual != expected {
		t.Fatalf("actual:\n%s\n\nexpected:\n%s", actual, expected)
	}
}
// TestContext2Plan_hook verifies that the pre-diff and post-diff hooks fire
// during a plan.
func TestContext2Plan_hook(t *testing.T) {
	mod := testModule(t, "plan-good")
	hook := new(MockHook)
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		Hooks:  []Hook{hook},
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	if _, diags := tfCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !hook.PreDiffCalled {
		t.Fatal("should be called")
	}
	if !hook.PostDiffCalled {
		t.Fatal("should be called")
	}
}
// TestContext2Plan_closeProvider verifies that providers are closed once
// planning completes.
func TestContext2Plan_closeProvider(t *testing.T) {
	// this fixture only has an aliased provider located in the module, to make
	// sure that the provider name contains a path more complex than
	// "provider.aws".
	mod := testModule(t, "plan-close-module-provider")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	if _, diags := tfCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !prov.CloseCalled {
		t.Fatal("provider not closed")
	}
}
// TestContext2Plan_orphan verifies that an instance present in state but
// absent from configuration (an orphan) is planned for destruction.
func TestContext2Plan_orphan(t *testing.T) {
	m := testModule(t, "plan-orphan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: aws_instance.baz, which the fixture config no longer has.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.baz": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanOrphanStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// This tests that configurations with UUIDs don't produce errors.
// For shadows, this would produce errors since a UUID changes every time.
func TestContext2Plan_shadowUuid(t *testing.T) {
	mod := testModule(t, "plan-shadow-uuid")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	if _, diags := tfCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_state verifies planning against an existing state:
// existing instances get update/replace diffs rather than plain creates.
func TestContext2Plan_state(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: one instance already exists.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Diff.RootModule().Resources) < 2 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanStateStr)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected)
	}
}
// TestContext2Plan_taint verifies that a tainted instance in state is
// planned for destroy-and-recreate.
func TestContext2Plan_taint(t *testing.T) {
	m := testModule(t, "plan-taint")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: a healthy instance plus one marked Tainted.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"num": "2"},
						},
					},
					"aws_instance.bar": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "baz",
							Tainted: true,
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanTaintStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// TestContext2Plan_taintIgnoreChanges verifies that a tainted instance is
// still replaced even when the changed attribute is covered by
// ignore_changes in the fixture configuration.
func TestContext2Plan_taintIgnoreChanges(t *testing.T) {
	m := testModule(t, "plan-taint-ignore-changes")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"vars": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn
	// Prior state: a single tainted instance.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "foo",
							Attributes: map[string]string{
								"vars": "foo",
								"type": "aws_instance",
							},
							Tainted: true,
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanTaintIgnoreChangesStr)
	if actual != expected {
		t.Fatalf("bad:\n%s", actual)
	}
}
// Fails about 50% of the time before the fix for GH-4982, covers the fix.
//
// TestContext2Plan_taintDestroyInterpolatedCountRace re-plans the same
// context 100 times to shake out the nondeterministic ordering bug: only the
// tainted foo.0 should ever be scheduled for destroy/create.
func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
	m := testModule(t, "plan-taint-interpolated-count")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Prior state: three indexed instances, index 0 tainted.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "bar",
							Tainted: true,
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.foo.2": &ResourceState{
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	// Repeat to expose the (pre-fix) intermittent failure.
	for i := 0; i < 100; i++ {
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("unexpected errors: %s", diags.Err())
		}
		actual := strings.TrimSpace(plan.String())
		expected := strings.TrimSpace(`
DIFF:
DESTROY/CREATE: aws_instance.foo.0
type: "" => "aws_instance"
STATE:
aws_instance.foo.0: (tainted)
ID = bar
aws_instance.foo.1:
ID = bar
aws_instance.foo.2:
ID = bar
`)
		if actual != expected {
			t.Fatalf("[%d] bad:\n%s\nexpected:\n%s\n", i, actual, expected)
		}
	}
}
// TestContext2Plan_targeted verifies that targeting a single resource plans
// only that resource.
func TestContext2Plan_targeted(t *testing.T) {
	mod := testModule(t, "plan-targeted")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "foo",
			),
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.foo
num: "" => "2"
type: "" => "aws_instance"
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetEmpty verifies that supplying an empty target
// string makes Plan fail.
func TestContext2Plan_targetEmpty(t *testing.T) {
	m := testModule(t, "plan-targeted")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		// NOTE(review): every sibling test sets Config here; "Module" looks
		// like a leftover from the pre-configs ContextOpts API — confirm this
		// field still exists and still compiles.
		Module: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		// NOTE(review): sibling tests use []addrs.Targetable for Targets; a
		// raw []string presumably predates the addrs refactor — verify.
		Targets: []string{""},
	})
	// NOTE(review): other tests treat Plan's second result as diagnostics and
	// check HasErrors(); confirm whether err == nil is still meaningful here.
	_, err := ctx.Plan()
	if err == nil {
		t.Fatal("should error")
	}
}
// Test that targeting a module properly plans any inputs that depend
// on another module.
func TestContext2Plan_targetedCrossModule(t *testing.T) {
	m := testModule(t, "plan-targeted-cross-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		// Target only module B; module A must still be planned because B's
		// input depends on it.
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
module.A:
CREATE: aws_instance.foo
foo: "" => "bar"
type: "" => "aws_instance"
module.B:
CREATE: aws_instance.bar
foo: "" => "<computed>"
type: "" => "aws_instance"
STATE:
<no state>
`)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}
// TestContext2Plan_targetedModuleWithProvider plans a configuration where the
// targeted child module has its own provider configuration, and checks that
// only the targeted module's resource appears in the diff.
func TestContext2Plan_targetedModuleWithProvider(t *testing.T) {
	mod := testModule(t, "plan-targeted-module-with-provider")
	prov := testProvider("null")
	// Minimal schema: a provider with a single optional "key" attribute and
	// an attribute-less null_resource.
	prov.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"key": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"null_resource": {
				Attributes: map[string]*configschema.Attribute{},
			},
		},
	}
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"null": testProviderFuncFixed(prov),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child2", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`
DIFF:
module.child2:
CREATE: null_resource.foo
STATE:
<no state>
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetedOrphan verifies that a targeted destroy plan
// destroys only the targeted orphaned resource, leaving other state entries
// untouched.
func TestContext2Plan_targetedOrphan(t *testing.T) {
	mod := testModule(t, "plan-targeted-orphan")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	// Prior state with two resources; only "orphan" is targeted below.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.orphan": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "i-789xyz",
						},
					},
					"aws_instance.nottargeted": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "i-abc123",
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		State:   priorState,
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`DIFF:
DESTROY: aws_instance.orphan
STATE:
aws_instance.nottargeted:
ID = i-abc123
aws_instance.orphan:
ID = i-789xyz
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// https://github.com/hashicorp/terraform/issues/2538
// TestContext2Plan_targetedModuleOrphan is the module-scoped variant of
// TestContext2Plan_targetedOrphan: the orphan lives in a child module.
func TestContext2Plan_targetedModuleOrphan(t *testing.T) {
	mod := testModule(t, "plan-targeted-module-orphan")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	// Child-module state with an orphan and an untargeted sibling.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.orphan": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "i-789xyz",
						},
					},
					"aws_instance.nottargeted": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "i-abc123",
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		State:   priorState,
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`DIFF:
module.child:
DESTROY: aws_instance.orphan
STATE:
module.child:
aws_instance.nottargeted:
ID = i-abc123
aws_instance.orphan:
ID = i-789xyz
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetedModuleUntargetedVariable targets one resource and
// one module, and verifies that a variable belonging to an untargeted module
// does not break the plan.
func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) {
	mod := testModule(t, "plan-targeted-module-untargeted-variable")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	targets := []addrs.Targetable{
		addrs.RootModuleInstance.Resource(
			addrs.ManagedResourceMode, "aws_instance", "blue",
		),
		addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey),
	}
	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Targets: targets,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.blue
module.blue_mod:
CREATE: aws_instance.mod
type: "" => "aws_instance"
value: "" => "<computed>"
STATE:
<no state>
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// ensure that outputs missing references due to targeting are removed from
// the graph.
func TestContext2Plan_outputContainsTargetedResource(t *testing.T) {
	m := testModule(t, "plan-untargeted-resource-output")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "a",
			),
		},
	})

	// Plan returns diagnostics, not a plain error; checking the value against
	// nil (as this test previously did) would fail spuriously on warnings.
	// Use HasErrors, matching the other tests in this file.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// https://github.com/hashicorp/terraform/issues/4515
// TestContext2Plan_targetedOverTen seeds state with more than ten instances
// of a counted resource and targets index 1, guarding against prefix-style
// matching where "foo.1" would also match "foo.10".."foo.12".
func TestContext2Plan_targetedOverTen(t *testing.T) {
m := testModule(t, "plan-targeted-over-ten")
p := testProvider("aws")
p.DiffFn = testDiffFn
// Build 13 instances so that two-digit indexes exist alongside
// single-digit ones.
resources := make(map[string]*ResourceState)
// NOTE(review): expectedState is built and sorted below but never compared
// against anything — the assertion uses the hard-coded literal instead.
// This looks like dead code; removing it would also orphan the
// sort.Strings call (and possibly the "sort" import), so leaving as-is
// pending confirmation.
var expectedState []string
for i := 0; i < 13; i++ {
key := fmt.Sprintf("aws_instance.foo.%d", i)
id := fmt.Sprintf("i-abc%d", i)
resources[key] = &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{ID: id},
}
expectedState = append(expectedState,
fmt.Sprintf("%s:\n ID = %s\n", key, id))
}
ctx := testContext2(t, &ContextOpts{
Config: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: resources,
},
},
},
// Target only instance index 1.
Targets: []addrs.Targetable{
addrs.RootModuleInstance.ResourceInstance(
addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1),
),
},
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
actual := strings.TrimSpace(plan.String())
// See NOTE(review) above: this sort has no observable effect on the test.
sort.Strings(expectedState)
// Empty diff is expected (nothing in config changes), with all 13
// instances retained in state.
expected := strings.TrimSpace(`
DIFF:
STATE:
aws_instance.foo.0:
ID = i-abc0
aws_instance.foo.1:
ID = i-abc1
aws_instance.foo.2:
ID = i-abc2
aws_instance.foo.3:
ID = i-abc3
aws_instance.foo.4:
ID = i-abc4
aws_instance.foo.5:
ID = i-abc5
aws_instance.foo.6:
ID = i-abc6
aws_instance.foo.7:
ID = i-abc7
aws_instance.foo.8:
ID = i-abc8
aws_instance.foo.9:
ID = i-abc9
aws_instance.foo.10:
ID = i-abc10
aws_instance.foo.11:
ID = i-abc11
aws_instance.foo.12:
ID = i-abc12
`)
if actual != expected {
t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
}
}
// TestContext2Plan_provider verifies that the provider configuration is
// evaluated and passed to ConfigureFn during planning, including values that
// come from input variables.
func TestContext2Plan_provider(t *testing.T) {
	m := testModule(t, "plan-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Capture the "foo" value the provider is configured with.
	var value interface{}
	p.ConfigureFn = func(c *ResourceConfig) error {
		value, _ = c.Get("foo")
		return nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("bar"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Plan returns diagnostics, not a plain error; the previous err != nil
	// check could fail on warning-only diagnostics. Use HasErrors, matching
	// the other tests in this file.
	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if value != "bar" {
		t.Fatalf("bad: %#v", value)
	}
}
// TestContext2Plan_varListErr verifies that an invalid list variable in the
// configuration causes Plan to fail.
func TestContext2Plan_varListErr(t *testing.T) {
	mod := testModule(t, "plan-var-list-err")
	prov := testProvider("aws")

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	// The malformed list must surface as a plan error.
	if _, err := ctx.Plan(); err == nil {
		t.Fatal("should error")
	}
}
// TestContext2Plan_ignoreChanges verifies that an attribute listed in
// ignore_changes is excluded from the diff even when the configured value
// differs from state.
func TestContext2Plan_ignoreChanges(t *testing.T) {
	mod := testModule(t, "plan-ignore-changes")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	// Existing instance whose ami differs from the configured variable value.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"ami": "ami-abcd1234"},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
		},
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Diff.RootModule().Resources) < 1 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanIgnoreChangesStr)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
// TestContext2Plan_ignoreChangesWildcard verifies that a wildcard
// ignore_changes entry suppresses the whole diff, leaving the root module
// with no resource changes.
func TestContext2Plan_ignoreChangesWildcard(t *testing.T) {
	mod := testModule(t, "plan-ignore-changes-wildcard")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami":           {Type: cty.String, Optional: true},
					"instance_type": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	// State where both attributes differ from the configured variables.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"ami":           "ami-abcd1234",
								"instance_type": "t2.micro",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
			"bar": &InputValue{
				Value:      cty.StringVal("t2.small"),
				SourceType: ValueFromCaller,
			},
		},
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// The wildcard should have suppressed every change.
	if len(plan.Diff.RootModule().Resources) > 0 {
		t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Diff.RootModule().Resources))
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanIgnoreChangesWildcardStr)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
// TestContext2Plan_moduleMapLiteral verifies that both a populated and an
// empty map literal passed into a module survive all the way into the
// resource configuration seen by the provider's diff function.
func TestContext2Plan_moduleMapLiteral(t *testing.T) {
	m := testModule(t, "plan-module-map-literal")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"meta": {Type: cty.Map(cty.String), Optional: true},
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// Here we verify that both the populated and empty map literals made it
		// through to the resource attributes.
		//
		// NOTE: the graph walk may run this callback on a goroutine other than
		// the test's, where t.Fatalf (FailNow) is not allowed, so we use
		// t.Errorf and return an error to abort the plan instead.
		val, _ := c.Get("tags")
		tags, ok := val.(map[string]interface{})
		if !ok {
			t.Errorf("Tags attr not map: %#v", val)
			return nil, fmt.Errorf("tags attr not map: %#v", val)
		}
		if tags["foo"] != "bar" {
			t.Errorf("Bad value in tags attr: %#v", tags)
			return nil, fmt.Errorf("bad value in tags attr: %#v", tags)
		}
		{
			val, _ := c.Get("meta")
			meta, ok := val.(map[string]interface{})
			if !ok {
				t.Errorf("Meta attr not map: %#v", val)
				return nil, fmt.Errorf("meta attr not map: %#v", val)
			}
			if len(meta) != 0 {
				t.Errorf("Meta attr not empty: %#v", val)
				return nil, fmt.Errorf("meta attr not empty: %#v", val)
			}
		}
		return nil, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_computedValueInMap verifies that a computed value used
// inside a map attribute produces the expected diff.
func TestContext2Plan_computedValueInMap(t *testing.T) {
	mod := testModule(t, "plan-computed-value-in-map")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"looked_up": {Type: cty.String, Optional: true},
				},
			},
			"aws_computed_source": {
				Attributes: map[string]*configschema.Attribute{
					"computed_read_only": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	prov.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// The computed source always reports its read-only attribute as
		// newly computed; everything else falls through to the default.
		if info.Type == "aws_computed_source" {
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"computed_read_only": {
						NewComputed: true,
					},
				},
			}, nil
		}
		return testDiffFn(info, state, c)
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// (not sure why this is repeated here; I updated some earlier code that
	// called ctx.Plan twice here, so retaining that in case it's somehow
	// important.)
	plan, diags = ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanComputedValueInMap)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
// TestContext2Plan_moduleVariableFromSplat verifies that a module variable
// fed from a splat expression plans correctly.
func TestContext2Plan_moduleVariableFromSplat(t *testing.T) {
	mod := testModule(t, "plan-module-variable-from-splat")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"thing": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// (not sure why this is repeated here; I updated some earlier code that
	// called ctx.Plan twice here, so retaining that in case it's somehow
	// important.)
	plan, diags = ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleVariableFromSplat)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
// TestContext2Plan_createBeforeDestroy_depends_datasource verifies that
// create_before_destroy resources depending on counted data sources produce
// diffs for every instance of both the resource and the data source.
func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) {
	mod := testModule(t, "plan-cdb-depends-datasource")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.Number, Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}

	// Every instance of the resource and the data source must have a diff.
	moduleDiff := plan.Diff.Modules[0]
	for _, key := range []string{
		"aws_instance.foo.0",
		"aws_instance.foo.1",
		"data.aws_vpc.bar.0",
		"data.aws_vpc.bar.1",
	} {
		if _, ok := moduleDiff.Resources[key]; !ok {
			t.Fatalf("missing diff for %s", key)
		}
	}
}
// interpolated lists need to be stored in the original order.
func TestContext2Plan_listOrder(t *testing.T) {
	mod := testModule(t, "plan-list-order")
	prov := testProvider("aws")
	prov.ApplyFn = testApplyFn
	prov.DiffFn = testDiffFn
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Both resources use the same list, so their diffs must be identical,
	// which in particular means the element order was preserved.
	diffs := plan.Diff.Modules[0].Resources
	if !diffs["aws_instance.a"].Equal(diffs["aws_instance.b"]) {
		t.Fatal("aws_instance.a and aws_instance.b diffs should match:\n", plan)
	}
}
// Make sure ignore-changes doesn't interfere with set/list/map diffs.
// If a resource was being replaced by a RequiresNew attribute that gets
// ignored, we need to filter the diff properly to properly update rather than
// replace.
func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) {
	mod := testModule(t, "plan-ignore-changes-with-flatmaps")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"user_data":   {Type: cty.String, Optional: true},
					"require_new": {Type: cty.String, Optional: true},
					"set":         {Type: cty.Map(cty.String), Optional: true},
					"lst":         {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}

	// Flatmap-encoded set and list values in the prior state.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"user_data":   "x",
								"require_new": "",
								"set.#":       "1",
								"set.0.a":     "1",
								"lst.#":       "1",
								"lst.0":       "j",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.Diff.String())
	want := strings.TrimSpace(testTFPlanDiffIgnoreChangesWithFlatmaps)
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
// TestContext2Plan_resourceNestedCount ensures resource sets that depend on
// the count of another resource set (ie: count of a data source that depends
// on another data source's instance count - data.x.foo.*.id) get properly
// normalized to the indexes they should be. This case comes up when there is
// an existing state (after an initial apply).
func TestContext2Plan_resourceNestedCount(t *testing.T) {
	mod := testModule(t, "nested-resource-count-plan")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn
	// Refresh passes the existing state through unchanged.
	prov.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
		return is, nil
	}

	// Prior state: foo -> bar -> baz, each with two instances, where each
	// tier depends on the full instance set of the previous one.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": {
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo0",
							Attributes: map[string]string{
								"id": "foo0",
							},
						},
					},
					"aws_instance.foo.1": {
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo1",
							Attributes: map[string]string{
								"id": "foo1",
							},
						},
					},
					"aws_instance.bar.0": {
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo.*"},
						Primary: &InstanceState{
							ID: "bar0",
							Attributes: map[string]string{
								"id": "bar0",
							},
						},
					},
					"aws_instance.bar.1": {
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo.*"},
						Primary: &InstanceState{
							ID: "bar1",
							Attributes: map[string]string{
								"id": "bar1",
							},
						},
					},
					"aws_instance.baz.0": {
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar.*"},
						Primary: &InstanceState{
							ID: "baz0",
							Attributes: map[string]string{
								"id": "baz0",
							},
						},
					},
					"aws_instance.baz.1": {
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar.*"},
						Primary: &InstanceState{
							ID: "baz1",
							Attributes: map[string]string{
								"id": "baz1",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		State: priorState,
	})

	// Validate, refresh, then plan; each step must be error-free.
	if diags := ctx.Validate(); diags.HasErrors() {
		t.Fatalf("validate errors: %s", diags.Err())
	}
	if _, diags := ctx.Refresh(); diags.HasErrors() {
		t.Fatalf("refresh errors: %s", diags.Err())
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("plan errors: %s", diags.Err())
	}

	// No changes expected; state retains all instances and dependencies.
	want := strings.TrimSpace(`
DIFF:
STATE:
aws_instance.bar.0:
ID = bar0
provider = provider.aws
Dependencies:
aws_instance.foo.*
aws_instance.bar.1:
ID = bar1
provider = provider.aws
Dependencies:
aws_instance.foo.*
aws_instance.baz.0:
ID = baz0
provider = provider.aws
Dependencies:
aws_instance.bar.*
aws_instance.baz.1:
ID = baz1
provider = provider.aws
Dependencies:
aws_instance.bar.*
aws_instance.foo.0:
ID = foo0
provider = provider.aws
aws_instance.foo.1:
ID = foo1
provider = provider.aws
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", got, want)
	}
}
core: Make context_plan_test failure output more helpful
Many of these tests previously printed only the actual result on failure,
not the expected one. Now both are shown, to make debugging easier.
package terraform
import (
"bytes"
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
)
// TestContext2Plan_basic exercises a simple plan over a known-good module
// and verifies the diff contents and that the provider SHA256s are carried
// into the resulting plan.
func TestContext2Plan_basic(t *testing.T) {
	mod := testModule(t, "plan-good")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		ProviderSHA256s: map[string][]byte{
			"aws": []byte("placeholder"),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if len(plan.Diff.RootModule().Resources) < 2 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}

	// The SHA256s given in the options must round-trip into the plan.
	if !reflect.DeepEqual(plan.ProviderSHA256s, ctx.providerSHA256s) {
		t.Errorf("wrong ProviderSHA256s %#v; want %#v", plan.ProviderSHA256s, ctx.providerSHA256s)
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_createBefore_deposed verifies that a deposed instance left
// over from a create_before_destroy replacement is planned for destruction.
func TestContext2Plan_createBefore_deposed(t *testing.T) {
	mod := testModule(t, "plan-cbd")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	// Prior state: a current instance plus one deposed instance.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
						Deposed: []*InstanceState{
							{ID: "foo"},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		State: priorState,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`
DIFF:
DESTROY: aws_instance.foo (deposed only)
STATE:
aws_instance.foo: (1 deposed)
ID = baz
Deposed ID 1 = foo
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s, got:\n%s", want, got)
	}
}
// TestContext2Plan_createBefore_maintainRoot verifies that planning a
// create_before_destroy configuration keeps the graph root intact and plans
// all counted instances.
func TestContext2Plan_createBefore_maintainRoot(t *testing.T) {
	mod := testModule(t, "plan-cbd-maintain-root")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Variables: InputValues{
			"in": &InputValue{
				Value:      cty.StringVal("a,b,c"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.bar.0
CREATE: aws_instance.bar.1
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
STATE:
<no state>
`)
	got := strings.TrimSpace(plan.String())
	if got != want {
		t.Fatalf("expected:\n%s, got:\n%s", want, got)
	}
}
// TestContext2Plan_emptyDiff verifies that a provider returning a nil diff
// yields an empty plan.
func TestContext2Plan_emptyDiff(t *testing.T) {
	mod := testModule(t, "plan-empty")
	prov := testProvider("aws")
	// Always report "no changes".
	prov.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		return nil, nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanEmptyStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_escapedVar verifies that escaped interpolation sequences
// in the configuration plan as literal text.
func TestContext2Plan_escapedVar(t *testing.T) {
	mod := testModule(t, "plan-escaped-var")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanEscapedVarStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_minimal plans the empty fixture with the default diff
// function and expects an empty plan.
func TestContext2Plan_minimal(t *testing.T) {
	mod := testModule(t, "plan-empty")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanEmptyStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_modules verifies planning across a configuration that
// includes child modules.
func TestContext2Plan_modules(t *testing.T) {
	mod := testModule(t, "plan-modules")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModulesStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// GH-1475
// TestContext2Plan_moduleCycle plans a configuration whose modules reference
// each other, which must not be reported as a cycle.
func TestContext2Plan_moduleCycle(t *testing.T) {
	mod := testModule(t, "plan-module-cycle")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Computed: true},
					"some_input": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleCycleStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleDeadlock verifies that planning a module fixture
// known to have caused a graph-walk deadlock completes (under the deadlock
// detector) and produces the expected diff.
func TestContext2Plan_moduleDeadlock(t *testing.T) {
	testCheckDeadlock(t, func() {
		m := testModule(t, "plan-module-deadlock")
		p := testProvider("aws")
		p.DiffFn = testDiffFn
		ctx := testContext2(t, &ContextOpts{
			Config: m,
			ProviderResolver: ResourceProviderResolverFixed(
				map[string]ResourceProviderFactory{
					"aws": testProviderFuncFixed(p),
				},
			),
		})

		// Plan returns diagnostics, not a plain error; use HasErrors like the
		// other tests in this file instead of the old err != nil check.
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("unexpected errors: %s", diags.Err())
		}

		actual := strings.TrimSpace(plan.String())
		expected := strings.TrimSpace(`
DIFF:
module.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
		if actual != expected {
			// The old format string ("expected:\n%sgot:\n%s") ran the two
			// values together; separate them for readable failure output.
			t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
		}
	})
}
// TestContext2Plan_moduleInput verifies that a module input variable is
// resolved during planning.
func TestContext2Plan_moduleInput(t *testing.T) {
	mod := testModule(t, "plan-module-input")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleInputStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleInputComputed verifies that a computed value passed
// as a module input shows up as computed in the plan.
func TestContext2Plan_moduleInputComputed(t *testing.T) {
	mod := testModule(t, "plan-module-input-computed")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleInputComputedStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleInputFromVar verifies that a root-level input
// variable flows through a module input into the plan.
func TestContext2Plan_moduleInputFromVar(t *testing.T) {
	mod := testModule(t, "plan-module-input-var")
	prov := testProvider("aws")
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("52"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleInputVarStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleMultiVar verifies that a module consuming a splat
// of multiple resource instances plans correctly.
func TestContext2Plan_moduleMultiVar(t *testing.T) {
	mod := testModule(t, "plan-module-multi-var")
	prov := testProvider("aws")
	prov.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
					"baz": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	prov.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(prov),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanModuleMultiVarStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleOrphans plans against state containing a resource
// in a child module that has been removed from configuration, and expects
// the orphan to be scheduled for destruction.
func TestContext2Plan_moduleOrphans(t *testing.T) {
	cfg := testModule(t, "plan-modules-remove")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// Prior state: one resource in a module no longer in configuration.
	priorState := &State{
		Modules: []*ModuleState{
			{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	}

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: priorState,
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanModuleOrphansStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// https://github.com/hashicorp/terraform/issues/3114
// TestContext2Plan_moduleOrphansWithProvisioner verifies that resources in
// removed (orphaned) child modules are planned for destruction even when a
// provisioner is registered, without the provisioner causing plan failure.
func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) {
	m := testModule(t, "plan-modules-remove-provisioners")
	p := testProvider("aws")
	pr := testProvisioner()
	p.DiffFn = testDiffFn
	// Prior state: a root resource that remains, plus two grandchild
	// modules ("childone"/"childtwo") that no longer exist in config.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root"},
				Resources: map[string]*ResourceState{
					"aws_instance.top": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "top",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childone"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "parent", "childtwo"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "baz",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Provisioners: map[string]ResourceProvisionerFactory{
			"shell": testProvisionerFuncFixed(pr),
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	// Both orphaned module instances must show DESTROY in the diff while
	// the surviving root resource stays untouched in state.
	expected := strings.TrimSpace(`
DIFF:
module.parent.childone:
DESTROY: aws_instance.foo
module.parent.childtwo:
DESTROY: aws_instance.foo
STATE:
aws_instance.top:
ID = top
module.parent.childone:
aws_instance.foo:
ID = baz
module.parent.childtwo:
aws_instance.foo:
ID = baz
`)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_moduleProviderInherit verifies that a child module's
// resources are configured with the provider configuration inherited from
// the root module, and that both root and child resources are diffed.
func TestContext2Plan_moduleProviderInherit(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-inherit")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// The inherited provider config must carry from="root".
						if v, ok := c.Get("from"); !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}

						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						// Record each resource's "from" attribute so we can
						// assert that both modules were visited.
						v, _ := c.Get("from")

						l.Lock()
						defer l.Unlock()
						calls = append(calls, v.(string))
						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Check diagnostics with HasErrors for consistency with the rest of
	// this file; a bare nil comparison on the returned Diagnostics would
	// also trip on warning-only results.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	actual := calls
	sort.Strings(actual)
	expected := []string{"child", "root"}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
// This tests (for GH-11282) that deeply nested modules properly inherit
// configuration.
func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) {
	var l sync.Mutex
	m := testModule(t, "plan-module-provider-inherit-deep")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					l.Lock()
					defer l.Unlock()

					var from string
					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Even the deepest module must receive the root
						// module's provider configuration.
						v, ok := c.Get("from")
						if !ok || v.(string) != "root" {
							return fmt.Errorf("bad")
						}

						from = v.(string)
						return nil
					}
					p.DiffFn = func(
						info *InstanceInfo,
						state *InstanceState,
						c *ResourceConfig) (*InstanceDiff, error) {
						// Diff must run only after Configure saw the
						// inherited value.
						if from != "root" {
							return nil, fmt.Errorf("bad resource")
						}

						return testDiffFn(info, state, c)
					}
					return p, nil
				},
			},
		),
	})

	// Use the diagnostics API consistently with the rest of this file
	// instead of a nil check on the Diagnostics slice, which would also
	// fail for warning-only results.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_moduleProviderDefaultsVar verifies that provider
// configurations whose attributes come from variables are resolved
// correctly in both the root module and a child module.
func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) {
	var l sync.Mutex
	var calls []string

	m := testModule(t, "plan-module-provider-defaults-var")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": func() (ResourceProvider, error) {
					l.Lock()
					defer l.Unlock()

					p := testProvider("aws")
					p.GetSchemaReturn = &ProviderSchema{
						Provider: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"to":   {Type: cty.String, Optional: true},
								"from": {Type: cty.String, Optional: true},
							},
						},
						ResourceTypes: map[string]*configschema.Block{
							"aws_instance": {
								Attributes: map[string]*configschema.Attribute{
									"from": {Type: cty.String, Optional: true},
								},
							},
						},
					}
					p.ConfigureFn = func(c *ResourceConfig) error {
						// Record each provider configuration, one line per
						// attribute, so we can assert what each module saw.
						var buf bytes.Buffer
						if v, ok := c.Get("from"); ok {
							buf.WriteString(v.(string) + "\n")
						}
						if v, ok := c.Get("to"); ok {
							buf.WriteString(v.(string) + "\n")
						}

						l.Lock()
						defer l.Unlock()
						calls = append(calls, buf.String())
						return nil
					}
					p.DiffFn = testDiffFn
					return p, nil
				},
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("root"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Check diagnostics with HasErrors, consistent with the rest of this
	// file; comparing the Diagnostics slice to nil would also fail for
	// warning-only results.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	expected := []string{
		"child\nchild\n",
		"root\n",
	}
	sort.Strings(calls)
	if !reflect.DeepEqual(calls, expected) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls)
	}
}
// TestContext2Plan_moduleProviderVar plans a configuration where a module's
// provider configuration is populated from a variable.
func TestContext2Plan_moduleProviderVar(t *testing.T) {
	cfg := testModule(t, "plan-module-provider-var")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"value": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanModuleProviderVarStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleVar plans a basic configuration that passes a
// variable into a child module.
func TestContext2Plan_moduleVar(t *testing.T) {
	cfg := testModule(t, "plan-module-var")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanModuleVarStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleVarWrongTypeBasic expects planning to fail when a
// module variable is given a value of the wrong type.
func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) {
	cfg := testModule(t, "plan-module-wrong-var-type")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	// The type mismatch must surface as plan-time errors.
	if _, diags := tfCtx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_moduleVarWrongTypeNested expects planning to fail when a
// nested module variable is given a value of the wrong type.
func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) {
	cfg := testModule(t, "plan-module-wrong-var-type-nested")
	provider := testProvider("null")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"null": testProviderFuncFixed(provider),
			},
		),
	})

	// The nested type mismatch must surface as plan-time errors.
	if _, diags := tfCtx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_moduleVarWithDefaultValue plans a configuration where a
// module variable falls back to its declared default, expecting success.
func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) {
	cfg := testModule(t, "plan-module-var-with-default-value")
	provider := testProvider("null")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"null": testProviderFuncFixed(provider),
			},
		),
	})

	if _, diags := tfCtx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_moduleVarComputed plans a configuration where a module
// variable is fed from a computed attribute.
func TestContext2Plan_moduleVarComputed(t *testing.T) {
	cfg := testModule(t, "plan-module-var-computed")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanModuleVarComputedStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_nil plans a configuration that matches existing state
// exactly and expects an empty root-module diff.
func TestContext2Plan_nil(t *testing.T) {
	cfg := testModule(t, "plan-nil")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"nil": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": {
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "bar",
							},
						},
					},
				},
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Nothing changed, so no resource diffs should be present.
	if resources := plan.Diff.RootModule().Resources; len(resources) != 0 {
		t.Fatalf("bad: %#v", resources)
	}
}
// TestContext2Plan_preventDestroy_bad expects planning to fail when a
// change would destroy a resource protected by lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_bad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
	})

	// Name the second return value "diags" and render it via Err(), as the
	// other tests in this file do, rather than formatting the raw
	// Diagnostics slice.
	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, diags.Err(), plan)
	}
}
// TestContext2Plan_preventDestroy_good verifies that prevent_destroy does
// not interfere with a plan that destroys nothing.
func TestContext2Plan_preventDestroy_good(t *testing.T) {
	cfg := testModule(t, "plan-prevent-destroy-good")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": {
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// No destruction is needed, so the plan must be empty.
	if !plan.Diff.Empty() {
		t.Fatalf("Expected empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_countBad expects planning to fail when a
// count reduction would destroy an instance protected by
// lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_countBad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-bad")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		},
	})

	// Name the second return value "diags" and render it via Err(), as the
	// other tests in this file do, rather than formatting the raw
	// Diagnostics slice.
	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, diags.Err(), plan)
	}
}
// TestContext2Plan_preventDestroy_countGood verifies that prevent_destroy
// permits a plan whose count change does not destroy protected instances.
func TestContext2Plan_preventDestroy_countGood(t *testing.T) {
	cfg := testModule(t, "plan-prevent-destroy-count-good")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": {
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
						"aws_instance.foo.1": {
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc345",
							},
						},
					},
				},
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Something must change here, so an empty plan indicates a failure.
	if plan.Diff.Empty() {
		t.Fatalf("Expected non-empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_countGoodNoChange verifies that a counted
// resource already matching configuration yields an empty plan under
// prevent_destroy.
func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) {
	cfg := testModule(t, "plan-prevent-destroy-count-good")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo.0": {
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"current": "0",
									"type":    "aws_instance",
								},
							},
						},
					},
				},
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !plan.Diff.Empty() {
		t.Fatalf("Expected empty plan, got %s", plan.String())
	}
}
// TestContext2Plan_preventDestroy_destroyPlan expects a destroy plan to
// fail when a resource is protected by lifecycle.prevent_destroy.
func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"aws_instance.foo": &ResourceState{
							Type: "aws_instance",
							Primary: &InstanceState{
								ID: "i-abc123",
							},
						},
					},
				},
			},
		},
		Destroy: true,
	})

	// Name the second return value "diags" and render it via Err(), as the
	// other tests in this file do, rather than formatting the raw
	// Diagnostics slice.
	plan, diags := ctx.Plan()
	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s",
			expectedErr, diags.Err(), plan)
	}
}
// TestContext2Plan_provisionerCycle expects planning to fail when a
// provisioner configuration forms a dependency cycle.
func TestContext2Plan_provisionerCycle(t *testing.T) {
	cfg := testModule(t, "plan-provisioner-cycle")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn
	provisioner := testProvisioner()

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		Provisioners: map[string]ResourceProvisionerFactory{
			"local-exec": testProvisionerFuncFixed(provisioner),
		},
	})

	if _, diags := tfCtx.Plan(); !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}
// TestContext2Plan_computed plans a configuration that references a
// computed attribute and checks the rendered plan.
func TestContext2Plan_computed(t *testing.T) {
	cfg := testModule(t, "plan-computed")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanComputedStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_computedDataResource verifies that a data resource
// depending on a computed managed-resource attribute is deferred with a
// NewComputed id in the diff.
func TestContext2Plan_computedDataResource(t *testing.T) {
	cfg := testModule(t, "plan-computed-data-resource")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}

	moduleDiff := plan.Diff.Modules[0]
	if _, ok := moduleDiff.Resources["aws_instance.foo"]; !ok {
		t.Fatalf("missing diff for aws_instance.foo")
	}
	dataDiff, ok := moduleDiff.Resources["data.aws_vpc.bar"]
	if !ok {
		t.Fatalf("missing diff for data.aws_vpc.bar")
	}

	// The data source can't be read yet, so its id must be marked as
	// computed and requiring replacement.
	wantDiff := &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"id": {
				NewComputed: true,
				RequiresNew: true,
				Type:        DiffAttrOutput,
			},
		},
	}
	if same, _ := wantDiff.Same(dataDiff); !same {
		t.Fatalf(
			"incorrect diff for data.aws_vpc.bar\ngot: %#v\nwant: %#v",
			dataDiff, wantDiff,
		)
	}
}
// TestContext2Plan_computedDataCountResource verifies that all counted
// instances of a data source appear in the plan diff.
func TestContext2Plan_computedDataCountResource(t *testing.T) {
	cfg := testModule(t, "plan-computed-data-count")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}

	// All three counted data source instances must be in the diff.
	moduleDiff := plan.Diff.Modules[0]
	for i := 0; i < 3; i++ {
		name := fmt.Sprintf("data.aws_vpc.bar.%d", i)
		if _, ok := moduleDiff.Resources[name]; !ok {
			t.Fatalf("missing diff for %s", name)
		}
	}
}
// TestContext2Plan_localValueCount verifies that a resource count driven by
// a local value produces a diff entry per instance.
func TestContext2Plan_localValueCount(t *testing.T) {
	cfg := testModule(t, "plan-local-value-count")
	provider := testProvider("test")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"test": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}

	// All three counted test_resource.foo instances must appear.
	moduleDiff := plan.Diff.Modules[0]
	for i := 0; i < 3; i++ {
		name := fmt.Sprintf("test_resource.foo.%d", i)
		if _, ok := moduleDiff.Resources[name]; !ok {
			t.Fatalf("missing diff for %s", name)
		}
	}
}
// Higher level test at TestResource_dataSourceListPlanPanic
// TestContext2Plan_dataSourceTypeMismatch expects a type error when a
// list-typed data source attribute is assigned to a string argument.
func TestContext2Plan_dataSourceTypeMismatch(t *testing.T) {
	cfg := testModule(t, "plan-data-source-type-mismatch")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami": {Type: cty.String, Optional: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_availability_zones": {
				Attributes: map[string]*configschema.Attribute{
					"names": {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	provider.ValidateResourceFn = func(rt string, c *ResourceConfig) (ws []string, es []error) {
		// Emulate the type checking behavior of helper/schema based validation
		if rt != "aws_instance" {
			return
		}
		ami, _ := c.Get("ami")
		if _, isString := ami.(string); !isString {
			es = append(es, fmt.Errorf("Expected ami to be string, got %T", ami))
		}
		return
	}
	provider.DiffFn = func(
		info *InstanceInfo,
		state *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if info.Type == "aws_instance" {
			// If we get to the diff, we should be able to assume types
			ami, _ := c.Get("ami")
			_ = ami.(string)
		}
		return nil, nil
	}

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		// Pretend like we ran a Refresh and the AZs data source was populated.
		State: &State{
			Modules: []*ModuleState{
				{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"data.aws_availability_zones.azs": {
							Type: "aws_availability_zones",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"names.#": "2",
									"names.0": "us-east-1a",
									"names.1": "us-east-1b",
								},
							},
						},
					},
				},
			},
		},
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	_, diags := tfCtx.Plan()
	if !diags.HasErrors() {
		t.Fatalf("Expected err, got none!")
	}

	want := `Inappropriate value for attribute "ami": incorrect type; string required`
	if errStr := diags.Err().Error(); !strings.Contains(errStr, want) {
		t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, want)
	}
}
// TestContext2Plan_dataResourceBecomesComputed verifies that a data
// resource whose configuration depends on a newly-computed attribute is
// re-read at plan time (ReadDataDiff) rather than kept from state.
func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) {
	m := testModule(t, "plan-data-resource-becomes-computed")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_data_resource": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	}

	p.DiffFn = func(info *InstanceInfo, state *InstanceState, config *ResourceConfig) (*InstanceDiff, error) {
		// Only the managed resource should be diffed through this path.
		if info.Type != "aws_instance" {
			t.Fatalf("don't know how to diff %s", info.Id)
			return nil, nil
		}

		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"computed": &ResourceAttrDiff{
					Old:         "",
					New:         "",
					NewComputed: true,
				},
			},
		}, nil
	}
	p.ReadDataDiffReturn = &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"foo": &ResourceAttrDiff{
				Old:         "",
				New:         "",
				NewComputed: true,
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path: rootModulePath,
					Resources: map[string]*ResourceState{
						"data.aws_data_resource.foo": &ResourceState{
							Type: "aws_data_resource",
							Primary: &InstanceState{
								ID: "i-abc123",
								Attributes: map[string]string{
									"id":    "i-abc123",
									"value": "baz",
								},
							},
						},
					},
				},
			},
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}

	if !p.ReadDataDiffCalled {
		t.Fatal("ReadDataDiff wasn't called, but should've been")
	}
	if got, want := p.ReadDataDiffInfo.Id, "data.aws_data_resource.foo"; got != want {
		t.Fatalf("ReadDataDiff info id is %s; want %s", got, want)
	}

	moduleDiff := plan.Diff.Modules[0]

	iDiff, ok := moduleDiff.Resources["data.aws_data_resource.foo"]
	if !ok {
		t.Fatalf("missing diff for data.aws_data_resource.foo")
	}

	// This is added by the diff but we want to verify that we got
	// the same diff as above minus the dynamic stuff.
	delete(iDiff.Attributes, "id")

	if same, _ := p.ReadDataDiffReturn.Same(iDiff); !same {
		// Fixed: the failure message previously named the wrong resource
		// ("data.data_resource.foo" instead of "data.aws_data_resource.foo").
		t.Fatalf(
			"incorrect diff for data.aws_data_resource.foo\ngot: %#v\nwant: %#v",
			iDiff, p.ReadDataDiffReturn,
		)
	}
}
// TestContext2Plan_computedList plans a configuration that references a
// computed list attribute and checks the rendered plan.
func TestContext2Plan_computedList(t *testing.T) {
	cfg := testModule(t, "plan-computed-list")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Optional: true},
					"num":     {Type: cty.String, Optional: true},
					"list":    {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanComputedListStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// GH-8695. This tests that you can index into a computed list on a
// splatted resource.
func TestContext2Plan_computedMultiIndex(t *testing.T) {
	cfg := testModule(t, "plan-computed-multi-index")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanComputedMultiIndexStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_count plans a counted resource and checks both the
// instance count and the rendered plan.
func TestContext2Plan_count(t *testing.T) {
	cfg := testModule(t, "plan-count")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// The counted instances plus ancillary resources should yield at
	// least six entries in the root module diff.
	if resources := plan.Diff.RootModule().Resources; len(resources) < 6 {
		t.Fatalf("bad: %#v", resources)
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanCountStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countComputed expects planning to fail when a resource
// count depends on a computed value.
func TestContext2Plan_countComputed(t *testing.T) {
	m := testModule(t, "plan-count-computed")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})

	// Check HasErrors instead of comparing the Diagnostics value against
	// nil: a warnings-only (non-nil but non-error) result would otherwise
	// be mistaken for the expected failure.
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_countComputedModule expects a number-required error when
// a module's resource count is driven by a computed value.
func TestContext2Plan_countComputedModule(t *testing.T) {
	m := testModule(t, "plan-count-computed-module")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})

	// Render diagnostics through Err() for a readable message, consistent
	// with how the other tests in this file report diagnostics, rather
	// than formatting the raw Diagnostics slice.
	_, diags := ctx.Plan()
	expectedErr := "a number is required"
	if errStr := fmt.Sprintf("%s", diags.Err()); !strings.Contains(errStr, expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\n",
			expectedErr, errStr)
	}
}
// TestContext2Plan_countModuleStatic verifies that a static count inside a
// child module creates one diff entry per instance.
func TestContext2Plan_countModuleStatic(t *testing.T) {
	m := testModule(t, "plan-count-module-static")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	// Expect three CREATE entries in the child module and no prior state.
	expected := strings.TrimSpace(`
DIFF:
module.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_countModuleStaticGrandchild verifies that a static count
// two module levels deep creates one diff entry per instance.
func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) {
	m := testModule(t, "plan-count-module-static-grandchild")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	// Expect three CREATE entries in the grandchild module and no prior state.
	expected := strings.TrimSpace(`
DIFF:
module.child.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_countIndex plans a configuration that uses count.index
// and checks the rendered plan.
func TestContext2Plan_countIndex(t *testing.T) {
	cfg := testModule(t, "plan-count-index")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanCountIndexStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countVar plans a configuration whose resource count is
// supplied through a caller-provided variable.
func TestContext2Plan_countVar(t *testing.T) {
	cfg := testModule(t, "plan-count-var")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		Variables: InputValues{
			"count": {
				Value:      cty.StringVal("3"),
				SourceType: ValueFromCaller,
			},
		},
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanCountVarStr); got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countZero plans a configuration with count = 0 and
// checks the rendered plan.
func TestContext2Plan_countZero(t *testing.T) {
	cfg := testModule(t, "plan-count-zero")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.DynamicPseudoType, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	tfCtx := testContext2(t, &ContextOpts{
		Config: cfg,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := tfCtx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got, want := strings.TrimSpace(plan.String()), strings.TrimSpace(testTerraformPlanCountZeroStr)
	if got != want {
		t.Logf("expected:\n%s", want)
		t.Fatalf("bad:\n%s", got)
	}
}
// TestContext2Plan_countOneIndex plans a configuration that indexes into a
// count-of-one resource and checks the rendered plan.
func TestContext2Plan_countOneIndex(t *testing.T) {
	module := testModule(t, "plan-count-one-index")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountOneIndexStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countDecreaseToOne starts with three instances in state
// and plans a config that reduces the count, expecting the surplus
// instances to be destroyed.
func TestContext2Plan_countDecreaseToOne(t *testing.T) {
	module := testModule(t, "plan-count-dec")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// Prior state: aws_instance.foo has indexes 0 through 2.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.1": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.foo.2": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountDecreaseStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countIncreaseFromNotSet starts from a single unindexed
// instance in state and plans a config with a count, expecting the new
// indexed instances to be created.
func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) {
	module := testModule(t, "plan-count-inc")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					// Unindexed key: the resource existed before count was set.
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountIncreaseStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_countIncreaseFromOne starts with one indexed instance
// (index 0) in state and plans a larger count, expecting creates for the
// additional indexes only.
func TestContext2Plan_countIncreaseFromOne(t *testing.T) {
	module := testModule(t, "plan-count-inc")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// https://github.com/PeoplePerHour/terraform/pull/11
//
// This tests a case where both a "resource" and "resource.0" are in
// the state file, which apparently is a reasonable backwards compatibility
// concern found in the above 3rd party repo.
func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) {
	module := testModule(t, "plan-count-inc")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// State deliberately contains both the unindexed and the index-0 keys
	// for the same resource, mimicking the corrupted-state scenario.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
					"aws_instance.foo.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"foo":  "foo",
								"type": "aws_instance",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneCorruptedStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// A common pattern in TF configs is to have a set of resources with the same
// count and to use count.index to create correspondences between them:
//
//	foo_id = "${foo.bar.*.id[count.index]}"
//
// This test is for the situation where some instances already exist and the
// count is increased. In that case, we should see only the create diffs
// for the new instances and not any update diffs for the existing ones.
func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) {
	module := testModule(t, "plan-count-splat-reference")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"name":     {Type: cty.String, Optional: true},
					"foo_name": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.DiffFn = testDiffFn

	// Two pairs of correlated instances already exist.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"name": "foo 0"},
						},
					},
					"aws_instance.foo.1": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"name": "foo 1"},
						},
					},
					"aws_instance.bar.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"foo_name": "foo 0"},
						},
					},
					"aws_instance.bar.1": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"foo_name": "foo 1"},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.bar.2
foo_name: "" => "foo 2"
type: "" => "aws_instance"
CREATE: aws_instance.foo.2
name: "" => "foo 2"
type: "" => "aws_instance"
STATE:
aws_instance.bar.0:
ID = bar
foo_name = foo 0
aws_instance.bar.1:
ID = bar
foo_name = foo 1
aws_instance.foo.0:
ID = bar
name = foo 0
aws_instance.foo.1:
ID = bar
name = foo 1
`)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_destroy plans a full destroy of two resources in state
// and verifies that both appear in the diff.
func TestContext2Plan_destroy(t *testing.T) {
	module := testModule(t, "plan-destroy")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.one": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.two": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "baz"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State:   state,
		Destroy: true,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Both resources must be represented in the root-module diff.
	if resources := plan.Diff.RootModule().Resources; len(resources) != 2 {
		t.Fatalf("bad: %#v", resources)
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanDestroyStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_moduleDestroy verifies a destroy plan covering both a
// root-module resource and a child-module resource.
func TestContext2Plan_moduleDestroy(t *testing.T) {
	m := testModule(t, "plan-module-destroy")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyStr)
	if actual != expected {
		// Standardized on the "wrong result" message used by the
		// surrounding plan tests (was "bad:" with reversed labels).
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// GH-1835
//
// TestContext2Plan_moduleDestroyCycle plans a destroy of two sibling
// modules that reference each other, guarding against the dependency
// cycle reported in the issue above.
func TestContext2Plan_moduleDestroyCycle(t *testing.T) {
	m := testModule(t, "plan-module-destroy-gh-1835")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "a_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.a": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "a",
						},
					},
				},
			},
			&ModuleState{
				Path: []string{"root", "b_module"},
				Resources: map[string]*ResourceState{
					"aws_instance.b": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "b",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyCycleStr)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_moduleDestroyMultivar verifies a destroy plan for a
// child module holding multiple counted instances.
func TestContext2Plan_moduleDestroyMultivar(t *testing.T) {
	m := testModule(t, "plan-module-destroy-multivar")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path:      rootModulePath,
				Resources: map[string]*ResourceState{},
			},
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar0",
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar1",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State:   s,
		Destroy: true,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleDestroyMultivarStr)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_pathVar verifies interpolation of the path variables
// (cwd, module, root) into planned attributes.
func TestContext2Plan_pathVar(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	m := testModule(t, "plan-path-var")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"cwd":    {Type: cty.String, Optional: true},
					"module": {Type: cty.String, Optional: true},
					"root":   {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		// Use the standard diagnostics message (was the generic "err: %s").
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanPathVarStr)
	// Warning: this ordering REALLY matters for this test. The
	// order is: cwd, module, root.
	expected = fmt.Sprintf(
		expected,
		cwd,
		m.Module.SourceDir,
		m.Module.SourceDir,
	)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_diffVar installs a custom DiffFn that reports a change
// for the existing instance ("num" 2 -> 3) and verifies the resulting plan.
func TestContext2Plan_diffVar(t *testing.T) {
	m := testModule(t, "plan-diffvar")
	p := testProvider("aws")
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"num": "2",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})

	// Only the existing instance (ID "bar") gets the hand-built diff;
	// everything else falls through to the shared testDiffFn.
	p.DiffFn = func(
		info *InstanceInfo,
		s *InstanceState,
		c *ResourceConfig) (*InstanceDiff, error) {
		if s.ID != "bar" {
			return testDiffFn(info, s, c)
		}

		return &InstanceDiff{
			Attributes: map[string]*ResourceAttrDiff{
				"num": &ResourceAttrDiff{
					Old: "2",
					New: "3",
				},
			},
		}, nil
	}

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanDiffVarStr)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_hook confirms that the pre-diff and post-diff hooks are
// both invoked during a plan.
func TestContext2Plan_hook(t *testing.T) {
	module := testModule(t, "plan-good")
	hook := new(MockHook)
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		Hooks:  []Hook{hook},
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !hook.PreDiffCalled {
		t.Fatal("should be called")
	}
	if !hook.PostDiffCalled {
		t.Fatal("should be called")
	}
}
// TestContext2Plan_closeProvider verifies that the provider is closed
// after planning completes.
func TestContext2Plan_closeProvider(t *testing.T) {
	// this fixture only has an aliased provider located in the module, to make
	// sure that the provider name contains a path more complex than
	// "provider.aws".
	m := testModule(t, "plan-close-module-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})

	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Close must have been called on the mock provider by the end of Plan.
	if !p.CloseCalled {
		t.Fatal("provider not closed")
	}
}
// TestContext2Plan_orphan plans with a resource in state that is no longer
// present in configuration, expecting it to be scheduled for destruction.
func TestContext2Plan_orphan(t *testing.T) {
	module := testModule(t, "plan-orphan")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// aws_instance.baz exists in state but not in the config fixture.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.baz": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanOrphanStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// This tests that configurations with UUIDs don't produce errors.
// For shadows, this would produce errors since a UUID changes every time.
func TestContext2Plan_shadowUuid(t *testing.T) {
	module := testModule(t, "plan-shadow-uuid")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
	})

	// Success is simply a plan that completes without errors.
	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_state plans with prior state present and verifies the
// rendered plan includes both existing and new resources.
func TestContext2Plan_state(t *testing.T) {
	m := testModule(t, "plan-good")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// The fixture declares two resources, so the diff must cover both.
	if len(plan.Diff.RootModule().Resources) < 2 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanStateStr)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_taint plans with one healthy and one tainted instance in
// state and verifies the tainted one is scheduled for replacement.
func TestContext2Plan_taint(t *testing.T) {
	module := testModule(t, "plan-taint")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"num": "2"},
						},
					},
					"aws_instance.bar": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "baz",
							Tainted: true,
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanTaintStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// TestContext2Plan_taintIgnoreChanges verifies that a tainted instance is
// still replaced even when the changed attribute is in ignore_changes.
func TestContext2Plan_taintIgnoreChanges(t *testing.T) {
	module := testModule(t, "plan-taint-ignore-changes")
	provider := testProvider("aws")
	provider.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"vars": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	provider.ApplyFn = testApplyFn
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "foo",
							Attributes: map[string]string{
								"vars": "foo",
								"type": "aws_instance",
							},
							Tainted: true,
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(testTerraformPlanTaintIgnoreChangesStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}
// Fails about 50% of the time before the fix for GH-4982, covers the fix.
func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
	module := testModule(t, "plan-taint-interpolated-count")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": {
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:      "bar",
							Tainted: true,
						},
					},
					"aws_instance.foo.1": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
					"aws_instance.foo.2": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "bar"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State: state,
	})

	// Plan repeatedly: the original race was intermittent, so a single
	// iteration wouldn't reliably reproduce it.
	for i := 0; i < 100; i++ {
		plan, diags := ctx.Plan()
		if diags.HasErrors() {
			t.Fatalf("unexpected errors: %s", diags.Err())
		}

		got := strings.TrimSpace(plan.String())
		want := strings.TrimSpace(`
DIFF:
DESTROY/CREATE: aws_instance.foo.0
type: "" => "aws_instance"
STATE:
aws_instance.foo.0: (tainted)
ID = bar
aws_instance.foo.1:
ID = bar
aws_instance.foo.2:
ID = bar
`)
		if got != want {
			t.Fatalf("[%d] bad:\n%s\nexpected:\n%s\n", i, got, want)
		}
	}
}
// TestContext2Plan_targeted plans with a single resource target and checks
// that only that resource appears in the diff.
func TestContext2Plan_targeted(t *testing.T) {
	module := testModule(t, "plan-targeted")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "foo",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.foo
num: "" => "2"
type: "" => "aws_instance"
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetEmpty ensures that planning with an empty target
// string produces an error.
//
// NOTE(review): this test still uses the legacy ContextOpts shape — the
// `Module` field and `Targets: []string` — while every surrounding test
// uses `Config` and `[]addrs.Targetable`, and checks `diags.HasErrors()`
// rather than a nil comparison. Confirm this still compiles against the
// current ContextOpts definition and migrate it to match its siblings.
func TestContext2Plan_targetEmpty(t *testing.T) {
	m := testModule(t, "plan-targeted")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Module: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Targets: []string{""},
	})

	// The empty target is invalid, so Plan is expected to fail.
	_, err := ctx.Plan()
	if err == nil {
		t.Fatal("should error")
	}
}
// Test that targeting a module properly plans any inputs that depend
// on another module.
func TestContext2Plan_targetedCrossModule(t *testing.T) {
	module := testModule(t, "plan-targeted-cross-module")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// Target only module.B; module.A must still be planned because B's
	// inputs depend on it.
	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
module.A:
CREATE: aws_instance.foo
foo: "" => "bar"
type: "" => "aws_instance"
module.B:
CREATE: aws_instance.bar
foo: "" => "<computed>"
type: "" => "aws_instance"
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetedModuleWithProvider targets one of two child
// modules that each configure their own provider, and verifies only the
// targeted module's resource is planned.
func TestContext2Plan_targetedModuleWithProvider(t *testing.T) {
	module := testModule(t, "plan-targeted-module-with-provider")
	provider := testProvider("null")
	provider.GetSchemaReturn = &ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"key": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"null_resource": {
				Attributes: map[string]*configschema.Attribute{},
			},
		},
	}
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"null": testProviderFuncFixed(provider),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child2", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
module.child2:
CREATE: null_resource.foo
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetedOrphan runs a targeted destroy plan against one
// of two orphaned resources, expecting only the target to be destroyed.
func TestContext2Plan_targetedOrphan(t *testing.T) {
	module := testModule(t, "plan-targeted-orphan")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// Two orphans in state; only "orphan" is targeted below.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.orphan": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "i-789xyz"},
					},
					"aws_instance.nottargeted": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "i-abc123"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State:   state,
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`DIFF:
DESTROY: aws_instance.orphan
STATE:
aws_instance.nottargeted:
ID = i-abc123
aws_instance.orphan:
ID = i-789xyz
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// https://github.com/hashicorp/terraform/issues/2538
//
// TestContext2Plan_targetedModuleOrphan targets an orphaned resource that
// lives in a child module during a destroy plan.
func TestContext2Plan_targetedModuleOrphan(t *testing.T) {
	module := testModule(t, "plan-targeted-module-orphan")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	// Both orphans live in module.child; only "orphan" is targeted.
	state := &State{
		Modules: []*ModuleState{
			{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.orphan": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "i-789xyz"},
					},
					"aws_instance.nottargeted": {
						Type:    "aws_instance",
						Primary: &InstanceState{ID: "i-abc123"},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		State:   state,
		Destroy: true,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "orphan",
			),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`DIFF:
module.child:
DESTROY: aws_instance.orphan
STATE:
module.child:
aws_instance.nottargeted:
ID = i-abc123
aws_instance.orphan:
ID = i-789xyz
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// TestContext2Plan_targetedModuleUntargetedVariable targets a resource and
// a module whose input comes from an untargeted resource, and verifies the
// dependency is still planned.
func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) {
	module := testModule(t, "plan-targeted-module-untargeted-variable")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Config: module,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(provider),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "blue",
			),
			addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey),
		},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	got := strings.TrimSpace(plan.String())
	want := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.blue
module.blue_mod:
CREATE: aws_instance.mod
type: "" => "aws_instance"
value: "" => "<computed>"
STATE:
<no state>
`)
	if got != want {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", want, got)
	}
}
// ensure that outputs missing references due to targeting are removed from
// the graph.
func TestContext2Plan_outputContainsTargetedResource(t *testing.T) {
	m := testModule(t, "plan-untargeted-resource-output")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource(
				addrs.ManagedResourceMode, "aws_instance", "a",
			),
		},
	})

	// Plan returns diagnostics; fail only on real errors (a bare nil check
	// on the diagnostics would also trip on warnings), matching the
	// convention used by the other plan tests.
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// https://github.com/hashicorp/terraform/issues/4515
//
// TestContext2Plan_targetedOverTen seeds the state with 13 instances so
// instance indexes exceed a single digit, targets just index 1, and
// verifies the resulting (empty-diff) plan keeps every instance in state.
func TestContext2Plan_targetedOverTen(t *testing.T) {
	m := testModule(t, "plan-targeted-over-ten")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Build aws_instance.foo.0 through .12 in prior state.
	resources := make(map[string]*ResourceState)
	var expectedState []string
	for i := 0; i < 13; i++ {
		key := fmt.Sprintf("aws_instance.foo.%d", i)
		id := fmt.Sprintf("i-abc%d", i)
		resources[key] = &ResourceState{
			Type:    "aws_instance",
			Primary: &InstanceState{ID: id},
		}
		expectedState = append(expectedState,
			fmt.Sprintf("%s:\n ID = %s\n", key, id))
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: &State{
			Modules: []*ModuleState{
				&ModuleState{
					Path:      rootModulePath,
					Resources: resources,
				},
			},
		},
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.ResourceInstance(
				addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1),
			),
		},
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	// NOTE(review): expectedState is built and sorted but never compared
	// against anything — the expectation below is hard-coded instead.
	// Looks like dead code left over from an earlier version; confirm and
	// remove (check whether this is the file's only use of "sort" first).
	sort.Strings(expectedState)
	expected := strings.TrimSpace(`
DIFF:
STATE:
aws_instance.foo.0:
ID = i-abc0
aws_instance.foo.1:
ID = i-abc1
aws_instance.foo.2:
ID = i-abc2
aws_instance.foo.3:
ID = i-abc3
aws_instance.foo.4:
ID = i-abc4
aws_instance.foo.5:
ID = i-abc5
aws_instance.foo.6:
ID = i-abc6
aws_instance.foo.7:
ID = i-abc7
aws_instance.foo.8:
ID = i-abc8
aws_instance.foo.9:
ID = i-abc9
aws_instance.foo.10:
ID = i-abc10
aws_instance.foo.11:
ID = i-abc11
aws_instance.foo.12:
ID = i-abc12
`)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}
// TestContext2Plan_provider verifies that provider configuration receives
// interpolated input-variable values ("foo" => "bar") during plan.
func TestContext2Plan_provider(t *testing.T) {
	m := testModule(t, "plan-provider")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	// Capture the "foo" value the provider is configured with.
	var value interface{}
	p.ConfigureFn = func(c *ResourceConfig) error {
		value, _ = c.Get("foo")
		return nil
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("bar"),
				SourceType: ValueFromCaller,
			},
		},
	})

	// Plan returns diagnostics; fail only on actual errors, matching the
	// diags.HasErrors() convention of the other plan tests (the old nil
	// comparison also failed on mere warnings).
	if _, diags := ctx.Plan(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if value != "bar" {
		t.Fatalf("bad: %#v", value)
	}
}
// TestContext2Plan_varListErr ensures that an invalid list variable in the
// fixture causes the plan to fail with an error.
func TestContext2Plan_varListErr(t *testing.T) {
	m := testModule(t, "plan-var-list-err")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})

	// Plan returns diagnostics; require a real error rather than merely a
	// non-empty diagnostics set (the old `err == nil` nil comparison would
	// have been satisfied by a warning-only result).
	_, diags := ctx.Plan()
	if !diags.HasErrors() {
		t.Fatal("should error")
	}
}
// TestContext2Plan_ignoreChanges verifies that a change to an attribute
// listed in ignore_changes ("ami") does not force a replacement.
func TestContext2Plan_ignoreChanges(t *testing.T) {
	m := testModule(t, "plan-ignore-changes")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{"ami": "ami-abcd1234"},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			// The variable changes "ami", but ignore_changes suppresses it.
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if len(plan.Diff.RootModule().Resources) < 1 {
		t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanIgnoreChangesStr)
	if actual != expected {
		// Standardized failure message, consistent with sibling tests.
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}
// TestContext2Plan_ignoreChangesWildcard verifies that a wildcard
// ignore_changes suppresses all attribute diffs: despite both variables
// changing, the plan must contain no resource diffs.
func TestContext2Plan_ignoreChangesWildcard(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-wildcard")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"ami":           {Type: cty.String, Optional: true},
					"instance_type": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	p.DiffFn = testDiffFn
	// Prior state holds values that differ from the variables supplied below.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"ami":           "ami-abcd1234",
								"instance_type": "t2.micro",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		Variables: InputValues{
			"foo": &InputValue{
				Value:      cty.StringVal("ami-1234abcd"),
				SourceType: ValueFromCaller,
			},
			"bar": &InputValue{
				Value:      cty.StringVal("t2.small"),
				SourceType: ValueFromCaller,
			},
		},
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// Wildcard ignore means no diffs at all should be produced.
	if len(plan.Diff.RootModule().Resources) > 0 {
		t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Diff.RootModule().Resources))
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanIgnoreChangesWildcardStr)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
	}
}
// TestContext2Plan_moduleMapLiteral checks that map literals (both populated
// and empty) declared in a module make it through to the provider's diff
// callback intact. The assertions live inside the custom DiffFn.
func TestContext2Plan_moduleMapLiteral(t *testing.T) {
	m := testModule(t, "plan-module-map-literal")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"meta": {Type: cty.Map(cty.String), Optional: true},
					"tags": {Type: cty.Map(cty.String), Optional: true},
				},
			},
		},
	}
	p.ApplyFn = testApplyFn
	p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// Here we verify that both the populated and empty map literals made it
		// through to the resource attributes
		val, _ := c.Get("tags")
		m, ok := val.(map[string]interface{})
		if !ok {
			t.Fatalf("Tags attr not map: %#v", val)
		}
		if m["foo"] != "bar" {
			t.Fatalf("Bad value in tags attr: %#v", m)
		}
		{
			// The empty literal must still arrive as a (zero-length) map.
			val, _ := c.Get("meta")
			m, ok := val.(map[string]interface{})
			if !ok {
				t.Fatalf("Meta attr not map: %#v", val)
			}
			if len(m) != 0 {
				t.Fatalf("Meta attr not empty: %#v", val)
			}
		}
		return nil, nil
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_computedValueInMap plans a configuration where a
// computed attribute of one resource feeds a map value of another, and
// compares the plan against a golden string.
func TestContext2Plan_computedValueInMap(t *testing.T) {
	m := testModule(t, "plan-computed-value-in-map")
	p := testProvider("aws")
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"looked_up": {Type: cty.String, Optional: true},
				},
			},
			"aws_computed_source": {
				Attributes: map[string]*configschema.Attribute{
					"computed_read_only": {Type: cty.String, Computed: true},
				},
			},
		},
	}
	p.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
		// The computed-source resource always reports its attribute as
		// "known after apply"; everything else uses the stock diff.
		switch info.Type {
		case "aws_computed_source":
			return &InstanceDiff{
				Attributes: map[string]*ResourceAttrDiff{
					"computed_read_only": &ResourceAttrDiff{
						NewComputed: true,
					},
				},
			}, nil
		}
		return testDiffFn(info, state, c)
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// (not sure why this is repeated here; I updated some earlier code that
	// called ctx.Plan twice here, so retaining that in case it's somehow
	// important.)
	plan, diags = ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanComputedValueInMap)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
	}
}
// TestContext2Plan_moduleVariableFromSplat plans a configuration that feeds
// a splat expression into a module variable and compares the plan output
// against a golden string.
func TestContext2Plan_moduleVariableFromSplat(t *testing.T) {
	m := testModule(t, "plan-module-variable-from-splat")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"thing": {Type: cty.String, Optional: true},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// (not sure why this is repeated here; I updated some earlier code that
	// called ctx.Plan twice here, so retaining that in case it's somehow
	// important.)
	plan, diags = ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(testTerraformPlanModuleVariableFromSplat)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
	}
}
// TestContext2Plan_createBeforeDestroy_depends_datasource verifies that a
// create-before-destroy resource depending on a counted data source plans
// diffs for both resource instances and both data source instances.
func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) {
	m := testModule(t, "plan-cdb-depends-datasource")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.Number, Optional: true},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	if got := len(plan.Diff.Modules); got != 1 {
		t.Fatalf("got %d modules; want 1", got)
	}
	// Every counted instance — resource and data source alike — must have a
	// diff entry in the root module.
	moduleDiff := plan.Diff.Modules[0]
	if _, ok := moduleDiff.Resources["aws_instance.foo.0"]; !ok {
		t.Fatalf("missing diff for aws_instance.foo.0")
	}
	if _, ok := moduleDiff.Resources["aws_instance.foo.1"]; !ok {
		t.Fatalf("missing diff for aws_instance.foo.1")
	}
	if _, ok := moduleDiff.Resources["data.aws_vpc.bar.0"]; !ok {
		t.Fatalf("missing diff for data.aws_vpc.bar.0")
	}
	if _, ok := moduleDiff.Resources["data.aws_vpc.bar.1"]; !ok {
		t.Fatalf("missing diff for data.aws_vpc.bar.1")
	}
}
// interpolated lists need to be stored in the original order.
func TestContext2Plan_listOrder(t *testing.T) {
	m := testModule(t, "plan-list-order")
	p := testProvider("aws")
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	// Both resources use the same list; if ordering were unstable their
	// diffs would differ.
	rDiffs := plan.Diff.Modules[0].Resources
	rDiffA := rDiffs["aws_instance.a"]
	rDiffB := rDiffs["aws_instance.b"]
	if !rDiffA.Equal(rDiffB) {
		t.Fatal("aws_instance.a and aws_instance.b diffs should match:\n", plan)
	}
}
// Make sure ignore-changes doesn't interfere with set/list/map diffs.
// If a resource was being replaced by a RequiresNew attribute that gets
// ignored, we need to filter the diff properly to properly update rather than
// replace.
func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) {
	m := testModule(t, "plan-ignore-changes-with-flatmaps")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"user_data":   {Type: cty.String, Optional: true},
					"require_new": {Type: cty.String, Optional: true},
					"set":         {Type: cty.Map(cty.String), Optional: true},
					"lst":         {Type: cty.List(cty.String), Optional: true},
				},
			},
		},
	}
	// Prior state uses flatmap-encoded collection attributes (set.#, lst.#).
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID: "bar",
							Attributes: map[string]string{
								"user_data":   "x",
								"require_new": "",
								"set.#":       "1",
								"set.0.a":     "1",
								"lst.#":       "1",
								"lst.0":       "j",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.Diff.String())
	expected := strings.TrimSpace(testTFPlanDiffIgnoreChangesWithFlatmaps)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
	}
}
// TestContext2Plan_resourceNestedCount ensures resource sets that depend on
// the count of another resource set (ie: count of a data source that depends
// on another data source's instance count - data.x.foo.*.id) get properly
// normalized to the indexes they should be. This case comes up when there is
// an existing state (after an initial apply).
func TestContext2Plan_resourceNestedCount(t *testing.T) {
	m := testModule(t, "nested-resource-count-plan")
	p := testProvider("aws")
	p.DiffFn = testDiffFn
	// Refresh is a no-op pass-through so the state below is preserved.
	p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
		return is, nil
	}
	// Three chained counted resource sets: foo -> bar -> baz, with splat
	// dependencies recorded in state.
	s := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo0",
							Attributes: map[string]string{
								"id": "foo0",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type:     "aws_instance",
						Provider: "provider.aws",
						Primary: &InstanceState{
							ID: "foo1",
							Attributes: map[string]string{
								"id": "foo1",
							},
						},
					},
					"aws_instance.bar.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo.*"},
						Primary: &InstanceState{
							ID: "bar0",
							Attributes: map[string]string{
								"id": "bar0",
							},
						},
					},
					"aws_instance.bar.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.foo.*"},
						Primary: &InstanceState{
							ID: "bar1",
							Attributes: map[string]string{
								"id": "bar1",
							},
						},
					},
					"aws_instance.baz.0": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar.*"},
						Primary: &InstanceState{
							ID: "baz0",
							Attributes: map[string]string{
								"id": "baz0",
							},
						},
					},
					"aws_instance.baz.1": &ResourceState{
						Type:         "aws_instance",
						Provider:     "provider.aws",
						Dependencies: []string{"aws_instance.bar.*"},
						Primary: &InstanceState{
							ID: "baz1",
							Attributes: map[string]string{
								"id": "baz1",
							},
						},
					},
				},
			},
		},
	}
	ctx := testContext2(t, &ContextOpts{
		Config: m,
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: s,
	})
	// Full validate -> refresh -> plan cycle; the plan should be empty.
	diags := ctx.Validate()
	if diags.HasErrors() {
		t.Fatalf("validate errors: %s", diags.Err())
	}
	_, diags = ctx.Refresh()
	if diags.HasErrors() {
		t.Fatalf("refresh errors: %s", diags.Err())
	}
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("plan errors: %s", diags.Err())
	}
	actual := strings.TrimSpace(plan.String())
	expected := strings.TrimSpace(`
DIFF:
STATE:
aws_instance.bar.0:
ID = bar0
provider = provider.aws
Dependencies:
aws_instance.foo.*
aws_instance.bar.1:
ID = bar1
provider = provider.aws
Dependencies:
aws_instance.foo.*
aws_instance.baz.0:
ID = baz0
provider = provider.aws
Dependencies:
aws_instance.bar.*
aws_instance.baz.1:
ID = baz1
provider = provider.aws
Dependencies:
aws_instance.bar.*
aws_instance.foo.0:
ID = foo0
provider = provider.aws
aws_instance.foo.1:
ID = foo1
provider = provider.aws
`)
	if actual != expected {
		t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
	}
}
|
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package index
import (
"reflect"
"strings"
"testing"
)
// stringToPath builds a Path by splitting s on PathSeparator.
func stringToPath(s string) Path {
	return stringSliceToPath(strings.Split(s, PathSeparator))
}

// stringSliceToPath converts each string element into a Key, preserving order.
func stringSliceToPath(s []string) Path {
	path := make(Path, 0, len(s))
	for _, part := range s {
		path = append(path, Key(part))
	}
	return path
}
// TestPathEqual exercises Path.Equal over nil, empty, equal, prefix and
// nil-vs-non-nil combinations.
func TestPathEqual(t *testing.T) {
	tests := []struct {
		p, q  Path
		equal bool
	}{
		{
			Path(nil), Path(nil),
			true,
		},
		{
			// nil and empty paths compare equal to themselves.
			Path([]Key{}), Path([]Key{}),
			true,
		},
		{
			stringToPath("a"), stringToPath("a"),
			true,
		},
		{
			stringToPath("a:b"), stringToPath("a"),
			false,
		},
		{
			Path(nil), stringToPath("a"),
			false,
		},
	}
	for ii, tt := range tests {
		if tt.p.Equal(tt.q) != tt.equal {
			t.Errorf("[%d] (%#v).Equal(%#v) = %v, expected %v", ii, tt.p, tt.q, !tt.equal, tt.equal)
		}
	}
}

// TestPathContains exercises Path.Contains; note that nil/empty paths
// contain nothing, and a shorter path contains its extensions.
func TestPathContains(t *testing.T) {
	tests := []struct {
		p, q     Path
		contains bool
	}{
		{
			Path(nil), Path(nil),
			false,
		},
		{
			Path([]Key{}), Path([]Key{}),
			false,
		},
		{
			NewPath("a"), NewPath("a"),
			true,
		},
		{
			NewPath("a:b"), NewPath("a"),
			false,
		},
		{
			Path(nil), NewPath("a"),
			false,
		},
		{
			// A path contains any longer path it prefixes.
			NewPath("a:b"), NewPath("a:b:c"),
			true,
		},
	}
	for ii, tt := range tests {
		if tt.p.Contains(tt.q) != tt.contains {
			t.Errorf("[%d] (%#v).Contains(%#v) = %v, expected %v", ii, tt.p, tt.q, !tt.contains, tt.contains)
		}
	}
}

// TestOrderedIntersection checks that OrderedIntersection keeps the order of
// the first list, deduplicates, and returns an empty (non-nil) slice when
// there is no common element.
func TestOrderedIntersection(t *testing.T) {
	tests := []struct {
		in  [][]Path
		out []Path
	}{
		{
			in:  nil,
			out: []Path{},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
			},
			out: []Path{stringToPath("A")},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
				{stringToPath("B")},
			},
			out: []Path{},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
				{stringToPath("B"), stringToPath("A")},
			},
			out: []Path{stringToPath("A")},
		},
		{
			// Order follows the first list; duplicates collapse.
			in: [][]Path{
				{stringToPath("A"), stringToPath("B")},
				{stringToPath("B"), stringToPath("A")},
				{stringToPath("A"), stringToPath("B"), stringToPath("C")},
				{stringToPath("C"), stringToPath("A"), stringToPath("B"), stringToPath("B")},
			},
			out: []Path{stringToPath("B"), stringToPath("A")},
		},
	}
	for ii, tt := range tests {
		got := OrderedIntersection(tt.in...)
		if !reflect.DeepEqual(got, tt.out) {
			t.Errorf("[%d] got %#v, expected: %#v", ii, got, tt.out)
		}
	}
}

// TestIndexOfPath checks IndexOfPath returns the position of needle in
// haystack, or -1 when absent.
func TestIndexOfPath(t *testing.T) {
	tests := []struct {
		haystack []Path
		needle   Path
		idx      int
	}{
		{
			[]Path{},
			Path{},
			-1,
		},
		{
			[]Path{
				Path{"Root"},
			},
			Path{"Root"},
			0,
		}, {
			[]Path{
				Path{"Root"}, Path{"Root", "One"},
			},
			Path{"Root", "One"},
			1,
		},
	}
	for ii, tt := range tests {
		i := IndexOfPath(tt.haystack, tt.needle)
		if i != tt.idx {
			t.Errorf("[%d] IndexOfPath(%v, %v) = %d, expected %d", ii, tt.haystack, tt.needle, i, tt.idx)
		}
	}
}
Added PathFromJSONInterface tests.
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package index
import (
"reflect"
"strings"
"testing"
)
// stringToPath builds a Path by splitting s on PathSeparator.
func stringToPath(s string) Path {
	return stringSliceToPath(strings.Split(s, PathSeparator))
}

// stringSliceToPath converts each string element into a Key, preserving order.
func stringSliceToPath(s []string) Path {
	path := make(Path, 0, len(s))
	for _, part := range s {
		path = append(path, Key(part))
	}
	return path
}
// TestPathEqual exercises Path.Equal over nil, empty, equal, prefix and
// nil-vs-non-nil combinations.
func TestPathEqual(t *testing.T) {
	tests := []struct {
		p, q  Path
		equal bool
	}{
		{
			Path(nil), Path(nil),
			true,
		},
		{
			// nil and empty paths compare equal to themselves.
			Path([]Key{}), Path([]Key{}),
			true,
		},
		{
			stringToPath("a"), stringToPath("a"),
			true,
		},
		{
			stringToPath("a:b"), stringToPath("a"),
			false,
		},
		{
			Path(nil), stringToPath("a"),
			false,
		},
	}
	for ii, tt := range tests {
		if tt.p.Equal(tt.q) != tt.equal {
			t.Errorf("[%d] (%#v).Equal(%#v) = %v, expected %v", ii, tt.p, tt.q, !tt.equal, tt.equal)
		}
	}
}

// TestPathContains exercises Path.Contains; nil/empty paths contain
// nothing, and a shorter path contains its extensions.
func TestPathContains(t *testing.T) {
	tests := []struct {
		p, q     Path
		contains bool
	}{
		{
			Path(nil), Path(nil),
			false,
		},
		{
			Path([]Key{}), Path([]Key{}),
			false,
		},
		{
			NewPath("a"), NewPath("a"),
			true,
		},
		{
			NewPath("a:b"), NewPath("a"),
			false,
		},
		{
			Path(nil), NewPath("a"),
			false,
		},
		{
			// A path contains any longer path it prefixes.
			NewPath("a:b"), NewPath("a:b:c"),
			true,
		},
	}
	for ii, tt := range tests {
		if tt.p.Contains(tt.q) != tt.contains {
			t.Errorf("[%d] (%#v).Contains(%#v) = %v, expected %v", ii, tt.p, tt.q, !tt.contains, tt.contains)
		}
	}
}

// TestOrderedIntersection checks that OrderedIntersection keeps the order of
// the first list, deduplicates, and returns an empty (non-nil) slice when
// there is no common element.
func TestOrderedIntersection(t *testing.T) {
	tests := []struct {
		in  [][]Path
		out []Path
	}{
		{
			in:  nil,
			out: []Path{},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
			},
			out: []Path{stringToPath("A")},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
				{stringToPath("B")},
			},
			out: []Path{},
		},
		{
			in: [][]Path{
				{stringToPath("A")},
				{stringToPath("B"), stringToPath("A")},
			},
			out: []Path{stringToPath("A")},
		},
		{
			// Order follows the first list; duplicates collapse.
			in: [][]Path{
				{stringToPath("A"), stringToPath("B")},
				{stringToPath("B"), stringToPath("A")},
				{stringToPath("A"), stringToPath("B"), stringToPath("C")},
				{stringToPath("C"), stringToPath("A"), stringToPath("B"), stringToPath("B")},
			},
			out: []Path{stringToPath("B"), stringToPath("A")},
		},
	}
	for ii, tt := range tests {
		got := OrderedIntersection(tt.in...)
		if !reflect.DeepEqual(got, tt.out) {
			t.Errorf("[%d] got %#v, expected: %#v", ii, got, tt.out)
		}
	}
}

// TestIndexOfPath checks IndexOfPath returns the position of needle in
// haystack, or -1 when absent.
func TestIndexOfPath(t *testing.T) {
	tests := []struct {
		haystack []Path
		needle   Path
		idx      int
	}{
		{
			[]Path{},
			Path{},
			-1,
		},
		{
			[]Path{
				Path{"Root"},
			},
			Path{"Root"},
			0,
		}, {
			[]Path{
				Path{"Root"}, Path{"Root", "One"},
			},
			Path{"Root", "One"},
			1,
		},
	}
	for ii, tt := range tests {
		i := IndexOfPath(tt.haystack, tt.needle)
		if i != tt.idx {
			t.Errorf("[%d] IndexOfPath(%v, %v) = %d, expected %d", ii, tt.haystack, tt.needle, i, tt.idx)
		}
	}
}

// TestPathFromJSONInterface checks decoding of generic JSON values into
// Paths: nil input and a nil element both yield a nil Path; an empty
// string element yields a single empty Key.
// (Errors from PathFromJSONInterface are deliberately ignored here; the
// table only asserts the returned Path.)
func TestPathFromJSONInterface(t *testing.T) {
	tests := []struct {
		in  interface{}
		out Path
	}{
		{
			interface{}(nil),
			Path(nil),
		},
		{
			[]interface{}{nil},
			Path(nil),
		},
		{
			[]interface{}{""},
			Path{""},
		},
	}
	for ii, tt := range tests {
		got, _ := PathFromJSONInterface(tt.in)
		if !reflect.DeepEqual(got, tt.out) {
			t.Errorf("[%d] got: %#v, expected %#v", ii, got, tt.out)
		}
	}
}
|
// Package lg provides looking glass methods for selected looking glasses
// Cogent Carrier Looking Glass ASN 174
package lg
import (
"bufio"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"sort"
"strings"
)
// A Cogent represents a Cogent looking glass request.
type Cogent struct {
	Host  string   // target host or IP address
	IPv   string   // "ipv4" or "ipv6"
	Node  string   // currently selected node name
	Nodes []string // cached list of available node names
}

// Package-level node-name -> form-value maps, populated by FetchNodes via
// GetNodes. NOTE(review): mutable package state shared by all Cogent values.
var (
	cogentNodes       = map[string]string{}
	cogentBGPNodes    = map[string]string{}
	cogentDefaultNode = "US - Los Angeles"
)
// Set configures host and ip version
func (p *Cogent) Set(host, version string) {
	p.Host = host
	p.IPv = version
	// Fall back to the default node if none has been chosen yet.
	if p.Node == "" {
		p.Node = cogentDefaultNode
	}
}

// GetDefaultNode returns the Cogent default node.
func (p *Cogent) GetDefaultNode() string {
	return cogentDefaultNode
}
// GetNodes returns all Cogent nodes (US and International)
func (p *Cogent) GetNodes() []string {
	// Memory cache
	// NOTE(review): the threshold is > 1, so a single cached node would be
	// re-fetched — presumably intentional as a "more than trivial" check.
	if len(p.Nodes) > 1 {
		return p.Nodes
	}
	// Refresh the package-level maps, then return the sorted ping/trace
	// node names.
	cogentNodes, cogentBGPNodes = p.FetchNodes()
	var nodes []string
	for node := range cogentNodes {
		nodes = append(nodes, node)
	}
	sort.Strings(nodes)
	p.Nodes = nodes
	return nodes
}
// ChangeNode selects node as the active node when it is one of the known
// nodes; otherwise the node is marked "NA" and a hint is printed.
func (p *Cogent) ChangeNode(node string) {
	for _, known := range p.Nodes {
		if known == node {
			p.Node = node
			return
		}
	}
	p.Node = "NA"
	println("Invalid node please press tab after node command to show the valid nodes")
}
// Ping tries to connect Cogent's ping looking glass through HTTP
// Returns the result
func (p *Cogent) Ping() (string, error) {
	// Basic validate
	if p.Node == "NA" || len(p.Host) < 5 {
		print("Invalid node or host/ip address")
		return "", errors.New("error")
	}
	var cmd = "P4"
	if p.IPv == "ipv6" {
		cmd = "P6"
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {cmd}, "DST": {p.Host}, "LOC": {cogentNodes[p.Node]}})
	if err != nil {
		return "", err
	}
	// Close the body on every path; previously the defer came after the
	// status check, leaking the body when the server returned non-200.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Fixed copy/paste: this is the Cogent looking glass, not Level3.
		return "", errors.New("error: cogent looking glass is not available")
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Extract the command output from the <pre> block of the HTML response.
	r, _ := regexp.Compile(`<pre>(?s)(.*?)</pre>`)
	b := r.FindStringSubmatch(string(body))
	if len(b) > 0 {
		return b[1], nil
	}
	return "", errors.New("error")
}
// Trace gets traceroute information from Cogent.
// Lines are streamed on the returned channel, which is closed when the
// response is exhausted (or immediately on request failure).
func (p *Cogent) Trace() chan string {
	c := make(chan string)
	var cmd = "T4"
	if p.IPv == "ipv6" {
		cmd = "T6"
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {cmd}, "DST": {p.Host}, "LOC": {cogentNodes[p.Node]}})
	if err != nil {
		// Previously only logged, then dereferenced the nil resp in the
		// goroutine below and panicked. Close the channel and bail out.
		println(err.Error())
		close(c)
		return c
	}
	go func() {
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			l := scanner.Text()
			// Keep only the traceroute header and numbered hop lines.
			m, _ := regexp.MatchString(`^(traceroute|\s*\d{1,2})`, l)
			if m {
				l = replaceASNTrace(l)
				c <- l
			}
		}
		close(c)
	}()
	return c
}
// BGP gets bgp information from cogent.
// If the current node has no BGP support, the supported nodes are printed
// and the channel is closed without data.
func (p *Cogent) BGP() chan string {
	c := make(chan string)
	if _, ok := cogentBGPNodes[p.Node]; !ok {
		println("current node doesn't support bgp, please select one of the below nodes:")
		go func() {
			// golint: single-variable range (the value was unused).
			for n := range cogentBGPNodes {
				println(n)
			}
			close(c)
		}()
		return c
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {"BGP"}, "DST": {p.Host}, "LOC": {cogentBGPNodes[p.Node]}})
	if err != nil {
		// Previously only logged, then dereferenced the nil resp in the
		// goroutine below and panicked. Close the channel and bail out.
		println(err.Error())
		close(c)
		return c
	}
	go func() {
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			l := scanner.Text()
			c <- l
		}
		close(c)
	}()
	return c
}
// FetchNodes returns all available nodes through HTTP.
// The first map holds ping/trace nodes, the second BGP-capable nodes.
func (p *Cogent) FetchNodes() (map[string]string, map[string]string) {
	var (
		nodes    = make(map[string]string, 100)
		bgpNodes = make(map[string]string, 50)
	)
	resp, err := http.Get("http://www.cogentco.com/lookingglass.php")
	if err != nil {
		println("error: cogent looking glass unreachable (1)")
		return map[string]string{}, map[string]string{}
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		println("error: cogent looking glass unreachable (2)" + err.Error())
		return map[string]string{}, map[string]string{}
	}
	body := string(b)
	// The "default:" marker splits BGP options (before) from ping/trace
	// options (after). Guard against a missing marker: body[i:] with i == -1
	// previously panicked when the page layout changed.
	i := strings.Index(body, "default:")
	if i < 0 {
		println("error: cogent looking glass unreachable (3)")
		return map[string]string{}, map[string]string{}
	}
	// Both sections use the same Option("name","value") pattern; compile once.
	r := regexp.MustCompile(`(?is)Option\("([\w|,|\s|-]+)","([\w|\d]+)"`)
	// ping, trace nodes
	for _, v := range r.FindAllStringSubmatch(body[i:], -1) {
		nodes[v[1]] = v[2]
	}
	// bgp nodes
	for _, v := range r.FindAllStringSubmatch(body[:i], -1) {
		bgpNodes[v[1]] = v[2]
	}
	return nodes, bgpNodes
}
fixed loop (golint)
// Package lg provides looking glass methods for selected looking glasses
// Cogent Carrier Looking Glass ASN 174
package lg
import (
"bufio"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"sort"
"strings"
)
// A Cogent represents a Cogent looking glass request.
type Cogent struct {
	Host  string   // target host or IP address
	IPv   string   // "ipv4" or "ipv6"
	Node  string   // currently selected node name
	Nodes []string // cached list of available node names
}

// Package-level node-name -> form-value maps, populated by FetchNodes via
// GetNodes. NOTE(review): mutable package state shared by all Cogent values.
var (
	cogentNodes       = map[string]string{}
	cogentBGPNodes    = map[string]string{}
	cogentDefaultNode = "US - Los Angeles"
)
// Set configures host and ip version
func (p *Cogent) Set(host, version string) {
	p.Host = host
	p.IPv = version
	// Fall back to the default node if none has been chosen yet.
	if p.Node == "" {
		p.Node = cogentDefaultNode
	}
}

// GetDefaultNode returns the Cogent default node.
func (p *Cogent) GetDefaultNode() string {
	return cogentDefaultNode
}
// GetNodes returns all Cogent nodes (US and International)
func (p *Cogent) GetNodes() []string {
	// Memory cache
	// NOTE(review): the threshold is > 1, so a single cached node would be
	// re-fetched — presumably intentional as a "more than trivial" check.
	if len(p.Nodes) > 1 {
		return p.Nodes
	}
	// Refresh the package-level maps, then return the sorted ping/trace
	// node names.
	cogentNodes, cogentBGPNodes = p.FetchNodes()
	var nodes []string
	for node := range cogentNodes {
		nodes = append(nodes, node)
	}
	sort.Strings(nodes)
	p.Nodes = nodes
	return nodes
}
// ChangeNode selects node as the active node when it is one of the known
// nodes; otherwise the node is marked "NA" and a hint is printed.
func (p *Cogent) ChangeNode(node string) {
	for _, known := range p.Nodes {
		if known == node {
			p.Node = node
			return
		}
	}
	p.Node = "NA"
	println("Invalid node please press tab after node command to show the valid nodes")
}
// Ping tries to connect Cogent's ping looking glass through HTTP
// Returns the result
func (p *Cogent) Ping() (string, error) {
	// Basic validate
	if p.Node == "NA" || len(p.Host) < 5 {
		print("Invalid node or host/ip address")
		return "", errors.New("error")
	}
	var cmd = "P4"
	if p.IPv == "ipv6" {
		cmd = "P6"
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {cmd}, "DST": {p.Host}, "LOC": {cogentNodes[p.Node]}})
	if err != nil {
		return "", err
	}
	// Close the body on every path; previously the defer came after the
	// status check, leaking the body when the server returned non-200.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Fixed copy/paste: this is the Cogent looking glass, not Level3.
		return "", errors.New("error: cogent looking glass is not available")
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Extract the command output from the <pre> block of the HTML response.
	r, _ := regexp.Compile(`<pre>(?s)(.*?)</pre>`)
	b := r.FindStringSubmatch(string(body))
	if len(b) > 0 {
		return b[1], nil
	}
	return "", errors.New("error")
}
// Trace gets traceroute information from Cogent.
// Lines are streamed on the returned channel, which is closed when the
// response is exhausted (or immediately on request failure).
func (p *Cogent) Trace() chan string {
	c := make(chan string)
	var cmd = "T4"
	if p.IPv == "ipv6" {
		cmd = "T6"
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {cmd}, "DST": {p.Host}, "LOC": {cogentNodes[p.Node]}})
	if err != nil {
		// Previously only logged, then dereferenced the nil resp in the
		// goroutine below and panicked. Close the channel and bail out.
		println(err.Error())
		close(c)
		return c
	}
	go func() {
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			l := scanner.Text()
			// Keep only the traceroute header and numbered hop lines.
			m, _ := regexp.MatchString(`^(traceroute|\s*\d{1,2})`, l)
			if m {
				l = replaceASNTrace(l)
				c <- l
			}
		}
		close(c)
	}()
	return c
}
// BGP gets bgp information from cogent.
// If the current node has no BGP support, the supported nodes are printed
// and the channel is closed without data.
func (p *Cogent) BGP() chan string {
	c := make(chan string)
	if _, ok := cogentBGPNodes[p.Node]; !ok {
		println("current node doesn't support bgp, please select one of the below nodes:")
		go func() {
			for n := range cogentBGPNodes {
				println(n)
			}
			close(c)
		}()
		return c
	}
	resp, err := http.PostForm("http://www.cogentco.com/lookingglass.php",
		url.Values{"FKT": {"go!"}, "CMD": {"BGP"}, "DST": {p.Host}, "LOC": {cogentBGPNodes[p.Node]}})
	if err != nil {
		// Previously only logged, then dereferenced the nil resp in the
		// goroutine below and panicked. Close the channel and bail out.
		println(err.Error())
		close(c)
		return c
	}
	go func() {
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			l := scanner.Text()
			c <- l
		}
		close(c)
	}()
	return c
}
// FetchNodes returns all available nodes through HTTP.
// The first map holds ping/trace nodes, the second BGP-capable nodes.
func (p *Cogent) FetchNodes() (map[string]string, map[string]string) {
	var (
		nodes    = make(map[string]string, 100)
		bgpNodes = make(map[string]string, 50)
	)
	resp, err := http.Get("http://www.cogentco.com/lookingglass.php")
	if err != nil {
		println("error: cogent looking glass unreachable (1)")
		return map[string]string{}, map[string]string{}
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		println("error: cogent looking glass unreachable (2)" + err.Error())
		return map[string]string{}, map[string]string{}
	}
	body := string(b)
	// The "default:" marker splits BGP options (before) from ping/trace
	// options (after). Guard against a missing marker: body[i:] with i == -1
	// previously panicked when the page layout changed.
	i := strings.Index(body, "default:")
	if i < 0 {
		println("error: cogent looking glass unreachable (3)")
		return map[string]string{}, map[string]string{}
	}
	// Both sections use the same Option("name","value") pattern; compile once.
	r := regexp.MustCompile(`(?is)Option\("([\w|,|\s|-]+)","([\w|\d]+)"`)
	// ping, trace nodes
	for _, v := range r.FindAllStringSubmatch(body[i:], -1) {
		nodes[v[1]] = v[2]
	}
	// bgp nodes
	for _, v := range r.FindAllStringSubmatch(body[:i], -1) {
		bgpNodes[v[1]] = v[2]
	}
	return nodes, bgpNodes
}
|
package main
import (
"database/sql"
"flag"
"fmt"
_ "github.com/mattn/go-sqlite3"
"goposm/element"
"goposm/parser"
"log"
"runtime"
"sort"
"sync"
)
// Entry records, for one PBF block, the position of the block and the
// first/last element IDs of each element kind it contains (-1 = none).
type Entry struct {
	Pos                 parser.BlockPosition // offset/size of the block in the PBF file
	NodeFirst, NodeLast int64
	WayFirst, WayLast   int64
	RelFirst, RelLast   int64
}
// NotFound is returned when an element with the requested ID is not
// present in the searched block.
type NotFound struct {
	id int64
}

// Error implements the error interface. It now includes the missing ID,
// which was previously stored but never reported.
func (e *NotFound) Error() string {
	return fmt.Sprintf("element %d not found", e.id)
}
// readNode re-parses the entry's block and returns the node with the given
// id, searching both dense and plain node groups. Parsed nodes are assumed
// sorted by ID (binary search via sort.Search). Returns *NotFound if absent.
func (entry *Entry) readNode(id int64) (*element.Node, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	stringtable := parser.NewStringTable(block.GetStringtable())
	for _, group := range block.Primitivegroup {
		// Dense-encoded nodes first.
		dense := group.GetDense()
		if dense != nil {
			parsedNodes := parser.ReadDenseNodes(dense, block, stringtable)
			if len(parsedNodes) > 0 {
				i := sort.Search(len(parsedNodes), func(i int) bool {
					return parsedNodes[i].Id >= id
				})
				if i < len(parsedNodes) && parsedNodes[i].Id == id {
					return &parsedNodes[i], nil
				}
			}
		}
		// Then plain nodes in the same group.
		parsedNodes := parser.ReadNodes(group.Nodes, block, stringtable)
		if len(parsedNodes) > 0 {
			i := sort.Search(len(parsedNodes), func(i int) bool {
				return parsedNodes[i].Id >= id
			})
			if i < len(parsedNodes) && parsedNodes[i].Id == id {
				return &parsedNodes[i], nil
			}
		}
	}
	return nil, &NotFound{id}
}
// readWay re-parses the entry's block and returns the way with the given id
// (binary search; ways assumed sorted by ID). Returns *NotFound if absent.
func (entry *Entry) readWay(id int64) (*element.Way, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	stringtable := parser.NewStringTable(block.GetStringtable())
	for _, group := range block.Primitivegroup {
		parsedWays := parser.ReadWays(group.Ways, block, stringtable)
		if len(parsedWays) > 0 {
			i := sort.Search(len(parsedWays), func(i int) bool {
				return parsedWays[i].Id >= id
			})
			if i < len(parsedWays) && parsedWays[i].Id == id {
				return &parsedWays[i], nil
			}
		}
	}
	return nil, &NotFound{id}
}
// readRel re-parses the entry's block and returns the relation with the
// given id (binary search; relations assumed sorted by ID). Returns
// *NotFound if absent.
func (entry *Entry) readRel(id int64) (*element.Relation, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	stringtable := parser.NewStringTable(block.GetStringtable())
	for _, group := range block.Primitivegroup {
		parsedRels := parser.ReadRelations(group.Relations, block, stringtable)
		if len(parsedRels) > 0 {
			i := sort.Search(len(parsedRels), func(i int) bool {
				return parsedRels[i].Id >= id
			})
			if i < len(parsedRels) && parsedRels[i].Id == id {
				return &parsedRels[i], nil
			}
		}
	}
	return nil, &NotFound{id}
}
// CreateEntry scans one PBF block and records the first and last node, way
// and relation IDs it contains (fields stay -1 when a kind is absent).
func CreateEntry(pos parser.BlockPosition) Entry {
	block := parser.ReadPrimitiveBlock(pos)
	entry := Entry{pos, -1, -1, -1, -1, -1, -1}
	for _, group := range block.Primitivegroup {
		if entry.NodeFirst == -1 {
			// For dense nodes the first delta value is the absolute first ID.
			dense := group.GetDense()
			if dense != nil && len(dense.Id) > 0 {
				entry.NodeFirst = dense.Id[0]
			}
			if len(group.Nodes) > 0 {
				entry.NodeFirst = *group.Nodes[0].Id
			}
		}
		dense := group.GetDense()
		if dense != nil && len(dense.Id) > 0 {
			// Dense IDs are delta-encoded; summing all deltas yields the
			// last absolute ID of this group.
			var id int64
			for _, idDelta := range dense.Id {
				id += idDelta
			}
			entry.NodeLast = id
		}
		if len(group.Nodes) > 0 {
			entry.NodeLast = *group.Nodes[len(group.Nodes)-1].Id
		}
		if entry.WayFirst == -1 {
			if len(group.Ways) > 0 {
				entry.WayFirst = *group.Ways[0].Id
			}
		}
		if len(group.Ways) > 0 {
			entry.WayLast = *group.Ways[len(group.Ways)-1].Id
		}
		if entry.RelFirst == -1 {
			if len(group.Relations) > 0 {
				entry.RelFirst = *group.Relations[0].Id
			}
		}
		if len(group.Relations) > 0 {
			entry.RelLast = *group.Relations[len(group.Relations)-1].Id
		}
	}
	return entry
}
// IndexCache persists block index entries in a SQLite database.
type IndexCache struct {
	filename   string
	db         *sql.DB
	insertStmt *sql.Stmt // prepared insert into the indices table
}

// NewIndex opens (or creates) the SQLite file and prepares the insert
// statement; it exits the process on any error.
// NOTE(review): Prepare assumes the `indices` table already exists — it is
// created by clear(); confirm callers run clear() on a fresh database file.
func NewIndex(filename string) *IndexCache {
	db, err := sql.Open("sqlite3", filename)
	if err != nil {
		log.Fatal(err)
	}
	insertStmt, err := db.Prepare(`
insert into indices (
node_first, node_last,
way_first, way_last,
rel_first, rel_last,
offset, size
)
values (?, ?, ?, ?, ?, ?, ?, ?)`)
	if err != nil {
		log.Fatal(err)
	}
	return &IndexCache{filename, db, insertStmt}
}
// clear drops and recreates the indices table and its lookup indexes,
// discarding any existing data. Exits the process on SQL errors.
func (index *IndexCache) clear() {
	stmts := []string{
		"drop table if exists indices",
		`create table indices (
id integer not null primary key,
node_first integer,
node_last integer,
way_first integer,
way_last integer,
rel_first integer,
rel_last integer,
offset integer,
size integer
)`,
		"create index indices_node_idx on indices (node_first)",
		"create index indices_way_idx on indices (way_first)",
		"create index indices_rel_idx on indices (rel_first)",
	}
	for _, stmt := range stmts {
		_, err := index.db.Exec(stmt)
		if err != nil {
			log.Fatalf("%q: %s\n", err, stmt)
		}
	}
}
// queryNode finds the index entry whose node ID range covers id.
// Only the node range and position fields of the returned Entry are filled.
func (index *IndexCache) queryNode(id int64) (Entry, error) {
	entry := Entry{}
	stmt, err := index.db.Prepare(
		`select node_first, node_last, offset, size
from indices
where node_first <= ? and node_last >= ?`)
	if err != nil {
		return entry, err
	}
	defer stmt.Close()
	row := stmt.QueryRow(id, id)
	err = row.Scan(&entry.NodeFirst, &entry.NodeLast, &entry.Pos.Offset, &entry.Pos.Size)
	if err != nil {
		return entry, err
	}
	return entry, nil
}

// queryWay finds the index entry whose way ID range covers id.
// Only the way range and position fields of the returned Entry are filled.
func (index *IndexCache) queryWay(id int64) (Entry, error) {
	entry := Entry{}
	stmt, err := index.db.Prepare(
		`select way_first, way_last, offset, size
from indices
where way_first <= ? and way_last >= ?`)
	if err != nil {
		return entry, err
	}
	defer stmt.Close()
	row := stmt.QueryRow(id, id)
	err = row.Scan(&entry.WayFirst, &entry.WayLast, &entry.Pos.Offset, &entry.Pos.Size)
	if err != nil {
		return entry, err
	}
	return entry, nil
}

// queryRel finds the index entry whose relation ID range covers id.
// Only the relation range and position fields of the returned Entry are filled.
func (index *IndexCache) queryRel(id int64) (Entry, error) {
	entry := Entry{}
	stmt, err := index.db.Prepare(
		`select rel_first, rel_last, offset, size
from indices
where rel_first <= ? and rel_last >= ?`)
	if err != nil {
		return entry, err
	}
	defer stmt.Close()
	row := stmt.QueryRow(id, id)
	err = row.Scan(&entry.RelFirst, &entry.RelLast, &entry.Pos.Offset, &entry.Pos.Size)
	if err != nil {
		return entry, err
	}
	return entry, nil
}
func (index *IndexCache) addEntry(entry Entry) {
_, err := index.insertStmt.Exec(
entry.NodeFirst, entry.NodeLast,
entry.WayFirst, entry.WayLast,
entry.RelFirst, entry.RelLast,
entry.Pos.Offset, entry.Pos.Size)
if err != nil {
log.Fatal(err)
}
}
func (index *IndexCache) close() {
index.insertStmt.Close()
index.db.Close()
}
var createIndex bool
var queryNode, queryWay, queryRel int64
func init() {
flag.BoolVar(&createIndex, "create-index", false, "create a new index")
flag.Int64Var(&queryNode, "node", -1, "query node")
flag.Int64Var(&queryWay, "way", -1, "query way")
flag.Int64Var(&queryRel, "rel", -1, "query relation")
}
func FillIndex(index *IndexCache, pbfFilename string) {
indices := make(chan Entry)
positions := parser.PBFBlockPositions(pbfFilename)
waitParser := sync.WaitGroup{}
for i := 0; i < runtime.NumCPU(); i++ {
waitParser.Add(1)
go func() {
for pos := range positions {
indices <- CreateEntry(pos)
}
waitParser.Done()
}()
}
go func() {
for entry := range indices {
index.addEntry(entry)
fmt.Printf("%+v\n", entry)
}
}()
waitParser.Wait()
close(indices)
}
func main() {
flag.Parse()
runtime.GOMAXPROCS(runtime.NumCPU())
index := NewIndex("/tmp/index.sqlite")
defer index.close()
if createIndex {
FillIndex(index, flag.Arg(0))
}
if queryNode != -1 {
entry, err := index.queryNode(queryNode)
if err != nil {
fmt.Println(err)
return
}
entry.Pos.Filename = flag.Arg(0)
node, err := entry.readNode(queryNode)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("queryNode:", node)
} else if queryWay != -1 {
entry, err := index.queryWay(queryWay)
if err != nil {
fmt.Println(err)
return
}
entry.Pos.Filename = flag.Arg(0)
way, err := entry.readWay(queryWay)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("queryWay:", way)
} else if queryRel != -1 {
entry, err := index.queryRel(queryRel)
if err != nil {
fmt.Println(err)
return
}
entry.Pos.Filename = flag.Arg(0)
rel, err := entry.readRel(queryRel)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("queryRel:", rel)
}
}
Load dependent nodes for ways and relations.
package main
import (
"database/sql"
"flag"
"fmt"
_ "github.com/mattn/go-sqlite3"
"goposm/element"
"goposm/parser"
"log"
"runtime"
"sort"
"sync"
)
// Entry summarizes one PBF block: where the block sits in the file
// (Pos) and the id ranges of the nodes, ways and relations it contains.
// A value of -1 means the block holds no element of that type.
type Entry struct {
	Pos                 parser.BlockPosition
	NodeFirst, NodeLast int64
	WayFirst, WayLast   int64
	RelFirst, RelLast   int64
}
// NotFound is returned when an element id is not present in the PBF
// block that the index pointed at.
type NotFound struct {
	id int64
}

// Error implements the error interface. The missing id is included in
// the message; previously the field was stored but never reported,
// which made diagnostics needlessly vague.
func (e *NotFound) Error() string {
	return fmt.Sprintf("element %d not found", e.id)
}
// readNode parses the PBF block at entry.Pos and searches it for the
// node with the given id. Both encodings that a primitive group may
// use are checked: dense nodes and plain nodes. Returns NotFound when
// the id is not in this block.
func (entry *Entry) readNode(id int64) (*element.Node, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	stringtable := parser.NewStringTable(block.GetStringtable())
	for _, group := range block.Primitivegroup {
		dense := group.GetDense()
		if dense != nil {
			parsedNodes := parser.ReadDenseNodes(dense, block, stringtable)
			if len(parsedNodes) > 0 {
				// assumes nodes within a block are sorted by id (PBF
				// convention), which makes binary search valid — TODO confirm
				i := sort.Search(len(parsedNodes), func(i int) bool {
					return parsedNodes[i].Id >= id
				})
				if i < len(parsedNodes) && parsedNodes[i].Id == id {
					return &parsedNodes[i], nil
				}
			}
		}
		// also check the non-dense encoding of the same group
		parsedNodes := parser.ReadNodes(group.Nodes, block, stringtable)
		if len(parsedNodes) > 0 {
			i := sort.Search(len(parsedNodes), func(i int) bool {
				return parsedNodes[i].Id >= id
			})
			if i < len(parsedNodes) && parsedNodes[i].Id == id {
				return &parsedNodes[i], nil
			}
		}
	}
	return nil, &NotFound{id}
}
// readWay parses the PBF block at entry.Pos and binary-searches each
// primitive group for the way with the given id. Returns NotFound when
// the id is not in this block.
func (entry *Entry) readWay(id int64) (*element.Way, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	st := parser.NewStringTable(block.GetStringtable())
	for _, grp := range block.Primitivegroup {
		ways := parser.ReadWays(grp.Ways, block, st)
		n := len(ways)
		if n == 0 {
			continue
		}
		// ways inside a block are ordered by id, so binary search applies
		idx := sort.Search(n, func(j int) bool { return ways[j].Id >= id })
		if idx != n && ways[idx].Id == id {
			return &ways[idx], nil
		}
	}
	return nil, &NotFound{id}
}
// readRel parses the PBF block at entry.Pos and searches it for the
// relation with the given id. Returns NotFound when the id is not in
// this block.
func (entry *Entry) readRel(id int64) (*element.Relation, error) {
	block := parser.ReadPrimitiveBlock(entry.Pos)
	stringtable := parser.NewStringTable(block.GetStringtable())
	for _, group := range block.Primitivegroup {
		parsedRels := parser.ReadRelations(group.Relations, block, stringtable)
		if len(parsedRels) > 0 {
			// relations within a block are assumed sorted by id — TODO confirm
			i := sort.Search(len(parsedRels), func(i int) bool {
				return parsedRels[i].Id >= id
			})
			if i < len(parsedRels) && parsedRels[i].Id == id {
				return &parsedRels[i], nil
			}
		}
	}
	return nil, &NotFound{id}
}
// CreateEntry reads the PBF block at pos and summarizes it as an
// Entry: first and last ids per element type, -1 where the block has
// no elements of that type.
func CreateEntry(pos parser.BlockPosition) Entry {
	block := parser.ReadPrimitiveBlock(pos)
	entry := Entry{pos, -1, -1, -1, -1, -1, -1}
	for _, group := range block.Primitivegroup {
		if entry.NodeFirst == -1 {
			dense := group.GetDense()
			if dense != nil && len(dense.Id) > 0 {
				// dense ids are delta encoded; the first delta is the
				// absolute id of the first node (delta from zero)
				entry.NodeFirst = dense.Id[0]
			}
			if len(group.Nodes) > 0 {
				entry.NodeFirst = *group.Nodes[0].Id
			}
		}
		dense := group.GetDense()
		if dense != nil && len(dense.Id) > 0 {
			// sum all deltas to recover the absolute id of the last dense node
			var id int64
			for _, idDelta := range dense.Id {
				id += idDelta
			}
			entry.NodeLast = id
		}
		// plain nodes store absolute ids; last element is the last id
		if len(group.Nodes) > 0 {
			entry.NodeLast = *group.Nodes[len(group.Nodes)-1].Id
		}
		if entry.WayFirst == -1 {
			if len(group.Ways) > 0 {
				entry.WayFirst = *group.Ways[0].Id
			}
		}
		if len(group.Ways) > 0 {
			entry.WayLast = *group.Ways[len(group.Ways)-1].Id
		}
		if entry.RelFirst == -1 {
			if len(group.Relations) > 0 {
				entry.RelFirst = *group.Relations[0].Id
			}
		}
		if len(group.Relations) > 0 {
			entry.RelLast = *group.Relations[len(group.Relations)-1].Id
		}
	}
	return entry
}
// IndexCache persists block index entries in a SQLite database.
type IndexCache struct {
	filename   string
	db         *sql.DB
	insertStmt *sql.Stmt // prepared insert, reused by addEntry
}
// NewIndex opens (or creates) the SQLite index database at filename
// and prepares the insert statement used by addEntry. The process is
// terminated on failure (command-line tool behaviour).
// NOTE(review): sql.Open does not connect eagerly, so an unusable path
// may only surface on first use — confirm whether a Ping is wanted.
func NewIndex(filename string) *IndexCache {
	db, err := sql.Open("sqlite3", filename)
	if err != nil {
		log.Fatal(err)
	}
	insertStmt, err := db.Prepare(`
insert into indices (
node_first, node_last,
way_first, way_last,
rel_first, rel_last,
offset, size
)
values (?, ?, ?, ?, ?, ?, ?, ?)`)
	if err != nil {
		log.Fatal(err)
	}
	return &IndexCache{filename, db, insertStmt}
}
// clear drops any existing indices table and recreates it together
// with the per-element-type lookup indexes. Aborts the process if any
// statement fails.
func (index *IndexCache) clear() {
	schema := []string{
		"drop table if exists indices",
		`create table indices (
id integer not null primary key,
node_first integer,
node_last integer,
way_first integer,
way_last integer,
rel_first integer,
rel_last integer,
offset integer,
size integer
)`,
		"create index indices_node_idx on indices (node_first)",
		"create index indices_way_idx on indices (way_first)",
		"create index indices_rel_idx on indices (rel_first)",
	}
	for _, stmt := range schema {
		if _, err := index.db.Exec(stmt); err != nil {
			log.Fatalf("%q: %s\n", err, stmt)
		}
	}
}
// queryNode looks up the index entry for the block whose node id range
// covers id (node_first <= id <= node_last). The scan error (including
// sql.ErrNoRows when no block matches) is returned unchanged.
func (index *IndexCache) queryNode(id int64) (Entry, error) {
	entry := Entry{}
	// QueryRow prepares, executes and releases the statement in one
	// call; the previous per-call Prepare/Close pair did the same work
	// with extra round trips and boilerplate.
	row := index.db.QueryRow(
		`select node_first, node_last, offset, size
from indices
where node_first <= ? and node_last >= ?`, id, id)
	if err := row.Scan(&entry.NodeFirst, &entry.NodeLast, &entry.Pos.Offset, &entry.Pos.Size); err != nil {
		return entry, err
	}
	return entry, nil
}
// queryWay looks up the index entry for the block whose way id range
// covers id (way_first <= id <= way_last). The scan error (including
// sql.ErrNoRows when no block matches) is returned unchanged.
func (index *IndexCache) queryWay(id int64) (Entry, error) {
	entry := Entry{}
	// QueryRow avoids the per-call Prepare/Close of the original.
	row := index.db.QueryRow(
		`select way_first, way_last, offset, size
from indices
where way_first <= ? and way_last >= ?`, id, id)
	if err := row.Scan(&entry.WayFirst, &entry.WayLast, &entry.Pos.Offset, &entry.Pos.Size); err != nil {
		return entry, err
	}
	return entry, nil
}
// queryRel looks up the index entry for the block whose relation id
// range covers id (rel_first <= id <= rel_last). The scan error
// (including sql.ErrNoRows when no block matches) is returned unchanged.
func (index *IndexCache) queryRel(id int64) (Entry, error) {
	entry := Entry{}
	// QueryRow avoids the per-call Prepare/Close of the original.
	row := index.db.QueryRow(
		`select rel_first, rel_last, offset, size
from indices
where rel_first <= ? and rel_last >= ?`, id, id)
	if err := row.Scan(&entry.RelFirst, &entry.RelLast, &entry.Pos.Offset, &entry.Pos.Size); err != nil {
		return entry, err
	}
	return entry, nil
}
// addEntry inserts one block summary using the statement prepared in
// NewIndex. The process is terminated on failure.
func (index *IndexCache) addEntry(entry Entry) {
	_, err := index.insertStmt.Exec(
		entry.NodeFirst, entry.NodeLast,
		entry.WayFirst, entry.WayLast,
		entry.RelFirst, entry.RelLast,
		entry.Pos.Offset, entry.Pos.Size)
	if err != nil {
		log.Fatal(err)
	}
}
// close releases the prepared insert statement and then the database
// connection (in that order, matching their creation order).
func (index *IndexCache) close() {
	index.insertStmt.Close()
	index.db.Close()
}
// Command-line flags: -create-index rebuilds the index from a PBF
// file; -node/-way/-rel query a single element by id. The sentinel -1
// means "not requested".
var createIndex bool
var queryNode, queryWay, queryRel int64

func init() {
	flag.BoolVar(&createIndex, "create-index", false, "create a new index")
	flag.Int64Var(&queryNode, "node", -1, "query node")
	flag.Int64Var(&queryWay, "way", -1, "query way")
	flag.Int64Var(&queryRel, "rel", -1, "query relation")
}
// FillIndex scans all blocks of the PBF file concurrently and stores
// one index entry per block.
//
// Bug fixed: the original returned right after close(indices), without
// waiting for the consumer goroutine — entries still buffered in the
// channel could be lost and main might close the database while
// inserts were in flight. A done channel now makes the writer drain
// completely before FillIndex returns.
func FillIndex(index *IndexCache, pbfFilename string) {
	indices := make(chan Entry)
	positions := parser.PBFBlockPositions(pbfFilename)
	waitParser := sync.WaitGroup{}
	for i := 0; i < runtime.NumCPU(); i++ {
		waitParser.Add(1)
		go func() {
			defer waitParser.Done()
			for pos := range positions {
				indices <- CreateEntry(pos)
			}
		}()
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		for entry := range indices {
			index.addEntry(entry)
			fmt.Printf("%+v\n", entry)
		}
	}()
	waitParser.Wait()
	close(indices)
	<-done // ensure every entry has been inserted before returning
}
// loadNode fetches a single node: the index names the PBF block that
// holds the id, then that block is parsed and searched.
// The PBF filename is taken from the first positional argument.
func loadNode(id int64, index *IndexCache) (*element.Node, error) {
	entry, err := index.queryNode(id)
	if err != nil {
		return nil, err
	}
	entry.Pos.Filename = flag.Arg(0)
	return entry.readNode(id)
}
// loadWay fetches a single way: the index names the PBF block that
// holds the id, then that block is parsed and searched.
// The PBF filename is taken from the first positional argument.
func loadWay(id int64, index *IndexCache) (*element.Way, error) {
	entry, err := index.queryWay(id)
	if err != nil {
		return nil, err
	}
	entry.Pos.Filename = flag.Arg(0)
	return entry.readWay(id)
}
// loadRel fetches a single relation: the index names the PBF block
// that holds the id, then that block is parsed and searched.
// The PBF filename is taken from the first positional argument.
func loadRel(id int64, index *IndexCache) (*element.Relation, error) {
	entry, err := index.queryRel(id)
	if err != nil {
		return nil, err
	}
	entry.Pos.Filename = flag.Arg(0)
	return entry.readRel(id)
}
// main wires the flags together: optionally (re)build the index from
// the PBF file given as the first positional argument, then answer at
// most one of -node/-way/-rel. For ways and relations the dependent
// elements (member ways, referenced nodes) are loaded and printed too.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	// NOTE(review): index path is hard-coded; consider a flag.
	index := NewIndex("/tmp/index.sqlite")
	defer index.close()
	if createIndex {
		FillIndex(index, flag.Arg(0))
	}
	if queryNode != -1 {
		node, err := loadNode(queryNode, index)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("queryNode:", node)
	} else if queryWay != -1 {
		way, err := loadWay(queryWay, index)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("queryWay:", way)
		// resolve every node the way references
		for _, nodeId := range way.Refs {
			node, err := loadNode(nodeId, index)
			if err != nil {
				fmt.Println(err, nodeId)
				return
			}
			fmt.Println("\t", node)
		}
	} else if queryRel != -1 {
		rel, err := loadRel(queryRel, index)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("queryRel:", rel)
		// NOTE(review): members are assumed to be ways; node or
		// relation members would fail the loadWay lookup here.
		for _, member := range rel.Members {
			way, err := loadWay(member.Id, index)
			if err != nil {
				fmt.Println(err, member.Id)
				return
			}
			fmt.Println("\t", way)
			for _, nodeId := range way.Refs {
				node, err := loadNode(nodeId, index)
				if err != nil {
					fmt.Println(err, nodeId, node)
					return
				}
				fmt.Println("\t\t", node)
			}
		}
	}
}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)
// Test values for the stream test.
// One of each JSON kind.
var streamTest = []interface{}{
0.1,
"hello",
nil,
true,
false,
[]interface{}{"a", "b", "c"},
map[string]interface{}{"K": "Kelvin", "ß": "long s"},
3.14, // another value to make sure something can follow map
}
var streamEncoded = `0.1
"hello"
null
true
false
["a","b","c"]
{"ß":"long s","K":"Kelvin"}
3.14
`
func TestEncoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
var buf bytes.Buffer
enc := NewEncoder(&buf)
for j, v := range streamTest[0:i] {
if err := enc.Encode(v); err != nil {
t.Fatalf("encode #%d: %v", j, err)
}
}
if have, want := buf.String(), nlines(streamEncoded, i); have != want {
t.Errorf("encoding %d items: mismatch", i)
diff(t, []byte(have), []byte(want))
break
}
}
}
func TestDecoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
// Use stream without newlines as input,
// just to stress the decoder even more.
// Our test input does not include back-to-back numbers.
// Otherwise stripping the newlines would
// merge two adjacent JSON values.
var buf bytes.Buffer
for _, c := range nlines(streamEncoded, i) {
if c != '\n' {
buf.WriteRune(c)
}
}
out := make([]interface{}, i)
dec := NewDecoder(&buf)
for j := range out {
if err := dec.Decode(&out[j]); err != nil {
t.Fatalf("decode #%d/%d: %v", j, i, err)
}
}
if !reflect.DeepEqual(out, streamTest[0:i]) {
t.Errorf("decoding %d items: mismatch", i)
for j := range out {
if !reflect.DeepEqual(out[j], streamTest[j]) {
t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
}
}
break
}
}
}
func TestDecoderBuffered(t *testing.T) {
r := strings.NewReader(`{"Name": "Gopher"} extra `)
var m struct {
Name string
}
d := NewDecoder(r)
err := d.Decode(&m)
if err != nil {
t.Fatal(err)
}
if m.Name != "Gopher" {
t.Errorf("Name = %q; want Gopher", m.Name)
}
rest, err := ioutil.ReadAll(d.Buffered())
if err != nil {
t.Fatal(err)
}
if g, w := string(rest), " extra "; g != w {
t.Errorf("Remaining = %q; want %q", g, w)
}
}
// nlines returns the prefix of s that contains the first n
// newline-terminated lines. When s holds fewer than n newlines, all
// of s is returned; n <= 0 yields the empty string.
func nlines(s string, n int) string {
	if n <= 0 {
		return ""
	}
	remaining := n
	for i, c := range s {
		if c != '\n' {
			continue
		}
		remaining--
		if remaining == 0 {
			return s[:i+1]
		}
	}
	return s
}
func TestRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
const raw = `["\u0056",null]`
const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if string([]byte(*data.Id)) != raw {
t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
func TestNullRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
data.Id = new(RawMessage)
const msg = `{"X":0.1,"Id":null,"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if data.Id != nil {
t.Fatalf("Raw mismatch: have non-nil, want nil")
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
var blockingTests = []string{
`{"x": 1}`,
`[1, 2, 3]`,
}
func TestBlocking(t *testing.T) {
for _, enc := range blockingTests {
r, w := net.Pipe()
go w.Write([]byte(enc))
var val interface{}
// If Decode reads beyond what w.Write writes above,
// it will block, and the test will deadlock.
if err := NewDecoder(r).Decode(&val); err != nil {
t.Errorf("decoding %s: %v", enc, err)
}
r.Close()
w.Close()
}
}
func BenchmarkEncoderEncode(b *testing.B) {
b.ReportAllocs()
type T struct {
X, Y string
}
v := &T{"foo", "bar"}
for i := 0; i < b.N; i++ {
if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
b.Fatal(err)
}
}
}
type tokenStreamCase struct {
json string
expTokens []interface{}
}
type decodeThis struct {
v interface{}
}
var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
// streaming token cases
{json: `10`, expTokens: []interface{}{float64(10)}},
{json: ` [10] `, expTokens: []interface{}{
Delim('['), float64(10), Delim(']')}},
{json: ` [false,10,"b"] `, expTokens: []interface{}{
Delim('['), false, float64(10), "b", Delim(']')}},
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a", float64(1), Delim('}')}},
{json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim('{'), "a", float64(2), Delim('}'),
Delim(']')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim(']'), Delim('}')}},
// streaming tokens with intermittent Decode()
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{float64(1)},
Delim('}')}},
{json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{map[string]interface{}{"a": float64(2)}},
Delim(']')}},
{json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']'), Delim('}')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{[]interface{}{
map[string]interface{}{"a": float64(1)},
}},
Delim('}')}},
{json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{&SyntaxError{"expected comma after array element", 0}},
}},
{json: `{ "a" 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{&SyntaxError{"expected colon after object key", 0}},
}},
}
func TestDecodeInStream(t *testing.T) {
for ci, tcase := range tokenStreamCases {
dec := NewDecoder(strings.NewReader(tcase.json))
for i, etk := range tcase.expTokens {
var tk interface{}
var err error
if dt, ok := etk.(decodeThis); ok {
etk = dt.v
err = dec.Decode(&tk)
} else {
tk, err = dec.Token()
}
if experr, ok := etk.(error); ok {
if err == nil || err.Error() != experr.Error() {
t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
}
break
} else if err == io.EOF {
t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
break
} else if err != nil {
t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
break
}
if !reflect.DeepEqual(tk, etk) {
t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
break
}
}
}
}
const raw = `{ "foo": "bar" }`

// makeHTTP starts a throwaway HTTP server that serves raw, performs a
// GET against it and returns the response body.
//
// Bug fixed: the server was shut down via defer while the caller was
// still free to stream the body, racing the teardown. The body is now
// read fully before the server closes and handed back as an in-memory
// ReadCloser with identical read/EOF behaviour.
func makeHTTP() io.ReadCloser {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(raw))
	})
	ts := httptest.NewServer(mux)
	defer ts.Close()
	res, err := http.Get(ts.URL)
	if err != nil {
		log.Fatalf("GET failed: %v", err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatalf("reading body failed: %v", err)
	}
	return ioutil.NopCloser(bytes.NewReader(body))
}
// TestHttpDecoding decodes a JSON object fetched over HTTP and checks
// that a second Decode on the drained body yields io.EOF.
func TestHttpDecoding(t *testing.T) {
	foo := struct {
		Foo string
	}{}
	rc := makeHTTP()
	defer rc.Close()
	d := NewDecoder(rc)
	err := d.Decode(&foo)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if foo.Foo != "bar" {
		t.Errorf("Expected \"bar\", was %v", foo.Foo)
	}
	// make sure we get the EOF the second time
	err = d.Decode(&foo)
	if err != io.EOF {
		t.Errorf("Expected io.EOF, was %v", err)
	}
}
encoding/json: test style tweaks
Rename test name from Http to HTTP, and fix some style nits.
Change-Id: I00fe1cecd69ca2f50be86a76ec90031c2f921707
Reviewed-on: https://go-review.googlesource.com/12760
Reviewed-by: Andrew Gerrand <395a7d33bec8475c9b83b7d440f141bcbd994aa5@golang.org>
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)
// Test values for the stream test.
// One of each JSON kind.
var streamTest = []interface{}{
0.1,
"hello",
nil,
true,
false,
[]interface{}{"a", "b", "c"},
map[string]interface{}{"K": "Kelvin", "ß": "long s"},
3.14, // another value to make sure something can follow map
}
var streamEncoded = `0.1
"hello"
null
true
false
["a","b","c"]
{"ß":"long s","K":"Kelvin"}
3.14
`
func TestEncoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
var buf bytes.Buffer
enc := NewEncoder(&buf)
for j, v := range streamTest[0:i] {
if err := enc.Encode(v); err != nil {
t.Fatalf("encode #%d: %v", j, err)
}
}
if have, want := buf.String(), nlines(streamEncoded, i); have != want {
t.Errorf("encoding %d items: mismatch", i)
diff(t, []byte(have), []byte(want))
break
}
}
}
func TestDecoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
// Use stream without newlines as input,
// just to stress the decoder even more.
// Our test input does not include back-to-back numbers.
// Otherwise stripping the newlines would
// merge two adjacent JSON values.
var buf bytes.Buffer
for _, c := range nlines(streamEncoded, i) {
if c != '\n' {
buf.WriteRune(c)
}
}
out := make([]interface{}, i)
dec := NewDecoder(&buf)
for j := range out {
if err := dec.Decode(&out[j]); err != nil {
t.Fatalf("decode #%d/%d: %v", j, i, err)
}
}
if !reflect.DeepEqual(out, streamTest[0:i]) {
t.Errorf("decoding %d items: mismatch", i)
for j := range out {
if !reflect.DeepEqual(out[j], streamTest[j]) {
t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
}
}
break
}
}
}
func TestDecoderBuffered(t *testing.T) {
r := strings.NewReader(`{"Name": "Gopher"} extra `)
var m struct {
Name string
}
d := NewDecoder(r)
err := d.Decode(&m)
if err != nil {
t.Fatal(err)
}
if m.Name != "Gopher" {
t.Errorf("Name = %q; want Gopher", m.Name)
}
rest, err := ioutil.ReadAll(d.Buffered())
if err != nil {
t.Fatal(err)
}
if g, w := string(rest), " extra "; g != w {
t.Errorf("Remaining = %q; want %q", g, w)
}
}
// nlines returns the prefix of s that contains the first n
// newline-terminated lines. When s holds fewer than n newlines, all
// of s is returned; n <= 0 yields the empty string.
func nlines(s string, n int) string {
	if n <= 0 {
		return ""
	}
	end := 0
	for ; n > 0; n-- {
		nl := strings.IndexByte(s[end:], '\n')
		if nl < 0 {
			return s
		}
		end += nl + 1
	}
	return s[:end]
}
func TestRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
const raw = `["\u0056",null]`
const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if string([]byte(*data.Id)) != raw {
t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
func TestNullRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
data.Id = new(RawMessage)
const msg = `{"X":0.1,"Id":null,"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if data.Id != nil {
t.Fatalf("Raw mismatch: have non-nil, want nil")
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
var blockingTests = []string{
`{"x": 1}`,
`[1, 2, 3]`,
}
func TestBlocking(t *testing.T) {
for _, enc := range blockingTests {
r, w := net.Pipe()
go w.Write([]byte(enc))
var val interface{}
// If Decode reads beyond what w.Write writes above,
// it will block, and the test will deadlock.
if err := NewDecoder(r).Decode(&val); err != nil {
t.Errorf("decoding %s: %v", enc, err)
}
r.Close()
w.Close()
}
}
func BenchmarkEncoderEncode(b *testing.B) {
b.ReportAllocs()
type T struct {
X, Y string
}
v := &T{"foo", "bar"}
for i := 0; i < b.N; i++ {
if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
b.Fatal(err)
}
}
}
type tokenStreamCase struct {
json string
expTokens []interface{}
}
type decodeThis struct {
v interface{}
}
var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
// streaming token cases
{json: `10`, expTokens: []interface{}{float64(10)}},
{json: ` [10] `, expTokens: []interface{}{
Delim('['), float64(10), Delim(']')}},
{json: ` [false,10,"b"] `, expTokens: []interface{}{
Delim('['), false, float64(10), "b", Delim(']')}},
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a", float64(1), Delim('}')}},
{json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim('{'), "a", float64(2), Delim('}'),
Delim(']')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim(']'), Delim('}')}},
// streaming tokens with intermittent Decode()
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{float64(1)},
Delim('}')}},
{json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{map[string]interface{}{"a": float64(2)}},
Delim(']')}},
{json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']'), Delim('}')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{[]interface{}{
map[string]interface{}{"a": float64(1)},
}},
Delim('}')}},
{json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{&SyntaxError{"expected comma after array element", 0}},
}},
{json: `{ "a" 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{&SyntaxError{"expected colon after object key", 0}},
}},
}
func TestDecodeInStream(t *testing.T) {
for ci, tcase := range tokenStreamCases {
dec := NewDecoder(strings.NewReader(tcase.json))
for i, etk := range tcase.expTokens {
var tk interface{}
var err error
if dt, ok := etk.(decodeThis); ok {
etk = dt.v
err = dec.Decode(&tk)
} else {
tk, err = dec.Token()
}
if experr, ok := etk.(error); ok {
if err == nil || err.Error() != experr.Error() {
t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
}
break
} else if err == io.EOF {
t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
break
} else if err != nil {
t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
break
}
if !reflect.DeepEqual(tk, etk) {
t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
break
}
}
}
}
// Test from golang.org/issue/11893
// TestHTTPDecoding decodes a JSON object streamed from a local test
// server and checks that a second Decode on the drained body yields
// io.EOF. The server stays up (ts.Close deferred to test end) while
// the body is being read.
// Test from golang.org/issue/11893
func TestHTTPDecoding(t *testing.T) {
	const raw = `{ "foo": "bar" }`
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(raw))
	}))
	defer ts.Close()
	res, err := http.Get(ts.URL)
	if err != nil {
		log.Fatalf("GET failed: %v", err)
	}
	defer res.Body.Close()
	foo := struct {
		Foo string
	}{}
	d := NewDecoder(res.Body)
	err = d.Decode(&foo)
	if err != nil {
		t.Fatalf("Decode: %v", err)
	}
	if foo.Foo != "bar" {
		t.Errorf("decoded %q; want \"bar\"", foo.Foo)
	}
	// make sure we get the EOF the second time
	err = d.Decode(&foo)
	if err != io.EOF {
		t.Errorf("err = %v; want io.EOF", err)
	}
}
|
// Package goarabic contains utility functions for working with Arabic strings.
package goarabic
// Reverse returns its argument string reversed rune-wise left to right.
// Reverse returns its argument string reversed rune-wise left to right.
func Reverse(s string) string {
	runes := []rune(s)
	n := len(runes)
	out := make([]rune, n)
	for i, r := range runes {
		out[n-1-i] = r
	}
	return string(out)
}
// SmartLength returns the length of the given string
// without considering the Arabic Vowels (Tashkeel).
// SmartLength returns the length of the given string
// without considering the Arabic Vowels (Tashkeel).
func SmartLength(s *string) int {
	// len() use int as return value, so we'd better follow for compatibility
	length := 0
	for _, value := range *s {
		// runes present in the package-level tashkeel set do not count
		if tashkeel[value] {
			continue
		}
		length++
	}
	return length
}
// RemoveTashkeel returns its argument as rune-wise string without Arabic vowels (Tashkeel).
// RemoveTashkeel returns its argument as rune-wise string without Arabic vowels (Tashkeel).
func RemoveTashkeel(s string) string {
	// output can never exceed the byte length of the input, so
	// pre-size the slice once and avoid reallocations
	out := make([]rune, 0, len(s))
	for _, r := range s {
		if !tashkeel[r] {
			out = append(out, r)
		}
	}
	return string(out)
}
// RemoveTatweel returns its argument as rune-wise string without Arabic Tatweel character.
// RemoveTatweel returns its argument as rune-wise string without Arabic Tatweel character.
func RemoveTatweel(s string) string {
	kept := make([]rune, 0, len(s))
	for _, r := range s {
		if !TATWEEL.equals(r) {
			kept = append(kept, r)
		}
	}
	return string(kept)
}
// getCharGlyph selects the contextual glyph form (isolated, beginning,
// medium or final) for currentChar, based on whether its neighbours
// are Arabic letters. A currentChar outside the alphabet is returned
// unchanged.
//
// Cleanups vs. the original (behaviour unchanged): the `s, _ := range`
// form flagged by vet is gone, the inner range no longer shadows the
// outer loop variable, and the dead `glyph` copy is removed.
func getCharGlyph(previousChar, currentChar, nextChar rune) rune {
	previousIn := false // previousChar is in the Arabic alphabet
	nextIn := false     // nextChar is in the Arabic alphabet
	for _, s := range alphabet {
		if s.equals(previousChar) {
			previousIn = true
		}
		if s.equals(nextChar) {
			nextIn = true
		}
	}
	for _, s := range alphabet {
		if !s.equals(currentChar) { // currentChar in the Arabic alphabet?
			continue
		}
		if previousIn && nextIn {
			// between two Arabic letters: medium form, unless the
			// previous letter does not connect forward
			for harf := range beggining_after {
				if harf.equals(previousChar) {
					return getHarf(currentChar).Beggining
				}
			}
			return getHarf(currentChar).Medium
		}
		if nextIn { // word-initial: previous char is not Arabic
			return getHarf(currentChar).Beggining
		}
		if previousIn { // word-final, unless previous does not connect
			for harf := range beggining_after {
				if harf.equals(previousChar) {
					return getHarf(currentChar).Isolated
				}
			}
			return getHarf(currentChar).Final
		}
		// neither neighbour is Arabic
		return getHarf(currentChar).Isolated
	}
	return currentChar
}
// equals() return if true if the given Arabic char is alphabetically equal to
// the current Harf regardless its shape (Glyph)
func (c *Harf) equals(char rune) bool {
switch char {
case c.Unicode:
return true
case c.Beggining:
return true
case c.Isolated:
return true
case c.Medium:
return true
case c.Final:
return true
default:
return false
}
}
// getHarf gets the correspondent Harf for the given Arabic char
func getHarf(char rune) Harf {
for _, s := range alphabet {
if s.equals(char) {
return s
}
}
return Harf{Unicode: char, Isolated: char, Medium: char, Final: char}
}
//removeAllNonAlphabetChars deletes all character which are not included in Arabic Alphabet
func removeAllNonArabicChars(text string) string {
runes := []rune(text)
newText := []rune{}
for _, current := range runes {
inAlphabet := false
for _, s := range alphabet {
if s.equals(current) {
inAlphabet = true
}
}
if inAlphabet {
newText = append(newText, current)
}
}
return string(newText)
}
// ToGlyph returns the glyph representation of the given text
func ToGlyph(text string) string {
text = removeAllNonArabicChars(text)
var prev, next rune
runes := []rune(text)
length := len(runes)
newText := make([]rune, 0, length)
for i, current := range runes {
// get the previous char
if (i - 1) < 0 {
prev = 0
} else {
prev = runes[i-1]
}
// get the next char
if (i + 1) <= length-1 {
next = runes[i+1]
} else {
next = 0
}
// get the current char representation or return the same if unnecessary
glyph := getCharGlyph(prev, current, next)
// append the new char representation to the newText
newText = append(newText, glyph)
}
return string(newText)
}
// RemoveTashkeel returns its argument as rune-wise string without Arabic vowels (Tashkeel).
/*
func RemoveTashkeelExtended(s string) string {
r := []rune(s)
m := map[string]bool{"\u064e": true, "\u064b": true, "\u064f": true,
"\u064c": true, "\u0650": true, "\u064d": true,
"\u0651": true, "\u0652": true}
for key, value := range s {
if m[value] {
continue
}
r[key] = value
}
return string(r)
}
*/
Add removeAllNonArabicChars to strip all non-Arabic characters.
// Package goarabic contains utility functions for working with Arabic strings.
package goarabic
// Reverse returns its argument string reversed rune-wise left to right.
func Reverse(s string) string {
	runes := []rune(s)
	for left, right := 0, len(runes)-1; left < right; left, right = left+1, right-1 {
		runes[left], runes[right] = runes[right], runes[left]
	}
	return string(runes)
}
// SmartLength returns the length of the given string
// without considering the Arabic Vowels (Tashkeel).
func SmartLength(s *string) int {
	// len() uses int as its return type, so we follow for compatibility.
	count := 0
	for _, r := range *s {
		if !tashkeel[r] {
			count++
		}
	}
	return count
}
// RemoveTashkeel returns its argument as rune-wise string without Arabic vowels (Tashkeel).
func RemoveTashkeel(s string) string {
	// The output can never exceed the byte length of the input,
	// so cap = len(s) avoids reallocations.
	kept := make([]rune, 0, len(s))
	for _, r := range s {
		if !tashkeel[r] {
			kept = append(kept, r)
		}
	}
	return string(kept)
}
// RemoveTatweel returns its argument as rune-wise string without the Arabic Tatweel character.
func RemoveTatweel(s string) string {
	kept := make([]rune, 0, len(s))
	for _, r := range s {
		if !TATWEEL.equals(r) {
			kept = append(kept, r)
		}
	}
	return string(kept)
}
// getCharGlyph resolves the contextual glyph (beginning, medium, final or
// isolated form) for currentChar based on whether its neighbours belong to
// the Arabic alphabet. Characters outside the alphabet are returned
// unchanged.
func getCharGlyph(previousChar, currentChar, nextChar rune) rune {
	glyph := currentChar
	previousIn := false // previousChar in the Arabic Alphabet or not
	nextIn := false     // nextChar in the Arabic Alphabet or not
	for _, s := range alphabet {
		if s.equals(previousChar) { // previousChar in the Arabic Alphabet ?
			previousIn = true
		}
		if s.equals(nextChar) { // nextChar in the Arabic Alphabet ?
			nextIn = true
		}
	}
	for _, s := range alphabet {
		if !s.equals(currentChar) { // only handle the entry for currentChar
			continue
		}
		if previousIn && nextIn { // between two Arabic letters: medium glyph,
			// unless the previous letter is in beggining_after (presumably a
			// letter that does not connect forward — TODO confirm).
			// NOTE: the original wrote `for s, _ := range`, which go vet flags
			// and which shadowed the outer loop variable s; renamed to harf.
			for harf := range beggining_after {
				if harf.equals(previousChar) {
					return getHarf(currentChar).Beggining
				}
			}
			return getHarf(currentChar).Medium
		}
		if nextIn { // beginning (because the previous is not in the Arabic Alphabet)
			return getHarf(currentChar).Beggining
		}
		if previousIn { // final (because the next is not in the Arabic Alphabet)
			for harf := range beggining_after {
				if harf.equals(previousChar) {
					return getHarf(currentChar).Isolated
				}
			}
			return getHarf(currentChar).Final
		}
		if !previousIn && !nextIn { // standalone letter
			return getHarf(currentChar).Isolated
		}
	}
	return glyph
}
// equals reports whether the given Arabic rune matches this Harf in any of
// its shapes (plain Unicode, beginning, isolated, medium or final glyph).
func (c *Harf) equals(char rune) bool {
	return char == c.Unicode ||
		char == c.Beggining ||
		char == c.Isolated ||
		char == c.Medium ||
		char == c.Final
}
// getHarf looks up the Harf entry of the alphabet that matches the given
// rune in any of its shapes. When no entry matches, a synthetic Harf is
// returned that uses the rune itself for every glyph form.
func getHarf(char rune) Harf {
	for _, harf := range alphabet {
		if harf.equals(char) {
			return harf
		}
	}
	return Harf{Unicode: char, Isolated: char, Medium: char, Final: char}
}
// removeAllNonArabicChars deletes every character that is not part of the
// Arabic alphabet (as listed in the package-level alphabet table).
func removeAllNonArabicChars(text string) string {
	runes := []rune(text)
	// The result can never be longer than the input.
	newText := make([]rune, 0, len(runes))
	for _, current := range runes {
		for _, s := range alphabet {
			if s.equals(current) {
				newText = append(newText, current)
				// Found a match; no need to scan the rest of the alphabet
				// (the original kept scanning after the hit).
				break
			}
		}
	}
	return string(newText)
}
// ToGlyph returns the glyph representation of the given text
func ToGlyph(text string) string {
	// Non-Arabic characters are currently kept as-is; the filtering pass
	// below is disabled in this revision.
	//text = removeAllNonArabicChars(text)
	runes := []rune(text)
	newText := make([]rune, 0, len(runes))
	for i, current := range runes {
		// Neighbouring runes; zero when out of range.
		var prev, next rune
		if i > 0 {
			prev = runes[i-1]
		}
		if i+1 < len(runes) {
			next = runes[i+1]
		}
		// Pick the contextual form (or keep the rune unchanged).
		newText = append(newText, getCharGlyph(prev, current, next))
	}
	return string(newText)
}
// RemoveTashkeel returns its argument as rune-wise string without Arabic vowels (Tashkeel).
/*
func RemoveTashkeelExtended(s string) string {
r := []rune(s)
m := map[string]bool{"\u064e": true, "\u064b": true, "\u064f": true,
"\u064c": true, "\u0650": true, "\u064d": true,
"\u0651": true, "\u0652": true}
for key, value := range s {
if m[value] {
continue
}
r[key] = value
}
return string(r)
}
*/
|
/*-
* Copyright 2016 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lib
import (
"bufio"
"bytes"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"text/template"
"time"
"github.com/fatih/color"
)
// layout is the text/template used by displayCert to render a certificate
// as human-readable text. The functions it pipes values through
// (certStart, certEnd, hexify, keyUsage, extKeyUsage, redify,
// highlightAlgorithm) are registered in displayCert's FuncMap.
var layout = `
{{- define "PkixName" -}}
{{- if .CommonName}}
CommonName: {{.CommonName}}
{{- end -}}
{{- range .Organization}}
Organization: {{.}}
{{- end -}}
{{- range .OrganizationalUnit}}
OrganizationalUnit: {{.}}
{{- end -}}
{{- range .Country}}
Country: {{.}}
{{- end -}}
{{- range .Locality}}
Locality: {{.}}
{{- end -}}
{{- range .Province}}
Province: {{.}}
{{- end -}}
{{- range .StreetAddress}}
StreetAddress: {{.}}
{{- end -}}
{{- range .PostalCode}}
PostalCode: {{.}}
{{- end -}}
{{- range .ExtraNames -}}
{{- range $index, $type := .Type }}
{{- if $index }}.{{else}}
{{end -}}
{{- $type -}}
{{- end }}: {{ .Value }}
{{- end -}}
{{end -}}
{{- if .Alias}}{{.Alias}}
{{end}}Serial: {{.SerialNumber}}
Not Before: {{.NotBefore | certStart}}
Not After : {{.NotAfter | certEnd}}
Signature : {{.SignatureAlgorithm | highlightAlgorithm}}{{if .IsSelfSigned}} (self-signed){{end}}
Subject Info:
{{- template "PkixName" .Subject.Name}}
Issuer Info:
{{- template "PkixName" .Issuer.Name}}
{{- if .Subject.KeyID}}
Subject Key ID : {{.Subject.KeyID | hexify}}{{end}}{{if .Issuer.KeyID}}
Authority Key ID : {{.Issuer.KeyID | hexify}}{{end}}{{if .BasicConstraints}}
Basic Constraints: CA:{{.BasicConstraints.IsCA}}{{if .BasicConstraints.MaxPathLen}}, pathlen:{{.BasicConstraints.MaxPathLen}}{{end}}{{end}}{{if .NameConstraints}}
Name Constraints {{if .PermittedDNSDomains.Critical}}(critical){{end}}: {{range .NameConstraints.PermittedDNSDomains}}
{{.}}{{end}}{{end}}{{if .KeyUsage}}
Key Usage:{{range .KeyUsage | keyUsage}}
{{.}}{{end}}{{end}}{{if .ExtKeyUsage}}
Extended Key Usage:{{range .ExtKeyUsage}}
{{. | extKeyUsage}}{{end}}{{end}}{{if .AltDNSNames}}
Alternate DNS Names:{{range .AltDNSNames}}
{{.}}{{end}}{{end}}{{if .AltIPAddresses}}
Alternate IP Addresses:{{range .AltIPAddresses}}
{{.}}{{end}}{{end}}{{if .EmailAddresses}}
Email Addresses:{{range .EmailAddresses}}
{{.}}{{end}}{{end}}{{if .Warnings}}
Warnings:{{range .Warnings}}
{{. | redify}}{{end}}{{end}}`
// certWithName pairs a parsed X.509 certificate with the optional friendly
// name and source file taken from its PEM headers.
type certWithName struct {
name string
file string
cert *x509.Certificate
}
// MarshalJSON implements json.Marshaler by converting the wrapped
// certificate into its simplified JSON representation.
func (c certWithName) MarshalJSON() ([]byte, error) {
	return json.Marshal(createSimpleCertificate(c.name, c.cert))
}
// createSimpleCertificateFromX509 parses a PEM block as an X.509
// certificate and converts it into a simpleCertificate, carrying over the
// name recorded in the block's headers when present.
func createSimpleCertificateFromX509(block *pem.Block) (simpleCertificate, error) {
	raw, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return simpleCertificate{}, fmt.Errorf("error reading cert: %s", err)
	}
	cert := certWithName{cert: raw}
	if name, ok := block.Headers[nameHeader]; ok {
		cert.name = name
	}
	if file, ok := block.Headers[fileHeader]; ok {
		cert.file = file
	}
	return createSimpleCertificate(cert.name, cert.cert), nil
}
// EncodeX509ToJSON encodes an X.509 certificate into a JSON string.
func EncodeX509ToJSON(cert *x509.Certificate) []byte {
	raw, err := json.Marshal(createSimpleCertificate("", cert))
	if err != nil {
		panic(err)
	}
	return raw
}
// EncodeX509ToObject encodes an X.509 certificate into a JSON-serializable object.
func EncodeX509ToObject(cert *x509.Certificate) interface{} {
	simple := createSimpleCertificate("", cert)
	return simple
}
// EncodeX509ToText encodes an X.509 certificate into human-readable text.
func EncodeX509ToText(cert *x509.Certificate) []byte {
return displayCert(createSimpleCertificate("", cert))
}
// displayCert takes in a parsed certificate object
// (for jceks certs, blank otherwise), and prints out relevant
// information. Start and end dates are colored based on whether or not
// the certificate is expired, not expired, or close to expiring.
func displayCert(cert simpleCertificate) []byte {
	// Helper functions available inside the "layout" template.
	funcMap := template.FuncMap{
		"certStart":          certStart,
		"certEnd":            certEnd,
		"redify":             redify,
		"highlightAlgorithm": highlightAlgorithm,
		"hexify":             hexify,
		"keyUsage":           keyUsage,
		"extKeyUsage":        extKeyUsage,
	}
	t := template.New("Cert template").Funcs(funcMap)
	t, err := t.Parse(layout)
	if err != nil {
		// Should never happen: layout is a fixed package-level template.
		panic(err)
	}
	var buffer bytes.Buffer
	w := bufio.NewWriter(&buffer)
	if err := t.Execute(w, cert); err != nil {
		// Should never happen
		panic(err)
	}
	// The Flush error was previously dropped; an unnoticed failure here
	// would silently truncate the rendered output.
	if err := w.Flush(); err != nil {
		panic(err)
	}
	return buffer.Bytes()
}
// Colors used to signal certificate health in the text output: green for
// good, yellow for warnings/close-to-boundary, red for bad or expired.
var (
green = color.New(color.Bold, color.FgGreen)
yellow = color.New(color.Bold, color.FgYellow)
red = color.New(color.Bold, color.FgRed)
)
// algorithmColors maps signature algorithms to the color used when
// displaying them: red for weak or deprecated algorithms (MD2/MD5/SHA-1
// and the DSA variants), green for the SHA-2 family.
var algorithmColors = map[x509.SignatureAlgorithm]*color.Color{
x509.MD2WithRSA: red,
x509.MD5WithRSA: red,
x509.SHA1WithRSA: red,
x509.SHA256WithRSA: green,
x509.SHA384WithRSA: green,
x509.SHA512WithRSA: green,
x509.DSAWithSHA1: red,
x509.DSAWithSHA256: red,
x509.ECDSAWithSHA1: red,
x509.ECDSAWithSHA256: green,
x509.ECDSAWithSHA384: green,
x509.ECDSAWithSHA512: green,
}
// highlightAlgorithm changes the color of the signing algorithm
// based on a set color map, e.g. to make SHA-1 show up red.
func highlightAlgorithm(sigAlg simpleSigAlg) string {
	sig := x509.SignatureAlgorithm(sigAlg)
	// Renamed from "color" so the local no longer shadows the imported
	// fatih/color package.
	c, ok := algorithmColors[sig]
	if !ok {
		return algString(sig)
	}
	return c.SprintFunc()(algString(sig))
}
// certStart takes a given start time for the validity of
// a certificate and returns that time colored properly
// based on how close it is to expiry. If it's more than
// a day after the certificate became valid the string will
// be green. If it has been less than a day the string will
// be yellow. If the certificate is not yet valid, the string
// will be red.
func certStart(start time.Time) string {
	// A duration constant replaces the old ParseDuration("24h") call,
	// whose error was silently discarded.
	const day = 24 * time.Hour
	now := time.Now()
	threshold := start.Add(day)
	switch {
	case now.After(threshold):
		// An explicit "%s" verb so the time string is never interpreted
		// as a printf format.
		return green.SprintfFunc()("%s", start)
	case now.After(start):
		return yellow.SprintfFunc()("%s", start)
	default:
		return red.SprintfFunc()("%s", start)
	}
}
// certEnd takes a given end time for the validity of
// a certificate and returns that time colored properly
// based on how close it is to expiry. If the certificate
// is more than a month away from expiring it returns a
// green string. If the certificate is less than a month
// from expiry it returns a yellow string. If the certificate
// is expired it returns a red string.
func certEnd(end time.Time) string {
	// 30 days == the original "720h"; a constant avoids ParseDuration
	// and its silently ignored error.
	const month = 30 * 24 * time.Hour
	now := time.Now()
	threshold := now.Add(month)
	switch {
	case threshold.Before(end):
		// An explicit "%s" verb so the time string is never interpreted
		// as a printf format.
		return green.SprintfFunc()("%s", end)
	case now.Before(end):
		return yellow.SprintfFunc()("%s", end)
	default:
		return red.SprintfFunc()("%s", end)
	}
}
// redify renders the given text in bold red (used for warnings).
func redify(text string) string {
	paint := red.SprintfFunc()
	return paint("%s", text)
}
Simplify subject printing
Note that RDNs now appear in the order they're in the pkix.Name, instead of a fixed order
/*-
* Copyright 2016 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lib
import (
"bufio"
"bytes"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"text/template"
"time"
"encoding/asn1"
"github.com/fatih/color"
)
// layout is the text/template used by displayCert to render a certificate
// as human-readable text. RDNs are printed in the order they appear in the
// pkix.Name; attribute OIDs are translated via the oidify template helper.
var layout = `
{{- define "PkixName" -}}
{{- range .Names}}
{{ .Type | oidify }}: {{ .Value }}
{{- end -}}
{{end -}}
{{- if .Alias}}{{.Alias}}
{{end}}Serial: {{.SerialNumber}}
Not Before: {{.NotBefore | certStart}}
Not After : {{.NotAfter | certEnd}}
Signature : {{.SignatureAlgorithm | highlightAlgorithm}}{{if .IsSelfSigned}} (self-signed){{end}}
Subject Info:
{{- template "PkixName" .Subject.Name}}
Issuer Info:
{{- template "PkixName" .Issuer.Name}}
{{- if .Subject.KeyID}}
Subject Key ID : {{.Subject.KeyID | hexify}}{{end}}{{if .Issuer.KeyID}}
Authority Key ID : {{.Issuer.KeyID | hexify}}{{end}}{{if .BasicConstraints}}
Basic Constraints: CA:{{.BasicConstraints.IsCA}}{{if .BasicConstraints.MaxPathLen}}, pathlen:{{.BasicConstraints.MaxPathLen}}{{end}}{{end}}{{if .NameConstraints}}
Name Constraints {{if .PermittedDNSDomains.Critical}}(critical){{end}}: {{range .NameConstraints.PermittedDNSDomains}}
{{.}}{{end}}{{end}}{{if .KeyUsage}}
Key Usage:{{range .KeyUsage | keyUsage}}
{{.}}{{end}}{{end}}{{if .ExtKeyUsage}}
Extended Key Usage:{{range .ExtKeyUsage}}
{{. | extKeyUsage}}{{end}}{{end}}{{if .AltDNSNames}}
Alternate DNS Names:{{range .AltDNSNames}}
{{.}}{{end}}{{end}}{{if .AltIPAddresses}}
Alternate IP Addresses:{{range .AltIPAddresses}}
{{.}}{{end}}{{end}}{{if .EmailAddresses}}
Email Addresses:{{range .EmailAddresses}}
{{.}}{{end}}{{end}}{{if .Warnings}}
Warnings:{{range .Warnings}}
{{. | redify}}{{end}}{{end}}`
// certWithName pairs a parsed X.509 certificate with the optional friendly
// name and source file taken from its PEM headers.
type certWithName struct {
name string
file string
cert *x509.Certificate
}
// MarshalJSON implements json.Marshaler by converting the wrapped
// certificate into its simplified JSON representation.
func (c certWithName) MarshalJSON() ([]byte, error) {
	return json.Marshal(createSimpleCertificate(c.name, c.cert))
}
// createSimpleCertificateFromX509 parses a PEM block as an X.509
// certificate and converts it into a simpleCertificate, carrying over the
// name recorded in the block's headers when present.
func createSimpleCertificateFromX509(block *pem.Block) (simpleCertificate, error) {
	raw, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return simpleCertificate{}, fmt.Errorf("error reading cert: %s", err)
	}
	cert := certWithName{cert: raw}
	if name, ok := block.Headers[nameHeader]; ok {
		cert.name = name
	}
	if file, ok := block.Headers[fileHeader]; ok {
		cert.file = file
	}
	return createSimpleCertificate(cert.name, cert.cert), nil
}
// EncodeX509ToJSON encodes an X.509 certificate into a JSON string.
func EncodeX509ToJSON(cert *x509.Certificate) []byte {
	raw, err := json.Marshal(createSimpleCertificate("", cert))
	if err != nil {
		panic(err)
	}
	return raw
}
// EncodeX509ToObject encodes an X.509 certificate into a JSON-serializable object.
func EncodeX509ToObject(cert *x509.Certificate) interface{} {
	simple := createSimpleCertificate("", cert)
	return simple
}
// EncodeX509ToText encodes an X.509 certificate into human-readable text.
func EncodeX509ToText(cert *x509.Certificate) []byte {
return displayCert(createSimpleCertificate("", cert))
}
// displayCert takes in a parsed certificate object
// (for jceks certs, blank otherwise), and prints out relevant
// information. Start and end dates are colored based on whether or not
// the certificate is expired, not expired, or close to expiring.
func displayCert(cert simpleCertificate) []byte {
	// Helper functions available inside the "layout" template.
	funcMap := template.FuncMap{
		"certStart":          certStart,
		"certEnd":            certEnd,
		"redify":             redify,
		"highlightAlgorithm": highlightAlgorithm,
		"hexify":             hexify,
		"keyUsage":           keyUsage,
		"extKeyUsage":        extKeyUsage,
		"oidify":             oidify,
	}
	t := template.New("Cert template").Funcs(funcMap)
	t, err := t.Parse(layout)
	if err != nil {
		// Should never happen: layout is a fixed package-level template.
		panic(err)
	}
	var buffer bytes.Buffer
	w := bufio.NewWriter(&buffer)
	if err := t.Execute(w, cert); err != nil {
		// Should never happen
		panic(err)
	}
	// The Flush error was previously dropped; an unnoticed failure here
	// would silently truncate the rendered output.
	if err := w.Flush(); err != nil {
		panic(err)
	}
	return buffer.Bytes()
}
// Colors used to signal certificate health in the text output: green for
// good, yellow for warnings/close-to-boundary, red for bad or expired.
var (
green = color.New(color.Bold, color.FgGreen)
yellow = color.New(color.Bold, color.FgYellow)
red = color.New(color.Bold, color.FgRed)
)
// algorithmColors maps signature algorithms to the color used when
// displaying them: red for weak or deprecated algorithms (MD2/MD5/SHA-1
// and the DSA variants), green for the SHA-2 family.
var algorithmColors = map[x509.SignatureAlgorithm]*color.Color{
x509.MD2WithRSA: red,
x509.MD5WithRSA: red,
x509.SHA1WithRSA: red,
x509.SHA256WithRSA: green,
x509.SHA384WithRSA: green,
x509.SHA512WithRSA: green,
x509.DSAWithSHA1: red,
x509.DSAWithSHA256: red,
x509.ECDSAWithSHA1: red,
x509.ECDSAWithSHA256: green,
x509.ECDSAWithSHA384: green,
x509.ECDSAWithSHA512: green,
}
// highlightAlgorithm changes the color of the signing algorithm
// based on a set color map, e.g. to make SHA-1 show up red.
func highlightAlgorithm(sigAlg simpleSigAlg) string {
	sig := x509.SignatureAlgorithm(sigAlg)
	// Renamed from "color" so the local no longer shadows the imported
	// fatih/color package.
	c, ok := algorithmColors[sig]
	if !ok {
		return algString(sig)
	}
	return c.SprintFunc()(algString(sig))
}
// certStart takes a given start time for the validity of
// a certificate and returns that time colored properly
// based on how close it is to expiry. If it's more than
// a day after the certificate became valid the string will
// be green. If it has been less than a day the string will
// be yellow. If the certificate is not yet valid, the string
// will be red.
func certStart(start time.Time) string {
	// A duration constant replaces the old ParseDuration("24h") call,
	// whose error was silently discarded.
	const day = 24 * time.Hour
	now := time.Now()
	threshold := start.Add(day)
	switch {
	case now.After(threshold):
		// An explicit "%s" verb so the time string is never interpreted
		// as a printf format.
		return green.SprintfFunc()("%s", start)
	case now.After(start):
		return yellow.SprintfFunc()("%s", start)
	default:
		return red.SprintfFunc()("%s", start)
	}
}
// certEnd takes a given end time for the validity of
// a certificate and returns that time colored properly
// based on how close it is to expiry. If the certificate
// is more than a month away from expiring it returns a
// green string. If the certificate is less than a month
// from expiry it returns a yellow string. If the certificate
// is expired it returns a red string.
func certEnd(end time.Time) string {
	// 30 days == the original "720h"; a constant avoids ParseDuration
	// and its silently ignored error.
	const month = 30 * 24 * time.Hour
	now := time.Now()
	threshold := now.Add(month)
	switch {
	case threshold.Before(end):
		// An explicit "%s" verb so the time string is never interpreted
		// as a printf format.
		return green.SprintfFunc()("%s", end)
	case now.Before(end):
		return yellow.SprintfFunc()("%s", end)
	default:
		return red.SprintfFunc()("%s", end)
	}
}
// redify renders the given text in bold red (used for warnings).
func redify(text string) string {
	paint := red.SprintfFunc()
	return paint("%s", text)
}
func oidify(oid asn1.ObjectIdentifier) string {
raw := oid.String()
names := map[string]string{
"2.5.4.3": "CommonName",
"2.5.4.6": "Country",
"2.5.4.7": "Locality",
"2.5.4.8": "Province",
"2.5.4.10": "Organization",
"2.5.4.11": "OrganizationalUnit",
"1.2.840.113549.1.9.1": "emailAddress",
}
name, ok := names[raw]
if ok {
return name
}
return raw
}
|
// Copyright 2012 Marc-Antoine Ruel. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package subcommands permits a Go application to implement subcommands support
// similar to what is supported by the 'go' tool.
//
// The library is designed so that the test cases can run concurrently.
// Using global flags variables is discouraged to keep your program testable
// concurrently.
package subcommands
import (
"flag"
"fmt"
"io"
"os"
"sort"
"strings"
"text/template"
"github.com/texttheater/golang-levenshtein/levenshtein"
)
// Application describes an application with subcommand support.
type Application interface {
// GetName returns the 'name' of the application.
GetName() string
// GetTitle returns a one-line title explaining the purpose of the tool.
GetTitle() string
// GetCommands returns the list of the subcommands that are supported.
GetCommands() []*Command
// GetOut is used for testing to allow parallel test case execution, should
// be normally os.Stdout.
GetOut() io.Writer
// GetErr is used for testing to allow parallel test case execution, should
// be normally os.Stderr.
GetErr() io.Writer
// GetEnvVars returns the map of EnvVarName -> EnvVarDefinition that this
// Application responds to.
GetEnvVars() map[string]EnvVarDefinition
}
// EnvVarDefinition describes an environment variable that this application
// responds to.
type EnvVarDefinition struct {
Advanced bool // hidden from the default help listing unless -advanced is given
ShortDesc string // one-line description shown in the usage output
Default string // value used when the variable is absent from the environment
}
// DefaultApplication implements all of Application interface's methods. An
// application should usually have a global instance of DefaultApplication and
// route main() to command_support.Run(app).
type DefaultApplication struct {
Name string
Title string
Commands []*Command
EnvVars map[string]EnvVarDefinition
}
// GetName implements interface Application.
func (a *DefaultApplication) GetName() string {
return a.Name
}
// GetTitle implements interface Application.
func (a *DefaultApplication) GetTitle() string {
return a.Title
}
// GetCommands implements interface Application.
func (a *DefaultApplication) GetCommands() []*Command {
return a.Commands
}
// GetOut implements interface Application; the default application always
// writes to os.Stdout.
func (a *DefaultApplication) GetOut() io.Writer {
return os.Stdout
}
// GetErr implements interface Application; the default application always
// writes to os.Stderr.
func (a *DefaultApplication) GetErr() io.Writer {
return os.Stderr
}
// GetEnvVars implements interface Application.
func (a *DefaultApplication) GetEnvVars() map[string]EnvVarDefinition {
return a.EnvVars
}
// Env is the mapping of resolved environment variables passed to
// CommandRun.Run.
type Env map[string]EnvVar
// EnvVar will document the value and existence of a given environment variable,
// as defined by Application.GetEnvVars. Value will be the value from the
// environment, or the Default value if it didn't exist. Exists will be true iff
// the value was present in the environment.
type EnvVar struct {
Value string
Exists bool
}
// CommandRun is an initialized object representing a subcommand that is ready
// to be executed.
type CommandRun interface {
// Run executes the actual command. When this function is called by
// command_support.Run(), the flags have already been parsed.
Run(a Application, args []string, env Env) int
// GetFlags returns the flags for this specific command.
GetFlags() *flag.FlagSet
}
// CommandRunBase implements GetFlags of CommandRun. It should be embedded in
// another struct that implements Run().
type CommandRunBase struct {
Flags flag.FlagSet
}
// GetFlags implements CommandRun.
func (c *CommandRunBase) GetFlags() *flag.FlagSet {
return &c.Flags
}
// Command describes a subcommand. It has one generator to generate a command
// object which is executable. The purpose of this design is to enable safe
// parallel execution of test cases.
type Command struct {
UsageLine string // full usage line; the first word is the command name
ShortDesc string
LongDesc string
Advanced bool // hidden from the default help output
CommandRun func() CommandRun // factory creating a fresh CommandRun per invocation
isSection bool // pseudo-command created by Section(); never runnable
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
	if i := strings.Index(c.UsageLine, " "); i >= 0 {
		return c.UsageLine[:i]
	}
	return c.UsageLine
}
// Section returns an un-runnable command that can act as a nice section
// heading for other commands.
func Section(name string) *Command {
	cmd := &Command{isSection: true}
	cmd.ShortDesc = "\n\t" + name
	return cmd
}
// usage prints out the general application usage.
//
// The "%%-%ds" placeholders in the template are substituted below with the
// widest command / environment-variable name so the columns line up.
//
// TODO(maruel): Use termbox-go to enable coloring!
func usage(out io.Writer, a Application, includeAdvanced bool) {
usageTemplate := `{{.Title}}
Usage: {{.Name}} [command] [arguments]
Commands:{{range .Commands}}
{{.Name | printf "%%-%ds"}} {{.ShortDesc}}{{end}}
{{if .EnvVars}}Environment Variables:{{range .EnvVars}}
{{.Name | printf "%%-%ds"}} {{.ShortDesc}}{{if .Default}} (Default: {{.Default | printf "%%q"}}){{end}}{{end}}
{{end}}
Use "{{.Name}} help [command]" for more information about a command.{{if .ShowAdvancedTip}}
Use "{{.Name}} help -advanced" to display all commands.{{end}}
`
// Select the visible commands and measure the widest name for alignment.
widestCmd := 0
allCmds := a.GetCommands()
cmds := make([]*Command, 0, len(allCmds))
hasAdvanced := false
for _, c := range allCmds {
hasAdvanced = hasAdvanced || c.Advanced
if !c.Advanced || includeAdvanced {
// We need to include this command
if namLen := len(c.Name()); namLen > widestCmd {
widestCmd = namLen
}
cmds = append(cmds, c)
}
}
// envVarEntry is the flattened, sorted form rendered by the template.
type envVarEntry struct {
Name string
ShortDesc string
Default string
}
widestEnvVar := 0
envVars := []envVarEntry(nil)
if envVarMap := a.GetEnvVars(); len(envVarMap) > 0 {
envVarKeys := make(sort.StringSlice, 0, len(envVarMap))
for k, v := range envVarMap {
if v.Advanced {
hasAdvanced = true
}
if !v.Advanced || includeAdvanced {
if keyLen := len(k); keyLen > widestEnvVar {
widestEnvVar = keyLen
}
envVarKeys = append(envVarKeys, k)
}
}
// Sort the keys so the listing is deterministic (map order is random).
envVarKeys.Sort()
envVars = make([]envVarEntry, 0, len(envVarKeys))
for _, k := range envVarKeys {
v := envVarMap[k]
envVars = append(envVars, envVarEntry{k, v.ShortDesc, v.Default})
}
}
data := map[string]interface{}{
"Title": a.GetTitle(),
"Name": a.GetName(),
"Commands": cmds,
"EnvVars": envVars,
"ShowAdvancedTip": (hasAdvanced && !includeAdvanced),
}
// Inject the measured column widths into the template before rendering.
tmpl(out, fmt.Sprintf(usageTemplate, widestCmd, widestEnvVar), data)
}
// getCommandUsageHandler returns a flag.Usage callback that prints the
// command's long description, usage line and flag defaults to out, and
// records through *helpUsed that help output was displayed.
func getCommandUsageHandler(out io.Writer, a Application, c *Command, r CommandRun, helpUsed *bool) func() {
return func() {
helpTemplate := "{{.Cmd.LongDesc | trim | wrapWithLines}}usage: {{.App.GetName}} {{.Cmd.UsageLine}}\n"
dict := struct {
App Application
Cmd *Command
}{a, c}
tmpl(out, helpTemplate, dict)
r.GetFlags().PrintDefaults()
*helpUsed = true
}
}
// Initializes the flags for a specific CommandRun: usage callback, output
// writer, and ContinueOnError handling so parse failures don't os.Exit.
func initCommand(a Application, c *Command, r CommandRun, out io.Writer, helpUsed *bool) {
r.GetFlags().Usage = getCommandUsageHandler(out, a, c, r, helpUsed)
r.GetFlags().SetOutput(out)
r.GetFlags().Init(c.Name(), flag.ContinueOnError)
}
// FindCommand finds a Command by name and returns it if found.
func FindCommand(a Application, name string) *Command {
	for _, cmd := range a.GetCommands() {
		if cmd.Name() == name {
			return cmd
		}
	}
	return nil
}
// FindNearestCommand heuristically finds a Command the user wanted to type but
// failed to type correctly. It tries, in order: exact match, unique prefix,
// unique case-insensitive prefix, then the unambiguously closest command by
// Levenshtein distance. nil is returned when nothing is close enough or the
// best match is ambiguous.
func FindNearestCommand(a Application, name string) *Command {
commands := map[string]*Command{}
for _, c := range a.GetCommands() {
if !c.isSection {
commands[c.Name()] = c
}
}
if c, ok := commands[name]; ok {
return c
}
// Search for unique prefix.
withPrefix := []*Command{}
for n, c := range commands {
if strings.HasPrefix(n, name) {
withPrefix = append(withPrefix, c)
}
}
if len(withPrefix) == 1 {
return withPrefix[0]
}
// Search for case insensitivity.
withPrefix = []*Command{}
lowName := strings.ToLower(name)
for n, c := range commands {
if strings.HasPrefix(strings.ToLower(n), lowName) {
withPrefix = append(withPrefix, c)
}
}
if len(withPrefix) == 1 {
return withPrefix[0]
}
// Calculate the levenshtein distance and take the closest one.
// secondD tracks the runner-up distance so ambiguous matches (two commands
// nearly as close) can be rejected below.
closestD := 1000
var closestC *Command
secondD := 1000
for n, c := range commands {
dist := levenshtein.DistanceForStrings([]rune(n), []rune(name), levenshtein.DefaultOptions)
if dist < closestD {
secondD = closestD
closestD = dist
closestC = c
} else if dist < secondD {
secondD = dist
}
}
if closestD > 3 {
// Not similar enough. Don't be a fool and run a random command.
return nil
}
if (secondD - closestD) < 3 {
// Too ambiguous.
return nil
}
return closestC
}
// Run runs the application, scheduling the subcommand. This is the main entry
// point of the library. It returns a process exit code: 2 on usage errors,
// otherwise whatever the selected subcommand's Run returns.
func Run(a Application, args []string) int {
var helpUsed bool
// Process general flags first, mainly for -help.
flag.Usage = func() {
usage(a.GetErr(), a, false)
helpUsed = true
}
// Do not parse during unit tests because flag.commandLine.errorHandling == ExitOnError. :(
// It is safer to use a base class embedding CommandRunBase that is then
// embedded by each CommandRun implementation to define flags available for
// all commands.
if args == nil {
flag.Parse()
args = flag.Args()
}
if len(args) < 1 {
// Need a command.
usage(a.GetErr(), a, false)
return 2
}
if c := FindNearestCommand(a, args[0]); c != nil {
// Initialize the flags.
r := c.CommandRun()
initCommand(a, c, r, a.GetErr(), &helpUsed)
if err := r.GetFlags().Parse(args[1:]); err != nil {
return 2
}
if helpUsed {
return 0
}
// Resolve the environment variables declared by the application, falling
// back to their documented defaults when absent.
envVars := a.GetEnvVars()
envMap := make(map[string]EnvVar, len(envVars))
for k, v := range envVars {
val, ok := os.LookupEnv(k)
if !ok {
val = v.Default
}
envMap[k] = EnvVar{val, ok}
}
return r.Run(a, r.GetFlags().Args(), envMap)
}
fmt.Fprintf(a.GetErr(), "%s: unknown command %#q\n\nRun '%s help' for usage.\n", a.GetName(), args[0], a.GetName())
return 2
}
// tmpl executes the given template text on data, writing the result to w.
// Parse and execution failures are programmer errors and panic.
func tmpl(w io.Writer, text string, data interface{}) {
	t := template.New("top")
	t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "wrapWithLines": wrapWithLines})
	template.Must(t.Parse(text))
	if err := t.Execute(w, data); err != nil {
		// Fixed typo in the panic message: "templare" -> "template".
		panic(fmt.Sprintf("Failed to execute template: %s", err))
	}
}
// wrapWithLines appends a blank line after non-empty text so that the
// following output is visually separated; empty input stays empty.
func wrapWithLines(s string) string {
	if len(s) == 0 {
		return s
	}
	return s + "\n\n"
}
// CmdHelp defines the help command. It should be included in your application's
// Commands list.
//
// It is not added automatically but it will be run automatically if added.
var CmdHelp = &Command{
UsageLine: "help [<command>|-advanced]",
ShortDesc: "prints help about a command",
LongDesc: "Prints an overview of every command or information about a specific command.\nPass -advanced to see help for advanced commands.",
CommandRun: func() CommandRun {
ret := &helpRun{}
ret.Flags.BoolVar(&ret.advanced, "advanced", false, "show advanced commands")
return ret
},
}
// helpRun is the CommandRun implementation backing CmdHelp.
type helpRun struct {
CommandRunBase
advanced bool // -advanced flag: also list advanced commands
}
// Run implements CommandRun. With no argument it prints the general usage;
// with one argument it prints that command's detailed help.
func (c *helpRun) Run(a Application, args []string, env Env) int {
if len(args) == 0 {
usage(a.GetOut(), a, c.advanced)
return 0
}
if len(args) != 1 {
fmt.Fprintf(a.GetErr(), "%s: Too many arguments given\n\nRun '%s help' for usage.\n", a.GetName(), a.GetName())
return 2
}
// Redirects all output to Out.
var helpUsed bool
if cmd := FindNearestCommand(a, args[0]); cmd != nil {
// Initialize the flags.
r := cmd.CommandRun()
initCommand(a, cmd, r, a.GetErr(), &helpUsed)
r.GetFlags().Usage()
return 0
}
fmt.Fprintf(a.GetErr(), "%s: unknown command %#q\n\nRun '%s help' for usage.\n", a.GetName(), args[0], a.GetName())
return 2
}
Fix comment typo (#6)
// Copyright 2012 Marc-Antoine Ruel. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package subcommands permits a Go application to implement subcommands support
// similar to what is supported by the 'go' tool.
//
// The library is designed so that the test cases can run concurrently.
// Using global flags variables is discouraged to keep your program testable
// concurrently.
package subcommands
import (
"flag"
"fmt"
"io"
"os"
"sort"
"strings"
"text/template"
"github.com/texttheater/golang-levenshtein/levenshtein"
)
// Application describes an application with subcommand support.
type Application interface {
// GetName returns the 'name' of the application.
GetName() string
// GetTitle returns a one-line title explaining the purpose of the tool.
GetTitle() string
// GetCommands returns the list of the subcommands that are supported.
GetCommands() []*Command
// GetOut is used for testing to allow parallel test case execution, should
// be normally os.Stdout.
GetOut() io.Writer
// GetErr is used for testing to allow parallel test case execution, should
// be normally os.Stderr.
GetErr() io.Writer
// GetEnvVars returns the map of EnvVarName -> EnvVarDefinition that this
// Application responds to.
GetEnvVars() map[string]EnvVarDefinition
}
// EnvVarDefinition describes an environment variable that this application
// responds to.
type EnvVarDefinition struct {
Advanced bool // hidden from the default help listing unless -advanced is given
ShortDesc string // one-line description shown in the usage output
Default string // value used when the variable is absent from the environment
}
// DefaultApplication implements all of Application interface's methods. An
// application should usually have a global instance of DefaultApplication and
// route main() to command_support.Run(app).
type DefaultApplication struct {
Name string
Title string
Commands []*Command
EnvVars map[string]EnvVarDefinition
}
// GetName implements interface Application.
func (a *DefaultApplication) GetName() string {
return a.Name
}
// GetTitle implements interface Application.
func (a *DefaultApplication) GetTitle() string {
return a.Title
}
// GetCommands implements interface Application.
func (a *DefaultApplication) GetCommands() []*Command {
return a.Commands
}
// GetOut implements interface Application; the default application always
// writes to os.Stdout.
func (a *DefaultApplication) GetOut() io.Writer {
return os.Stdout
}
// GetErr implements interface Application; the default application always
// writes to os.Stderr.
func (a *DefaultApplication) GetErr() io.Writer {
return os.Stderr
}
// GetEnvVars implements interface Application.
func (a *DefaultApplication) GetEnvVars() map[string]EnvVarDefinition {
return a.EnvVars
}
// Env is the mapping of resolved environment variables passed to
// CommandRun.Run.
type Env map[string]EnvVar

// EnvVar will document the value and existance of a given environment variable,
// as defined by Application.GetEnvVars. Value will be the value from the
// environment, or the Default value if it didn't exist. Exists will be true iff
// the value was present in the environment.
type EnvVar struct {
	Value  string
	Exists bool
}
// CommandRun is an initialized object representing a subcommand that is ready
// to be executed.
type CommandRun interface {
	// Run execute the actual command. When this function is called by
	// command_support.Run(), the flags have already been parsed.
	// The return value is used as the process exit code.
	Run(a Application, args []string, env Env) int
	// GetFlags returns the flags for this specific command.
	GetFlags() *flag.FlagSet
}
// CommandRunBase implements GetFlags of CommandRun. It should be embedded in
// another struct that implements Run().
type CommandRunBase struct {
	// Flags holds the per-command flag set; each CommandRun instance gets its
	// own so commands can run concurrently in tests.
	Flags flag.FlagSet
}

// GetFlags implements CommandRun.
func (c *CommandRunBase) GetFlags() *flag.FlagSet {
	return &c.Flags
}
// Command describes a subcommand. It has one generator to generate a command
// object which is executable. The purpose of this design is to enable safe
// parallel execution of test cases.
type Command struct {
	// UsageLine starts with the command name; see Name().
	UsageLine string
	ShortDesc string
	LongDesc  string
	// Advanced hides the command from the default help output.
	Advanced bool
	// CommandRun is a factory producing a fresh, independently-flagged
	// CommandRun per invocation.
	CommandRun func() CommandRun
	// isSection marks a pure heading entry created by Section(); such
	// entries are never matched as runnable commands.
	isSection bool
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
	// SplitN with a limit of 2 yields at least one element, even for an
	// empty usage line, so indexing [0] is always safe.
	return strings.SplitN(c.UsageLine, " ", 2)[0]
}
// Section returns an un-runnable command that can act as a nice section
// heading for other commands.
func Section(name string) *Command {
	c := &Command{}
	// The leading newline and tab visually separate the heading in the
	// generated usage listing.
	c.ShortDesc = "\n\t" + name
	c.isSection = true
	return c
}
// usage prints out the general application usage.
//
// The command and environment-variable columns are aligned by first computing
// the widest name, then substituting those widths into the template via
// fmt.Sprintf (the "%%-%ds" placeholders become e.g. "%-12s").
//
// TODO(maruel): Use termbox-go to enable coloring!
func usage(out io.Writer, a Application, includeAdvanced bool) {
	usageTemplate := `{{.Title}}
Usage: {{.Name}} [command] [arguments]
Commands:{{range .Commands}}
{{.Name | printf "%%-%ds"}} {{.ShortDesc}}{{end}}
{{if .EnvVars}}Environment Variables:{{range .EnvVars}}
{{.Name | printf "%%-%ds"}} {{.ShortDesc}}{{if .Default}} (Default: {{.Default | printf "%%q"}}){{end}}{{end}}
{{end}}
Use "{{.Name}} help [command]" for more information about a command.{{if .ShowAdvancedTip}}
Use "{{.Name}} help -advanced" to display all commands.{{end}}
`
	// Filter commands by the advanced flag while tracking the widest name
	// for column alignment.
	widestCmd := 0
	allCmds := a.GetCommands()
	cmds := make([]*Command, 0, len(allCmds))
	hasAdvanced := false
	for _, c := range allCmds {
		hasAdvanced = hasAdvanced || c.Advanced
		if !c.Advanced || includeAdvanced {
			// We need to include this command
			if namLen := len(c.Name()); namLen > widestCmd {
				widestCmd = namLen
			}
			cmds = append(cmds, c)
		}
	}
	// Environment variables get the same filter/width treatment, plus a
	// sort by name since map iteration order is random.
	type envVarEntry struct {
		Name      string
		ShortDesc string
		Default   string
	}
	widestEnvVar := 0
	envVars := []envVarEntry(nil)
	if envVarMap := a.GetEnvVars(); len(envVarMap) > 0 {
		envVarKeys := make(sort.StringSlice, 0, len(envVarMap))
		for k, v := range envVarMap {
			if v.Advanced {
				hasAdvanced = true
			}
			if !v.Advanced || includeAdvanced {
				if keyLen := len(k); keyLen > widestEnvVar {
					widestEnvVar = keyLen
				}
				envVarKeys = append(envVarKeys, k)
			}
		}
		envVarKeys.Sort()
		envVars = make([]envVarEntry, 0, len(envVarKeys))
		for _, k := range envVarKeys {
			v := envVarMap[k]
			envVars = append(envVars, envVarEntry{k, v.ShortDesc, v.Default})
		}
	}
	data := map[string]interface{}{
		"Title":    a.GetTitle(),
		"Name":     a.GetName(),
		"Commands": cmds,
		"EnvVars":  envVars,
		// Only advertise "help -advanced" when something is actually hidden.
		"ShowAdvancedTip": (hasAdvanced && !includeAdvanced),
	}
	tmpl(out, fmt.Sprintf(usageTemplate, widestCmd, widestEnvVar), data)
}
// getCommandUsageHandler returns a flag.Usage handler that prints c's long
// description, usage line and flag defaults to out, and records via *helpUsed
// that help was displayed (so Run can exit 0 instead of treating -help as a
// parse error).
func getCommandUsageHandler(out io.Writer, a Application, c *Command, r CommandRun, helpUsed *bool) func() {
	return func() {
		helpTemplate := "{{.Cmd.LongDesc | trim | wrapWithLines}}usage:  {{.App.GetName}} {{.Cmd.UsageLine}}\n"
		dict := struct {
			App Application
			Cmd *Command
		}{a, c}
		tmpl(out, helpTemplate, dict)
		r.GetFlags().PrintDefaults()
		*helpUsed = true
	}
}
// initCommand initializes the flags for a specific CommandRun: installs the
// per-command usage handler, redirects flag output to out, and re-inits the
// FlagSet with ContinueOnError so a parse failure returns instead of exiting
// the process (important for tests).
func initCommand(a Application, c *Command, r CommandRun, out io.Writer, helpUsed *bool) {
	r.GetFlags().Usage = getCommandUsageHandler(out, a, c, r, helpUsed)
	r.GetFlags().SetOutput(out)
	r.GetFlags().Init(c.Name(), flag.ContinueOnError)
}
// FindCommand finds a Command by name and returns it if found.
// It returns nil when no registered command matches exactly.
func FindCommand(a Application, name string) *Command {
	for _, cmd := range a.GetCommands() {
		if name == cmd.Name() {
			return cmd
		}
	}
	return nil
}
// FindNearestCommand heuristically finds a Command the user wanted to type but
// failed to type correctly.
//
// Resolution order: exact name match, then unique prefix match, then unique
// case-insensitive prefix match, then closest Levenshtein distance — but only
// when the closest candidate is both near enough (distance <= 3) and clearly
// better than the runner-up (margin >= 3). Section headings are excluded.
func FindNearestCommand(a Application, name string) *Command {
	commands := map[string]*Command{}
	for _, c := range a.GetCommands() {
		if !c.isSection {
			commands[c.Name()] = c
		}
	}
	if c, ok := commands[name]; ok {
		return c
	}
	// Search for unique prefix.
	withPrefix := []*Command{}
	for n, c := range commands {
		if strings.HasPrefix(n, name) {
			withPrefix = append(withPrefix, c)
		}
	}
	if len(withPrefix) == 1 {
		return withPrefix[0]
	}
	// Search for case insensitivity.
	withPrefix = []*Command{}
	lowName := strings.ToLower(name)
	for n, c := range commands {
		if strings.HasPrefix(strings.ToLower(n), lowName) {
			withPrefix = append(withPrefix, c)
		}
	}
	if len(withPrefix) == 1 {
		return withPrefix[0]
	}
	// Calculate the levenshtein distance and take the closest one.
	// Track the second-best distance too, to detect ambiguity below.
	closestD := 1000
	var closestC *Command
	secondD := 1000
	for n, c := range commands {
		dist := levenshtein.DistanceForStrings([]rune(n), []rune(name), levenshtein.DefaultOptions)
		if dist < closestD {
			secondD = closestD
			closestD = dist
			closestC = c
		} else if dist < secondD {
			secondD = dist
		}
	}
	if closestD > 3 {
		// Not similar enough. Don't be a fool and run a random command.
		return nil
	}
	if (secondD - closestD) < 3 {
		// Too ambiguous.
		return nil
	}
	return closestC
}
// Run runs the application, scheduling the subcommand. This is the main entry
// point of the library.
//
// Pass args == nil to parse os.Args via the flag package (normal operation);
// tests pass an explicit slice instead. The return value is the process exit
// code: 0 on success or when help was displayed, 2 on usage errors.
func Run(a Application, args []string) int {
	var helpUsed bool

	// Process general flags first, mainly for -help.
	flag.Usage = func() {
		usage(a.GetErr(), a, false)
		helpUsed = true
	}

	// Do not parse during unit tests because flag.commandLine.errorHandling == ExitOnError. :(
	// It is safer to use a base class embedding CommandRunBase that is then
	// embedded by each CommandRun implementation to define flags available for
	// all commands.
	if args == nil {
		flag.Parse()
		args = flag.Args()
	}

	if len(args) < 1 {
		// Need a command.
		usage(a.GetErr(), a, false)
		return 2
	}

	if c := FindNearestCommand(a, args[0]); c != nil {
		// Initialize the flags.
		r := c.CommandRun()
		initCommand(a, c, r, a.GetErr(), &helpUsed)
		if err := r.GetFlags().Parse(args[1:]); err != nil {
			return 2
		}
		if helpUsed {
			// -help was requested; the usage handler already printed it.
			return 0
		}
		// Resolve declared environment variables, falling back to defaults
		// for the ones not present in the environment.
		envVars := a.GetEnvVars()
		envMap := make(map[string]EnvVar, len(envVars))
		for k, v := range envVars {
			val, ok := os.LookupEnv(k)
			if !ok {
				val = v.Default
			}
			envMap[k] = EnvVar{val, ok}
		}
		return r.Run(a, r.GetFlags().Args(), envMap)
	}

	fmt.Fprintf(a.GetErr(), "%s: unknown command %#q\n\nRun '%s help' for usage.\n", a.GetName(), args[0], a.GetName())
	return 2
}
// tmpl executes the given template text on data, writing the result to w.
//
// The template may use two helper functions: "trim" (strings.TrimSpace) and
// "wrapWithLines" (appends a blank line after non-empty text). Both a parse
// failure and an execution failure indicate a bug in the embedded template
// text, so they panic rather than returning an error.
func tmpl(w io.Writer, text string, data interface{}) {
	t := template.New("top")
	t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "wrapWithLines": wrapWithLines})
	template.Must(t.Parse(text))
	if err := t.Execute(w, data); err != nil {
		// Fixed typo in the panic message: "templare" -> "template".
		panic(fmt.Sprintf("Failed to execute template: %s", err))
	}
}
// wrapWithLines appends a blank line after non-empty text so that the
// following template output is visually separated. Empty input is returned
// unchanged to avoid printing stray newlines.
func wrapWithLines(s string) string {
	if len(s) == 0 {
		return s
	}
	return s + "\n\n"
}
// CmdHelp defines the help command. It should be included in your application's
// Commands list.
//
// It is not added automatically but it will be run automatically if added.
var CmdHelp = &Command{
	UsageLine: "help [<command>|-advanced]",
	ShortDesc: "prints help about a command",
	LongDesc:  "Prints an overview of every command or information about a specific command.\nPass -advanced to see help for advanced commands.",
	CommandRun: func() CommandRun {
		// A fresh helpRun per invocation keeps the flag set isolated, so
		// concurrent test cases don't share state.
		ret := &helpRun{}
		ret.Flags.BoolVar(&ret.advanced, "advanced", false, "show advanced commands")
		return ret
	},
}
// helpRun is the CommandRun behind CmdHelp.
type helpRun struct {
	CommandRunBase
	// advanced mirrors the -advanced flag: include hidden commands in the
	// overview.
	advanced bool
}

// Run prints either the application overview (no args) or the detailed help
// of a single command. Returns 0 on success, 2 on usage errors.
func (c *helpRun) Run(a Application, args []string, env Env) int {
	if len(args) == 0 {
		usage(a.GetOut(), a, c.advanced)
		return 0
	}
	if len(args) != 1 {
		fmt.Fprintf(a.GetErr(), "%s: Too many arguments given\n\nRun '%s help' for usage.\n", a.GetName(), a.GetName())
		return 2
	}
	// Redirects all output to Out.
	var helpUsed bool
	if cmd := FindNearestCommand(a, args[0]); cmd != nil {
		// Initialize the flags.
		r := cmd.CommandRun()
		initCommand(a, cmd, r, a.GetErr(), &helpUsed)
		r.GetFlags().Usage()
		return 0
	}
	fmt.Fprintf(a.GetErr(), "%s: unknown command %#q\n\nRun '%s help' for usage.\n", a.GetName(), args[0], a.GetName())
	return 2
}
|
// Copyright 2015 Ka-Hing Cheung
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"os"
"sync"
"syscall"
"time"
"golang.org/x/net/context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/fuseops"
"github.com/jacobsa/fuse/fuseutil"
"github.com/Sirupsen/logrus"
)
// goofys is a Filey System written in Go. All the backend data is
// stored on S3 as is. It's a Filey System instead of a File System
// because it makes minimal effort at being POSIX
// compliant. Particularly things that are difficult to support on S3
// or would translate into more than one round-trip would either fail
// (rename non-empty dir) or faked (no per-file permission). goofys
// does not have a on disk data cache, and consistency model is
// close-to-open.
type Goofys struct {
	fuseutil.NotImplementedFileSystem
	bucket string

	flags *FlagStorage

	umask uint32

	awsConfig *aws.Config
	sess      *session.Session
	s3        *s3.S3
	// v2Signer is set once we have fallen back to AWS signature v2 for
	// non-AWS endpoints; see fallbackV2Signer.
	v2Signer  bool
	rootAttrs fuseops.InodeAttributes

	bufferPool *BufferPool

	// A lock protecting the state of the file system struct itself (distinct
	// from per-inode locks). Make sure to see the notes on lock ordering above.
	mu sync.Mutex

	// The next inode ID to hand out. We assume that this will never overflow,
	// since even if we were handing out inode IDs at 4 GHz, it would still take
	// over a century to do so.
	//
	// GUARDED_BY(mu)
	nextInodeID fuseops.InodeID

	// The collection of live inodes, keyed by inode ID. No ID less than
	// fuseops.RootInodeID is ever used.
	//
	// INVARIANT: For all keys k, fuseops.RootInodeID <= k < nextInodeID
	// INVARIANT: For all keys k, inodes[k].ID() == k
	// INVARIANT: inodes[fuseops.RootInodeID] is missing or of type inode.DirInode
	// INVARIANT: For all v, if IsDirName(v.Name()) then v is inode.DirInode
	//
	// GUARDED_BY(mu)
	inodes      map[fuseops.InodeID]*Inode
	inodesCache map[string]*Inode // fullname to inode

	// GUARDED_BY(mu)
	nextHandleID fuseops.HandleID
	dirHandles   map[fuseops.HandleID]*DirHandle
	fileHandles  map[fuseops.HandleID]*FileHandle
}

// s3Log is the logger for S3 wire-level diagnostics.
var s3Log = GetLogger("s3")
// NewGoofys constructs and initializes a Goofys file system for bucket.
// It probes bucket accessibility and, when the endpoint rejects v4-signed
// requests, falls back to the v2 signer; for other errors it re-detects the
// bucket region and retries. Returns nil when the bucket cannot be accessed.
func NewGoofys(bucket string, awsConfig *aws.Config, flags *FlagStorage) *Goofys {
	// Set up the basic struct.
	fs := &Goofys{
		bucket: bucket,
		flags:  flags,
		umask:  0122,
	}

	if flags.DebugS3 {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors)
		s3Log.Level = logrus.DebugLevel
	}

	fs.awsConfig = awsConfig
	fs.sess = session.New(awsConfig)
	fs.s3 = fs.newS3()

	err := fs.testBucket()
	if err != nil {
		switch mapAwsError(err) {
		case fuse.ENOENT:
			log.Errorf("bucket %v does not exist", fs.bucket)
			return nil
		case fuse.EINVAL, syscall.EACCES:
			// Fall back to v2 signing on 403 in addition to 400 (#65):
			// swift3/ceph-s3 return 400 and some S3 clones return 403 for
			// v4-signed requests. Only non-aws would require v2 signer, and
			// it's not clear how to detect region in those cases.
			fs.fallbackV2Signer()
			err = fs.testBucket()
			if err != nil {
				log.Errorf("Unable to access '%v': %v", fs.bucket, err)
				return nil
			}
		default:
			fs.detectBucketLocationByHEAD()
			fs.sess = session.New(awsConfig)
			fs.s3 = fs.newS3()
			// try again to make sure
			err = fs.testBucket()
			if err != nil {
				log.Errorf("Unable to access '%v': %v", fs.bucket, mapAwsError(err))
				return nil
			}
		}
	}

	now := time.Now()
	fs.rootAttrs = fuseops.InodeAttributes{
		Size:   4096,
		Nlink:  2,
		Mode:   flags.DirMode | os.ModeDir,
		Atime:  now,
		Mtime:  now,
		Ctime:  now,
		Crtime: now,
		Uid:    fs.flags.Uid,
		Gid:    fs.flags.Gid,
	}

	fs.bufferPool = BufferPool{}.Init()
	fs.nextInodeID = fuseops.RootInodeID + 1
	fs.inodes = make(map[fuseops.InodeID]*Inode)
	root := NewInode(aws.String(""), aws.String(""), flags)
	root.Id = fuseops.RootInodeID
	root.Attributes = &fs.rootAttrs
	fs.inodes[fuseops.RootInodeID] = root
	fs.inodesCache = make(map[string]*Inode)

	fs.nextHandleID = 1
	fs.dirHandles = make(map[fuseops.HandleID]*DirHandle)
	fs.fileHandles = make(map[fuseops.HandleID]*FileHandle)

	return fs
}
// fallbackV2Signer switches the S3 client to AWS signature v2 for endpoints
// that reject v4 signing. Returns fuse.EINVAL if we already fell back, to
// avoid retrying forever.
func (fs *Goofys) fallbackV2Signer() (err error) {
	if fs.v2Signer {
		return fuse.EINVAL
	}
	s3Log.Infoln("Falling back to v2 signer")
	fs.v2Signer = true
	fs.s3 = fs.newS3()
	return
}
// newS3 builds an S3 client from the current session. When v2 signing is in
// effect, the default (v4) sign handlers are replaced with SignV2 plus the
// content-length handler that v2 signing needs.
func (fs *Goofys) newS3() *s3.S3 {
	svc := s3.New(fs.sess)
	if fs.v2Signer {
		svc.Handlers.Sign.Clear()
		svc.Handlers.Sign.PushBack(SignV2)
		svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	}
	return svc
}
// testBucket probes bucket accessibility with a HeadBucket call; the returned
// error (if any) is the raw AWS error, not yet mapped to an errno.
func (fs *Goofys) testBucket() (err error) {
	_, err = fs.s3.HeadBucket(&s3.HeadBucketInput{Bucket: &fs.bucket})
	return
}
func (fs *Goofys) detectBucketLocationByHEAD() {
config := &aws.Config{
Credentials: credentials.AnonymousCredentials,
Endpoint: fs.awsConfig.Endpoint,
// always probe with us-east-1 region, otherwise the behavior of other endpoints
// maybe different
Region: aws.String("us-east-1"),
Logger: GetLogger("s3"),
LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors),
}
sess := session.New(config)
tmpS3 := s3.New(sess)
req, _ := tmpS3.HeadBucketRequest(&s3.HeadBucketInput{Bucket: &fs.bucket})
req.Send()
region := req.HTTPResponse.Header["X-Amz-Bucket-Region"]
if len(region) != 0 {
if region[0] != *fs.awsConfig.Region {
s3Log.Infof("Switching from region '%v' to '%v'", *fs.awsConfig.Region, region[0])
fs.awsConfig.Region = ®ion[0]
}
} else {
s3Log.Infof("Unable to detect bucket region, staying at '%v'", *fs.awsConfig.Region)
}
return
}
// Find the given inode. Panic if it doesn't exist.
//
// LOCKS_REQUIRED(fs.mu)
func (fs *Goofys) getInodeOrDie(id fuseops.InodeID) (inode *Inode) {
	// A missing key yields the zero value (nil), which we treat the same
	// as an explicitly stored nil: both are programmer errors.
	if inode = fs.inodes[id]; inode != nil {
		return
	}
	panic(fmt.Sprintf("Unknown inode: %v", id))
}
// StatFS reports synthetic file-system statistics. S3 has no real capacity
// limits, so we advertise a fixed 1PB of always-free space and 1 billion
// free inodes to keep tools like df happy.
func (fs *Goofys) StatFS(
	ctx context.Context,
	op *fuseops.StatFSOp) (err error) {

	const BLOCK_SIZE = 4096
	const TOTAL_SPACE = 1 * 1024 * 1024 * 1024 * 1024 * 1024 // 1PB
	const TOTAL_BLOCKS = TOTAL_SPACE / BLOCK_SIZE
	const INODES = 1 * 1000 * 1000 * 1000 // 1 billion
	op.BlockSize = BLOCK_SIZE
	op.Blocks = TOTAL_BLOCKS
	op.BlocksFree = TOTAL_BLOCKS
	op.BlocksAvailable = TOTAL_BLOCKS
	op.IoSize = 1 * 1024 * 1024 // 1MB
	op.Inodes = INODES
	op.InodesFree = INODES
	return
}
// GetInodeAttributes serves the fuse GetInodeAttributes op: it resolves the
// inode under fs.mu, then fetches attributes outside the lock (GetAttributes
// may do network I/O).
func (fs *Goofys) GetInodeAttributes(
	ctx context.Context,
	op *fuseops.GetInodeAttributesOp) (err error) {

	fs.mu.Lock()
	inode := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	attr, err := inode.GetAttributes(fs)
	if err != nil {
		// NOTE(review): previously *attr was dereferenced even on error,
		// which panics if GetAttributes returns a nil attr on failure —
		// confirm against Inode.GetAttributes.
		return
	}
	op.Attributes = *attr
	op.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	return
}
// mapAwsError translates AWS SDK errors into errno-style errors that fuse
// understands. HTTP status codes map to EINVAL/EACCES/ENOENT/ENOTSUP;
// anything unrecognized is logged and returned as-is. Non-AWS errors pass
// through untouched.
func mapAwsError(err error) error {
	awsErr, ok := err.(awserr.Error)
	if !ok {
		return err
	}
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		// A service error occurred
		switch reqErr.StatusCode() {
		case 400:
			return fuse.EINVAL
		case 403:
			return syscall.EACCES
		case 404:
			return fuse.ENOENT
		case 405:
			return syscall.ENOTSUP
		default:
			// Fixed swapped log arguments: "code" now gets the HTTP
			// status code and "msg" the error message.
			s3Log.Errorf("code=%v msg=%v request=%v\n", reqErr.StatusCode(), reqErr.Message(), reqErr.RequestID())
			return reqErr
		}
	}
	switch awsErr.Code() {
	case "BucketRegionError":
		// don't need to log anything, we should detect region after
		return err
	default:
		// Generic AWS Error with Code, Message, and original error (if any)
		s3Log.Errorf("code=%v msg=%v, err=%v\n", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
		return awsErr
	}
}
// LookUpInodeNotDir checks whether a plain object named name exists, sending
// the HeadObject response on c or a mapped error on errc. Intended to run as
// a goroutine racing against LookUpInodeDir; see LookUpInodeMaybeDir.
func (fs *Goofys) LookUpInodeNotDir(name string, c chan s3.HeadObjectOutput, errc chan error) {
	params := &s3.HeadObjectInput{Bucket: &fs.bucket, Key: &name}
	resp, err := fs.s3.HeadObject(params)
	if err != nil {
		errc <- mapAwsError(err)
		return
	}

	s3Log.Debug(resp)
	c <- *resp
}
// LookUpInodeDir checks whether name exists as a "directory" by listing at
// most one key under the "name/" prefix, sending the response on c or a
// mapped error on errc. Counterpart of LookUpInodeNotDir; see
// LookUpInodeMaybeDir.
func (fs *Goofys) LookUpInodeDir(name string, c chan s3.ListObjectsOutput, errc chan error) {
	params := &s3.ListObjectsInput{
		Bucket:    &fs.bucket,
		Delimiter: aws.String("/"),
		MaxKeys:   aws.Int64(1),
		Prefix:    aws.String(name + "/"),
	}

	resp, err := fs.s3.ListObjects(params)
	if err != nil {
		errc <- mapAwsError(err)
		return
	}

	s3Log.Debug(resp)
	c <- *resp
}
// mpuCopyPart server-side copies one byte range (bytes, e.g. "bytes=0-99")
// of object from into part number part of the multipart upload mpuId for
// object to. On success the part's ETag is written through etag; on failure
// the mapped error is written through errout. Runs as a goroutine; wg is
// marked done in all cases.
func (fs *Goofys) mpuCopyPart(from string, to string, mpuId string, bytes string, part int64, wg *sync.WaitGroup,
	etag **string, errout *error) {

	defer func() {
		wg.Done()
	}()

	// XXX use CopySourceIfUnmodifiedSince to ensure that
	// we are copying from the same object
	params := &s3.UploadPartCopyInput{
		Bucket:          &fs.bucket,
		Key:             &to,
		CopySource:      &from,
		UploadId:        &mpuId,
		CopySourceRange: &bytes,
		PartNumber:      &part,
	}

	s3Log.Debug(params)

	resp, err := fs.s3.UploadPartCopy(params)
	if err != nil {
		*errout = mapAwsError(err)
		return
	}

	*etag = resp.CopyPartResult.ETag
	return
}
// sizeToParts returns how many multipart-copy parts are needed for an object
// of the given size, at 5GiB per part (the S3 per-part maximum), rounding up.
func sizeToParts(size int64) int {
	const PART_SIZE = 5 * 1024 * 1024 * 1024

	// Ceiling division: any remainder needs one extra part.
	return int((size + PART_SIZE - 1) / PART_SIZE)
}
// mpuCopyParts fans out one mpuCopyPart goroutine per 5GiB range of the
// source object. Resulting ETags land in etags (index = part number - 1);
// any part failure is written to *err (last writer wins). The caller must
// wg.Wait() before reading etags or err.
func (fs *Goofys) mpuCopyParts(size int64, from string, to string, mpuId string,
	wg *sync.WaitGroup, etags []*string, err *error) {

	const PART_SIZE = 5 * 1024 * 1024 * 1024

	rangeFrom := int64(0)
	rangeTo := int64(0)

	for i := int64(1); rangeTo < size; i++ {
		rangeFrom = rangeTo
		rangeTo = i * PART_SIZE
		if rangeTo > size {
			// Last part covers only the remaining bytes.
			rangeTo = size
		}
		bytes := fmt.Sprintf("bytes=%v-%v", rangeFrom, rangeTo-1)

		wg.Add(1)
		go fs.mpuCopyPart(from, to, mpuId, bytes, i, wg, &etags[i-1], err)
	}
}
// copyObjectMultipart server-side copies an object larger than 5GiB using a
// multipart upload: create the upload (unless mpuId is provided), copy all
// parts concurrently, then complete the upload with the collected ETags.
// Pass mpuId == "" to have a new upload created.
func (fs *Goofys) copyObjectMultipart(size int64, from string, to string, mpuId string) (err error) {
	var wg sync.WaitGroup
	nParts := sizeToParts(size)
	etags := make([]*string, nParts)

	if mpuId == "" {
		params := &s3.CreateMultipartUploadInput{
			Bucket:       &fs.bucket,
			Key:          &to,
			StorageClass: &fs.flags.StorageClass,
		}

		resp, err := fs.s3.CreateMultipartUpload(params)
		if err != nil {
			return mapAwsError(err)
		}

		mpuId = *resp.UploadId
	}

	fs.mpuCopyParts(size, from, to, mpuId, &wg, etags, &err)
	wg.Wait()

	if err != nil {
		return
	} else {
		// All parts copied; assemble the completion request. Part numbers
		// are 1-based.
		parts := make([]*s3.CompletedPart, nParts)
		for i := 0; i < nParts; i++ {
			parts[i] = &s3.CompletedPart{
				ETag:       etags[i],
				PartNumber: aws.Int64(int64(i + 1)),
			}
		}

		params := &s3.CompleteMultipartUploadInput{
			Bucket:   &fs.bucket,
			Key:      &to,
			UploadId: &mpuId,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
		}

		s3Log.Debug(params)

		_, err = fs.s3.CompleteMultipartUpload(params)
		if err != nil {
			return mapAwsError(err)
		}
	}

	return
}
// copyObjectMaybeMultipart server-side copies from to to, choosing a plain
// CopyObject for objects up to 5GiB and a multipart copy above that (the S3
// single-copy limit). Pass size == -1 to have the size looked up via
// HeadObject first.
func (fs *Goofys) copyObjectMaybeMultipart(size int64, from string, to string) (err error) {
	if size == -1 {
		params := &s3.HeadObjectInput{Bucket: &fs.bucket, Key: &from}
		resp, err := fs.s3.HeadObject(params)
		if err != nil {
			return mapAwsError(err)
		}

		size = *resp.ContentLength
	}

	// CopySource must be bucket-qualified.
	from = fs.bucket + "/" + from

	if size > 5*1024*1024*1024 {
		return fs.copyObjectMultipart(size, from, to, "")
	}

	params := &s3.CopyObjectInput{
		Bucket:       &fs.bucket,
		CopySource:   &from,
		Key:          &to,
		StorageClass: &fs.flags.StorageClass,
	}

	_, err = fs.s3.CopyObject(params)
	if err != nil {
		err = mapAwsError(err)
	}

	return
}
// allocateInodeId hands out the next inode ID.
//
// LOCKS_REQUIRED(fs.mu)
func (fs *Goofys) allocateInodeId() (id fuseops.InodeID) {
	id = fs.nextInodeID
	fs.nextInodeID++
	return
}
// LookUpInodeMaybeDir resolves fullName by racing two concurrent probes: a
// HeadObject for a plain object and a one-key listing for a directory
// prefix. Whichever positive answer arrives first wins; fuse.ENOENT from one
// probe is remembered (notFound) and only returned once BOTH probes came up
// empty. Non-ENOENT errors abort immediately.
//
// returned inode has nil Id
func (fs *Goofys) LookUpInodeMaybeDir(name string, fullName string) (inode *Inode, err error) {
	errObjectChan := make(chan error, 1)
	objectChan := make(chan s3.HeadObjectOutput, 1)
	errDirChan := make(chan error, 1)
	dirChan := make(chan s3.ListObjectsOutput, 1)

	go fs.LookUpInodeNotDir(fullName, objectChan, errObjectChan)
	go fs.LookUpInodeDir(fullName, dirChan, errDirChan)

	notFound := false

	for {
		select {
		case resp := <-objectChan:
			// XXX/TODO if both object and object/ exists, return dir
			inode = NewInode(&name, &fullName, fs.flags)
			inode.Attributes = &fuseops.InodeAttributes{
				Size:   uint64(aws.Int64Value(resp.ContentLength)),
				Nlink:  1,
				Mode:   fs.flags.FileMode,
				Atime:  *resp.LastModified,
				Mtime:  *resp.LastModified,
				Ctime:  *resp.LastModified,
				Crtime: *resp.LastModified,
				Uid:    fs.flags.Uid,
				Gid:    fs.flags.Gid,
			}
			return
		case err = <-errObjectChan:
			if err == fuse.ENOENT {
				if notFound {
					// Both probes missed: definitively not found.
					return nil, err
				} else {
					// Remember the miss and keep waiting for the dir probe.
					notFound = true
					err = nil
				}
			} else {
				return
			}
		case resp := <-dirChan:
			if len(resp.CommonPrefixes) != 0 || len(resp.Contents) != 0 {
				inode = NewInode(&name, &fullName, fs.flags)
				inode.Attributes = &fs.rootAttrs
				return
			} else {
				// 404
				if notFound {
					return nil, fuse.ENOENT
				} else {
					notFound = true
				}
			}
		case err = <-errDirChan:
			return
		}
	}
}
// LookUpInode serves the fuse LookUpInode op. It first consults the
// name-keyed inode cache; on a miss or an expired attr-cache entry it asks
// the parent inode to look the child up in S3, registers a brand-new inode
// (or refreshes the cached one's attributes), and fills op.Entry.
func (fs *Goofys) LookUpInode(
	ctx context.Context,
	op *fuseops.LookUpInodeOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	inode, ok := fs.inodesCache[parent.getChildName(op.Name)]
	if ok {
		// Take a ref optimistically; it is undone below if the S3 lookup
		// fails.
		inode.Ref()

		// A stale attribute cache entry forces a refresh via LookUp.
		expireTime := inode.AttrTime.Add(fs.flags.StatCacheTTL)
		if !expireTime.After(time.Now()) {
			ok = false
		}
	}
	fs.mu.Unlock()

	if !ok {
		var newInode *Inode

		newInode, err = parent.LookUp(fs, op.Name)
		if err != nil {
			if inode != nil {
				// just kidding! pretend we didn't up the ref
				inode.DeRef(1)
			}
			return err
		}

		if inode == nil {
			// First sighting: assign an ID and register in both maps.
			fs.mu.Lock()
			inode = newInode
			inode.Id = fs.allocateInodeId()
			fs.inodesCache[*inode.FullName] = inode
			fs.inodes[inode.Id] = inode
			fs.mu.Unlock()
		} else {
			// Known inode with stale attrs: refresh in place.
			inode.Attributes = newInode.Attributes
			inode.AttrTime = time.Now()
		}
	}

	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	inode.logFuse("<-- LookUpInode")

	return
}
// ForgetInode serves the fuse ForgetInode op: drop op.N references from the
// inode, and evict it from both maps once the refcount says it is stale.
//
// LOCKS_EXCLUDED(fs.mu)
func (fs *Goofys) ForgetInode(
	ctx context.Context,
	op *fuseops.ForgetInodeOp) (err error) {

	fs.mu.Lock()
	defer fs.mu.Unlock()

	inode := fs.getInodeOrDie(op.Inode)
	stale := inode.DeRef(op.N)

	if stale {
		delete(fs.inodes, op.Inode)
		delete(fs.inodesCache, *inode.FullName)
	}

	return
}
// OpenDir serves the fuse OpenDir op: allocate a handle ID, open a directory
// handle on the inode (outside the lock), and register it. The lock is
// dropped around OpenDir() to respect the fs-before-inode lock ordering.
func (fs *Goofys) OpenDir(
	ctx context.Context,
	op *fuseops.OpenDirOp) (err error) {
	fs.mu.Lock()

	handleID := fs.nextHandleID
	fs.nextHandleID++

	in := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	// XXX/is this a dir?
	dh := in.OpenDir()

	fs.mu.Lock()
	defer fs.mu.Unlock()

	fs.dirHandles[handleID] = dh
	op.Handle = handleID

	return
}
// ReadDir serves the fuse ReadDir op: stream directory entries starting at
// op.Offset into op.Dst until the handle is exhausted (nil entry) or the
// destination buffer is full (WriteDirent returns 0).
//
// LOCKS_EXCLUDED(fs.mu)
func (fs *Goofys) ReadDir(
	ctx context.Context,
	op *fuseops.ReadDirOp) (err error) {

	// Find the handle.
	fs.mu.Lock()
	dh := fs.dirHandles[op.Handle]
	//inode := fs.inodes[op.Inode]
	fs.mu.Unlock()

	if dh == nil {
		// The kernel handed us a handle we never issued: programmer error.
		panic(fmt.Sprintf("can't find dh=%v", op.Handle))
	}

	dh.inode.logFuse("ReadDir", op.Offset)

	for i := op.Offset; ; i++ {
		e, err := dh.ReadDir(fs, i)
		if err != nil {
			return err
		}
		if e == nil {
			break
		}

		n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], *e)
		if n == 0 {
			// Destination buffer full; the kernel will ask again.
			break
		}

		dh.inode.logFuse("<-- ReadDir", e.Name, e.Offset)

		op.BytesRead += n
	}

	return
}
// ReleaseDirHandle serves the fuse ReleaseDirHandle op: close the directory
// handle and drop it from the handle table.
func (fs *Goofys) ReleaseDirHandle(
	ctx context.Context,
	op *fuseops.ReleaseDirHandleOp) (err error) {

	fs.mu.Lock()
	defer fs.mu.Unlock()

	dh := fs.dirHandles[op.Handle]
	dh.CloseDir()

	fuseLog.Debugln("ReleaseDirHandle", *dh.inode.FullName)

	delete(fs.dirHandles, op.Handle)

	return
}
// OpenFile serves the fuse OpenFile op: open a file handle on the inode
// (outside the lock), allocate a handle ID and register it. KeepPageCache
// lets the kernel retain cached pages across opens (close-to-open model).
func (fs *Goofys) OpenFile(
	ctx context.Context,
	op *fuseops.OpenFileOp) (err error) {
	fs.mu.Lock()
	in := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	fh := in.OpenFile(fs)

	fs.mu.Lock()
	defer fs.mu.Unlock()

	handleID := fs.nextHandleID
	fs.nextHandleID++

	fs.fileHandles[handleID] = fh

	op.Handle = handleID
	op.KeepPageCache = true

	return
}
// ReadFile serves the fuse ReadFile op by delegating to the file handle;
// the read itself happens outside fs.mu.
func (fs *Goofys) ReadFile(
	ctx context.Context,
	op *fuseops.ReadFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	op.BytesRead, err = fh.ReadFile(fs, op.Offset, op.Dst)

	return
}
// SyncFile serves the fuse SyncFile op (fsync) by flushing the handle's
// pending data to S3.
func (fs *Goofys) SyncFile(
	ctx context.Context,
	op *fuseops.SyncFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	err = fh.FlushFile(fs)

	return
}
// FlushFile serves the fuse FlushFile op (close(2)): flush the handle's
// pending data to S3, and on success (re-)insert the inode into the
// name-keyed cache so a subsequent lookup sees the flushed file.
func (fs *Goofys) FlushFile(
	ctx context.Context,
	op *fuseops.FlushFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	err = fh.FlushFile(fs)
	if err == nil {
		fs.mu.Lock()
		fs.inodesCache[*fh.inode.FullName] = fh.inode
		fs.mu.Unlock()
	}

	return
}
// ReleaseFileHandle serves the fuse ReleaseFileHandle op: release the
// handle's resources and drop it from the handle table.
func (fs *Goofys) ReleaseFileHandle(
	ctx context.Context,
	op *fuseops.ReleaseFileHandleOp) (err error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	fh := fs.fileHandles[op.Handle]
	fh.Release()

	fuseLog.Debugln("ReleaseFileHandle", *fh.inode.FullName)

	delete(fs.fileHandles, op.Handle)

	// try to compact heap
	//fs.bufferPool.MaybeGC()
	return
}
// CreateFile serves the fuse CreateFile op: create the child on the parent
// inode (outside the lock), then register the new inode and its freshly
// opened file handle, and fill op.Entry.
func (fs *Goofys) CreateFile(
	ctx context.Context,
	op *fuseops.CreateFileOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	inode, fh := parent.Create(fs, op.Name)

	fs.mu.Lock()
	defer fs.mu.Unlock()

	// Use the shared allocator instead of duplicating the ID bump inline,
	// for consistency with LookUpInode.
	inode.Id = fs.allocateInodeId()
	fs.inodes[inode.Id] = inode

	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	// Allocate a handle.
	handleID := fs.nextHandleID
	fs.nextHandleID++

	fs.fileHandles[handleID] = fh

	op.Handle = handleID

	inode.logFuse("<-- CreateFile")

	return
}
// MkDir serves the fuse MkDir op: create the directory on the parent inode
// (outside the lock), then register the new inode in both maps and fill
// op.Entry.
func (fs *Goofys) MkDir(
	ctx context.Context,
	op *fuseops.MkDirOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	// ignore op.Mode for now
	inode, err := parent.MkDir(fs, op.Name)
	if err != nil {
		return err
	}

	fs.mu.Lock()
	defer fs.mu.Unlock()

	// Use the shared allocator instead of duplicating the ID bump inline,
	// for consistency with LookUpInode.
	inode.Id = fs.allocateInodeId()
	fs.inodesCache[*inode.FullName] = inode
	fs.inodes[inode.Id] = inode

	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	return
}
// RmDir serves the fuse RmDir op by delegating the removal to the parent
// inode (outside the lock).
func (fs *Goofys) RmDir(
	ctx context.Context,
	op *fuseops.RmDirOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	err = parent.RmDir(fs, op.Name)
	return
}
// SetInodeAttributes serves the fuse SetInodeAttributes op. Note that it
// does not actually apply the requested changes (S3 has no real chmod/chown/
// utimes); it just reports the current attributes back, which satisfies most
// callers under the "minimal POSIX effort" model.
func (fs *Goofys) SetInodeAttributes(
	ctx context.Context,
	op *fuseops.SetInodeAttributesOp) (err error) {

	fs.mu.Lock()
	inode := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	attr, err := inode.GetAttributes(fs)
	if err != nil {
		// NOTE(review): previously *attr was dereferenced even on error,
		// which panics if GetAttributes returns a nil attr on failure —
		// confirm against Inode.GetAttributes.
		return
	}
	op.Attributes = *attr
	op.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	return
}
// WriteFile serves the fuse WriteFile op by delegating to the file handle;
// the write itself happens outside fs.mu.
func (fs *Goofys) WriteFile(
	ctx context.Context,
	op *fuseops.WriteFileOp) (err error) {

	fs.mu.Lock()

	fh, ok := fs.fileHandles[op.Handle]
	if !ok {
		// The kernel handed us a handle we never issued: programmer error.
		panic(fmt.Sprintf("WriteFile: can't find handle %v", op.Handle))
	}
	fs.mu.Unlock()

	err = fh.WriteFile(fs, op.Offset, op.Data)

	return
}
// Unlink serves the fuse Unlink op by delegating the deletion to the parent
// inode (outside the lock).
func (fs *Goofys) Unlink(
	ctx context.Context,
	op *fuseops.UnlinkOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	err = parent.Unlink(fs, op.Name)
	return
}
// Rename serves the fuse Rename op: resolve both parents under the lock,
// then delegate the rename (an S3 copy+delete) to the old parent.
func (fs *Goofys) Rename(
	ctx context.Context,
	op *fuseops.RenameOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.OldParent)
	newParent := fs.getInodeOrDie(op.NewParent)
	fs.mu.Unlock()

	return parent.Rename(fs, op.OldName, newParent, op.NewName)
}
Fall back to v2 signing on the first 403 in addition to 400.

Fixes #65
// Copyright 2015 Ka-Hing Cheung
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"os"
"sync"
"syscall"
"time"
"golang.org/x/net/context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/fuseops"
"github.com/jacobsa/fuse/fuseutil"
"github.com/Sirupsen/logrus"
)
// goofys is a Filey System written in Go. All the backend data is
// stored on S3 as is. It's a Filey System instead of a File System
// because it makes minimal effort at being POSIX
// compliant. Particularly things that are difficult to support on S3
// or would translate into more than one round-trip would either fail
// (rename non-empty dir) or faked (no per-file permission). goofys
// does not have a on disk data cache, and consistency model is
// close-to-open.
type Goofys struct {
	fuseutil.NotImplementedFileSystem
	bucket string

	flags *FlagStorage

	umask uint32

	awsConfig *aws.Config
	sess      *session.Session
	s3        *s3.S3
	// v2Signer is set once we have fallen back to AWS signature v2 for
	// non-AWS endpoints; see fallbackV2Signer.
	v2Signer  bool
	rootAttrs fuseops.InodeAttributes

	bufferPool *BufferPool

	// A lock protecting the state of the file system struct itself (distinct
	// from per-inode locks). Make sure to see the notes on lock ordering above.
	mu sync.Mutex

	// The next inode ID to hand out. We assume that this will never overflow,
	// since even if we were handing out inode IDs at 4 GHz, it would still take
	// over a century to do so.
	//
	// GUARDED_BY(mu)
	nextInodeID fuseops.InodeID

	// The collection of live inodes, keyed by inode ID. No ID less than
	// fuseops.RootInodeID is ever used.
	//
	// INVARIANT: For all keys k, fuseops.RootInodeID <= k < nextInodeID
	// INVARIANT: For all keys k, inodes[k].ID() == k
	// INVARIANT: inodes[fuseops.RootInodeID] is missing or of type inode.DirInode
	// INVARIANT: For all v, if IsDirName(v.Name()) then v is inode.DirInode
	//
	// GUARDED_BY(mu)
	inodes      map[fuseops.InodeID]*Inode
	inodesCache map[string]*Inode // fullname to inode

	// GUARDED_BY(mu)
	nextHandleID fuseops.HandleID
	dirHandles   map[fuseops.HandleID]*DirHandle
	fileHandles  map[fuseops.HandleID]*FileHandle
}

// s3Log is the logger for S3 wire-level diagnostics.
var s3Log = GetLogger("s3")
// NewGoofys constructs and initializes a Goofys file system for bucket.
// It probes bucket accessibility and, when the endpoint rejects v4-signed
// requests, falls back to the v2 signer; for other errors it re-detects the
// bucket region and retries. Returns nil when the bucket cannot be accessed.
func NewGoofys(bucket string, awsConfig *aws.Config, flags *FlagStorage) *Goofys {
	// Set up the basic struct.
	fs := &Goofys{
		bucket: bucket,
		flags:  flags,
		umask:  0122,
	}

	if flags.DebugS3 {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors)
		s3Log.Level = logrus.DebugLevel
	}

	fs.awsConfig = awsConfig
	fs.sess = session.New(awsConfig)
	fs.s3 = fs.newS3()

	err := fs.testBucket()
	if err != nil {
		switch mapAwsError(err) {
		case fuse.ENOENT:
			log.Errorf("bucket %v does not exist", fs.bucket)
			return nil
		// Both statuses must share ONE case clause: Go switch cases do not
		// fall through, so a separate empty "case fuse.EINVAL:" would
		// silently skip the fallback for 400 responses.
		case fuse.EINVAL, syscall.EACCES: // swift3/ceph-s3 return 400, GCS/EMC return 403
			// only non-aws would require v2 signer, and it's not clear
			// how to detect region in those cases
			fs.fallbackV2Signer()
			err = fs.testBucket()
			if err != nil {
				log.Errorf("Unable to access '%v': %v", fs.bucket, err)
				return nil
			}
		default:
			fs.detectBucketLocationByHEAD()
			fs.sess = session.New(awsConfig)
			fs.s3 = fs.newS3()
			// try again to make sure
			err = fs.testBucket()
			if err != nil {
				log.Errorf("Unable to access '%v': %v", fs.bucket, mapAwsError(err))
				return nil
			}
		}
	}

	now := time.Now()
	fs.rootAttrs = fuseops.InodeAttributes{
		Size:   4096,
		Nlink:  2,
		Mode:   flags.DirMode | os.ModeDir,
		Atime:  now,
		Mtime:  now,
		Ctime:  now,
		Crtime: now,
		Uid:    fs.flags.Uid,
		Gid:    fs.flags.Gid,
	}

	fs.bufferPool = BufferPool{}.Init()
	fs.nextInodeID = fuseops.RootInodeID + 1
	fs.inodes = make(map[fuseops.InodeID]*Inode)
	root := NewInode(aws.String(""), aws.String(""), flags)
	root.Id = fuseops.RootInodeID
	root.Attributes = &fs.rootAttrs
	fs.inodes[fuseops.RootInodeID] = root
	fs.inodesCache = make(map[string]*Inode)

	fs.nextHandleID = 1
	fs.dirHandles = make(map[fuseops.HandleID]*DirHandle)
	fs.fileHandles = make(map[fuseops.HandleID]*FileHandle)

	return fs
}
// fallbackV2Signer switches the S3 client to AWS signature v2 and rebuilds
// the client. Returns fuse.EINVAL if the fallback was already performed.
func (fs *Goofys) fallbackV2Signer() (err error) {
	if fs.v2Signer {
		return fuse.EINVAL
	}

	s3Log.Infoln("Falling back to v2 signer")
	fs.v2Signer = true
	fs.s3 = fs.newS3()
	return
}
// newS3 builds an S3 client from the current session. When v2 signing is
// enabled it replaces the default signing handlers with SignV2 (plus the
// content-length handler, which the default sign chain normally provides).
func (fs *Goofys) newS3() *s3.S3 {
	svc := s3.New(fs.sess)
	if fs.v2Signer {
		svc.Handlers.Sign.Clear()
		svc.Handlers.Sign.PushBack(SignV2)
		svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	}
	return svc
}
// testBucket probes bucket accessibility with a HeadBucket request.
func (fs *Goofys) testBucket() (err error) {
	_, err = fs.s3.HeadBucket(&s3.HeadBucketInput{Bucket: &fs.bucket})
	return
}
func (fs *Goofys) detectBucketLocationByHEAD() {
config := &aws.Config{
Credentials: credentials.AnonymousCredentials,
Endpoint: fs.awsConfig.Endpoint,
// always probe with us-east-1 region, otherwise the behavior of other endpoints
// maybe different
Region: aws.String("us-east-1"),
Logger: GetLogger("s3"),
LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors),
}
sess := session.New(config)
tmpS3 := s3.New(sess)
req, _ := tmpS3.HeadBucketRequest(&s3.HeadBucketInput{Bucket: &fs.bucket})
req.Send()
region := req.HTTPResponse.Header["X-Amz-Bucket-Region"]
if len(region) != 0 {
if region[0] != *fs.awsConfig.Region {
s3Log.Infof("Switching from region '%v' to '%v'", *fs.awsConfig.Region, region[0])
fs.awsConfig.Region = ®ion[0]
}
} else {
s3Log.Infof("Unable to detect bucket region, staying at '%v'", *fs.awsConfig.Region)
}
return
}
// Find the given inode. Panic if it doesn't exist.
//
// LOCKS_REQUIRED(fs.mu)
func (fs *Goofys) getInodeOrDie(id fuseops.InodeID) (inode *Inode) {
	inode = fs.inodes[id]
	if inode == nil {
		// An unknown ID means the kernel and our table disagree: a
		// programmer bug, so fail loudly.
		panic(fmt.Sprintf("Unknown inode: %v", id))
	}
	return
}
// StatFS reports synthetic file-system statistics: the backing bucket has
// no real capacity limits, so a fixed 1 PB volume with one billion free
// inodes is advertised.
func (fs *Goofys) StatFS(
	ctx context.Context,
	op *fuseops.StatFSOp) (err error) {

	const (
		blockSize   = 4096
		totalSpace  = 1 * 1024 * 1024 * 1024 * 1024 * 1024 // 1PB
		totalBlocks = totalSpace / blockSize
		numInodes   = 1 * 1000 * 1000 * 1000 // 1 billion
	)

	op.BlockSize = blockSize
	op.Blocks = totalBlocks
	op.BlocksFree = totalBlocks
	op.BlocksAvailable = totalBlocks
	op.IoSize = 1 * 1024 * 1024 // 1MB
	op.Inodes = numInodes
	op.InodesFree = numInodes
	return
}
// GetInodeAttributes returns the inode's current attributes and refreshes
// the kernel-side attribute cache expiry.
//
// LOCKS_EXCLUDED(fs.mu)
func (fs *Goofys) GetInodeAttributes(
	ctx context.Context,
	op *fuseops.GetInodeAttributesOp) (err error) {

	fs.mu.Lock()
	inode := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	attr, err := inode.GetAttributes(fs)
	if err != nil {
		// BUG FIX: previously *attr was dereferenced unconditionally,
		// which panics if GetAttributes fails and returns a nil attr.
		return err
	}
	op.Attributes = *attr
	op.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)

	return
}
// mapAwsError translates an AWS SDK error into the errno-style errors the
// FUSE layer understands (400→EINVAL, 403→EACCES, 404→ENOENT,
// 405→ENOTSUP). Unrecognized AWS errors are logged and returned as-is;
// non-AWS errors pass through untouched.
func mapAwsError(err error) error {
	if awsErr, ok := err.(awserr.Error); ok {
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred
			switch reqErr.StatusCode() {
			case 400:
				return fuse.EINVAL
			case 403:
				return syscall.EACCES
			case 404:
				return fuse.ENOENT
			case 405:
				return syscall.ENOTSUP
			default:
				// BUG FIX: the code/msg arguments were swapped — the
				// status code was logged under "msg" and the message
				// under "code".
				s3Log.Errorf("code=%v msg=%v request=%v\n", reqErr.StatusCode(), reqErr.Message(), reqErr.RequestID())
				return reqErr
			}
		} else {
			switch awsErr.Code() {
			case "BucketRegionError":
				// don't need to log anything, we should detect region after
				return err
			default:
				// Generic AWS Error with Code, Message, and original error (if any)
				s3Log.Errorf("code=%v msg=%v, err=%v\n", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				return awsErr
			}
		}
	} else {
		return err
	}
}
// LookUpInodeNotDir HEADs the object `name` and delivers the result on c,
// or the mapped error on errc. Run as a goroutine by LookUpInodeMaybeDir.
func (fs *Goofys) LookUpInodeNotDir(name string, c chan s3.HeadObjectOutput, errc chan error) {
	params := &s3.HeadObjectInput{Bucket: &fs.bucket, Key: &name}
	resp, err := fs.s3.HeadObject(params)
	if err != nil {
		errc <- mapAwsError(err)
		return
	}

	s3Log.Debug(resp)
	c <- *resp
}
// LookUpInodeDir probes whether `name` exists as a directory by listing at
// most one key under the "name/" prefix, delivering the listing on c or
// the mapped error on errc. Run as a goroutine by LookUpInodeMaybeDir.
func (fs *Goofys) LookUpInodeDir(name string, c chan s3.ListObjectsOutput, errc chan error) {
	params := &s3.ListObjectsInput{
		Bucket:    &fs.bucket,
		Delimiter: aws.String("/"),
		MaxKeys:   aws.Int64(1),
		Prefix:    aws.String(name + "/"),
	}

	resp, err := fs.s3.ListObjects(params)
	if err != nil {
		errc <- mapAwsError(err)
		return
	}

	s3Log.Debug(resp)
	c <- *resp
}
// mpuCopyPart copies one byte range (`bytes`, an HTTP range string) of the
// source object into part `part` of multipart upload mpuId, writing the
// resulting ETag to *etag or the mapped error to *errout. wg.Done is
// always called; run as a goroutine by mpuCopyParts.
func (fs *Goofys) mpuCopyPart(from string, to string, mpuId string, bytes string, part int64, wg *sync.WaitGroup,
	etag **string, errout *error) {

	defer func() {
		wg.Done()
	}()

	// XXX use CopySourceIfUnmodifiedSince to ensure that
	// we are copying from the same object
	params := &s3.UploadPartCopyInput{
		Bucket:          &fs.bucket,
		Key:             &to,
		CopySource:      &from,
		UploadId:        &mpuId,
		CopySourceRange: &bytes,
		PartNumber:      &part,
	}

	s3Log.Debug(params)

	resp, err := fs.s3.UploadPartCopy(params)
	if err != nil {
		*errout = mapAwsError(err)
		return
	}

	*etag = resp.CopyPartResult.ETag
	return
}
// sizeToParts returns how many 5 GiB multipart-upload parts are needed to
// hold size bytes; a trailing partial part counts as a full part.
func sizeToParts(size int64) int {
	const partSize = 5 * 1024 * 1024 * 1024

	n := int(size / partSize)
	if size%partSize != 0 {
		n++
	}
	return n
}
// mpuCopyParts fans out one mpuCopyPart goroutine per 5 GiB slice of the
// source object; part i's ETag lands in etags[i-1]. wg is incremented here
// and completed by the workers, so the caller must wg.Wait before reading
// etags or *err.
//
// NOTE(review): all workers write the single shared *err slot without
// synchronization; concurrent failures race on it — confirm acceptable.
func (fs *Goofys) mpuCopyParts(size int64, from string, to string, mpuId string,
	wg *sync.WaitGroup, etags []*string, err *error) {

	const PART_SIZE = 5 * 1024 * 1024 * 1024

	rangeFrom := int64(0)
	rangeTo := int64(0)

	for i := int64(1); rangeTo < size; i++ {
		rangeFrom = rangeTo
		rangeTo = i * PART_SIZE
		if rangeTo > size {
			// last part: clamp to the object size
			rangeTo = size
		}
		bytes := fmt.Sprintf("bytes=%v-%v", rangeFrom, rangeTo-1)

		wg.Add(1)
		go fs.mpuCopyPart(from, to, mpuId, bytes, i, wg, &etags[i-1], err)
	}
}
// copyObjectMultipart server-side-copies `from` to `to` using a multipart
// upload: it creates the upload (unless mpuId is supplied), copies all
// parts concurrently via mpuCopyParts, then completes the upload with the
// collected ETags. Returns the first mapped error encountered.
func (fs *Goofys) copyObjectMultipart(size int64, from string, to string, mpuId string) (err error) {
	var wg sync.WaitGroup
	nParts := sizeToParts(size)
	etags := make([]*string, nParts)

	if mpuId == "" {
		params := &s3.CreateMultipartUploadInput{
			Bucket:       &fs.bucket,
			Key:          &to,
			StorageClass: &fs.flags.StorageClass,
		}

		resp, err := fs.s3.CreateMultipartUpload(params)
		if err != nil {
			return mapAwsError(err)
		}

		mpuId = *resp.UploadId
	}

	fs.mpuCopyParts(size, from, to, mpuId, &wg, etags, &err)
	wg.Wait()

	if err != nil {
		return
	} else {
		// All parts copied; assemble the completion manifest in order.
		parts := make([]*s3.CompletedPart, nParts)
		for i := 0; i < nParts; i++ {
			parts[i] = &s3.CompletedPart{
				ETag:       etags[i],
				PartNumber: aws.Int64(int64(i + 1)),
			}
		}

		params := &s3.CompleteMultipartUploadInput{
			Bucket:   &fs.bucket,
			Key:      &to,
			UploadId: &mpuId,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
		}

		s3Log.Debug(params)

		_, err = fs.s3.CompleteMultipartUpload(params)
		if err != nil {
			return mapAwsError(err)
		}
	}

	return
}
// copyObjectMaybeMultipart server-side-copies `from` to `to`, resolving an
// unknown size (-1) via HeadObject first. Objects over 5 GiB — the single
// CopyObject limit — are delegated to copyObjectMultipart.
func (fs *Goofys) copyObjectMaybeMultipart(size int64, from string, to string) (err error) {
	if size == -1 {
		params := &s3.HeadObjectInput{Bucket: &fs.bucket, Key: &from}
		resp, err := fs.s3.HeadObject(params)
		if err != nil {
			return mapAwsError(err)
		}

		size = *resp.ContentLength
	}

	// CopySource must be "bucket/key".
	from = fs.bucket + "/" + from

	if size > 5*1024*1024*1024 {
		return fs.copyObjectMultipart(size, from, to, "")
	}

	params := &s3.CopyObjectInput{
		Bucket:       &fs.bucket,
		CopySource:   &from,
		Key:          &to,
		StorageClass: &fs.flags.StorageClass,
	}

	_, err = fs.s3.CopyObject(params)
	if err != nil {
		err = mapAwsError(err)
	}

	return
}
// allocateInodeId hands out the next inode ID.
//
// LOCKS_REQUIRED(fs.mu) — nextInodeID is guarded by fs.mu.
func (fs *Goofys) allocateInodeId() (id fuseops.InodeID) {
	id = fs.nextInodeID
	fs.nextInodeID++
	return
}
// returned inode has nil Id
//
// LookUpInodeMaybeDir resolves fullName by racing two probes in parallel:
// a HeadObject (file) and a one-key listing under "fullName/" (directory).
// The first positive answer wins; fuse.ENOENT is returned only after both
// probes come back negative.
func (fs *Goofys) LookUpInodeMaybeDir(name string, fullName string) (inode *Inode, err error) {
	errObjectChan := make(chan error, 1)
	objectChan := make(chan s3.HeadObjectOutput, 1)
	errDirChan := make(chan error, 1)
	dirChan := make(chan s3.ListObjectsOutput, 1)

	go fs.LookUpInodeNotDir(fullName, objectChan, errObjectChan)
	go fs.LookUpInodeDir(fullName, dirChan, errDirChan)

	// notFound records that one of the two probes already reported ENOENT.
	notFound := false

	for {
		select {
		case resp := <-objectChan:
			// XXX/TODO if both object and object/ exists, return dir
			inode = NewInode(&name, &fullName, fs.flags)
			inode.Attributes = &fuseops.InodeAttributes{
				Size:   uint64(aws.Int64Value(resp.ContentLength)),
				Nlink:  1,
				Mode:   fs.flags.FileMode,
				Atime:  *resp.LastModified,
				Mtime:  *resp.LastModified,
				Ctime:  *resp.LastModified,
				Crtime: *resp.LastModified,
				Uid:    fs.flags.Uid,
				Gid:    fs.flags.Gid,
			}
			return
		case err = <-errObjectChan:
			if err == fuse.ENOENT {
				if notFound {
					// the directory probe already missed too
					return nil, err
				} else {
					notFound = true
					err = nil
				}
			} else {
				// a real error from HeadObject
				return
			}
		case resp := <-dirChan:
			if len(resp.CommonPrefixes) != 0 || len(resp.Contents) != 0 {
				// at least one key under the prefix => it's a directory
				inode = NewInode(&name, &fullName, fs.flags)
				inode.Attributes = &fs.rootAttrs
				return
			} else {
				// 404
				if notFound {
					return nil, fuse.ENOENT
				} else {
					notFound = true
				}
			}
		case err = <-errDirChan:
			return
		}
	}
}
// LookUpInode resolves op.Name under op.Parent, serving from inodesCache
// when the cached attributes are still within StatCacheTTL and otherwise
// consulting the parent's LookUp. New inodes get an ID and are registered
// in both fs.inodes and fs.inodesCache.
//
// LOCKS_EXCLUDED(fs.mu)
func (fs *Goofys) LookUpInode(
	ctx context.Context,
	op *fuseops.LookUpInodeOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	inode, ok := fs.inodesCache[parent.getChildName(op.Name)]
	if ok {
		// Take the ref optimistically while holding fs.mu; it is undone
		// below if the remote lookup fails.
		inode.Ref()

		expireTime := inode.AttrTime.Add(fs.flags.StatCacheTTL)
		if !expireTime.After(time.Now()) {
			// cached attributes are stale; fall through to a fresh lookup
			ok = false
		}
	}
	fs.mu.Unlock()

	if !ok {
		var newInode *Inode

		newInode, err = parent.LookUp(fs, op.Name)
		if err != nil {
			if inode != nil {
				// just kidding! pretend we didn't up the ref
				inode.DeRef(1)
			}
			return err
		}

		if inode == nil {
			fs.mu.Lock()
			inode = newInode
			inode.Id = fs.allocateInodeId()
			fs.inodesCache[*inode.FullName] = inode
			fs.inodes[inode.Id] = inode
			fs.mu.Unlock()
		} else {
			// refresh the existing cached inode in place
			inode.Attributes = newInode.Attributes
			inode.AttrTime = time.Now()
		}
	}

	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	inode.logFuse("<-- LookUpInode")

	return
}
// LOCKS_EXCLUDED(fs.mu)
//
// ForgetInode drops op.N references from the inode and removes it from
// both lookup tables once the reference count indicates it is stale.
func (fs *Goofys) ForgetInode(
	ctx context.Context,
	op *fuseops.ForgetInodeOp) (err error) {

	fs.mu.Lock()
	defer fs.mu.Unlock()

	inode := fs.getInodeOrDie(op.Inode)
	stale := inode.DeRef(op.N)

	if stale {
		delete(fs.inodes, op.Inode)
		delete(fs.inodesCache, *inode.FullName)
	}

	return
}
// OpenDir allocates a directory handle for the inode. fs.mu is dropped
// around the OpenDir call on the inode and re-taken to register the handle
// (see the lock-ordering notes on the struct).
func (fs *Goofys) OpenDir(
	ctx context.Context,
	op *fuseops.OpenDirOp) (err error) {

	fs.mu.Lock()
	handleID := fs.nextHandleID
	fs.nextHandleID++

	in := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	// XXX/is this a dir?
	dh := in.OpenDir()

	fs.mu.Lock()
	defer fs.mu.Unlock()

	fs.dirHandles[handleID] = dh
	op.Handle = handleID

	return
}
// LOCKS_EXCLUDED(fs.mu)
//
// ReadDir streams directory entries from the handle into op.Dst starting
// at op.Offset, stopping when the listing ends or op.Dst is full.
func (fs *Goofys) ReadDir(
	ctx context.Context,
	op *fuseops.ReadDirOp) (err error) {

	// Find the handle.
	fs.mu.Lock()
	dh := fs.dirHandles[op.Handle]
	//inode := fs.inodes[op.Inode]
	fs.mu.Unlock()

	if dh == nil {
		panic(fmt.Sprintf("can't find dh=%v", op.Handle))
	}

	dh.inode.logFuse("ReadDir", op.Offset)

	for i := op.Offset; ; i++ {
		e, err := dh.ReadDir(fs, i)
		if err != nil {
			return err
		}
		if e == nil {
			// end of the listing
			break
		}

		n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], *e)
		if n == 0 {
			// no room left in op.Dst for this entry
			break
		}

		dh.inode.logFuse("<-- ReadDir", e.Name, e.Offset)

		op.BytesRead += n
	}

	return
}
// ReleaseDirHandle closes and unregisters the directory handle.
func (fs *Goofys) ReleaseDirHandle(
	ctx context.Context,
	op *fuseops.ReleaseDirHandleOp) (err error) {

	fs.mu.Lock()
	defer fs.mu.Unlock()

	dh := fs.dirHandles[op.Handle]
	dh.CloseDir()

	fuseLog.Debugln("ReleaseDirHandle", *dh.inode.FullName)

	delete(fs.dirHandles, op.Handle)

	return
}
// OpenFile allocates a file handle for the inode. KeepPageCache is set so
// the kernel may serve reads from its page cache across opens.
func (fs *Goofys) OpenFile(
	ctx context.Context,
	op *fuseops.OpenFileOp) (err error) {

	fs.mu.Lock()
	in := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	fh := in.OpenFile(fs)

	fs.mu.Lock()
	defer fs.mu.Unlock()

	handleID := fs.nextHandleID
	fs.nextHandleID++

	fs.fileHandles[handleID] = fh

	op.Handle = handleID
	op.KeepPageCache = true

	return
}
// ReadFile reads op.Dst's worth of data at op.Offset through the file
// handle, reporting the byte count in op.BytesRead.
func (fs *Goofys) ReadFile(
	ctx context.Context,
	op *fuseops.ReadFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	op.BytesRead, err = fh.ReadFile(fs, op.Offset, op.Dst)

	return
}
// SyncFile flushes any buffered writes on the handle to the backing store.
func (fs *Goofys) SyncFile(
	ctx context.Context,
	op *fuseops.SyncFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	err = fh.FlushFile(fs)

	return
}
// FlushFile flushes the handle's buffered writes and, on success, records
// the inode in inodesCache so subsequent lookups see the new object.
func (fs *Goofys) FlushFile(
	ctx context.Context,
	op *fuseops.FlushFileOp) (err error) {

	fs.mu.Lock()
	fh := fs.fileHandles[op.Handle]
	fs.mu.Unlock()

	err = fh.FlushFile(fs)
	if err == nil {
		fs.mu.Lock()
		fs.inodesCache[*fh.inode.FullName] = fh.inode
		fs.mu.Unlock()
	}

	return
}
// ReleaseFileHandle releases the handle's resources and unregisters it.
func (fs *Goofys) ReleaseFileHandle(
	ctx context.Context,
	op *fuseops.ReleaseFileHandleOp) (err error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	fh := fs.fileHandles[op.Handle]
	fh.Release()

	fuseLog.Debugln("ReleaseFileHandle", *fh.inode.FullName)

	delete(fs.fileHandles, op.Handle)

	// try to compact heap
	//fs.bufferPool.MaybeGC()
	return
}
// CreateFile creates op.Name under op.Parent, registering the new inode
// and an open file handle for it, and fills in op.Entry/op.Handle.
//
// NOTE(review): unlike MkDir, the new inode is not added to inodesCache
// here; FlushFile inserts it after a successful flush — confirm intended.
func (fs *Goofys) CreateFile(
	ctx context.Context,
	op *fuseops.CreateFileOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	inode, fh := parent.Create(fs, op.Name)

	fs.mu.Lock()
	defer fs.mu.Unlock()

	nextInode := fs.nextInodeID
	fs.nextInodeID++

	inode.Id = nextInode

	fs.inodes[inode.Id] = inode
	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	// Allocate a handle.
	handleID := fs.nextHandleID
	fs.nextHandleID++

	fs.fileHandles[handleID] = fh

	op.Handle = handleID

	inode.logFuse("<-- CreateFile")

	return
}
// MkDir creates directory op.Name under op.Parent, registering the new
// inode in both fs.inodes and fs.inodesCache and filling in op.Entry.
func (fs *Goofys) MkDir(
	ctx context.Context,
	op *fuseops.MkDirOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	// ignore op.Mode for now
	inode, err := parent.MkDir(fs, op.Name)
	if err != nil {
		return err
	}

	fs.mu.Lock()
	defer fs.mu.Unlock()

	nextInode := fs.nextInodeID
	fs.nextInodeID++

	inode.Id = nextInode

	fs.inodesCache[*inode.FullName] = inode
	fs.inodes[inode.Id] = inode
	op.Entry.Child = inode.Id
	op.Entry.Attributes = *inode.Attributes
	op.Entry.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)
	op.Entry.EntryExpiration = time.Now().Add(fs.flags.TypeCacheTTL)

	return
}
// RmDir removes directory op.Name under op.Parent; the parent inode does
// the actual work.
func (fs *Goofys) RmDir(
	ctx context.Context,
	op *fuseops.RmDirOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	err = parent.RmDir(fs, op.Name)
	return
}
// SetInodeAttributes handles the FUSE setattr request. The requested
// changes are not applied; the inode's current attributes are returned.
// NOTE(review): ignoring the requested changes appears deliberate (object
// metadata is not mutable in place) — confirm against callers.
func (fs *Goofys) SetInodeAttributes(
	ctx context.Context,
	op *fuseops.SetInodeAttributesOp) (err error) {

	fs.mu.Lock()
	inode := fs.getInodeOrDie(op.Inode)
	fs.mu.Unlock()

	attr, err := inode.GetAttributes(fs)
	if err != nil {
		// BUG FIX: previously *attr was dereferenced unconditionally,
		// which panics if GetAttributes fails and returns a nil attr.
		return err
	}
	op.Attributes = *attr
	op.AttributesExpiration = time.Now().Add(fs.flags.StatCacheTTL)

	return
}
// WriteFile writes op.Data at op.Offset through the file handle. Panics if
// the handle is unknown (a kernel/bookkeeping mismatch).
func (fs *Goofys) WriteFile(
	ctx context.Context,
	op *fuseops.WriteFileOp) (err error) {

	fs.mu.Lock()

	fh, ok := fs.fileHandles[op.Handle]
	if !ok {
		panic(fmt.Sprintf("WriteFile: can't find handle %v", op.Handle))
	}
	fs.mu.Unlock()

	err = fh.WriteFile(fs, op.Offset, op.Data)

	return
}
// Unlink removes file op.Name under op.Parent; the parent inode does the
// actual work.
func (fs *Goofys) Unlink(
	ctx context.Context,
	op *fuseops.UnlinkOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.Parent)
	fs.mu.Unlock()

	err = parent.Unlink(fs, op.Name)
	return
}
// Rename moves op.OldName under op.OldParent to op.NewName under
// op.NewParent; the old parent inode performs the move.
func (fs *Goofys) Rename(
	ctx context.Context,
	op *fuseops.RenameOp) (err error) {

	fs.mu.Lock()
	parent := fs.getInodeOrDie(op.OldParent)
	newParent := fs.getInodeOrDie(op.NewParent)
	fs.mu.Unlock()

	return parent.Rename(fs, op.OldName, newParent, op.NewName)
}
|
// Package twilio provides internal utilities for the twilio-go client library.
package twilio
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/google/go-querystring/query"
"github.com/pkg/errors"
)
// Error provides information about an unsuccessful request, carrying the
// fields Twilio returns in its JSON error payloads.
type Error struct {
	Code     int    `json:"code"`
	Detail   string `json:"detail"`
	Message  string `json:"message"`
	MoreInfo string `json:"more_info"`
	Status   int    `json:"status"`
}

// Error renders the failure as a single human-readable line, satisfying
// the standard error interface.
func (e Error) Error() string {
	return fmt.Sprintf(
		"Status: %d - Error %d: %s (%s) More info: %s",
		e.Status, e.Code, e.Message, e.Detail, e.MoreInfo,
	)
}
// Credentials store user authentication credentials.
type Credentials struct {
	AccountSid string // Twilio account SID, used as the basic-auth username
	AuthToken  string // Twilio auth token, used as the basic-auth password
}
// Client encapsulates a standard HTTP backend with authorization.
type Client struct {
	Credentials              // embedded; supplies the basic-auth pair
	HTTPClient  *http.Client // nil means http.DefaultClient is used
	BaseURL     string
}
// basicAuth returns the (username, password) pair for HTTP basic auth.
func (c *Client) basicAuth() (string, string) {
	return c.Credentials.AccountSid, c.Credentials.AuthToken
}
// errorStatusCode is the lowest HTTP status treated as an error response.
const errorStatusCode = 400
// doWithErr executes req on client (falling back to http.DefaultClient)
// and converts HTTP statuses >= errorStatusCode into a returned error,
// decoding Twilio's JSON error payload when possible. On success the
// caller owns res.Body and must close it.
func doWithErr(req *http.Request, client *http.Client) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	if res.StatusCode >= errorStatusCode {
		// BUG FIX: the response is not returned on this path, so the body
		// must be closed here or the underlying connection leaks.
		defer res.Body.Close()
		err = &Error{}
		if decodeErr := json.NewDecoder(res.Body).Decode(err); decodeErr != nil {
			err = errors.Wrap(decodeErr, "error decoding the response for an HTTP error code: "+strconv.Itoa(res.StatusCode))
			return nil, err
		}

		return nil, err
	}

	return res, nil
}
// SendRequest verifies, constructs, and authorizes an HTTP request.
// queryParams are encoded into the URL; formData is form-urlencoded into
// the body (with bracketed keys rewritten to Twilio's dot notation).
func (c Client) SendRequest(method string, rawURL string, queryParams, formData interface{}) (*http.Response, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		// BUG FIX: a malformed URL previously called log.Fatal, which
		// terminates the whole process from library code; forward the
		// error to the caller instead. (The now-unused "log" import at
		// the top of the file can be dropped.)
		return nil, err
	}

	if queryParams != nil {
		v, _ := query.Values(queryParams)
		u.RawQuery = v.Encode()
	}

	valueReader := &strings.Reader{}
	if formData != nil {
		v, _ := query.Values(formData)
		qs := v.Encode()
		// Convert "[" and "]" (%5B and %5D) to "." and "" to conform to Twilio form-urlencoded specs.
		replacer := strings.NewReplacer("%5B", ".", "%5D", "")
		dotNotationQs := replacer.Replace(qs)
		valueReader = strings.NewReader(dotNotationQs)
	}

	req, err := http.NewRequest(method, u.String(), valueReader)
	if err != nil {
		return nil, err
	}

	req.SetBasicAuth(c.basicAuth())

	if method == http.MethodPost {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	}

	return doWithErr(req, c.HTTPClient)
}
// Post performs a POST request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Post(path string, bodyData interface{}) (*http.Response, error) {
	return c.SendRequest(http.MethodPost, path, nil, bodyData)
}
// Get performs a GET request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Get(path string, queryData interface{}) (*http.Response, error) {
	return c.SendRequest(http.MethodGet, path, queryData, nil)
}
// Delete performs a DELETE request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Delete(path string) (*http.Response, error) {
	return c.SendRequest(http.MethodDelete, path, nil, nil)
}
fix: forward errors instead of logging fatally in SendRequest (#16)
// Package twilio provides internal utilities for the twilio-go client library.
package twilio
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/google/go-querystring/query"
"github.com/pkg/errors"
)
// Error provides information about an unsuccessful request, mirroring the
// fields of Twilio's JSON error payloads.
type Error struct {
	Code     int    `json:"code"`
	Detail   string `json:"detail"`
	Message  string `json:"message"`
	MoreInfo string `json:"more_info"`
	Status   int    `json:"status"`
}

// Error formats the failure on one line and satisfies the error interface.
func (e Error) Error() string {
	return fmt.Sprintf("Status: %d - Error %d: %s (%s) More info: %s",
		e.Status, e.Code, e.Message, e.Detail, e.MoreInfo)
}
// Credentials store user authentication credentials.
type Credentials struct {
	AccountSid string // Twilio account SID, used as the basic-auth username
	AuthToken  string // Twilio auth token, used as the basic-auth password
}
// Client encapsulates a standard HTTP backend with authorization.
type Client struct {
	Credentials              // embedded; supplies the basic-auth pair
	HTTPClient  *http.Client // nil means http.DefaultClient is used
	BaseURL     string
}
// basicAuth returns the (username, password) pair for HTTP basic auth.
func (c *Client) basicAuth() (string, string) {
	return c.Credentials.AccountSid, c.Credentials.AuthToken
}
// errorStatusCode is the lowest HTTP status treated as an error response.
const errorStatusCode = 400
// doWithErr executes req on client (falling back to http.DefaultClient)
// and converts HTTP statuses >= errorStatusCode into a returned error,
// decoding Twilio's JSON error payload when possible. On success the
// caller owns res.Body and must close it.
func doWithErr(req *http.Request, client *http.Client) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	if res.StatusCode >= errorStatusCode {
		// BUG FIX: the response is not returned on this path, so the body
		// must be closed here or the underlying connection leaks.
		defer res.Body.Close()
		err = &Error{}
		if decodeErr := json.NewDecoder(res.Body).Decode(err); decodeErr != nil {
			err = errors.Wrap(decodeErr, "error decoding the response for an HTTP error code: "+strconv.Itoa(res.StatusCode))
			return nil, err
		}

		return nil, err
	}

	return res, nil
}
// SendRequest verifies, constructs, and authorizes an HTTP request.
// queryParams are encoded into the URL; formData is form-urlencoded into
// the body (with bracketed keys rewritten to Twilio's dot notation).
// URL-parse failures are returned to the caller.
func (c Client) SendRequest(method string, rawURL string, queryParams, formData interface{}) (*http.Response, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}

	if queryParams != nil {
		v, _ := query.Values(queryParams)
		u.RawQuery = v.Encode()
	}

	valueReader := &strings.Reader{}
	if formData != nil {
		v, _ := query.Values(formData)
		qs := v.Encode()
		// Convert "[" and "]" (%5B and %5D) to "." and "" to conform to Twilio form-urlencoded specs.
		replacer := strings.NewReplacer("%5B", ".", "%5D", "")
		dotNotationQs := replacer.Replace(qs)
		valueReader = strings.NewReader(dotNotationQs)
	}

	req, err := http.NewRequest(method, u.String(), valueReader)
	if err != nil {
		return nil, err
	}

	req.SetBasicAuth(c.basicAuth())

	if method == http.MethodPost {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	}

	return doWithErr(req, c.HTTPClient)
}
// Post performs a POST request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Post(path string, bodyData interface{}) (*http.Response, error) {
	return c.SendRequest(http.MethodPost, path, nil, bodyData)
}
// Get performs a GET request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Get(path string, queryData interface{}) (*http.Response, error) {
	return c.SendRequest(http.MethodGet, path, queryData, nil)
}
// Delete performs a DELETE request on the object at the provided URI in the context of the Request's BaseURL
// with the provided data as parameters.
func (c Client) Delete(path string) (*http.Response, error) {
	return c.SendRequest(http.MethodDelete, path, nil, nil)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package replay
import (
"bytes"
"context"
"flag"
"math"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/core/os/device/bind"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/gles"
"github.com/google/gapid/gapis/capture"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/memory"
"github.com/google/gapid/gapis/replay"
"github.com/google/gapid/gapis/resolve"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/test/integration/gles/snippets"
)
// replayTimeout bounds how long a single replay is allowed to take.
const replayTimeout = time.Second * 5

var (
	// triangleVertices: one XYZ triangle spanning depths 0.1–0.9.
	triangleVertices = []float32{
		+0.0, -0.5, 0.1,
		-0.5, +0.5, 0.5,
		+0.5, +0.5, 0.9,
	}
	// squareVertices: a quad at constant depth 0.5.
	squareVertices = []float32{
		-0.5, -0.5, 0.5,
		-0.5, +0.5, 0.5,
		+0.5, +0.5, 0.5,
		+0.5, -0.5, 0.5,
	}
	// squareIndices: the quad as two triangles.
	squareIndices = []uint16{
		0, 1, 2, 0, 2, 3,
	}
	// Optional output directories; empty string disables each feature.
	generateReferenceImages = flag.String("generate", "", "directory in which to generate reference images, empty to disable")
	exportCaptures          = flag.String("export-captures", "", "directory to export captures to, empty to disable")
)
// TestMain parses the package flags before delegating to the test runner.
func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}
// setup wires a fresh device registry, replay manager, and in-memory
// database into ctx, registers the host as a device, and returns the
// enriched context plus the default device instance.
func setup(ctx context.Context) (context.Context, *device.Instance) {
	r := bind.NewRegistry()
	ctx = bind.PutRegistry(ctx, r)
	m := replay.New(ctx)
	ctx = replay.PutManager(ctx, m)
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	bind.GetRegistry(ctx).AddDevice(ctx, bind.Host(ctx))
	return ctx, r.DefaultDevice().Instance()
}
// buildAndMaybeExportCapture finalizes the builder into a named capture
// and exports it to disk when --export-captures is set.
func buildAndMaybeExportCapture(ctx context.Context, b *snippets.Builder, name string) *path.Capture {
	c := b.Capture(ctx, name)
	maybeExportCapture(ctx, c)
	return c
}
// maybeExportCapture writes the capture to
// <--export-captures>/<name>.gfxtrace; it is a no-op when the flag is
// unset. Failures are reported through the test's assert context.
func maybeExportCapture(ctx context.Context, c *path.Capture) {
	if *exportCaptures == "" {
		return
	}
	cap, err := capture.ResolveFromPath(ctx, c)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	f, err := os.Create(filepath.Join(*exportCaptures, cap.Name+".gfxtrace"))
	assert.For(ctx, "err").ThatError(err).Succeeded()
	defer f.Close()
	err = capture.Export(ctx, c, f)
	assert.For(ctx, "err").ThatError(err).Succeeded()
}
// p is shorthand for turning a raw address into a byte pointer.
func p(addr uint64) memory.Pointer { return memory.BytePtr(addr) }
// verifier checks a replayed capture against expectations on a device.
type verifier func(context.Context, *path.Capture, *device.Instance)

// generator produces a capture for a device together with its verifier.
type generator func(context.Context, *device.Instance) (*path.Capture, verifier)
// mergeCaptures creates a capture from the cmds of several existing captures,
// by interleaving them arbitrarily, on different threads.
//
// Each input capture's commands are replayed on a distinct synthetic
// thread ID (0x10000+i); the device header is taken from the first
// capture. The interleaving is deterministic: runs of 2–5 commands per
// thread, cycling to the next non-empty list.
func mergeCaptures(ctx context.Context, captures ...*path.Capture) *path.Capture {
	lists := [][]api.Cmd{}
	threads := []uint64{}
	remainingCmds := 0

	if len(captures) == 0 {
		panic("mergeCaptures requires at least one capture")
	}

	var d *device.Instance
	for i, path := range captures {
		c, err := capture.ResolveFromPath(ctx, path)
		assert.For(ctx, "err").ThatError(err).Succeeded()
		lists = append(lists, c.Commands)
		remainingCmds += len(c.Commands)
		threads = append(threads, uint64(0x10000+i))
		if i == 0 {
			d = c.Header.Device
		}
	}

	merged := snippets.NewBuilder(ctx, d)
	threadIndex := 0
	cmdsUntilSwitchThread, modFourCounter := 4, 3
	for remainingCmds > 0 {
		if cmdsUntilSwitchThread > 0 && len(lists[threadIndex]) > 0 {
			// Take the next command from the current thread's list.
			cmd := lists[threadIndex][0]
			cmd.SetThread(threads[threadIndex])
			merged.Add(cmd)
			lists[threadIndex] = lists[threadIndex][1:]
			remainingCmds--
			cmdsUntilSwitchThread--
		} else {
			// Advance to the next list that still has commands.
			threadIndex = (threadIndex + 1) % len(lists)
			for len(lists[threadIndex]) == 0 {
				threadIndex = (threadIndex + 1) % len(lists)
			}
			// We don't want to always switch threads after the same number of commands,
			// but we want it to be predictable. This should do.
			cmdsUntilSwitchThread = 2 + modFourCounter
			modFourCounter = (modFourCounter + 1) % 4
		}
	}
	return merged.Capture(ctx, "merged")
}
// generateDrawTriangleCapture is the default-colored variant: a green
// triangle on a red background.
func generateDrawTriangleCapture(ctx context.Context, d *device.Instance) (*path.Capture, verifier) {
	return generateDrawTriangleCaptureEx(ctx, d,
		0.0, 1.0, 0.0,
		1.0, 0.0, 0.0)
}
// generateDrawTriangleCaptureEx generates a capture with several frames containing
// a rotating triangle of color RGB(fr, fg, fb) on a RGB(br, bg, bb) background.
//
// The returned verifier replays the capture once and checks five
// framebuffer snapshots (clear color, clear depth, first triangle, final
// rotated triangle, triangle depth) concurrently.
func generateDrawTriangleCaptureEx(ctx context.Context, d *device.Instance,
	br, bg, bb gles.GLfloat,
	fr, fg, fb gles.GLfloat) (*path.Capture, verifier) {

	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(64, 64, false, false)
	b.Add(b.CB.GlEnable(gles.GLenum_GL_DEPTH_TEST)) // Required for depth-writing
	b.ClearColor(br, bg, bb, 1.0)
	clear := b.ClearDepth()
	prog := b.CreateProgram(ctx, simpleVSSource, simpleFSSource(fr, fg, fb))
	// NOTE(review): AddUniformSampler is used for "angle", which is later
	// written with GlUniform1f as a float — confirm this helper is the
	// intended one for a scalar uniform.
	angleLoc := b.AddUniformSampler(ctx, prog, "angle")
	posLoc := b.AddAttributeVec3(ctx, prog, "position")
	triangleVerticesR := b.Data(ctx, triangleVertices)
	b.Add(
		b.CB.GlUseProgram(prog),
		b.CB.GlUniform1f(angleLoc, gles.GLfloat(0)),
		b.CB.GlEnableVertexAttribArray(posLoc),
		b.CB.GlVertexAttribPointer(posLoc, 3, gles.GLenum_GL_FLOAT, gles.GLboolean(0), 0, triangleVerticesR.Ptr()),
		b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()),
	)
	triangle := b.Last()

	// 30 frames, advancing the rotation by pi/30 per frame (half a turn).
	angle := 0.0
	for i := 0; i < 30; i++ {
		angle += math.Pi / 30.0
		b.SwapBuffers()
		b.Add(
			b.CB.GlUniform1f(angleLoc, gles.GLfloat(angle)),
			b.CB.GlClear(gles.GLbitfield_GL_COLOR_BUFFER_BIT|gles.GLbitfield_GL_DEPTH_BUFFER_BIT),
			b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()),
		)
	}

	rotatedTriangle := b.Last()

	verify := func(ctx context.Context, c *path.Capture, d *device.Instance) {
		checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
			done := &sync.WaitGroup{}
			done.Add(5)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-green", clear, done)
			go checkDepthBuffer(ctx, c, d, 64, 64, 0.0, "one-depth", clear, done)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle", triangle, done)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle-180", rotatedTriangle, done)
			go checkDepthBuffer(ctx, c, d, 64, 64, 0.01, "triangle-depth", triangle, done)
			done.Wait()
		})
	}

	return buildAndMaybeExportCapture(ctx, b, "draw-triangle"), verify
}
// test runs a generator end-to-end: build the capture, then verify it.
func test(t *testing.T, name string, tg generator) {
	ctx, d := setup(log.Testing(t))
	c, verify := tg(ctx, d)
	verify(ctx, c, d)
}
// TestMultiContextCapture merges three single-context triangle captures
// and checks that the merged capture resolves to three GLES contexts.
func TestMultiContextCapture(t *testing.T) {
	ctx, d := setup(log.Testing(t))

	t1, _ := generateDrawTriangleCaptureEx(ctx, d, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0)
	t2, _ := generateDrawTriangleCaptureEx(ctx, d, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
	t3, _ := generateDrawTriangleCaptureEx(ctx, d, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
	c := mergeCaptures(ctx, t1, t2, t3)
	maybeExportCapture(ctx, c)

	contexts, err := resolve.Contexts(ctx, c.Contexts(), nil)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "len").That(len(contexts)).Equals(3)
}
// TestExportAndImportCapture round-trips a capture through Export/Import
// into a fresh environment and verifies the recovered capture replays
// identically.
func TestExportAndImportCapture(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	c, verify := generateDrawTriangleCapture(ctx, d)

	var exported bytes.Buffer
	err := capture.Export(ctx, c, &exported)
	assert.For(ctx, "err").ThatError(err).Succeeded()

	// Re-run setup so the import happens into a clean database/registry.
	ctx, d = setup(log.Testing(t))
	src := &capture.Blob{Data: exported.Bytes()}
	recoveredCapture, err := capture.Import(ctx, "recovered", src)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	verify(ctx, recoveredCapture, d)
}
// TestResizeRenderer checks that backbuffers can be resized without destroying
// the current context.
func TestResizeRenderer(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(8, 8, false, false) // start with a small backbuffer

	triangleVerticesR := b.Data(ctx, triangleVertices)

	prog := b.CreateProgram(ctx, simpleVSSource, simpleFSSource(1.0, 0.0, 0.0))
	posLoc := b.AddAttributeVec3(ctx, prog, "position")
	b.Add(
		b.CB.GlUseProgram(prog),
		b.CB.GlEnableVertexAttribArray(posLoc),
		b.CB.GlVertexAttribPointer(posLoc, 3, gles.GLenum_GL_FLOAT, gles.GLboolean(0), 0, triangleVerticesR.Ptr()),
	)

	// Grow the backbuffer, then draw; program state set before the resize
	// must still be in effect.
	b.ResizeBackbuffer(64, 64)
	b.ClearColor(0, 0, 1, 1)
	triangle := b.Add(b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()))
	c := buildAndMaybeExportCapture(ctx, b, "resize-renderer")

	checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
		checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle_2", triangle, nil)
	})
}
// TestNewContextUndefined checks that a new context is filled with the
// undefined framebuffer pattern.
func TestNewContextUndefined(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(64, 64, false, false)
	makeCurrent := b.Last()
	c := buildAndMaybeExportCapture(ctx, b, "new-context-undefined")

	checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
		checkColorBuffer(ctx, c, d, 64, 64, 0.01, "undef-fb", makeCurrent, nil)
	})
}
// TestPreserveBuffersOnSwap checks that when the preserveBuffersOnSwap flag is
// set, the backbuffer is preserved between calls to eglSwapBuffers().
func TestPreserveBuffersOnSwap(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(64, 64, false, true)
	clear := b.ClearColor(0, 0, 1, 1)
	swapA := b.SwapBuffers()
	swapB := b.SwapBuffers()
	swapC := b.SwapBuffers()
	c := buildAndMaybeExportCapture(ctx, b, "preserve-buffers-on-swap")

	checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
		// The same solid-blue clear must still be visible after each swap.
		done := &sync.WaitGroup{}
		done.Add(4)
		go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-blue", clear, done)
		go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-blue", swapA, done)
		go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-blue", swapB, done)
		go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-blue", swapC, done)
		done.Wait()
	})
}
Fix the broken GLES integration test.
The replay now correctly no longer resets the viewport when a
context is bound for the 2nd and following times.
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package replay
import (
"bytes"
"context"
"flag"
"math"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/core/os/device/bind"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/gles"
"github.com/google/gapid/gapis/capture"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/memory"
"github.com/google/gapid/gapis/replay"
"github.com/google/gapid/gapis/resolve"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/test/integration/gles/snippets"
)
// replayTimeout is a five-second timeout for replay operations.
const replayTimeout = time.Second * 5

var (
	// triangleVertices holds x/y/z positions for a single triangle, with a
	// distinct depth value per vertex.
	triangleVertices = []float32{
		+0.0, -0.5, 0.1,
		-0.5, +0.5, 0.5,
		+0.5, +0.5, 0.9,
	}
	// squareVertices holds x/y/z positions for a unit-ish quad at depth 0.5.
	squareVertices = []float32{
		-0.5, -0.5, 0.5,
		-0.5, +0.5, 0.5,
		+0.5, +0.5, 0.5,
		+0.5, -0.5, 0.5,
	}
	// squareIndices indexes squareVertices as two triangles.
	squareIndices = []uint16{
		0, 1, 2, 0, 2, 3,
	}
	// generateReferenceImages and exportCaptures are test flags, parsed in
	// TestMain.
	generateReferenceImages = flag.String("generate", "", "directory in which to generate reference images, empty to disable")
	exportCaptures          = flag.String("export-captures", "", "directory to export captures to, empty to disable")
)
// TestMain parses the command-line flags (e.g. -generate, -export-captures)
// before running the test suite.
func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}
// setup prepares the environment shared by these tests: it installs a device
// registry, a replay manager and an in-memory database into the context, adds
// the host device, and returns the updated context together with the default
// device instance.
func setup(ctx context.Context) (context.Context, *device.Instance) {
	r := bind.NewRegistry()
	ctx = bind.PutRegistry(ctx, r)
	m := replay.New(ctx)
	ctx = replay.PutManager(ctx, m)
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	bind.GetRegistry(ctx).AddDevice(ctx, bind.Host(ctx))
	return ctx, r.DefaultDevice().Instance()
}
// buildAndMaybeExportCapture builds the capture described by b and, when the
// -export-captures flag is set, also writes it to disk.
func buildAndMaybeExportCapture(ctx context.Context, b *snippets.Builder, name string) *path.Capture {
	capt := b.Capture(ctx, name)
	maybeExportCapture(ctx, capt)
	return capt
}
// maybeExportCapture writes the capture to the directory named by the
// -export-captures flag; it is a no-op when the flag is unset.
func maybeExportCapture(ctx context.Context, c *path.Capture) {
	dir := *exportCaptures
	if dir == "" {
		return
	}
	cap, err := capture.ResolveFromPath(ctx, c)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	out, err := os.Create(filepath.Join(dir, cap.Name+".gfxtrace"))
	assert.For(ctx, "err").ThatError(err).Succeeded()
	defer out.Close()
	assert.For(ctx, "err").ThatError(capture.Export(ctx, c, out)).Succeeded()
}
// p is a test shorthand that wraps a raw address as a byte pointer.
func p(addr uint64) memory.Pointer { return memory.BytePtr(addr) }
// verifier validates the replay of a capture on a given device.
type verifier func(context.Context, *path.Capture, *device.Instance)

// generator produces a capture for a device along with a verifier that
// validates its replay.
type generator func(context.Context, *device.Instance) (*path.Capture, verifier)
// mergeCaptures creates a capture from the cmds of several existing captures,
// by interleaving them arbitrarily, on different threads.
func mergeCaptures(ctx context.Context, captures ...*path.Capture) *path.Capture {
	lists := [][]api.Cmd{}
	threads := []uint64{}
	remainingCmds := 0
	if len(captures) == 0 {
		panic("mergeCaptures requires at least one capture")
	}
	var d *device.Instance
	for i, path := range captures {
		c, err := capture.ResolveFromPath(ctx, path)
		assert.For(ctx, "err").ThatError(err).Succeeded()
		lists = append(lists, c.Commands)
		remainingCmds += len(c.Commands)
		// Each source capture is replayed on its own synthetic thread ID.
		threads = append(threads, uint64(0x10000+i))
		// The merged capture reuses the header device of the first capture.
		if i == 0 {
			d = c.Header.Device
		}
	}
	merged := snippets.NewBuilder(ctx, d)
	threadIndex := 0
	cmdsUntilSwitchThread, modFourCounter := 4, 3
	for remainingCmds > 0 {
		if cmdsUntilSwitchThread > 0 && len(lists[threadIndex]) > 0 {
			// Emit the next command from the current thread's list.
			cmd := lists[threadIndex][0]
			cmd.SetThread(threads[threadIndex])
			merged.Add(cmd)
			lists[threadIndex] = lists[threadIndex][1:]
			remainingCmds--
			cmdsUntilSwitchThread--
		} else {
			// Advance (at least once) to the next list that still has commands;
			// remainingCmds > 0 guarantees this inner loop terminates.
			threadIndex = (threadIndex + 1) % len(lists)
			for len(lists[threadIndex]) == 0 {
				threadIndex = (threadIndex + 1) % len(lists)
			}
			// We don't want to always switch threads after the same number of commands,
			// but we want it to be predictable. This should do.
			cmdsUntilSwitchThread = 2 + modFourCounter
			modFourCounter = (modFourCounter + 1) % 4
		}
	}
	return merged.Capture(ctx, "merged")
}
// generateDrawTriangleCapture generates a capture of a red rotating triangle
// on a green background (see generateDrawTriangleCaptureEx for the layout of
// the color arguments).
func generateDrawTriangleCapture(ctx context.Context, d *device.Instance) (*path.Capture, verifier) {
	return generateDrawTriangleCaptureEx(ctx, d,
		0.0, 1.0, 0.0,
		1.0, 0.0, 0.0)
}
// generateDrawTriangleCaptureEx generates a capture with several frames containing
// a rotating triangle of color RGB(fr, fg, fb) on a RGB(br, bg, bb) background.
func generateDrawTriangleCaptureEx(ctx context.Context, d *device.Instance,
	br, bg, bb gles.GLfloat,
	fr, fg, fb gles.GLfloat) (*path.Capture, verifier) {
	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(64, 64, false, false)
	b.Add(b.CB.GlEnable(gles.GLenum_GL_DEPTH_TEST)) // Required for depth-writing
	b.ClearColor(br, bg, bb, 1.0)
	clear := b.ClearDepth()
	prog := b.CreateProgram(ctx, simpleVSSource, simpleFSSource(fr, fg, fb))
	// NOTE(review): "angle" is written with GlUniform1f below, yet declared
	// via AddUniformSampler — confirm the helper is also valid for float
	// uniforms.
	angleLoc := b.AddUniformSampler(ctx, prog, "angle")
	posLoc := b.AddAttributeVec3(ctx, prog, "position")
	triangleVerticesR := b.Data(ctx, triangleVertices)
	// First frame: draw the triangle at angle 0.
	b.Add(
		b.CB.GlUseProgram(prog),
		b.CB.GlUniform1f(angleLoc, gles.GLfloat(0)),
		b.CB.GlEnableVertexAttribArray(posLoc),
		b.CB.GlVertexAttribPointer(posLoc, 3, gles.GLenum_GL_FLOAT, gles.GLboolean(0), 0, triangleVerticesR.Ptr()),
		b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()),
	)
	triangle := b.Last()
	// 30 more frames, advancing the rotation by pi/30 per frame (half a
	// revolution in total).
	angle := 0.0
	for i := 0; i < 30; i++ {
		angle += math.Pi / 30.0
		b.SwapBuffers()
		b.Add(
			b.CB.GlUniform1f(angleLoc, gles.GLfloat(angle)),
			b.CB.GlClear(gles.GLbitfield_GL_COLOR_BUFFER_BIT|gles.GLbitfield_GL_DEPTH_BUFFER_BIT),
			b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()),
		)
	}
	rotatedTriangle := b.Last()
	// The returned verifier checks color and depth buffers at the clear, the
	// first draw, and the final (rotated) draw, concurrently.
	verify := func(ctx context.Context, c *path.Capture, d *device.Instance) {
		checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
			done := &sync.WaitGroup{}
			done.Add(5)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.0, "solid-green", clear, done)
			go checkDepthBuffer(ctx, c, d, 64, 64, 0.0, "one-depth", clear, done)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle", triangle, done)
			go checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle-180", rotatedTriangle, done)
			go checkDepthBuffer(ctx, c, d, 64, 64, 0.01, "triangle-depth", triangle, done)
			done.Wait()
		})
	}
	return buildAndMaybeExportCapture(ctx, b, "draw-triangle"), verify
}
// test runs a generator against a fresh environment and then runs the
// verifier it returns. The name parameter is currently unused.
func test(t *testing.T, name string, tg generator) {
	ctx, dev := setup(log.Testing(t))
	capt, verify := tg(ctx, dev)
	verify(ctx, capt, dev)
}
// TestMultiContextCapture checks that a capture built by interleaving the
// commands of three single-context captures resolves to exactly three
// contexts.
func TestMultiContextCapture(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	t1, _ := generateDrawTriangleCaptureEx(ctx, d, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0)
	t2, _ := generateDrawTriangleCaptureEx(ctx, d, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
	t3, _ := generateDrawTriangleCaptureEx(ctx, d, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
	c := mergeCaptures(ctx, t1, t2, t3)
	maybeExportCapture(ctx, c)
	contexts, err := resolve.Contexts(ctx, c.Contexts(), nil)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "len").That(len(contexts)).Equals(3)
}
// TestExportAndImportCapture checks that a capture round-trips through
// export and import and still replays correctly.
func TestExportAndImportCapture(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	c, verify := generateDrawTriangleCapture(ctx, d)
	var exported bytes.Buffer
	err := capture.Export(ctx, c, &exported)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	// Re-run setup so the import happens into a completely fresh environment,
	// independent of the database state the capture was built in.
	ctx, d = setup(log.Testing(t))
	src := &capture.Blob{Data: exported.Bytes()}
	recoveredCapture, err := capture.Import(ctx, "recovered", src)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	verify(ctx, recoveredCapture, d)
}
// TestResizeRenderer checks that backbuffers can be resized without destroying
// the current context.
func TestResizeRenderer(t *testing.T) {
	ctx, d := setup(log.Testing(t))
	b := snippets.NewBuilder(ctx, d)
	b.CreateContext(8, 8, false, false) // start with a small backbuffer
	triangleVerticesR := b.Data(ctx, triangleVertices)
	prog := b.CreateProgram(ctx, simpleVSSource, simpleFSSource(1.0, 0.0, 0.0))
	posLoc := b.AddAttributeVec3(ctx, prog, "position")
	b.Add(
		b.CB.GlUseProgram(prog),
		b.CB.GlEnableVertexAttribArray(posLoc),
		b.CB.GlVertexAttribPointer(posLoc, 3, gles.GLenum_GL_FLOAT, gles.GLboolean(0), 0, triangleVerticesR.Ptr()),
	)
	b.ResizeBackbuffer(64, 64)
	// Set the viewport explicitly after the resize; replay no longer resets
	// the viewport when a context is re-bound.
	b.Add(b.CB.GlViewport(0, 0, 64, 64))
	b.ClearColor(0, 0, 1, 1)
	triangle := b.Add(b.CB.GlDrawArrays(gles.GLenum_GL_TRIANGLES, 0, 3).AddRead(triangleVerticesR.Data()))
	c := buildAndMaybeExportCapture(ctx, b, "resize-renderer")
	checkReplay(ctx, c, d, 1, func() { // expect a single replay batch.
		checkColorBuffer(ctx, c, d, 64, 64, 0.01, "triangle_2", triangle, nil)
	})
}
// TestNewContextUndefined checks that a freshly created context starts out
// filled with the undefined-framebuffer pattern.
func TestNewContextUndefined(t *testing.T) {
	ctx, dev := setup(log.Testing(t))
	builder := snippets.NewBuilder(ctx, dev)
	builder.CreateContext(64, 64, false, false)
	// The context-creation command is the reference point for the check.
	bindCmd := builder.Last()
	capt := buildAndMaybeExportCapture(ctx, builder, "new-context-undefined")
	checkReplay(ctx, capt, dev, 1, func() { // expect a single replay batch.
		checkColorBuffer(ctx, capt, dev, 64, 64, 0.01, "undef-fb", bindCmd, nil)
	})
}
// TestPreserveBuffersOnSwap checks that when the preserveBuffersOnSwap flag
// is set, the backbuffer contents survive calls to eglSwapBuffers().
func TestPreserveBuffersOnSwap(t *testing.T) {
	ctx, dev := setup(log.Testing(t))
	builder := snippets.NewBuilder(ctx, dev)
	builder.CreateContext(64, 64, false, true)
	clearCmd := builder.ClearColor(0, 0, 1, 1)
	firstSwap := builder.SwapBuffers()
	secondSwap := builder.SwapBuffers()
	thirdSwap := builder.SwapBuffers()
	capt := buildAndMaybeExportCapture(ctx, builder, "preserve-buffers-on-swap")
	checkReplay(ctx, capt, dev, 1, func() { // expect a single replay batch.
		// The blue clear must still be visible after every swap.
		var wg sync.WaitGroup
		wg.Add(4)
		go checkColorBuffer(ctx, capt, dev, 64, 64, 0.0, "solid-blue", clearCmd, &wg)
		go checkColorBuffer(ctx, capt, dev, 64, 64, 0.0, "solid-blue", firstSwap, &wg)
		go checkColorBuffer(ctx, capt, dev, 64, 64, 0.0, "solid-blue", secondSwap, &wg)
		go checkColorBuffer(ctx, capt, dev, 64, 64, 0.0, "solid-blue", thirdSwap, &wg)
		wg.Wait()
	})
}
|
package lib
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/go-ini/ini"
"net/http"
"os"
"strings"
)
// IAM_ARN is the prefix for role ARNs and Virtual MFA devices.
// (Physical MFA devices are identified by a device serial number, not an ARN.)
const IAM_ARN = "arn:aws:iam::"
// LookupMfa retrieves the MFA devices configured for the calling user's IAM account.
func LookupMfa(sess *session.Session) ([]*iam.MFADevice, error) {
	svc := iam.New(sess)
	out, err := svc.ListMFADevices(&iam.ListMFADevicesInput{})
	if err != nil {
		return nil, err
	}
	return out.MFADevices, nil
}
// PromptForMfa prints a prompt to Stdout and reads the user's MFA code from
// standard input.
func PromptForMfa() string {
	var code string
	fmt.Print("Enter MFA Code: ")
	fmt.Scanln(&code)
	return code
}
// AwsConfigFile returns the location of the AWS SDK config file. Use the
// value of the AWS_CONFIG_FILE environment variable, if available, otherwise
// use the SDK default location.
func AwsConfigFile() string {
	file := defaults.SharedConfigFilename()
	if env, ok := os.LookupEnv("AWS_CONFIG_FILE"); ok && len(env) > 0 {
		file = env
	}
	return file
}
// AwsCredentialsFile returns the location of the AWS SDK credentials file.
// Use the value of the AWS_SHARED_CREDENTIALS_FILE environment variable, if
// available, otherwise use the SDK default location.
func AwsCredentialsFile() string {
	c := defaults.SharedCredentialsFilename()
	e, ok := os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
	if ok && len(e) > 0 {
		c = e
	}
	return c
}
// AwsSession returns an AWS SDK session object to use for making API calls to AWS. This session
// will be set to get configuration from the shared configuration files, and enable verbose credential
// chain logging. If the profile argument is provided, the session will be set to use it for configuration.
func AwsSession(profile string) *session.Session {
	opts := session.Options{
		SharedConfigState:       session.SharedConfigEnable,
		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
		Config:                  aws.Config{CredentialsChainVerboseErrors: aws.Bool(true)},
	}
	if len(profile) > 0 {
		opts.Profile = profile
	}
	return session.Must(session.NewSessionWithOptions(opts))
}
// VersionCheck compares the running program version against the latest
// release published on github and prints a notice when they differ.
func VersionCheck(version string) error {
	latestURL := "https://github.com/mmmorris1975/aws-runas/releases/latest"
	req, err := http.NewRequest(http.MethodHead, latestURL, http.NoBody)
	if err != nil {
		return err
	}
	// Use the transport directly so the redirect is not followed; the
	// Location header of the redirect carries the latest release tag.
	res, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusFound {
		return fmt.Errorf("version check failed, bad HTTP Status: %d", res.StatusCode)
	}
	loc, err := res.Location()
	if err != nil {
		return err
	}
	parts := strings.Split(strings.Trim(loc.Path, `/`), `/`)
	latest := parts[len(parts)-1]
	if latest != version {
		fmt.Printf("New version of aws-runas available: %s\nDownload available at: %s\n", latest, latestURL)
	}
	return nil
}
// RunDiagnostics will sanity check various configuration items: it rejects
// invalid env-var credential combinations, prints the resolved profile
// settings, and cross-checks the credentials file against the environment.
func RunDiagnostics(p *AWSProfile) error {
	envAk := os.Getenv("AWS_ACCESS_KEY_ID")
	envSt := os.Getenv("AWS_SESSION_TOKEN")
	if len(envAk) > 0 && len(envSt) > 0 {
		// A static (AKIA...) access key must never be paired with a session token.
		if strings.HasPrefix(envAk, "AKIA") {
			return fmt.Errorf("detected static access key env var along with session token env var, this is invalid")
		}
	}
	fmt.Printf("PROFILE: %s\n", p.Name)
	fmt.Printf("ROLE ARN: %s\n", p.RoleArn)
	fmt.Printf("MFA SERIAL: %s\n", p.MfaSerial)
	fmt.Printf("EXTERNAL ID: %s\n", p.ExternalId)
	fmt.Printf("ROLE SESSION NAME: %s\n", p.RoleSessionName)
	fmt.Printf("SESSION TOKEN DURATION: %s\n", p.SessionDuration)
	fmt.Printf("ASSUME ROLE CREDENTIAL DURATION: %s\n", p.CredDuration)
	fmt.Printf("REGION: %s\n", p.Region)
	fmt.Printf("SOURCE PROFILE: %s\n", p.SourceProfile)
	c := AwsCredentialsFile()
	f, err := ini.Load(c)
	if err != nil {
		return err
	}
	f.BlockMode = false
	// Credentials live under the source profile when one is set, otherwise
	// under the profile itself.
	profile := p.SourceProfile
	if len(profile) < 1 {
		profile = p.Name
	}
	s, err := f.GetSection(profile)
	if err != nil {
		// if access key envvar exists, assume creds are configured as envvars and not in credentials file
		// otherwise, flag as an error, since creds are totally missing
		if len(envAk) < 1 {
			return fmt.Errorf("missing [%s] section in credentials file %s", profile, c)
		}
		// Section is absent but env-var creds exist: nothing further to
		// cross-check in the file. Returning here also avoids dereferencing
		// the nil section below.
		return nil
	}
	if s.HasKey("aws_access_key_id") && s.HasKey("aws_secret_access_key") {
		if len(envAk) > 0 || len(envSt) > 0 {
			return fmt.Errorf("detected AWS credential environment variables and profile credentials, this may confuse aws-runas")
		}
	} else {
		// section is in cred file, but one or both of the cred keys are missing
		if len(envAk) < 1 {
			return fmt.Errorf("profile found in credentials file, but missing credential configuration")
		}
	}
	return nil
}
Removed some long-dead commented-out code.
package lib
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/go-ini/ini"
"net/http"
"os"
"strings"
)
// IAM_ARN is the prefix for role ARNs and Virtual MFA devices.
// (Physical MFA devices are identified by a device serial number, not an ARN.)
const IAM_ARN = "arn:aws:iam::"
// LookupMfa retrieves the MFA devices configured for the calling user's IAM account.
func LookupMfa(sess *session.Session) ([]*iam.MFADevice, error) {
	svc := iam.New(sess)
	out, err := svc.ListMFADevices(&iam.ListMFADevicesInput{})
	if err != nil {
		return nil, err
	}
	return out.MFADevices, nil
}
// PromptForMfa prints a prompt to Stdout and reads the user's MFA code from
// standard input.
func PromptForMfa() string {
	var code string
	fmt.Print("Enter MFA Code: ")
	fmt.Scanln(&code)
	return code
}
// AwsConfigFile returns the location of the AWS SDK config file. Use the
// value of the AWS_CONFIG_FILE environment variable, if available, otherwise
// use the SDK default location.
func AwsConfigFile() string {
	file := defaults.SharedConfigFilename()
	if env, ok := os.LookupEnv("AWS_CONFIG_FILE"); ok && len(env) > 0 {
		file = env
	}
	return file
}
// AwsCredentialsFile returns the location of the AWS SDK credentials file.
// Use the value of the AWS_SHARED_CREDENTIALS_FILE environment variable, if
// available, otherwise use the SDK default location.
func AwsCredentialsFile() string {
	c := defaults.SharedCredentialsFilename()
	e, ok := os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
	if ok && len(e) > 0 {
		c = e
	}
	return c
}
// AwsSession returns an AWS SDK session configured from the shared config
// files, with verbose credential-chain logging and stdin-based MFA token
// prompting. A non-empty profile selects that profile's configuration.
func AwsSession(profile string) *session.Session {
	opts := session.Options{
		SharedConfigState:       session.SharedConfigEnable,
		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
		Config:                  aws.Config{CredentialsChainVerboseErrors: aws.Bool(true)},
	}
	if profile != "" {
		opts.Profile = profile
	}
	return session.Must(session.NewSessionWithOptions(opts))
}
// VersionCheck compares the running program version against the latest
// release published on github and prints a notice when they differ.
func VersionCheck(version string) error {
	latestURL := "https://github.com/mmmorris1975/aws-runas/releases/latest"
	req, err := http.NewRequest(http.MethodHead, latestURL, http.NoBody)
	if err != nil {
		return err
	}
	// Use the transport directly so the redirect is not followed; the
	// Location header of the redirect carries the latest release tag.
	res, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusFound {
		return fmt.Errorf("version check failed, bad HTTP Status: %d", res.StatusCode)
	}
	loc, err := res.Location()
	if err != nil {
		return err
	}
	parts := strings.Split(strings.Trim(loc.Path, `/`), `/`)
	latest := parts[len(parts)-1]
	if latest != version {
		fmt.Printf("New version of aws-runas available: %s\nDownload available at: %s\n", latest, latestURL)
	}
	return nil
}
// RunDiagnostics will sanity check various configuration items: it rejects
// invalid env-var credential combinations, prints the resolved profile
// settings, and cross-checks the credentials file against the environment.
func RunDiagnostics(p *AWSProfile) error {
	envAk := os.Getenv("AWS_ACCESS_KEY_ID")
	envSt := os.Getenv("AWS_SESSION_TOKEN")
	if len(envAk) > 0 && len(envSt) > 0 {
		// A static (AKIA...) access key must never be paired with a session token.
		if strings.HasPrefix(envAk, "AKIA") {
			return fmt.Errorf("detected static access key env var along with session token env var, this is invalid")
		}
	}
	fmt.Printf("PROFILE: %s\n", p.Name)
	fmt.Printf("ROLE ARN: %s\n", p.RoleArn)
	fmt.Printf("MFA SERIAL: %s\n", p.MfaSerial)
	fmt.Printf("EXTERNAL ID: %s\n", p.ExternalId)
	fmt.Printf("ROLE SESSION NAME: %s\n", p.RoleSessionName)
	fmt.Printf("SESSION TOKEN DURATION: %s\n", p.SessionDuration)
	fmt.Printf("ASSUME ROLE CREDENTIAL DURATION: %s\n", p.CredDuration)
	fmt.Printf("REGION: %s\n", p.Region)
	fmt.Printf("SOURCE PROFILE: %s\n", p.SourceProfile)
	c := AwsCredentialsFile()
	f, err := ini.Load(c)
	if err != nil {
		return err
	}
	f.BlockMode = false
	// Credentials live under the source profile when one is set, otherwise
	// under the profile itself.
	profile := p.SourceProfile
	if len(profile) < 1 {
		profile = p.Name
	}
	s, err := f.GetSection(profile)
	if err != nil {
		// if access key envvar exists, assume creds are configured as envvars and not in credentials file
		// otherwise, flag as an error, since creds are totally missing
		if len(envAk) < 1 {
			return fmt.Errorf("missing [%s] section in credentials file %s", profile, c)
		}
		// Section is absent but env-var creds exist: nothing further to
		// cross-check in the file. Returning here also avoids dereferencing
		// the nil section below.
		return nil
	}
	if s.HasKey("aws_access_key_id") && s.HasKey("aws_secret_access_key") {
		if len(envAk) > 0 || len(envSt) > 0 {
			return fmt.Errorf("detected AWS credential environment variables and profile credentials, this may confuse aws-runas")
		}
	} else {
		// section is in cred file, but one or both of the cred keys are missing
		if len(envAk) < 1 {
			return fmt.Errorf("profile found in credentials file, but missing credential configuration")
		}
	}
	return nil
}
|
// Package db to connect to mongodb
package db
import (
"errors"
"fmt"
"github.com/MG-RAST/AWE/lib/conf"
mgo "github.com/MG-RAST/AWE/vendor/gopkg.in/mgo.v2"
"time"
)
var (
	// Connection is the package-wide mongodb connection, populated by Initialize.
	Connection connection
	// DbTimeout is the dial timeout for the main database session, taken from config.
	DbTimeout = time.Second * time.Duration(conf.MONGODB_TIMEOUT)
)
// connection bundles a mongodb session with the database handle derived from
// it. The lowercase credential fields are unused in this chunk — confirm
// whether they are still needed.
type connection struct {
	dbname   string
	username string
	password string
	Session  *mgo.Session
	DB       *mgo.Database
}
// Initialize dials the configured mongodb server and stores the resulting
// session and database handle in the package-level Connection.
func Initialize() (err error) {
	c := connection{}
	s, err := mgo.DialWithTimeout(conf.MONGODB_HOST, DbTimeout)
	if err != nil {
		// Include the underlying dial error so the root cause is not lost.
		e := errors.New(fmt.Sprintf("no reachable mongodb server(s) at %s: %v", conf.MONGODB_HOST, err))
		return e
	}
	c.Session = s
	c.DB = c.Session.DB(conf.MONGODB_DATABASE)
	if conf.MONGODB_USER != "" && conf.MONGODB_PASSWD != "" {
		// NOTE(review): the Login error is ignored, so bad credentials only
		// surface on the first authenticated operation — confirm intended.
		c.DB.Login(conf.MONGODB_USER, conf.MONGODB_PASSWD)
	}
	Connection = c
	return
}
// Drop deletes the entire configured database via the package connection.
func Drop() error {
	return Connection.DB.DropDatabase()
}
Test the mongo connection with a short timeout on startup.
// Package db to connect to mongodb
package db
import (
"errors"
"fmt"
"github.com/MG-RAST/AWE/lib/conf"
mgo "github.com/MG-RAST/AWE/vendor/gopkg.in/mgo.v2"
"time"
)
const (
DialTimeout = time.Duration(time.Second * 10)
DialAttempts = 3
)
var (
	// Connection is the package-wide mongodb connection, populated by Initialize.
	Connection connection
	// DbTimeout is the dial timeout for the main database session, taken from config.
	DbTimeout = time.Second * time.Duration(conf.MONGODB_TIMEOUT)
)
// connection bundles a mongodb session with the database handle derived from
// it. The lowercase credential fields are unused in this chunk — confirm
// whether they are still needed.
type connection struct {
	dbname   string
	username string
	password string
	Session  *mgo.Session
	DB       *mgo.Database
}
// Initialize first probes the configured mongodb server with a bounded number
// of short-timeout dials (so startup fails fast when the server is down),
// then opens the session and database handle stored in the package-level
// Connection.
func Initialize() (err error) {
	c := connection{}
	// test connection
	var dialErr error
	canDial := false
	for i := 0; i < DialAttempts; i++ {
		s, e := mgo.DialWithTimeout(conf.MONGODB_HOST, DialTimeout)
		if e == nil {
			s.Close()
			canDial = true
			break
		}
		// Keep the last dial error so the failure cause is reported below.
		dialErr = e
	}
	if !canDial {
		return errors.New(fmt.Sprintf("no reachable mongodb server(s) at %s: %v", conf.MONGODB_HOST, dialErr))
	}
	// get handle
	s, err := mgo.DialWithTimeout(conf.MONGODB_HOST, DbTimeout)
	if err != nil {
		return errors.New(fmt.Sprintf("no reachable mongodb server(s) at %s: %v", conf.MONGODB_HOST, err))
	}
	c.Session = s
	c.DB = c.Session.DB(conf.MONGODB_DATABASE)
	if conf.MONGODB_USER != "" && conf.MONGODB_PASSWD != "" {
		// NOTE(review): the Login error is ignored, so bad credentials only
		// surface on the first authenticated operation — confirm intended.
		c.DB.Login(conf.MONGODB_USER, conf.MONGODB_PASSWD)
	}
	Connection = c
	return
}
// Drop deletes the entire configured database via the package connection.
func Drop() error {
	return Connection.DB.DropDatabase()
}
|
package sysexits
// Exit status codes as defined by BSD sysexits(3).
// NOTE: `const { ... }` is not valid Go; a const block uses parentheses.
const (
	OK          int = 0  /* successful termination */
	BASE        int = 64 /* base value for error messages */
	USAGE       int = 64 /* command line usage error */
	DATAERR     int = 65 /* data format error */
	NOINPUT     int = 66 /* cannot open input */
	NOUSER      int = 67 /* addressee unknown */
	NOHOST      int = 68 /* host name unknown */
	UNAVAILABLE int = 69 /* service unavailable */
	SOFTWARE    int = 70 /* internal software error */
	OSERR       int = 71 /* system error (e.g., can't fork) */
	OSFILE      int = 72 /* critical OS file missing */
	CANTCREAT   int = 73 /* can't create (user) output file */
	IOERR       int = 74 /* input/output error */
	TEMPFAIL    int = 75 /* temp failure; user is invited to retry */
	PROTOCOL    int = 76 /* remote error in protocol */
	NOPERM      int = 77 /* permission denied */
	CONFIG      int = 78 /* configuration error */
	MAX         int = 78 /* maximum listed value */
)
Fix the const block delimiters: use parentheses instead of braces.
package sysexits
// Exit status codes as defined by BSD sysexits(3).
const (
	OK          int = 0  /* successful termination */
	BASE        int = 64 /* base value for error messages */
	USAGE       int = 64 /* command line usage error */
	DATAERR     int = 65 /* data format error */
	NOINPUT     int = 66 /* cannot open input */
	NOUSER      int = 67 /* addressee unknown */
	NOHOST      int = 68 /* host name unknown */
	UNAVAILABLE int = 69 /* service unavailable */
	SOFTWARE    int = 70 /* internal software error */
	OSERR       int = 71 /* system error (e.g., can't fork) */
	OSFILE      int = 72 /* critical OS file missing */
	CANTCREAT   int = 73 /* can't create (user) output file */
	IOERR       int = 74 /* input/output error */
	TEMPFAIL    int = 75 /* temp failure; user is invited to retry */
	PROTOCOL    int = 76 /* remote error in protocol */
	NOPERM      int = 77 /* permission denied */
	CONFIG      int = 78 /* configuration error */
	MAX         int = 78 /* maximum listed value */
)
|
// Copyright 2016 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker2aci
import "github.com/appc/spec/schema"
// Version is the docker2aci version string.
var Version = "0.9.0+git"

// AppcVersion is the version of the appc spec this build targets.
var AppcVersion = schema.AppContainerVersion
version: bump to v0.9.1
// Copyright 2016 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker2aci
import "github.com/appc/spec/schema"
// Version is the docker2aci version string.
var Version = "0.9.1"

// AppcVersion is the version of the appc spec this build targets.
var AppcVersion = schema.AppContainerVersion
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.