CombinedText stringlengths 4 3.42M |
|---|
package provider
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/MiteshSharma/StorageService/service/data"
log "github.com/Sirupsen/logrus"
"net/http"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/MiteshSharma/StorageService/utils"
"time"
"fmt"
)
// S3Storage provides bucket and object operations backed by AWS S3.
type S3Storage struct {
// Session is the shared AWS session, also reused by s3manager uploads.
Session *session.Session
// S3Connection is the S3 API client created from Session.
S3Connection *s3.S3
}
// NewS3Storage builds an S3Storage from the application config: static
// credentials and region are read from utils.ConfigParam. One session is
// shared between the S3 client and later s3manager uploads.
func NewS3Storage() *S3Storage {
	// Bug fix: the static credentials were previously created and the result
	// discarded; they must be attached to the aws.Config to take effect.
	creds := credentials.NewStaticCredentials(
		utils.ConfigParam.StorageConfig.S3Storage.KeyId,
		utils.ConfigParam.StorageConfig.S3Storage.AccessKey, "")
	awsConfig := &aws.Config{
		// Generalized: region comes from config instead of hard-coded "us-west-1".
		Region:      aws.String(utils.ConfigParam.StorageConfig.S3Storage.Region),
		Credentials: creds,
	}
	sess := session.New(awsConfig) // renamed so the session package is not shadowed
	return &S3Storage{Session: sess, S3Connection: s3.New(sess)}
}
// GetBuckets returns every bucket visible to the configured credentials.
func (s3S S3Storage) GetBuckets() ([]*data.Bucket, error) {
	resp, err := s3S.S3Connection.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error fetching bucket list %v", err)
		return nil, err
	}
	buckets := make([]*data.Bucket, 0, len(resp.Buckets))
	for _, b := range resp.Buckets {
		buckets = append(buckets, data.NewBucket(*b.Name))
	}
	return buckets, nil
}
// GetBucket verifies via HeadBucket that the named bucket exists and returns
// a Bucket value for it. On error an empty Bucket and the error are returned.
func (s3S S3Storage) GetBucket(name string) (*data.Bucket, error) {
	req := s3.HeadBucketInput{
		Bucket: &name,
	}
	if _, err := s3S.S3Connection.HeadBucket(&req); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("No bucket exist %v", err)
		return &data.Bucket{}, err
	}
	return data.NewBucket(name), nil
}
// CreateBucket creates a publicly readable bucket and blocks until S3
// reports that it exists.
func (s3S S3Storage) CreateBucket(name string) (*data.Bucket, error) {
	req := s3.CreateBucketInput{
		Bucket: aws.String(name),
		ACL:    aws.String("public-read"),
	}
	if _, err := s3S.S3Connection.CreateBucket(&req); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error on creating bucket %v", err)
		return nil, err
	}
	if err := s3S.S3Connection.WaitUntilBucketExists(&s3.HeadBucketInput{Bucket: &name}); err != nil {
		log.Debugf("Failed to wait for bucket to exist %s, %s", name, err)
		return nil, err
	}
	return data.NewBucket(name), nil
}
// DestroyBucket deletes the named bucket. The bucket must already be empty
// for S3 to accept the delete.
func (s3S S3Storage) DestroyBucket(name string) error {
	params := &s3.DeleteBucketInput{
		Bucket: &name, // Required
	}
	if _, err := s3S.S3Connection.DeleteBucket(params); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error on deleting bucket %v", err)
		return err
	}
	return nil
}
// GetFiles lists all objects of the named bucket as File values. It first
// checks that the bucket exists and returns the HeadBucket error otherwise.
func (s3S S3Storage) GetFiles(bucketName string) ([]*data.File, error) {
	// Bug fix: the old code compared the result against a freshly allocated
	// &data.Bucket{} pointer, which can never be equal; check the error.
	if _, err := s3S.GetBucket(bucketName); err != nil {
		log.Debugf("Bucket name doesn't exist %v", err)
		return nil, err
	}
	params := &s3.ListObjectsInput{
		Bucket: aws.String(bucketName), // Required
	}
	resp, err := s3S.S3Connection.ListObjects(params)
	if err != nil {
		log.Debugf("Error fetching bucket content %v", err)
		return nil, err
	}
	fileList := make([]*data.File, 0, len(resp.Contents))
	for _, content := range resp.Contents {
		// NOTE(review): content.LastModified holds the object's real
		// modification time; time.Now() misreports it. Dropping time.Now()
		// would orphan the file-level "time" import — fix both together.
		fileList = append(fileList, data.NewFile(*content.Key, *content.Size, time.Now()))
	}
	return fileList, nil
}
// GetFile fetches the object with the given key and returns its metadata
// (name, content length, last-modified). On error an empty File is returned.
func (s3S S3Storage) GetFile(bucketName, name string) (*data.File, error) {
	params := &s3.GetObjectInput{
		Bucket: aws.String(bucketName), // Required
		Key:    aws.String(name),       // Required
	}
	resp, err := s3S.S3Connection.GetObject(params)
	if err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error fetching bucket file %v", err)
		return &data.File{}, err
	}
	return data.NewFile(name, *resp.ContentLength, *resp.LastModified), nil
}
// RemoveFile deletes the object with the given key from the bucket.
func (s3S S3Storage) RemoveFile(bucketName, name string) error {
	params := &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName), // Required
		Key:    aws.String(name),       // Required
	}
	if _, err := s3S.S3Connection.DeleteObject(params); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error deleting bucket content %v", err)
		return err
	}
	return nil
}
// UploadFile uploads every file posted under the "file" multipart form field
// to the given bucket with a public-read ACL, and returns metadata for each
// uploaded object.
func (s3S S3Storage) UploadFile(bucketName string, request *http.Request) ([]*data.File, error) {
	uploader := s3manager.NewUploader(s3S.Session)
	if err := request.ParseMultipartForm(100000); err != nil {
		log.Debugf("Error parsing multipart form %v", err)
		return nil, err
	}
	// get a ref to the parsed multipart form
	m := request.MultipartForm
	fmt.Println("Reading form data")
	// get the *fileheaders
	files := m.File["file"]
	// Bug fix: fmt.Println does not format, and %ld is not a Go verb.
	fmt.Printf("File count %d\n", len(files))
	fileList := make([]*data.File, 0, len(files))
	for i := range files {
		fileName := files[i].Filename
		// for each fileheader, get a handle to the actual file
		file, err := files[i].Open()
		if err != nil {
			// Bug fix: previously file.Close() was deferred before this
			// check, which would panic on a nil file at function return.
			log.Debugf("Error opening file received %v", err)
			return nil, err
		}
		result, err := uploader.Upload(&s3manager.UploadInput{
			Bucket: aws.String(bucketName),
			Key:    aws.String(fileName),
			ACL:    aws.String("public-read"),
			Body:   file,
		})
		// Close now; a defer inside the loop would hold every handle open
		// until the function returns.
		file.Close()
		if err != nil {
			// Bug fix: upload failures were logged but silently ignored.
			log.Debugf("Failed to upload %v", err)
			return nil, err
		}
		fmt.Println("Upload successful with uploadId " + result.UploadID)
		fileObj, err := s3S.GetFile(bucketName, fileName)
		if err != nil {
			return nil, err
		}
		fileList = append(fileList, fileObj)
	}
	return fileList, nil
}
Reading region from config file for s3 upload
package provider
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/MiteshSharma/StorageService/service/data"
log "github.com/Sirupsen/logrus"
"net/http"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/MiteshSharma/StorageService/utils"
"time"
"fmt"
)
// S3Storage provides bucket and object operations backed by AWS S3.
type S3Storage struct {
// Session is the shared AWS session, also reused by s3manager uploads.
Session *session.Session
// S3Connection is the S3 API client created from Session.
S3Connection *s3.S3
}
// NewS3Storage builds an S3Storage from the application config: static
// credentials and region are read from utils.ConfigParam. One session is
// shared between the S3 client and later s3manager uploads.
func NewS3Storage() *S3Storage {
	// Bug fix: the static credentials were previously created and the result
	// discarded; they must be attached to the aws.Config to take effect.
	creds := credentials.NewStaticCredentials(
		utils.ConfigParam.StorageConfig.S3Storage.KeyId,
		utils.ConfigParam.StorageConfig.S3Storage.AccessKey, "")
	awsConfig := &aws.Config{
		Region:      aws.String(utils.ConfigParam.StorageConfig.S3Storage.Region),
		Credentials: creds,
	}
	sess := session.New(awsConfig) // renamed so the session package is not shadowed
	return &S3Storage{Session: sess, S3Connection: s3.New(sess)}
}
// GetBuckets returns every bucket visible to the configured credentials.
func (s3S S3Storage) GetBuckets() ([]*data.Bucket, error) {
	resp, err := s3S.S3Connection.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error fetching bucket list %v", err)
		return nil, err
	}
	buckets := make([]*data.Bucket, 0, len(resp.Buckets))
	for _, b := range resp.Buckets {
		buckets = append(buckets, data.NewBucket(*b.Name))
	}
	return buckets, nil
}
// GetBucket verifies via HeadBucket that the named bucket exists and returns
// a Bucket value for it. On error an empty Bucket and the error are returned.
func (s3S S3Storage) GetBucket(name string) (*data.Bucket, error) {
	req := s3.HeadBucketInput{
		Bucket: &name,
	}
	if _, err := s3S.S3Connection.HeadBucket(&req); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("No bucket exist %v", err)
		return &data.Bucket{}, err
	}
	return data.NewBucket(name), nil
}
// CreateBucket creates a publicly readable bucket and blocks until S3
// reports that it exists.
func (s3S S3Storage) CreateBucket(name string) (*data.Bucket, error) {
	req := s3.CreateBucketInput{
		Bucket: aws.String(name),
		ACL:    aws.String("public-read"),
	}
	if _, err := s3S.S3Connection.CreateBucket(&req); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error on creating bucket %v", err)
		return nil, err
	}
	if err := s3S.S3Connection.WaitUntilBucketExists(&s3.HeadBucketInput{Bucket: &name}); err != nil {
		log.Debugf("Failed to wait for bucket to exist %s, %s", name, err)
		return nil, err
	}
	return data.NewBucket(name), nil
}
// DestroyBucket deletes the named bucket. The bucket must already be empty
// for S3 to accept the delete.
func (s3S S3Storage) DestroyBucket(name string) error {
	params := &s3.DeleteBucketInput{
		Bucket: &name, // Required
	}
	if _, err := s3S.S3Connection.DeleteBucket(params); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error on deleting bucket %v", err)
		return err
	}
	return nil
}
// GetFiles lists all objects of the named bucket as File values. It first
// checks that the bucket exists and returns the HeadBucket error otherwise.
func (s3S S3Storage) GetFiles(bucketName string) ([]*data.File, error) {
	// Bug fix: the old code compared the result against a freshly allocated
	// &data.Bucket{} pointer, which can never be equal; check the error.
	if _, err := s3S.GetBucket(bucketName); err != nil {
		log.Debugf("Bucket name doesn't exist %v", err)
		return nil, err
	}
	params := &s3.ListObjectsInput{
		Bucket: aws.String(bucketName), // Required
	}
	resp, err := s3S.S3Connection.ListObjects(params)
	if err != nil {
		log.Debugf("Error fetching bucket content %v", err)
		return nil, err
	}
	fileList := make([]*data.File, 0, len(resp.Contents))
	for _, content := range resp.Contents {
		// NOTE(review): content.LastModified holds the object's real
		// modification time; time.Now() misreports it. Dropping time.Now()
		// would orphan the file-level "time" import — fix both together.
		fileList = append(fileList, data.NewFile(*content.Key, *content.Size, time.Now()))
	}
	return fileList, nil
}
// GetFile fetches the object with the given key and returns its metadata
// (name, content length, last-modified). On error an empty File is returned.
func (s3S S3Storage) GetFile(bucketName, name string) (*data.File, error) {
	params := &s3.GetObjectInput{
		Bucket: aws.String(bucketName), // Required
		Key:    aws.String(name),       // Required
	}
	resp, err := s3S.S3Connection.GetObject(params)
	if err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error fetching bucket file %v", err)
		return &data.File{}, err
	}
	return data.NewFile(name, *resp.ContentLength, *resp.LastModified), nil
}
// RemoveFile deletes the object with the given key from the bucket.
func (s3S S3Storage) RemoveFile(bucketName, name string) error {
	params := &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName), // Required
		Key:    aws.String(name),       // Required
	}
	if _, err := s3S.S3Connection.DeleteObject(params); err != nil {
		// Bug fix: logrus Debug does not interpret format verbs; Debugf does.
		log.Debugf("Error deleting bucket content %v", err)
		return err
	}
	return nil
}
func (s3S S3Storage) UploadFile(bucketName string, request *http.Request) ([]*data.File, error) {
uploader := s3manager.NewUploader(s3S.Session)
var fileName string
err := request.ParseMultipartForm(100000)
if err != nil {
log.Debug("Error parsing multipart form %v", err)
return nil, err
}
//get a ref to the parsed multipart form
m := request.MultipartForm
fmt.Println("Reading form data")
//get the *fileheaders
files := m.File["file"]
fmt.Println("File size %ld", len(files))
fileList := make([]*data.File, len(files))
index := 0
for i, _ := range files {
fileName = files[i].Filename
//for each fileheader, get a handle to the actual file
file, err := files[i].Open()
defer file.Close()
if err != nil {
log.Debug("Error opening file received %v", err)
return nil, err
}
//create destination file making sure the path is writeable.
result, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(fileName),
ACL: aws.String("public-read"),
Body: file,
})
if err != nil {
log.Debug("Failed to upload %v", err)
} else {
fmt.Println("Upload successful with uploadId "+result.UploadID)
}
fileObj, _ := s3S.GetFile(bucketName, fileName)
fileList[index] = fileObj
index++
}
return fileList, nil
} |
package client
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"os"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshsys "github.com/cloudfoundry/bosh-utils/system"
"golang.org/x/crypto/ssh"
)
// ExecuteOptions configures a single ssh invocation made by Service.Execute.
type ExecuteOptions struct {
// Exec is the ssh executable to run; defaults to "ssh" when empty.
Exec string
// ExtraArgs are appended verbatim to the generated ssh arguments.
ExtraArgs []string
// Stdin, Stdout, and Stderr default to the process's standard streams when nil.
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
// Host is the target host; must be empty if the remote target already configures one.
Host string
SkipAuthRetry bool
}
// Execute generates an ephemeral SSH key pair, has its public key signed via
// s.SignPublicKey, writes key material into a private tmpdir, and then runs
// the configured ssh executable attached to the caller's stdio streams. It
// returns the ssh process's exit status.
func (s Service) Execute(opts ExecuteOptions) (int, error) {
	if err := s.executeOpts(&opts); err != nil {
		return -1, err
	}
	tmpdir, err := s.fs.TempDir("ssh")
	if err != nil {
		return -1, bosherr.WrapError(err, "Creating certificate tmpdir")
	}
	defer s.fs.RemoveAll(tmpdir)
	privateKeyBytes, publicKeyBytes, err := makeSSHKeyPair()
	if err != nil {
		return -1, bosherr.WrapError(err, "Creating ephemeral ssh key")
	}
	tmpPrivateKey := fmt.Sprintf("%s/id_rsa", tmpdir)
	// Create the file empty first so permissions can be tightened before any
	// private key material is written.
	err = s.fs.WriteFile(tmpPrivateKey, nil)
	if err != nil {
		return -1, bosherr.WrapError(err, "Touching private key")
	}
	err = s.fs.Chmod(tmpPrivateKey, 0600)
	if err != nil {
		return -1, bosherr.WrapError(err, "Setting permissions of private key")
	}
	err = s.fs.WriteFile(tmpPrivateKey, privateKeyBytes)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing private key")
	}
	err = s.fs.WriteFile(fmt.Sprintf("%s/id_rsa.pub", tmpdir), publicKeyBytes)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing public key")
	}
	certificate, target, err := s.SignPublicKey(SignPublicKeyOptions{
		PublicKey: publicKeyBytes,
	})
	if err != nil {
		return -1, bosherr.WrapError(err, "Requesting signed public keys")
	}
	sshargs := []string{
		"-o", "ForwardAgent=no",
		"-o", "ServerAliveInterval=30",
		"-o", "IdentitiesOnly=yes",
	}
	tmpCertificate := fmt.Sprintf("%s/id_rsa-cert.pub", tmpdir)
	err = s.fs.WriteFile(tmpCertificate, certificate)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing certificate")
	}
	sshargs = append(sshargs, "-o", fmt.Sprintf("IdentityFile=%s", tmpPrivateKey))
	sshargs = append(sshargs, "-o", fmt.Sprintf("CertificateFile=%s", tmpCertificate))
	sshargs = append(sshargs, opts.ExtraArgs...)
	if target != nil {
		if target.Port != 0 {
			// Bug fix: string(target.Port) converts the int to a rune
			// (e.g. 22 -> "\x16"), not its decimal representation.
			sshargs = append(sshargs, "-p", fmt.Sprintf("%d", target.Port))
		}
		if target.User != "" {
			sshargs = append(sshargs, "-l", target.User)
		}
		if target.PublicKey != "" {
			sshargs = append(sshargs, "-o", "StrictHostKeyChecking=yes")
			tmpKnownHosts := fmt.Sprintf("%s/known_hosts", tmpdir)
			err = s.fs.WriteFileString(tmpKnownHosts, fmt.Sprintf("%s %s\n", target.Host, target.PublicKey))
			if err != nil {
				// Bug fix: error context previously said "Writing certificate".
				return -1, bosherr.WrapError(err, "Writing known hosts")
			}
			sshargs = append(sshargs, "-o", fmt.Sprintf("UserKnownHostsFile=%s", tmpKnownHosts))
		}
		if target.Host != "" {
			if opts.Host != "" {
				return -1, errors.New("Cannot specify user or host (already configured by remote)")
			}
			sshargs = append(sshargs, target.Host)
		}
	}
	if opts.Host != "" {
		sshargs = append(sshargs, opts.Host)
	}
	_, _, exitStatus, err := s.cmdRunner.RunComplexCommand(boshsys.Command{
		Name:         opts.Exec,
		Args:         sshargs,
		Stdin:        opts.Stdin,
		Stdout:       opts.Stdout,
		Stderr:       opts.Stderr,
		KeepAttached: true,
	})
	return exitStatus, err
}
// makeSSHKeyPair generates an ephemeral 2048-bit RSA key pair. It returns the
// PEM-encoded (PKCS#1) private key and the authorized_keys-format public key.
// https://github.com/cloudfoundry/bosh-cli/blob/a0c78a59b5eeac11a32e953451a497eb1cb9ba7d/director/ssh_opts.go#L43
func makeSSHKeyPair() ([]byte, []byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	var pemBuf bytes.Buffer
	block := &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	}
	if err := pem.Encode(&pemBuf, block); err != nil {
		return nil, nil, err
	}
	sshPub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		return nil, nil, err
	}
	return pemBuf.Bytes(), ssh.MarshalAuthorizedKey(sshPub), nil
}
// executeOpts fills in defaults for unset ExecuteOptions fields: the current
// process's standard streams for stdin/stdout/stderr, and "ssh" as the
// executable. It mutates opts in place and never fails.
func (s Service) executeOpts(opts *ExecuteOptions) error {
	if opts.Stdin == nil {
		opts.Stdin = os.Stdin
	}
	if opts.Stdout == nil {
		opts.Stdout = os.Stdout
	}
	if opts.Stderr == nil {
		opts.Stderr = os.Stderr
	}
	if len(opts.Exec) == 0 {
		opts.Exec = "ssh"
	}
	return nil
}
Fix client SSH port configuration
package client
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"os"
"strconv"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshsys "github.com/cloudfoundry/bosh-utils/system"
"golang.org/x/crypto/ssh"
)
// ExecuteOptions configures a single ssh invocation made by Service.Execute.
type ExecuteOptions struct {
// Exec is the ssh executable to run; defaults to "ssh" when empty.
Exec string
// ExtraArgs are appended verbatim to the generated ssh arguments.
ExtraArgs []string
// Stdin, Stdout, and Stderr default to the process's standard streams when nil.
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
// Host is the target host; must be empty if the remote target already configures one.
Host string
SkipAuthRetry bool
}
// Execute generates an ephemeral SSH key pair, has its public key signed via
// s.SignPublicKey, writes key material into a private tmpdir, and then runs
// the configured ssh executable attached to the caller's stdio streams. It
// returns the ssh process's exit status.
func (s Service) Execute(opts ExecuteOptions) (int, error) {
	if err := s.executeOpts(&opts); err != nil {
		return -1, err
	}
	tmpdir, err := s.fs.TempDir("ssh")
	if err != nil {
		return -1, bosherr.WrapError(err, "Creating certificate tmpdir")
	}
	defer s.fs.RemoveAll(tmpdir)
	privateKeyBytes, publicKeyBytes, err := makeSSHKeyPair()
	if err != nil {
		return -1, bosherr.WrapError(err, "Creating ephemeral ssh key")
	}
	tmpPrivateKey := fmt.Sprintf("%s/id_rsa", tmpdir)
	// Create the file empty first so permissions can be tightened before any
	// private key material is written.
	err = s.fs.WriteFile(tmpPrivateKey, nil)
	if err != nil {
		return -1, bosherr.WrapError(err, "Touching private key")
	}
	err = s.fs.Chmod(tmpPrivateKey, 0600)
	if err != nil {
		return -1, bosherr.WrapError(err, "Setting permissions of private key")
	}
	err = s.fs.WriteFile(tmpPrivateKey, privateKeyBytes)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing private key")
	}
	err = s.fs.WriteFile(fmt.Sprintf("%s/id_rsa.pub", tmpdir), publicKeyBytes)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing public key")
	}
	certificate, target, err := s.SignPublicKey(SignPublicKeyOptions{
		PublicKey: publicKeyBytes,
	})
	if err != nil {
		return -1, bosherr.WrapError(err, "Requesting signed public keys")
	}
	sshargs := []string{
		"-o", "ForwardAgent=no",
		"-o", "ServerAliveInterval=30",
		"-o", "IdentitiesOnly=yes",
	}
	tmpCertificate := fmt.Sprintf("%s/id_rsa-cert.pub", tmpdir)
	err = s.fs.WriteFile(tmpCertificate, certificate)
	if err != nil {
		return -1, bosherr.WrapError(err, "Writing certificate")
	}
	sshargs = append(sshargs, "-o", fmt.Sprintf("IdentityFile=%s", tmpPrivateKey))
	sshargs = append(sshargs, "-o", fmt.Sprintf("CertificateFile=%s", tmpCertificate))
	sshargs = append(sshargs, opts.ExtraArgs...)
	if target != nil {
		if target.Port != 0 {
			sshargs = append(sshargs, "-p", strconv.Itoa(target.Port))
		}
		if target.User != "" {
			sshargs = append(sshargs, "-l", target.User)
		}
		if target.PublicKey != "" {
			sshargs = append(sshargs, "-o", "StrictHostKeyChecking=yes")
			tmpKnownHosts := fmt.Sprintf("%s/known_hosts", tmpdir)
			err = s.fs.WriteFileString(tmpKnownHosts, fmt.Sprintf("%s %s\n", target.Host, target.PublicKey))
			if err != nil {
				// Bug fix: error context previously said "Writing certificate".
				return -1, bosherr.WrapError(err, "Writing known hosts")
			}
			sshargs = append(sshargs, "-o", fmt.Sprintf("UserKnownHostsFile=%s", tmpKnownHosts))
		}
		if target.Host != "" {
			if opts.Host != "" {
				return -1, errors.New("Cannot specify user or host (already configured by remote)")
			}
			sshargs = append(sshargs, target.Host)
		}
	}
	if opts.Host != "" {
		sshargs = append(sshargs, opts.Host)
	}
	_, _, exitStatus, err := s.cmdRunner.RunComplexCommand(boshsys.Command{
		Name:         opts.Exec,
		Args:         sshargs,
		Stdin:        opts.Stdin,
		Stdout:       opts.Stdout,
		Stderr:       opts.Stderr,
		KeepAttached: true,
	})
	return exitStatus, err
}
// makeSSHKeyPair generates an ephemeral 2048-bit RSA key pair. It returns the
// PEM-encoded (PKCS#1) private key and the authorized_keys-format public key.
// https://github.com/cloudfoundry/bosh-cli/blob/a0c78a59b5eeac11a32e953451a497eb1cb9ba7d/director/ssh_opts.go#L43
func makeSSHKeyPair() ([]byte, []byte, error) {
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, nil, err
}
privKeyPEM := &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privKey),
}
privKeyBuf := bytes.NewBufferString("")
err = pem.Encode(privKeyBuf, privKeyPEM)
if err != nil {
return nil, nil, err
}
pub, err := ssh.NewPublicKey(&privKey.PublicKey)
if err != nil {
return nil, nil, err
}
return privKeyBuf.Bytes(), ssh.MarshalAuthorizedKey(pub), nil
}
// executeOpts fills in defaults for unset ExecuteOptions fields: the current
// process's standard streams for stdin/stdout/stderr, and "ssh" as the
// executable. It mutates opts in place and never fails.
func (s Service) executeOpts(opts *ExecuteOptions) error {
	if opts.Stdin == nil {
		opts.Stdin = os.Stdin
	}
	if opts.Stdout == nil {
		opts.Stdout = os.Stdout
	}
	if opts.Stderr == nil {
		opts.Stderr = os.Stderr
	}
	if len(opts.Exec) == 0 {
		opts.Exec = "ssh"
	}
	return nil
}
|
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file was auto-generated via go generate.
// DO NOT UPDATE MANUALLY
/*
Command device facilitates interaction with the Vanadium device manager.
Usage:
device <command>
The device commands are:
install Install the given application.
install-local Install the given application from the local system.
uninstall Uninstall the given application installation.
associate Tool for creating associations between Vanadium blessings and a
system account
describe Describe the device.
claim Claim the device.
instantiate Create an instance of the given application.
delete Delete the given application instance.
run Run the given application instance.
kill Kill the given application instance.
revert Revert the device manager or applications.
update Update the device manager or applications.
status Get device manager or application status.
debug Debug the device.
acl Tool for setting device manager Permissions
publish Publish the given application(s).
ls List applications.
help Display help for commands or topics
The global flags are:
-v23.namespace.root=[/(dev.v.io/role/vprod/service/mounttabled)@ns.dev.v.io:8101]
local namespace root; can be repeated to provide multiple roots
-v23.proxy=
object name of proxy service to use to export services across network
boundaries
-alsologtostderr=true
log to standard error as well as files
-log_backtrace_at=:0
when logging hits line file:N, emit a stack trace
-log_dir=
if non-empty, write log files to this directory
-logtostderr=false
log to standard error instead of files
-max_stack_buf_size=4292608
max size in bytes of the buffer to use for logging stack traces
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
-v=0
log level for V logs
-v23.credentials=
directory to use for storing security credentials
-v23.i18n-catalogue=
i18n catalogue files to load, comma separated
-v23.permissions.file=map[]
specify a perms file as <name>:<permsfile>
-v23.permissions.literal=
explicitly specify the runtime perms as a JSON-encoded access.Permissions.
Overrides all --v23.permissions.file flags.
-v23.tcp.address=
address to listen on
-v23.tcp.protocol=wsh
protocol to listen with
-v23.vtrace.cache-size=1024
The number of vtrace traces to store in memory.
-v23.vtrace.collect-regexp=
Spans and annotations that match this regular expression will trigger trace
collection.
-v23.vtrace.dump-on-shutdown=true
If true, dump all stored traces on runtime shutdown.
-v23.vtrace.sample-rate=0
Rate (from 0.0 to 1.0) to sample vtrace traces.
-vmodule=
comma-separated list of pattern=N settings for filename-filtered logging
-vpath=
comma-separated list of pattern=N settings for file pathname-filtered logging
Device install
Install the given application and print the name of the new installation.
Usage:
device install [flags] <device> <application>
<device> is the vanadium object name of the device manager's app service.
<application> is the vanadium object name of the application.
The device install flags are:
-config={}
JSON-encoded device.Config object, of the form:
'{"flag1":"value1","flag2":"value2"}'
-packages={}
JSON-encoded application.Packages object, of the form:
'{"pkg1":{"File":"object name 1"},"pkg2":{"File":"object name 2"}}'
Device install-local
Install the given application specified using a local path, and print the name
of the new installation.
Usage:
device install-local [flags] <device> <title> [ENV=VAL ...] binary [--flag=val ...] [PACKAGES path ...]
<device> is the vanadium object name of the device manager's app service.
<title> is the app title.
This is followed by an arbitrary number of environment variable settings, the
local path for the binary to install, and arbitrary flag settings and args.
Optionally, this can be followed by 'PACKAGES' and a list of local files and
directories to be installed as packages for the app
The device install-local flags are:
-config={}
JSON-encoded device.Config object, of the form:
'{"flag1":"value1","flag2":"value2"}'
-packages={}
JSON-encoded application.Packages object, of the form:
'{"pkg1":{"File":"local file path1"},"pkg2":{"File":"local file path 2"}}'
Device uninstall
Uninstall the given application installation.
Usage:
device uninstall <installation>
<installation> is the vanadium object name of the application installation to
uninstall.
Device associate - Tool for creating associations between Vanadium blessings and a system account
The associate tool facilitates managing blessing to system account associations.
Usage:
device associate <command>
The device associate commands are:
list Lists the account associations.
add Add the listed blessings with the specified system account.
remove Removes system accounts associated with the listed blessings.
Device associate list
Lists all account associations.
Usage:
device associate list <devicemanager>.
<devicemanager> is the name of the device manager to connect to.
Device associate add
Add the listed blessings with the specified system account.
Usage:
device associate add <devicemanager> <systemName> <blessing>...
<devicemanager> is the name of the device manager to connect to. <systemName> is
the name of an account holder on the local system. <blessing>.. are the
blessings to associate systemAccount with.
Device associate remove
Removes system accounts associated with the listed blessings.
Usage:
device associate remove <devicemanager> <blessing>...
<devicemanager> is the name of the device manager to connect to. <blessing>...
is a list of blessings.
Device describe
Describe the device.
Usage:
device describe <device>
<device> is the vanadium object name of the device manager's device service.
Device claim
Claim the device.
Usage:
device claim <device> <grant extension> <pairing token> <device publickey>
<device> is the vanadium object name of the device manager's device service.
<grant extension> is used to extend the default blessing of the current
principal when blessing the app instance.
<pairing token> is a token that the device manager expects to be replayed during
a claim operation on the device.
<device publickey> is the marshalled public key of the device manager we are
claiming.
Device instantiate
Create an instance of the given application, provide it with a blessing, and
print the name of the new instance.
Usage:
device instantiate <application installation> <grant extension>
<application installation> is the vanadium object name of the application
installation from which to create an instance.
<grant extension> is used to extend the default blessing of the current
principal when blessing the app instance.
Device delete
Delete the given application instance.
Usage:
device delete <app instance>
<app instance> is the vanadium object name of the application instance to
delete.
Device run
Run the given application instance.
Usage:
device run <app instance>
<app instance> is the vanadium object name of the application instance to run.
Device kill
Kill the given application instance.
Usage:
device kill <app instance>
<app instance> is the vanadium object name of the application instance to kill.
Device revert
Revert the device manager or application instances and installations to a
previous version of their current version
Usage:
device revert [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device revert flags are:
-installation-state=!Uninstalled
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=!Deleted
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=BYKIND
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device update
Update the device manager or application instances and installations
Usage:
device update [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device update flags are:
-installation-state=!Uninstalled
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=!Deleted
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=BYKIND
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device status
Get the status of the device manager or application instances and installations.
Usage:
device status [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device status flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device debug
Get internal debug information about application installations and instances.
Usage:
device debug [flags] <app name patterns...>
<app name patterns...> are vanadium object names or glob name patterns
corresponding to application installations and instances.
The device debug flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device acl - Tool for setting device manager Permissions
The acl tool manages Permissions on the device manger, installations and
instances.
Usage:
device acl <command>
The device acl commands are:
get Get Permissions for the given target.
set Set Permissions for the given target.
Device acl get
Get Permissions for the given target.
Usage:
device acl get <device manager name>
<device manager name> can be a Vanadium name for a device manager, application
installation or instance.
Device acl set
Set Permissions for the given target
Usage:
device acl set [flags] <device manager name> (<blessing> [!]<tag>(,[!]<tag>)*
<device manager name> can be a Vanadium name for a device manager, application
installation or instance.
<blessing> is a blessing pattern. If the same pattern is repeated multiple times
in the command, then only the last occurrence will be honored.
<tag> is a subset of defined access types ("Admin", "Read", "Write" etc.). If
the access right is prefixed with a '!' then <blessing> is added to the NotIn
list for that right. Using "^" as a "tag" causes all occurrences of <blessing>
in the current AccessList to be cleared.
Examples: set root/self ^ will remove "root/self" from the In and NotIn lists
for all access rights.
set root/self Read,!Write will add "root/self" to the In list for Read access
and the NotIn list for Write access (and remove "root/self" from both the In and
NotIn lists of all other access rights)
The device acl set flags are:
-f=false
Instead of making the AccessLists additive, do a complete replacement based
on the specified settings.
Device publish
Publishes the given application(s) to the binary and application servers. The
binaries should be in $JIRI_ROOT/release/go/bin/[<GOOS>_<GOARCH>] by default (can
be overridden with --from). By default the binary name is used as the name of
the application envelope, and as the title in the envelope. However,
<envelope-name> and <title> can be specified explicitly using :<envelope-name>
and @<title>. The binary is published as <binserv>/<binary
name>/<GOOS>-<GOARCH>/<TIMESTAMP>. The application envelope is published as
<appserv>/<envelope-name>/<TIMESTAMP>. Optionally, adds blessing patterns to the
Read and Resolve AccessLists.
Usage:
device publish [flags] <binary name>[:<envelope-name>][@<title>] ...
The device publish flags are:
-add-publisher=true
If true, add a publisher blessing to the application envelope
-appserv=applications
Name of application service.
-binserv=binaries
Name of binary service.
-from=
Location of binaries to be published. Defaults to
$JIRI_ROOT/release/go/bin/[<GOOS>_<GOARCH>]
-goarch=<runtime.GOARCH>
GOARCH for application. The default is the value of runtime.GOARCH.
-goos=<runtime.GOOS>
GOOS for application. The default is the value of runtime.GOOS.
-publisher-min-validity=30h0m0s
Publisher blessings that are valid for less than this amount of time are
considered invalid
-readers=dev.v.io
If non-empty, comma-separated blessing patterns to add to Read and Resolve
AccessList.
Device ls
List application installations or instances.
Usage:
device ls [flags] <app name patterns...>
<app name patterns...> are vanadium object names or glob name patterns
corresponding to application installations and instances.
The device ls flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device help - Display help for commands or topics
Help with no args displays the usage of the parent command.
Help with args displays the usage of the specified sub-command or help topic.
"help ..." recursively displays help for all commands and topics.
Usage:
device help [flags] [command/topic ...]
[command/topic ...] optionally identifies a specific sub-command or help topic.
The device help flags are:
-style=compact
The formatting style for help output:
compact - Good for compact cmdline output.
full - Good for cmdline output, shows all global flags.
godoc - Good for godoc processing.
Override the default by setting the CMDLINE_STYLE environment variable.
-width=<terminal width>
Format output to this target width in runes, or unlimited if width < 0.
Defaults to the terminal width if available. Override the default by setting
the CMDLINE_WIDTH environment variable.
*/
package main
TBR: services/device: fix generated docs.
PresubmitTest: none
Change-Id: I57daf57f882cda770ba4f9f467933c427c39357b
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file was auto-generated via go generate.
// DO NOT UPDATE MANUALLY
/*
Command device facilitates interaction with the Vanadium device manager.
Usage:
device <command>
The device commands are:
install Install the given application.
install-local Install the given application from the local system.
uninstall Uninstall the given application installation.
associate Tool for creating associations between Vanadium blessings and a
system account
describe Describe the device.
claim Claim the device.
instantiate Create an instance of the given application.
delete Delete the given application instance.
run Run the given application instance.
kill Kill the given application instance.
revert Revert the device manager or applications.
update Update the device manager or applications.
status Get device manager or application status.
debug Debug the device.
acl Tool for setting device manager Permissions
publish Publish the given application(s).
ls List applications.
help Display help for commands or topics
The global flags are:
-v23.namespace.root=[/(dev.v.io/role/vprod/service/mounttabled)@ns.dev.v.io:8101]
local namespace root; can be repeated to provided multiple roots
-v23.proxy=
object name of proxy service to use to export services across network
boundaries
-alsologtostderr=true
log to standard error as well as files
-log_backtrace_at=:0
when logging hits line file:N, emit a stack trace
-log_dir=
if non-empty, write log files to this directory
-logtostderr=false
log to standard error instead of files
-max_stack_buf_size=4292608
max size in bytes of the buffer to use for logging stack traces
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
-v=0
log level for V logs
-v23.credentials=
directory to use for storing security credentials
-v23.i18n-catalogue=
i18n catalogue files to load, comma separated
-v23.permissions.file=map[]
specify a perms file as <name>:<permsfile>
-v23.permissions.literal=
explicitly specify the runtime perms as a JSON-encoded access.Permissions.
Overrides all --v23.permissions.file flags.
-v23.tcp.address=
address to listen on
-v23.tcp.protocol=wsh
protocol to listen with
-v23.vtrace.cache-size=1024
The number of vtrace traces to store in memory.
-v23.vtrace.collect-regexp=
Spans and annotations that match this regular expression will trigger trace
collection.
-v23.vtrace.dump-on-shutdown=true
If true, dump all stored traces on runtime shutdown.
-v23.vtrace.sample-rate=0
Rate (from 0.0 to 1.0) to sample vtrace traces.
-vmodule=
comma-separated list of pattern=N settings for filename-filtered logging
-vpath=
comma-separated list of pattern=N settings for file pathname-filtered logging
Device install
Install the given application and print the name of the new installation.
Usage:
device install [flags] <device> <application>
<device> is the vanadium object name of the device manager's app service.
<application> is the vanadium object name of the application.
The device install flags are:
-config={}
JSON-encoded device.Config object, of the form:
'{"flag1":"value1","flag2":"value2"}'
-packages={}
JSON-encoded application.Packages object, of the form:
'{"pkg1":{"File":"object name 1"},"pkg2":{"File":"object name 2"}}'
Device install-local
Install the given application specified using a local path, and print the name
of the new installation.
Usage:
device install-local [flags] <device> <title> [ENV=VAL ...] binary [--flag=val ...] [PACKAGES path ...]
<device> is the vanadium object name of the device manager's app service.
<title> is the app title.
This is followed by an arbitrary number of environment variable settings, the
local path for the binary to install, and arbitrary flag settings and args.
Optionally, this can be followed by 'PACKAGES' and a list of local files and
directories to be installed as packages for the app
The device install-local flags are:
-config={}
JSON-encoded device.Config object, of the form:
'{"flag1":"value1","flag2":"value2"}'
-packages={}
JSON-encoded application.Packages object, of the form:
'{"pkg1":{"File":"local file path1"},"pkg2":{"File":"local file path 2"}}'
Device uninstall
Uninstall the given application installation.
Usage:
device uninstall <installation>
<installation> is the vanadium object name of the application installation to
uninstall.
Device associate - Tool for creating associations between Vanadium blessings and a system account
The associate tool facilitates managing blessing to system account associations.
Usage:
device associate <command>
The device associate commands are:
list Lists the account associations.
add Add the listed blessings with the specified system account.
remove Removes system accounts associated with the listed blessings.
Device associate list
Lists all account associations.
Usage:
device associate list <devicemanager>.
<devicemanager> is the name of the device manager to connect to.
Device associate add
Add the listed blessings with the specified system account.
Usage:
device associate add <devicemanager> <systemName> <blessing>...
<devicemanager> is the name of the device manager to connect to. <systemName> is
the name of an account holder on the local system. <blessing>... are the
blessings to associate systemAccount with.
Device associate remove
Removes system accounts associated with the listed blessings.
Usage:
device associate remove <devicemanager> <blessing>...
<devicemanager> is the name of the device manager to connect to. <blessing>...
is a list of blessings.
Device describe
Describe the device.
Usage:
device describe <device>
<device> is the vanadium object name of the device manager's device service.
Device claim
Claim the device.
Usage:
device claim <device> <grant extension> <pairing token> <device publickey>
<device> is the vanadium object name of the device manager's device service.
<grant extension> is used to extend the default blessing of the current
principal when blessing the app instance.
<pairing token> is a token that the device manager expects to be replayed during
a claim operation on the device.
<device publickey> is the marshalled public key of the device manager we are
claiming.
Device instantiate
Create an instance of the given application, provide it with a blessing, and
print the name of the new instance.
Usage:
device instantiate <application installation> <grant extension>
<application installation> is the vanadium object name of the application
installation from which to create an instance.
<grant extension> is used to extend the default blessing of the current
principal when blessing the app instance.
Device delete
Delete the given application instance.
Usage:
device delete <app instance>
<app instance> is the vanadium object name of the application instance to
delete.
Device run
Run the given application instance.
Usage:
device run <app instance>
<app instance> is the vanadium object name of the application instance to run.
Device kill
Kill the given application instance.
Usage:
device kill <app instance>
<app instance> is the vanadium object name of the application instance to kill.
Device revert
Revert the device manager or application instances and installations to a
previous version of their current version
Usage:
device revert [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device revert flags are:
-installation-state=!Uninstalled
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=!Deleted
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=BYKIND
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device update
Update the device manager or application instances and installations
Usage:
device update [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device update flags are:
-installation-state=!Uninstalled
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=!Deleted
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=BYKIND
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device status
Get the status of the device manager or application instances and installations.
Usage:
device status [flags] <name patterns...>
<name patterns...> are vanadium object names or glob name patterns corresponding
to the device manager service, or to application installations and instances.
The device status flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device debug
Get internal debug information about application installations and instances.
Usage:
device debug [flags] <app name patterns...>
<app name patterns...> are vanadium object names or glob name patterns
corresponding to application installations and instances.
The device debug flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device acl - Tool for setting device manager Permissions
The acl tool manages Permissions on the device manger, installations and
instances.
Usage:
device acl <command>
The device acl commands are:
get Get Permissions for the given target.
set Set Permissions for the given target.
Device acl get
Get Permissions for the given target.
Usage:
device acl get <device manager name>
<device manager name> can be a Vanadium name for a device manager, application
installation or instance.
Device acl set
Set Permissions for the given target
Usage:
device acl set [flags] <device manager name> (<blessing> [!]<tag>(,[!]<tag>)*
<device manager name> can be a Vanadium name for a device manager, application
installation or instance.
<blessing> is a blessing pattern. If the same pattern is repeated multiple times
in the command, then only the last occurrence will be honored.
<tag> is a subset of defined access types ("Admin", "Read", "Write" etc.). If
the access right is prefixed with a '!' then <blessing> is added to the NotIn
list for that right. Using "^" as a "tag" causes all occurrences of <blessing>
in the current AccessList to be cleared.
Examples: set root/self ^ will remove "root/self" from the In and NotIn lists
for all access rights.
set root/self Read,!Write will add "root/self" to the In list for Read access
and the NotIn list for Write access (and remove "root/self" from both the In and
NotIn lists of all other access rights)
The device acl set flags are:
-f=false
Instead of making the AccessLists additive, do a complete replacement based
on the specified settings.
Device publish
Publishes the given application(s) to the binary and application servers. The
binaries should be in $JIRI_ROOT/release/go/bin/[<GOOS>_<GOARCH>] by default
(can be overridden with --from). By default the binary name is used as the name
of the application envelope, and as the title in the envelope. However,
<envelope-name> and <title> can be specified explicitly using :<envelope-name>
and @<title>. The binary is published as <binserv>/<binary
name>/<GOOS>-<GOARCH>/<TIMESTAMP>. The application envelope is published as
<appserv>/<envelope-name>/<TIMESTAMP>. Optionally, adds blessing patterns to the
Read and Resolve AccessLists.
Usage:
device publish [flags] <binary name>[:<envelope-name>][@<title>] ...
The device publish flags are:
-add-publisher=true
If true, add a publisher blessing to the application envelope
-appserv=applications
Name of application service.
-binserv=binaries
Name of binary service.
-from=
Location of binaries to be published. Defaults to
$JIRI_ROOT/release/go/bin/[<GOOS>_<GOARCH>]
-goarch=<runtime.GOARCH>
GOARCH for application. The default is the value of runtime.GOARCH.
-goos=<runtime.GOOS>
GOOS for application. The default is the value of runtime.GOOS.
-publisher-min-validity=30h0m0s
Publisher blessings that are valid for less than this amount of time are
considered invalid
-readers=dev.v.io
If non-empty, comma-separated blessing patterns to add to Read and Resolve
AccessList.
Device ls
List application installations or instances.
Usage:
device ls [flags] <app name patterns...>
<app name patterns...> are vanadium object names or glob name patterns
corresponding to application installations and instances.
The device ls flags are:
-installation-state=
If non-empty, specifies allowed installation states (all others installations
get filtered out). The value of the flag is a comma-separated list of values
from among: [Active Uninstalled]. If the value is prefixed by '!', the list
acts as a blacklist (all matching installations get filtered out).
-instance-state=
If non-empty, specifies allowed instance states (all other instances get
filtered out). The value of the flag is a comma-separated list of values from
among: [Launching Running Dying NotRunning Updating Deleted]. If the value is
prefixed by '!', the list acts as a blacklist (all matching instances get
filtered out).
-only-installations=false
If set, only consider installations.
-only-instances=false
If set, only consider instances.
-parallelism=FULL
Specifies the level of parallelism for the handler execution. One of: [BYKIND
FULL NONE].
Device help - Display help for commands or topics
Help with no args displays the usage of the parent command.
Help with args displays the usage of the specified sub-command or help topic.
"help ..." recursively displays help for all commands and topics.
Usage:
device help [flags] [command/topic ...]
[command/topic ...] optionally identifies a specific sub-command or help topic.
The device help flags are:
-style=compact
The formatting style for help output:
compact - Good for compact cmdline output.
full - Good for cmdline output, shows all global flags.
godoc - Good for godoc processing.
Override the default by setting the CMDLINE_STYLE environment variable.
-width=<terminal width>
Format output to this target width in runes, or unlimited if width < 0.
Defaults to the terminal width if available. Override the default by setting
the CMDLINE_WIDTH environment variable.
*/
package main
|
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
"context"
"errors"
"fmt"
"strings"
"code.gitea.io/gitea/models"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification"
repo_module "code.gitea.io/gitea/modules/repository"
pull_service "code.gitea.io/gitea/services/pull"
)
// CreateNewBranch creates a new branch named branchName in the repository,
// based on the existing branch oldBranchName, by pushing the ref locally so
// that the usual push hooks and notifications run.
//
// It returns models.ErrBranchDoesNotExist if oldBranchName does not exist,
// a checkBranchName error if branchName collides with an existing branch or
// tag, and passes push-out-of-date / push-rejected errors through unchanged
// so callers can detect them.
func CreateNewBranch(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, oldBranchName, branchName string) (err error) {
	// Check if branch name can be used
	if err := checkBranchName(ctx, repo, branchName); err != nil {
		return err
	}

	if !git.IsBranchExist(ctx, repo.RepoPath(), oldBranchName) {
		return models.ErrBranchDoesNotExist{
			BranchName: oldBranchName,
		}
	}

	// Fully qualify the source side of the refspec as refs/heads/<oldBranchName>.
	// Pushing the bare branch name lets git resolve it ambiguously when a tag
	// with the same name exists, which fails with a "MoreThanOne" error
	// (see "Avoid MoreThanOne Error (#19557)").
	if err := git.Push(ctx, repo.RepoPath(), git.PushOptions{
		Remote: repo.RepoPath(),
		Branch: fmt.Sprintf("%s%s:%s%s", git.BranchPrefix, oldBranchName, git.BranchPrefix, branchName),
		Env:    models.PushingEnvironment(doer, repo),
	}); err != nil {
		if git.IsErrPushOutOfDate(err) || git.IsErrPushRejected(err) {
			return err
		}
		return fmt.Errorf("Push: %v", err)
	}

	return nil
}
// GetBranches returns branches from the repository, skipping skip initial branches and
// returning at most limit branches, or all branches if limit is 0.
// The second return value is the total number of branches in the repository.
func GetBranches(ctx context.Context, repo *repo_model.Repository, skip, limit int) ([]*git.Branch, int, error) {
	branches, total, err := git.GetBranchesByPath(ctx, repo.RepoPath(), skip, limit)
	return branches, total, err
}
// checkBranchName validates a proposed branch name against the repository's
// existing references. It returns ErrBranchAlreadyExists for an exact branch
// match, ErrBranchNameConflict when the name nests with an existing branch
// (either direction), ErrTagAlreadyExists for a same-named tag, and nil when
// the name is free to use.
func checkBranchName(ctx context.Context, repo *repo_model.Repository, name string) error {
	nameAsDir := name + "/"
	_, err := git.WalkReferences(ctx, repo.RepoPath(), func(_, refName string) error {
		branch := strings.TrimPrefix(refName, git.BranchPrefix)
		if branch == name {
			return models.ErrBranchAlreadyExists{
				BranchName: name,
			}
		}
		// "a/b" exists while we want "a", or "a" exists while we want "a/b":
		// either nesting is a conflict in git's ref namespace.
		if strings.HasPrefix(branch, nameAsDir) || strings.HasPrefix(name, branch+"/") {
			return models.ErrBranchNameConflict{
				BranchName: branch,
			}
		}
		if refName == git.TagPrefix+name {
			return models.ErrTagAlreadyExists{
				TagName: name,
			}
		}
		return nil
	})
	return err
}
// CreateNewBranchFromCommit creates a new repository branch named branchName
// pointing at the given commit, pushing the ref locally so hooks run.
func CreateNewBranchFromCommit(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, commit, branchName string) (err error) {
	// Reject names that collide with existing branches or tags.
	if err := checkBranchName(ctx, repo, branchName); err != nil {
		return err
	}

	repoPath := repo.RepoPath()
	opts := git.PushOptions{
		Remote: repoPath,
		Branch: fmt.Sprintf("%s:%s%s", commit, git.BranchPrefix, branchName),
		Env:    models.PushingEnvironment(doer, repo),
	}
	err = git.Push(ctx, repoPath, opts)
	if err == nil {
		return nil
	}
	// Out-of-date / rejected pushes are passed through so callers can tell
	// them apart from unexpected failures.
	if git.IsErrPushOutOfDate(err) || git.IsErrPushRejected(err) {
		return err
	}
	return fmt.Errorf("Push: %v", err)
}
// RenameBranch renames branch "from" to "to" in both the database and the git
// repository, keeping the default-branch setting in sync and emitting
// delete/create ref notifications. User-level conditions are reported via the
// status string rather than an error: "target_exist" when to already exists
// (or from == to), "from_not_exist" when from is missing, "" on success.
func RenameBranch(repo *repo_model.Repository, doer *user_model.User, gitRepo *git.Repository, from, to string) (string, error) {
	// Renaming onto itself or an existing branch is refused, not an error.
	if from == to || gitRepo.IsBranchExist(to) {
		return "target_exist", nil
	}
	if !gitRepo.IsBranchExist(from) {
		return "from_not_exist", nil
	}

	// The git-side rename runs inside the database transaction callback; if
	// the renamed branch was the default, move the default pointer too.
	gitRename := func(isDefault bool) error {
		if err := gitRepo.RenameBranch(from, to); err != nil {
			return err
		}
		if !isDefault {
			return nil
		}
		return gitRepo.SetDefaultBranch(to)
	}
	if err := models.RenameBranch(repo, from, to, gitRename); err != nil {
		return "", err
	}

	refID, err := gitRepo.GetRefCommitID(git.BranchPrefix + to)
	if err != nil {
		return "", err
	}
	notification.NotifyDeleteRef(doer, repo, "branch", git.BranchPrefix+from)
	notification.NotifyCreateRef(doer, repo, "branch", git.BranchPrefix+to, refID)
	return "", nil
}
// enumerates all branch related errors
var (
	// ErrBranchIsDefault is returned when trying to delete the repository's default branch.
	ErrBranchIsDefault = errors.New("branch is default")
	// ErrBranchIsProtected is returned when trying to delete a protected branch.
	ErrBranchIsProtected = errors.New("branch is protected")
)
// DeleteBranch delete branch: it refuses to delete the default branch
// (ErrBranchIsDefault) or a protected branch (ErrBranchIsProtected), force
// deletes the git branch, closes pull requests whose head is this branch,
// then records the push update and the deleted-branch entry on a best-effort
// basis (failures there are only logged).
func DeleteBranch(doer *user_model.User, repo *repo_model.Repository, gitRepo *git.Repository, branchName string) error {
	if branchName == repo.DefaultBranch {
		return ErrBranchIsDefault
	}

	isProtected, err := models.IsProtectedBranch(repo.ID, branchName)
	if err != nil {
		return err
	}
	if isProtected {
		return ErrBranchIsProtected
	}

	// Resolve the branch head before deleting so its commit ID can be
	// recorded below as the old commit of the deletion push.
	commit, err := gitRepo.GetBranchCommit(branchName)
	if err != nil {
		return err
	}

	if err := gitRepo.DeleteBranch(branchName, git.DeleteBranchOptions{
		Force: true,
	}); err != nil {
		return err
	}

	if err := pull_service.CloseBranchPulls(doer, repo.ID, branchName); err != nil {
		return err
	}

	// Don't return error below this
	if err := PushUpdate(
		&repo_module.PushUpdateOptions{
			RefFullName:  git.BranchPrefix + branchName,
			OldCommitID:  commit.ID.String(),
			NewCommitID:  git.EmptySHA,
			PusherID:     doer.ID,
			PusherName:   doer.Name,
			RepoUserName: repo.OwnerName,
			RepoName:     repo.Name,
		}); err != nil {
		log.Error("Update: %v", err)
	}

	if err := models.AddDeletedBranch(repo.ID, branchName, commit.ID.String(), doer.ID); err != nil {
		log.Warn("AddDeletedBranch: %v", err)
	}

	return nil
}
Avoid MoreThanOne Error (#19557)
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
"context"
"errors"
"fmt"
"strings"
"code.gitea.io/gitea/models"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification"
repo_module "code.gitea.io/gitea/modules/repository"
pull_service "code.gitea.io/gitea/services/pull"
)
// CreateNewBranch creates a new branch named branchName based on the existing
// branch oldBranchName, by pushing the ref locally so hooks and notifications
// run. It returns models.ErrBranchDoesNotExist when oldBranchName is missing,
// a checkBranchName error when branchName collides with an existing branch or
// tag, and passes push-out-of-date / push-rejected errors through unchanged.
func CreateNewBranch(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, oldBranchName, branchName string) (err error) {
	// Check if branch name can be used
	if err := checkBranchName(ctx, repo, branchName); err != nil {
		return err
	}

	if !git.IsBranchExist(ctx, repo.RepoPath(), oldBranchName) {
		return models.ErrBranchDoesNotExist{
			BranchName: oldBranchName,
		}
	}

	// The source side of the refspec is fully qualified as
	// refs/heads/<oldBranchName>; a bare branch name would resolve
	// ambiguously when a same-named tag exists ("Avoid MoreThanOne Error").
	if err := git.Push(ctx, repo.RepoPath(), git.PushOptions{
		Remote: repo.RepoPath(),
		Branch: fmt.Sprintf("%s%s:%s%s", git.BranchPrefix, oldBranchName, git.BranchPrefix, branchName),
		Env:    models.PushingEnvironment(doer, repo),
	}); err != nil {
		if git.IsErrPushOutOfDate(err) || git.IsErrPushRejected(err) {
			return err
		}
		return fmt.Errorf("Push: %v", err)
	}

	return nil
}
// GetBranches returns branches from the repository, skipping skip initial branches and
// returning at most limit branches, or all branches if limit is 0.
// The second return value is the total number of branches in the repository.
func GetBranches(ctx context.Context, repo *repo_model.Repository, skip, limit int) ([]*git.Branch, int, error) {
	branches, total, err := git.GetBranchesByPath(ctx, repo.RepoPath(), skip, limit)
	return branches, total, err
}
// checkBranchName validates branch name with existing repository branches.
// It walks every reference in the repository and returns:
//   - models.ErrBranchAlreadyExists if a branch with exactly this name exists;
//   - models.ErrBranchNameConflict if the name nests with an existing branch
//     in either direction ("a/b" exists and "a" is wanted, or vice versa);
//   - models.ErrTagAlreadyExists if a tag with this name exists.
// A nil return means the name is usable.
func checkBranchName(ctx context.Context, repo *repo_model.Repository, name string) error {
	_, err := git.WalkReferences(ctx, repo.RepoPath(), func(_, refName string) error {
		// Non-branch refs (e.g. tags) are left unchanged by TrimPrefix and
		// are only matched by the TagPrefix case below.
		branchRefName := strings.TrimPrefix(refName, git.BranchPrefix)
		switch {
		case branchRefName == name:
			return models.ErrBranchAlreadyExists{
				BranchName: name,
			}
		// If branchRefName like a/b but we want to create a branch named a then we have a conflict
		case strings.HasPrefix(branchRefName, name+"/"):
			return models.ErrBranchNameConflict{
				BranchName: branchRefName,
			}
		// Conversely if branchRefName like a but we want to create a branch named a/b then we also have a conflict
		case strings.HasPrefix(name, branchRefName+"/"):
			return models.ErrBranchNameConflict{
				BranchName: branchRefName,
			}
		case refName == git.TagPrefix+name:
			return models.ErrTagAlreadyExists{
				TagName: name,
			}
		}
		return nil
	})
	return err
}
// CreateNewBranchFromCommit creates a new repository branch named branchName
// pointing at the given commit, by pushing the ref locally so hooks run.
// A checkBranchName error is returned when the name collides with an existing
// branch or tag; push-out-of-date / push-rejected errors are passed through.
func CreateNewBranchFromCommit(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, commit, branchName string) (err error) {
	// Check if branch name can be used
	if err := checkBranchName(ctx, repo, branchName); err != nil {
		return err
	}

	// The source side is a commit hash, so no refs/heads/ prefix is needed here.
	if err := git.Push(ctx, repo.RepoPath(), git.PushOptions{
		Remote: repo.RepoPath(),
		Branch: fmt.Sprintf("%s:%s%s", commit, git.BranchPrefix, branchName),
		Env:    models.PushingEnvironment(doer, repo),
	}); err != nil {
		if git.IsErrPushOutOfDate(err) || git.IsErrPushRejected(err) {
			return err
		}
		return fmt.Errorf("Push: %v", err)
	}

	return nil
}
// RenameBranch rename a branch in both the database and the git repository,
// keeping the default-branch setting in sync and emitting delete/create ref
// notifications. User-level conditions are reported via the status string
// rather than an error: "target_exist" when to already exists (or from == to),
// "from_not_exist" when from is missing, "" on success.
func RenameBranch(repo *repo_model.Repository, doer *user_model.User, gitRepo *git.Repository, from, to string) (string, error) {
	if from == to {
		return "target_exist", nil
	}

	if gitRepo.IsBranchExist(to) {
		return "target_exist", nil
	}

	if !gitRepo.IsBranchExist(from) {
		return "from_not_exist", nil
	}

	// The git-side rename runs inside the database rename's callback; when the
	// renamed branch was the default, the default pointer is moved as well.
	if err := models.RenameBranch(repo, from, to, func(isDefault bool) error {
		err2 := gitRepo.RenameBranch(from, to)
		if err2 != nil {
			return err2
		}

		if isDefault {
			err2 = gitRepo.SetDefaultBranch(to)
			if err2 != nil {
				return err2
			}
		}

		return nil
	}); err != nil {
		return "", err
	}

	refID, err := gitRepo.GetRefCommitID(git.BranchPrefix + to)
	if err != nil {
		return "", err
	}

	notification.NotifyDeleteRef(doer, repo, "branch", git.BranchPrefix+from)
	notification.NotifyCreateRef(doer, repo, "branch", git.BranchPrefix+to, refID)

	return "", nil
}
// enumerates all branch related errors
var (
	// ErrBranchIsDefault is returned when trying to delete the repository's default branch.
	ErrBranchIsDefault = errors.New("branch is default")
	// ErrBranchIsProtected is returned when trying to delete a protected branch.
	ErrBranchIsProtected = errors.New("branch is protected")
)
// DeleteBranch deletes the named branch from the repository. It refuses to
// delete the default branch (ErrBranchIsDefault) or a protected branch
// (ErrBranchIsProtected), force-deletes the git branch, closes pull requests
// whose head is this branch, then records the push update and deleted-branch
// entry on a best-effort basis (failures there are only logged).
func DeleteBranch(doer *user_model.User, repo *repo_model.Repository, gitRepo *git.Repository, branchName string) error {
	if branchName == repo.DefaultBranch {
		return ErrBranchIsDefault
	}

	protected, err := models.IsProtectedBranch(repo.ID, branchName)
	if err != nil {
		return err
	}
	if protected {
		return ErrBranchIsProtected
	}

	// Resolve the branch head first: its commit ID is needed below as the
	// old commit of the deletion push update.
	headCommit, err := gitRepo.GetBranchCommit(branchName)
	if err != nil {
		return err
	}

	deleteOpts := git.DeleteBranchOptions{Force: true}
	if err := gitRepo.DeleteBranch(branchName, deleteOpts); err != nil {
		return err
	}

	if err := pull_service.CloseBranchPulls(doer, repo.ID, branchName); err != nil {
		return err
	}

	// Best-effort bookkeeping from here on: log failures instead of returning them.
	oldCommitID := headCommit.ID.String()
	update := &repo_module.PushUpdateOptions{
		RefFullName:  git.BranchPrefix + branchName,
		OldCommitID:  oldCommitID,
		NewCommitID:  git.EmptySHA,
		PusherID:     doer.ID,
		PusherName:   doer.Name,
		RepoUserName: repo.OwnerName,
		RepoName:     repo.Name,
	}
	if err := PushUpdate(update); err != nil {
		log.Error("Update: %v", err)
	}

	if err := models.AddDeletedBranch(repo.ID, branchName, oldCommitID, doer.ID); err != nil {
		log.Warn("AddDeletedBranch: %v", err)
	}

	return nil
}
|
// +build !nacl
// run
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var tests = `
# These are test cases for the linker analysis that detects chains of
# nosplit functions that would cause a stack overflow.
#
# Lines beginning with # are comments.
#
# Each test case describes a sequence of functions, one per line.
# Each function definition is the function name, then the frame size,
# then optionally the keyword 'nosplit', then the body of the function.
# The body is assembly code, with some shorthands.
# The shorthand 'call x' stands for CALL x(SB).
# The shorthand 'callind' stands for 'CALL R0', where R0 is a register.
# Each test case must define a function named main, and it must be first.
# That is, a line beginning "main " indicates the start of a new test case.
# Within a stanza, ; can be used instead of \n to separate lines.
#
# After the function definition, the test case ends with an optional
# REJECT line, specifying the architectures on which the case should
# be rejected. "REJECT" without any architectures means reject on all architectures.
# The linker should accept the test case on systems not explicitly rejected.
#
# 64-bit systems do not attempt to execute test cases with frame sizes
# that are only 32-bit aligned.
# Ordinary function should work
main 0
# Large frame marked nosplit is always wrong.
main 10000 nosplit
REJECT
# Calling a large frame is okay.
main 0 call big
big 10000
# But not if the frame is nosplit.
main 0 call big
big 10000 nosplit
REJECT
# Recursion is okay.
main 0 call main
# Recursive nosplit runs out of space.
main 0 nosplit call main
REJECT
# Chains of ordinary functions okay.
main 0 call f1
f1 80 call f2
f2 80
# Chains of nosplit must fit in the stack limit, 128 bytes.
main 0 call f1
f1 80 nosplit call f2
f2 80 nosplit
REJECT
# Larger chains.
main 0 call f1
f1 16 call f2
f2 16 call f3
f3 16 call f4
f4 16 call f5
f5 16 call f6
f6 16 call f7
f7 16 call f8
f8 16 call end
end 1000
main 0 call f1
f1 16 nosplit call f2
f2 16 nosplit call f3
f3 16 nosplit call f4
f4 16 nosplit call f5
f5 16 nosplit call f6
f6 16 nosplit call f7
f7 16 nosplit call f8
f8 16 nosplit call end
end 1000
REJECT
# Test cases near the 128-byte limit.
# Ordinary stack split frame is always okay.
main 112
main 116
main 120
main 124
main 128
main 132
main 136
# A nosplit leaf can use the whole 128-CallSize bytes available on entry.
# (CallSize is 32 on ppc64)
main 96 nosplit
main 100 nosplit; REJECT ppc64 ppc64le
main 104 nosplit; REJECT ppc64 ppc64le
main 108 nosplit; REJECT ppc64 ppc64le
main 112 nosplit; REJECT ppc64 ppc64le
main 116 nosplit; REJECT ppc64 ppc64le
main 120 nosplit; REJECT ppc64 ppc64le
main 124 nosplit; REJECT ppc64 ppc64le
main 128 nosplit; REJECT
main 132 nosplit; REJECT
main 136 nosplit; REJECT
# Calling a nosplit function from a nosplit function requires
# having room for the saved caller PC and the called frame.
# Because ARM doesn't save LR in the leaf, it gets an extra 4 bytes.
# Because arm64 doesn't save LR in the leaf, it gets an extra 8 bytes.
# ppc64 doesn't save LR in the leaf, but CallSize is 32, so it gets 24 fewer bytes than amd64.
main 96 nosplit call f; f 0 nosplit
main 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
main 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 nosplit; REJECT
main 132 nosplit call f; f 0 nosplit; REJECT
main 136 nosplit call f; f 0 nosplit; REJECT
# Calling a splitting function from a nosplit function requires
# having room for the saved caller PC of the call but also the
# saved caller PC for the call to morestack.
# RISC architectures differ in the same way as before.
main 96 nosplit call f; f 0 call f
main 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 call f; REJECT
main 132 nosplit call f; f 0 call f; REJECT
main 136 nosplit call f; f 0 call f; REJECT
# Indirect calls are assumed to be splitting functions.
main 96 nosplit callind
main 100 nosplit callind; REJECT ppc64 ppc64le
main 104 nosplit callind; REJECT ppc64 ppc64le
main 108 nosplit callind; REJECT ppc64 ppc64le
main 112 nosplit callind; REJECT ppc64 ppc64le amd64
main 116 nosplit callind; REJECT ppc64 ppc64le amd64
main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 124 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 128 nosplit callind; REJECT
main 132 nosplit callind; REJECT
main 136 nosplit callind; REJECT
# Issue 7623
main 0 call f; f 112
main 0 call f; f 116
main 0 call f; f 120
main 0 call f; f 124
main 0 call f; f 128
main 0 call f; f 132
main 0 call f; f 136
`
// Regular expressions for parsing the `tests` script above.
var (
	commentRE = regexp.MustCompile(`(?m)^#.*`) // whole-line # comments, stripped before parsing
	rejectRE = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`) // splits a stanza into body and optional REJECT arch list
	lineRE = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`) // function name, frame size, optional nosplit, body
	callRE = regexp.MustCompile(`\bcall (\w+)\b`) // 'call x' shorthand for CALL x(SB)
	callindRE = regexp.MustCompile(`\bcallind\b`) // shorthand for an indirect register call
)
func main() {
goarch := os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
// Frame pointer is on by default now.
// golang.org/issue/18317.
return
version, err := exec.Command("go", "tool", "compile", "-V").Output()
if err != nil {
bug()
fmt.Printf("running go tool compile -V: %v\n", err)
return
}
if strings.Contains(string(version), "framepointer") {
// Skip this test if GOEXPERIMENT=framepointer
return
}
dir, err := ioutil.TempDir("", "go-test-nosplit")
if err != nil {
bug()
fmt.Printf("creating temp dir: %v\n", err)
return
}
defer os.RemoveAll(dir)
tests = strings.Replace(tests, "\t", " ", -1)
tests = commentRE.ReplaceAllString(tests, "")
nok := 0
nfail := 0
TestCases:
for len(tests) > 0 {
var stanza string
i := strings.Index(tests, "\nmain ")
if i < 0 {
stanza, tests = tests, ""
} else {
stanza, tests = tests[:i], tests[i+1:]
}
m := rejectRE.FindStringSubmatch(stanza)
if m == nil {
bug()
fmt.Printf("invalid stanza:\n\t%s\n", indent(stanza))
continue
}
lines := strings.TrimSpace(m[1])
reject := false
if m[2] != "" {
if strings.TrimSpace(m[4]) == "" {
reject = true
} else {
for _, rej := range strings.Fields(m[4]) {
if rej == goarch {
reject = true
}
}
}
}
if lines == "" && !reject {
continue
}
var gobuf bytes.Buffer
fmt.Fprintf(&gobuf, "package main\n")
var buf bytes.Buffer
ptrSize := 4
switch goarch {
case "mips", "mipsle":
fmt.Fprintf(&buf, "#define CALL JAL\n#define REGISTER (R0)\n")
case "mips64", "mips64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL JAL\n#define REGISTER (R0)\n")
case "ppc64", "ppc64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (CTR)\n")
case "arm":
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
case "arm64":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
case "amd64":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER AX\n")
case "s390x":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER R10\n")
default:
fmt.Fprintf(&buf, "#define REGISTER AX\n")
}
for _, line := range strings.Split(lines, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
for i, subline := range strings.Split(line, ";") {
subline = strings.TrimSpace(subline)
if subline == "" {
continue
}
m := lineRE.FindStringSubmatch(subline)
if m == nil {
bug()
fmt.Printf("invalid function line: %s\n", subline)
continue TestCases
}
name := m[1]
size, _ := strconv.Atoi(m[2])
// The limit was originally 128 but is now 592.
// Instead of rewriting the test cases above, adjust
// the first stack frame to use up the extra bytes.
if i == 0 {
size += (880 - 128) - 128
// Noopt builds have a larger stackguard.
// See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier
// This increase is included in objabi.StackGuard
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
size += 880
}
}
}
if size%ptrSize == 4 || goarch == "arm64" && size != 0 && (size+8)%16 != 0 {
continue TestCases
}
nosplit := m[3]
body := m[4]
if nosplit != "" {
nosplit = ",7"
} else {
nosplit = ",0"
}
body = callRE.ReplaceAllString(body, "CALL ·$1(SB);")
body = callindRE.ReplaceAllString(body, "CALL REGISTER;")
fmt.Fprintf(&gobuf, "func %s()\n", name)
fmt.Fprintf(&buf, "TEXT ·%s(SB)%s,$%d-0\n\t%s\n\tRET\n\n", name, nosplit, size, body)
}
}
if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
cmd := exec.Command("go", "build")
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err == nil {
nok++
if reject {
bug()
fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
}
} else {
nfail++
if !reject {
bug()
fmt.Printf("rejected incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
fmt.Printf("\n\tlinker output:\n\t%s\n", indent(string(output)))
}
}
}
if !bugged && (nok == 0 || nfail == 0) {
bug()
fmt.Printf("not enough test cases run\n")
}
}
// indent shifts every line after the first one tab to the right so that
// multi-line text lines up under the callers' "\t"-prefixed Printf output.
func indent(s string) string {
	parts := strings.Split(s, "\n")
	return strings.Join(parts, "\n\t")
}
// bugged records whether the BUG marker has already been printed.
var bugged = false

// bug prints the "BUG" marker exactly once, however many failures occur.
func bug() {
	if bugged {
		return
	}
	bugged = true
	fmt.Printf("BUG\n")
}
test: fix and re-enable nosplit.go
The test was skipped because it did not work on AMD64 with
frame pointer enabled, and accidentally skipped on other
architectures. Now frame pointer is the default on AMD64.
Update the test to work with frame pointer. Now the test
is skipped only when frame pointer is NOT enabled on AMD64.
Fixes #18317.
Change-Id: I724cb6874e562f16e67ce5f389a1d032a2003115
Reviewed-on: https://go-review.googlesource.com/68610
Run-TryBot: Cherry Zhang <d62e63aa42ce272d7b6a5055d97e942b33a34679@google.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Russ Cox <5ad239cb8a44f659eaaee0aa1ea5b94947abe557@golang.org>
// +build !nacl
// run
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var tests = `
# These are test cases for the linker analysis that detects chains of
# nosplit functions that would cause a stack overflow.
#
# Lines beginning with # are comments.
#
# Each test case describes a sequence of functions, one per line.
# Each function definition is the function name, then the frame size,
# then optionally the keyword 'nosplit', then the body of the function.
# The body is assembly code, with some shorthands.
# The shorthand 'call x' stands for CALL x(SB).
# The shorthand 'callind' stands for 'CALL R0', where R0 is a register.
# Each test case must define a function named main, and it must be first.
# That is, a line beginning "main " indicates the start of a new test case.
# Within a stanza, ; can be used instead of \n to separate lines.
#
# After the function definition, the test case ends with an optional
# REJECT line, specifying the architectures on which the case should
# be rejected. "REJECT" without any architectures means reject on all architectures.
# The linker should accept the test case on systems not explicitly rejected.
#
# 64-bit systems do not attempt to execute test cases with frame sizes
# that are only 32-bit aligned.
# Ordinary function should work
main 0
# Large frame marked nosplit is always wrong.
main 10000 nosplit
REJECT
# Calling a large frame is okay.
main 0 call big
big 10000
# But not if the frame is nosplit.
main 0 call big
big 10000 nosplit
REJECT
# Recursion is okay.
main 0 call main
# Recursive nosplit runs out of space.
main 0 nosplit call main
REJECT
# Chains of ordinary functions okay.
main 0 call f1
f1 80 call f2
f2 80
# Chains of nosplit must fit in the stack limit, 128 bytes.
main 0 call f1
f1 80 nosplit call f2
f2 80 nosplit
REJECT
# Larger chains.
main 0 call f1
f1 16 call f2
f2 16 call f3
f3 16 call f4
f4 16 call f5
f5 16 call f6
f6 16 call f7
f7 16 call f8
f8 16 call end
end 1000
main 0 call f1
f1 16 nosplit call f2
f2 16 nosplit call f3
f3 16 nosplit call f4
f4 16 nosplit call f5
f5 16 nosplit call f6
f6 16 nosplit call f7
f7 16 nosplit call f8
f8 16 nosplit call end
end 1000
REJECT
# Test cases near the 128-byte limit.
# Ordinary stack split frame is always okay.
main 112
main 116
main 120
main 124
main 128
main 132
main 136
# A nosplit leaf can use the whole 128-CallSize bytes available on entry.
# (CallSize is 32 on ppc64, 8 on amd64 for frame pointer.)
main 96 nosplit
main 100 nosplit; REJECT ppc64 ppc64le
main 104 nosplit; REJECT ppc64 ppc64le
main 108 nosplit; REJECT ppc64 ppc64le
main 112 nosplit; REJECT ppc64 ppc64le
main 116 nosplit; REJECT ppc64 ppc64le
main 120 nosplit; REJECT ppc64 ppc64le amd64
main 124 nosplit; REJECT ppc64 ppc64le amd64
main 128 nosplit; REJECT
main 132 nosplit; REJECT
main 136 nosplit; REJECT
# Calling a nosplit function from a nosplit function requires
# having room for the saved caller PC and the called frame.
# Because ARM doesn't save LR in the leaf, it gets an extra 4 bytes.
# Because arm64 doesn't save LR in the leaf, it gets an extra 8 bytes.
# ppc64 doesn't save LR in the leaf, but CallSize is 32, so it gets 24 bytes.
# Because AMD64 uses frame pointer, it has 8 fewer bytes.
main 96 nosplit call f; f 0 nosplit
main 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
main 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
main 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 nosplit; REJECT
main 132 nosplit call f; f 0 nosplit; REJECT
main 136 nosplit call f; f 0 nosplit; REJECT
# Calling a splitting function from a nosplit function requires
# having room for the saved caller PC of the call but also the
# saved caller PC for the call to morestack.
# Architectures differ in the same way as before.
main 96 nosplit call f; f 0 call f
main 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 call f; REJECT
main 132 nosplit call f; f 0 call f; REJECT
main 136 nosplit call f; f 0 call f; REJECT
# Indirect calls are assumed to be splitting functions.
main 96 nosplit callind
main 100 nosplit callind; REJECT ppc64 ppc64le
main 104 nosplit callind; REJECT ppc64 ppc64le amd64
main 108 nosplit callind; REJECT ppc64 ppc64le amd64
main 112 nosplit callind; REJECT ppc64 ppc64le amd64
main 116 nosplit callind; REJECT ppc64 ppc64le amd64
main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 124 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 128 nosplit callind; REJECT
main 132 nosplit callind; REJECT
main 136 nosplit callind; REJECT
# Issue 7623
main 0 call f; f 112
main 0 call f; f 116
main 0 call f; f 120
main 0 call f; f 124
main 0 call f; f 128
main 0 call f; f 132
main 0 call f; f 136
`
// Regular expressions for parsing the `tests` script above.
var (
	commentRE = regexp.MustCompile(`(?m)^#.*`) // whole-line # comments, stripped before parsing
	rejectRE = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`) // splits a stanza into body and optional REJECT arch list
	lineRE = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`) // function name, frame size, optional nosplit, body
	callRE = regexp.MustCompile(`\bcall (\w+)\b`) // 'call x' shorthand for CALL x(SB)
	callindRE = regexp.MustCompile(`\bcallind\b`) // shorthand for an indirect register call
)
func main() {
goarch := os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
version, err := exec.Command("go", "tool", "compile", "-V").Output()
if err != nil {
bug()
fmt.Printf("running go tool compile -V: %v\n", err)
return
}
if s := string(version); goarch == "amd64" && strings.Contains(s, "X:") && !strings.Contains(s, "framepointer") {
// Skip this test if framepointer is NOT enabled on AMD64
return
}
dir, err := ioutil.TempDir("", "go-test-nosplit")
if err != nil {
bug()
fmt.Printf("creating temp dir: %v\n", err)
return
}
defer os.RemoveAll(dir)
tests = strings.Replace(tests, "\t", " ", -1)
tests = commentRE.ReplaceAllString(tests, "")
nok := 0
nfail := 0
TestCases:
for len(tests) > 0 {
var stanza string
i := strings.Index(tests, "\nmain ")
if i < 0 {
stanza, tests = tests, ""
} else {
stanza, tests = tests[:i], tests[i+1:]
}
m := rejectRE.FindStringSubmatch(stanza)
if m == nil {
bug()
fmt.Printf("invalid stanza:\n\t%s\n", indent(stanza))
continue
}
lines := strings.TrimSpace(m[1])
reject := false
if m[2] != "" {
if strings.TrimSpace(m[4]) == "" {
reject = true
} else {
for _, rej := range strings.Fields(m[4]) {
if rej == goarch {
reject = true
}
}
}
}
if lines == "" && !reject {
continue
}
var gobuf bytes.Buffer
fmt.Fprintf(&gobuf, "package main\n")
var buf bytes.Buffer
ptrSize := 4
switch goarch {
case "mips", "mipsle":
fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
case "mips64", "mips64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
case "ppc64", "ppc64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER (CTR)\n")
case "arm":
fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
case "arm64":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
case "amd64":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER AX\n")
case "s390x":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER R10\n")
default:
fmt.Fprintf(&buf, "#define REGISTER AX\n")
}
for _, line := range strings.Split(lines, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
for i, subline := range strings.Split(line, ";") {
subline = strings.TrimSpace(subline)
if subline == "" {
continue
}
m := lineRE.FindStringSubmatch(subline)
if m == nil {
bug()
fmt.Printf("invalid function line: %s\n", subline)
continue TestCases
}
name := m[1]
size, _ := strconv.Atoi(m[2])
// The limit was originally 128 but is now 752 (880-128).
// Instead of rewriting the test cases above, adjust
// the first stack frame to use up the extra bytes.
if i == 0 {
size += (880 - 128) - 128
// Noopt builds have a larger stackguard.
// See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier
// This increase is included in objabi.StackGuard
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
size += 880
}
}
}
if size%ptrSize == 4 || goarch == "arm64" && size != 0 && (size+8)%16 != 0 {
continue TestCases
}
nosplit := m[3]
body := m[4]
if nosplit != "" {
nosplit = ",7"
} else {
nosplit = ",0"
}
body = callRE.ReplaceAllString(body, "CALL ·$1(SB);")
body = callindRE.ReplaceAllString(body, "CALL REGISTER;")
fmt.Fprintf(&gobuf, "func %s()\n", name)
fmt.Fprintf(&buf, "TEXT ·%s(SB)%s,$%d-0\n\t%s\n\tRET\n\n", name, nosplit, size, body)
}
}
if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
cmd := exec.Command("go", "build")
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err == nil {
nok++
if reject {
bug()
fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
}
} else {
nfail++
if !reject {
bug()
fmt.Printf("rejected incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
fmt.Printf("\n\tlinker output:\n\t%s\n", indent(string(output)))
}
}
}
if !bugged && (nok == 0 || nfail == 0) {
bug()
fmt.Printf("not enough test cases run\n")
}
}
// indent prefixes each embedded newline with a tab so that multi-line text
// stays aligned under the caller's leading "\t".
func indent(s string) string {
	return strings.Join(strings.Split(s, "\n"), "\n\t")
}
// bugged is true once the BUG banner has been emitted.
var bugged = false

// bug emits the "BUG" banner on the first call and is a no-op afterwards.
func bug() {
	if bugged {
		return
	}
	bugged = true
	fmt.Printf("BUG\n")
}
|
package neurgo
import (
"testing"
"github.com/couchbaselabs/go.assert"
"sync"
"log"
)
// Wiretap is a test-only node connected after the actuator so tests can
// observe values leaving the network.
type Wiretap struct {
	Node
}

// Injector is a test-only node connected ahead of a sensor so tests can
// push input values into the network.
type Injector struct {
	Node
}
// TestConnectBidirectional checks that connecting two nodes creates exactly
// one outbound entry on the source and one inbound entry on the target with
// a shared non-nil channel, and that weights are recorded only on weighted
// connections (the actuator's inbound side has none).
func TestConnectBidirectional(t *testing.T) {
	neuron := &Neuron{}
	sensor := &Sensor{}
	weights := []float64{20,20,20,20,20}
	sensor.ConnectBidirectionalWeighted(neuron, weights)
	// one connection on each side of the weighted link
	assert.Equals(t, len(sensor.outbound), 1)
	assert.Equals(t, len(neuron.inbound), 1)
	// both ends must share a live channel
	assert.True(t, neuron.inbound[0].channel != nil)
	assert.True(t, sensor.outbound[0].channel != nil)
	// weights are stored verbatim on the receiving side
	assert.Equals(t, len(neuron.inbound[0].weights), len(weights))
	assert.Equals(t, neuron.inbound[0].weights[0], weights[0])
	actuator := &Actuator{}
	neuron.ConnectBidirectional(actuator)
	assert.Equals(t, len(neuron.outbound), 1)
	assert.Equals(t, len(actuator.inbound), 1)
	// unweighted connection: no weights recorded
	assert.Equals(t, len(actuator.inbound[0].weights), 0)
}
func TestNetwork(t *testing.T) {
// create network nodes
neuron1 := &Neuron{Bias: 10, ActivationFunction: identity_activation}
neuron2 := &Neuron{Bias: 10, ActivationFunction: identity_activation}
sensor := &Sensor{}
actuator := &Actuator{}
wiretap := &Wiretap{}
injector := &Injector{}
// connect nodes together
injector.ConnectBidirectional(sensor)
weights := []float64{20,20,20,20,20}
sensor.ConnectBidirectionalWeighted(neuron1, weights)
sensor.ConnectBidirectionalWeighted(neuron2, weights)
neuron1.ConnectBidirectional(actuator)
neuron2.ConnectBidirectional(actuator)
actuator.ConnectBidirectional(wiretap)
// spinup node goroutines
signallers := []Signaller{neuron1, neuron2, sensor, actuator}
for _, signaller := range signallers {
go Run(signaller)
}
var wg sync.WaitGroup
wg.Add(1)
wg.Add(1)
// inject a value into sensor
go func() {
testValue := []float64{1,1,1,1,1}
injector.outbound[0].channel <- testValue
wg.Done()
}()
// read the value from wiretap (which taps into actuator)
go func() {
value := <- wiretap.inbound[0].channel
assert.Equals(t, len(value), 2)
assert.Equals(t, value[0], float64(110))
assert.Equals(t, value[1], float64(110))
wg.Done()
}()
wg.Wait()
}
// TestXnorNetwork wires up the classic hand-constructed XNOR network
// (2 linear inputs -> 2 sigmoid hidden gates -> 1 sigmoid output) and
// verifies that the input pair (0, 1) yields an output near 0.
//
// Gate construction, using the standard sigmoid-gate recipe:
//   hidden_neuron1 = AND(x1, x2):         bias -30, weights  20,  20
//   hidden_neuron2 = AND(NOT x1, NOT x2): bias  10, weights -20, -20
//   output_neuron  = OR(h1, h2):          bias -10, weights  20,  20
func TestXnorNetwork(t *testing.T) {
	// create network nodes
	input_neuron1 := &Neuron{Bias: 0, ActivationFunction: identity_activation}
	input_neuron2 := &Neuron{Bias: 0, ActivationFunction: identity_activation}
	hidden_neuron1 := &Neuron{Bias: -30, ActivationFunction: sigmoid}
	hidden_neuron2 := &Neuron{Bias: 10, ActivationFunction: sigmoid}
	output_neuron := &Neuron{Bias: -10, ActivationFunction: sigmoid}
	sensor1 := &Sensor{}
	sensor2 := &Sensor{}
	actuator := &Actuator{}
	wiretap := &Wiretap{}
	injector1 := &Injector{}
	injector2 := &Injector{}
	// connect nodes together
	injector1.ConnectBidirectional(sensor1)
	injector2.ConnectBidirectional(sensor2)
	sensor1.ConnectBidirectionalWeighted(input_neuron1, []float64{1})
	sensor2.ConnectBidirectionalWeighted(input_neuron2, []float64{1})
	input_neuron1.ConnectBidirectionalWeighted(hidden_neuron1, []float64{20})
	input_neuron2.ConnectBidirectionalWeighted(hidden_neuron1, []float64{20})
	input_neuron1.ConnectBidirectionalWeighted(hidden_neuron2, []float64{-20})
	// BUG FIX: this weight was -10. The NOT-AND gate requires -20 on BOTH
	// inputs; with -10, XNOR(0,1) = sigmoid(0) = 0.5 instead of ~0 and the
	// assertion below fails.
	input_neuron2.ConnectBidirectionalWeighted(hidden_neuron2, []float64{-20})
	hidden_neuron1.ConnectBidirectionalWeighted(output_neuron, []float64{20})
	hidden_neuron2.ConnectBidirectionalWeighted(output_neuron, []float64{20})
	output_neuron.ConnectBidirectional(actuator)
	actuator.ConnectBidirectional(wiretap)
	// spinup node goroutines
	signallers := []Signaller{input_neuron1, input_neuron2, hidden_neuron1, hidden_neuron2, output_neuron, sensor1, sensor2, actuator}
	for _, signaller := range signallers {
		go Run(signaller)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	wg.Add(1)
	// inject a value into sensor
	go func() {
		testValue1 := []float64{0}
		injector1.outbound[0].channel <- testValue1
		testValue2 := []float64{1}
		injector2.outbound[0].channel <- testValue2
		//testValue1b := []float64{1}
		//injector1.outbound[0].channel <- testValue1b
		//testValue2b := []float64{1}
		//injector2.outbound[0].channel <- testValue2b
		wg.Done()
	}()
	// read the value from wiretap (which taps into actuator)
	go func() {
		resultVector := <- wiretap.inbound[0].channel
		result := resultVector[0]
		// XNOR(0, 1) must be ~0
		assert.True(t, equalsWithMaxDelta(result, 0.00000, .01))
		log.Printf("Xnor - Got value from wiretap: %v", result)
		//resultVector2 := <- wiretap.inbound[0].channel
		//result2 := resultVector2[0]
		//log.Printf("Xnor - Got value2 from wiretap: %v", result2)
		wg.Done()
	}()
	wg.Wait()
}
// identity_activation returns its argument unchanged; it serves as the
// activation function for linear (pass-through) neurons in these tests.
func identity_activation(input float64) float64 {
	return input
}
Was using the wrong weight (couldn't read the scribbled handwriting from the Coursera lecture)
package neurgo
import (
"testing"
"github.com/couchbaselabs/go.assert"
"sync"
"log"
)
// Wiretap is a test-only node connected after the actuator so tests can
// observe values leaving the network.
type Wiretap struct {
	Node
}

// Injector is a test-only node connected ahead of a sensor so tests can
// push input values into the network.
type Injector struct {
	Node
}
func TestConnectBidirectional(t *testing.T) {
neuron := &Neuron{}
sensor := &Sensor{}
weights := []float64{20,20,20,20,20}
sensor.ConnectBidirectionalWeighted(neuron, weights)
assert.Equals(t, len(sensor.outbound), 1)
assert.Equals(t, len(neuron.inbound), 1)
assert.True(t, neuron.inbound[0].channel != nil)
assert.True(t, sensor.outbound[0].channel != nil)
assert.Equals(t, len(neuron.inbound[0].weights), len(weights))
assert.Equals(t, neuron.inbound[0].weights[0], weights[0])
actuator := &Actuator{}
neuron.ConnectBidirectional(actuator)
assert.Equals(t, len(neuron.outbound), 1)
assert.Equals(t, len(actuator.inbound), 1)
assert.Equals(t, len(actuator.inbound[0].weights), 0)
}
func TestNetwork(t *testing.T) {
// create network nodes
neuron1 := &Neuron{Bias: 10, ActivationFunction: identity_activation}
neuron2 := &Neuron{Bias: 10, ActivationFunction: identity_activation}
sensor := &Sensor{}
actuator := &Actuator{}
wiretap := &Wiretap{}
injector := &Injector{}
// connect nodes together
injector.ConnectBidirectional(sensor)
weights := []float64{20,20,20,20,20}
sensor.ConnectBidirectionalWeighted(neuron1, weights)
sensor.ConnectBidirectionalWeighted(neuron2, weights)
neuron1.ConnectBidirectional(actuator)
neuron2.ConnectBidirectional(actuator)
actuator.ConnectBidirectional(wiretap)
// spinup node goroutines
signallers := []Signaller{neuron1, neuron2, sensor, actuator}
for _, signaller := range signallers {
go Run(signaller)
}
var wg sync.WaitGroup
wg.Add(1)
wg.Add(1)
// inject a value into sensor
go func() {
testValue := []float64{1,1,1,1,1}
injector.outbound[0].channel <- testValue
wg.Done()
}()
// read the value from wiretap (which taps into actuator)
go func() {
value := <- wiretap.inbound[0].channel
assert.Equals(t, len(value), 2)
assert.Equals(t, value[0], float64(110))
assert.Equals(t, value[1], float64(110))
wg.Done()
}()
wg.Wait()
}
func TestXnorNetwork(t *testing.T) {
// create network nodes
input_neuron1 := &Neuron{Bias: 0, ActivationFunction: identity_activation}
input_neuron2 := &Neuron{Bias: 0, ActivationFunction: identity_activation}
hidden_neuron1 := &Neuron{Bias: -30, ActivationFunction: sigmoid}
hidden_neuron2 := &Neuron{Bias: 10, ActivationFunction: sigmoid}
output_neuron := &Neuron{Bias: -10, ActivationFunction: sigmoid}
sensor1 := &Sensor{}
sensor2 := &Sensor{}
actuator := &Actuator{}
wiretap := &Wiretap{}
injector1 := &Injector{}
injector2 := &Injector{}
// connect nodes together
injector1.ConnectBidirectional(sensor1)
injector2.ConnectBidirectional(sensor2)
sensor1.ConnectBidirectionalWeighted(input_neuron1, []float64{1})
sensor2.ConnectBidirectionalWeighted(input_neuron2, []float64{1})
input_neuron1.ConnectBidirectionalWeighted(hidden_neuron1, []float64{20})
input_neuron2.ConnectBidirectionalWeighted(hidden_neuron1, []float64{20})
input_neuron1.ConnectBidirectionalWeighted(hidden_neuron2, []float64{-20})
input_neuron2.ConnectBidirectionalWeighted(hidden_neuron2, []float64{-20})
hidden_neuron1.ConnectBidirectionalWeighted(output_neuron, []float64{20})
hidden_neuron2.ConnectBidirectionalWeighted(output_neuron, []float64{20})
output_neuron.ConnectBidirectional(actuator)
actuator.ConnectBidirectional(wiretap)
// spinup node goroutines
signallers := []Signaller{input_neuron1, input_neuron2, hidden_neuron1, hidden_neuron2, output_neuron, sensor1, sensor2, actuator}
for _, signaller := range signallers {
go Run(signaller)
}
var wg sync.WaitGroup
wg.Add(1)
wg.Add(1)
// inject a value into sensor
go func() {
testValue1 := []float64{0}
injector1.outbound[0].channel <- testValue1
testValue2 := []float64{1}
injector2.outbound[0].channel <- testValue2
//testValue1b := []float64{1}
//injector1.outbound[0].channel <- testValue1b
//testValue2b := []float64{1}
//injector2.outbound[0].channel <- testValue2b
wg.Done()
}()
// read the value from wiretap (which taps into actuator)
go func() {
resultVector := <- wiretap.inbound[0].channel
result := resultVector[0]
assert.True(t, equalsWithMaxDelta(result, 0.00000, .01))
log.Printf("Xnor - Got value from wiretap: %v", result)
//resultVector2 := <- wiretap.inbound[0].channel
//result2 := resultVector2[0]
//log.Printf("Xnor - Got value2 from wiretap: %v", result2)
wg.Done()
}()
wg.Wait()
}
// identity_activation is the identity function; linear (pass-through)
// neurons in these tests use it as their activation.
func identity_activation(v float64) float64 {
	return v
}
|
package open
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"golang.org/x/net/context"
"github.com/crackcomm/crawl"
"github.com/satori/go.uuid"
"github.com/skratchdot/open-golang/open"
)
// Open - Opens crawl response in browser.
// The response body is written to a uniquely named temporary HTML file and
// the system default browser is launched on a file:// URL pointing at it.
func Open(resp *crawl.Response) error {
	// UUID-based name avoids collisions between concurrently opened responses.
	fname := filepath.Join(os.TempDir(), fmt.Sprintf("%s.html", uuid.NewV4().String()))
	body, err := resp.ReadBody()
	if err != nil {
		return err
	}
	// os.ModePerm: the file is a throwaway temp artifact; permissions are
	// not sensitive here.
	if err := ioutil.WriteFile(fname, body, os.ModePerm); err != nil {
		return err
	}
	return open.Start(fmt.Sprintf("file://%s", fname))
}
// Handler - Crawl handler that opens crawl response in browser.
// It adapts Open to the crawl handler signature; the context is unused.
func Handler(_ context.Context, resp *crawl.Response) error {
	return Open(resp)
}
open: use utility function
package open
import (
"fmt"
"os"
"path/filepath"
"golang.org/x/net/context"
"github.com/crackcomm/crawl"
"github.com/satori/go.uuid"
"github.com/skratchdot/open-golang/open"
)
// Open - Opens crawl response in browser.
// The response is saved to a uniquely named temporary HTML file via
// crawl.WriteResponseFile, then handed to the OS default browser as a
// file:// URL.
func Open(resp *crawl.Response) error {
	// UUID-based name avoids collisions between concurrently opened responses.
	fname := filepath.Join(os.TempDir(), fmt.Sprintf("%s.html", uuid.NewV4().String()))
	if err := crawl.WriteResponseFile(resp, fname); err != nil {
		return err
	}
	return open.Start(fmt.Sprintf("file://%s", fname))
}
// Handler - Crawl handler that opens crawl response in browser.
// It adapts Open to the crawl handler signature; the context is unused.
func Handler(_ context.Context, resp *crawl.Response) error {
	return Open(resp)
}
|
// errchk $G -e $D/$F.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// main uses several predeclared identifiers that the const block below has
// shadowed with ordinary constants; each use must therefore fail to compile.
// The trailing ERROR comments are errchk directives — they must not be edited.
func main() {
	var n byte;	// ERROR "not a type"
	var y = float(0);	// ERROR "cannot call"
	const (
		a = 1+iota;	// ERROR "string"
	)
}
// Shadow every predeclared identifier — basic types, true/false, iota, nil,
// and the builtin functions — with ordinary constants. The declarations
// themselves are legal; the errors are triggered in main (above), where the
// identifiers are used with their original meanings.
// (panicln was a predeclared function in 2009-era Go.)
const (
	bool = 1;
	byte = 2;
	float = 3;
	float32 = 4;
	float64 = 5;
	int = 6;
	int8 = 7;
	int16 = 8;
	int32 = 9;
	int64 = 10;
	uint = 11;
	uint8 = 12;
	uint16 = 13;
	uint32 = 14;
	uint64 = 15;
	uintptr = 16;
	true = 17;
	false = 18;
	iota = "abc";
	nil = 20;
	cap = 21;
	len = 22;
	make = 23;
	new = 24;
	panic = 25;
	panicln = 26;
	print = 27;
	println = 28;
)
Recognize gccgo error messages.
rename1.go:10:8: error: expected type
rename1.go:11:10: error: expected function
rename1.go:13:8: error: incompatible types in binary expression
R=rsc
http://go/go-review/1015013
// errchk $G -e $D/$F.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func main() {
var n byte; // ERROR "not a type|expected type"
var y = float(0); // ERROR "cannot call|expected function"
const (
a = 1+iota; // ERROR "string|incompatible types"
)
}
const (
bool = 1;
byte = 2;
float = 3;
float32 = 4;
float64 = 5;
int = 6;
int8 = 7;
int16 = 8;
int32 = 9;
int64 = 10;
uint = 11;
uint8 = 12;
uint16 = 13;
uint32 = 14;
uint64 = 15;
uintptr = 16;
true = 17;
false = 18;
iota = "abc";
nil = 20;
cap = 21;
len = 22;
make = 23;
new = 24;
panic = 25;
panicln = 26;
print = 27;
println = 28;
)
|
// Copyright 2015 Dorival de Moraes Pedroso. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package goga
import (
"math"
"math/rand"
"sort"
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/rnd"
"github.com/cpmech/gosl/utl"
)
// OpsData holds data for crossover and mutation operators
type OpsData struct {
// constants
Pc float64 // probability of crossover
Pm float64 // probability of mutation
Ncuts int // number of cuts during crossover
Nchanges int // number of changes during mutation
Tmax float64 // max number of generations
MwiczB float64 // Michalewicz' power coefficient
BlxAlp float64 // BLX-α coefficient
Mmax float64 // multiplier for mutation
Cuts []int // specified cuts for crossover. can be <nil>
OrdSti []int // {start, end, insertPoint}. can be <nil>
Xrange [][]float64 // [ngenes][2] genes minimum and maximum values
// crossover functions
CxInt CxIntFunc_t // int crossover function
CxFlt CxFltFunc_t // flt crossover function
CxStr CxStrFunc_t // str crossover function
CxKey CxKeyFunc_t // key crossover function
CxByt CxBytFunc_t // byt crossover function
CxFun CxFunFunc_t // fun crossover function
// mutation functions
MtInt MtIntFunc_t // int mutation function
MtFlt MtFltFunc_t // flt mutation function
MtStr MtStrFunc_t // str mutation function
MtKey MtKeyFunc_t // key mutation function
MtByt MtBytFunc_t // byt mutation function
MtFun MtFunFunc_t // fun mutation function
}
// SetDefault sets default values
func (o *OpsData) SetDefault() {
// constants
o.Pc = 0.8
o.Pm = 0.01
o.Ncuts = 2
o.Nchanges = 1
o.MwiczB = 2.0
o.BlxAlp = 0.5
o.Mmax = 2
// crossover functions
o.CxInt = IntCrossover
o.CxFlt = FltCrossoverBlx
o.CxStr = StrCrossover
o.CxKey = KeyCrossover
o.CxByt = BytCrossover
o.CxFun = FunCrossover
// mutation functions
o.MtInt = IntMutation
o.MtFlt = FltMutationMwicz
o.MtStr = StrMutation
o.MtKey = KeyMutation
o.MtByt = BytMutation
o.MtFun = FunMutation
}
// CalcDerived sets derived quantities
func (o *OpsData) CalcDerived(Tf int, xrange [][]float64) {
o.Tmax = float64(Tf)
o.Xrange = xrange
}
// MwiczDelta computes Michalewicz' Δ function
func (o *OpsData) MwiczDelta(t, x float64) float64 {
r := rand.Float64()
return (1.0 - math.Pow(r, math.Pow(1.0-t/o.Tmax, o.MwiczB))) * x
}
// EnforceRange makes sure x is within given range, clamping it to
// the gene's [min, max] interval taken from o.Xrange[igene].
func (o *OpsData) EnforceRange(igene int, x float64) float64 {
	lo, hi := o.Xrange[igene][0], o.Xrange[igene][1]
	switch {
	case x < lo:
		return lo
	case x > hi:
		return hi
	}
	return x
}
// auxiliary ///////////////////////////////////////////////////////////////////////////////////////
// SimpleChromo splits 'genes' into 'nbases' unequal parts
// Input:
// genes -- a slice whose size equals to the number of genes
// nbases -- number of bases used to split 'genes'
// Output:
// chromo -- the chromosome
//
// Example:
//
// genes = [0, 1, 2, ... nbases-1, 0, 1, 2, ... nbases-1]
// \___________________/ \___________________/
// gene # 0 gene # 1
//
func SimpleChromo(genes []float64, nbases int) (chromo []float64) {
ngenes := len(genes)
chromo = make([]float64, ngenes*nbases)
values := make([]float64, nbases)
var sumv float64
for i, g := range genes {
rnd.Float64s(values, 0, 1)
sumv = la.VecAccum(values)
for j := 0; j < nbases; j++ {
chromo[i*nbases+j] = g * values[j] / sumv
}
}
return
}
// GenerateCxEnds randomly computes the end positions of cuts in chromosomes
// Input:
// size -- size of chromosome
// ncuts -- number of cuts to be used, unless cuts != nil
// cuts -- cut positions. can be nil => use ncuts instead
// Output:
// ends -- end positions where the last one equals size
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// ↑ ↑ ↑ cuts = [1, 5]
// 1 5 8 ends = [1, 5, 8]
func GenerateCxEnds(size, ncuts int, cuts []int) (ends []int) {
// handle small slices
if size < 2 {
return
}
if size == 2 {
return []int{1, size}
}
// cuts slice is given
if len(cuts) > 0 {
ncuts = len(cuts)
ends = make([]int, ncuts+1)
ends[ncuts] = size
for i, cut := range cuts {
if cut < 1 || cut >= size {
chk.Panic("cut=%d is outside the allowed range: 1 ≤ cut ≤ size-1", cut)
}
if i > 0 {
if cut == cuts[i-1] {
chk.Panic("repeated cut values are not allowed: cuts=%v", cuts)
}
}
ends[i] = cut
}
sort.Ints(ends)
return
}
// randomly generate cuts
if ncuts < 1 {
ncuts = 1
}
if ncuts >= size {
ncuts = size - 1
}
ends = make([]int, ncuts+1)
ends[ncuts] = size
// pool of values for selections
pool := rnd.IntGetUniqueN(1, size, ncuts)
sort.Ints(pool)
for i := 0; i < ncuts; i++ {
ends[i] = pool[i]
}
return
}
// crossover ///////////////////////////////////////////////////////////////////////////////////////
// IntCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func IntCrossover(a, b, A, B []int, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// IntOrdCrossover performs the crossover in a pair of individuals with integer numbers
// that correspond to a ordered sequence, e.g. for traveling salesman problem
// Output:
// a and b -- offspring chromosomes
// Note: using OX1 method explained in [1] (proposed in [2])
// References:
// [1] Larrañaga P, Kuijpers CMH, Murga RH, Inza I and Dizdarevic S. Genetic Algorithms for the
// Travelling Salesman Problem: A Review of Representations and Operators. Artificial
// Intelligence Review, 13:129-170; 1999. doi:10.1023/A:1006529012972
// [2] Davis L. Applying Adaptive Algorithms to Epistatic Domains. Proceedings of International
// Joint Conference on Artificial Intelligence, 162-164; 1985.
// Example:
// data:
// 0 1 2 3 4 5 6 7
// A = a b | c d e | f g h size = 8
// B = b d | f h g | e c a cuts = [2, 5]
// ↑ ↑ ↑ ends = [2, 5, 8]
// 2 5 8
// first step: copy subtours
// a = . . | f h g | . . .
// b = . . | c d e | . . .
// second step: copy unique from subtour's end, position 5
// start adding here
// ↓ 5 6 7 0 1 2 3 4
// a = d e | f h g | a b c get from A: | f̶ g̶ h̶ | a b | c d e
// b = h g | c d e | a b f get from B: | e̶ c̶ a | b d̶ | f h g
func IntOrdCrossover(a, b, A, B []int, time int, ops *OpsData) (notused []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 3 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
var s, t int
if len(ops.Cuts) == 2 {
s, t = ops.Cuts[0], ops.Cuts[1]
} else {
s = rnd.Int(1, size-2)
t = rnd.Int(s+1, size-1)
}
chk.IntAssertLessThan(s, t)
acore := B[s:t]
bcore := A[s:t]
ncore := t - s
acorehas := make(map[int]bool) // TODO: check if map can be replaced => improve efficiency
bcorehas := make(map[int]bool)
for i := 0; i < ncore; i++ {
a[s+i] = acore[i]
b[s+i] = bcore[i]
acorehas[acore[i]] = true
bcorehas[bcore[i]] = true
}
ja, jb := t, t
for i := 0; i < size; i++ {
k := (i + t) % size
if !acorehas[A[k]] {
a[ja] = A[k]
ja++
if ja == size {
ja = 0
}
}
if !bcorehas[B[k]] {
b[jb] = B[k]
jb++
if jb == size {
jb = 0
}
}
}
return
}
// FltCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func FltCrossover(a, b, A, B []float64, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// FltCrossoverBlx implements the BLS-α crossover by Eshelman et al. (1993); see also Herrera (1998)
// Output:
// a and b -- offspring
func FltCrossoverBlx(a, b, A, B []float64, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) {
for i := 0; i < size; i++ {
a[i], b[i] = A[i], B[i]
}
return
}
α := ops.BlxAlp
chk.IntAssert(len(ops.Xrange), len(A))
var cmin, cmax, δ float64
for i := 0; i < size; i++ {
cmin = utl.Min(A[i], B[i])
cmax = utl.Max(A[i], B[i])
δ = cmax - cmin
a[i] = rnd.Float64(cmin-α*δ, cmax+α*δ)
b[i] = rnd.Float64(cmin-α*δ, cmax+α*δ)
a[i] = ops.EnforceRange(i, a[i])
b[i] = ops.EnforceRange(i, b[i])
}
return
}
// StrCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func StrCrossover(a, b, A, B []string, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// KeyCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func KeyCrossover(a, b, A, B []byte, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// BytCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func BytCrossover(a, b, A, B [][]byte, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
copy(a[i], A[i])
copy(b[i], B[i])
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
copy(b[j], A[j])
copy(a[j], B[j])
}
} else {
for j := start; j < end; j++ {
copy(a[j], A[j])
copy(b[j], B[j])
}
}
start = end
swap = !swap
}
return
}
// FunCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func FunCrossover(a, b, A, B []Func_t, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// mutation ////////////////////////////////////////////////////////////////////////////////////////
// IntMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func IntMutation(A []int, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
m := rnd.Int(1, int(ops.Mmax))
if rnd.FlipCoin(0.5) {
A[i] += m * A[i]
} else {
A[i] -= m * A[i]
}
}
}
// IntOrdMutation performs the mutation of genetic data from a ordered list of integers A
// Output: modified individual 'A'
// Note: using DM method as explained in [1] (citing [2])
// References:
// [1] Larrañaga P, Kuijpers CMH, Murga RH, Inza I and Dizdarevic S. Genetic Algorithms for the
// Travelling Salesman Problem: A Review of Representations and Operators. Artificial
// Intelligence Review, 13:129-170; 1999. doi:10.1023/A:1006529012972
// [2] Michalewicz Z. Genetic Algorithms + Data Structures = Evolution Programs. Berlin
// Heidelberg: Springer Verlag; 1992
// Joint Conference on Artificial Intelligence, 162-164; 1985.
//
// DM displacement mutation method:
// Ex:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h s = 2
// ↑ ↑ t = 5
// 2 5
//
// core = c d e (subtour) ncore = t - s = 5 - 2 = 3
//
// 0 1 2 3 4
// remain = a b f g h (remaining) nrem = size - ncore = 8 - 3 = 5
// ↑
// 4 = ins
func IntOrdMutation(A []int, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 3 {
if size == 2 {
A[0], A[1] = A[1], A[0]
}
return
}
var s, t, ncore, nrem, ins int
if ops.OrdSti != nil {
s, t, ins = ops.OrdSti[0], ops.OrdSti[1], ops.OrdSti[2]
ncore = t - s
nrem = size - ncore
} else {
s = rnd.Int(1, size-2)
t = rnd.Int(s+1, size-1)
ncore = t - s
nrem = size - ncore
ins = rnd.Int(1, nrem)
}
core := make([]int, ncore)
remain := make([]int, nrem)
var jc, jr int
for i := 0; i < size; i++ {
if i >= s && i < t {
core[jc] = A[i]
jc++
} else {
remain[jr] = A[i]
jr++
}
}
jc, jr = 0, 0
for i := 0; i < size; i++ {
if i < ins {
A[i] = remain[jr]
jr++
} else {
if jc < ncore {
A[i] = core[jc]
jc++
} else {
A[i] = remain[jr]
jr++
}
}
}
}
// FltMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func FltMutation(A []float64, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
m := rnd.Float64(1, ops.Mmax)
if rnd.FlipCoin(0.5) {
A[i] += m * A[i]
} else {
A[i] -= m * A[i]
}
}
}
// FltMutationMwicz implements the non-uniform mutation (Michaelewicz, 1992; Herrera, 1998)
// See also Michalewicz (1996) page 103
func FltMutationMwicz(A []float64, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
t := float64(time)
chk.IntAssert(len(ops.Xrange), len(A))
for i := 0; i < size; i++ {
xmin := ops.Xrange[i][0]
xmax := ops.Xrange[i][1]
if rnd.FlipCoin(0.5) {
A[i] += ops.MwiczDelta(t, xmax-A[i])
} else {
A[i] -= ops.MwiczDelta(t, A[i]-xmin)
}
A[i] = ops.EnforceRange(i, A[i])
}
}
// StrMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func StrMutation(A []string, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
A[i] = "TODO" // TODO: improve this
}
}
// KeyMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func KeyMutation(A []byte, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
v := rnd.Int(0, 100)
A[i] = byte(v) // TODO: improve this
}
}
// BytMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
//
// With probability ops.Pm, picks ops.Nchanges distinct row indices and
// overwrites the first byte of each selected row with a random value
// in [0, 100].
func BytMutation(A [][]byte, time int, ops *OpsData) {
	size := len(A)
	if !rnd.FlipCoin(ops.Pm) || size < 1 {
		return
	}
	pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
	for _, i := range pos {
		if len(A[i]) == 0 {
			continue // bug fix: A[i][0] would panic on an empty row
		}
		v := rnd.Int(0, 100)
		A[i][0] = byte(v) // TODO: improve this
	}
}
// FunMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func FunMutation(A []Func_t, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
// TODO: improve this
A[i] = func(ind *Individual) string { return "mutated" }
}
}
Add an enforce-range option (OpsData.EnfRange) to the crossover and mutation operators.
// Copyright 2015 Dorival de Moraes Pedroso. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package goga
import (
"math"
"math/rand"
"sort"
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/rnd"
"github.com/cpmech/gosl/utl"
)
// OpsData holds data for crossover and mutation operators
//
// The zero value is not directly usable: call SetDefault to install the
// default operator functions and constants, then CalcDerived to set
// Tmax and Xrange.
type OpsData struct {
	// constants
	Pc       float64     // probability of crossover
	Pm       float64     // probability of mutation
	Ncuts    int         // number of cuts during crossover
	Nchanges int         // number of changes during mutation
	Tmax     float64     // max number of generations
	MwiczB   float64     // Michalewicz' power coefficient
	BlxAlp   float64     // BLX-α coefficient
	Mmax     float64     // multiplier for mutation
	Cuts     []int       // specified cuts for crossover. can be <nil>
	OrdSti   []int       // {start, end, insertPoint}. can be <nil>
	Xrange   [][]float64 // [ngenes][2] genes minimum and maximum values
	EnfRange bool        // do enforce range (false => EnforceRange is a no-op)
	// crossover functions
	CxInt CxIntFunc_t // int crossover function
	CxFlt CxFltFunc_t // flt crossover function
	CxStr CxStrFunc_t // str crossover function
	CxKey CxKeyFunc_t // key crossover function
	CxByt CxBytFunc_t // byt crossover function
	CxFun CxFunFunc_t // fun crossover function
	// mutation functions
	MtInt MtIntFunc_t // int mutation function
	MtFlt MtFltFunc_t // flt mutation function
	MtStr MtStrFunc_t // str mutation function
	MtKey MtKeyFunc_t // key mutation function
	MtByt MtBytFunc_t // byt mutation function
	MtFun MtFunFunc_t // fun mutation function
}
// SetDefault sets default values
//
// Installs Pc=0.8, Pm=0.01, Ncuts=2, Nchanges=1, MwiczB=2, BlxAlp=0.5,
// Mmax=2 and the default crossover/mutation functions. Tmax, Xrange and
// EnfRange are NOT set here; use CalcDerived (and set EnfRange directly).
func (o *OpsData) SetDefault() {
	// constants
	o.Pc = 0.8
	o.Pm = 0.01
	o.Ncuts = 2
	o.Nchanges = 1
	o.MwiczB = 2.0
	o.BlxAlp = 0.5
	o.Mmax = 2
	// crossover functions (BLX-α for floats by default)
	o.CxInt = IntCrossover
	o.CxFlt = FltCrossoverBlx
	o.CxStr = StrCrossover
	o.CxKey = KeyCrossover
	o.CxByt = BytCrossover
	o.CxFun = FunCrossover
	// mutation functions (Michalewicz non-uniform for floats by default)
	o.MtInt = IntMutation
	o.MtFlt = FltMutationMwicz
	o.MtStr = StrMutation
	o.MtKey = KeyMutation
	o.MtByt = BytMutation
	o.MtFun = FunMutation
}
// CalcDerived sets derived quantities
// Input:
//  Tf     -- final time (max generation count); stored as float64 in Tmax
//  xrange -- [ngenes][2] gene min/max values; stored by reference, not copied
func (o *OpsData) CalcDerived(Tf int, xrange [][]float64) {
	o.Tmax = float64(Tf)
	o.Xrange = xrange
}
// MwiczDelta computes Michalewicz' Δ function
//
// Returns a random fraction of x that statistically shrinks toward zero
// as t approaches o.Tmax:
//   Δ = (1 - r^((1 - t/Tmax)^MwiczB)) * x,  r ~ U[0,1)
// NOTE(review): uses the global math/rand source, so results are not
// reproducible unless the global seed is controlled. Assumes o.Tmax > 0
// (set by CalcDerived) — TODO confirm callers always call CalcDerived first.
func (o *OpsData) MwiczDelta(t, x float64) float64 {
	r := rand.Float64()
	return (1.0 - math.Pow(r, math.Pow(1.0-t/o.Tmax, o.MwiczB))) * x
}
// EnforceRange makes sure x is within given range.
// When o.EnfRange is false this is the identity; otherwise x is clamped
// to the gene's [min, max] interval taken from o.Xrange[igene].
func (o *OpsData) EnforceRange(igene int, x float64) float64 {
	if !o.EnfRange {
		return x
	}
	lo, hi := o.Xrange[igene][0], o.Xrange[igene][1]
	switch {
	case x < lo:
		return lo
	case x > hi:
		return hi
	}
	return x
}
// auxiliary ///////////////////////////////////////////////////////////////////////////////////////
// SimpleChromo splits 'genes' into 'nbases' unequal parts
// Input:
//  genes  -- a slice whose size equals to the number of genes
//  nbases -- number of bases used to split 'genes'
// Output:
//  chromo -- the chromosome
//
// Example:
//
//  genes = [0, 1, 2, ... nbases-1,  0, 1, 2, ... nbases-1]
//           \___________________/   \___________________/
//                   gene # 0               gene # 1
//
// Each gene value g is partitioned into nbases random fractions that sum
// to g (fractions are random values normalized by their sum).
// NOTE(review): if rnd.Float64s produced all zeros, sumv would be 0 and
// the division would yield NaN/Inf — assumed to be practically impossible;
// confirm against rnd.Float64s' range.
func SimpleChromo(genes []float64, nbases int) (chromo []float64) {
	ngenes := len(genes)
	chromo = make([]float64, ngenes*nbases)
	values := make([]float64, nbases)
	var sumv float64
	for i, g := range genes {
		rnd.Float64s(values, 0, 1)
		sumv = la.VecAccum(values)
		for j := 0; j < nbases; j++ {
			chromo[i*nbases+j] = g * values[j] / sumv
		}
	}
	return
}
// GenerateCxEnds randomly computes the end positions of cuts in chromosomes
// Input:
// size -- size of chromosome
// ncuts -- number of cuts to be used, unless cuts != nil
// cuts -- cut positions. can be nil => use ncuts instead
// Output:
// ends -- end positions where the last one equals size
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// ↑ ↑ ↑ cuts = [1, 5]
// 1 5 8 ends = [1, 5, 8]
func GenerateCxEnds(size, ncuts int, cuts []int) (ends []int) {
// handle small slices
if size < 2 {
return
}
if size == 2 {
return []int{1, size}
}
// cuts slice is given
if len(cuts) > 0 {
ncuts = len(cuts)
ends = make([]int, ncuts+1)
ends[ncuts] = size
for i, cut := range cuts {
if cut < 1 || cut >= size {
chk.Panic("cut=%d is outside the allowed range: 1 ≤ cut ≤ size-1", cut)
}
if i > 0 {
if cut == cuts[i-1] {
chk.Panic("repeated cut values are not allowed: cuts=%v", cuts)
}
}
ends[i] = cut
}
sort.Ints(ends)
return
}
// randomly generate cuts
if ncuts < 1 {
ncuts = 1
}
if ncuts >= size {
ncuts = size - 1
}
ends = make([]int, ncuts+1)
ends[ncuts] = size
// pool of values for selections
pool := rnd.IntGetUniqueN(1, size, ncuts)
sort.Ints(pool)
for i := 0; i < ncuts; i++ {
ends[i] = pool[i]
}
return
}
// crossover ///////////////////////////////////////////////////////////////////////////////////////
// IntCrossover performs the crossover of genetic data from A and B
// Output:
//  a and b -- offspring
// Example:
//         0 1 2 3 4 5 6 7
//     A = a b c d e f g h    size = 8
//     B = * . . . . * * *    cuts = [1, 5]
//          ↑       ↑     ↑   ends = [1, 5, 8]
//          1       5     8
//     a = a . . . . f g h
//     b = * b c d e * * *
//
// With probability 1-Pc (or when fewer than two genes) the parents are
// copied through unchanged; otherwise segments delimited by the cut
// ends are alternately taken straight (a←A, b←B) or swapped (a←B, b←A).
func IntCrossover(a, b, A, B []int, time int, ops *OpsData) (ends []int) {
	n := len(A)
	if !rnd.FlipCoin(ops.Pc) || n < 2 {
		for i := range A {
			a[i], b[i] = A[i], B[i]
		}
		return
	}
	ends = GenerateCxEnds(n, ops.Ncuts, ops.Cuts)
	lo, straight := 0, true
	for _, hi := range ends {
		for j := lo; j < hi; j++ {
			if straight {
				a[j], b[j] = A[j], B[j]
			} else {
				a[j], b[j] = B[j], A[j]
			}
		}
		lo, straight = hi, !straight
	}
	return
}
// IntOrdCrossover performs the crossover in a pair of individuals with integer numbers
// that correspond to a ordered sequence, e.g. for traveling salesman problem
// Output:
// a and b -- offspring chromosomes
// Note: using OX1 method explained in [1] (proposed in [2])
// References:
// [1] Larrañaga P, Kuijpers CMH, Murga RH, Inza I and Dizdarevic S. Genetic Algorithms for the
// Travelling Salesman Problem: A Review of Representations and Operators. Artificial
// Intelligence Review, 13:129-170; 1999. doi:10.1023/A:1006529012972
// [2] Davis L. Applying Adaptive Algorithms to Epistatic Domains. Proceedings of International
// Joint Conference on Artificial Intelligence, 162-164; 1985.
// Example:
// data:
// 0 1 2 3 4 5 6 7
// A = a b | c d e | f g h size = 8
// B = b d | f h g | e c a cuts = [2, 5]
// ↑ ↑ ↑ ends = [2, 5, 8]
// 2 5 8
// first step: copy subtours
// a = . . | f h g | . . .
// b = . . | c d e | . . .
// second step: copy unique from subtour's end, position 5
// start adding here
// ↓ 5 6 7 0 1 2 3 4
// a = d e | f h g | a b c get from A: | f̶ g̶ h̶ | a b | c d e
// b = h g | c d e | a b f get from B: | e̶ c̶ a | b d̶ | f h g
func IntOrdCrossover(a, b, A, B []int, time int, ops *OpsData) (notused []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 3 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
var s, t int
if len(ops.Cuts) == 2 {
s, t = ops.Cuts[0], ops.Cuts[1]
} else {
s = rnd.Int(1, size-2)
t = rnd.Int(s+1, size-1)
}
chk.IntAssertLessThan(s, t)
acore := B[s:t]
bcore := A[s:t]
ncore := t - s
acorehas := make(map[int]bool) // TODO: check if map can be replaced => improve efficiency
bcorehas := make(map[int]bool)
for i := 0; i < ncore; i++ {
a[s+i] = acore[i]
b[s+i] = bcore[i]
acorehas[acore[i]] = true
bcorehas[bcore[i]] = true
}
ja, jb := t, t
for i := 0; i < size; i++ {
k := (i + t) % size
if !acorehas[A[k]] {
a[ja] = A[k]
ja++
if ja == size {
ja = 0
}
}
if !bcorehas[B[k]] {
b[jb] = B[k]
jb++
if jb == size {
jb = 0
}
}
}
return
}
// FltCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func FltCrossover(a, b, A, B []float64, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// FltCrossoverBlx implements the BLS-α crossover by Eshelman et al. (1993); see also Herrera (1998)
// Output:
//  a and b -- offspring
//
// Each offspring gene is drawn uniformly from [cmin-α·δ, cmax+α·δ],
// where cmin/cmax are the parents' gene values, δ = cmax-cmin and
// α = ops.BlxAlp. Results are passed through EnforceRange (a no-op
// unless ops.EnfRange is set). Requires len(ops.Xrange) == len(A)
// (asserted below). Unlike the discrete crossovers there is no
// size < 2 guard; a single-gene slice is handled by the loop.
func FltCrossoverBlx(a, b, A, B []float64, time int, ops *OpsData) (ends []int) {
	size := len(A)
	if !rnd.FlipCoin(ops.Pc) {
		for i := 0; i < size; i++ {
			a[i], b[i] = A[i], B[i]
		}
		return
	}
	α := ops.BlxAlp
	chk.IntAssert(len(ops.Xrange), len(A))
	var cmin, cmax, δ float64
	for i := 0; i < size; i++ {
		cmin = utl.Min(A[i], B[i])
		cmax = utl.Max(A[i], B[i])
		δ = cmax - cmin
		a[i] = rnd.Float64(cmin-α*δ, cmax+α*δ)
		b[i] = rnd.Float64(cmin-α*δ, cmax+α*δ)
		a[i] = ops.EnforceRange(i, a[i])
		b[i] = ops.EnforceRange(i, b[i])
	}
	return
}
// StrCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func StrCrossover(a, b, A, B []string, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// KeyCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func KeyCrossover(a, b, A, B []byte, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// BytCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func BytCrossover(a, b, A, B [][]byte, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
copy(a[i], A[i])
copy(b[i], B[i])
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
copy(b[j], A[j])
copy(a[j], B[j])
}
} else {
for j := start; j < end; j++ {
copy(a[j], A[j])
copy(b[j], B[j])
}
}
start = end
swap = !swap
}
return
}
// FunCrossover performs the crossover of genetic data from A and B
// Output:
// a and b -- offspring
// Example:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h size = 8
// B = * . . . . * * * cuts = [1, 5]
// ↑ ↑ ↑ ends = [1, 5, 8]
// 1 5 8
// a = a . . . . f g h
// b = * b c d e * * *
func FunCrossover(a, b, A, B []Func_t, time int, ops *OpsData) (ends []int) {
size := len(A)
if !rnd.FlipCoin(ops.Pc) || size < 2 {
for i := 0; i < len(A); i++ {
a[i], b[i] = A[i], B[i]
}
return
}
ends = GenerateCxEnds(size, ops.Ncuts, ops.Cuts)
swap := false
start := 0
for _, end := range ends {
if swap {
for j := start; j < end; j++ {
b[j], a[j] = A[j], B[j]
}
} else {
for j := start; j < end; j++ {
a[j], b[j] = A[j], B[j]
}
}
start = end
swap = !swap
}
return
}
// mutation ////////////////////////////////////////////////////////////////////////////////////////
// IntMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
//
// With probability ops.Pm, scales ops.Nchanges randomly chosen genes by
// a random integer factor m in [1, Mmax]: A[i] becomes A[i] ± m*A[i]
// (sign chosen by a fair coin). A zero gene therefore stays zero.
// NOTE(review): assumes ops.Mmax >= 1; behavior of rnd.Int for an
// empty/inverted range needs confirming against the rnd package.
func IntMutation(A []int, time int, ops *OpsData) {
	size := len(A)
	if !rnd.FlipCoin(ops.Pm) || size < 1 {
		return
	}
	pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
	for _, i := range pos {
		m := rnd.Int(1, int(ops.Mmax))
		if rnd.FlipCoin(0.5) {
			A[i] += m * A[i]
		} else {
			A[i] -= m * A[i]
		}
	}
}
// IntOrdMutation performs the mutation of genetic data from a ordered list of integers A
// Output: modified individual 'A'
// Note: using DM method as explained in [1] (citing [2])
// References:
// [1] Larrañaga P, Kuijpers CMH, Murga RH, Inza I and Dizdarevic S. Genetic Algorithms for the
// Travelling Salesman Problem: A Review of Representations and Operators. Artificial
// Intelligence Review, 13:129-170; 1999. doi:10.1023/A:1006529012972
// [2] Michalewicz Z. Genetic Algorithms + Data Structures = Evolution Programs. Berlin
// Heidelberg: Springer Verlag; 1992
// Joint Conference on Artificial Intelligence, 162-164; 1985.
//
// DM displacement mutation method:
// Ex:
// 0 1 2 3 4 5 6 7
// A = a b c d e f g h s = 2
// ↑ ↑ t = 5
// 2 5
//
// core = c d e (subtour) ncore = t - s = 5 - 2 = 3
//
// 0 1 2 3 4
// remain = a b f g h (remaining) nrem = size - ncore = 8 - 3 = 5
// ↑
// 4 = ins
func IntOrdMutation(A []int, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 3 {
if size == 2 {
A[0], A[1] = A[1], A[0]
}
return
}
var s, t, ncore, nrem, ins int
if ops.OrdSti != nil {
s, t, ins = ops.OrdSti[0], ops.OrdSti[1], ops.OrdSti[2]
ncore = t - s
nrem = size - ncore
} else {
s = rnd.Int(1, size-2)
t = rnd.Int(s+1, size-1)
ncore = t - s
nrem = size - ncore
ins = rnd.Int(1, nrem)
}
core := make([]int, ncore)
remain := make([]int, nrem)
var jc, jr int
for i := 0; i < size; i++ {
if i >= s && i < t {
core[jc] = A[i]
jc++
} else {
remain[jr] = A[i]
jr++
}
}
jc, jr = 0, 0
for i := 0; i < size; i++ {
if i < ins {
A[i] = remain[jr]
jr++
} else {
if jc < ncore {
A[i] = core[jc]
jc++
} else {
A[i] = remain[jr]
jr++
}
}
}
}
// FltMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
func FltMutation(A []float64, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
for _, i := range pos {
m := rnd.Float64(1, ops.Mmax)
if rnd.FlipCoin(0.5) {
A[i] += m * A[i]
} else {
A[i] -= m * A[i]
}
}
}
// FltMutationMwicz implements the non-uniform mutation (Michaelewicz, 1992; Herrera, 1998)
// See also Michalewicz (1996) page 103
func FltMutationMwicz(A []float64, time int, ops *OpsData) {
size := len(A)
if !rnd.FlipCoin(ops.Pm) || size < 1 {
return
}
t := float64(time)
chk.IntAssert(len(ops.Xrange), len(A))
for i := 0; i < size; i++ {
xmin := ops.Xrange[i][0]
xmax := ops.Xrange[i][1]
if rnd.FlipCoin(0.5) {
A[i] += ops.MwiczDelta(t, xmax-A[i])
} else {
A[i] -= ops.MwiczDelta(t, A[i]-xmin)
}
A[i] = ops.EnforceRange(i, A[i])
}
}
// StrMutation performs the mutation of genetic data from A
// Output: modified individual 'A'
//
// Placeholder implementation: with probability ops.Pm it overwrites
// ops.Nchanges randomly chosen entries with the literal "TODO".
func StrMutation(A []string, time int, ops *OpsData) {
	size := len(A)
	if !rnd.FlipCoin(ops.Pm) || size < 1 {
		return
	}
	pos := rnd.IntGetUniqueN(0, size, ops.Nchanges)
	for _, i := range pos {
		A[i] = "TODO" // TODO: improve this
	}
}
// KeyMutation performs the mutation of genetic (byte-key) data in A.
// Output: modified individual 'A'
func KeyMutation(A []byte, time int, ops *OpsData) {
	n := len(A)
	if !rnd.FlipCoin(ops.Pm) || n < 1 {
		return
	}
	// replace ops.Nchanges distinct bytes with random values in [0,100]
	for _, idx := range rnd.IntGetUniqueN(0, n, ops.Nchanges) {
		A[idx] = byte(rnd.Int(0, 100)) // TODO: improve this
	}
}
// BytMutation performs the mutation of genetic (byte-slice) data in A.
// Output: modified individual 'A'
func BytMutation(A [][]byte, time int, ops *OpsData) {
	n := len(A)
	if !rnd.FlipCoin(ops.Pm) || n < 1 {
		return
	}
	// randomize the first byte of ops.Nchanges distinct entries
	for _, idx := range rnd.IntGetUniqueN(0, n, ops.Nchanges) {
		A[idx][0] = byte(rnd.Int(0, 100)) // TODO: improve this
	}
}
// FunMutation performs the mutation of genetic (function) data in A.
// Output: modified individual 'A'
func FunMutation(A []Func_t, time int, ops *OpsData) {
	n := len(A)
	if !rnd.FlipCoin(ops.Pm) || n < 1 {
		return
	}
	// swap ops.Nchanges distinct genes for a placeholder function
	for _, idx := range rnd.IntGetUniqueN(0, n, ops.Nchanges) {
		// TODO: improve this
		A[idx] = func(ind *Individual) string { return "mutated" }
	}
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"sort"
"strings"
"time"
ioutil "io/ioutil"
git "github.com/libgit2/git2go"
)
// ColorType is an ANSI escape sequence used to colorize terminal output.
type ColorType string

const (
	// ANSI color escape codes for branch lines.
	Red    ColorType = "\x1b[0;31m"
	Yellow           = "\x1b[0;33m"
	Green            = "\x1b[0;32m"
	// BaseBranch is the branch every other branch is compared against.
	BaseBranch string = "master"
	// CachePath is where comparison results are cached between runs.
	CachePath = ".git/go_gb_cache.json"
)
// exit prints msg (a Printf format) filled with args and terminates the
// process with status 1.
// @todo Always append \n to msg.
func exit(msg string, args ...string) {
	// Expand args so each format verb receives its own value; passing the
	// slice itself rendered as a single "[...]" argument (and produced
	// %!(EXTRA ...) noise for verb-less messages).
	a := make([]interface{}, len(args))
	for i, s := range args {
		a[i] = s
	}
	fmt.Printf(msg, a...)
	os.Exit(1)
}
// NewRepo opens the git repository in the current directory or exits.
func NewRepo() *git.Repository {
	repo, err := git.OpenRepository(".")
	if err != nil {
		// @todo improve message
		exit("Could not open repository at '.'\n")
	}
	return repo
}

// NewBranchIterator returns an iterator over the local branches or exits.
func NewBranchIterator(repo *git.Repository) *git.BranchIterator {
	i, err := repo.NewBranchIterator(git.BranchLocal)
	if err != nil {
		// @todo improve message
		exit("Can't list branches\n")
	}
	return i
}

// LookupBaseOid resolves BaseBranch to its tip oid or exits.
func LookupBaseOid(repo *git.Repository) *git.Oid {
	base_branch, err := repo.LookupBranch(BaseBranch, git.BranchLocal)
	if err != nil {
		exit("Error looking up %s\n", BaseBranch)
	}
	return base_branch.Target()
}
// Comparison relates one local branch to the base branch.
type Comparison struct {
	Repo     *git.Repository
	BaseOid  *git.Oid
	Branch   *git.Branch
	Oid      *git.Oid // branch tip
	IsMerged bool
	Ahead    int // commits ahead of base; -1 = not yet computed
	Behind   int // commits behind base; -1 = not yet computed
}

// NewComparison builds a Comparison for branch against base_oid, seeding
// ahead/behind/merged from the cache when an entry exists.
func NewComparison(repo *git.Repository, base_oid *git.Oid, branch *git.Branch, store CacheStore) *Comparison {
	c := new(Comparison)
	c.Repo = repo
	c.BaseOid = base_oid
	c.Branch = branch
	c.Oid = branch.Target()
	cache := store[c.CacheKey()]
	if cache != nil {
		c.Ahead = cache.Ahead
		c.Behind = cache.Behind
		c.IsMerged = cache.IsMerged
	} else {
		// -1 marks "not computed"; Execute fills these in later.
		c.IsMerged = false
		c.Ahead = -1
		c.Behind = -1
	}
	return c
}
// Name returns the branch's short name or exits.
func (c *Comparison) Name() string {
	name, err := c.Branch.Name()
	if err != nil {
		exit("Can't get branch name\n")
	}
	return name
}

// IsHead reports whether this branch is currently checked out.
func (c *Comparison) IsHead() bool {
	head, err := c.Branch.IsHead()
	if err != nil {
		exit("Can't get IsHead\n")
	}
	return head
}

// Commit looks up the commit object at the branch tip or exits.
func (c *Comparison) Commit() *git.Commit {
	commit, err := c.Repo.LookupCommit(c.Oid)
	if err != nil {
		exit("Could not lookup commit '%s'.\n", c.Oid.String())
	}
	return commit
}

// Color picks the display color: green for the checked-out branch,
// yellow for everything else.
// @todo red for old commits
func (c *Comparison) Color() ColorType {
	if c.IsHead() {
		return Green
	} else {
		return Yellow
	}
}

// When returns the committer timestamp of the branch tip.
func (c *Comparison) When() time.Time {
	sig := c.Commit().Committer()
	return sig.When
}

// FormattedWhen renders When for the branch listing.
func (c *Comparison) FormattedWhen() string {
	return c.When().Format("2006-01-02 15:04PM")
}

// CacheKey identifies this comparison as "<baseOid>..<branchOid>".
func (c *Comparison) CacheKey() string {
	strs := []string{c.BaseOid.String(), c.Oid.String()}
	return strings.Join(strs, "..")
}
// SetIsMerged marks the branch merged when its tip equals the base oid
// or when the base descends from it.
func (c *Comparison) SetIsMerged() {
	if c.Oid.String() == c.BaseOid.String() {
		c.IsMerged = true
	} else {
		merged, err := c.Repo.DescendantOf(c.BaseOid, c.Oid)
		if err != nil {
			exit("Could not get descendant of '%s' and '%s'.\n", c.BaseOid.String(), c.Oid.String())
		}
		c.IsMerged = merged
	}
}
// SetAheadBehind computes how many commits the branch is ahead of and
// behind the base branch, storing the results on the Comparison.
func (c *Comparison) SetAheadBehind() {
	var err error
	c.Ahead, c.Behind, err = c.Repo.AheadBehind(c.Oid, c.BaseOid)
	if err != nil {
		// The message previously had no verb for the oid argument.
		exit("Error getting ahead/behind for '%s'\n", c.BaseOid.String())
	}
}
// Execute computes merge status and ahead/behind counts unless both
// counts were already loaded from the cache.
func (c *Comparison) Execute() {
	if c.Ahead > -1 && c.Behind > -1 {
		return
	}
	c.SetIsMerged()
	c.SetAheadBehind()
}

// Comparisons is a list of branch comparisons.
type Comparisons []*Comparison

// ComparisonsByWhen sorts comparisons by last-commit time, oldest first.
type ComparisonsByWhen Comparisons

func (a ComparisonsByWhen) Len() int {
	return len(a)
}

func (a ComparisonsByWhen) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a ComparisonsByWhen) Less(i, j int) bool {
	return a[i].When().Unix() < a[j].When().Unix()
}
// Options holds the parsed command-line flags.
type Options struct {
	Ahead      int
	Behind     int
	Merged     bool
	NoMerged   bool
	ClearCache bool
}

// NewOptions defines and parses the command-line flags.
func NewOptions() *Options {
	o := new(Options)
	flag.IntVar(&o.Ahead, "ahead", -1, "only show branches that are <ahead> commits ahead.")
	flag.IntVar(&o.Behind, "behind", -1, "only show branches that are <behind> commits behind.")
	flag.BoolVar(&o.Merged, "merged", false, "only show branches that are merged.")
	flag.BoolVar(&o.NoMerged, "no-merged", false, "only show branches that are not merged.")
	flag.BoolVar(&o.ClearCache, "clear-cache", false, "clear cache of comparisons.")
	flag.Parse()
	return o
}
// CacheStore maps a CacheKey ("baseOid..branchOid") to a cached Comparison.
type CacheStore map[string]*Comparison

// NewCacheStore loads the comparison cache from CachePath. A missing or
// unreadable cache file simply yields an empty store; the cache is
// (re)written on exit.
func NewCacheStore() CacheStore {
	store := make(CacheStore)
	bits, err := ioutil.ReadFile(CachePath)
	if err != nil {
		return store
	}
	// Best effort: a corrupt cache decodes to an empty store.
	json.Unmarshal(bits, &store)
	return store
}
// WriteToFile persists the cache to CachePath, reporting any failure to
// the caller instead of silently dropping it.
func (store *CacheStore) WriteToFile() error {
	b, err := json.Marshal(store)
	if err != nil {
		fmt.Printf("Could not save cache to file.\n")
		return err
	}
	// Previously the file was written even when marshaling failed and
	// write errors were discarded; surface them instead.
	return ioutil.WriteFile(CachePath, b, 0644)
}
// main lists local branches sorted by last-commit time, colorized, with
// ahead/behind counts relative to BaseBranch, honoring the filter flags.
func main() {
	opts := NewOptions()
	if opts.ClearCache {
		os.Remove(CachePath)
	}
	store := NewCacheStore()
	repo := NewRepo()
	branch_iterator := NewBranchIterator(repo)
	base_oid := LookupBaseOid(repo)
	comparisons := make(Comparisons, 0)
	// type BranchIteratorFunc func(*Branch, BranchType) error
	branch_iterator.ForEach(func(branch *git.Branch, btype git.BranchType) error {
		comp := NewComparison(repo, base_oid, branch, store)
		comparisons = append(comparisons, comp)
		return nil
	})
	sort.Sort(ComparisonsByWhen(comparisons))
	for _, comp := range comparisons {
		// Execute is a no-op when the cache already had the counts.
		comp.Execute()
		merged_string := ""
		if comp.IsMerged {
			merged_string = "(merged)"
		}
		// Apply the -ahead/-behind/-merged/-no-merged filters.
		if opts.Ahead != -1 && opts.Ahead != comp.Ahead {
			continue
		}
		if opts.Behind != -1 && opts.Behind != comp.Behind {
			continue
		}
		if opts.Merged && !comp.IsMerged {
			continue
		}
		if opts.NoMerged && comp.IsMerged {
			continue
		}
		// continue
		fmt.Printf(
			"%s%s | %-30s | behind: %4d | ahead: %4d %s\n",
			comp.Color(),
			comp.FormattedWhen(),
			comp.Name(),
			comp.Behind,
			comp.Ahead,
			merged_string)
		// Record the fresh comparison so the next run can skip the work.
		store[comp.CacheKey()] = comp
	}
	store.WriteToFile()
}
Add red color output.
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"sort"
"strings"
"time"
ioutil "io/ioutil"
git "github.com/libgit2/git2go"
"github.com/mgutz/ansi"
)
// ANSI escape sequences used to colorize branch lines.
var (
	Red    string = ansi.ColorCode("red")
	Yellow        = ansi.ColorCode("yellow")
	Green         = ansi.ColorCode("green")
)

const (
	// BaseBranch is the branch every other branch is compared against.
	BaseBranch string = "master"
	// CachePath is where comparison results are cached between runs.
	CachePath = ".git/go_gb_cache.json"
)
// exit prints msg (a Printf format) filled with args and terminates the
// process with status 1.
// @todo Always append \n to msg.
func exit(msg string, args ...string) {
	// Expand args so each format verb receives its own value; passing the
	// slice itself rendered as a single "[...]" argument (and produced
	// %!(EXTRA ...) noise for verb-less messages).
	a := make([]interface{}, len(args))
	for i, s := range args {
		a[i] = s
	}
	fmt.Printf(msg, a...)
	os.Exit(1)
}
// NewRepo opens the git repository in the current directory or exits.
func NewRepo() *git.Repository {
	repo, err := git.OpenRepository(".")
	if err != nil {
		// @todo improve message
		exit("Could not open repository at '.'\n")
	}
	return repo
}

// NewBranchIterator returns an iterator over the local branches or exits.
func NewBranchIterator(repo *git.Repository) *git.BranchIterator {
	i, err := repo.NewBranchIterator(git.BranchLocal)
	if err != nil {
		// @todo improve message
		exit("Can't list branches\n")
	}
	return i
}

// LookupBaseOid resolves BaseBranch to its tip oid or exits.
func LookupBaseOid(repo *git.Repository) *git.Oid {
	base_branch, err := repo.LookupBranch(BaseBranch, git.BranchLocal)
	if err != nil {
		exit("Error looking up %s\n", BaseBranch)
	}
	return base_branch.Target()
}
// Comparison relates one local branch to the base branch.
type Comparison struct {
	Repo     *git.Repository
	BaseOid  *git.Oid
	Branch   *git.Branch
	Oid      *git.Oid // branch tip
	IsMerged bool
	Ahead    int // commits ahead of base; -1 = not yet computed
	Behind   int // commits behind base; -1 = not yet computed
}

// NewComparison builds a Comparison for branch against base_oid, seeding
// ahead/behind/merged from the cache when an entry exists.
func NewComparison(repo *git.Repository, base_oid *git.Oid, branch *git.Branch, store CacheStore) *Comparison {
	c := new(Comparison)
	c.Repo = repo
	c.BaseOid = base_oid
	c.Branch = branch
	c.Oid = branch.Target()
	cache := store[c.CacheKey()]
	if cache != nil {
		c.Ahead = cache.Ahead
		c.Behind = cache.Behind
		c.IsMerged = cache.IsMerged
	} else {
		// -1 marks "not computed"; Execute fills these in later.
		c.IsMerged = false
		c.Ahead = -1
		c.Behind = -1
	}
	return c
}
// Name returns the branch's short name or exits.
func (c *Comparison) Name() string {
	name, err := c.Branch.Name()
	if err != nil {
		exit("Can't get branch name\n")
	}
	return name
}

// IsHead reports whether this branch is currently checked out.
func (c *Comparison) IsHead() bool {
	head, err := c.Branch.IsHead()
	if err != nil {
		exit("Can't get IsHead\n")
	}
	return head
}
// Commit looks up the commit object at the branch tip or exits.
func (c *Comparison) Commit() *git.Commit {
	commit, err := c.Repo.LookupCommit(c.Oid)
	if err != nil {
		// Restore the trailing newline so this message matches the others.
		exit("Could not lookup commit '%s'.\n", c.Oid.String())
	}
	return commit
}
// ColorCode picks the display color: green for the checked-out branch,
// red for branches whose tip is older than two weeks, yellow otherwise.
func (c *Comparison) ColorCode() string {
	// A constant duration avoids the previously ignored ParseDuration error.
	const twoWeeks = 14 * 24 * time.Hour
	cutoff := time.Now().Add(-twoWeeks)
	if c.IsHead() {
		return Green
	} else if c.When().Before(cutoff) {
		return Red
	} else {
		return Yellow
	}
}
// When returns the committer timestamp of the branch tip.
func (c *Comparison) When() time.Time {
	sig := c.Commit().Committer()
	return sig.When
}

// FormattedWhen renders When for the branch listing.
func (c *Comparison) FormattedWhen() string {
	return c.When().Format("2006-01-02 15:04PM")
}

// CacheKey identifies this comparison as "<baseOid>..<branchOid>".
func (c *Comparison) CacheKey() string {
	strs := []string{c.BaseOid.String(), c.Oid.String()}
	return strings.Join(strs, "..")
}
// SetIsMerged marks the branch merged when its tip equals the base oid
// or when the base descends from it.
func (c *Comparison) SetIsMerged() {
	if c.Oid.String() == c.BaseOid.String() {
		c.IsMerged = true
	} else {
		merged, err := c.Repo.DescendantOf(c.BaseOid, c.Oid)
		if err != nil {
			exit("Could not get descendant of '%s' and '%s'.\n", c.BaseOid.String(), c.Oid.String())
		}
		c.IsMerged = merged
	}
}
// SetAheadBehind computes how many commits the branch is ahead of and
// behind the base branch, storing the results on the Comparison.
func (c *Comparison) SetAheadBehind() {
	var err error
	c.Ahead, c.Behind, err = c.Repo.AheadBehind(c.Oid, c.BaseOid)
	if err != nil {
		// The message previously had no verb for the oid argument.
		exit("Error getting ahead/behind for '%s'\n", c.BaseOid.String())
	}
}
// Execute computes merge status and ahead/behind counts unless both
// counts were already loaded from the cache.
func (c *Comparison) Execute() {
	if c.Ahead > -1 && c.Behind > -1 {
		return
	}
	c.SetIsMerged()
	c.SetAheadBehind()
}

// Comparisons is a list of branch comparisons.
type Comparisons []*Comparison

// ComparisonsByWhen sorts comparisons by last-commit time, oldest first.
type ComparisonsByWhen Comparisons

func (a ComparisonsByWhen) Len() int {
	return len(a)
}

func (a ComparisonsByWhen) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a ComparisonsByWhen) Less(i, j int) bool {
	return a[i].When().Unix() < a[j].When().Unix()
}
// Options holds the parsed command-line flags.
type Options struct {
	Ahead      int
	Behind     int
	Merged     bool
	NoMerged   bool
	ClearCache bool
}

// NewOptions defines and parses the command-line flags.
func NewOptions() *Options {
	o := new(Options)
	flag.IntVar(&o.Ahead, "ahead", -1, "only show branches that are <ahead> commits ahead.")
	flag.IntVar(&o.Behind, "behind", -1, "only show branches that are <behind> commits behind.")
	flag.BoolVar(&o.Merged, "merged", false, "only show branches that are merged.")
	flag.BoolVar(&o.NoMerged, "no-merged", false, "only show branches that are not merged.")
	flag.BoolVar(&o.ClearCache, "clear-cache", false, "clear cache of comparisons.")
	flag.Parse()
	return o
}
// CacheStore maps a CacheKey ("baseOid..branchOid") to a cached Comparison.
type CacheStore map[string]*Comparison

// NewCacheStore loads the comparison cache from CachePath. A missing or
// unreadable cache file simply yields an empty store; the cache is
// (re)written on exit.
func NewCacheStore() CacheStore {
	store := make(CacheStore)
	bits, err := ioutil.ReadFile(CachePath)
	if err != nil {
		return store
	}
	// Best effort: a corrupt cache decodes to an empty store.
	json.Unmarshal(bits, &store)
	return store
}
// WriteToFile persists the cache to CachePath, reporting any failure to
// the caller instead of silently dropping it.
func (store *CacheStore) WriteToFile() error {
	b, err := json.Marshal(store)
	if err != nil {
		fmt.Printf("Could not save cache to file.\n")
		return err
	}
	// Previously the file was written even when marshaling failed and
	// write errors were discarded; surface them instead.
	return ioutil.WriteFile(CachePath, b, 0644)
}
// main lists local branches sorted by last-commit time, colorized by age,
// with ahead/behind counts relative to BaseBranch, honoring filter flags.
func main() {
	opts := NewOptions()
	if opts.ClearCache {
		os.Remove(CachePath)
	}
	store := NewCacheStore()
	repo := NewRepo()
	branch_iterator := NewBranchIterator(repo)
	base_oid := LookupBaseOid(repo)
	comparisons := make(Comparisons, 0)
	// type BranchIteratorFunc func(*Branch, BranchType) error
	branch_iterator.ForEach(func(branch *git.Branch, btype git.BranchType) error {
		comp := NewComparison(repo, base_oid, branch, store)
		comparisons = append(comparisons, comp)
		return nil
	})
	sort.Sort(ComparisonsByWhen(comparisons))
	for _, comp := range comparisons {
		// Execute is a no-op when the cache already had the counts.
		comp.Execute()
		merged_string := ""
		if comp.IsMerged {
			merged_string = "(merged)"
		}
		// Apply the -ahead/-behind/-merged/-no-merged filters.
		if opts.Ahead != -1 && opts.Ahead != comp.Ahead {
			continue
		}
		if opts.Behind != -1 && opts.Behind != comp.Behind {
			continue
		}
		if opts.Merged && !comp.IsMerged {
			continue
		}
		if opts.NoMerged && comp.IsMerged {
			continue
		}
		// Restore the trailing newline that was dropped when color output
		// was added; without it every branch prints on a single line.
		fmt.Printf(
			"%s%s | %-30s | behind: %4d | ahead: %4d %s\n",
			comp.ColorCode(),
			comp.FormattedWhen(),
			comp.Name(),
			comp.Behind,
			comp.Ahead,
			merged_string)
		store[comp.CacheKey()] = comp
	}
	store.WriteToFile()
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"io"
"os"
"sync"
"syscall"
"time"
)
// Network file descriptor.
// netFD holds the state for one socket: the system fd, refcounting for
// safe close, the wait channels used with the pollServer, and the
// per-direction deadlines and locks.
type netFD struct {
	// locking/lifetime of sysfd
	sysmu   sync.Mutex
	sysref  int
	closing bool

	// immutable until Close
	sysfd   int
	family  int
	proto   int
	sysfile *os.File
	cr      chan bool
	cw      chan bool
	net     string
	laddr   Addr
	raddr   Addr

	// owned by client
	rdeadline_delta int64
	rdeadline       int64
	rio             sync.Mutex
	wdeadline_delta int64
	wdeadline       int64
	wio             sync.Mutex

	// owned by fd wait server
	ncr, ncw int
}

// InvalidConnError reports an operation on a closed or invalid net.Conn.
type InvalidConnError struct{}

func (e *InvalidConnError) String() string  { return "invalid net.Conn" }
func (e *InvalidConnError) Temporary() bool { return false }
func (e *InvalidConnError) Timeout() bool   { return false }
// A pollServer helps FDs determine when to retry a non-blocking
// read or write after they get EAGAIN. When an FD needs to wait,
// send the fd on s.cr (for a read) or s.cw (for a write) to pass the
// request to the poll server. Then receive on fd.cr/fd.cw.
// When the pollServer finds that i/o on FD should be possible
// again, it will send fd on fd.cr/fd.cw to wake any waiting processes.
// This protocol is implemented as s.WaitRead() and s.WaitWrite().
//
// There is one subtlety: when sending on s.cr/s.cw, the
// poll server is probably in a system call, waiting for an fd
// to become ready. It's not looking at the request channels.
// To resolve this, the poll server waits not just on the FDs it has
// been given but also its own pipe. After sending on the
// buffered channel s.cr/s.cw, WaitRead/WaitWrite writes a
// byte to the pipe, causing the pollServer's poll system call to
// return. In response to the pipe being readable, the pollServer
// re-polls its request channels.
//
// Note that the ordering is "send request" and then "wake up server".
// If the operations were reversed, there would be a race: the poll
// server might wake up and look at the request channel, see that it
// was empty, and go back to sleep, all before the requester managed
// to send the request. Because the send must complete before the wakeup,
// the request channel must be buffered. A buffer of size 1 is sufficient
// for any request load. If many processes are trying to submit requests,
// one will succeed, the pollServer will read the request, and then the
// channel will be empty for the next process's request. A larger buffer
// might help batch requests.
//
// To avoid races in closing, all fd operations are locked and
// refcounted. when netFD.Close() is called, it calls syscall.Shutdown
// and sets a closing flag. Only when the last reference is removed
// will the fd be closed.
// pollServer state; the protocol is described in the comment above.
type pollServer struct {
	cr, cw     chan *netFD // buffered >= 1
	pr, pw     *os.File
	poll       *pollster // low-level OS hooks
	sync.Mutex           // controls pending and deadline
	pending    map[int]*netFD
	deadline   int64 // next deadline (nsec since 1970)
}
// AddFD registers fd with the poll server for a read ('r') or write ('w')
// wait, recording any deadline and waking the server when necessary.
func (s *pollServer) AddFD(fd *netFD, mode int) {
	intfd := fd.sysfd
	if intfd < 0 {
		// fd closed underfoot
		if mode == 'r' {
			fd.cr <- true
		} else {
			fd.cw <- true
		}
		return
	}
	s.Lock()
	var t int64
	// pending key: fd<<1, with the low bit set for write waits
	key := intfd << 1
	if mode == 'r' {
		fd.ncr++
		t = fd.rdeadline
	} else {
		fd.ncw++
		key++
		t = fd.wdeadline
	}
	s.pending[key] = fd
	doWakeup := false
	// A sooner deadline requires re-polling with a shorter timeout.
	if t > 0 && (s.deadline == 0 || t < s.deadline) {
		s.deadline = t
		doWakeup = true
	}
	wake, err := s.poll.AddFD(intfd, mode, false)
	if err != nil {
		panic("pollServer AddFD " + err.String())
	}
	if wake {
		doWakeup = true
	}
	s.Unlock()
	if doWakeup {
		s.Wakeup()
	}
}

var wakeupbuf [1]byte

// Wakeup interrupts the server's poll by writing a byte to its pipe.
func (s *pollServer) Wakeup() { s.pw.Write(wakeupbuf[0:]) }
// LookupFD removes and returns the netFD pending in the given mode, if any.
func (s *pollServer) LookupFD(fd int, mode int) *netFD {
	key := fd << 1
	if mode == 'w' {
		key++
	}
	netfd, ok := s.pending[key]
	if !ok {
		return nil
	}
	s.pending[key] = nil, false
	return netfd
}

// WakeFD releases every goroutine currently waiting on fd in the given mode.
func (s *pollServer) WakeFD(fd *netFD, mode int) {
	if mode == 'r' {
		for fd.ncr > 0 {
			fd.ncr--
			fd.cr <- true
		}
	} else {
		for fd.ncw > 0 {
			fd.ncw--
			fd.cw <- true
		}
	}
}

// Now returns the current time in nanoseconds since 1970.
func (s *pollServer) Now() int64 {
	return time.Nanoseconds()
}
// CheckDeadlines wakes every fd whose deadline has passed and recomputes
// the next soonest deadline. Called with s locked (see Run).
func (s *pollServer) CheckDeadlines() {
	now := s.Now()
	// TODO(rsc): This will need to be handled more efficiently,
	// probably with a heap indexed by wakeup time.
	var next_deadline int64
	for key, fd := range s.pending {
		var t int64
		var mode int
		if key&1 == 0 {
			mode = 'r'
		} else {
			mode = 'w'
		}
		if mode == 'r' {
			t = fd.rdeadline
		} else {
			t = fd.wdeadline
		}
		if t > 0 {
			if t <= now {
				// Deadline expired: drop the pending entry, stop polling
				// this fd, and flag the deadline as missed (-1) so the
				// I/O loops stop retrying.
				s.pending[key] = nil, false
				if mode == 'r' {
					s.poll.DelFD(fd.sysfd, mode)
					fd.rdeadline = -1
				} else {
					s.poll.DelFD(fd.sysfd, mode)
					fd.wdeadline = -1
				}
				s.WakeFD(fd, mode)
			} else if next_deadline == 0 || t < next_deadline {
				next_deadline = t
			}
		}
	}
	s.deadline = next_deadline
}
// Run is the poll server loop: wait for fd readiness or the next deadline,
// then wake the corresponding waiters.
func (s *pollServer) Run() {
	var scratch [100]byte
	s.Lock()
	defer s.Unlock()
	for {
		var t = s.deadline
		if t > 0 {
			// Convert the absolute deadline into a poll timeout.
			t = t - s.Now()
			if t <= 0 {
				s.CheckDeadlines()
				continue
			}
		}
		fd, mode, err := s.poll.WaitFD(s, t)
		if err != nil {
			print("pollServer WaitFD: ", err.String(), "\n")
			return
		}
		if fd < 0 {
			// Timeout happened.
			s.CheckDeadlines()
			continue
		}
		if fd == s.pr.Fd() {
			// Drain our wakeup pipe (we could loop here,
			// but it's unlikely that there are more than
			// len(scratch) wakeup calls).
			s.pr.Read(scratch[0:])
			s.CheckDeadlines()
		} else {
			netfd := s.LookupFD(fd, mode)
			if netfd == nil {
				print("pollServer: unexpected wakeup for fd=", fd, " mode=", string(mode), "\n")
				continue
			}
			s.WakeFD(netfd, mode)
		}
	}
}
// WaitRead blocks until fd is ready for reading (or its deadline passes).
func (s *pollServer) WaitRead(fd *netFD) {
	s.AddFD(fd, 'r')
	<-fd.cr
}

// WaitWrite blocks until fd is ready for writing (or its deadline passes).
func (s *pollServer) WaitWrite(fd *netFD) {
	s.AddFD(fd, 'w')
	<-fd.cw
}

// Network FD methods.

// All the network FDs use a single pollServer.
var pollserver *pollServer
var onceStartServer sync.Once

// startServer creates the shared pollServer; run once via onceStartServer.
func startServer() {
	p, err := newPollServer()
	if err != nil {
		print("Start pollServer: ", err.String(), "\n")
	}
	pollserver = p
}
// newFD wraps an existing socket fd in a netFD, switching it to
// non-blocking mode and lazily starting the shared poll server.
func newFD(fd, family, proto int, net string) (f *netFD, err os.Error) {
	onceStartServer.Do(startServer)
	if e := syscall.SetNonblock(fd, true); e != 0 {
		return nil, os.Errno(e)
	}
	f = &netFD{
		sysfd:  fd,
		family: family,
		proto:  proto,
		net:    net,
	}
	// Buffered (size 1) so a wakeup send never blocks the poll server.
	f.cr = make(chan bool, 1)
	f.cw = make(chan bool, 1)
	return f, nil
}

// setAddr records the local/remote addresses and names the os.File
// after them ("net:laddr->raddr").
func (fd *netFD) setAddr(laddr, raddr Addr) {
	fd.laddr = laddr
	fd.raddr = raddr
	var ls, rs string
	if laddr != nil {
		ls = laddr.String()
	}
	if raddr != nil {
		rs = raddr.String()
	}
	fd.sysfile = os.NewFile(fd.sysfd, fd.net+":"+ls+"->"+rs)
}
// connect issues a non-blocking connect; on EINPROGRESS it waits for
// writability and re-checks the socket error via SO_ERROR.
func (fd *netFD) connect(ra syscall.Sockaddr) (err os.Error) {
	e := syscall.Connect(fd.sysfd, ra)
	if e == syscall.EINPROGRESS {
		var errno int
		pollserver.WaitWrite(fd)
		e, errno = syscall.GetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
		if errno != 0 {
			return os.NewSyscallError("getsockopt", errno)
		}
	}
	if e != 0 {
		return os.Errno(e)
	}
	return nil
}

// Add a reference to this fd.
func (fd *netFD) incref() {
	fd.sysmu.Lock()
	fd.sysref++
	fd.sysmu.Unlock()
}
// Remove a reference to this FD and close if we've been asked to do so
// (and there are no references left).
func (fd *netFD) decref() {
	fd.sysmu.Lock()
	fd.sysref--
	if fd.closing && fd.sysref == 0 && fd.sysfd >= 0 {
		// In case the user has set linger, switch to blocking mode so
		// the close blocks. As long as this doesn't happen often, we
		// can handle the extra OS processes. Otherwise we'll need to
		// use the pollserver for Close too. Sigh.
		syscall.SetNonblock(fd.sysfd, false)
		fd.sysfile.Close()
		fd.sysfile = nil
		fd.sysfd = -1
	}
	fd.sysmu.Unlock()
}

// Close shuts the socket down and marks it closing; the fd itself is
// released by the final decref.
func (fd *netFD) Close() os.Error {
	if fd == nil || fd.sysfile == nil {
		return os.EINVAL
	}
	fd.incref()
	syscall.Shutdown(fd.sysfd, syscall.SHUT_RDWR)
	fd.closing = true
	fd.decref()
	return nil
}
// Read reads from the socket, retrying via the poll server on
// EAGAIN/EINTR until data arrives, EOF, an error, or the read deadline
// expires (rdeadline becomes -1).
func (fd *netFD) Read(p []byte) (n int, err os.Error) {
	if fd == nil {
		return 0, os.EINVAL
	}
	fd.rio.Lock()
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.sysfile == nil {
		return 0, os.EINVAL
	}
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, errno = syscall.Read(fd.sysfile.Fd(), p)
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.rdeadline >= 0 {
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
		} else if n == 0 && errno == 0 && fd.proto != syscall.SOCK_DGRAM {
			// zero-length read on a stream socket means EOF
			err = os.EOF
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.raddr, oserr}
	}
	return
}
// ReadFrom reads a packet and its source address, retrying on
// EAGAIN/EINTR until the read deadline expires.
func (fd *netFD) ReadFrom(p []byte) (n int, sa syscall.Sockaddr, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, nil, os.EINVAL
	}
	fd.rio.Lock()
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, sa, errno = syscall.Recvfrom(fd.sysfd, p, 0)
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.rdeadline >= 0 {
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.laddr, oserr}
	}
	return
}
// ReadMsg reads a message plus out-of-band data, retrying on
// EAGAIN/EINTR until the read deadline expires.
func (fd *netFD) ReadMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, 0, 0, nil, os.EINVAL
	}
	fd.rio.Lock()
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, oobn, flags, sa, errno = syscall.Recvmsg(fd.sysfd, p, oob, 0)
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.rdeadline >= 0 {
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		}
		// a zero-length message is treated as end of file
		if n == 0 {
			oserr = os.EOF
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.laddr, oserr}
		return
	}
	return
}
// Write writes all of p, looping on short writes and retrying on
// EAGAIN/EINTR until the write deadline expires.
func (fd *netFD) Write(p []byte) (n int, err os.Error) {
	if fd == nil {
		return 0, os.EINVAL
	}
	fd.wio.Lock()
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.sysfile == nil {
		return 0, os.EINVAL
	}
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	// nn tracks the total bytes written so far
	nn := 0
	var oserr os.Error
	for {
		n, errno := syscall.Write(fd.sysfile.Fd(), p[nn:])
		if n > 0 {
			nn += n
		}
		if nn == len(p) {
			break
		}
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.wdeadline >= 0 {
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
			break
		}
		if n == 0 {
			oserr = io.ErrUnexpectedEOF
			break
		}
	}
	if oserr != nil {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return nn, err
}
// WriteTo sends p to sa, retrying on EAGAIN/EINTR until the write
// deadline expires.
func (fd *netFD) WriteTo(p []byte, sa syscall.Sockaddr) (n int, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, os.EINVAL
	}
	fd.wio.Lock()
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	var oserr os.Error
	for {
		errno := syscall.Sendto(fd.sysfd, p, 0, sa)
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.wdeadline >= 0 {
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr == nil {
		// success: Sendto transmitted the whole datagram
		n = len(p)
	} else {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return
}
// WriteMsg sends p plus out-of-band data oob to sa, retrying on
// EAGAIN/EINTR until the write deadline expires.
func (fd *netFD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, 0, os.EINVAL
	}
	fd.wio.Lock()
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		errno = syscall.Sendmsg(fd.sysfd, p, oob, sa, 0)
		if (errno == syscall.EAGAIN || errno == syscall.EINTR) && fd.wdeadline >= 0 {
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr == nil {
		// success: the whole message and oob payload were sent
		n = len(p)
		oobn = len(oob)
	} else {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return
}
// accept waits for and accepts a connection, wrapping the new socket in
// a netFD with addresses converted by toAddr.
func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (nfd *netFD, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return nil, os.EINVAL
	}
	fd.incref()
	defer fd.decref()
	// See ../syscall/exec.go for description of ForkLock.
	// It is okay to hold the lock across syscall.Accept
	// because we have put fd.sysfd into non-blocking mode.
	syscall.ForkLock.RLock()
	var s, e int
	var sa syscall.Sockaddr
	for {
		if fd.closing {
			syscall.ForkLock.RUnlock()
			return nil, os.EINVAL
		}
		s, sa, e = syscall.Accept(fd.sysfd)
		if e != syscall.EAGAIN && e != syscall.EINTR {
			break
		}
		// Release the fork lock while blocked in the poll server.
		syscall.ForkLock.RUnlock()
		pollserver.WaitRead(fd)
		syscall.ForkLock.RLock()
	}
	if e != 0 {
		syscall.ForkLock.RUnlock()
		return nil, &OpError{"accept", fd.net, fd.laddr, os.Errno(e)}
	}
	syscall.CloseOnExec(s)
	syscall.ForkLock.RUnlock()
	if nfd, err = newFD(s, fd.family, fd.proto, fd.net); err != nil {
		syscall.Close(s)
		return nil, err
	}
	nfd.setAddr(fd.laddr, toAddr(sa))
	return nfd, nil
}
// dup duplicates the socket as a blocking *os.File.
func (fd *netFD) dup() (f *os.File, err os.Error) {
	ns, e := syscall.Dup(fd.sysfd)
	if e != 0 {
		return nil, &OpError{"dup", fd.net, fd.laddr, os.Errno(e)}
	}
	// We want blocking mode for the new fd, hence the double negative.
	if e = syscall.SetNonblock(ns, false); e != 0 {
		return nil, &OpError{"setnonblock", fd.net, fd.laddr, os.Errno(e)}
	}
	return os.NewFile(ns, fd.sysfile.Name()), nil
}

// closesocket closes the raw socket fd.
func closesocket(s int) (errno int) {
	return syscall.Close(s)
}
libgo: Remove unnecessary EINTR checks.
We always use SA_RESTART with signals, so
read/write/etc. should never return EINTR.
R=iant
CC=gofrontend-dev
https://golang.org/cl/4325049
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"io"
"os"
"sync"
"syscall"
"time"
)
// Network file descriptor.
// netFD holds the state for one socket: the system fd, refcounting for
// safe close, the wait channels used with the pollServer, and the
// per-direction deadlines and locks.
type netFD struct {
	// locking/lifetime of sysfd
	sysmu   sync.Mutex
	sysref  int
	closing bool

	// immutable until Close
	sysfd   int
	family  int
	proto   int
	sysfile *os.File
	cr      chan bool
	cw      chan bool
	net     string
	laddr   Addr
	raddr   Addr

	// owned by client
	rdeadline_delta int64
	rdeadline       int64
	rio             sync.Mutex
	wdeadline_delta int64
	wdeadline       int64
	wio             sync.Mutex

	// owned by fd wait server
	ncr, ncw int
}

// InvalidConnError reports an operation on a closed or invalid net.Conn.
type InvalidConnError struct{}

func (e *InvalidConnError) String() string  { return "invalid net.Conn" }
func (e *InvalidConnError) Temporary() bool { return false }
func (e *InvalidConnError) Timeout() bool   { return false }
// A pollServer helps FDs determine when to retry a non-blocking
// read or write after they get EAGAIN. When an FD needs to wait,
// send the fd on s.cr (for a read) or s.cw (for a write) to pass the
// request to the poll server. Then receive on fd.cr/fd.cw.
// When the pollServer finds that i/o on FD should be possible
// again, it will send fd on fd.cr/fd.cw to wake any waiting processes.
// This protocol is implemented as s.WaitRead() and s.WaitWrite().
//
// There is one subtlety: when sending on s.cr/s.cw, the
// poll server is probably in a system call, waiting for an fd
// to become ready. It's not looking at the request channels.
// To resolve this, the poll server waits not just on the FDs it has
// been given but also its own pipe. After sending on the
// buffered channel s.cr/s.cw, WaitRead/WaitWrite writes a
// byte to the pipe, causing the pollServer's poll system call to
// return. In response to the pipe being readable, the pollServer
// re-polls its request channels.
//
// Note that the ordering is "send request" and then "wake up server".
// If the operations were reversed, there would be a race: the poll
// server might wake up and look at the request channel, see that it
// was empty, and go back to sleep, all before the requester managed
// to send the request. Because the send must complete before the wakeup,
// the request channel must be buffered. A buffer of size 1 is sufficient
// for any request load. If many processes are trying to submit requests,
// one will succeed, the pollServer will read the request, and then the
// channel will be empty for the next process's request. A larger buffer
// might help batch requests.
//
// To avoid races in closing, all fd operations are locked and
// refcounted. when netFD.Close() is called, it calls syscall.Shutdown
// and sets a closing flag. Only when the last reference is removed
// will the fd be closed.
// pollServer state; the protocol is described in the comment above.
type pollServer struct {
	cr, cw     chan *netFD // buffered >= 1
	pr, pw     *os.File
	poll       *pollster // low-level OS hooks
	sync.Mutex           // controls pending and deadline
	pending    map[int]*netFD
	deadline   int64 // next deadline (nsec since 1970)
}
// AddFD registers fd with the poll server for a read ('r') or write ('w')
// wait, recording any deadline and waking the server when necessary.
func (s *pollServer) AddFD(fd *netFD, mode int) {
	intfd := fd.sysfd
	if intfd < 0 {
		// fd closed underfoot
		if mode == 'r' {
			fd.cr <- true
		} else {
			fd.cw <- true
		}
		return
	}
	s.Lock()
	var t int64
	// pending key: fd<<1, with the low bit set for write waits
	key := intfd << 1
	if mode == 'r' {
		fd.ncr++
		t = fd.rdeadline
	} else {
		fd.ncw++
		key++
		t = fd.wdeadline
	}
	s.pending[key] = fd
	doWakeup := false
	// A sooner deadline requires re-polling with a shorter timeout.
	if t > 0 && (s.deadline == 0 || t < s.deadline) {
		s.deadline = t
		doWakeup = true
	}
	wake, err := s.poll.AddFD(intfd, mode, false)
	if err != nil {
		panic("pollServer AddFD " + err.String())
	}
	if wake {
		doWakeup = true
	}
	s.Unlock()
	if doWakeup {
		s.Wakeup()
	}
}

var wakeupbuf [1]byte

// Wakeup interrupts the server's poll by writing a byte to its pipe.
func (s *pollServer) Wakeup() { s.pw.Write(wakeupbuf[0:]) }
// LookupFD removes and returns the netFD pending in the given mode, if any.
func (s *pollServer) LookupFD(fd int, mode int) *netFD {
	key := fd << 1
	if mode == 'w' {
		key++
	}
	netfd, ok := s.pending[key]
	if !ok {
		return nil
	}
	s.pending[key] = nil, false
	return netfd
}

// WakeFD releases every goroutine currently waiting on fd in the given mode.
func (s *pollServer) WakeFD(fd *netFD, mode int) {
	if mode == 'r' {
		for fd.ncr > 0 {
			fd.ncr--
			fd.cr <- true
		}
	} else {
		for fd.ncw > 0 {
			fd.ncw--
			fd.cw <- true
		}
	}
}

// Now returns the current time in nanoseconds since 1970.
func (s *pollServer) Now() int64 {
	return time.Nanoseconds()
}
// CheckDeadlines wakes every fd whose deadline has passed and recomputes
// the next soonest deadline. Called with s locked (see Run).
func (s *pollServer) CheckDeadlines() {
	now := s.Now()
	// TODO(rsc): This will need to be handled more efficiently,
	// probably with a heap indexed by wakeup time.
	var next_deadline int64
	for key, fd := range s.pending {
		var t int64
		var mode int
		if key&1 == 0 {
			mode = 'r'
		} else {
			mode = 'w'
		}
		if mode == 'r' {
			t = fd.rdeadline
		} else {
			t = fd.wdeadline
		}
		if t > 0 {
			if t <= now {
				// Deadline expired: drop the pending entry, stop polling
				// this fd, and flag the deadline as missed (-1) so the
				// I/O loops stop retrying.
				s.pending[key] = nil, false
				if mode == 'r' {
					s.poll.DelFD(fd.sysfd, mode)
					fd.rdeadline = -1
				} else {
					s.poll.DelFD(fd.sysfd, mode)
					fd.wdeadline = -1
				}
				s.WakeFD(fd, mode)
			} else if next_deadline == 0 || t < next_deadline {
				next_deadline = t
			}
		}
	}
	s.deadline = next_deadline
}
// Run is the poll server loop: wait for fd readiness or the next deadline,
// then wake the corresponding waiters.
func (s *pollServer) Run() {
	var scratch [100]byte
	s.Lock()
	defer s.Unlock()
	for {
		var t = s.deadline
		if t > 0 {
			// Convert the absolute deadline into a poll timeout.
			t = t - s.Now()
			if t <= 0 {
				s.CheckDeadlines()
				continue
			}
		}
		fd, mode, err := s.poll.WaitFD(s, t)
		if err != nil {
			print("pollServer WaitFD: ", err.String(), "\n")
			return
		}
		if fd < 0 {
			// Timeout happened.
			s.CheckDeadlines()
			continue
		}
		if fd == s.pr.Fd() {
			// Drain our wakeup pipe (we could loop here,
			// but it's unlikely that there are more than
			// len(scratch) wakeup calls).
			s.pr.Read(scratch[0:])
			s.CheckDeadlines()
		} else {
			netfd := s.LookupFD(fd, mode)
			if netfd == nil {
				print("pollServer: unexpected wakeup for fd=", fd, " mode=", string(mode), "\n")
				continue
			}
			s.WakeFD(netfd, mode)
		}
	}
}
// WaitRead blocks until fd is ready for reading (or its deadline passes).
func (s *pollServer) WaitRead(fd *netFD) {
	s.AddFD(fd, 'r')
	<-fd.cr
}

// WaitWrite blocks until fd is ready for writing (or its deadline passes).
func (s *pollServer) WaitWrite(fd *netFD) {
	s.AddFD(fd, 'w')
	<-fd.cw
}

// Network FD methods.

// All the network FDs use a single pollServer.
var pollserver *pollServer
var onceStartServer sync.Once

// startServer creates the shared pollServer; run once via onceStartServer.
func startServer() {
	p, err := newPollServer()
	if err != nil {
		print("Start pollServer: ", err.String(), "\n")
	}
	pollserver = p
}
// newFD wraps an existing socket fd in a netFD, switching it to
// non-blocking mode and lazily starting the shared poll server.
func newFD(fd, family, proto int, net string) (f *netFD, err os.Error) {
	onceStartServer.Do(startServer)
	if e := syscall.SetNonblock(fd, true); e != 0 {
		return nil, os.Errno(e)
	}
	f = &netFD{
		sysfd:  fd,
		family: family,
		proto:  proto,
		net:    net,
	}
	// Buffered (size 1) so a wakeup send never blocks the poll server.
	f.cr = make(chan bool, 1)
	f.cw = make(chan bool, 1)
	return f, nil
}

// setAddr records the local/remote addresses and names the os.File
// after them ("net:laddr->raddr").
func (fd *netFD) setAddr(laddr, raddr Addr) {
	fd.laddr = laddr
	fd.raddr = raddr
	var ls, rs string
	if laddr != nil {
		ls = laddr.String()
	}
	if raddr != nil {
		rs = raddr.String()
	}
	fd.sysfile = os.NewFile(fd.sysfd, fd.net+":"+ls+"->"+rs)
}
func (fd *netFD) connect(ra syscall.Sockaddr) (err os.Error) {
e := syscall.Connect(fd.sysfd, ra)
if e == syscall.EINPROGRESS {
var errno int
pollserver.WaitWrite(fd)
e, errno = syscall.GetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
if errno != 0 {
return os.NewSyscallError("getsockopt", errno)
}
}
if e != 0 {
return os.Errno(e)
}
return nil
}
// Add a reference to this fd.
func (fd *netFD) incref() {
	fd.sysmu.Lock()
	fd.sysref++
	fd.sysmu.Unlock()
}

// Remove a reference to this FD and close if we've been asked to do so (and
// there are no references left.
func (fd *netFD) decref() {
	fd.sysmu.Lock()
	fd.sysref--
	// Only the final decref after Close actually releases the descriptor.
	if fd.closing && fd.sysref == 0 && fd.sysfd >= 0 {
		// In case the user has set linger, switch to blocking mode so
		// the close blocks. As long as this doesn't happen often, we
		// can handle the extra OS processes. Otherwise we'll need to
		// use the pollserver for Close too. Sigh.
		syscall.SetNonblock(fd.sysfd, false)
		fd.sysfile.Close()
		fd.sysfile = nil
		fd.sysfd = -1 // mark released so a double close is impossible
	}
	fd.sysmu.Unlock()
}
// Close shuts down both directions of the socket and marks the fd as
// closing; the underlying file is released by the last decref.
func (fd *netFD) Close() os.Error {
	if fd == nil || fd.sysfile == nil {
		return os.EINVAL
	}
	// Hold a reference so the decref below performs the real close only
	// after every in-flight operation has finished.
	fd.incref()
	syscall.Shutdown(fd.sysfd, syscall.SHUT_RDWR)
	fd.closing = true
	fd.decref()
	return nil
}
// Read reads from the connection, parking on the poll server until data
// is available or the read deadline (rdeadline_delta) expires.
func (fd *netFD) Read(p []byte) (n int, err os.Error) {
	if fd == nil {
		return 0, os.EINVAL
	}
	fd.rio.Lock() // serialize readers
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.sysfile == nil {
		return 0, os.EINVAL
	}
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, errno = syscall.Read(fd.sysfile.Fd(), p)
		if errno == syscall.EAGAIN && fd.rdeadline >= 0 {
			// Nothing available yet: park until readable, then retry.
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
		} else if n == 0 && errno == 0 && fd.proto != syscall.SOCK_DGRAM {
			// Zero-byte read on a stream socket is EOF; a zero-length
			// datagram is not.  os.EOF is assigned to err directly so
			// it is not wrapped in an OpError below.
			err = os.EOF
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.raddr, oserr}
	}
	return
}
// ReadFrom receives a single datagram, returning the sender's sockaddr.
// It honors the read deadline the same way Read does.
func (fd *netFD) ReadFrom(p []byte) (n int, sa syscall.Sockaddr, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, nil, os.EINVAL
	}
	fd.rio.Lock() // serialize readers
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, sa, errno = syscall.Recvfrom(fd.sysfd, p, 0)
		if errno == syscall.EAGAIN && fd.rdeadline >= 0 {
			// Not readable yet: park and retry.
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.laddr, oserr}
	}
	return
}
// ReadMsg receives a message plus out-of-band (control) data via
// recvmsg(2), honoring the read deadline the same way Read does.
//
// Fixes vs. the previous version: a zero-length receive was reported as
// os.EOF unconditionally, which (a) clobbered a real errno and (b)
// misreported valid zero-length datagrams.  EOF is now reported only for
// error-free zero reads on non-datagram sockets, consistent with Read.
func (fd *netFD) ReadMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, 0, 0, nil, os.EINVAL
	}
	fd.rio.Lock() // serialize readers
	defer fd.rio.Unlock()
	fd.incref()
	defer fd.decref()
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.rdeadline_delta > 0 {
		fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
	} else {
		fd.rdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		n, oobn, flags, sa, errno = syscall.Recvmsg(fd.sysfd, p, oob, 0)
		if errno == syscall.EAGAIN && fd.rdeadline >= 0 {
			// Not readable yet: park and retry.
			pollserver.WaitRead(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		} else if n == 0 && fd.proto != syscall.SOCK_DGRAM {
			// Error-free zero read on a stream socket means EOF.
			oserr = os.EOF
		}
		break
	}
	if oserr != nil {
		err = &OpError{"read", fd.net, fd.laddr, oserr}
	}
	return
}
// Write writes all of p to the connection, looping until the full slice
// is written, an error occurs, or the write deadline expires.
func (fd *netFD) Write(p []byte) (n int, err os.Error) {
	if fd == nil {
		return 0, os.EINVAL
	}
	fd.wio.Lock() // serialize writers
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	if fd.sysfile == nil {
		return 0, os.EINVAL
	}
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	nn := 0 // total bytes written so far; this (not n) is what is returned
	var oserr os.Error
	for {
		// Note: this n shadows the named result; the loop tracks the
		// running total in nn instead.
		n, errno := syscall.Write(fd.sysfile.Fd(), p[nn:])
		if n > 0 {
			nn += n
		}
		if nn == len(p) {
			break
		}
		if errno == syscall.EAGAIN && fd.wdeadline >= 0 {
			// Kernel buffer full: park until writable, then retry.
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			n = 0
			oserr = os.Errno(errno)
			break
		}
		if n == 0 {
			// Zero-byte write with no errno: treat as unexpected EOF.
			oserr = io.ErrUnexpectedEOF
			break
		}
	}
	if oserr != nil {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return nn, err
}
// WriteTo sends p as a single datagram to sa, honoring the write
// deadline.  On success n is reported as len(p), per sendto semantics.
func (fd *netFD) WriteTo(p []byte, sa syscall.Sockaddr) (n int, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, os.EINVAL
	}
	fd.wio.Lock() // serialize writers
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	var oserr os.Error
	for {
		errno := syscall.Sendto(fd.sysfd, p, 0, sa)
		if errno == syscall.EAGAIN && fd.wdeadline >= 0 {
			// Kernel buffer full: park until writable, then retry.
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr == nil {
		n = len(p)
	} else {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return
}
// WriteMsg sends a message plus out-of-band (control) data to sa via
// sendmsg(2), honoring the write deadline.  On success n and oobn are
// reported as the full lengths of p and oob.
func (fd *netFD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return 0, 0, os.EINVAL
	}
	fd.wio.Lock() // serialize writers
	defer fd.wio.Unlock()
	fd.incref()
	defer fd.decref()
	// Convert the relative deadline, if any, to an absolute instant.
	if fd.wdeadline_delta > 0 {
		fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
	} else {
		fd.wdeadline = 0
	}
	var oserr os.Error
	for {
		var errno int
		errno = syscall.Sendmsg(fd.sysfd, p, oob, sa, 0)
		if errno == syscall.EAGAIN && fd.wdeadline >= 0 {
			// Kernel buffer full: park until writable, then retry.
			pollserver.WaitWrite(fd)
			continue
		}
		if errno != 0 {
			oserr = os.Errno(errno)
		}
		break
	}
	if oserr == nil {
		n = len(p)
		oobn = len(oob)
	} else {
		err = &OpError{"write", fd.net, fd.raddr, oserr}
	}
	return
}
// accept waits for and accepts a new connection, wrapping it in a fresh
// netFD.  toAddr converts the kernel sockaddr into this network's Addr.
func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (nfd *netFD, err os.Error) {
	if fd == nil || fd.sysfile == nil {
		return nil, os.EINVAL
	}
	fd.incref()
	defer fd.decref()
	// See ../syscall/exec.go for description of ForkLock.
	// It is okay to hold the lock across syscall.Accept
	// because we have put fd.sysfd into non-blocking mode.
	syscall.ForkLock.RLock()
	var s, e int
	var sa syscall.Sockaddr
	for {
		if fd.closing {
			syscall.ForkLock.RUnlock()
			return nil, os.EINVAL
		}
		s, sa, e = syscall.Accept(fd.sysfd)
		if e != syscall.EAGAIN {
			break
		}
		// Nothing pending: release the fork lock while parked.
		syscall.ForkLock.RUnlock()
		pollserver.WaitRead(fd)
		syscall.ForkLock.RLock()
	}
	if e != 0 {
		syscall.ForkLock.RUnlock()
		return nil, &OpError{"accept", fd.net, fd.laddr, os.Errno(e)}
	}
	// Mark close-on-exec before releasing the fork lock so the new fd
	// cannot leak into a forked child.
	syscall.CloseOnExec(s)
	syscall.ForkLock.RUnlock()
	if nfd, err = newFD(s, fd.family, fd.proto, fd.net); err != nil {
		syscall.Close(s)
		return nil, err
	}
	nfd.setAddr(fd.laddr, toAddr(sa))
	return nfd, nil
}
// dup duplicates the socket as a blocking *os.File for callers that want
// ordinary file semantics.
func (fd *netFD) dup() (f *os.File, err os.Error) {
	ns, e := syscall.Dup(fd.sysfd)
	if e != 0 {
		return nil, &OpError{"dup", fd.net, fd.laddr, os.Errno(e)}
	}
	// We want blocking mode for the new fd, hence the double negative.
	if e = syscall.SetNonblock(ns, false); e != 0 {
		return nil, &OpError{"setnonblock", fd.net, fd.laddr, os.Errno(e)}
	}
	return os.NewFile(ns, fd.sysfile.Name()), nil
}

// closesocket closes a raw socket descriptor, returning the raw errno.
func closesocket(s int) (errno int) {
	return syscall.Close(s)
}
|
package types
import (
"bytes"
"compress/gzip"
"database/sql/driver"
"encoding/json"
"errors"
"io/ioutil"
)
// GzippedText is a []byte which transparently gzips data being submitted to
// a database and ungzips data being Scanned from a database.
type GzippedText []byte

// Value implements the driver.Valuer interface, gzipping the raw value of
// this GzippedText.
//
// Fix: Write and Close errors were previously discarded; gzip.Writer.Close
// flushes remaining data, so its error must be checked or truncated output
// could be stored silently.
func (g GzippedText) Value() (driver.Value, error) {
	buf := bytes.NewBuffer(make([]byte, 0, len(g)))
	w := gzip.NewWriter(buf)
	if _, err := w.Write(g); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // Close flushes; its error matters
		return nil, err
	}
	return buf.Bytes(), nil
}

// Scan implements the sql.Scanner interface, ungzipping the value coming off
// the wire and storing the raw result in the GzippedText.
func (g *GzippedText) Scan(src interface{}) error {
	var source []byte
	switch t := src.(type) {
	case string:
		source = []byte(t)
	case []byte:
		source = t
	default:
		return errors.New("Incompatible type for GzippedText")
	}
	reader, err := gzip.NewReader(bytes.NewReader(source))
	if err != nil {
		return err
	}
	defer reader.Close()
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	*g = GzippedText(b)
	return nil
}
// JSONText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JSONText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}
type JSONText json.RawMessage

// _EMPTY_JSON is the canonical representation of an empty JSON document.
var _EMPTY_JSON = JSONText("{}")

// MarshalJSON returns the *j as the JSON encoding of j.
// An empty JSONText is normalized in place to "{}" so the result is
// always valid JSON.
func (j *JSONText) MarshalJSON() ([]byte, error) {
	if len(*j) == 0 {
		*j = _EMPTY_JSON
	}
	return *j, nil
}

// UnmarshalJSON sets *j to a copy of data.
func (j *JSONText) UnmarshalJSON(data []byte) error {
	if j == nil {
		return errors.New("JSONText: UnmarshalJSON on nil pointer")
	}
	*j = append((*j)[0:0], data...)
	return nil
}

// Value implements driver.Valuer.  It does a validating unmarshal into a
// throwaway RawMessage; invalid JSON yields an error.
func (j JSONText) Value() (driver.Value, error) {
	var m json.RawMessage
	var err = j.Unmarshal(&m)
	if err != nil {
		return []byte{}, err
	}
	return []byte(j), nil
}

// Scan stores the src in *j. No validation is done.
// NOTE(review): the nil case assigns _EMPTY_JSON but then falls through to
// the final truncating append with a nil source, leaving *j empty —
// preserved as-is; confirm intended.
func (j *JSONText) Scan(src interface{}) error {
	var source []byte
	switch t := src.(type) {
	case string:
		source = []byte(t)
	case []byte:
		if len(t) == 0 {
			source = _EMPTY_JSON
		} else {
			source = t
		}
	case nil:
		*j = _EMPTY_JSON
	default:
		return errors.New("Incompatible type for JSONText")
	}
	*j = JSONText(append((*j)[0:0], source...))
	return nil
}

// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal.
func (j *JSONText) Unmarshal(v interface{}) error {
	if len(*j) == 0 {
		*j = _EMPTY_JSON
	}
	return json.Unmarshal([]byte(*j), v)
}

// String supports pretty printing for JSONText types.
func (j JSONText) String() string {
	return string(j)
}

// NullJSONText represents a JSONText that may be null.
// NullJSONText implements the scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullJSONText struct {
	JSONText
	Valid bool // Valid is true if JSONText is not NULL
}

// Scan implements the Scanner interface.
func (n *NullJSONText) Scan(value interface{}) error {
	if value == nil {
		n.JSONText, n.Valid = _EMPTY_JSON, false
		return nil
	}
	n.Valid = true
	return n.JSONText.Scan(value)
}

// Value implements the driver Valuer interface.
// Fix: declared on a value receiver so that both NullJSONText and
// *NullJSONText satisfy driver.Valuer (the pointer form previously did
// not work when the struct was passed by value to database/sql).
func (n NullJSONText) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.JSONText.Value()
}
// BitBool is an implementation of a bool for the MySQL type BIT(1).
// This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT.
type BitBool bool

// Value implements the driver.Valuer interface,
// and turns the BitBool into a bitfield (BIT(1)) for MySQL storage.
func (b BitBool) Value() (driver.Value, error) {
	if b {
		return []byte{1}, nil
	}
	return []byte{0}, nil
}

// Scan implements the sql.Scanner interface,
// and turns the bitfield incoming from MySQL into a BitBool.
// Fix: an empty bitfield now returns an error instead of panicking on v[0].
func (b *BitBool) Scan(src interface{}) error {
	v, ok := src.([]byte)
	if !ok {
		return errors.New("bad []byte type assertion")
	}
	if len(v) == 0 {
		return errors.New("empty []byte for BitBool")
	}
	*b = v[0] == 1
	return nil
}
Value() should be defined on a non-pointer receiver
package types
import (
"bytes"
"compress/gzip"
"database/sql/driver"
"encoding/json"
"errors"
"io/ioutil"
)
// GzippedText is a []byte which transparently gzips data being submitted to
// a database and ungzips data being Scanned from a database.
type GzippedText []byte

// Value implements the driver.Valuer interface, gzipping the raw value of
// this GzippedText.
//
// Fix: Write and Close errors were previously discarded; gzip.Writer.Close
// flushes remaining data, so its error must be checked or truncated output
// could be stored silently.
func (g GzippedText) Value() (driver.Value, error) {
	buf := bytes.NewBuffer(make([]byte, 0, len(g)))
	w := gzip.NewWriter(buf)
	if _, err := w.Write(g); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // Close flushes; its error matters
		return nil, err
	}
	return buf.Bytes(), nil
}

// Scan implements the sql.Scanner interface, ungzipping the value coming off
// the wire and storing the raw result in the GzippedText.
func (g *GzippedText) Scan(src interface{}) error {
	var source []byte
	switch t := src.(type) {
	case string:
		source = []byte(t)
	case []byte:
		source = t
	default:
		return errors.New("Incompatible type for GzippedText")
	}
	reader, err := gzip.NewReader(bytes.NewReader(source))
	if err != nil {
		return err
	}
	defer reader.Close()
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	*g = GzippedText(b)
	return nil
}
// JSONText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JSONText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}
type JSONText json.RawMessage

// _EMPTY_JSON is the canonical representation of an empty JSON document.
var _EMPTY_JSON = JSONText("{}")

// MarshalJSON returns the *j as the JSON encoding of j.
// An empty JSONText is normalized in place to "{}".
func (j *JSONText) MarshalJSON() ([]byte, error) {
	if len(*j) == 0 {
		*j = _EMPTY_JSON
	}
	return *j, nil
}

// UnmarshalJSON sets *j to a copy of data
func (j *JSONText) UnmarshalJSON(data []byte) error {
	if j == nil {
		return errors.New("JSONText: UnmarshalJSON on nil pointer")
	}
	*j = append((*j)[0:0], data...)
	return nil
}

// Value returns j as a value. This does a validating unmarshal into another
// RawMessage. If j is invalid json, it returns an error.
func (j JSONText) Value() (driver.Value, error) {
	var m json.RawMessage
	var err = j.Unmarshal(&m)
	if err != nil {
		return []byte{}, err
	}
	return []byte(j), nil
}

// Scan stores the src in *j. No validation is done.
// NOTE(review): the nil case assigns _EMPTY_JSON but then falls through to
// the final truncating append with a nil source, leaving *j empty — confirm
// intended.
func (j *JSONText) Scan(src interface{}) error {
	var source []byte
	switch t := src.(type) {
	case string:
		source = []byte(t)
	case []byte:
		if len(t) == 0 {
			source = _EMPTY_JSON
		} else {
			source = t
		}
	case nil:
		*j = _EMPTY_JSON
	default:
		return errors.New("Incompatible type for JSONText")
	}
	*j = JSONText(append((*j)[0:0], source...))
	return nil
}

// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal.
func (j *JSONText) Unmarshal(v interface{}) error {
	if len(*j) == 0 {
		*j = _EMPTY_JSON
	}
	return json.Unmarshal([]byte(*j), v)
}

// String supports pretty printing for JSONText types.
func (j JSONText) String() string {
	return string(j)
}

// NullJSONText represents a JSONText that may be null.
// NullJSONText implements the scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullJSONText struct {
	JSONText
	Valid bool // Valid is true if JSONText is not NULL
}

// Scan implements the Scanner interface.
func (n *NullJSONText) Scan(value interface{}) error {
	if value == nil {
		n.JSONText, n.Valid = _EMPTY_JSON, false
		return nil
	}
	n.Valid = true
	return n.JSONText.Scan(value)
}

// Value implements the driver Valuer interface.
// Deliberately a value receiver so both NullJSONText and *NullJSONText
// satisfy driver.Valuer.
func (n NullJSONText) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.JSONText.Value()
}
// BitBool is an implementation of a bool for the MySQL type BIT(1).
// This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT.
type BitBool bool

// Value implements the driver.Valuer interface,
// and turns the BitBool into a bitfield (BIT(1)) for MySQL storage.
func (b BitBool) Value() (driver.Value, error) {
	if b {
		return []byte{1}, nil
	}
	return []byte{0}, nil
}

// Scan implements the sql.Scanner interface,
// and turns the bitfield incoming from MySQL into a BitBool.
// Fix: an empty bitfield now returns an error instead of panicking on v[0].
func (b *BitBool) Scan(src interface{}) error {
	v, ok := src.([]byte)
	if !ok {
		return errors.New("bad []byte type assertion")
	}
	if len(v) == 0 {
		return errors.New("empty []byte for BitBool")
	}
	*b = v[0] == 1
	return nil
}
|
package types
import (
"fmt"
"time"
)
// KeelDefaultPort is the default port (per its name) — NOTE(review):
// confirm which listener consumes it.
const KeelDefaultPort = 9300

// KeelPolicyLabel is the label key used to attach an update policy.
const KeelPolicyLabel = "keel.observer/policy"

// Repository identifies a container image by registry host, name and tag.
type Repository struct {
	Host string `json:"host,omitempty"`
	Name string `json:"name,omitempty"`
	Tag  string `json:"tag,omitempty"`
}

// Event describes a repository update notification.
type Event struct {
	Repository Repository `json:"repository,omitempty"`
	CreatedAt  time.Time  `json:"createdAt,omitempty"`
	// optional field to identify trigger
	TriggerName string `json:"triggerName,omitempty"`
}
// Version is a parsed semantic version.
type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease string
	Metadata   string
}

// String renders the full semantic version in the canonical
// "MAJOR.MINOR.PATCH[-PRERELEASE][+METADATA]" form.
//
// Fix: the PreRelease and Metadata fields were previously dropped from
// the output even though the struct carries them.
func (v Version) String() string {
	s := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
	if v.PreRelease != "" {
		s += "-" + v.PreRelease
	}
	if v.Metadata != "" {
		s += "+" + v.Metadata
	}
	return s
}
// PolicyType - policy type
type PolicyType int

// available policies
const (
	PolicyTypeUnknown = iota
	PolicyTypeAll
	PolicyTypeMajor
	PolicyTypeMinor
	PolicyTypePatch
	PolicyTypeForce // update always when a new image is available
)

// ParsePolicy - parse policy type.
//
// Fix: "force" is now recognized; previously PolicyType.String rendered
// PolicyTypeForce as "force" but ParsePolicy could not parse it back,
// breaking the round trip.
func ParsePolicy(policy string) PolicyType {
	switch policy {
	case "all":
		return PolicyTypeAll
	case "major":
		return PolicyTypeMajor
	case "minor":
		return PolicyTypeMinor
	case "patch":
		return PolicyTypePatch
	case "force":
		return PolicyTypeForce
	default:
		return PolicyTypeUnknown
	}
}

// String returns the lowercase name of the policy ("" for invalid values).
func (t PolicyType) String() string {
	switch t {
	case PolicyTypeUnknown:
		return "unknown"
	case PolicyTypeAll:
		return "all"
	case PolicyTypeMajor:
		return "major"
	case PolicyTypeMinor:
		return "minor"
	case PolicyTypePatch:
		return "patch"
	case PolicyTypeForce:
		return "force"
	default:
		return ""
	}
}
Regenerate the full tag string: include the version prefix, pre-release and build metadata in Version.String
package types
import (
"bytes"
"fmt"
"time"
)
// KeelDefaultPort is the default port (per its name) — NOTE(review):
// confirm which listener consumes it.
const KeelDefaultPort = 9300

// KeelPolicyLabel is the label key used to attach an update policy.
const KeelPolicyLabel = "keel.observer/policy"

// Repository identifies a container image by registry host, name and tag.
type Repository struct {
	Host string `json:"host,omitempty"`
	Name string `json:"name,omitempty"`
	Tag  string `json:"tag,omitempty"`
}

// Event describes a repository update notification.
type Event struct {
	Repository Repository `json:"repository,omitempty"`
	CreatedAt  time.Time  `json:"createdAt,omitempty"`
	// optional field to identify trigger
	TriggerName string `json:"triggerName,omitempty"`
}
// Version is a parsed semantic version, optionally carrying a leading
// prefix such as "v".
type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease string
	Metadata   string
	Prefix     string // v prefix
}

// String renders "PREFIX MAJOR.MINOR.PATCH[-PRERELEASE][+METADATA]".
//
// Fix: the prefix was passed to fmt.Fprintf as the format string
// (`Fprintf(&buf, v.Prefix)`), so a '%' in the prefix would corrupt the
// output ("%!(NOVERB)" etc., flagged by go vet).  It is now written
// literally with WriteString.
func (v Version) String() string {
	var buf bytes.Buffer
	if v.Prefix != "" {
		buf.WriteString(v.Prefix)
	}
	fmt.Fprintf(&buf, "%d.%d.%d", v.Major, v.Minor, v.Patch)
	if v.PreRelease != "" {
		fmt.Fprintf(&buf, "-%s", v.PreRelease)
	}
	if v.Metadata != "" {
		fmt.Fprintf(&buf, "+%s", v.Metadata)
	}
	return buf.String()
}
// PolicyType - policy type
type PolicyType int

// available policies
const (
	PolicyTypeUnknown = iota
	PolicyTypeAll
	PolicyTypeMajor
	PolicyTypeMinor
	PolicyTypePatch
	PolicyTypeForce // update always when a new image is available
)

// ParsePolicy - parse policy type.
//
// Fix: "force" is now recognized; previously PolicyType.String rendered
// PolicyTypeForce as "force" but ParsePolicy could not parse it back,
// breaking the round trip.
func ParsePolicy(policy string) PolicyType {
	switch policy {
	case "all":
		return PolicyTypeAll
	case "major":
		return PolicyTypeMajor
	case "minor":
		return PolicyTypeMinor
	case "patch":
		return PolicyTypePatch
	case "force":
		return PolicyTypeForce
	default:
		return PolicyTypeUnknown
	}
}

// String returns the lowercase name of the policy ("" for invalid values).
func (t PolicyType) String() string {
	switch t {
	case PolicyTypeUnknown:
		return "unknown"
	case PolicyTypeAll:
		return "all"
	case PolicyTypeMajor:
		return "major"
	case PolicyTypeMinor:
		return "minor"
	case PolicyTypePatch:
		return "patch"
	case PolicyTypeForce:
		return "force"
	default:
		return ""
	}
}
|
package matstack
import (
"fmt"
"github.com/go-gl/mathgl/mgl32"
"testing"
)
// TestStackNew verifies a new stack is seeded with the identity matrix.
func TestStackNew(t *testing.T) {
	stack := NewMatStack()
	if !(*stack)[0].ApproxEqual(mgl32.Ident4()) {
		t.Errorf("Cannot construct stack correctly")
	}
}

// TestStackPushPopPeek exercises Push, Peek, Len and Pop, including the
// error case of popping the last remaining element.
func TestStackPushPopPeek(t *testing.T) {
	stack := NewMatStack()
	if !stack.Peek().ApproxEqual(mgl32.Ident4()) {
		t.Errorf("Peek not working")
	}
	stack.Push(mgl32.HomogRotate3DY(mgl32.DegToRad(90)))
	if !stack.Peek().ApproxEqual(mgl32.HomogRotate3DY(mgl32.DegToRad(90))) {
		t.Errorf("Peek not working")
	}
	if stack.Len() != 2 {
		t.Errorf("Peek alters stack length")
	}
	pop, err := stack.Pop()
	if err != nil || !pop.ApproxEqual(mgl32.HomogRotate3DY(mgl32.DegToRad(90))) {
		t.Errorf("Pop is unsuccessful")
	}
	if stack.Len() != 1 {
		t.Errorf("Pop does not actually shorten stack")
	}
	_, err = stack.Pop()
	if err == nil {
		t.Errorf("Popping stack with 1 element does not return error as expected")
	}
}
// TestStackMultiPush verifies pushes multiply cumulatively and that
// Unwind rewinds the product to an earlier point.
// Fix: corrected the error-message typo "multiple" -> "multiply".
func TestStackMultiPush(t *testing.T) {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(rot), 1e-4) {
		t.Errorf("Stack does not multiply first two pushes correctly")
	}
	stack.Push(scale)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(rot).Mul4(scale), 1e-4) {
		t.Errorf("Stack does not multiply third push correctly")
	}
	stack.Unwind(2)
	stack.Push(scale)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(scale), 1e-4) {
		t.Errorf("Unwinding and multiplying does not work correctly")
	}
}
// TestReseed checks that replacing a lower stack entry recomputes the
// products above it.
// NOTE(review): failure messages say "Rebase" though this tests Reseed —
// consider renaming for clarity.
func TestReseed(t *testing.T) {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	stack.Push(scale)
	trans2 := mgl32.Translate3D(1, 2, 3)
	err := stack.Reseed(1, trans2)
	if err != nil {
		t.Fatalf("Rebase returned error when it should not %v", err)
	}
	if !stack.Peek().ApproxEqualThreshold(trans2.Mul4(rot).Mul4(scale), 1e-4) {
		t.Fatalf("Rebase does not remultiply correctly. Got\n %v expected\n %v. (Previous state:\n %v)", stack.Peek(), trans2.Mul4(rot).Mul4(scale), trans.Mul4(rot).Mul4(scale))
	}
}

// TestRebase checks replaying one stack's entries on top of another.
// NOTE(review): the error from Rebase is deliberately discarded here;
// a nil out would panic on Peek — consider checking it.
func TestRebase(t *testing.T) {
	stack := NewMatStack()
	stack2 := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	trans2 := mgl32.Translate3D(1, 2, 3)
	stack.Push(trans)
	stack.Push(rot)
	stack2.Push(trans2)
	stack2.Push(scale)
	out, _ := Rebase(stack2, 1, stack)
	if !out.Peek().ApproxEqualThreshold(trans.Mul4(rot).Mul4(trans2).Mul4(scale), 1e-4) {
		t.Log("\n", out)
		t.Errorf("Rebase unsuccessful. Got\n %v, expected\n %v", out.Peek(), trans.Mul4(rot).Mul4(trans2).Mul4(scale))
	}
}
// ExampleReseed demonstrates replacing a lower stack entry and having
// the products above it recomputed.
// Fix: the error check was inverted (`err == nil` panicked on SUCCESS);
// it now panics only when Reseed actually fails.
func ExampleReseed() {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	stack.Push(scale)
	fmt.Println("Initial state:\n", stack.Peek())
	trans2 := mgl32.Translate3D(1, 2, 3)
	err := stack.Reseed(1, trans2)
	if err != nil {
		panic("Rebase failed")
	}
	fmt.Println("After rebase:\n", stack.Peek())
	fmt.Println("Should be:\n", trans2.Mul4(rot).Mul4(scale))
}
// ExampleRebase copies a stack, pushes further entries onto the original,
// then replays those pushes onto the copy with Rebase and verifies the
// two stacks agree.
func ExampleRebase() {
	parent1 := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(5, 5, 5)
	parent1.Push(trans)
	parent1.Push(rot)
	parent1.Push(scale)
	parent2 := parent1.Copy()
	trans2 := mgl32.Translate3D(1, 1, 1)
	rot2 := mgl32.HomogRotate3DX(mgl32.DegToRad(45))
	parent1.Push(trans2)
	parent1.Push(rot2)
	// Replay the pushes onto parent1 after the copy, as if
	// they had been done on parent2 instead
	parent2, err := Rebase(parent1, 4, parent2)
	if err != nil {
		panic(err)
	}
	// Now parent2 and parent 1 should be the same!
	fmt.Println(parent2.Peek().ApproxEqualThreshold(parent1.Peek(), 1e-4))
	// Output: true
}
Fixed comment in rebase example
package matstack
import (
"fmt"
"github.com/go-gl/mathgl/mgl32"
"testing"
)
// TestStackNew verifies a new stack is seeded with the identity matrix.
func TestStackNew(t *testing.T) {
	stack := NewMatStack()
	if !(*stack)[0].ApproxEqual(mgl32.Ident4()) {
		t.Errorf("Cannot construct stack correctly")
	}
}

// TestStackPushPopPeek exercises Push, Peek, Len and Pop, including the
// error case of popping the last remaining element.
func TestStackPushPopPeek(t *testing.T) {
	stack := NewMatStack()
	if !stack.Peek().ApproxEqual(mgl32.Ident4()) {
		t.Errorf("Peek not working")
	}
	stack.Push(mgl32.HomogRotate3DY(mgl32.DegToRad(90)))
	if !stack.Peek().ApproxEqual(mgl32.HomogRotate3DY(mgl32.DegToRad(90))) {
		t.Errorf("Peek not working")
	}
	if stack.Len() != 2 {
		t.Errorf("Peek alters stack length")
	}
	pop, err := stack.Pop()
	if err != nil || !pop.ApproxEqual(mgl32.HomogRotate3DY(mgl32.DegToRad(90))) {
		t.Errorf("Pop is unsuccessful")
	}
	if stack.Len() != 1 {
		t.Errorf("Pop does not actually shorten stack")
	}
	_, err = stack.Pop()
	if err == nil {
		t.Errorf("Popping stack with 1 element does not return error as expected")
	}
}
// TestStackMultiPush verifies pushes multiply cumulatively and that
// Unwind rewinds the product to an earlier point.
// Fix: corrected the error-message typo "multiple" -> "multiply".
func TestStackMultiPush(t *testing.T) {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(rot), 1e-4) {
		t.Errorf("Stack does not multiply first two pushes correctly")
	}
	stack.Push(scale)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(rot).Mul4(scale), 1e-4) {
		t.Errorf("Stack does not multiply third push correctly")
	}
	stack.Unwind(2)
	stack.Push(scale)
	if !stack.Peek().ApproxEqualThreshold(trans.Mul4(scale), 1e-4) {
		t.Errorf("Unwinding and multiplying does not work correctly")
	}
}
// TestReseed checks that replacing a lower stack entry recomputes the
// products above it.
// NOTE(review): failure messages say "Rebase" though this tests Reseed.
func TestReseed(t *testing.T) {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	stack.Push(scale)
	trans2 := mgl32.Translate3D(1, 2, 3)
	err := stack.Reseed(1, trans2)
	if err != nil {
		t.Fatalf("Rebase returned error when it should not %v", err)
	}
	if !stack.Peek().ApproxEqualThreshold(trans2.Mul4(rot).Mul4(scale), 1e-4) {
		t.Fatalf("Rebase does not remultiply correctly. Got\n %v expected\n %v. (Previous state:\n %v)", stack.Peek(), trans2.Mul4(rot).Mul4(scale), trans.Mul4(rot).Mul4(scale))
	}
}

// TestRebase checks replaying one stack's entries on top of another.
// NOTE(review): the error from Rebase is deliberately discarded here.
func TestRebase(t *testing.T) {
	stack := NewMatStack()
	stack2 := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	trans2 := mgl32.Translate3D(1, 2, 3)
	stack.Push(trans)
	stack.Push(rot)
	stack2.Push(trans2)
	stack2.Push(scale)
	out, _ := Rebase(stack2, 1, stack)
	if !out.Peek().ApproxEqualThreshold(trans.Mul4(rot).Mul4(trans2).Mul4(scale), 1e-4) {
		t.Log("\n", out)
		t.Errorf("Rebase unsuccessful. Got\n %v, expected\n %v", out.Peek(), trans.Mul4(rot).Mul4(trans2).Mul4(scale))
	}
}
// ExampleReseed demonstrates replacing a lower stack entry and having
// the products above it recomputed.
// Fix: the error check was inverted (`err == nil` panicked on SUCCESS);
// it now panics only when Reseed actually fails.
func ExampleReseed() {
	stack := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(4, 5, 6)
	stack.Push(trans)
	stack.Push(rot)
	stack.Push(scale)
	fmt.Println("Initial state:\n", stack.Peek())
	trans2 := mgl32.Translate3D(1, 2, 3)
	err := stack.Reseed(1, trans2)
	if err != nil {
		panic("Rebase failed")
	}
	fmt.Println("After rebase:\n", stack.Peek())
	fmt.Println("Should be:\n", trans2.Mul4(rot).Mul4(scale))
}
// ExampleRebase copies a stack, pushes further entries onto the original,
// then replays those pushes onto the copy with Rebase and verifies the
// two stacks agree.
func ExampleRebase() {
	parent1 := NewMatStack()
	scale := mgl32.Scale3D(2, 2, 2)
	rot := mgl32.HomogRotate3DY(mgl32.DegToRad(90))
	trans := mgl32.Translate3D(5, 5, 5)
	parent1.Push(trans)
	parent1.Push(rot)
	parent1.Push(scale)
	parent2 := parent1.Copy()
	trans2 := mgl32.Translate3D(1, 1, 1)
	rot2 := mgl32.HomogRotate3DX(mgl32.DegToRad(45))
	parent1.Push(trans2)
	parent1.Push(rot2)
	// Replay the pushes the changes from parent1 after the copy onto parent2, as if
	// they had been done on parent2 instead
	parent2, err := Rebase(parent1, 4, parent2)
	if err != nil {
		panic(err)
	}
	// Now parent2 and parent 1 should be the same!
	fmt.Println(parent2.Peek().ApproxEqualThreshold(parent1.Peek(), 1e-4))
	// Output: true
}
|
package main
import (
"image"
"io"
"log"
"os"
"os/exec"
"time"
"github.com/lucasb-eyer/go-colorful"
"github.com/ninjasphere/go-ninja/api"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/model"
"github.com/ninjasphere/sphere-go-led-controller/ui"
"github.com/ninjasphere/sphere-go-led-controller/util"
"github.com/tarm/goserial"
)
// LedController drives the LED matrix over a serial link and exposes
// display commands as a ninja RPC service.
type LedController struct {
	controlEnabled bool               // true: render control layout; false: render pairing layout
	controlLayout  *ui.PaneLayout     // lazily built on first control frame
	pairingLayout  *ui.PairingLayout
	conn           *ninja.Connection
	serial         io.ReadWriteCloser // serial port to the LED matrix
	waiting        chan bool          // wakes a sleeping render loop when an RPC command arrives
}
// NewLedController opens the LED matrix serial port, blanks the display,
// and registers the controller service on the ninja bus.
func NewLedController(conn *ninja.Connection) (*LedController, error) {
	c := &serial.Config{Name: "/dev/tty.ledmatrix", Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		return nil, err
	}
	// Send a blank image to the led matrix
	util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), s)
	controller := &LedController{
		conn:          conn,
		pairingLayout: ui.NewPairingLayout(conn),
		serial:        s,
		waiting:       make(chan bool),
	}
	conn.MustExportService(controller, "$node/"+config.Serial()+"/led-controller", &model.ServiceAnnouncement{
		Schema: "/service/led-controller",
	})
	return controller, nil
}
// start launches the render loop in a goroutine.  Depending on
// controlEnabled it renders the control layout or the pairing layout and
// writes each frame to the LED matrix; a frame write stalled for 10s
// triggers a device reboot.
func (c *LedController) start(enableControl bool) {
	c.controlEnabled = enableControl
	frameWritten := make(chan bool)
	go func() {
		for {
			if c.controlEnabled {
				if c.controlLayout == nil {
					// Lazily build the control layout on first use.
					log.Println("Enabling layout... clearing LED")
					util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), c.serial)
					c.controlLayout = getPaneLayout(c.conn)
					log.Println("Finished control layout")
				}
				image, wake, err := c.controlLayout.Render()
				if err != nil {
					log.Fatal(err)
				}
				// Write the frame on a side goroutine so a wedged serial
				// port is detected by the timeout below.
				go func() {
					util.WriteLEDMatrix(image, c.serial)
					frameWritten <- true
				}()
				select {
				case <-frameWritten:
					// All good.
				case <-time.After(10 * time.Second):
					log.Println("Timeout writing to LED matrix. REBOOTING!")
					// Timed out writing to the led matrix. For now. Boot!
					cmd := exec.Command("reboot")
					output, err := cmd.Output()
					log.Printf("Output from reboot: %s err: %s", output, err)
				}
				if wake != nil {
					// Layout is asleep: block until it wakes or an RPC
					// command arrives via c.waiting.
					log.Println("Waiting as the UI is asleep")
					select {
					case <-wake:
						log.Println("UI woke up!")
					case <-c.waiting:
						log.Println("Got a command from rpc...")
					}
				}
			} else {
				image, err := c.pairingLayout.Render()
				if err != nil {
					log.Fatal(err)
				}
				util.WriteLEDMatrix(image, c.serial)
			}
		}
	}()
}
// EnableControl switches the render loop to the control layout.
func (c *LedController) EnableControl() error {
	c.controlEnabled = true
	c.gotCommand()
	return nil
}

// DisableControl switches the render loop back to the pairing layout.
func (c *LedController) DisableControl() error {
	c.controlEnabled = false
	c.gotCommand()
	return nil
}
// PairingCodeRequest is the RPC payload for DisplayPairingCode.
type PairingCodeRequest struct {
	Code        string `json:"code"`
	DisplayTime int    `json:"displayTime"` // NOTE(review): not consumed by this method — confirm
}

// DisplayPairingCode shows a pairing code via the pairing layout.
func (c *LedController) DisplayPairingCode(req *PairingCodeRequest) error {
	c.controlEnabled = false
	c.pairingLayout.ShowCode(req.Code)
	c.gotCommand()
	return nil
}

// ColorRequest is the RPC payload for DisplayColor.
type ColorRequest struct {
	Color       string `json:"color"`
	DisplayTime int    `json:"displayTime"`
}

// DisplayColor shows a solid color parsed from a hex string.
func (c *LedController) DisplayColor(req *ColorRequest) error {
	col, err := colorful.Hex(req.Color)
	if err != nil {
		return err
	}
	c.controlEnabled = false
	c.pairingLayout.ShowColor(col)
	c.gotCommand()
	return nil
}

// IconRequest is the RPC payload for DisplayIcon.
type IconRequest struct {
	Icon        string `json:"icon"`
	DisplayTime int    `json:"displayTime"`
}

// DisplayIcon shows a named icon via the pairing layout.
func (c *LedController) DisplayIcon(req *IconRequest) error {
	c.controlEnabled = false
	c.pairingLayout.ShowIcon(req.Icon)
	c.gotCommand()
	return nil
}
// gotCommand wakes the render loop if it is parked waiting; the
// non-blocking send makes this safe to call at any time.
func (c *LedController) gotCommand() {
	select {
	case c.waiting <- true:
	default:
	}
}
// Load from a config file instead...
// getPaneLayout builds the hard-coded control layout: media pane, a
// certification or heater pane depending on the CERTIFICATION env var,
// plus light and fan panes.
func getPaneLayout(conn *ninja.Connection) *ui.PaneLayout {
	layout, wake := ui.NewPaneLayout(false)
	mediaPane := ui.NewMediaPane(&ui.MediaPaneImages{
		Volume: "images/media-volume-speaker.gif",
		Mute:   "images/media-volume-mute.png",
		Play:   "images/media-play.png",
		Pause:  "images/media-pause.png",
		Stop:   "images/media-stop.png",
		Next:   "images/media-next.png",
	}, conn)
	layout.AddPane(mediaPane)
	if len(os.Getenv("CERTIFICATION")) > 0 {
		layout.AddPane(ui.NewCertPane(conn.GetMqttClient()))
	} else {
		//layout.AddPane(ui.NewTextScrollPane("Exit Music (For A Film)"))
		heaterPane := ui.NewOnOffPane("images/heater-off.png", "images/heater-on.gif", func(state bool) {
			log.Printf("Heater state: %t", state)
		}, conn, "heater")
		layout.AddPane(heaterPane)
	}
	lightPane := ui.NewLightPane("images/light-off.png", "images/light-on.png", func(state bool) {
		log.Printf("Light on-off state: %t", state)
	}, func(state float64) {
		log.Printf("Light color state: %f", state)
	}, conn)
	layout.AddPane(lightPane)
	fanPane := ui.NewOnOffPane("images/fan-off.png", "images/fan-on.gif", func(state bool) {
		log.Printf("Fan state: %t", state)
	}, conn, "fan")
	layout.AddPane(fanPane)
	// Drain the initial wake signal, then wake the layout.
	go func() {
		<-wake
	}()
	go layout.Wake()
	return layout
}
Don't reboot, just quit on a led write timeout
package main
import (
"image"
"io"
"log"
"os"
"time"
"github.com/lucasb-eyer/go-colorful"
"github.com/ninjasphere/go-ninja/api"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/model"
"github.com/ninjasphere/sphere-go-led-controller/ui"
"github.com/ninjasphere/sphere-go-led-controller/util"
"github.com/tarm/goserial"
)
// LedController drives the LED matrix over a serial link and exposes
// display commands as a ninja RPC service.
type LedController struct {
	controlEnabled bool               // true: render control layout; false: render pairing layout
	controlLayout  *ui.PaneLayout     // lazily built on first control frame
	pairingLayout  *ui.PairingLayout
	conn           *ninja.Connection
	serial         io.ReadWriteCloser // serial port to the LED matrix
	waiting        chan bool          // wakes a sleeping render loop when an RPC command arrives
}
// NewLedController opens the LED matrix serial port, blanks the display,
// and registers the controller service on the ninja bus.
func NewLedController(conn *ninja.Connection) (*LedController, error) {
	c := &serial.Config{Name: "/dev/tty.ledmatrix", Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		return nil, err
	}
	// Send a blank image to the led matrix
	util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), s)
	controller := &LedController{
		conn:          conn,
		pairingLayout: ui.NewPairingLayout(conn),
		serial:        s,
		waiting:       make(chan bool),
	}
	conn.MustExportService(controller, "$node/"+config.Serial()+"/led-controller", &model.ServiceAnnouncement{
		Schema: "/service/led-controller",
	})
	return controller, nil
}
// start launches the render loop in a goroutine.  Depending on
// controlEnabled it renders the control layout or the pairing layout and
// writes each frame to the LED matrix.  A frame write stalled for 10s
// exits the process (presumably a supervisor restarts it — confirm).
//
// Cleanup: removed the dead commented-out reboot code and the stale
// "Boot!" comment that no longer described the os.Exit behavior.
func (c *LedController) start(enableControl bool) {
	c.controlEnabled = enableControl
	frameWritten := make(chan bool)
	go func() {
		for {
			if c.controlEnabled {
				if c.controlLayout == nil {
					// Lazily build the control layout on first use.
					log.Println("Enabling layout... clearing LED")
					util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), c.serial)
					c.controlLayout = getPaneLayout(c.conn)
					log.Println("Finished control layout")
				}
				image, wake, err := c.controlLayout.Render()
				if err != nil {
					log.Fatal(err)
				}
				// Write the frame on a side goroutine so a wedged serial
				// port is detected by the timeout below.
				go func() {
					util.WriteLEDMatrix(image, c.serial)
					frameWritten <- true
				}()
				select {
				case <-frameWritten:
					// All good.
				case <-time.After(10 * time.Second):
					// The serial write wedged; give up and exit.
					log.Println("Timeout writing to LED matrix. Quitting.")
					os.Exit(1)
				}
				if wake != nil {
					// Layout is asleep: block until it wakes or an RPC
					// command arrives via c.waiting.
					log.Println("Waiting as the UI is asleep")
					select {
					case <-wake:
						log.Println("UI woke up!")
					case <-c.waiting:
						log.Println("Got a command from rpc...")
					}
				}
			} else {
				image, err := c.pairingLayout.Render()
				if err != nil {
					log.Fatal(err)
				}
				util.WriteLEDMatrix(image, c.serial)
			}
		}
	}()
}
// EnableControl switches the display to the control pane layout and wakes
// the render loop if it is waiting on a sleeping UI.
func (c *LedController) EnableControl() error {
	c.controlEnabled = true
	c.gotCommand()
	return nil
}
// DisableControl switches the display back to the pairing layout and wakes
// the render loop if it is waiting on a sleeping UI.
func (c *LedController) DisableControl() error {
	c.controlEnabled = false
	c.gotCommand()
	return nil
}
// PairingCodeRequest is the RPC payload for DisplayPairingCode.
type PairingCodeRequest struct {
	// Code is the pairing code to show on the matrix.
	Code string `json:"code"`
	// DisplayTime is accepted but not currently used by the controller.
	DisplayTime int `json:"displayTime"`
}
// DisplayPairingCode disables control and shows the given pairing code on
// the pairing layout. Note DisplayTime is not honored here.
func (c *LedController) DisplayPairingCode(req *PairingCodeRequest) error {
	c.controlEnabled = false
	c.pairingLayout.ShowCode(req.Code)
	c.gotCommand()
	return nil
}
// ColorRequest is the RPC payload for DisplayColor.
type ColorRequest struct {
	// Color is a hex color string, e.g. "#ff0000", parsed by go-colorful.
	Color string `json:"color"`
	// DisplayTime is accepted but not currently used by the controller.
	DisplayTime int `json:"displayTime"`
}
// DisplayColor disables control and fills the pairing layout with the
// requested color. The color string must be a valid hex color; a parse
// failure is returned to the caller and the display is left unchanged.
func (c *LedController) DisplayColor(req *ColorRequest) error {
	parsed, parseErr := colorful.Hex(req.Color)
	if parseErr != nil {
		return parseErr
	}
	c.controlEnabled = false
	c.pairingLayout.ShowColor(parsed)
	c.gotCommand()
	return nil
}
// IconRequest is the RPC payload for DisplayIcon.
type IconRequest struct {
	// Icon names the image to display.
	Icon string `json:"icon"`
	// DisplayTime is accepted but not currently used by the controller.
	DisplayTime int `json:"displayTime"`
}
// DisplayIcon disables control and shows the named icon on the pairing
// layout. Note DisplayTime is not honored here.
func (c *LedController) DisplayIcon(req *IconRequest) error {
	c.controlEnabled = false
	c.pairingLayout.ShowIcon(req.Icon)
	c.gotCommand()
	return nil
}
// gotCommand performs a non-blocking send on the waiting channel to wake
// the render loop if it is blocked on a sleeping UI. If nothing is
// listening the send is dropped, so this never blocks the RPC caller.
func (c *LedController) gotCommand() {
	select {
	case c.waiting <- true:
	default:
	}
}
// Load from a config file instead...
func getPaneLayout(conn *ninja.Connection) *ui.PaneLayout {
layout, wake := ui.NewPaneLayout(false)
mediaPane := ui.NewMediaPane(&ui.MediaPaneImages{
Volume: "images/media-volume-speaker.gif",
Mute: "images/media-volume-mute.png",
Play: "images/media-play.png",
Pause: "images/media-pause.png",
Stop: "images/media-stop.png",
Next: "images/media-next.png",
}, conn)
layout.AddPane(mediaPane)
if len(os.Getenv("CERTIFICATION")) > 0 {
layout.AddPane(ui.NewCertPane(conn.GetMqttClient()))
} else {
//layout.AddPane(ui.NewTextScrollPane("Exit Music (For A Film)"))
heaterPane := ui.NewOnOffPane("images/heater-off.png", "images/heater-on.gif", func(state bool) {
log.Printf("Heater state: %t", state)
}, conn, "heater")
layout.AddPane(heaterPane)
}
lightPane := ui.NewLightPane("images/light-off.png", "images/light-on.png", func(state bool) {
log.Printf("Light on-off state: %t", state)
}, func(state float64) {
log.Printf("Light color state: %f", state)
}, conn)
layout.AddPane(lightPane)
fanPane := ui.NewOnOffPane("images/fan-off.png", "images/fan-on.gif", func(state bool) {
log.Printf("Fan state: %t", state)
}, conn, "fan")
layout.AddPane(fanPane)
go func() {
<-wake
}()
go layout.Wake()
return layout
}
|
// Copyright 2015 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notify
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/mail"
"net/smtp"
"os"
"strings"
"time"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/template"
"github.com/prometheus/alertmanager/types"
)
// notifierConfig is the subset of integration configs needed by Build's
// filtering: whether resolved alerts should be delivered.
type notifierConfig interface {
	SendResolved() bool
}

// NotifierFunc adapts a plain function to the Notifier interface.
type NotifierFunc func(context.Context, ...*types.Alert) error

// Notify implements the Notifier interface by calling the function itself.
func (f NotifierFunc) Notify(ctx context.Context, alerts ...*types.Alert) error {
	return f(ctx, alerts...)
}
// Build creates a fanout notifier for each receiver.
//
// Every configured integration is wrapped in a filter that drops resolved
// alerts unless the integration's config opts in via SendResolved, and
// skips the call entirely when no alerts remain after filtering.
func Build(confs []*config.Receiver, tmpl *template.Template) map[string]Fanout {
	res := map[string]Fanout{}
	filter := func(n Notifier, c notifierConfig) Notifier {
		return NotifierFunc(func(ctx context.Context, alerts ...*types.Alert) error {
			var res []*types.Alert
			if c.SendResolved() {
				res = alerts
			} else {
				// Keep only firing alerts.
				for _, a := range alerts {
					if a.Status() != model.AlertResolved {
						res = append(res, a)
					}
				}
			}
			if len(res) == 0 {
				return nil
			}
			return n.Notify(ctx, res...)
		})
	}
	for _, nc := range confs {
		var (
			fo = Fanout{}
			// Key each notifier by its Go type and index, e.g. "*notify.Webhook/0".
			add = func(i int, on, n Notifier) { fo[fmt.Sprintf("%T/%d", on, i)] = n }
		)
		for i, c := range nc.WebhookConfigs {
			n := NewWebhook(c)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.EmailConfigs {
			n := NewEmail(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.PagerdutyConfigs {
			n := NewPagerDuty(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.OpsGenieConfigs {
			n := NewOpsGenie(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.SlackConfigs {
			n := NewSlack(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.HipchatConfigs {
			n := NewHipchat(c, tmpl)
			add(i, n, filter(n, c))
		}
		res[nc.Name] = fo
	}
	return res
}
// contentTypeJSON is the Content-Type used for all outgoing HTTP posts.
const contentTypeJSON = "application/json"

// Webhook implements a Notifier for generic webhooks.
type Webhook struct {
	// The URL to which notifications are sent.
	URL string
}

// NewWebhook returns a new Webhook.
func NewWebhook(conf *config.WebhookConfig) *Webhook {
	return &Webhook{URL: conf.URL}
}
// WebhookMessage defines the JSON object sent to webhook endpoints.
type WebhookMessage struct {
	// The protocol version.
	Version string `json:"version"`
	// The alert status. It is firing iff any of the alerts is not resolved.
	Status model.AlertStatus `json:"status"`
	// A batch of alerts. NOTE(review): the tag is "alert" (singular) —
	// consumers depend on it, so it must not be renamed casually.
	Alerts model.Alerts `json:"alert"`
}
// Notify implements the Notifier interface.
//
// It POSTs the alerts as a JSON WebhookMessage to the configured URL and
// treats any non-2xx response status as an error.
func (w *Webhook) Notify(ctx context.Context, alerts ...*types.Alert) error {
	as := types.Alerts(alerts...)
	// If there are no annotations, instantiate so
	// {} is sent rather than null.
	for _, a := range as {
		if a.Annotations == nil {
			a.Annotations = model.LabelSet{}
		}
	}
	msg := &WebhookMessage{
		Version: "2",
		Status:  as.Status(),
		Alerts:  as,
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, w.URL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	// NOTE(review): the body is closed without being drained, which may
	// prevent the transport from reusing the connection — consider reading
	// it to EOF first.
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Email implements a Notifier for email notifications.
type Email struct {
	conf *config.EmailConfig
	tmpl *template.Template
}

// NewEmail returns a new Email notifier.
//
// It mutates the passed config, defaulting the Subject, To and From
// headers when they are not explicitly set. Assumes c.Headers is non-nil
// (writing to a nil map would panic) — presumably guaranteed by config
// loading; verify against the config package.
func NewEmail(c *config.EmailConfig, t *template.Template) *Email {
	if _, ok := c.Headers["Subject"]; !ok {
		c.Headers["Subject"] = config.DefaultEmailSubject
	}
	if _, ok := c.Headers["To"]; !ok {
		c.Headers["To"] = c.To
	}
	if _, ok := c.Headers["From"]; !ok {
		c.Headers["From"] = c.From
	}
	return &Email{conf: c, tmpl: t}
}
// auth resolves a string of authentication mechanisms.
//
// mechs is the space-separated list advertised by the server's AUTH
// extension; the first supported mechanism with sufficient credentials in
// the environment wins. PLAIN additionally returns a TLS config since it
// must not be sent over plaintext. Returns (nil, nil, nil) when no
// mechanism can be used — callers then proceed unauthenticated.
func (n *Email) auth(mechs string) (smtp.Auth, *tls.Config, error) {
	username := os.Getenv("SMTP_AUTH_USERNAME")
	for _, mech := range strings.Split(mechs, " ") {
		switch mech {
		case "CRAM-MD5":
			secret := os.Getenv("SMTP_AUTH_SECRET")
			if secret == "" {
				continue
			}
			return smtp.CRAMMD5Auth(username, secret), nil, nil
		case "PLAIN":
			password := os.Getenv("SMTP_AUTH_PASSWORD")
			if password == "" {
				continue
			}
			identity := os.Getenv("SMTP_AUTH_IDENTITY")
			// We need to know the hostname for both auth and TLS.
			host, _, err := net.SplitHostPort(n.conf.Smarthost)
			if err != nil {
				return nil, nil, fmt.Errorf("invalid address: %s", err)
			}
			var (
				auth = smtp.PlainAuth(identity, username, password, host)
				cfg  = &tls.Config{ServerName: host}
			)
			return auth, cfg, nil
		}
	}
	return nil, nil, nil
}
// Notify implements the Notifier interface.
//
// It speaks SMTP to the configured smarthost: optional STARTTLS and AUTH,
// then MAIL/RCPT/DATA with templated headers and an HTML body. The SMTP
// command order below is protocol-mandated and must not be rearranged.
func (n *Email) Notify(ctx context.Context, as ...*types.Alert) error {
	// Connect to the SMTP smarthost.
	c, err := smtp.Dial(n.conf.Smarthost)
	if err != nil {
		return err
	}
	defer c.Quit()
	if ok, mech := c.Extension("AUTH"); ok {
		auth, tlsConf, err := n.auth(mech)
		if err != nil {
			return err
		}
		// TLS must be established before credentials are sent.
		if tlsConf != nil {
			if err := c.StartTLS(tlsConf); err != nil {
				return fmt.Errorf("starttls failed: %s", err)
			}
		}
		if auth != nil {
			if err := c.Auth(auth); err != nil {
				return fmt.Errorf("%T failed: %s", auth, err)
			}
		}
	}
	var (
		data = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmpl = tmplText(n.tmpl, data, &err)
		from = tmpl(n.conf.From)
		to   = tmpl(n.conf.To)
	)
	// tmplText records the first template error into err.
	if err != nil {
		return err
	}
	addrs, err := mail.ParseAddressList(from)
	if err != nil {
		return fmt.Errorf("parsing from addresses: %s", err)
	}
	if len(addrs) != 1 {
		return fmt.Errorf("must be exactly one from address")
	}
	if err := c.Mail(addrs[0].Address); err != nil {
		return fmt.Errorf("sending mail from: %s", err)
	}
	addrs, err = mail.ParseAddressList(to)
	if err != nil {
		return fmt.Errorf("parsing to addresses: %s", err)
	}
	for _, addr := range addrs {
		if err := c.Rcpt(addr.Address); err != nil {
			return fmt.Errorf("sending rcpt to: %s", err)
		}
	}
	// Send the email body.
	wc, err := c.Data()
	if err != nil {
		return err
	}
	defer wc.Close()
	for header, t := range n.conf.Headers {
		value, err := n.tmpl.ExecuteTextString(t, data)
		if err != nil {
			return fmt.Errorf("executing %q header template: %s", header, err)
		}
		// Q-encode header values so non-ASCII text survives transport.
		fmt.Fprintf(wc, "%s: %s\r\n", header, mime.QEncoding.Encode("utf-8", value))
	}
	fmt.Fprintf(wc, "Content-Type: text/html; charset=UTF-8\r\n")
	fmt.Fprintf(wc, "Date: %s\r\n", time.Now().Format(time.RFC1123Z))
	// TODO: Add some useful headers here, such as URL of the alertmanager
	// and active/resolved.
	fmt.Fprintf(wc, "\r\n")
	// TODO(fabxc): do a multipart write that considers the plain template.
	body, err := n.tmpl.ExecuteHTMLString(n.conf.HTML, data)
	if err != nil {
		return fmt.Errorf("executing email html template: %s", err)
	}
	_, err = io.WriteString(wc, body)
	return err
}
// PagerDuty implements a Notifier for PagerDuty notifications.
type PagerDuty struct {
	conf *config.PagerdutyConfig
	tmpl *template.Template
}

// NewPagerDuty returns a new PagerDuty notifier.
func NewPagerDuty(c *config.PagerdutyConfig, t *template.Template) *PagerDuty {
	return &PagerDuty{conf: c, tmpl: t}
}

// PagerDuty Events API event types.
const (
	pagerDutyEventTrigger = "trigger"
	pagerDutyEventResolve = "resolve"
)

// pagerDutyMessage is the JSON payload of the PagerDuty Events API.
type pagerDutyMessage struct {
	ServiceKey  string            `json:"service_key"`
	IncidentKey model.Fingerprint `json:"incident_key"`
	EventType   string            `json:"event_type"`
	Description string            `json:"description"`
	Client      string            `json:"client,omitempty"`
	ClientURL   string            `json:"client_url,omitempty"`
	Details     map[string]string `json:"details,omitempty"`
}
// Notify implements the Notifier interface.
//
// It triggers a PagerDuty incident keyed by the alert group, or resolves
// it when all alerts in the group are resolved.
//
// http://developer.pagerduty.com/documentation/integration/events/trigger
func (n *PagerDuty) Notify(ctx context.Context, as ...*types.Alert) error {
	key, ok := GroupKey(ctx)
	if !ok {
		return fmt.Errorf("group key missing")
	}
	var err error
	var (
		alerts    = types.Alerts(as...)
		data      = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmpl      = tmplText(n.tmpl, data, &err)
		eventType = pagerDutyEventTrigger
	)
	if alerts.Status() == model.AlertResolved {
		eventType = pagerDutyEventResolve
	}
	log.With("incident", key).With("eventType", eventType).Debugln("notifying PagerDuty")
	details := make(map[string]string, len(n.conf.Details))
	for k, v := range n.conf.Details {
		details[k] = tmpl(v)
	}
	msg := &pagerDutyMessage{
		ServiceKey:  tmpl(string(n.conf.ServiceKey)),
		EventType:   eventType,
		IncidentKey: key,
		Description: tmpl(n.conf.Description),
		Details:     details,
	}
	// Client info is only meaningful when triggering.
	if eventType == pagerDutyEventTrigger {
		msg.Client = tmpl(n.conf.Client)
		msg.ClientURL = tmpl(n.conf.ClientURL)
	}
	// tmpl recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, n.conf.URL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Slack implements a Notifier for Slack notifications.
type Slack struct {
	conf *config.SlackConfig
	tmpl *template.Template
}

// NewSlack returns a new Slack notification handler.
func NewSlack(conf *config.SlackConfig, tmpl *template.Template) *Slack {
	return &Slack{
		conf: conf,
		tmpl: tmpl,
	}
}

// slackReq is the request for sending a slack notification.
type slackReq struct {
	Channel     string            `json:"channel,omitempty"`
	Username    string            `json:"username,omitempty"`
	Attachments []slackAttachment `json:"attachments"`
}

// slackAttachment is used to display a richly-formatted message block.
type slackAttachment struct {
	Title     string   `json:"title,omitempty"`
	TitleLink string   `json:"title_link,omitempty"`
	Pretext   string   `json:"pretext,omitempty"`
	Text      string   `json:"text"`
	Fallback  string   `json:"fallback"`
	Color     string   `json:"color,omitempty"`
	MrkdwnIn  []string `json:"mrkdwn_in,omitempty"`
}

// slackAttachmentField is displayed in a table inside the message attachment.
// NOTE(review): currently unreferenced by Notify below.
type slackAttachmentField struct {
	Title string `json:"title"`
	Value string `json:"value"`
	Short bool   `json:"short,omitempty"`
}
// Notify implements the Notifier interface.
//
// It posts one attachment built from the templated config fields to the
// configured Slack webhook API URL.
func (n *Slack) Notify(ctx context.Context, as ...*types.Alert) error {
	var err error
	var (
		data     = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmplText = tmplText(n.tmpl, data, &err)
		tmplHTML = tmplHTML(n.tmpl, data, &err)
	)
	attachment := &slackAttachment{
		Title:     tmplText(n.conf.Title),
		TitleLink: tmplText(n.conf.TitleLink),
		Pretext:   tmplText(n.conf.Pretext),
		Text:      tmplHTML(n.conf.Text),
		Fallback:  tmplText(n.conf.Fallback),
		Color:     tmplText(n.conf.Color),
		MrkdwnIn:  []string{"fallback", "pretext"},
	}
	req := &slackReq{
		Channel:     tmplText(n.conf.Channel),
		Username:    tmplText(n.conf.Username),
		Attachments: []slackAttachment{*attachment},
	}
	// The tmpl* helpers recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(req); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, string(n.conf.APIURL), contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	// TODO(fabxc): is 2xx status code really indicator for success for Slack API?
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Hipchat implements a Notifier for Hipchat notifications.
type Hipchat struct {
	conf *config.HipchatConfig
	tmpl *template.Template
}

// NewHipchat returns a new Hipchat notification handler.
func NewHipchat(conf *config.HipchatConfig, tmpl *template.Template) *Hipchat {
	return &Hipchat{
		conf: conf,
		tmpl: tmpl,
	}
}

// hipchatReq is the JSON payload of the Hipchat room-notification API.
type hipchatReq struct {
	From          string `json:"from"`
	Notify        bool   `json:"notify"`
	Message       string `json:"message"`
	MessageFormat string `json:"message_format"`
	Color         string `json:"color"`
}
// Notify implements the Notifier interface.
//
// It posts a room notification to the Hipchat v2 API, using the HTML or
// text template for the message depending on the configured format.
// NOTE(review): the auth token is embedded in the URL query string.
func (n *Hipchat) Notify(ctx context.Context, as ...*types.Alert) error {
	var err error
	var msg string
	var (
		data     = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmplText = tmplText(n.tmpl, data, &err)
		tmplHTML = tmplHTML(n.tmpl, data, &err)
		url      = fmt.Sprintf("%sv2/room/%s/notification?auth_token=%s", n.conf.APIURL, n.conf.RoomID, n.conf.AuthToken)
	)
	if n.conf.MessageFormat == "html" {
		msg = tmplHTML(n.conf.Message)
	} else {
		msg = tmplText(n.conf.Message)
	}
	req := &hipchatReq{
		From:          tmplText(n.conf.From),
		Notify:        n.conf.Notify,
		Message:       msg,
		MessageFormat: n.conf.MessageFormat,
		Color:         tmplText(n.conf.Color),
	}
	// The tmpl* helpers recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(req); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, url, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// OpsGenie implements a Notifier for OpsGenie notifications.
type OpsGenie struct {
	conf *config.OpsGenieConfig
	tmpl *template.Template
}

// NewOpsGenie returns a new OpsGenie notifier.
func NewOpsGenie(c *config.OpsGenieConfig, t *template.Template) *OpsGenie {
	return &OpsGenie{conf: c, tmpl: t}
}
// opsGenieMessage is the common part of every OpsGenie API request:
// the API key and the alias identifying the alert group.
type opsGenieMessage struct {
	APIKey string            `json:"apiKey"`
	Alias  model.Fingerprint `json:"alias"`
}

// opsGenieCreateMessage is the payload for creating an alert.
// The embedded message's fields are inlined by encoding/json by default;
// the previous `json:,inline` tag was malformed (flagged by go vet's
// structtag check) and had no effect, so it is removed.
type opsGenieCreateMessage struct {
	*opsGenieMessage

	Message string            `json:"message"`
	Details map[string]string `json:"details"`
}

// opsGenieCloseMessage is the payload for closing an alert.
type opsGenieCloseMessage struct {
	*opsGenieMessage
}
// Notify implements the Notifier interface.
//
// It creates an OpsGenie alert aliased by the group key, or closes it
// when all alerts in the group are resolved.
func (n *OpsGenie) Notify(ctx context.Context, as ...*types.Alert) error {
	key, ok := GroupKey(ctx)
	if !ok {
		return fmt.Errorf("group key missing")
	}
	data := n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
	log.With("incident", key).Debugln("notifying OpsGenie")
	var err error
	tmpl := tmplText(n.tmpl, data, &err)
	details := make(map[string]string, len(n.conf.Details))
	for k, v := range n.conf.Details {
		details[k] = tmpl(v)
	}
	var (
		msg    interface{}
		apiURL string
		apiMsg = opsGenieMessage{
			APIKey: string(n.conf.APIKey),
			Alias:  key,
		}
		alerts = types.Alerts(as...)
	)
	// Choose endpoint and payload by the aggregate group status.
	switch alerts.Status() {
	case model.AlertResolved:
		apiURL = n.conf.APIHost + "v1/json/alert/close"
		msg = &opsGenieCloseMessage{&apiMsg}
	default:
		apiURL = n.conf.APIHost + "v1/json/alert"
		msg = &opsGenieCreateMessage{
			opsGenieMessage: &apiMsg,
			Message:         tmpl(n.conf.Description),
			Details:         details,
		}
	}
	// tmpl recorded the first template error, if any, into err.
	if err != nil {
		return fmt.Errorf("templating error: %s", err)
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, apiURL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// tmplText returns an executor over text templates that shares an error
// slot with its siblings: after the first failure, every further call is
// a no-op returning the empty string, so callers can template many fields
// and check the error once at the end.
func tmplText(tmpl *template.Template, data *template.Data, err *error) func(string) string {
	return func(name string) string {
		if *err != nil {
			return ""
		}
		var out string
		out, *err = tmpl.ExecuteTextString(name, data)
		return out
	}
}
// tmplHTML is the HTML-template counterpart of tmplText: it returns an
// executor that short-circuits to the empty string once *err is set,
// letting callers template several fields and inspect the error once.
func tmplHTML(tmpl *template.Template, data *template.Data, err *error) func(string) string {
	return func(name string) string {
		if *err != nil {
			return ""
		}
		var out string
		out, *err = tmpl.ExecuteHTMLString(name, data)
		return out
	}
}
Add instrumentation for notifications
// Copyright 2015 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notify
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/mail"
"net/smtp"
"os"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/template"
"github.com/prometheus/alertmanager/types"
)
// Metrics on notification attempts and failures, partitioned by the
// integration name ("webhook", "email", "pagerduty", ...).
var (
	numNotifications = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "alertmanager",
		Name:      "notifications_total",
		Help:      "The total number of attempted notifications.",
	}, []string{"integration"})
	numFailedNotifications = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "alertmanager",
		Name:      "notifications_failed_total",
		Help:      "The total number of failed notifications.",
	}, []string{"integration"})
)
// init registers the notification metrics with the default registry.
// prometheus.Register returns an error that was previously ignored
// silently; MustRegister panics instead, surfacing duplicate or invalid
// registration (a programmer error) at startup.
func init() {
	prometheus.MustRegister(numNotifications)
	prometheus.MustRegister(numFailedNotifications)
}
// notifierConfig is the subset of integration configs needed by Build's
// filtering: whether resolved alerts should be delivered.
type notifierConfig interface {
	SendResolved() bool
}

// NotifierFunc adapts a plain function to the Notifier interface.
type NotifierFunc func(context.Context, ...*types.Alert) error

// Notify implements the Notifier interface by calling the function itself.
func (f NotifierFunc) Notify(ctx context.Context, alerts ...*types.Alert) error {
	return f(ctx, alerts...)
}

// integration is a Notifier that also reports a stable name used as the
// metric label and fanout key.
type integration interface {
	Notifier
	name() string
}
// Build creates a fanout notifier for each receiver.
//
// Every configured integration is wrapped in a filter that drops resolved
// alerts unless the integration's config opts in via SendResolved, skips
// the call when nothing remains, and counts attempts and failures in the
// notification metrics labelled by integration name.
func Build(confs []*config.Receiver, tmpl *template.Template) map[string]Fanout {
	res := map[string]Fanout{}
	filter := func(n integration, c notifierConfig) Notifier {
		return NotifierFunc(func(ctx context.Context, alerts ...*types.Alert) error {
			var res []*types.Alert
			if c.SendResolved() {
				res = alerts
			} else {
				// Keep only firing alerts.
				for _, a := range alerts {
					if a.Status() != model.AlertResolved {
						res = append(res, a)
					}
				}
			}
			if len(res) == 0 {
				return nil
			}
			err := n.Notify(ctx, res...)
			if err != nil {
				numFailedNotifications.WithLabelValues(n.name()).Inc()
			}
			// Attempts are counted whether or not the call failed.
			numNotifications.WithLabelValues(n.name()).Inc()
			return err
		})
	}
	for _, nc := range confs {
		var (
			fo = Fanout{}
			// Key each notifier by integration name and index, e.g. "webhook/0".
			add = func(i int, on integration, n Notifier) { fo[fmt.Sprintf("%s/%d", on.name(), i)] = n }
		)
		for i, c := range nc.WebhookConfigs {
			n := NewWebhook(c)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.EmailConfigs {
			n := NewEmail(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.PagerdutyConfigs {
			n := NewPagerDuty(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.OpsGenieConfigs {
			n := NewOpsGenie(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.SlackConfigs {
			n := NewSlack(c, tmpl)
			add(i, n, filter(n, c))
		}
		for i, c := range nc.HipchatConfigs {
			n := NewHipchat(c, tmpl)
			add(i, n, filter(n, c))
		}
		res[nc.Name] = fo
	}
	return res
}
// contentTypeJSON is the Content-Type used for all outgoing HTTP posts.
const contentTypeJSON = "application/json"

// Webhook implements a Notifier for generic webhooks.
type Webhook struct {
	// The URL to which notifications are sent.
	URL string
}

// NewWebhook returns a new Webhook.
func NewWebhook(conf *config.WebhookConfig) *Webhook {
	return &Webhook{URL: conf.URL}
}

// name implements the integration interface; used as metric label and fanout key.
func (*Webhook) name() string { return "webhook" }
// WebhookMessage defines the JSON object sent to webhook endpoints.
type WebhookMessage struct {
	// The protocol version.
	Version string `json:"version"`
	// The alert status. It is firing iff any of the alerts is not resolved.
	Status model.AlertStatus `json:"status"`
	// A batch of alerts. NOTE(review): the tag is "alert" (singular) —
	// consumers depend on it, so it must not be renamed casually.
	Alerts model.Alerts `json:"alert"`
}
// Notify implements the Notifier interface.
//
// It POSTs the alerts as a JSON WebhookMessage to the configured URL and
// treats any non-2xx response status as an error.
func (w *Webhook) Notify(ctx context.Context, alerts ...*types.Alert) error {
	as := types.Alerts(alerts...)
	// If there are no annotations, instantiate so
	// {} is sent rather than null.
	for _, a := range as {
		if a.Annotations == nil {
			a.Annotations = model.LabelSet{}
		}
	}
	msg := &WebhookMessage{
		Version: "2",
		Status:  as.Status(),
		Alerts:  as,
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, w.URL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	// NOTE(review): the body is closed without being drained, which may
	// prevent the transport from reusing the connection — consider reading
	// it to EOF first.
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Email implements a Notifier for email notifications.
type Email struct {
	conf *config.EmailConfig
	tmpl *template.Template
}

// NewEmail returns a new Email notifier.
//
// It mutates the passed config, defaulting the Subject, To and From
// headers when they are not explicitly set. Assumes c.Headers is non-nil
// (writing to a nil map would panic) — presumably guaranteed by config
// loading; verify against the config package.
func NewEmail(c *config.EmailConfig, t *template.Template) *Email {
	if _, ok := c.Headers["Subject"]; !ok {
		c.Headers["Subject"] = config.DefaultEmailSubject
	}
	if _, ok := c.Headers["To"]; !ok {
		c.Headers["To"] = c.To
	}
	if _, ok := c.Headers["From"]; !ok {
		c.Headers["From"] = c.From
	}
	return &Email{conf: c, tmpl: t}
}

// name implements the integration interface; used as metric label and fanout key.
func (*Email) name() string { return "email" }
// auth resolves a string of authentication mechanisms.
//
// mechs is the space-separated list advertised by the server's AUTH
// extension; the first supported mechanism with sufficient credentials in
// the environment wins. PLAIN additionally returns a TLS config since it
// must not be sent over plaintext. Returns (nil, nil, nil) when no
// mechanism can be used — callers then proceed unauthenticated.
func (n *Email) auth(mechs string) (smtp.Auth, *tls.Config, error) {
	username := os.Getenv("SMTP_AUTH_USERNAME")
	for _, mech := range strings.Split(mechs, " ") {
		switch mech {
		case "CRAM-MD5":
			secret := os.Getenv("SMTP_AUTH_SECRET")
			if secret == "" {
				continue
			}
			return smtp.CRAMMD5Auth(username, secret), nil, nil
		case "PLAIN":
			password := os.Getenv("SMTP_AUTH_PASSWORD")
			if password == "" {
				continue
			}
			identity := os.Getenv("SMTP_AUTH_IDENTITY")
			// We need to know the hostname for both auth and TLS.
			host, _, err := net.SplitHostPort(n.conf.Smarthost)
			if err != nil {
				return nil, nil, fmt.Errorf("invalid address: %s", err)
			}
			var (
				auth = smtp.PlainAuth(identity, username, password, host)
				cfg  = &tls.Config{ServerName: host}
			)
			return auth, cfg, nil
		}
	}
	return nil, nil, nil
}
// Notify implements the Notifier interface.
//
// It speaks SMTP to the configured smarthost: optional STARTTLS and AUTH,
// then MAIL/RCPT/DATA with templated headers and an HTML body. The SMTP
// command order below is protocol-mandated and must not be rearranged.
func (n *Email) Notify(ctx context.Context, as ...*types.Alert) error {
	// Connect to the SMTP smarthost.
	c, err := smtp.Dial(n.conf.Smarthost)
	if err != nil {
		return err
	}
	defer c.Quit()
	if ok, mech := c.Extension("AUTH"); ok {
		auth, tlsConf, err := n.auth(mech)
		if err != nil {
			return err
		}
		// TLS must be established before credentials are sent.
		if tlsConf != nil {
			if err := c.StartTLS(tlsConf); err != nil {
				return fmt.Errorf("starttls failed: %s", err)
			}
		}
		if auth != nil {
			if err := c.Auth(auth); err != nil {
				return fmt.Errorf("%T failed: %s", auth, err)
			}
		}
	}
	var (
		data = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmpl = tmplText(n.tmpl, data, &err)
		from = tmpl(n.conf.From)
		to   = tmpl(n.conf.To)
	)
	// tmplText records the first template error into err.
	if err != nil {
		return err
	}
	addrs, err := mail.ParseAddressList(from)
	if err != nil {
		return fmt.Errorf("parsing from addresses: %s", err)
	}
	if len(addrs) != 1 {
		return fmt.Errorf("must be exactly one from address")
	}
	if err := c.Mail(addrs[0].Address); err != nil {
		return fmt.Errorf("sending mail from: %s", err)
	}
	addrs, err = mail.ParseAddressList(to)
	if err != nil {
		return fmt.Errorf("parsing to addresses: %s", err)
	}
	for _, addr := range addrs {
		if err := c.Rcpt(addr.Address); err != nil {
			return fmt.Errorf("sending rcpt to: %s", err)
		}
	}
	// Send the email body.
	wc, err := c.Data()
	if err != nil {
		return err
	}
	defer wc.Close()
	for header, t := range n.conf.Headers {
		value, err := n.tmpl.ExecuteTextString(t, data)
		if err != nil {
			return fmt.Errorf("executing %q header template: %s", header, err)
		}
		// Q-encode header values so non-ASCII text survives transport.
		fmt.Fprintf(wc, "%s: %s\r\n", header, mime.QEncoding.Encode("utf-8", value))
	}
	fmt.Fprintf(wc, "Content-Type: text/html; charset=UTF-8\r\n")
	fmt.Fprintf(wc, "Date: %s\r\n", time.Now().Format(time.RFC1123Z))
	// TODO: Add some useful headers here, such as URL of the alertmanager
	// and active/resolved.
	fmt.Fprintf(wc, "\r\n")
	// TODO(fabxc): do a multipart write that considers the plain template.
	body, err := n.tmpl.ExecuteHTMLString(n.conf.HTML, data)
	if err != nil {
		return fmt.Errorf("executing email html template: %s", err)
	}
	_, err = io.WriteString(wc, body)
	return err
}
// PagerDuty implements a Notifier for PagerDuty notifications.
type PagerDuty struct {
	conf *config.PagerdutyConfig
	tmpl *template.Template
}

// NewPagerDuty returns a new PagerDuty notifier.
func NewPagerDuty(c *config.PagerdutyConfig, t *template.Template) *PagerDuty {
	return &PagerDuty{conf: c, tmpl: t}
}

// name implements the integration interface; used as metric label and fanout key.
func (*PagerDuty) name() string { return "pagerduty" }

// PagerDuty Events API event types.
const (
	pagerDutyEventTrigger = "trigger"
	pagerDutyEventResolve = "resolve"
)

// pagerDutyMessage is the JSON payload of the PagerDuty Events API.
type pagerDutyMessage struct {
	ServiceKey  string            `json:"service_key"`
	IncidentKey model.Fingerprint `json:"incident_key"`
	EventType   string            `json:"event_type"`
	Description string            `json:"description"`
	Client      string            `json:"client,omitempty"`
	ClientURL   string            `json:"client_url,omitempty"`
	Details     map[string]string `json:"details,omitempty"`
}
// Notify implements the Notifier interface.
//
// It triggers a PagerDuty incident keyed by the alert group, or resolves
// it when all alerts in the group are resolved.
//
// http://developer.pagerduty.com/documentation/integration/events/trigger
func (n *PagerDuty) Notify(ctx context.Context, as ...*types.Alert) error {
	key, ok := GroupKey(ctx)
	if !ok {
		return fmt.Errorf("group key missing")
	}
	var err error
	var (
		alerts    = types.Alerts(as...)
		data      = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmpl      = tmplText(n.tmpl, data, &err)
		eventType = pagerDutyEventTrigger
	)
	if alerts.Status() == model.AlertResolved {
		eventType = pagerDutyEventResolve
	}
	log.With("incident", key).With("eventType", eventType).Debugln("notifying PagerDuty")
	details := make(map[string]string, len(n.conf.Details))
	for k, v := range n.conf.Details {
		details[k] = tmpl(v)
	}
	msg := &pagerDutyMessage{
		ServiceKey:  tmpl(string(n.conf.ServiceKey)),
		EventType:   eventType,
		IncidentKey: key,
		Description: tmpl(n.conf.Description),
		Details:     details,
	}
	// Client info is only meaningful when triggering.
	if eventType == pagerDutyEventTrigger {
		msg.Client = tmpl(n.conf.Client)
		msg.ClientURL = tmpl(n.conf.ClientURL)
	}
	// tmpl recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, n.conf.URL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Slack implements a Notifier for Slack notifications.
type Slack struct {
	conf *config.SlackConfig
	tmpl *template.Template
}

// NewSlack returns a new Slack notification handler.
func NewSlack(conf *config.SlackConfig, tmpl *template.Template) *Slack {
	return &Slack{
		conf: conf,
		tmpl: tmpl,
	}
}

// name implements the integration interface; used as metric label and fanout key.
func (*Slack) name() string { return "slack" }

// slackReq is the request for sending a slack notification.
type slackReq struct {
	Channel     string            `json:"channel,omitempty"`
	Username    string            `json:"username,omitempty"`
	Attachments []slackAttachment `json:"attachments"`
}

// slackAttachment is used to display a richly-formatted message block.
type slackAttachment struct {
	Title     string   `json:"title,omitempty"`
	TitleLink string   `json:"title_link,omitempty"`
	Pretext   string   `json:"pretext,omitempty"`
	Text      string   `json:"text"`
	Fallback  string   `json:"fallback"`
	Color     string   `json:"color,omitempty"`
	MrkdwnIn  []string `json:"mrkdwn_in,omitempty"`
}

// slackAttachmentField is displayed in a table inside the message attachment.
// NOTE(review): currently unreferenced by Notify below.
type slackAttachmentField struct {
	Title string `json:"title"`
	Value string `json:"value"`
	Short bool   `json:"short,omitempty"`
}
// Notify implements the Notifier interface.
//
// It posts one attachment built from the templated config fields to the
// configured Slack webhook API URL.
func (n *Slack) Notify(ctx context.Context, as ...*types.Alert) error {
	var err error
	var (
		data     = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmplText = tmplText(n.tmpl, data, &err)
		tmplHTML = tmplHTML(n.tmpl, data, &err)
	)
	attachment := &slackAttachment{
		Title:     tmplText(n.conf.Title),
		TitleLink: tmplText(n.conf.TitleLink),
		Pretext:   tmplText(n.conf.Pretext),
		Text:      tmplHTML(n.conf.Text),
		Fallback:  tmplText(n.conf.Fallback),
		Color:     tmplText(n.conf.Color),
		MrkdwnIn:  []string{"fallback", "pretext"},
	}
	req := &slackReq{
		Channel:     tmplText(n.conf.Channel),
		Username:    tmplText(n.conf.Username),
		Attachments: []slackAttachment{*attachment},
	}
	// The tmpl* helpers recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(req); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, string(n.conf.APIURL), contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	// TODO(fabxc): is 2xx status code really indicator for success for Slack API?
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// Hipchat implements a Notifier for Hipchat notifications.
type Hipchat struct {
	conf *config.HipchatConfig
	tmpl *template.Template
}

// NewHipchat returns a new Hipchat notification handler.
func NewHipchat(conf *config.HipchatConfig, tmpl *template.Template) *Hipchat {
	return &Hipchat{
		conf: conf,
		tmpl: tmpl,
	}
}

// name implements the integration interface; used as metric label and fanout key.
func (*Hipchat) name() string { return "hipchat" }

// hipchatReq is the JSON payload of the Hipchat room-notification API.
type hipchatReq struct {
	From          string `json:"from"`
	Notify        bool   `json:"notify"`
	Message       string `json:"message"`
	MessageFormat string `json:"message_format"`
	Color         string `json:"color"`
}
// Notify implements the Notifier interface.
//
// It posts a room notification to the Hipchat v2 API, using the HTML or
// text template for the message depending on the configured format.
// NOTE(review): the auth token is embedded in the URL query string.
func (n *Hipchat) Notify(ctx context.Context, as ...*types.Alert) error {
	var err error
	var msg string
	var (
		data     = n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
		tmplText = tmplText(n.tmpl, data, &err)
		tmplHTML = tmplHTML(n.tmpl, data, &err)
		url      = fmt.Sprintf("%sv2/room/%s/notification?auth_token=%s", n.conf.APIURL, n.conf.RoomID, n.conf.AuthToken)
	)
	if n.conf.MessageFormat == "html" {
		msg = tmplHTML(n.conf.Message)
	} else {
		msg = tmplText(n.conf.Message)
	}
	req := &hipchatReq{
		From:          tmplText(n.conf.From),
		Notify:        n.conf.Notify,
		Message:       msg,
		MessageFormat: n.conf.MessageFormat,
		Color:         tmplText(n.conf.Color),
	}
	// The tmpl* helpers recorded the first template error, if any, into err.
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(req); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, url, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// OpsGenie implements a Notifier for OpsGenie notifications.
// conf holds API key/host configuration; tmpl renders message templates.
type OpsGenie struct {
	conf *config.OpsGenieConfig
	tmpl *template.Template
}
// NewOpsGenie returns a new OpsGenie notifier.
// (Comment fixed: it previously referred to a non-existent "NewOpsGenieDuty".)
func NewOpsGenie(c *config.OpsGenieConfig, t *template.Template) *OpsGenie {
	return &OpsGenie{conf: c, tmpl: t}
}
func (*OpsGenie) name() string { return "opsgenie" }
// opsGenieMessage is the payload part shared by create and close requests:
// the API key and the alert alias (the alert-group fingerprint), which
// lets a later close request match the alert created earlier.
type opsGenieMessage struct {
	APIKey string `json:"apiKey"`
	Alias model.Fingerprint `json:"alias"`
}
// opsGenieCreateMessage is the request body for creating an OpsGenie alert.
//
// The embedded *opsGenieMessage fields are promoted into the top-level
// JSON object automatically by encoding/json, so no struct tag is needed.
// The previous `json:,inline` tags were malformed (missing quotes, and
// encoding/json has no "inline" option) — go vet flags them, and they had
// no effect on encoding. Removing them preserves the wire format.
type opsGenieCreateMessage struct {
	*opsGenieMessage

	Message string            `json:"message"`
	Details map[string]string `json:"details"`
}

// opsGenieCloseMessage is the request body for closing (resolving) an alert.
type opsGenieCloseMessage struct {
	*opsGenieMessage
}
// Notify implements the Notifier interface.
//
// Depending on the aggregate status of the alert group it either creates
// a new OpsGenie alert or closes an existing one, using the group key as
// the alert alias so create/close pairs match up.
func (n *OpsGenie) Notify(ctx context.Context, as ...*types.Alert) error {
	key, ok := GroupKey(ctx)
	if !ok {
		return fmt.Errorf("group key missing")
	}
	data := n.tmpl.Data(receiver(ctx), groupLabels(ctx), as...)
	log.With("incident", key).Debugln("notifying OpsGenie")
	var err error
	tmpl := tmplText(n.tmpl, data, &err)
	details := make(map[string]string, len(n.conf.Details))
	for k, v := range n.conf.Details {
		details[k] = tmpl(v)
	}
	var (
		msg    interface{}
		apiURL string
		apiMsg = opsGenieMessage{
			APIKey: string(n.conf.APIKey),
			Alias:  key,
		}
		alerts = types.Alerts(as...)
	)
	switch alerts.Status() {
	case model.AlertResolved:
		apiURL = n.conf.APIHost + "v1/json/alert/close"
		msg = &opsGenieCloseMessage{&apiMsg}
	default:
		apiURL = n.conf.APIHost + "v1/json/alert"
		msg = &opsGenieCreateMessage{
			opsGenieMessage: &apiMsg,
			Message:         tmpl(n.conf.Description),
			Details:         details,
		}
	}
	// The tmpl helper records the first expansion failure in err.
	if err != nil {
		return fmt.Errorf("templating error: %s", err)
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	resp, err := ctxhttp.Post(ctx, http.DefaultClient, apiURL, contentTypeJSON, &buf)
	if err != nil {
		return err
	}
	// Defer the close so the body is released on every return path,
	// consistent with the other notifiers in this file.
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %v", resp.StatusCode)
	}
	return nil
}
// tmplText builds a text-template expansion helper bound to tmpl and data.
// The returned function expands the named template; after the first
// failure (recorded in *err) all further calls are no-ops returning "".
func tmplText(tmpl *template.Template, data *template.Data, err *error) func(string) string {
	return func(name string) string {
		if *err != nil {
			return ""
		}
		var out string
		out, *err = tmpl.ExecuteTextString(name, data)
		return out
	}
}
// tmplHTML builds an HTML-template expansion helper bound to tmpl and data.
// The returned function expands the named template; after the first
// failure (recorded in *err) all further calls are no-ops returning "".
func tmplHTML(tmpl *template.Template, data *template.Data, err *error) func(string) string {
	return func(name string) string {
		if *err != nil {
			return ""
		}
		var out string
		out, *err = tmpl.ExecuteHTMLString(name, data)
		return out
	}
}
|
package jira
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/codeship/go-retro"
)
const (
	// dateFormat is the reference-time layout for JIRA date query params.
	dateFormat = "2006-01-02"
	// jiraHost and jiraPath locate the timesheet-gadget raw-JSON endpoint.
	jiraHost = "jira.lazada.com"
	jiraPath = "/rest/timesheet-gadget/1.0/raw-timesheet.json"
	// maxRetryAttempts bounds backoff retries on transient failures.
	maxRetryAttempts = 3
)
// Entrie is a single worklog entry as returned by the timesheet gadget.
// Timestamps (Created/StartDate/Updated) are Unix milliseconds; TimeSpent
// is in seconds (see GetTotalTimeSpentByUser, which multiplies by time.Second).
type Entrie struct {
	ID uint64 `json:"id"`
	// NOTE(review): the tag `json:"string"` looks like it should be
	// `json:"comment"` — verify against the actual JIRA response before
	// changing, since altering it changes what gets decoded.
	Comment string `json:"string"`
	TimeSpent int64 `json:"timeSpent"`
	Author string `json:"author"`
	AuthorFullName string `json:"authorFullName"`
	Created int64 `json:"created"`
	StartDate int64 `json:"startDate"`
	UpdateAuthor string `json:"updateAuthor"`
	UpdateAuthorFullName string `json:"updateAuthorFullName"`
	Updated int64 `json:"updated"`
}
// WorklogItem groups all worklog entries recorded against one JIRA issue.
type WorklogItem struct {
	Key string `json:"key"`
	Summary string `json:"summary"`
	Entries []Entrie `json:"entries"`
}
// Timesheet is the top-level timesheet-gadget response: the requested
// date range plus the per-issue worklog.
type Timesheet struct {
	StartDate int64 `json:"startDate"`
	EndDate int64 `json:"endDate"`
	Worklog []WorklogItem `json:"worklog"`
}
// UserTimeLog pairs a user name with their total logged time.
type UserTimeLog struct {
	Name string
	TimeSpent time.Duration
}
// Client talks to the JIRA timesheet-gadget API using a pre-encoded
// Basic-auth token.
type Client struct {
	token string
	// NOTE(review): cli is never assigned or used — requests go through
	// http.DefaultClient. Consider removing it or wiring it in.
	cli *http.Client
}
// NewClient returns a JIRA client that authenticates with the given
// pre-encoded Basic-auth token.
func NewClient(token string) *Client {
	c := Client{token: token}
	return &c
}
func (this *Client) GetTimesheetForUser(user string, from, to time.Time) (*Timesheet, error) {
values := url.Values{}
values.Add("targetUser", user)
values.Add("startDate", from.Format(dateFormat))
values.Add("endDate", to.Format(dateFormat))
jiraURL := url.URL{
Scheme: "https",
Host: jiraHost,
Path: jiraPath,
RawQuery: values.Encode(),
}
req := &http.Request{
Method: "GET",
URL: &jiraURL,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
// we can use SetBasicAuth here but I think having password in config file is not good idea
// we'll keep token instead
Header: make(http.Header),
Host: jiraURL.Host,
}
req.Header.Set("Authorization", "Basic "+this.token)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
log.Printf("JIRA response for %q from %v to %v: %q", user, from, to, responseBody)
var timesheet Timesheet
if err := json.Unmarshal(responseBody, ×heet); err != nil {
return nil, err
}
return ×heet, nil
}
func (this *Client) GetTotalTimeSpentByUser(user string, from, to time.Time) (time.Duration, error) {
var timesheet *Timesheet
getTimesheetError := retro.DoWithRetry(func() error {
result, err := this.GetTimesheetForUser(user, from, to)
if err != nil {
return retro.NewBackoffRetryableError(err, maxRetryAttempts)
}
timesheet = result
return nil
})
if getTimesheetError != nil {
return 0, getTimesheetError
}
var totalTimeSpent time.Duration
// avoid slice elements copying
for worklogItemIndex := range timesheet.Worklog {
worklogItem := ×heet.Worklog[worklogItemIndex]
for entrieIndex := range worklogItem.Entries {
entrie := &worklogItem.Entries[entrieIndex]
if entrie.Author != user {
return 0, fmt.Errorf("worklog author %q != user %q", entrie.Author, user)
}
spentDuration := time.Duration(entrie.TimeSpent) * time.Second
totalTimeSpent += spentDuration
//log.Printf("entrie: %+v start: %v created: %v spent: %v\n",
// entrie,
// time.Unix(entrie.StartDate/1000, 0),
// time.Unix(entrie.Created/1000, 0),
// spentDuration)
}
}
return totalTimeSpent, nil
}
// durationErrorResult carries one user's total-time result (or error)
// back over a channel from the async workers.
type durationErrorResult struct {
	totalTimeSpent time.Duration
	user string
	err error
}
// getTotalTimeSpentByUserAsync computes one user's total logged time and
// delivers the result (or error) on ch. Intended to run as a goroutine.
func (this *Client) getTotalTimeSpentByUserAsync(user string, from, to time.Time, ch chan<- durationErrorResult) {
	spent, err := this.GetTotalTimeSpentByUser(user, from, to)
	res := durationErrorResult{
		user:           user,
		totalTimeSpent: spent,
		err:            err,
	}
	ch <- res
}
// GetUsersLoggedLessThenMin returns the users whose total logged time in
// [from, to] is below min. (The "LessThen" spelling is kept because the
// name is exported and callers depend on it.)
//
// All users are queried concurrently; individual failures are collected
// and reported together while still returning the successful results.
func (this *Client) GetUsersLoggedLessThenMin(users []string, from, to time.Time, min time.Duration) ([]UserTimeLog, error) {
	result := make([]UserTimeLog, 0, len(users))
	errors := make([]error, 0, len(users))
	ch := make(chan durationErrorResult)
	for _, user := range users {
		go this.getTotalTimeSpentByUserAsync(user, from, to, ch)
	}
	// Collect exactly one result per user.
	for i := 0; i < len(users); i++ {
		res := <-ch
		if res.err != nil {
			errors = append(errors, res.err)
			continue
		}
		if res.totalTimeSpent < min {
			result = append(result, UserTimeLog{
				Name:      res.user,
				TimeSpent: res.totalTimeSpent,
			})
		}
	}
	if len(errors) > 0 {
		msgs := make([]string, 0, len(errors))
		for _, e := range errors {
			msgs = append(msgs, e.Error())
		}
		// Fixed typo in the message: "occured" -> "occurred".
		return result, fmt.Errorf("Multiple errors occurred: %s", strings.Join(msgs, ", "))
	}
	return result, nil
}
Add retrying if response is empty or incorrect author
package jira
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/codeship/go-retro"
)
const (
	// dateFormat is the reference-time layout for JIRA date query params.
	dateFormat = "2006-01-02"
	// jiraHost and jiraPath locate the timesheet-gadget raw-JSON endpoint.
	jiraHost = "jira.lazada.com"
	jiraPath = "/rest/timesheet-gadget/1.0/raw-timesheet.json"
	// maxRetryAttempts bounds backoff retries on transient failures.
	maxRetryAttempts = 3
)
// emptyResponseError marks a zero-duration result so callers can retry
// and then treat the user as having logged nothing.
// NOTE(review): Go convention would name this ErrEmptyResponse.
var emptyResponseError = fmt.Errorf("empty response")
// Entrie is a single worklog entry as returned by the timesheet gadget.
// Timestamps (Created/StartDate/Updated) are Unix milliseconds; TimeSpent
// is in seconds (see GetTotalTimeSpentByUser, which multiplies by time.Second).
type Entrie struct {
	ID uint64 `json:"id"`
	// NOTE(review): the tag `json:"string"` looks like it should be
	// `json:"comment"` — verify against the actual JIRA response before
	// changing, since altering it changes what gets decoded.
	Comment string `json:"string"`
	TimeSpent int64 `json:"timeSpent"`
	Author string `json:"author"`
	AuthorFullName string `json:"authorFullName"`
	Created int64 `json:"created"`
	StartDate int64 `json:"startDate"`
	UpdateAuthor string `json:"updateAuthor"`
	UpdateAuthorFullName string `json:"updateAuthorFullName"`
	Updated int64 `json:"updated"`
}
// WorklogItem groups all worklog entries recorded against one JIRA issue.
type WorklogItem struct {
	Key string `json:"key"`
	Summary string `json:"summary"`
	Entries []Entrie `json:"entries"`
}
// Timesheet is the top-level timesheet-gadget response: the requested
// date range plus the per-issue worklog.
type Timesheet struct {
	StartDate int64 `json:"startDate"`
	EndDate int64 `json:"endDate"`
	Worklog []WorklogItem `json:"worklog"`
}
// UserTimeLog pairs a user name with their total logged time.
type UserTimeLog struct {
	Name string
	TimeSpent time.Duration
}
// Client talks to the JIRA timesheet-gadget API using a pre-encoded
// Basic-auth token.
type Client struct {
	token string
	// NOTE(review): cli is never assigned or used — requests go through
	// http.DefaultClient. Consider removing it or wiring it in.
	cli *http.Client
}
// NewClient returns a JIRA client that authenticates with the given token.
func NewClient(token string) *Client {
	return &Client{
		token: token,
	}
}
func (this *Client) GetTimesheetForUser(user string, from, to time.Time) (*Timesheet, error) {
values := url.Values{}
values.Add("targetUser", user)
values.Add("startDate", from.Format(dateFormat))
values.Add("endDate", to.Format(dateFormat))
jiraURL := url.URL{
Scheme: "https",
Host: jiraHost,
Path: jiraPath,
RawQuery: values.Encode(),
}
req := &http.Request{
Method: "GET",
URL: &jiraURL,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
// we can use SetBasicAuth here but I think having password in config file is not good idea
// we'll keep token instead
Header: make(http.Header),
Host: jiraURL.Host,
}
req.Header.Set("Authorization", "Basic "+this.token)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
log.Printf("JIRA response for %q from %v to %v: %q", user, from, to, responseBody)
var timesheet Timesheet
if err := json.Unmarshal(responseBody, ×heet); err != nil {
return nil, err
}
return ×heet, nil
}
func (this *Client) GetTotalTimeSpentByUser(user string, from, to time.Time) (time.Duration, error) {
timesheet, err := this.GetTimesheetForUser(user, from, to)
if err != nil {
return 0, err
}
var totalTimeSpent time.Duration
// avoid slice elements copying
for worklogItemIndex := range timesheet.Worklog {
worklogItem := ×heet.Worklog[worklogItemIndex]
for entrieIndex := range worklogItem.Entries {
entrie := &worklogItem.Entries[entrieIndex]
if entrie.Author != user {
return 0, fmt.Errorf("worklog author %q != user %q", entrie.Author, user)
}
spentDuration := time.Duration(entrie.TimeSpent) * time.Second
totalTimeSpent += spentDuration
//log.Printf("entrie: %+v start: %v created: %v spent: %v\n",
// entrie,
// time.Unix(entrie.StartDate/1000, 0),
// time.Unix(entrie.Created/1000, 0),
// spentDuration)
}
}
return totalTimeSpent, nil
}
// durationErrorResult carries one user's total-time result (or error)
// back over a channel from the async workers.
type durationErrorResult struct {
	totalTimeSpent time.Duration
	user string
	err error
}
// getTotalTimeSpentByUserAsync computes one user's total logged time with
// retries and delivers the result on ch. Intended to run as a goroutine.
//
// A zero-duration result is treated as a possibly-transient empty
// response and retried via emptyResponseError; users who genuinely
// logged nothing therefore cost maxRetryAttempts round trips.
func (this *Client) getTotalTimeSpentByUserAsync(user string, from, to time.Time, ch chan<- durationErrorResult) {
	var totalTimeSpent time.Duration
	getTimesheetError := retro.DoWithRetry(func() error {
		result, err := this.GetTotalTimeSpentByUser(user, from, to)
		if err != nil {
			return retro.NewBackoffRetryableError(err, maxRetryAttempts)
		}
		if result == 0 {
			// NOTE(review): the consumer compares the returned error with
			// the emptyResponseError sentinel — confirm DoWithRetry hands
			// back the original error (not the retryable wrapper) once
			// retries are exhausted, or the comparison never matches.
			return retro.NewBackoffRetryableError(emptyResponseError, maxRetryAttempts)
		}
		totalTimeSpent = result
		return nil
	})
	ch <- durationErrorResult{
		totalTimeSpent: totalTimeSpent,
		user: user,
		err: getTimesheetError,
	}
}
// GetUsersLoggedLessThenMin returns the users whose total logged time in
// [from, to] is below min. (The "LessThen" spelling is kept because the
// name is exported and callers depend on it.)
//
// All users are queried concurrently. An emptyResponseError is not a
// failure: the user is reported with zero time; other failures are
// collected and reported together.
func (this *Client) GetUsersLoggedLessThenMin(users []string, from, to time.Time, min time.Duration) ([]UserTimeLog, error) {
	result := make([]UserTimeLog, 0, len(users))
	errors := make([]error, 0, len(users))
	ch := make(chan durationErrorResult)
	for _, user := range users {
		go this.getTotalTimeSpentByUserAsync(user, from, to, ch)
	}
	// Collect exactly one result per user.
	for i := 0; i < len(users); i++ {
		res := <-ch
		if res.err != nil && res.err != emptyResponseError {
			errors = append(errors, res.err)
			continue
		}
		if res.totalTimeSpent < min {
			result = append(result, UserTimeLog{
				Name:      res.user,
				TimeSpent: res.totalTimeSpent,
			})
		}
	}
	if len(errors) > 0 {
		msgs := make([]string, 0, len(errors))
		for _, e := range errors {
			msgs = append(msgs, e.Error())
		}
		// Fixed typo in the message: "occured" -> "occurred".
		return result, fmt.Errorf("Multiple errors occurred: %s", strings.Join(msgs, ", "))
	}
	return result, nil
}
|
/*
* gomacro - A Go interpreter with Lisp-like macros
*
* Copyright (C) 2019 Massimiliano Ghilardi
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* op3.go
*
* Created on Jan 27, 2019
* Author Massimiliano Ghilardi
*/
package arm64
// ============================================================================
// three-arg instruction
// op3vals maps simple three-register operations to the top byte of their
// arm64 opcode; ops with more involved encodings are handled in op3val.
var op3vals = map[Op3]uint8{
	AND3: 0x0A,
	ADD3: 0x0B,
	ADC3: 0x1A, // add with carry
	OR3: 0x2A,
	XOR3: 0x4A,
	SUB3: 0x4B,
	SBB3: 0x5A, // subtract with borrow
}
// return 32bit value used to encode operation on Reg,Reg,Reg
// op3val returns the 32-bit base opcode used to encode operation `op`
// in the Reg,Reg,Reg form. Shifts, multiply and divide have dedicated
// encodings; the remaining ops come from the op3vals table.
func op3val(op Op3) uint32 {
	var val uint32
	switch op {
	case SHL3:
		val = 0x1AC02000
	case SHR3:
		// logical i.e. zero-extended right shift is 0x1AC02400
		// arithmetic i.e. sign-extended right shift is 0x1AC02800
		val = 0x1AC02400
	case MUL3:
		// 0x1B007C00 because MUL3 a,b,c is an alias for MADD4 xzr,a,b,c
		val = 0x1B007C00
	case DIV3:
		// unsigned division is 0x1AC00800
		// signed division is 0x1AC00C00
		val = 0x1AC00800
	case REM3:
		errorf("internal error, operation %v needs to be implemented as {s|u}div followed by msub", op)
	default:
		val = uint32(op3vals[op]) << 24
		if val == 0 {
			// Fixed message: this function handles Op3, not Op2.
			errorf("unknown Op3 instruction: %v", op)
		}
	}
	return val
}
// return 32bit value used to encode operation on Reg,Const,Reg
// immval returns the 32-bit base opcode used to encode operation `op`
// in the Reg,Const,Reg form, i.e. with an embedded immediate constant.
func immval(op Op3) uint32 {
	switch op {
	case AND3:
		return 0x12 << 24
	case ADD3:
		return 0x11 << 24
	case SHL3, SHR3:
		// immediate constant is encoded differently
		return 0x53 << 24
	case OR3:
		return 0x32 << 24
	case XOR3:
		return 0x52 << 24
	case SUB3:
		return 0x51 << 24
	default:
		// Fixed message: this function handles Op3, not Op2.
		errorf("cannot encode Op3 instruction %v with immediate constant", op)
		return 0
	}
}
// ============================================================================
// Op3 compiles the three-argument operation `dst = a op b` and returns
// asm for chaining. It is the exported wrapper around op3.
func (arch Arm64) Op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) *Asm {
	arch.op3(asm, op, a, b, dst)
	return asm
}
// op3 compiles the three-argument operation `dst = a op b`.
//
// It validates argument kinds, materializes Mem/Const operands into
// (possibly temporary) registers, dispatches on the type of b, emulates
// AND_NOT3 as AND3 followed by NOT2, and stores the result back to
// memory when dst is a Mem.
func (arch Arm64) op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) Arm64 {
	// validate kinds
	assert(a.Kind() == dst.Kind())
	switch op {
	case SHL3, SHR3:
		assert(!b.Kind().Signed())
	case GETIDX, SETIDX:
		// fixed: the format string has four verbs, so pass exactly four
		// values (the previous call passed `op` twice, five values).
		errorf("unimplemented: %v %v,%v,%v", op, a, b, dst)
	default:
		assert(b.Kind() == dst.Kind())
	}
	// validate dst
	switch dst.(type) {
	case Reg, Mem:
		break
	case Const:
		errorf("destination cannot be a constant: %v %v, %v, %v", op, a, b, dst)
	default:
		errorf("unknown destination type %T, expecting Reg or Mem: %v %v, %v, %v", dst, op, a, b, dst)
	}
	if asm.Optimize3(op, a, b, dst) {
		return arch
	}
	var ra, rb, rdst Reg
	var ta, tdst bool // Reg is a temporary register?
	switch dst := dst.(type) {
	case Reg:
		rdst = dst
	case Mem:
		rdst = asm.RegAlloc(dst.Kind())
		defer asm.RegFree(rdst)
		tdst = true
	}
	var not_dst bool
	if op == AND_NOT3 {
		// must be emulated
		not_dst = true
		op = AND3
	}
	if op.IsCommutative() && a.Const() && !b.Const() {
		a, b = b, a
	}
	switch xa := a.(type) {
	case Reg:
		ra = xa
	case Mem:
		if tdst {
			// reuse temporary register rdst
			ra = rdst
		} else {
			ra = asm.RegAlloc(xa.Kind())
			defer asm.RegFree(ra)
		}
		ta = true
		arch.load(asm, xa, ra)
	case Const:
		ra = asm.RegAlloc(xa.Kind())
		defer asm.RegFree(ra)
		arch.movConstReg(asm, xa, ra)
	default:
		errorf("unknown argument type %T, expecting Const, Reg or Mem: %v %v, %v, %v", a, op, a, b, dst)
	}
	switch xb := b.(type) {
	case Reg:
		arch.op3RegRegReg(asm, op, ra, xb, rdst)
	case Mem:
		if tdst && (!ta || ra != rdst) {
			// reuse temporary register rdst
			rb = rdst
		} else {
			rb = asm.RegAlloc(xb.Kind())
			defer asm.RegFree(rb)
		}
		arch.load(asm, xb, rb).op3RegRegReg(asm, op, ra, rb, rdst)
	case Const:
		arch.op3RegConstReg(asm, op, ra, xb, rdst)
	default:
		errorf("unknown argument type %T, expecting Const, Reg or Mem: %v %v, %v, %v", b, op, a, b, dst)
	}
	if not_dst {
		// operation was AND_NOT3: negate dst
		arch.op2RegReg(asm, NOT2, rdst, rdst)
	}
	if tdst {
		arch.store(asm, rdst, dst.(Mem))
	}
	return arch
}
// op3RegRegReg emits the Reg,Reg,Reg encoding of `dst = a op b`.
// For signed destinations it flips the opcode bits that select
// arithmetic right shift / signed division.
func (arch Arm64) op3RegRegReg(asm *Asm, op Op3, a Reg, b Reg, dst Reg) Arm64 {
	var opbits uint32
	if dst.Kind().Signed() {
		switch op {
		case SHR3:
			// arithmetic right shift
			opbits = 0xC00
		case DIV3:
			// signed division
			opbits = 0x400
		}
	}
	// 8/16-bit operands are emulated in 32-bit registers and may need
	// widening before operations that move high bits downward.
	arch.extendHighBits(asm, op, a)
	arch.extendHighBits(asm, op, b)
	// TODO: on arm64, division by zero returns zero instead of panic
	asm.Uint32(kbit(dst) | (opbits ^ op3val(op)) | val(b)<<16 | val(a)<<5 | val(dst))
	return arch
}
// op3RegConstReg emits `dst = a op cb` where cb is a constant. It first
// tries to embed the constant directly in the instruction; otherwise it
// materializes the constant in a scratch register and falls back to the
// Reg,Reg,Reg form.
func (arch Arm64) op3RegConstReg(asm *Asm, op Op3, a Reg, cb Const, dst Reg) Arm64 {
	if !arch.tryOp3RegConstReg(asm, op, a, uint64(cb.Val()), dst) {
		tmp := asm.RegAlloc(cb.Kind())
		arch.movConstReg(asm, cb, tmp)
		arch.op3RegRegReg(asm, op, a, tmp, dst)
		asm.RegFree(tmp)
	}
	return arch
}
// tryOp3RegConstReg tries to encode the operation as a single
// instruction with an embedded immediate constant.
// It returns false when that is not possible, i.e. when the constant
// must first be loaded into a register.
func (arch Arm64) tryOp3RegConstReg(asm *Asm, op Op3, a Reg, cval uint64, dst Reg) bool {
	imm3 := immediate3(op)
	immcval, ok := imm3.Encode64(cval, dst.Kind())
	if !ok {
		return false
	}
	opval := immval(op)
	kbit := kbit(dst)
	// 8/16-bit operands may need widening before right shifts etc.
	arch.extendHighBits(asm, op, a)
	switch imm3 {
	case Imm3AddSub, Imm3Bitwise:
		// for op == OR3, also accept a == XZR
		asm.Uint32(kbit | opval | immcval | valOrX31(a.RegId(), op == OR3)<<5 | val(dst))
	case Imm3Shift:
		arch.shiftRegConstReg(asm, op, a, cval, dst)
	default:
		cb := ConstInt64(int64(cval))
		errorf("unknown constant encoding style %v of %v: %v %v, %v, %v", imm3, op, op, a, cb, dst)
	}
	return true
}
// shiftRegConstReg emits `dst = a << cval` or `dst = a >> cval` using
// the arm64 immediate bitfield-shift encodings. It rejects shift counts
// that are out of range for the destination width, and uses the
// arithmetic (sign-extending) form of SHR3 for signed destinations.
func (arch Arm64) shiftRegConstReg(asm *Asm, op Op3, a Reg, cval uint64, dst Reg) {
	dsize := dst.Kind().Size()
	if cval >= 8*uint64(dsize) {
		cb := ConstInt64(int64(cval))
		errorf("constant is out of range for shift: %v %v, %v, %v", op, a, cb, dst)
	}
	switch op {
	case SHL3:
		switch dsize {
		case 1, 2, 4:
			asm.Uint32(0x53000000 | uint32(32-cval)<<16 | uint32(31-cval)<<10 | val(a)<<5 | val(dst))
		case 8:
			asm.Uint32(0xD3400000 | uint32(64-cval)<<16 | uint32(63-cval)<<10 | val(a)<<5 | val(dst))
		}
	case SHR3:
		var unsignedbit uint32
		if !dst.Kind().Signed() {
			// select the logical (zero-extending) right-shift form
			unsignedbit = 0x40 << 24
		}
		switch dsize {
		case 1, 2, 4:
			asm.Uint32(unsignedbit | 0x13007C00 | uint32(cval)<<16 | val(a)<<5 | val(dst))
		case 8:
			asm.Uint32(unsignedbit | 0x9340FC00 | uint32(cval)<<16 | val(a)<<5 | val(dst))
		}
	}
}
// extendHighBits widens an emulated 8/16-bit register operand to 32 bits
// when needed.
//
// arm64 has no arithmetic/bitwise operations on 8- and 16-bit registers
// (only ldr/str), so they are emulated in 32-bit registers with the high
// bits ignored. Right shift, division and remainder move data from high
// bits to low bits, so for those ops the operand must first be
// zero-extended (unsigned) or sign-extended (signed).
func (arch Arm64) extendHighBits(asm *Asm, op Op3, r Reg) Arm64 {
	kind := r.Kind()
	if kind.Size() > 2 {
		// 32- and 64-bit registers are operated on natively.
		return arch
	}
	switch op {
	case SHR3, DIV3, REM3:
		to := Uint32
		if kind.Signed() {
			to = Int32
		}
		arch.cast(asm, r, MakeReg(r.RegId(), to))
	}
	return arch
}
// ============================================================================
// style of immediate constants
// embeddable in a single Op3 instruction
type Immediate3 uint8
const (
	// Imm3None marks operations with no immediate form.
	Imm3None Immediate3 = iota
	Imm3AddSub // 12 bits wide, possibly shifted left by 12 bits
	Imm3Bitwise // complicated
	Imm3Shift // 0..63 for 64 bit registers; 0..31 for 32 bit registers
)
// immediate3 returns the style of immediate constants that can be
// embedded in a single instruction for the given operation.
func immediate3(op Op3) Immediate3 {
	switch op {
	case SHL3, SHR3:
		return Imm3Shift
	case AND3, OR3, XOR3:
		return Imm3Bitwise
	case ADD3, SUB3:
		return Imm3AddSub
	}
	return Imm3None
}
// Encode64 encodes val in the given immediate style for a register of
// the given kind. It returns the encoded instruction bits and ok=false
// when val cannot be represented in that style.
func (imm Immediate3) Encode64(val uint64, kind Kind) (e uint32, ok bool) {
	kbits := kind.Size() * 8
	switch imm {
	case Imm3AddSub:
		// 12 bits wide, possibly shifted left by 12 bits
		if val == val&0xFFF {
			return uint32(val << 10), true
		} else if val == val&0xFFF000 {
			return 0x400000 | uint32(val>>2), true
		}
	case Imm3Bitwise:
		// complicated: use the precomputed lookup tables
		if kbits <= 32 {
			e, ok = imm3Bitwise32[val]
		} else {
			e, ok = imm3Bitwise64[val]
		}
		return e, ok
	case Imm3Shift:
		// val is unsigned, so only the upper bound needs checking
		// (the former `val >= 0` test was always true — staticcheck SA4003).
		if val < uint64(kbits) {
			// actual encoding is complicated
			return uint32(val), true
		}
	}
	return 0, false
}
// Precomputed lookup tables: representable bitwise-immediate mask -> encoding.
var imm3Bitwise32 = makeImm3Bitwise32()
var imm3Bitwise64 = makeImm3Bitwise64()
// makeImm3Bitwise32 precomputes every immediate constant that the arm64
// "bitwise immediate" encoding can represent in 32-bit and/orr/eor
// instructions, mapping each constant to its encoding bits.
func makeImm3Bitwise32() map[uint64]uint32 {
	encodings := make(map[uint64]uint32)
	for size := uint32(2); size <= 32; size *= 2 {
		for length := uint32(1); length < size; length++ {
			// a run of `length` ones, replicated to fill 32 bits
			mask := uint64(0xffffffff) >> (32 - length)
			for step := size; step < 32; step *= 2 {
				mask |= mask << step
			}
			for rotation := uint32(0); rotation < size; rotation++ {
				encodings[mask] = (size&64|rotation)<<16 | (0x7800*size)&0xF000 | (length-1)<<10
				mask = (mask >> 1) | (mask << 31)
			}
		}
	}
	return encodings
}
// makeImm3Bitwise64 precomputes every immediate constant that the arm64
// "bitwise immediate" encoding can represent in 64-bit and/orr/eor
// instructions, mapping each constant to its encoding bits.
//
// Examples:
//   #0x5555555555555555 => size=2, length=1, rotation=0 => 0x00f000
//   #0xaaaaaaaaaaaaaaaa => size=2, length=1, rotation=1 => 0x01f000
//   #0x0f0f0f0f0f0f0f0f => size=8, length=4, rotation=0 => 0x00cc00
//   #0x7f7f7f7f7f7f7f7f => size=8, length=7, rotation=0 => 0x00d800
func makeImm3Bitwise64() map[uint64]uint32 {
	encodings := make(map[uint64]uint32)
	for size := uint32(2); size <= 64; size *= 2 {
		for length := uint32(1); length < size; length++ {
			// a run of `length` ones, replicated to fill 64 bits
			mask := uint64(0xffffffffffffffff) >> (64 - length)
			for step := size; step < 64; step *= 2 {
				mask |= mask << step
			}
			for rotation := uint32(0); rotation < size; rotation++ {
				encodings[mask] = (size&64|rotation)<<16 | (0x7800*size)&0xF000 | (length-1)<<10
				mask = (mask >> 1) | (mask << 63)
			}
		}
	}
	return encodings
}
fix call to errorf() in jit/arm64/op3.go
/*
* gomacro - A Go interpreter with Lisp-like macros
*
* Copyright (C) 2019 Massimiliano Ghilardi
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* op3.go
*
* Created on Jan 27, 2019
* Author Massimiliano Ghilardi
*/
package arm64
// ============================================================================
// three-arg instruction
// op3vals maps simple three-register operations to the top byte of their
// arm64 opcode; ops with more involved encodings are handled in op3val.
var op3vals = map[Op3]uint8{
	AND3: 0x0A,
	ADD3: 0x0B,
	ADC3: 0x1A, // add with carry
	OR3: 0x2A,
	XOR3: 0x4A,
	SUB3: 0x4B,
	SBB3: 0x5A, // subtract with borrow
}
// return 32bit value used to encode operation on Reg,Reg,Reg
// op3val returns the 32-bit base opcode used to encode operation `op`
// in the Reg,Reg,Reg form. Shifts, multiply and divide have dedicated
// encodings; the remaining ops come from the op3vals table.
func op3val(op Op3) uint32 {
	var val uint32
	switch op {
	case SHL3:
		val = 0x1AC02000
	case SHR3:
		// logical i.e. zero-extended right shift is 0x1AC02400
		// arithmetic i.e. sign-extended right shift is 0x1AC02800
		val = 0x1AC02400
	case MUL3:
		// 0x1B007C00 because MUL3 a,b,c is an alias for MADD4 xzr,a,b,c
		val = 0x1B007C00
	case DIV3:
		// unsigned division is 0x1AC00800
		// signed division is 0x1AC00C00
		val = 0x1AC00800
	case REM3:
		errorf("internal error, operation %v needs to be implemented as {s|u}div followed by msub", op)
	default:
		val = uint32(op3vals[op]) << 24
		if val == 0 {
			// Fixed message: this function handles Op3, not Op2.
			errorf("unknown Op3 instruction: %v", op)
		}
	}
	return val
}
// return 32bit value used to encode operation on Reg,Const,Reg
// immval returns the 32-bit base opcode used to encode operation `op`
// in the Reg,Const,Reg form, i.e. with an embedded immediate constant.
func immval(op Op3) uint32 {
	switch op {
	case AND3:
		return 0x12 << 24
	case ADD3:
		return 0x11 << 24
	case SHL3, SHR3:
		// immediate constant is encoded differently
		return 0x53 << 24
	case OR3:
		return 0x32 << 24
	case XOR3:
		return 0x52 << 24
	case SUB3:
		return 0x51 << 24
	default:
		// Fixed message: this function handles Op3, not Op2.
		errorf("cannot encode Op3 instruction %v with immediate constant", op)
		return 0
	}
}
// ============================================================================
// Op3 compiles the three-argument operation `dst = a op b` and returns
// asm for chaining. It is the exported wrapper around op3.
func (arch Arm64) Op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) *Asm {
	arch.op3(asm, op, a, b, dst)
	return asm
}
// op3 compiles the three-argument operation `dst = a op b`.
//
// It validates argument kinds, materializes Mem/Const operands into
// (possibly temporary) registers, dispatches on the type of b, emulates
// AND_NOT3 as AND3 followed by NOT2, and stores the result back to
// memory when dst is a Mem.
func (arch Arm64) op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) Arm64 {
	// validate kinds
	assert(a.Kind() == dst.Kind())
	switch op {
	case SHL3, SHR3:
		assert(!b.Kind().Signed())
	case GETIDX, SETIDX:
		errorf("unimplemented: %v %v,%v,%v", op, a, b, dst)
	default:
		assert(b.Kind() == dst.Kind())
	}
	// validate dst
	switch dst.(type) {
	case Reg, Mem:
		break
	case Const:
		errorf("destination cannot be a constant: %v %v, %v, %v", op, a, b, dst)
	default:
		errorf("unknown destination type %T, expecting Reg or Mem: %v %v, %v, %v", dst, op, a, b, dst)
	}
	if asm.Optimize3(op, a, b, dst) {
		return arch
	}
	var ra, rb, rdst Reg
	var ta, tdst bool // Reg is a temporary register?
	switch dst := dst.(type) {
	case Reg:
		rdst = dst
	case Mem:
		// dst lives in memory: compute into a scratch register, then store.
		rdst = asm.RegAlloc(dst.Kind())
		defer asm.RegFree(rdst)
		tdst = true
	}
	var not_dst bool
	if op == AND_NOT3 {
		// must be emulated
		not_dst = true
		op = AND3
	}
	// canonicalize: put the constant (if any) on the right-hand side
	if op.IsCommutative() && a.Const() && !b.Const() {
		a, b = b, a
	}
	switch xa := a.(type) {
	case Reg:
		ra = xa
	case Mem:
		if tdst {
			// reuse temporary register rdst
			ra = rdst
		} else {
			ra = asm.RegAlloc(xa.Kind())
			defer asm.RegFree(ra)
		}
		ta = true
		arch.load(asm, xa, ra)
	case Const:
		ra = asm.RegAlloc(xa.Kind())
		defer asm.RegFree(ra)
		arch.movConstReg(asm, xa, ra)
	default:
		errorf("unknown argument type %T, expecting Const, Reg or Mem: %v %v, %v, %v", a, op, a, b, dst)
	}
	switch xb := b.(type) {
	case Reg:
		arch.op3RegRegReg(asm, op, ra, xb, rdst)
	case Mem:
		if tdst && (!ta || ra != rdst) {
			// reuse temporary register rdst
			rb = rdst
		} else {
			rb = asm.RegAlloc(xb.Kind())
			defer asm.RegFree(rb)
		}
		arch.load(asm, xb, rb).op3RegRegReg(asm, op, ra, rb, rdst)
	case Const:
		arch.op3RegConstReg(asm, op, ra, xb, rdst)
	default:
		errorf("unknown argument type %T, expecting Const, Reg or Mem: %v %v, %v, %v", b, op, a, b, dst)
	}
	if not_dst {
		// operation was AND_NOT3: negate dst
		arch.op2RegReg(asm, NOT2, rdst, rdst)
	}
	if tdst {
		arch.store(asm, rdst, dst.(Mem))
	}
	return arch
}
// op3RegRegReg emits the Reg,Reg,Reg encoding of `dst = a op b`.
// For signed destinations it flips the opcode bits that select
// arithmetic right shift / signed division.
func (arch Arm64) op3RegRegReg(asm *Asm, op Op3, a Reg, b Reg, dst Reg) Arm64 {
	var opbits uint32
	if dst.Kind().Signed() {
		switch op {
		case SHR3:
			// arithmetic right shift
			opbits = 0xC00
		case DIV3:
			// signed division
			opbits = 0x400
		}
	}
	// 8/16-bit operands are emulated in 32-bit registers and may need
	// widening before operations that move high bits downward.
	arch.extendHighBits(asm, op, a)
	arch.extendHighBits(asm, op, b)
	// TODO: on arm64, division by zero returns zero instead of panic
	asm.Uint32(kbit(dst) | (opbits ^ op3val(op)) | val(b)<<16 | val(a)<<5 | val(dst))
	return arch
}
// op3RegConstReg emits `dst = a op cb` where cb is a constant. It first
// tries to embed the constant directly in the instruction; otherwise it
// materializes the constant in a scratch register and falls back to the
// Reg,Reg,Reg form.
func (arch Arm64) op3RegConstReg(asm *Asm, op Op3, a Reg, cb Const, dst Reg) Arm64 {
	if !arch.tryOp3RegConstReg(asm, op, a, uint64(cb.Val()), dst) {
		tmp := asm.RegAlloc(cb.Kind())
		arch.movConstReg(asm, cb, tmp)
		arch.op3RegRegReg(asm, op, a, tmp, dst)
		asm.RegFree(tmp)
	}
	return arch
}
// tryOp3RegConstReg tries to encode the operation as a single
// instruction with an embedded immediate constant.
// It returns false when that is not possible, i.e. when the constant
// must first be loaded into a register.
func (arch Arm64) tryOp3RegConstReg(asm *Asm, op Op3, a Reg, cval uint64, dst Reg) bool {
	imm3 := immediate3(op)
	immcval, ok := imm3.Encode64(cval, dst.Kind())
	if !ok {
		return false
	}
	opval := immval(op)
	kbit := kbit(dst)
	// 8/16-bit operands may need widening before right shifts etc.
	arch.extendHighBits(asm, op, a)
	switch imm3 {
	case Imm3AddSub, Imm3Bitwise:
		// for op == OR3, also accept a == XZR
		asm.Uint32(kbit | opval | immcval | valOrX31(a.RegId(), op == OR3)<<5 | val(dst))
	case Imm3Shift:
		arch.shiftRegConstReg(asm, op, a, cval, dst)
	default:
		cb := ConstInt64(int64(cval))
		errorf("unknown constant encoding style %v of %v: %v %v, %v, %v", imm3, op, op, a, cb, dst)
	}
	return true
}
// shiftRegConstReg emits `dst = a << cval` or `dst = a >> cval` using
// the arm64 immediate bitfield-shift encodings. It rejects shift counts
// that are out of range for the destination width, and uses the
// arithmetic (sign-extending) form of SHR3 for signed destinations.
func (arch Arm64) shiftRegConstReg(asm *Asm, op Op3, a Reg, cval uint64, dst Reg) {
	dsize := dst.Kind().Size()
	if cval >= 8*uint64(dsize) {
		cb := ConstInt64(int64(cval))
		errorf("constant is out of range for shift: %v %v, %v, %v", op, a, cb, dst)
	}
	switch op {
	case SHL3:
		switch dsize {
		case 1, 2, 4:
			asm.Uint32(0x53000000 | uint32(32-cval)<<16 | uint32(31-cval)<<10 | val(a)<<5 | val(dst))
		case 8:
			asm.Uint32(0xD3400000 | uint32(64-cval)<<16 | uint32(63-cval)<<10 | val(a)<<5 | val(dst))
		}
	case SHR3:
		var unsignedbit uint32
		if !dst.Kind().Signed() {
			// select the logical (zero-extending) right-shift form
			unsignedbit = 0x40 << 24
		}
		switch dsize {
		case 1, 2, 4:
			asm.Uint32(unsignedbit | 0x13007C00 | uint32(cval)<<16 | val(a)<<5 | val(dst))
		case 8:
			asm.Uint32(unsignedbit | 0x9340FC00 | uint32(cval)<<16 | val(a)<<5 | val(dst))
		}
	}
}
// extendHighBits widens an emulated 8/16-bit register operand to 32 bits
// when needed.
//
// arm64 has no arithmetic/bitwise operations on 8- and 16-bit registers
// (only ldr/str), so they are emulated in 32-bit registers with the high
// bits ignored. Right shift, division and remainder move data from high
// bits to low bits, so for those ops the operand must first be
// zero-extended (unsigned) or sign-extended (signed).
func (arch Arm64) extendHighBits(asm *Asm, op Op3, r Reg) Arm64 {
	kind := r.Kind()
	if kind.Size() > 2 {
		// 32- and 64-bit registers are operated on natively.
		return arch
	}
	switch op {
	case SHR3, DIV3, REM3:
		to := Uint32
		if kind.Signed() {
			to = Int32
		}
		arch.cast(asm, r, MakeReg(r.RegId(), to))
	}
	return arch
}
// ============================================================================
// style of immediate constants
// embeddable in a single Op3 instruction
type Immediate3 uint8
const (
	// Imm3None marks operations with no immediate form.
	Imm3None Immediate3 = iota
	Imm3AddSub // 12 bits wide, possibly shifted left by 12 bits
	Imm3Bitwise // complicated
	Imm3Shift // 0..63 for 64 bit registers; 0..31 for 32 bit registers
)
// immediate3 returns the style of immediate constants that can be
// embedded in a single instruction for the given operation.
func immediate3(op Op3) Immediate3 {
	switch op {
	case SHL3, SHR3:
		return Imm3Shift
	case AND3, OR3, XOR3:
		return Imm3Bitwise
	case ADD3, SUB3:
		return Imm3AddSub
	}
	return Imm3None
}
// Encode64 encodes val in the given immediate style for a register of
// the given kind. It returns the encoded instruction bits and ok=false
// when val cannot be represented in that style.
func (imm Immediate3) Encode64(val uint64, kind Kind) (e uint32, ok bool) {
	kbits := kind.Size() * 8
	switch imm {
	case Imm3AddSub:
		// 12 bits wide, possibly shifted left by 12 bits
		if val == val&0xFFF {
			return uint32(val << 10), true
		} else if val == val&0xFFF000 {
			return 0x400000 | uint32(val>>2), true
		}
	case Imm3Bitwise:
		// complicated: use the precomputed lookup tables
		if kbits <= 32 {
			e, ok = imm3Bitwise32[val]
		} else {
			e, ok = imm3Bitwise64[val]
		}
		return e, ok
	case Imm3Shift:
		// val is unsigned, so only the upper bound needs checking
		// (the former `val >= 0` test was always true — staticcheck SA4003).
		if val < uint64(kbits) {
			// actual encoding is complicated
			return uint32(val), true
		}
	}
	return 0, false
}
// Precomputed lookup tables: representable bitwise-immediate mask -> encoding.
var imm3Bitwise32 = makeImm3Bitwise32()
var imm3Bitwise64 = makeImm3Bitwise64()
// makeImm3Bitwise32 precomputes every immediate constant that the arm64
// "bitwise immediate" encoding can represent in 32-bit and/orr/eor
// instructions, mapping each constant to its encoding bits.
func makeImm3Bitwise32() map[uint64]uint32 {
	encodings := make(map[uint64]uint32)
	for size := uint32(2); size <= 32; size *= 2 {
		for length := uint32(1); length < size; length++ {
			// a run of `length` ones, replicated to fill 32 bits
			mask := uint64(0xffffffff) >> (32 - length)
			for step := size; step < 32; step *= 2 {
				mask |= mask << step
			}
			for rotation := uint32(0); rotation < size; rotation++ {
				encodings[mask] = (size&64|rotation)<<16 | (0x7800*size)&0xF000 | (length-1)<<10
				mask = (mask >> 1) | (mask << 31)
			}
		}
	}
	return encodings
}
// makeImm3Bitwise64 precomputes every immediate constant that the arm64
// "bitwise immediate" encoding can represent in 64-bit and/orr/eor
// instructions, mapping each constant to its encoding bits.
//
// Examples:
//   #0x5555555555555555 => size=2, length=1, rotation=0 => 0x00f000
//   #0xaaaaaaaaaaaaaaaa => size=2, length=1, rotation=1 => 0x01f000
//   #0x0f0f0f0f0f0f0f0f => size=8, length=4, rotation=0 => 0x00cc00
//   #0x7f7f7f7f7f7f7f7f => size=8, length=7, rotation=0 => 0x00d800
func makeImm3Bitwise64() map[uint64]uint32 {
	encodings := make(map[uint64]uint32)
	for size := uint32(2); size <= 64; size *= 2 {
		for length := uint32(1); length < size; length++ {
			// a run of `length` ones, replicated to fill 64 bits
			mask := uint64(0xffffffffffffffff) >> (64 - length)
			for step := size; step < 64; step *= 2 {
				mask |= mask << step
			}
			for rotation := uint32(0); rotation < size; rotation++ {
				encodings[mask] = (size&64|rotation)<<16 | (0x7800*size)&0xF000 | (length-1)<<10
				mask = (mask >> 1) | (mask << 63)
			}
		}
	}
	return encodings
}
|
// +build linux
package nsinit
import (
"fmt"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/network"
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
"os"
"syscall"
)
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container. It runs as PID 1 of the
// container, ends by exec-ing args[0], and returns only on error.
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}
	// We always read this as it is a way to sync with the parent as well
	context, err := syncPipe.ReadFromParent()
	if err != nil {
		syncPipe.Close()
		return err
	}
	syncPipe.Close()
	if console != "" {
		// close pipes so that we can replace it with the pty
		closeStdPipes()
		// fds 0-2 are now free, so the slave lands on fd 0 (see dupSlave)
		slave, err := system.OpenTerminal(console, syscall.O_RDWR)
		if err != nil {
			return fmt.Errorf("open terminal %s", err)
		}
		if err := dupSlave(slave); err != nil {
			return fmt.Errorf("dup2 slave %s", err)
		}
	}
	// become session leader so the pty can be made our controlling terminal
	if _, err := system.Setsid(); err != nil {
		return fmt.Errorf("setsid %s", err)
	}
	if console != "" {
		if err := system.Setctty(); err != nil {
			return fmt.Errorf("setctty %s", err)
		}
	}
	// die if the parent process dies
	// (error message previously misspelled "deth")
	if err := system.ParentDeathSignal(); err != nil {
		return fmt.Errorf("parent death signal %s", err)
	}
	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}
	if err := setupNetwork(container, context); err != nil {
		return fmt.Errorf("setup networking %s", err)
	}
	if err := system.Sethostname(container.Hostname); err != nil {
		return fmt.Errorf("sethostname %s", err)
	}
	// drop privileges only after all privileged setup is done
	if err := capabilities.DropCapabilities(container); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}
	if err := setupUser(container); err != nil {
		return fmt.Errorf("setup user %s", err)
	}
	if container.WorkingDir != "" {
		if err := system.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}
	// replace this process with the container's entry point
	return system.Execv(args[0], args[0:], container.Env)
}
// closeStdPipes closes the inherited stdin, stdout and stderr so that
// descriptors 0, 1 and 2 become free for the console pty.
func closeStdPipes() {
	for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
		f.Close()
	}
}
// setupUser switches to the user configured for the container.
// Order matters: supplementary groups and gid are set while the process
// is still privileged, uid is dropped last.
func setupUser(container *libcontainer.Container) error {
	switch container.User {
	case "root", "":
		// run as root: clear supplementary groups and pin all ids to 0
		if err := system.Setgroups(nil); err != nil {
			return err
		}
		if err := system.Setresgid(0, 0, 0); err != nil {
			return err
		}
		if err := system.Setresuid(0, 0, 0); err != nil {
			return err
		}
	default:
		// resolve the configured user name/id to uid, gid and groups
		uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid())
		if err != nil {
			return err
		}
		if err := system.Setgroups(suppGids); err != nil {
			return err
		}
		if err := system.Setgid(gid); err != nil {
			return err
		}
		if err := system.Setuid(uid); err != nil {
			return err
		}
	}
	return nil
}
// dupSlave dup2s the pty slave's fd onto stdout (1) and stderr (2).
// It requires that the slave already occupies fd 0 (stdin): Init closes
// fds 0-2 before opening the terminal, so the open lands on fd 0.
func dupSlave(slave *os.File) error {
	if slave.Fd() != 0 {
		return fmt.Errorf("slave fd not 0 %d", slave.Fd())
	}
	if err := system.Dup2(slave.Fd(), 1); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 2); err != nil {
		return err
	}
	return nil
}
// setupNetwork initializes the container's network interfaces (e.g. the
// veth pair renamed to eth0, MTU, IP address and default gateway) using
// the strategy registered for each network type.
//
// NOTE(review): the loop returns after initializing the FIRST network,
// so any additional entries in container.Networks are silently skipped —
// confirm whether multi-network containers are expected here.
func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}
		return strategy.Initialize(config, context)
	}
	return nil
}
Allow child process to live if daemon dies
Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
// +build linux
package nsinit
import (
"fmt"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/network"
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
"os"
"syscall"
)
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container. It runs as PID 1 of
// the container and ends by exec-ing args[0]; it returns only on error.
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}
	// We always read this as it is a way to sync with the parent as well
	context, err := syncPipe.ReadFromParent()
	if err != nil {
		syncPipe.Close()
		return err
	}
	syncPipe.Close()
	if console != "" {
		// close pipes so that we can replace it with the pty
		closeStdPipes()
		// fds 0-2 are now free, so the slave lands on fd 0 (see dupSlave)
		slave, err := system.OpenTerminal(console, syscall.O_RDWR)
		if err != nil {
			return fmt.Errorf("open terminal %s", err)
		}
		if err := dupSlave(slave); err != nil {
			return fmt.Errorf("dup2 slave %s", err)
		}
	}
	// become session leader so the pty can be made our controlling terminal
	if _, err := system.Setsid(); err != nil {
		return fmt.Errorf("setsid %s", err)
	}
	if console != "" {
		if err := system.Setctty(); err != nil {
			return fmt.Errorf("setctty %s", err)
		}
	}
	// Parent-death signal intentionally disabled: the container process
	// must keep running if the daemon dies.
	/*
		if err := system.ParentDeathSignal(); err != nil {
			return fmt.Errorf("parent death signal %s", err)
		}
	*/
	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}
	if err := setupNetwork(container, context); err != nil {
		return fmt.Errorf("setup networking %s", err)
	}
	if err := system.Sethostname(container.Hostname); err != nil {
		return fmt.Errorf("sethostname %s", err)
	}
	// drop privileges only after all privileged setup is done
	if err := capabilities.DropCapabilities(container); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}
	if err := setupUser(container); err != nil {
		return fmt.Errorf("setup user %s", err)
	}
	if container.WorkingDir != "" {
		if err := system.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}
	// replace this process with the container's entry point
	return system.Execv(args[0], args[0:], container.Env)
}
// closeStdPipes closes the inherited stdio so fds 0-2 become free for the pty.
func closeStdPipes() {
	os.Stdin.Close()
	os.Stdout.Close()
	os.Stderr.Close()
}
// setupUser switches to the user configured for the container.
// Groups and gid are set while still privileged; uid is dropped last.
func setupUser(container *libcontainer.Container) error {
	switch container.User {
	case "root", "":
		if err := system.Setgroups(nil); err != nil {
			return err
		}
		if err := system.Setresgid(0, 0, 0); err != nil {
			return err
		}
		if err := system.Setresuid(0, 0, 0); err != nil {
			return err
		}
	default:
		uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid())
		if err != nil {
			return err
		}
		if err := system.Setgroups(suppGids); err != nil {
			return err
		}
		if err := system.Setgid(gid); err != nil {
			return err
		}
		if err := system.Setuid(uid); err != nil {
			return err
		}
	}
	return nil
}
// dupSlave dup2s the pty slave's fd onto stdout (1) and stderr (2); it
// requires the slave already occupies fd 0 (Init closed fds 0-2 first).
func dupSlave(slave *os.File) error {
	if slave.Fd() != 0 {
		return fmt.Errorf("slave fd not 0 %d", slave.Fd())
	}
	if err := system.Dup2(slave.Fd(), 1); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 2); err != nil {
		return err
	}
	return nil
}
// setupNetwork initializes the container's network interfaces via the
// strategy registered for each network type.
// NOTE(review): returns after the first network; extra entries are skipped.
func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}
		return strategy.Initialize(config, context)
	}
	return nil
}
|
// +build linux
package nsinit
import (
"fmt"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/network"
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
"os"
"syscall"
)
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container. It runs as PID 1 of
// the container and ends by exec-ing args[0]; it returns only on error.
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}
	// We always read this as it is a way to sync with the parent as well
	context, err := syncPipe.ReadFromParent()
	if err != nil {
		syncPipe.Close()
		return err
	}
	syncPipe.Close()
	if console != "" {
		// replace stdio with the console pty (dupSlave covers fds 0-2)
		slave, err := system.OpenTerminal(console, syscall.O_RDWR)
		if err != nil {
			return fmt.Errorf("open terminal %s", err)
		}
		if err := dupSlave(slave); err != nil {
			return fmt.Errorf("dup2 slave %s", err)
		}
	}
	// become session leader so the pty can be made our controlling terminal
	if _, err := system.Setsid(); err != nil {
		return fmt.Errorf("setsid %s", err)
	}
	if console != "" {
		if err := system.Setctty(); err != nil {
			return fmt.Errorf("setctty %s", err)
		}
	}
	/* this is commented out so that we get the current Ghost functionality
	if err := system.ParentDeathSignal(); err != nil {
		return fmt.Errorf("parent death signal %s", err)
	}
	*/
	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}
	if err := setupNetwork(container, context); err != nil {
		return fmt.Errorf("setup networking %s", err)
	}
	if err := system.Sethostname(container.Hostname); err != nil {
		return fmt.Errorf("sethostname %s", err)
	}
	// drop capabilities, switch user and chdir (see finalizeNamespace)
	if err := finalizeNamespace(container); err != nil {
		return fmt.Errorf("finalize namespace %s", err)
	}
	// apply the apparmor profile, if any, from the container context
	if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil {
		return err
	}
	// replace this process with the container's entry point
	return system.Execv(args[0], args[0:], container.Env)
}
// setupUser switches to the user configured for the container.
// Groups and gid are set while still privileged; uid is dropped last.
func setupUser(container *libcontainer.Container) error {
	switch container.User {
	case "root", "":
		if err := system.Setgroups(nil); err != nil {
			return err
		}
		if err := system.Setresgid(0, 0, 0); err != nil {
			return err
		}
		if err := system.Setresuid(0, 0, 0); err != nil {
			return err
		}
	default:
		uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid())
		if err != nil {
			return err
		}
		if err := system.Setgroups(suppGids); err != nil {
			return err
		}
		if err := system.Setgid(gid); err != nil {
			return err
		}
		if err := system.Setuid(uid); err != nil {
			return err
		}
	}
	return nil
}
// dupSlave dup2s the pty slave's fd onto stdin (0), stdout (1) and
// stderr (2), unconditionally replacing the inherited stdio.
func dupSlave(slave *os.File) error {
	if err := system.Dup2(slave.Fd(), 0); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 1); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 2); err != nil {
		return err
	}
	return nil
}
// setupNetwork initializes the container's network interfaces via the
// strategy registered for each network type.
// NOTE(review): returns after the first network; extra entries are skipped.
func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}
		return strategy.Initialize(config, context)
	}
	return nil
}
// finalizeNamespace drops the caps and sets the correct user
// and working dir before execing the command inside the namespace
func finalizeNamespace(container *libcontainer.Container) error {
	if err := capabilities.DropCapabilities(container); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}
	if err := setupUser(container); err != nil {
		return fmt.Errorf("setup user %s", err)
	}
	if container.WorkingDir != "" {
		if err := system.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}
	return nil
}
Remove the ghosts and kill everything
Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
// +build linux
package nsinit
import (
"fmt"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/network"
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
"os"
"syscall"
)
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container. It runs as PID 1 of
// the container and ends by exec-ing args[0]; it returns only on error.
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}
	// We always read this as it is a way to sync with the parent as well
	context, err := syncPipe.ReadFromParent()
	if err != nil {
		syncPipe.Close()
		return err
	}
	syncPipe.Close()
	if console != "" {
		// replace stdio with the console pty (dupSlave covers fds 0-2)
		slave, err := system.OpenTerminal(console, syscall.O_RDWR)
		if err != nil {
			return fmt.Errorf("open terminal %s", err)
		}
		if err := dupSlave(slave); err != nil {
			return fmt.Errorf("dup2 slave %s", err)
		}
	}
	// become session leader so the pty can be made our controlling terminal
	if _, err := system.Setsid(); err != nil {
		return fmt.Errorf("setsid %s", err)
	}
	if console != "" {
		if err := system.Setctty(); err != nil {
			return fmt.Errorf("setctty %s", err)
		}
	}
	// Parent-death signal intentionally disabled.
	/*
		if err := system.ParentDeathSignal(); err != nil {
			return fmt.Errorf("parent death signal %s", err)
		}
	*/
	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}
	if err := setupNetwork(container, context); err != nil {
		return fmt.Errorf("setup networking %s", err)
	}
	if err := system.Sethostname(container.Hostname); err != nil {
		return fmt.Errorf("sethostname %s", err)
	}
	// drop capabilities, switch user and chdir (see finalizeNamespace)
	if err := finalizeNamespace(container); err != nil {
		return fmt.Errorf("finalize namespace %s", err)
	}
	// apply the apparmor profile, if any, from the container context
	if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil {
		return err
	}
	// replace this process with the container's entry point
	return system.Execv(args[0], args[0:], container.Env)
}
// setupUser switches to the user configured for the container.
// Groups and gid are set while still privileged; uid is dropped last.
func setupUser(container *libcontainer.Container) error {
	switch container.User {
	case "root", "":
		if err := system.Setgroups(nil); err != nil {
			return err
		}
		if err := system.Setresgid(0, 0, 0); err != nil {
			return err
		}
		if err := system.Setresuid(0, 0, 0); err != nil {
			return err
		}
	default:
		uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid())
		if err != nil {
			return err
		}
		if err := system.Setgroups(suppGids); err != nil {
			return err
		}
		if err := system.Setgid(gid); err != nil {
			return err
		}
		if err := system.Setuid(uid); err != nil {
			return err
		}
	}
	return nil
}
// dupSlave dup2s the pty slave's fd onto stdin (0), stdout (1) and
// stderr (2), unconditionally replacing the inherited stdio.
func dupSlave(slave *os.File) error {
	if err := system.Dup2(slave.Fd(), 0); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 1); err != nil {
		return err
	}
	if err := system.Dup2(slave.Fd(), 2); err != nil {
		return err
	}
	return nil
}
// setupNetwork initializes the container's network interfaces via the
// strategy registered for each network type.
// NOTE(review): returns after the first network; extra entries are skipped.
func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}
		return strategy.Initialize(config, context)
	}
	return nil
}
// finalizeNamespace drops the caps and sets the correct user
// and working dir before execing the command inside the namespace
func finalizeNamespace(container *libcontainer.Container) error {
	if err := capabilities.DropCapabilities(container); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}
	if err := setupUser(container); err != nil {
		return fmt.Errorf("setup user %s", err)
	}
	if container.WorkingDir != "" {
		if err := system.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}
	return nil
}
|
package ulimit
import "testing"
// TestParseValid checks that a well-formed "name=soft:hard" string parses
// into the expected Ulimit value.
func TestParseValid(t *testing.T) {
	u1 := &Ulimit{"nofile", 1024, 512}
	u2, err := Parse("nofile=512:1024")
	if err != nil {
		// guard so the dereference below cannot hit a nil u2
		t.Fatalf("unexpected error: %v", err)
	}
	// Compare values, not pointers: the original `u1 == u2` compared two
	// distinct pointers (always false), so this test could never fail.
	if *u1 != *u2 {
		t.Fatalf("expected %s, but got %s", u1.String(), u2.String())
	}
}
// TestParseInvalidLimitType checks that unknown ulimit names are rejected.
func TestParseInvalidLimitType(t *testing.T) {
	if _, err := Parse("notarealtype=1024:1024"); err == nil {
		t.Fatalf("expected error on invalid ulimit type")
	}
}
// TestParseBadFormat checks that malformed "name=soft:hard" strings are rejected.
func TestParseBadFormat(t *testing.T) {
	if _, err := Parse("nofile:1024:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile="); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
}
// TestParseHardLessThanSoft checks that a hard limit below the soft limit
// is rejected.
func TestParseHardLessThanSoft(t *testing.T) {
	// Use the valid "name=soft:hard" syntax: the original "nofile:1024:1"
	// used ':' instead of '=' and failed on format, so the hard<soft check
	// was never actually exercised.
	if _, err := Parse("nofile=1024:1"); err == nil {
		t.Fatal("expected error on hard limit less than soft limit")
	}
}
// TestParseInvalidValueType checks that non-numeric limit values are rejected.
func TestParseInvalidValueType(t *testing.T) {
	// Use the valid "name=value" syntax: the original "nofile:asdf" used
	// ':' instead of '=' and failed on format, not on the value type.
	if _, err := Parse("nofile=asdf"); err == nil {
		t.Fatal("expected error on bad value type")
	}
}
// TestStringOutput checks the "name=soft:hard" rendering of Ulimit.String.
func TestStringOutput(t *testing.T) {
	const want = "nofile=512:1024"
	u := &Ulimit{"nofile", 1024, 512}
	if s := u.String(); s != want {
		t.Fatal("expected String to return "+want+", but got", s)
	}
}
Fixes pointer error
Signed-off-by: Paul Mou <0f90fb9690777da3f85e77c343c3a83a00a1ce86@gmail.com>
package ulimit
import "testing"
// TestParseValid checks that a well-formed "name=soft:hard" string parses
// into the expected Ulimit value.
func TestParseValid(t *testing.T) {
	u1 := &Ulimit{"nofile", 1024, 512}
	u2, err := Parse("nofile=512:1024")
	if err != nil {
		// Previously the error was discarded; a failing Parse returned a
		// nil u2 and the *u2 dereference below panicked instead of failing
		// the test cleanly.
		t.Fatalf("unexpected error: %v", err)
	}
	if *u1 != *u2 {
		t.Fatalf("expected %q, but got %q", u1, u2)
	}
}
// TestParseInvalidLimitType checks that unknown ulimit names are rejected.
func TestParseInvalidLimitType(t *testing.T) {
	if _, err := Parse("notarealtype=1024:1024"); err == nil {
		t.Fatalf("expected error on invalid ulimit type")
	}
}
// TestParseBadFormat checks that malformed "name=soft:hard" strings are rejected.
func TestParseBadFormat(t *testing.T) {
	if _, err := Parse("nofile:1024:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile="); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
}
// TestParseHardLessThanSoft checks that a hard limit below the soft limit
// is rejected.
func TestParseHardLessThanSoft(t *testing.T) {
	// Use the valid "name=soft:hard" syntax: the original "nofile:1024:1"
	// used ':' instead of '=' and failed on format, so the hard<soft check
	// was never actually exercised.
	if _, err := Parse("nofile=1024:1"); err == nil {
		t.Fatal("expected error on hard limit less than soft limit")
	}
}
// TestParseInvalidValueType checks that non-numeric limit values are rejected.
func TestParseInvalidValueType(t *testing.T) {
	// Use the valid "name=value" syntax: the original "nofile:asdf" used
	// ':' instead of '=' and failed on format, not on the value type.
	if _, err := Parse("nofile=asdf"); err == nil {
		t.Fatal("expected error on bad value type")
	}
}
// TestStringOutput checks the "name=soft:hard" rendering of Ulimit.String.
func TestStringOutput(t *testing.T) {
	const want = "nofile=512:1024"
	u := &Ulimit{"nofile", 1024, 512}
	if s := u.String(); s != want {
		t.Fatal("expected String to return "+want+", but got", s)
	}
}
|
package orm
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"time"
"github.com/go-pg/pg/internal"
"github.com/go-pg/pg/types"
)
// withQuery is one common table expression: WITH name AS (query).
type withQuery struct {
	name string
	query *Query
}
// Query accumulates the clauses of a SQL statement (SELECT/INSERT/UPDATE/
// DELETE) and executes it against db. stickyErr records the first builder
// error and is returned by the terminal methods.
type Query struct {
	db DB
	stickyErr error
	model tableModel
	ignoreModel bool
	with []withQuery
	tables []FormatAppender
	columns []FormatAppender
	set []FormatAppender
	where []sepFormatAppender
	updWhere []sepFormatAppender
	joins []FormatAppender
	group []FormatAppender
	having []queryParamsAppender
	order []FormatAppender
	onConflict *queryParamsAppender
	returning []queryParamsAppender
	limit int
	offset int
	selFor FormatAppender
}
// compile-time check that *Query implements queryAppender
var _ queryAppender = (*Query)(nil)
// NewQuery returns a Query bound to db and the given model(s).
func NewQuery(db DB, model ...interface{}) *Query {
	return (&Query{}).DB(db).Model(model...)
}
// New returns new zero Query binded to the current db and model.
func (q *Query) New() *Query {
	return &Query{
		db: q.db,
		model: q.model,
		ignoreModel: true,
	}
}
// AppendQuery renders the query as a SELECT statement into b.
func (q *Query) AppendQuery(b []byte) ([]byte, error) {
	return selectQuery{q: q}.AppendQuery(b)
}
// Copy returns copy of the Query. Slice fields are capped with three-index
// slices so appends on the copy cannot clobber the original's backing
// arrays; with-queries are copied recursively.
func (q *Query) Copy() *Query {
	copy := &Query{
		db:          q.db,
		stickyErr:   q.stickyErr,
		model:       q.model,
		ignoreModel: q.ignoreModel,
		tables:     q.tables[:len(q.tables):len(q.tables)],
		columns:    q.columns[:len(q.columns):len(q.columns)],
		set:        q.set[:len(q.set):len(q.set)],
		where:      q.where[:len(q.where):len(q.where)],
		updWhere:   q.updWhere[:len(q.updWhere):len(q.updWhere)],
		joins:      q.joins[:len(q.joins):len(q.joins)],
		group:      q.group[:len(q.group):len(q.group)],
		having:     q.having[:len(q.having):len(q.having)],
		order:      q.order[:len(q.order):len(q.order)],
		onConflict: q.onConflict,
		returning:  q.returning[:len(q.returning):len(q.returning)],
		limit:      q.limit,
		offset:     q.offset,
		// selFor was previously omitted, silently dropping a
		// SELECT ... FOR UPDATE/SHARE clause from the copy.
		selFor: q.selFor,
	}
	for _, with := range q.with {
		copy = copy.With(with.name, with.query.Copy())
	}
	return copy
}
// err records the first builder error; later errors are ignored.
func (q *Query) err(err error) *Query {
	if q.stickyErr == nil {
		q.stickyErr = err
	}
	return q
}
// DB rebinds the query (and its with-queries) to db.
func (q *Query) DB(db DB) *Query {
	q.db = db
	for _, with := range q.with {
		with.query.db = db
	}
	return q
}
// Model sets the query's model; several models are wrapped in a slice.
func (q *Query) Model(model ...interface{}) *Query {
	var err error
	switch l := len(model); {
	case l == 0:
		q.model = nil
	case l == 1:
		q.model, err = newTableModel(model[0])
	case l > 1:
		// pass the address so the slice itself becomes the model
		q.model, err = newTableModel(&model)
	}
	if err != nil {
		q = q.err(err)
	}
	// an explicit Model call cancels the ignore flag set by New
	if q.ignoreModel {
		q.ignoreModel = false
	}
	return q
}
// With adds subq as common table expression with the given name.
func (q *Query) With(name string, subq *Query) *Query {
	q.with = append(q.with, withQuery{name, subq})
	return q
}
// WrapWith creates new Query and adds to it current query as
// common table expression with the given name.
func (q *Query) WrapWith(name string) *Query {
	wrapper := q.New()
	// move the existing CTEs to the wrapper so they stay at the top level
	wrapper.with = q.with
	q.with = nil
	wrapper = wrapper.With(name, q)
	return wrapper
}
// Table adds tables to the FROM clause, quoted as identifiers.
func (q *Query) Table(tables ...string) *Query {
	for _, table := range tables {
		q.tables = append(q.tables, fieldAppender{table})
	}
	return q
}
// TableExpr adds an unquoted table expression with params to the FROM clause.
func (q *Query) TableExpr(expr string, params ...interface{}) *Query {
	q.tables = append(q.tables, queryParamsAppender{expr, params})
	return q
}
// Column adds column to the Query quoting it according to PostgreSQL rules.
// ColumnExpr can be used to bypass quoting restriction.
// The special column "_" forces an empty (non-nil) column list; a column
// naming a relation registers a join instead of a column.
func (q *Query) Column(columns ...string) *Query {
	for _, column := range columns {
		if column == "_" {
			if q.columns == nil {
				q.columns = make([]FormatAppender, 0)
			}
			continue
		}
		if q.model != nil {
			// if the name matches a relation, add it as a join, not a column
			if _, j := q.model.Join(column, nil); j != nil {
				continue
			}
		}
		q.columns = append(q.columns, fieldAppender{column})
	}
	return q
}
// ColumnExpr adds column expression to the Query.
func (q *Query) ColumnExpr(expr string, params ...interface{}) *Query {
	q.columns = append(q.columns, queryParamsAppender{expr, params})
	return q
}
// getFields resolves the explicitly selected columns to table fields.
func (q *Query) getFields() ([]*Field, error) {
	return q._getFields(false)
}
// getDataFields is getFields without primary-key fields.
func (q *Query) getDataFields() ([]*Field, error) {
	return q._getFields(true)
}
// _getFields maps fieldAppender columns to *Field, optionally omitting PKs;
// expression columns (non-fieldAppender) are skipped.
func (q *Query) _getFields(omitPKs bool) ([]*Field, error) {
	table := q.model.Table()
	var columns []*Field
	for _, col := range q.columns {
		f, ok := col.(fieldAppender)
		if !ok {
			continue
		}
		field, err := table.GetField(f.field)
		if err != nil {
			return nil, err
		}
		if omitPKs && field.HasFlag(PrimaryKeyFlag) {
			continue
		}
		columns = append(columns, field)
	}
	return columns, nil
}
// Relation adds the named relation as a join, applying apply to the join's
// query; it records an error if the model has no such relation.
func (q *Query) Relation(name string, apply func(*Query) (*Query, error)) *Query {
	if _, j := q.model.Join(name, apply); j == nil {
		return q.err(fmt.Errorf(
			"model=%s does not have relation=%s",
			q.model.Table().Type.Name(), name,
		))
	}
	return q
}
// Set adds an UPDATE ... SET expression.
func (q *Query) Set(set string, params ...interface{}) *Query {
	q.set = append(q.set, queryParamsAppender{set, params})
	return q
}
// Where adds a condition joined to the previous ones with AND.
func (q *Query) Where(where string, params ...interface{}) *Query {
	q.addWhere(&whereAppender{
		sep: "AND",
		where: where,
		params: params,
	})
	return q
}
// WhereOr adds a condition joined to the previous ones with OR.
func (q *Query) WhereOr(where string, params ...interface{}) *Query {
	q.addWhere(&whereAppender{
		sep: "OR",
		where: where,
		params: params,
	})
	return q
}
// WhereGroup encloses conditions added in the function in parentheses.
//
//	q.Where("TRUE").
//		WhereGroup(func(q *orm.Query) (*orm.Query, error)) {
//			q = q.WhereOr("FALSE").WhereOr("TRUE").
//			return q, nil
//		})
//
// generates
//
//	WHERE TRUE AND (FALSE OR TRUE)
func (q *Query) WhereGroup(fn func(*Query) (*Query, error)) *Query {
	return q.whereGroup("AND", fn)
}
// WhereOrGroup encloses conditions added in the function in parentheses.
//
//	q.Where("TRUE").
//		WhereOrGroup(func(q *orm.Query) (*orm.Query, error)) {
//			q = q.Where("FALSE").Where("TRUE").
//			return q, nil
//		})
//
// generates
//
//	WHERE TRUE OR (FALSE AND TRUE)
func (q *Query) WhereOrGroup(fn func(*Query) (*Query, error)) *Query {
	return q.whereGroup("OR", fn)
}
// whereGroup temporarily swaps out q.where so that fn's conditions collect
// separately, then re-adds them as a single parenthesized group.
func (q *Query) whereGroup(conj string, fn func(*Query) (*Query, error)) *Query {
	saved := q.where
	q.where = nil
	newq, err := fn(q)
	if err != nil {
		q.err(err)
		return q
	}
	f := whereGroupAppender{
		sep: conj,
		where: newq.where,
	}
	newq.where = saved
	newq.addWhere(f)
	return newq
}
// WhereIn is a shortcut for Where and pg.In to work with IN operator:
//
//	WhereIn("id IN (?)", 1, 2, 3)
func (q *Query) WhereIn(where string, params ...interface{}) *Query {
	return q.Where(where, types.In(params))
}
// addWhere routes the condition to the ON CONFLICT DO UPDATE WHERE clause
// when one is active, otherwise to the regular WHERE clause.
func (q *Query) addWhere(f sepFormatAppender) {
	if q.onConflictDoUpdate() {
		q.updWhere = append(q.updWhere, f)
	} else {
		q.where = append(q.where, f)
	}
}
// Join adds a raw JOIN expression with params.
func (q *Query) Join(join string, params ...interface{}) *Query {
	q.joins = append(q.joins, queryParamsAppender{join, params})
	return q
}
// Group adds GROUP BY columns, quoted as identifiers.
func (q *Query) Group(columns ...string) *Query {
	for _, column := range columns {
		q.group = append(q.group, fieldAppender{column})
	}
	return q
}
// GroupExpr adds an unquoted GROUP BY expression with params.
func (q *Query) GroupExpr(group string, params ...interface{}) *Query {
	q.group = append(q.group, queryParamsAppender{group, params})
	return q
}
// Having adds a HAVING condition with params.
func (q *Query) Having(having string, params ...interface{}) *Query {
	q.having = append(q.having, queryParamsAppender{having, params})
	return q
}
// Order adds sort order to the Query quoting column name.
// OrderExpr can be used to bypass quoting restriction.
// A trailing direction ("ASC", "DESC", optionally with NULLS FIRST/LAST)
// is recognized and kept unquoted.
func (q *Query) Order(orders ...string) *Query {
loop:
	for _, order := range orders {
		ind := strings.Index(order, " ")
		if ind != -1 {
			field := order[:ind]
			sort := order[ind+1:]
			switch internal.ToUpper(sort) {
			case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST",
				"ASC NULLS LAST", "DESC NULLS LAST":
				q = q.OrderExpr("? ?", types.F(field), types.Q(sort))
				continue loop
			}
		}
		q.order = append(q.order, fieldAppender{order})
	}
	return q
}
// OrderExpr adds an unquoted sort expression to the Query.
func (q *Query) OrderExpr(order string, params ...interface{}) *Query {
	q.order = append(q.order, queryParamsAppender{order, params})
	return q
}
// Limit sets the LIMIT clause.
func (q *Query) Limit(n int) *Query {
	q.limit = n
	return q
}
// Offset sets the OFFSET clause.
func (q *Query) Offset(n int) *Query {
	q.offset = n
	return q
}
// OnConflict sets the ON CONFLICT clause.
func (q *Query) OnConflict(s string, params ...interface{}) *Query {
	q.onConflict = &queryParamsAppender{s, params}
	return q
}
// onConflictDoUpdate reports whether an ON CONFLICT ... DO UPDATE clause
// is active (affects where subsequent Where conditions go).
func (q *Query) onConflictDoUpdate() bool {
	return q.onConflict != nil &&
		strings.HasSuffix(q.onConflict.query, "DO UPDATE")
}
// Returning adds a RETURNING expression.
func (q *Query) Returning(s string, params ...interface{}) *Query {
	q.returning = append(q.returning, queryParamsAppender{s, params})
	return q
}
// For sets the locking clause, e.g. FOR UPDATE.
func (q *Query) For(s string, params ...interface{}) *Query {
	q.selFor = queryParamsAppender{s, params}
	return q
}
// Apply calls the fn passing the Query as an argument.
func (q *Query) Apply(fn func(*Query) (*Query, error)) *Query {
	qq, err := fn(q)
	if err != nil {
		q.err(err)
		return q
	}
	return qq
}
// Count returns number of rows matching the query using count aggregate function.
func (q *Query) Count() (int, error) {
	if q.stickyErr != nil {
		return 0, q.stickyErr
	}
	var count int
	_, err := q.db.QueryOne(
		Scan(&count),
		q.countSelectQuery("count(*)"),
		q.model,
	)
	return count, err
}
// countSelectQuery renders this query as SELECT <column> with the usual clauses.
func (q *Query) countSelectQuery(column string) selectQuery {
	return selectQuery{
		q: q,
		count: column,
	}
}
// First selects the first row, ordered by the model's primary key(s).
func (q *Query) First() error {
	err := q.model.Table().checkPKs()
	if err != nil {
		return err
	}
	b := columns(nil, q.model.Table().Alias, "", q.model.Table().PKs)
	return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select()
}
// Last selects the last row, ordered by the model's primary key(s) descending.
//
// NOTE(review): " DESC" is appended once after the whole column list, so for
// a composite primary key only the last column is ordered DESC — confirm
// this is the intended ordering.
func (q *Query) Last() error {
	err := q.model.Table().checkPKs()
	if err != nil {
		return err
	}
	b := columns(nil, q.model.Table().Alias, "", q.model.Table().PKs)
	b = append(b, " DESC"...)
	return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select()
}
// Select selects the model.
func (q *Query) Select(values ...interface{}) error {
	if q.stickyErr != nil {
		return q.stickyErr
	}
	model, err := q.newModel(values...)
	if err != nil {
		return err
	}
	res, err := q.query(model, selectQuery{q: q})
	if err != nil {
		return err
	}
	// relations and AfterSelect hooks only fire when rows came back
	if res.RowsReturned() > 0 {
		if q.model != nil {
			if err := q.selectJoins(q.model.GetJoins()); err != nil {
				return err
			}
		}
		if err := model.AfterSelect(q.db); err != nil {
			return err
		}
	}
	return nil
}
// newModel returns a model for the explicit scan targets, or the query's
// own model when none are given.
func (q *Query) newModel(values ...interface{}) (Model, error) {
	if len(values) > 0 {
		return NewModel(values...)
	}
	return q.model, nil
}
// query runs the statement, using QueryOne for single-row models.
func (q *Query) query(model Model, query interface{}) (Result, error) {
	if _, ok := model.(useQueryOne); ok {
		return q.db.QueryOne(model, query, q.model)
	}
	return q.db.Query(model, query, q.model)
}
// SelectAndCount runs Select and Count in two goroutines,
// waits for them to finish and returns the result.
func (q *Query) SelectAndCount(values ...interface{}) (count int, err error) {
	if q.stickyErr != nil {
		return 0, q.stickyErr
	}
	var wg sync.WaitGroup
	wg.Add(2)
	// mu guards err, which both goroutines may write; count has a single
	// writer and is published by wg.Wait.
	var mu sync.Mutex
	go func() {
		defer wg.Done()
		if e := q.Select(values...); e != nil {
			mu.Lock()
			err = e
			mu.Unlock()
		}
	}()
	go func() {
		defer wg.Done()
		var e error
		count, e = q.Count()
		if e != nil {
			mu.Lock()
			err = e
			mu.Unlock()
		}
	}()
	wg.Wait()
	return count, err
}
// forEachHasOneJoin visits every has-one/belongs-to join reachable from
// the model, depth-first.
func (q *Query) forEachHasOneJoin(fn func(*join)) {
	if q.model == nil {
		return
	}
	q._forEachHasOneJoin(fn, q.model.GetJoins())
}
func (q *Query) _forEachHasOneJoin(fn func(*join), joins []join) {
	for i := range joins {
		j := &joins[i]
		switch j.Rel.Type {
		case HasOneRelation, BelongsToRelation:
			fn(j)
			q._forEachHasOneJoin(fn, j.JoinModel.GetJoins())
		}
	}
}
// selectJoins runs separate queries for the relations that are not folded
// into the main SELECT (has-one/belongs-to joins recurse instead).
func (q *Query) selectJoins(joins []join) error {
	var err error
	for i := range joins {
		j := &joins[i]
		if j.Rel.Type == HasOneRelation || j.Rel.Type == BelongsToRelation {
			err = q.selectJoins(j.JoinModel.GetJoins())
		} else {
			err = j.Select(q.db)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// Insert inserts the model, running the model's BeforeInsert and
// AfterInsert hooks around the statement.
func (q *Query) Insert(values ...interface{}) (Result, error) {
	if q.stickyErr != nil {
		return nil, q.stickyErr
	}
	model, err := q.newModel(values...)
	if err != nil {
		return nil, err
	}
	if q.model != nil {
		if err := q.model.BeforeInsert(q.db); err != nil {
			return nil, err
		}
	}
	res, err := q.db.Query(model, insertQuery{q: q}, q.model)
	if err != nil {
		return nil, err
	}
	if q.model != nil {
		if err := q.model.AfterInsert(q.db); err != nil {
			return nil, err
		}
	}
	return res, nil
}
// SelectOrInsert selects the model inserting one if it does not exist.
func (q *Query) SelectOrInsert(values ...interface{}) (inserted bool, err error) {
if q.stickyErr != nil {
return false, q.stickyErr
}
insertq := q
if len(insertq.columns) > 0 {
insertq = insertq.Copy()
insertq.columns = nil
}
var insertErr error
for i := 0; i < 5; i++ {
if i >= 2 {
time.Sleep(internal.RetryBackoff(i-2, 250*time.Millisecond, 5*time.Second))
}
err := q.Select(values...)
if err == nil {
return false, nil
}
if err != internal.ErrNoRows {
return false, err
}
res, err := insertq.Insert(values...)
if err != nil {
insertErr = err
if pgErr, ok := err.(internal.PGError); ok {
if pgErr.IntegrityViolation() {
continue
}
if pgErr.Field('C') == "55000" {
// Retry on "#55000 attempted to delete invisible tuple".
continue
}
}
return false, err
}
if res.RowsAffected() == 1 {
return true, nil
}
}
err = fmt.Errorf(
"pg: SelectOrInsert: select returns no rows (insert fails with err=%q)",
insertErr,
)
return false, err
}
// Update updates the model.
func (q *Query) Update(scan ...interface{}) (Result, error) {
return q.update(scan, false)
}
// UpdateNotNull updates the model omitting zero-valued (NULL) columns.
// It differs from Update only in passing omitZero=true to update.
func (q *Query) UpdateNotNull(scan ...interface{}) (Result, error) {
	return q.update(scan, true)
}
func (q *Query) update(scan []interface{}, omitZero bool) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
model, err := q.newModel(scan...)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.BeforeUpdate(q.db); err != nil {
return nil, err
}
}
res, err := q.db.Query(model, updateQuery{q: q, omitZero: omitZero}, q.model)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.AfterUpdate(q.db); err != nil {
return nil, err
}
}
return res, nil
}
// Delete deletes the model.
func (q *Query) Delete(values ...interface{}) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
model, err := q.newModel(values...)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.BeforeDelete(q.db); err != nil {
return nil, err
}
}
res, err := q.db.Query(model, deleteQuery{q}, q.model)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.AfterDelete(q.db); err != nil {
return nil, err
}
}
return res, nil
}
func (q *Query) CreateTable(opt *CreateTableOptions) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
return q.db.Exec(createTableQuery{
q: q,
opt: opt,
})
}
func (q *Query) DropTable(opt *DropTableOptions) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
return q.db.Exec(dropTableQuery{
q: q,
opt: opt,
})
}
// Exec is an alias for DB.Exec.
func (q *Query) Exec(query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.Exec(query, params...)
}
// ExecOne is an alias for DB.ExecOne.
func (q *Query) ExecOne(query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.ExecOne(query, params...)
}
// Query is an alias for DB.Query.
func (q *Query) Query(model, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.Query(model, query, params...)
}
// QueryOne is an alias for DB.QueryOne.
func (q *Query) QueryOne(model, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.QueryOne(model, query, params...)
}
// CopyFrom is an alias from DB.CopyFrom.
func (q *Query) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.CopyFrom(r, query, params...)
}
// CopyTo is an alias from DB.CopyTo.
func (q *Query) CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.CopyTo(w, query, params...)
}
func (q *Query) FormatQuery(b []byte, query string, params ...interface{}) []byte {
params = append(params, q.model)
if q.db != nil {
return q.db.FormatQuery(b, query, params...)
}
return formatter.Append(b, query, params...)
}
func (q *Query) hasModel() bool {
return !q.ignoreModel && q.model != nil
}
func (q *Query) hasTables() bool {
return q.hasModel() || len(q.tables) > 0
}
func (q *Query) appendTableName(b []byte) []byte {
return q.FormatQuery(b, string(q.model.Table().Name))
}
func (q *Query) appendTableNameWithAlias(b []byte) []byte {
b = q.appendTableName(b)
b = append(b, " AS "...)
b = append(b, q.model.Table().Alias...)
return b
}
func (q *Query) appendFirstTable(b []byte) []byte {
if q.hasModel() {
return q.appendTableName(b)
}
if len(q.tables) > 0 {
b = q.tables[0].AppendFormat(b, q)
}
return b
}
func (q *Query) appendFirstTableWithAlias(b []byte) []byte {
if q.hasModel() {
return q.appendTableNameWithAlias(b)
}
if len(q.tables) > 0 {
b = q.tables[0].AppendFormat(b, q)
}
return b
}
func (q *Query) appendTables(b []byte) []byte {
if q.hasModel() {
b = q.appendTableNameWithAlias(b)
if len(q.tables) > 0 {
b = append(b, ", "...)
}
}
for i, f := range q.tables {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) hasOtherTables() bool {
if q.hasModel() {
return len(q.tables) > 0
}
return len(q.tables) > 1
}
func (q *Query) modelHasData() bool {
if !q.hasModel() {
return false
}
v := q.model.Value()
return v.Kind() == reflect.Slice && v.Len() > 0
}
func (q *Query) appendOtherTables(b []byte) []byte {
tables := q.tables
if !q.hasModel() {
tables = tables[1:]
}
for i, f := range tables {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendColumns(b []byte) []byte {
for i, f := range q.columns {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) mustAppendWhere(b []byte) ([]byte, error) {
if len(q.where) > 0 {
b = q.appendWhere(b)
return b, nil
}
if q.model == nil {
return nil, errors.New("pg: Model is nil")
}
if err := q.model.Table().checkPKs(); err != nil {
return nil, err
}
b = append(b, " WHERE "...)
return wherePKQuery{q}.AppendFormat(b, nil), nil
}
func (q *Query) appendWhere(b []byte) []byte {
return q._appendWhere(b, q.where)
}
func (q *Query) appendUpdWhere(b []byte) []byte {
return q._appendWhere(b, q.updWhere)
}
func (q *Query) _appendWhere(b []byte, where []sepFormatAppender) []byte {
b = append(b, " WHERE "...)
for i, f := range where {
if i > 0 {
b = append(b, ' ')
b = f.AppendSep(b)
b = append(b, ' ')
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendSet(b []byte) []byte {
b = append(b, " SET "...)
for i, f := range q.set {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendReturning(b []byte) []byte {
b = append(b, " RETURNING "...)
for i, f := range q.returning {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendWith(b []byte) ([]byte, error) {
var err error
b = append(b, "WITH "...)
for i, with := range q.with {
if i > 0 {
b = append(b, ", "...)
}
b = types.AppendField(b, with.name, 1)
b = append(b, " AS ("...)
b, err = selectQuery{q: with.query}.AppendQuery(b)
if err != nil {
return nil, err
}
b = append(b, ')')
}
b = append(b, ' ')
return b, nil
}
//------------------------------------------------------------------------------
type wherePKQuery struct {
*Query
}
func (wherePKQuery) AppendSep(b []byte) []byte {
return append(b, "AND"...)
}
func (q wherePKQuery) AppendFormat(b []byte, f QueryFormatter) []byte {
table := q.model.Table()
value := q.model.Value()
if value.Kind() == reflect.Struct {
return appendColumnAndValue(b, value, table.Alias, table.PKs)
} else {
return appendColumnAndColumn(b, value, table.Alias, table.PKs)
}
}
func appendColumnAndValue(b []byte, v reflect.Value, alias types.Q, fields []*Field) []byte {
for i, f := range fields {
if i > 0 {
b = append(b, " AND "...)
}
b = append(b, alias...)
b = append(b, '.')
b = append(b, f.Column...)
b = append(b, " = "...)
b = f.AppendValue(b, v, 1)
}
return b
}
func appendColumnAndColumn(b []byte, v reflect.Value, alias types.Q, fields []*Field) []byte {
for i, f := range fields {
if i > 0 {
b = append(b, " AND "...)
}
b = append(b, alias...)
b = append(b, '.')
b = append(b, f.Column...)
b = append(b, " = _data."...)
b = append(b, f.Column...)
}
return b
}
orm: better doc for Query.First and Query.Last
package orm
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"time"
"github.com/go-pg/pg/internal"
"github.com/go-pg/pg/types"
)
type withQuery struct {
name string
query *Query
}
// Query accumulates the parts of a single SQL statement (tables,
// columns, SET, WHERE, JOIN, GROUP BY, HAVING, ORDER BY, ON CONFLICT,
// RETURNING, LIMIT/OFFSET, FOR ...) and executes it against db.
// Use NewQuery to construct one bound to a DB and model.
type Query struct {
	db          DB         // connection or transaction the query runs on
	stickyErr   error      // first error recorded via err(); surfaced by Select/Insert/...
	model       tableModel // primary model; may be nil
	ignoreModel bool       // set by New(); makes hasModel() report false until Model() is called

	with     []withQuery           // common table expressions (WITH name AS (...))
	tables   []FormatAppender      // extra tables beyond the model's
	columns  []FormatAppender      // selected columns; nil means "all"
	set      []FormatAppender      // SET clauses for UPDATE
	where    []sepFormatAppender   // WHERE conditions joined by their own separators
	updWhere []sepFormatAppender   // WHERE conditions for ON CONFLICT DO UPDATE
	joins    []FormatAppender      // JOIN clauses
	group    []FormatAppender      // GROUP BY expressions
	having   []queryParamsAppender // HAVING expressions
	order    []FormatAppender      // ORDER BY expressions

	onConflict *queryParamsAppender  // ON CONFLICT clause, nil when absent
	returning  []queryParamsAppender // RETURNING expressions
	limit      int                   // LIMIT; 0 means none
	offset     int                   // OFFSET; 0 means none
	selFor     FormatAppender        // FOR UPDATE / FOR SHARE clause
}
var _ queryAppender = (*Query)(nil)
// NewQuery returns a new Query bound to the given db and model(s).
func NewQuery(db DB, model ...interface{}) *Query {
	q := &Query{}
	q = q.DB(db)
	return q.Model(model...)
}
// New returns new zero Query binded to the current db and model.
func (q *Query) New() *Query {
return &Query{
db: q.db,
model: q.model,
ignoreModel: true,
}
}
func (q *Query) AppendQuery(b []byte) ([]byte, error) {
return selectQuery{q: q}.AppendQuery(b)
}
// Copy returns a copy of the Query that can be mutated independently
// of the original.
func (q *Query) Copy() *Query {
	// Three-index slices cap each copied slice at its current length so
	// that a later append on the copy reallocates instead of clobbering
	// the original's backing array. The local is named clone to avoid
	// shadowing the builtin copy.
	clone := &Query{
		db:          q.db,
		stickyErr:   q.stickyErr,
		model:       q.model,
		ignoreModel: q.ignoreModel,

		tables:   q.tables[:len(q.tables):len(q.tables)],
		columns:  q.columns[:len(q.columns):len(q.columns)],
		set:      q.set[:len(q.set):len(q.set)],
		where:    q.where[:len(q.where):len(q.where)],
		updWhere: q.updWhere[:len(q.updWhere):len(q.updWhere)],
		joins:    q.joins[:len(q.joins):len(q.joins)],
		group:    q.group[:len(q.group):len(q.group)],
		having:   q.having[:len(q.having):len(q.having)],
		order:    q.order[:len(q.order):len(q.order)],

		onConflict: q.onConflict,
		returning:  q.returning[:len(q.returning):len(q.returning)],
		limit:      q.limit,
		offset:     q.offset,
		selFor:     q.selFor, // was dropped by the original Copy — FOR clause now survives
	}
	// Common table expressions are deep-copied so nested queries are
	// independent as well.
	for _, with := range q.with {
		clone = clone.With(with.name, with.query.Copy())
	}
	return clone
}
func (q *Query) err(err error) *Query {
if q.stickyErr == nil {
q.stickyErr = err
}
return q
}
func (q *Query) DB(db DB) *Query {
q.db = db
for _, with := range q.with {
with.query.db = db
}
return q
}
func (q *Query) Model(model ...interface{}) *Query {
var err error
switch l := len(model); {
case l == 0:
q.model = nil
case l == 1:
q.model, err = newTableModel(model[0])
case l > 1:
q.model, err = newTableModel(&model)
}
if err != nil {
q = q.err(err)
}
if q.ignoreModel {
q.ignoreModel = false
}
return q
}
// With adds subq as common table expression with the given name.
func (q *Query) With(name string, subq *Query) *Query {
q.with = append(q.with, withQuery{name, subq})
return q
}
// WrapWith creates new Query and adds to it current query as
// common table expression with the given name.
func (q *Query) WrapWith(name string) *Query {
wrapper := q.New()
wrapper.with = q.with
q.with = nil
wrapper = wrapper.With(name, q)
return wrapper
}
func (q *Query) Table(tables ...string) *Query {
for _, table := range tables {
q.tables = append(q.tables, fieldAppender{table})
}
return q
}
func (q *Query) TableExpr(expr string, params ...interface{}) *Query {
q.tables = append(q.tables, queryParamsAppender{expr, params})
return q
}
// Column adds column to the Query quoting it according to PostgreSQL rules.
// ColumnExpr can be used to bypass quoting restriction.
func (q *Query) Column(columns ...string) *Query {
for _, column := range columns {
if column == "_" {
if q.columns == nil {
q.columns = make([]FormatAppender, 0)
}
continue
}
if q.model != nil {
if _, j := q.model.Join(column, nil); j != nil {
continue
}
}
q.columns = append(q.columns, fieldAppender{column})
}
return q
}
// ColumnExpr adds column expression to the Query.
func (q *Query) ColumnExpr(expr string, params ...interface{}) *Query {
q.columns = append(q.columns, queryParamsAppender{expr, params})
return q
}
func (q *Query) getFields() ([]*Field, error) {
return q._getFields(false)
}
func (q *Query) getDataFields() ([]*Field, error) {
return q._getFields(true)
}
func (q *Query) _getFields(omitPKs bool) ([]*Field, error) {
table := q.model.Table()
var columns []*Field
for _, col := range q.columns {
f, ok := col.(fieldAppender)
if !ok {
continue
}
field, err := table.GetField(f.field)
if err != nil {
return nil, err
}
if omitPKs && field.HasFlag(PrimaryKeyFlag) {
continue
}
columns = append(columns, field)
}
return columns, nil
}
func (q *Query) Relation(name string, apply func(*Query) (*Query, error)) *Query {
if _, j := q.model.Join(name, apply); j == nil {
return q.err(fmt.Errorf(
"model=%s does not have relation=%s",
q.model.Table().Type.Name(), name,
))
}
return q
}
func (q *Query) Set(set string, params ...interface{}) *Query {
q.set = append(q.set, queryParamsAppender{set, params})
return q
}
func (q *Query) Where(where string, params ...interface{}) *Query {
q.addWhere(&whereAppender{
sep: "AND",
where: where,
params: params,
})
return q
}
func (q *Query) WhereOr(where string, params ...interface{}) *Query {
q.addWhere(&whereAppender{
sep: "OR",
where: where,
params: params,
})
return q
}
// WhereGroup encloses conditions added in the function in parentheses.
//
// q.Where("TRUE").
// WhereGroup(func(q *orm.Query) (*orm.Query, error)) {
// q = q.WhereOr("FALSE").WhereOr("TRUE").
// return q, nil
// })
//
// generates
//
// WHERE TRUE AND (FALSE OR TRUE)
func (q *Query) WhereGroup(fn func(*Query) (*Query, error)) *Query {
return q.whereGroup("AND", fn)
}
// WhereOrGroup encloses conditions added in the function in parentheses.
//
// q.Where("TRUE").
// WhereOrGroup(func(q *orm.Query) (*orm.Query, error)) {
// q = q.Where("FALSE").Where("TRUE").
// return q, nil
// })
//
// generates
//
// WHERE TRUE OR (FALSE AND TRUE)
func (q *Query) WhereOrGroup(fn func(*Query) (*Query, error)) *Query {
return q.whereGroup("OR", fn)
}
// whereGroup collects the conditions added by fn into a single
// parenthesized group joined to the existing WHERE clause by conj
// ("AND" or "OR"). It temporarily swaps out q.where so fn's additions
// can be captured in isolation, then restores it. The statement order
// here is significant: saved must be restored before addWhere appends
// the group.
func (q *Query) whereGroup(conj string, fn func(*Query) (*Query, error)) *Query {
	// Stash the current conditions so fn starts with an empty WHERE list.
	saved := q.where
	q.where = nil
	newq, err := fn(q)
	if err != nil {
		q.err(err)
		return q
	}
	// Everything fn added becomes one grouped appender.
	f := whereGroupAppender{
		sep:   conj,
		where: newq.where,
	}
	// Restore the pre-existing conditions, then append the group.
	newq.where = saved
	newq.addWhere(f)
	return newq
}
// WhereIn is a shortcut for Where and pg.In to work with IN operator:
//
// WhereIn("id IN (?)", 1, 2, 3)
func (q *Query) WhereIn(where string, params ...interface{}) *Query {
return q.Where(where, types.In(params))
}
func (q *Query) addWhere(f sepFormatAppender) {
if q.onConflictDoUpdate() {
q.updWhere = append(q.updWhere, f)
} else {
q.where = append(q.where, f)
}
}
func (q *Query) Join(join string, params ...interface{}) *Query {
q.joins = append(q.joins, queryParamsAppender{join, params})
return q
}
func (q *Query) Group(columns ...string) *Query {
for _, column := range columns {
q.group = append(q.group, fieldAppender{column})
}
return q
}
func (q *Query) GroupExpr(group string, params ...interface{}) *Query {
q.group = append(q.group, queryParamsAppender{group, params})
return q
}
func (q *Query) Having(having string, params ...interface{}) *Query {
q.having = append(q.having, queryParamsAppender{having, params})
return q
}
// Order adds sort order to the Query quoting column name.
// OrderExpr can be used to bypass quoting restriction.
//
// Each order may be either a bare column name, or "column DIR" where
// DIR is one of the recognized sort directions below; in the latter
// case the column is quoted as a field and the direction is passed
// through unquoted.
func (q *Query) Order(orders ...string) *Query {
loop:
	for _, order := range orders {
		// Split on the first space to separate "column" from "direction".
		ind := strings.Index(order, " ")
		if ind != -1 {
			field := order[:ind]
			sort := order[ind+1:]
			switch internal.ToUpper(sort) {
			case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST",
				"ASC NULLS LAST", "DESC NULLS LAST":
				// Quote the column (types.F) but keep the direction raw (types.Q).
				q = q.OrderExpr("? ?", types.F(field), types.Q(sort))
				continue loop
			}
		}
		// Not a recognized "column DIR" form: treat the whole string as a column.
		q.order = append(q.order, fieldAppender{order})
	}
	return q
}
// Order adds sort order to the Query.
func (q *Query) OrderExpr(order string, params ...interface{}) *Query {
q.order = append(q.order, queryParamsAppender{order, params})
return q
}
func (q *Query) Limit(n int) *Query {
q.limit = n
return q
}
func (q *Query) Offset(n int) *Query {
q.offset = n
return q
}
func (q *Query) OnConflict(s string, params ...interface{}) *Query {
q.onConflict = &queryParamsAppender{s, params}
return q
}
// onConflictDoUpdate reports whether an ON CONFLICT ... DO UPDATE
// clause has been configured on the query.
func (q *Query) onConflictDoUpdate() bool {
	if q.onConflict == nil {
		return false
	}
	return strings.HasSuffix(q.onConflict.query, "DO UPDATE")
}
func (q *Query) Returning(s string, params ...interface{}) *Query {
q.returning = append(q.returning, queryParamsAppender{s, params})
return q
}
func (q *Query) For(s string, params ...interface{}) *Query {
q.selFor = queryParamsAppender{s, params}
return q
}
// Apply calls the fn passing the Query as an argument.
func (q *Query) Apply(fn func(*Query) (*Query, error)) *Query {
qq, err := fn(q)
if err != nil {
q.err(err)
return q
}
return qq
}
// Count returns number of rows matching the query using count aggregate function.
func (q *Query) Count() (int, error) {
if q.stickyErr != nil {
return 0, q.stickyErr
}
var count int
_, err := q.db.QueryOne(
Scan(&count),
q.countSelectQuery("count(*)"),
q.model,
)
return count, err
}
func (q *Query) countSelectQuery(column string) selectQuery {
return selectQuery{
q: q,
count: column,
}
}
// First sorts rows by primary key and selects the first row.
// It is a shortcut for:
//
// q.OrderExpr("id ASC").Limit(1)
func (q *Query) First() error {
err := q.model.Table().checkPKs()
if err != nil {
return err
}
b := columns(nil, q.model.Table().Alias, "", q.model.Table().PKs)
return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select()
}
// Last sorts rows by primary key and selects the last row.
// It is a shortcut for:
//
// q.OrderExpr("id DESC").Limit(1)
func (q *Query) Last() error {
err := q.model.Table().checkPKs()
if err != nil {
return err
}
b := columns(nil, q.model.Table().Alias, "", q.model.Table().PKs)
b = append(b, " DESC"...)
return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select()
}
// Select selects the model.
func (q *Query) Select(values ...interface{}) error {
if q.stickyErr != nil {
return q.stickyErr
}
model, err := q.newModel(values...)
if err != nil {
return err
}
res, err := q.query(model, selectQuery{q: q})
if err != nil {
return err
}
if res.RowsReturned() > 0 {
if q.model != nil {
if err := q.selectJoins(q.model.GetJoins()); err != nil {
return err
}
}
if err := model.AfterSelect(q.db); err != nil {
return err
}
}
return nil
}
func (q *Query) newModel(values ...interface{}) (Model, error) {
if len(values) > 0 {
return NewModel(values...)
}
return q.model, nil
}
func (q *Query) query(model Model, query interface{}) (Result, error) {
if _, ok := model.(useQueryOne); ok {
return q.db.QueryOne(model, query, q.model)
}
return q.db.Query(model, query, q.model)
}
// SelectAndCount runs Select and Count in two goroutines,
// waits for them to finish and returns the result.
//
// If both operations fail, the returned err is whichever error was
// written last; mu guards the shared err variable against a data race
// between the two goroutines. count is written by only one goroutine
// and read after wg.Wait, so it needs no lock.
func (q *Query) SelectAndCount(values ...interface{}) (count int, err error) {
	if q.stickyErr != nil {
		return 0, q.stickyErr
	}

	var wg sync.WaitGroup
	wg.Add(2)
	var mu sync.Mutex

	go func() {
		defer wg.Done()
		if e := q.Select(values...); e != nil {
			mu.Lock()
			err = e
			mu.Unlock()
		}
	}()

	go func() {
		defer wg.Done()
		var e error
		count, e = q.Count()
		if e != nil {
			mu.Lock()
			err = e
			mu.Unlock()
		}
	}()

	wg.Wait()
	return count, err
}
func (q *Query) forEachHasOneJoin(fn func(*join)) {
if q.model == nil {
return
}
q._forEachHasOneJoin(fn, q.model.GetJoins())
}
func (q *Query) _forEachHasOneJoin(fn func(*join), joins []join) {
for i := range joins {
j := &joins[i]
switch j.Rel.Type {
case HasOneRelation, BelongsToRelation:
fn(j)
q._forEachHasOneJoin(fn, j.JoinModel.GetJoins())
}
}
}
func (q *Query) selectJoins(joins []join) error {
var err error
for i := range joins {
j := &joins[i]
if j.Rel.Type == HasOneRelation || j.Rel.Type == BelongsToRelation {
err = q.selectJoins(j.JoinModel.GetJoins())
} else {
err = j.Select(q.db)
}
if err != nil {
return err
}
}
return nil
}
// Insert inserts the model.
func (q *Query) Insert(values ...interface{}) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
model, err := q.newModel(values...)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.BeforeInsert(q.db); err != nil {
return nil, err
}
}
res, err := q.db.Query(model, insertQuery{q: q}, q.model)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.AfterInsert(q.db); err != nil {
return nil, err
}
}
return res, nil
}
// SelectOrInsert selects the model inserting one if it does not exist.
func (q *Query) SelectOrInsert(values ...interface{}) (inserted bool, err error) {
if q.stickyErr != nil {
return false, q.stickyErr
}
insertq := q
if len(insertq.columns) > 0 {
insertq = insertq.Copy()
insertq.columns = nil
}
var insertErr error
for i := 0; i < 5; i++ {
if i >= 2 {
time.Sleep(internal.RetryBackoff(i-2, 250*time.Millisecond, 5*time.Second))
}
err := q.Select(values...)
if err == nil {
return false, nil
}
if err != internal.ErrNoRows {
return false, err
}
res, err := insertq.Insert(values...)
if err != nil {
insertErr = err
if pgErr, ok := err.(internal.PGError); ok {
if pgErr.IntegrityViolation() {
continue
}
if pgErr.Field('C') == "55000" {
// Retry on "#55000 attempted to delete invisible tuple".
continue
}
}
return false, err
}
if res.RowsAffected() == 1 {
return true, nil
}
}
err = fmt.Errorf(
"pg: SelectOrInsert: select returns no rows (insert fails with err=%q)",
insertErr,
)
return false, err
}
// Update updates the model.
func (q *Query) Update(scan ...interface{}) (Result, error) {
return q.update(scan, false)
}
// UpdateNotNull updates the model omitting zero-valued (NULL) columns.
// It differs from Update only in passing omitZero=true to update.
func (q *Query) UpdateNotNull(scan ...interface{}) (Result, error) {
	return q.update(scan, true)
}
func (q *Query) update(scan []interface{}, omitZero bool) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
model, err := q.newModel(scan...)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.BeforeUpdate(q.db); err != nil {
return nil, err
}
}
res, err := q.db.Query(model, updateQuery{q: q, omitZero: omitZero}, q.model)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.AfterUpdate(q.db); err != nil {
return nil, err
}
}
return res, nil
}
// Delete deletes the model.
func (q *Query) Delete(values ...interface{}) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
model, err := q.newModel(values...)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.BeforeDelete(q.db); err != nil {
return nil, err
}
}
res, err := q.db.Query(model, deleteQuery{q}, q.model)
if err != nil {
return nil, err
}
if q.model != nil {
if err := q.model.AfterDelete(q.db); err != nil {
return nil, err
}
}
return res, nil
}
func (q *Query) CreateTable(opt *CreateTableOptions) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
return q.db.Exec(createTableQuery{
q: q,
opt: opt,
})
}
func (q *Query) DropTable(opt *DropTableOptions) (Result, error) {
if q.stickyErr != nil {
return nil, q.stickyErr
}
return q.db.Exec(dropTableQuery{
q: q,
opt: opt,
})
}
// Exec is an alias for DB.Exec.
func (q *Query) Exec(query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.Exec(query, params...)
}
// ExecOne is an alias for DB.ExecOne.
func (q *Query) ExecOne(query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.ExecOne(query, params...)
}
// Query is an alias for DB.Query.
func (q *Query) Query(model, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.Query(model, query, params...)
}
// QueryOne is an alias for DB.QueryOne.
func (q *Query) QueryOne(model, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.QueryOne(model, query, params...)
}
// CopyFrom is an alias from DB.CopyFrom.
func (q *Query) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.CopyFrom(r, query, params...)
}
// CopyTo is an alias from DB.CopyTo.
func (q *Query) CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) {
params = append(params, q.model)
return q.db.CopyTo(w, query, params...)
}
func (q *Query) FormatQuery(b []byte, query string, params ...interface{}) []byte {
params = append(params, q.model)
if q.db != nil {
return q.db.FormatQuery(b, query, params...)
}
return formatter.Append(b, query, params...)
}
func (q *Query) hasModel() bool {
return !q.ignoreModel && q.model != nil
}
func (q *Query) hasTables() bool {
return q.hasModel() || len(q.tables) > 0
}
func (q *Query) appendTableName(b []byte) []byte {
return q.FormatQuery(b, string(q.model.Table().Name))
}
func (q *Query) appendTableNameWithAlias(b []byte) []byte {
b = q.appendTableName(b)
b = append(b, " AS "...)
b = append(b, q.model.Table().Alias...)
return b
}
func (q *Query) appendFirstTable(b []byte) []byte {
if q.hasModel() {
return q.appendTableName(b)
}
if len(q.tables) > 0 {
b = q.tables[0].AppendFormat(b, q)
}
return b
}
func (q *Query) appendFirstTableWithAlias(b []byte) []byte {
if q.hasModel() {
return q.appendTableNameWithAlias(b)
}
if len(q.tables) > 0 {
b = q.tables[0].AppendFormat(b, q)
}
return b
}
func (q *Query) appendTables(b []byte) []byte {
if q.hasModel() {
b = q.appendTableNameWithAlias(b)
if len(q.tables) > 0 {
b = append(b, ", "...)
}
}
for i, f := range q.tables {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) hasOtherTables() bool {
if q.hasModel() {
return len(q.tables) > 0
}
return len(q.tables) > 1
}
func (q *Query) modelHasData() bool {
if !q.hasModel() {
return false
}
v := q.model.Value()
return v.Kind() == reflect.Slice && v.Len() > 0
}
func (q *Query) appendOtherTables(b []byte) []byte {
tables := q.tables
if !q.hasModel() {
tables = tables[1:]
}
for i, f := range tables {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendColumns(b []byte) []byte {
for i, f := range q.columns {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) mustAppendWhere(b []byte) ([]byte, error) {
if len(q.where) > 0 {
b = q.appendWhere(b)
return b, nil
}
if q.model == nil {
return nil, errors.New("pg: Model is nil")
}
if err := q.model.Table().checkPKs(); err != nil {
return nil, err
}
b = append(b, " WHERE "...)
return wherePKQuery{q}.AppendFormat(b, nil), nil
}
func (q *Query) appendWhere(b []byte) []byte {
return q._appendWhere(b, q.where)
}
func (q *Query) appendUpdWhere(b []byte) []byte {
return q._appendWhere(b, q.updWhere)
}
func (q *Query) _appendWhere(b []byte, where []sepFormatAppender) []byte {
b = append(b, " WHERE "...)
for i, f := range where {
if i > 0 {
b = append(b, ' ')
b = f.AppendSep(b)
b = append(b, ' ')
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendSet(b []byte) []byte {
b = append(b, " SET "...)
for i, f := range q.set {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendReturning(b []byte) []byte {
b = append(b, " RETURNING "...)
for i, f := range q.returning {
if i > 0 {
b = append(b, ", "...)
}
b = f.AppendFormat(b, q)
}
return b
}
func (q *Query) appendWith(b []byte) ([]byte, error) {
var err error
b = append(b, "WITH "...)
for i, with := range q.with {
if i > 0 {
b = append(b, ", "...)
}
b = types.AppendField(b, with.name, 1)
b = append(b, " AS ("...)
b, err = selectQuery{q: with.query}.AppendQuery(b)
if err != nil {
return nil, err
}
b = append(b, ')')
}
b = append(b, ' ')
return b, nil
}
//------------------------------------------------------------------------------
type wherePKQuery struct {
*Query
}
func (wherePKQuery) AppendSep(b []byte) []byte {
return append(b, "AND"...)
}
func (q wherePKQuery) AppendFormat(b []byte, f QueryFormatter) []byte {
table := q.model.Table()
value := q.model.Value()
if value.Kind() == reflect.Struct {
return appendColumnAndValue(b, value, table.Alias, table.PKs)
} else {
return appendColumnAndColumn(b, value, table.Alias, table.PKs)
}
}
func appendColumnAndValue(b []byte, v reflect.Value, alias types.Q, fields []*Field) []byte {
for i, f := range fields {
if i > 0 {
b = append(b, " AND "...)
}
b = append(b, alias...)
b = append(b, '.')
b = append(b, f.Column...)
b = append(b, " = "...)
b = f.AppendValue(b, v, 1)
}
return b
}
func appendColumnAndColumn(b []byte, v reflect.Value, alias types.Q, fields []*Field) []byte {
for i, f := range fields {
if i > 0 {
b = append(b, " AND "...)
}
b = append(b, alias...)
b = append(b, '.')
b = append(b, f.Column...)
b = append(b, " = _data."...)
b = append(b, f.Column...)
}
return b
}
|
// Package unsafeslice contains functions for zero-copy casting between typed slices and byte slices.
package unsafeslice
import (
"reflect"
"unsafe"
)
// Useful constants.
const (
Uint64Size = 8
Uint32Size = 4
Uint16Size = 2
Uint8Size = 1
)
func newRawSliceHeader(sh *reflect.SliceHeader, b []byte, stride int) *reflect.SliceHeader {
sh.Len = len(b) / stride
sh.Cap = len(b) / stride
sh.Data = (uintptr)(unsafe.Pointer(&b[0]))
return sh
}
// newSliceHeaderFromBytes allocates a slice header describing b
// reinterpreted as elements of stride bytes, and returns it as an
// unsafe.Pointer ready to be cast to a concrete slice type.
// NOTE(review): the resulting slice aliases b's storage — mutating one
// mutates the other.
func newSliceHeaderFromBytes(b []byte, stride int) unsafe.Pointer {
	sh := &reflect.SliceHeader{}
	return unsafe.Pointer(newRawSliceHeader(sh, b, stride))
}
func newSliceHeader(p unsafe.Pointer, size int) unsafe.Pointer {
return unsafe.Pointer(&reflect.SliceHeader{
Len: size,
Cap: size,
Data: uintptr(p),
})
}
func Uint64SliceFromByteSlice(b []byte) []uint64 {
return *(*[]uint64)(newSliceHeaderFromBytes(b, Uint64Size))
}
func ByteSliceFromUint64Slice(b []uint64) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
}
func Int64SliceFromByteSlice(b []byte) []int64 {
return *(*[]int64)(newSliceHeaderFromBytes(b, Uint64Size))
}
func ByteSliceFromInt64Slice(b []int64) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
}
func Uint32SliceFromByteSlice(b []byte) []uint32 {
return *(*[]uint32)(newSliceHeaderFromBytes(b, Uint32Size))
}
func ByteSliceFromUint32Slice(b []uint32) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size))
}
func Int32SliceFromByteSlice(b []byte) []int32 {
return *(*[]int32)(newSliceHeaderFromBytes(b, Uint32Size))
}
func ByteSliceFromInt32Slice(b []int32) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size))
}
func Uint16SliceFromByteSlice(b []byte) []uint16 {
return *(*[]uint16)(newSliceHeaderFromBytes(b, Uint16Size))
}
// ByteSliceFromUint16Slice reinterprets b as its raw bytes (zero copy).
// The parameter was previously mistyped as []uint32, which disagreed
// with both the function name and the Uint16Size length calculation.
func ByteSliceFromUint16Slice(b []uint16) []byte {
	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size))
}
func Int16SliceFromByteSlice(b []byte) []int16 {
return *(*[]int16)(newSliceHeaderFromBytes(b, Uint16Size))
}
// ByteSliceFromInt16Slice reinterprets b as its raw bytes (zero copy).
// The parameter was previously mistyped as []int32, which disagreed
// with both the function name and the Uint16Size length calculation.
func ByteSliceFromInt16Slice(b []int16) []byte {
	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size))
}
func Uint8SliceFromByteSlice(b []byte) []uint8 {
return *(*[]uint8)(newSliceHeaderFromBytes(b, Uint8Size))
}
func ByteSliceFromUint8Slice(b []uint8) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint8Size))
}
func Int8SliceFromByteSlice(b []byte) []int8 {
return *(*[]int8)(newSliceHeaderFromBytes(b, Uint8Size))
}
func ByteSliceFromInt8Slice(b []int8) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint8Size))
}
// ByteSliceFromString returns a []byte sharing s's backing storage
// (zero copy). Because strings are immutable in Go, the caller must
// NOT write to the returned slice — doing so is undefined behavior.
func ByteSliceFromString(s string) []byte {
	h := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return *(*[]byte)(newSliceHeader(unsafe.Pointer(h.Data), len(s)*Uint8Size))
}
// StringFromByteSlice returns a string that shares b's backing memory
// without copying. Because strings are assumed immutable, b must not be
// modified (and must stay alive, non-empty) while the result is in use.
func StringFromByteSlice(b []byte) string {
	h := reflect.StringHeader{
		Data: uintptr(unsafe.Pointer(&b[0])),
		Len: len(b),
	}
	return *(*string)(unsafe.Pointer(&h))
}
// StructSliceFromByteSlice creates a slice of structs from a slice of bytes
// without copying, by pointing the caller's slice header at b's memory.
//
// var v []Struct
// StructSliceFromByteSlice(bytes, &v)
//
// Elements in the byte array must be padded correctly. See unsafe.AlignOf, et al.
// The result aliases b, so b must stay alive (and non-empty) while *out is used.
//
// Note that this is slower than the scalar primitives above as it uses reflection.
func StructSliceFromByteSlice(b []byte, out interface{}) {
	ptr := reflect.ValueOf(out)
	if ptr.Kind() != reflect.Ptr {
		panic("expected pointer to a slice of structs (*[]X)")
	}
	slice := ptr.Elem()
	if slice.Kind() != reflect.Slice {
		panic("expected pointer to a slice of structs (*[]X)")
	}
	// TODO: More checks, such as ensuring that:
	// - elements are NOT pointers
	// - structs do not contain pointers, slices or maps
	stride := int(slice.Type().Elem().Size())
	if len(b)%stride != 0 {
		panic("size of byte buffer is not a multiple of struct size")
	}
	// Overwrite the caller's slice header in place so *out views b directly.
	sh := (*reflect.SliceHeader)(unsafe.Pointer(slice.UnsafeAddr()))
	newRawSliceHeader(sh, b, stride)
}
// ByteSliceFromStructSlice does what you would expect: it returns a []byte
// view over a slice of structs without copying. An empty input yields a
// zero-length byte slice with nil data.
//
// Note that this is slower than the scalar primitives above as it uses reflection.
func ByteSliceFromStructSlice(s interface{}) []byte {
	slice := reflect.ValueOf(s)
	if slice.Kind() != reflect.Slice {
		panic("expected a slice of structs (*[]X)")
	}
	var length int
	var data uintptr
	if slice.Len() != 0 {
		elem := slice.Index(0)
		// total bytes = element size * element count
		length = int(elem.Type().Size()) * slice.Len()
		data = elem.UnsafeAddr()
	}
	out := &reflect.SliceHeader{
		Len: length,
		Cap: length,
		Data: data,
	}
	return *(*[]byte)(unsafe.Pointer(out))
}
Fix some typos.
// Package unsafeslice contains functions for zero-copy casting between typed slices and byte slices.
package unsafeslice
import (
"reflect"
"unsafe"
)
// Useful constants.
const (
Uint64Size = 8
Uint32Size = 4
Uint16Size = 2
Uint8Size = 1
)
// newRawSliceHeader fills sh so that it describes b's memory as
// len(b)/stride elements of stride bytes each, and returns sh.
// b must be non-empty (b[0] is addressed) and must outlive any slice
// built from sh.
func newRawSliceHeader(sh *reflect.SliceHeader, b []byte, stride int) *reflect.SliceHeader {
	sh.Len = len(b) / stride
	sh.Cap = len(b) / stride
	sh.Data = (uintptr)(unsafe.Pointer(&b[0]))
	return sh
}
// newSliceHeaderFromBytes builds a fresh slice header viewing b as
// len(b)/stride elements, returned as an unsafe.Pointer ready to be cast
// to the desired *[]T by the caller.
func newSliceHeaderFromBytes(b []byte, stride int) unsafe.Pointer {
	sh := &reflect.SliceHeader{}
	return unsafe.Pointer(newRawSliceHeader(sh, b, stride))
}
// newSliceHeader returns an unsafe.Pointer to a slice header describing
// size elements starting at p; callers cast it to a concrete *[]T.
// The memory behind p must stay alive while the resulting slice is used.
func newSliceHeader(p unsafe.Pointer, size int) unsafe.Pointer {
	return unsafe.Pointer(&reflect.SliceHeader{
		Len: size,
		Cap: size,
		Data: uintptr(p),
	})
}
func Uint64SliceFromByteSlice(b []byte) []uint64 {
return *(*[]uint64)(newSliceHeaderFromBytes(b, Uint64Size))
}
func ByteSliceFromUint64Slice(b []uint64) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
}
func Int64SliceFromByteSlice(b []byte) []int64 {
return *(*[]int64)(newSliceHeaderFromBytes(b, Uint64Size))
}
func ByteSliceFromInt64Slice(b []int64) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
}
func Uint32SliceFromByteSlice(b []byte) []uint32 {
return *(*[]uint32)(newSliceHeaderFromBytes(b, Uint32Size))
}
func ByteSliceFromUint32Slice(b []uint32) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size))
}
func Int32SliceFromByteSlice(b []byte) []int32 {
return *(*[]int32)(newSliceHeaderFromBytes(b, Uint32Size))
}
func ByteSliceFromInt32Slice(b []int32) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size))
}
func Uint16SliceFromByteSlice(b []byte) []uint16 {
return *(*[]uint16)(newSliceHeaderFromBytes(b, Uint16Size))
}
func ByteSliceFromUint16Slice(b []uint16) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size))
}
func Int16SliceFromByteSlice(b []byte) []int16 {
return *(*[]int16)(newSliceHeaderFromBytes(b, Uint16Size))
}
func ByteSliceFromInt16Slice(b []int16) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size))
}
func Int8SliceFromByteSlice(b []byte) []int8 {
return *(*[]int8)(newSliceHeaderFromBytes(b, Uint8Size))
}
func ByteSliceFromInt8Slice(b []int8) []byte {
return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint8Size))
}
func ByteSliceFromString(s string) []byte {
h := (*reflect.StringHeader)(unsafe.Pointer(&s))
return *(*[]byte)(newSliceHeader(unsafe.Pointer(h.Data), len(s)*Uint8Size))
}
func StringFromByteSlice(b []byte) string {
h := reflect.StringHeader{
Data: uintptr(unsafe.Pointer(&b[0])),
Len: len(b),
}
return *(*string)(unsafe.Pointer(&h))
}
// Create a slice of structs from a slice of bytes.
//
// var v []Struct
// StructSliceFromByteSlice(bytes, &v)
//
// Elements in the byte array must be padded correctly. See unsafe.AlignOf, et al.
//
// Note that this is slower than the scalar primitives above as it uses reflection.
func StructSliceFromByteSlice(b []byte, out interface{}) {
ptr := reflect.ValueOf(out)
if ptr.Kind() != reflect.Ptr {
panic("expected pointer to a slice of structs (*[]X)")
}
slice := ptr.Elem()
if slice.Kind() != reflect.Slice {
panic("expected pointer to a slice of structs (*[]X)")
}
// TODO: More checks, such as ensuring that:
// - elements are NOT pointers
// - structs do not contain pointers, slices or maps
stride := int(slice.Type().Elem().Size())
if len(b)%stride != 0 {
panic("size of byte buffer is not a multiple of struct size")
}
sh := (*reflect.SliceHeader)(unsafe.Pointer(slice.UnsafeAddr()))
newRawSliceHeader(sh, b, stride)
}
// ByteSliceFromStructSlice does what you would expect.
//
// Note that this is slower than the scalar primitives above as it uses reflection.
func ByteSliceFromStructSlice(s interface{}) []byte {
slice := reflect.ValueOf(s)
if slice.Kind() != reflect.Slice {
panic("expected a slice of structs (*[]X)")
}
var length int
var data uintptr
if slice.Len() != 0 {
elem := slice.Index(0)
length = int(elem.Type().Size()) * slice.Len()
data = elem.UnsafeAddr()
}
out := &reflect.SliceHeader{
Len: length,
Cap: length,
Data: data,
}
return *(*[]byte)(unsafe.Pointer(out))
}
|
package main
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/bitly/go-nsq"
"github.com/bitly/go-simplejson"
"github.com/bitly/nsq/util"
"github.com/bitly/nsq/util/lookupd"
"io/ioutil"
"log"
"net"
"os"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
type NSQD struct {
// 64bit atomic vars need to be first for proper alignment on 32bit platforms
clientIDSequence int64
sync.RWMutex
options *nsqdOptions
topicMap map[string]*Topic
lookupPeers []*LookupPeer
tcpAddr *net.TCPAddr
httpAddr *net.TCPAddr
tcpListener net.Listener
httpListener net.Listener
tlsConfig *tls.Config
idChan chan nsq.MessageID
notifyChan chan interface{}
exitChan chan int
waitGroup util.WaitGroupWrapper
}
// NewNSQD validates options, resolves the TCP/HTTP listen addresses,
// optionally prepares TLS and the statsd prefix, and returns an NSQD
// with its id pump goroutine already started. Invalid options or
// unresolvable addresses terminate the process via log.Fatal*.
func NewNSQD(options *nsqdOptions) *NSQD {
	var tlsConfig *tls.Config
	if options.MaxDeflateLevel < 1 || options.MaxDeflateLevel > 9 {
		log.Fatalf("--max-deflate-level must be [1,9]")
	}
	tcpAddr, err := net.ResolveTCPAddr("tcp", options.TCPAddress)
	if err != nil {
		log.Fatal(err)
	}
	httpAddr, err := net.ResolveTCPAddr("tcp", options.HTTPAddress)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: only rewrite the prefix when one is configured (!= "").
	// The previous `== ""` guard ran this branch on an empty prefix, where
	// prefixWithHost[len(prefixWithHost)-1] indexes at -1 and panics, and
	// it skipped the "%s" host substitution for every non-empty prefix.
	if options.StatsdPrefix != "" {
		statsdHostKey := util.StatsdHostKey(net.JoinHostPort(options.BroadcastAddress,
			strconv.Itoa(httpAddr.Port)))
		prefixWithHost := strings.Replace(options.StatsdPrefix, "%s", statsdHostKey, -1)
		// ensure the prefix ends with a dot so metric names concatenate cleanly
		if prefixWithHost[len(prefixWithHost)-1] != '.' {
			prefixWithHost += "."
		}
		options.StatsdPrefix = prefixWithHost
	}
	if options.TLSCert != "" || options.TLSKey != "" {
		cert, err := tls.LoadX509KeyPair(options.TLSCert, options.TLSKey)
		if err != nil {
			log.Fatalf("ERROR: failed to LoadX509KeyPair %s", err.Error())
		}
		tlsConfig = &tls.Config{
			Certificates: []tls.Certificate{cert},
			ClientAuth:   tls.VerifyClientCertIfGiven,
		}
		tlsConfig.BuildNameToCertificate()
	}
	n := &NSQD{
		options:    options,
		tcpAddr:    tcpAddr,
		httpAddr:   httpAddr,
		topicMap:   make(map[string]*Topic),
		idChan:     make(chan nsq.MessageID, 4096),
		exitChan:   make(chan int),
		notifyChan: make(chan interface{}),
		tlsConfig:  tlsConfig,
	}
	n.waitGroup.Wrap(func() { n.idPump() })
	return n
}
// Main starts the lookupd loop, binds and serves the TCP and HTTP
// listeners (exiting the process via log.Fatalf if either bind fails),
// and, when a statsd address is configured, starts the statsd loop.
// All goroutines are tracked via waitGroup so Exit can wait on them.
func (n *NSQD) Main() {
	context := &Context{n}
	n.waitGroup.Wrap(func() { n.lookupLoop() })
	tcpListener, err := net.Listen("tcp", n.tcpAddr.String())
	if err != nil {
		log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr, err.Error())
	}
	n.tcpListener = tcpListener
	tcpServer := &tcpServer{context: context}
	n.waitGroup.Wrap(func() { util.TCPServer(n.tcpListener, tcpServer) })
	httpListener, err := net.Listen("tcp", n.httpAddr.String())
	if err != nil {
		log.Fatalf("FATAL: listen (%s) failed - %s", n.httpAddr, err.Error())
	}
	n.httpListener = httpListener
	httpServer := &httpServer{context: context}
	n.waitGroup.Wrap(func() { util.HTTPServer(n.httpListener, httpServer) })
	if n.options.StatsdAddress != "" {
		n.waitGroup.Wrap(func() { n.statsdLoop() })
	}
}
// LoadMetadata restores topics and channels (including their paused
// state) from the nsqd.<ID>.dat file written by PersistMetadata. A
// missing file is a normal fresh start; any read or parse failure is
// logged and aborts the load, leaving whatever was restored so far.
func (n *NSQD) LoadMetadata() {
	fn := fmt.Sprintf(path.Join(n.options.DataPath, "nsqd.%d.dat"), n.options.ID)
	data, err := ioutil.ReadFile(fn)
	if err != nil {
		// only complain when the file exists but could not be read
		if !os.IsNotExist(err) {
			log.Printf("ERROR: failed to read channel metadata from %s - %s", fn, err.Error())
		}
		return
	}
	js, err := simplejson.NewJson(data)
	if err != nil {
		log.Printf("ERROR: failed to parse metadata - %s", err.Error())
		return
	}
	topics, err := js.Get("topics").Array()
	if err != nil {
		log.Printf("ERROR: failed to parse metadata - %s", err.Error())
		return
	}
	for ti := range topics {
		topicJs := js.Get("topics").GetIndex(ti)
		topicName, err := topicJs.Get("name").String()
		if err != nil {
			log.Printf("ERROR: failed to parse metadata - %s", err.Error())
			return
		}
		// names that no longer validate are skipped, not fatal
		if !nsq.IsValidTopicName(topicName) {
			log.Printf("WARNING: skipping creation of invalid topic %s", topicName)
			continue
		}
		topic := n.GetTopic(topicName)
		paused, _ := topicJs.Get("paused").Bool()
		if paused {
			topic.Pause()
		}
		channels, err := topicJs.Get("channels").Array()
		if err != nil {
			log.Printf("ERROR: failed to parse metadata - %s", err.Error())
			return
		}
		for ci := range channels {
			channelJs := topicJs.Get("channels").GetIndex(ci)
			channelName, err := channelJs.Get("name").String()
			if err != nil {
				log.Printf("ERROR: failed to parse metadata - %s", err.Error())
				return
			}
			if !nsq.IsValidChannelName(channelName) {
				log.Printf("WARNING: skipping creation of invalid channel %s", channelName)
				continue
			}
			channel := topic.GetChannel(channelName)
			paused, _ = channelJs.Get("paused").Bool()
			if paused {
				channel.Pause()
			}
		}
	}
}
// PersistMetadata writes the current topic/channel topology (names and
// paused state, skipping ephemeral channels) to nsqd.<ID>.dat. It writes
// a .tmp file, syncs, then renames over the real file so a crash never
// leaves a partially written metadata file.
//
// NOTE(review): both callers in this file (Exit, Notify) hold n.Lock
// while calling; the unguarded topicMap iteration relies on that.
func (n *NSQD) PersistMetadata() error {
	// persist metadata about what topics/channels we have
	// so that upon restart we can get back to the same state
	fileName := fmt.Sprintf(path.Join(n.options.DataPath, "nsqd.%d.dat"), n.options.ID)
	log.Printf("NSQ: persisting topic/channel metadata to %s", fileName)
	js := make(map[string]interface{})
	topics := make([]interface{}, 0)
	for _, topic := range n.topicMap {
		topicData := make(map[string]interface{})
		topicData["name"] = topic.name
		topicData["paused"] = topic.IsPaused()
		channels := make([]interface{}, 0)
		topic.Lock()
		for _, channel := range topic.channelMap {
			channel.Lock()
			// ephemeral channels are transient by definition; don't persist them
			if !channel.ephemeralChannel {
				channelData := make(map[string]interface{})
				channelData["name"] = channel.name
				channelData["paused"] = channel.IsPaused()
				channels = append(channels, channelData)
			}
			channel.Unlock()
		}
		topic.Unlock()
		topicData["channels"] = channels
		topics = append(topics, topicData)
	}
	js["version"] = util.BINARY_VERSION
	js["topics"] = topics
	data, err := json.Marshal(&js)
	if err != nil {
		return err
	}
	tmpFileName := fileName + ".tmp"
	f, err := os.OpenFile(tmpFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	_, err = f.Write(data)
	if err != nil {
		f.Close()
		return err
	}
	f.Sync()
	f.Close()
	// atomic replace: readers either see the old file or the complete new one
	err = os.Rename(tmpFileName, fileName)
	if err != nil {
		return err
	}
	return nil
}
// Exit performs a clean shutdown: stop accepting new connections,
// persist topic/channel metadata, close every topic, then signal
// exitChan (which stops idPump and other loops) and wait for all
// waitGroup-tracked goroutines to finish.
func (n *NSQD) Exit() {
	if n.tcpListener != nil {
		n.tcpListener.Close()
	}
	if n.httpListener != nil {
		n.httpListener.Close()
	}
	n.Lock()
	err := n.PersistMetadata()
	if err != nil {
		log.Printf("ERROR: failed to persist metadata - %s", err.Error())
	}
	log.Printf("NSQ: closing topics")
	for _, topic := range n.topicMap {
		topic.Close()
	}
	n.Unlock()
	// we want to do this last as it closes the idPump (if closed first it
	// could potentially starve items in process and deadlock)
	close(n.exitChan)
	n.waitGroup.Wait()
}
// GetTopic performs a thread safe operation
// to return a pointer to a Topic object (potentially new).
//
// On a miss it creates the topic under the global lock, then downgrades
// to the topic's own lock while pre-creating channels discovered via
// lookupd, so concurrent PutMessages can't race the channel setup.
func (n *NSQD) GetTopic(topicName string) *Topic {
	n.Lock()
	t, ok := n.topicMap[topicName]
	if ok {
		n.Unlock()
		return t
	} else {
		t = NewTopic(topicName, &Context{n})
		n.topicMap[topicName] = t
		log.Printf("TOPIC(%s): created", t.name)
		// release our global nsqd lock, and switch to a more granular topic lock while we init our
		// channels from lookupd. This blocks concurrent PutMessages to this topic.
		t.Lock()
		n.Unlock()
		// if using lookupd, make a blocking call to get the topics, and immediately create them.
		// this makes sure that any message received is buffered to the right channels
		if len(n.lookupPeers) > 0 {
			channelNames, _ := lookupd.GetLookupdTopicChannels(t.name, n.lookupHttpAddrs())
			for _, channelName := range channelNames {
				t.getOrCreateChannel(channelName)
			}
		}
		t.Unlock()
		// NOTE: I would prefer for this to only happen in topic.GetChannel() but we're special
		// casing the code above so that we can control the locks such that it is impossible
		// for a message to be written to a (new) topic while we're looking up channels
		// from lookupd...
		//
		// update messagePump state
		select {
		case t.channelUpdateChan <- 1:
		case <-t.exitChan:
		}
	}
	return t
}
// GetExistingTopic gets a topic only if it exists; unlike GetTopic it
// never creates one, returning an error on a miss instead.
func (n *NSQD) GetExistingTopic(topicName string) (*Topic, error) {
	n.RLock()
	defer n.RUnlock()
	if topic, ok := n.topicMap[topicName]; ok {
		return topic, nil
	}
	return nil, errors.New("topic does not exist")
}
// DeleteExistingTopic removes a topic only if it exists. The topic is
// fully deleted (channels drained) before it is removed from the map;
// see the inline comment for the ordering rationale.
func (n *NSQD) DeleteExistingTopic(topicName string) error {
	n.RLock()
	topic, ok := n.topicMap[topicName]
	if !ok {
		n.RUnlock()
		return errors.New("topic does not exist")
	}
	n.RUnlock()
	// delete empties all channels and the topic itself before closing
	// (so that we dont leave any messages around)
	//
	// we do this before removing the topic from map below (with no lock)
	// so that any incoming writes will error and not create a new topic
	// to enforce ordering
	topic.Delete()
	n.Lock()
	delete(n.topicMap, topicName)
	n.Unlock()
	return nil
}
// idPump continuously generates message GUIDs and feeds them into
// idChan so consumers never wait on generation. GUID failures are
// rate-limited to one log line per second and retried after yielding
// the processor. The loop exits when exitChan is closed.
func (n *NSQD) idPump() {
	lastError := time.Now()
	for {
		id, err := NewGUID(n.options.ID)
		if err != nil {
			now := time.Now()
			if now.Sub(lastError) > time.Second {
				// only print the error once/second
				log.Printf("ERROR: %s", err.Error())
				lastError = now
			}
			// yield before retrying rather than spinning hot
			runtime.Gosched()
			continue
		}
		select {
		case n.idChan <- id.Hex():
		case <-n.exitChan:
			goto exit
		}
	}
exit:
	log.Printf("ID: closing")
}
// Notify publishes a topic/channel change onto notifyChan (consumed by
// the lookup loop) and persists metadata so the change survives restart.
func (n *NSQD) Notify(v interface{}) {
	// by selecting on exitChan we guarantee that
	// we do not block exit, see issue #123
	select {
	case <-n.exitChan:
	case n.notifyChan <- v:
		n.Lock()
		err := n.PersistMetadata()
		if err != nil {
			log.Printf("ERROR: failed to persist metadata - %s", err.Error())
		}
		n.Unlock()
	}
}
nsqd: properly fill in statsd prefix with host when not empty
package main
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/bitly/go-nsq"
"github.com/bitly/go-simplejson"
"github.com/bitly/nsq/util"
"github.com/bitly/nsq/util/lookupd"
"io/ioutil"
"log"
"net"
"os"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
type NSQD struct {
// 64bit atomic vars need to be first for proper alignment on 32bit platforms
clientIDSequence int64
sync.RWMutex
options *nsqdOptions
topicMap map[string]*Topic
lookupPeers []*LookupPeer
tcpAddr *net.TCPAddr
httpAddr *net.TCPAddr
tcpListener net.Listener
httpListener net.Listener
tlsConfig *tls.Config
idChan chan nsq.MessageID
notifyChan chan interface{}
exitChan chan int
waitGroup util.WaitGroupWrapper
}
func NewNSQD(options *nsqdOptions) *NSQD {
var tlsConfig *tls.Config
if options.MaxDeflateLevel < 1 || options.MaxDeflateLevel > 9 {
log.Fatalf("--max-deflate-level must be [1,9]")
}
tcpAddr, err := net.ResolveTCPAddr("tcp", options.TCPAddress)
if err != nil {
log.Fatal(err)
}
httpAddr, err := net.ResolveTCPAddr("tcp", options.HTTPAddress)
if err != nil {
log.Fatal(err)
}
if options.StatsdPrefix != "" {
statsdHostKey := util.StatsdHostKey(net.JoinHostPort(options.BroadcastAddress,
strconv.Itoa(httpAddr.Port)))
prefixWithHost := strings.Replace(options.StatsdPrefix, "%s", statsdHostKey, -1)
if prefixWithHost[len(prefixWithHost)-1] != '.' {
prefixWithHost += "."
}
options.StatsdPrefix = prefixWithHost
}
if options.TLSCert != "" || options.TLSKey != "" {
cert, err := tls.LoadX509KeyPair(options.TLSCert, options.TLSKey)
if err != nil {
log.Fatalf("ERROR: failed to LoadX509KeyPair %s", err.Error())
}
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
ClientAuth: tls.VerifyClientCertIfGiven,
}
tlsConfig.BuildNameToCertificate()
}
n := &NSQD{
options: options,
tcpAddr: tcpAddr,
httpAddr: httpAddr,
topicMap: make(map[string]*Topic),
idChan: make(chan nsq.MessageID, 4096),
exitChan: make(chan int),
notifyChan: make(chan interface{}),
tlsConfig: tlsConfig,
}
n.waitGroup.Wrap(func() { n.idPump() })
return n
}
func (n *NSQD) Main() {
context := &Context{n}
n.waitGroup.Wrap(func() { n.lookupLoop() })
tcpListener, err := net.Listen("tcp", n.tcpAddr.String())
if err != nil {
log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr, err.Error())
}
n.tcpListener = tcpListener
tcpServer := &tcpServer{context: context}
n.waitGroup.Wrap(func() { util.TCPServer(n.tcpListener, tcpServer) })
httpListener, err := net.Listen("tcp", n.httpAddr.String())
if err != nil {
log.Fatalf("FATAL: listen (%s) failed - %s", n.httpAddr, err.Error())
}
n.httpListener = httpListener
httpServer := &httpServer{context: context}
n.waitGroup.Wrap(func() { util.HTTPServer(n.httpListener, httpServer) })
if n.options.StatsdAddress != "" {
n.waitGroup.Wrap(func() { n.statsdLoop() })
}
}
func (n *NSQD) LoadMetadata() {
fn := fmt.Sprintf(path.Join(n.options.DataPath, "nsqd.%d.dat"), n.options.ID)
data, err := ioutil.ReadFile(fn)
if err != nil {
if !os.IsNotExist(err) {
log.Printf("ERROR: failed to read channel metadata from %s - %s", fn, err.Error())
}
return
}
js, err := simplejson.NewJson(data)
if err != nil {
log.Printf("ERROR: failed to parse metadata - %s", err.Error())
return
}
topics, err := js.Get("topics").Array()
if err != nil {
log.Printf("ERROR: failed to parse metadata - %s", err.Error())
return
}
for ti := range topics {
topicJs := js.Get("topics").GetIndex(ti)
topicName, err := topicJs.Get("name").String()
if err != nil {
log.Printf("ERROR: failed to parse metadata - %s", err.Error())
return
}
if !nsq.IsValidTopicName(topicName) {
log.Printf("WARNING: skipping creation of invalid topic %s", topicName)
continue
}
topic := n.GetTopic(topicName)
paused, _ := topicJs.Get("paused").Bool()
if paused {
topic.Pause()
}
channels, err := topicJs.Get("channels").Array()
if err != nil {
log.Printf("ERROR: failed to parse metadata - %s", err.Error())
return
}
for ci := range channels {
channelJs := topicJs.Get("channels").GetIndex(ci)
channelName, err := channelJs.Get("name").String()
if err != nil {
log.Printf("ERROR: failed to parse metadata - %s", err.Error())
return
}
if !nsq.IsValidChannelName(channelName) {
log.Printf("WARNING: skipping creation of invalid channel %s", channelName)
continue
}
channel := topic.GetChannel(channelName)
paused, _ = channelJs.Get("paused").Bool()
if paused {
channel.Pause()
}
}
}
}
func (n *NSQD) PersistMetadata() error {
// persist metadata about what topics/channels we have
// so that upon restart we can get back to the same state
fileName := fmt.Sprintf(path.Join(n.options.DataPath, "nsqd.%d.dat"), n.options.ID)
log.Printf("NSQ: persisting topic/channel metadata to %s", fileName)
js := make(map[string]interface{})
topics := make([]interface{}, 0)
for _, topic := range n.topicMap {
topicData := make(map[string]interface{})
topicData["name"] = topic.name
topicData["paused"] = topic.IsPaused()
channels := make([]interface{}, 0)
topic.Lock()
for _, channel := range topic.channelMap {
channel.Lock()
if !channel.ephemeralChannel {
channelData := make(map[string]interface{})
channelData["name"] = channel.name
channelData["paused"] = channel.IsPaused()
channels = append(channels, channelData)
}
channel.Unlock()
}
topic.Unlock()
topicData["channels"] = channels
topics = append(topics, topicData)
}
js["version"] = util.BINARY_VERSION
js["topics"] = topics
data, err := json.Marshal(&js)
if err != nil {
return err
}
tmpFileName := fileName + ".tmp"
f, err := os.OpenFile(tmpFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
f.Close()
return err
}
f.Sync()
f.Close()
err = os.Rename(tmpFileName, fileName)
if err != nil {
return err
}
return nil
}
func (n *NSQD) Exit() {
if n.tcpListener != nil {
n.tcpListener.Close()
}
if n.httpListener != nil {
n.httpListener.Close()
}
n.Lock()
err := n.PersistMetadata()
if err != nil {
log.Printf("ERROR: failed to persist metadata - %s", err.Error())
}
log.Printf("NSQ: closing topics")
for _, topic := range n.topicMap {
topic.Close()
}
n.Unlock()
// we want to do this last as it closes the idPump (if closed first it
// could potentially starve items in process and deadlock)
close(n.exitChan)
n.waitGroup.Wait()
}
// GetTopic performs a thread safe operation
// to return a pointer to a Topic object (potentially new)
func (n *NSQD) GetTopic(topicName string) *Topic {
n.Lock()
t, ok := n.topicMap[topicName]
if ok {
n.Unlock()
return t
} else {
t = NewTopic(topicName, &Context{n})
n.topicMap[topicName] = t
log.Printf("TOPIC(%s): created", t.name)
// release our global nsqd lock, and switch to a more granular topic lock while we init our
// channels from lookupd. This blocks concurrent PutMessages to this topic.
t.Lock()
n.Unlock()
// if using lookupd, make a blocking call to get the topics, and immediately create them.
// this makes sure that any message received is buffered to the right channels
if len(n.lookupPeers) > 0 {
channelNames, _ := lookupd.GetLookupdTopicChannels(t.name, n.lookupHttpAddrs())
for _, channelName := range channelNames {
t.getOrCreateChannel(channelName)
}
}
t.Unlock()
// NOTE: I would prefer for this to only happen in topic.GetChannel() but we're special
// casing the code above so that we can control the locks such that it is impossible
// for a message to be written to a (new) topic while we're looking up channels
// from lookupd...
//
// update messagePump state
select {
case t.channelUpdateChan <- 1:
case <-t.exitChan:
}
}
return t
}
// GetExistingTopic gets a topic only if it exists
func (n *NSQD) GetExistingTopic(topicName string) (*Topic, error) {
n.RLock()
defer n.RUnlock()
topic, ok := n.topicMap[topicName]
if !ok {
return nil, errors.New("topic does not exist")
}
return topic, nil
}
// DeleteExistingTopic removes a topic only if it exists
func (n *NSQD) DeleteExistingTopic(topicName string) error {
n.RLock()
topic, ok := n.topicMap[topicName]
if !ok {
n.RUnlock()
return errors.New("topic does not exist")
}
n.RUnlock()
// delete empties all channels and the topic itself before closing
// (so that we dont leave any messages around)
//
// we do this before removing the topic from map below (with no lock)
// so that any incoming writes will error and not create a new topic
// to enforce ordering
topic.Delete()
n.Lock()
delete(n.topicMap, topicName)
n.Unlock()
return nil
}
func (n *NSQD) idPump() {
lastError := time.Now()
for {
id, err := NewGUID(n.options.ID)
if err != nil {
now := time.Now()
if now.Sub(lastError) > time.Second {
// only print the error once/second
log.Printf("ERROR: %s", err.Error())
lastError = now
}
runtime.Gosched()
continue
}
select {
case n.idChan <- id.Hex():
case <-n.exitChan:
goto exit
}
}
exit:
log.Printf("ID: closing")
}
func (n *NSQD) Notify(v interface{}) {
// by selecting on exitChan we guarantee that
// we do not block exit, see issue #123
select {
case <-n.exitChan:
case n.notifyChan <- v:
n.Lock()
err := n.PersistMetadata()
if err != nil {
log.Printf("ERROR: failed to persist metadata - %s", err.Error())
}
n.Unlock()
}
}
|
package nv
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"reflect"
"sort"
xdr "github.com/davecgh/go-xdr/xdr2"
)
// Encode serializes i (a struct or map, possibly behind pointers) into
// the XDR nvlist wire format: a 4-byte encoding/endianness preamble
// followed by the encoded list. A nil or invalid value is an error.
func Encode(i interface{}) ([]byte, error) {
	if i == nil {
		return nil, errors.New("can not encode a nil pointer")
	}
	v := reflect.ValueOf(i)
	if !v.IsValid() {
		return nil, fmt.Errorf("type '%s' is invalid", v.Kind().String())
	}
	var out bytes.Buffer
	// preamble: XDR encoding (1), big-endian flag (1)
	if err := binary.Write(&out, binary.BigEndian, encoding{Encoding: 1, Endianess: 1}); err != nil {
		return nil, err
	}
	if err := encodeList(&out, v); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// encodeList writes an nvlist: a header with the unique-name flag, then
// one encoded pair per struct field or map entry of v (after pointer
// dereference). Map keys are sorted so output is deterministic, and the
// map branch appends the uint64(0) list terminator itself (the struct
// branch's terminator is written inside encodeStruct). Kinds other than
// struct/map are an error.
func encodeList(w io.Writer, v reflect.Value) error {
	var err error
	if err = binary.Write(w, binary.BigEndian, header{Flag: _UNIQUE_NAME}); err != nil {
		return err
	}
	v = deref(v)
	switch v.Kind() {
	case reflect.Struct:
		_, err = encodeStruct(v, w)
	case reflect.Map:
		// sort keys: Go map iteration order is randomized
		keys := make([]string, len(v.MapKeys()))
		for i, k := range v.MapKeys() {
			keys[i] = k.Interface().(string)
		}
		sort.Strings(keys)
		for _, name := range keys {
			v := v.MapIndex(reflect.ValueOf(name))
			if err := encodeItem(w, name, nil, v); err != nil {
				return err
			}
		}
		err = binary.Write(w, binary.BigEndian, uint64(0))
	default:
		return fmt.Errorf("invalid type '%s', must be a struct", v.Kind().String())
	}
	return err
}
// encodeStruct writes one nvpair per settable (exported) field of v,
// followed by the uint64(0) nvlist terminator.
//
// NOTE(review): the int result is dead weight — size is never
// incremented, so it is always 8, and the only caller in this file
// discards it (`_, err = encodeStruct(...)`). Candidate for removal
// from the signature.
func encodeStruct(v reflect.Value, w io.Writer) (int, error) {
	var err error
	size := 0
	forEachField(v, func(i int, field reflect.Value) bool {
		// Skip fields that can't be set (e.g. unexported)
		if !field.CanSet() {
			return true
		}
		// field name defaults to the Go name; a non-empty first tag overrides it
		name := v.Type().Field(i).Name
		tags := getTags(i, v)
		if len(tags) > 0 && tags[0] != "" {
			name = tags[0]
		}
		if err = encodeItem(w, name, tags, field); err != nil {
			return false
		}
		return true
	})
	if err != nil {
		return 0, err
	}
	if err = binary.Write(w, binary.BigEndian, uint64(0)); err != nil {
		return 0, err
	}
	return size + 8, nil
}
// encodeItem writes a single name/value nvpair to w: first the XDR-encoded
// pair header (name, element count, type, encoded/decoded sizes), then the
// XDR-encoded value. The nv type is derived from the field's reflect.Kind,
// optionally overridden by the second struct tag ("byte" or "uint8").
func encodeItem(w io.Writer, name string, tags []string, field reflect.Value) error {
	field = deref(field)
	// scalar kind -> nv data type
	var types = map[reflect.Kind]dataType{
		reflect.Bool: _BOOLEAN_VALUE,
		reflect.Float32: _DOUBLE,
		reflect.Float64: _DOUBLE,
		reflect.Int16: _INT16,
		reflect.Int32: _INT32,
		reflect.Int64: _INT64,
		reflect.Int8: _INT8,
		reflect.Int: _INT32,
		reflect.Map: _NVLIST,
		reflect.String: _STRING,
		reflect.Struct: _NVLIST,
		reflect.Uint16: _UINT16,
		reflect.Uint32: _UINT32,
		reflect.Uint64: _UINT64,
		reflect.Uint8: _UINT8,
		reflect.Uint: _UINT32,
	}
	// element kind of a slice/array -> nv array data type
	var sliceTypes = map[reflect.Kind]dataType{
		reflect.Bool: _BOOLEAN_ARRAY,
		reflect.Int16: _INT16_ARRAY,
		reflect.Int32: _INT32_ARRAY,
		reflect.Int64: _INT64_ARRAY,
		reflect.Int8: _INT8_ARRAY,
		reflect.Int: _INT32_ARRAY,
		reflect.Map: _NVLIST_ARRAY,
		reflect.String: _STRING_ARRAY,
		reflect.Struct: _NVLIST_ARRAY,
		reflect.Uint16: _UINT16_ARRAY,
		reflect.Uint32: _UINT32_ARRAY,
		reflect.Uint64: _UINT64_ARRAY,
		reflect.Uint8: _UINT8_ARRAY,
		reflect.Uint: _UINT32_ARRAY,
	}
	// an explicit tag disambiguates byte vs uint8 wire encodings
	var tagType dataType
	if len(tags) > 1 {
		if tags[1] == "byte" {
			tagType = _BYTE
		} else if tags[1] == "uint8" {
			tagType = _UINT8
		}
	}
	p := pair{
		Name: name,
		NElements: 1,
	}
	var ok bool
	p.Type, ok = types[field.Kind()]
	// special cases that override the table lookup above
	switch field.Kind() {
	case reflect.Bool:
		if field.Type().Name() == "Boolean" {
			p.Type = _BOOLEAN
		}
	case reflect.Interface:
		// unwrap and re-dispatch on the dynamic value
		return encodeItem(w, name, tags, reflect.ValueOf(field.Interface()))
	case reflect.Slice, reflect.Array:
		p.Type, ok = sliceTypes[field.Type().Elem().Kind()]
		switch tagType {
		case _BYTE:
			p.Type = _BYTE_ARRAY
		case _UINT8:
			p.Type = _UINT8_ARRAY
		}
	case reflect.Int64:
		if field.Type().String() == "time.Duration" {
			p.Type = _HRTIME
		}
	case reflect.Uint8:
		switch tagType {
		case _BYTE:
			p.Type = _BYTE
		case _UINT8:
			p.Type = _UINT8
		}
	}
	if !ok {
		return fmt.Errorf("unknown type: %v", field.Kind())
	}
	p.data = field.Interface()
	value := p.data
	vbuf := &bytes.Buffer{}
	// per-type preprocessing: fix up NElements and reshape value so the
	// generic XDR encoder below emits the expected wire representation
	switch p.Type {
	case _BOOLEAN:
		p.NElements = 0
	case _BYTE:
		value = int8(value.(uint8))
	case _UINT8:
		value = int(int8(value.(uint8)))
	case _BYTE_ARRAY:
		p.NElements = uint32(len(value.([]byte)))
		n := int(p.NElements)
		arrType := reflect.ArrayOf(n, reflect.TypeOf(byte(0)))
		arr := reflect.New(arrType).Elem()
		for i, b := range value.([]byte) {
			arr.Index(i).SetUint(uint64(b))
		}
		value = arr.Interface()
	case _BOOLEAN_ARRAY:
		p.NElements = uint32(len(value.([]bool)))
	case _INT8_ARRAY:
		p.NElements = uint32(len(value.([]int8)))
	case _INT16_ARRAY:
		p.NElements = uint32(len(value.([]int16)))
	case _INT32_ARRAY:
		p.NElements = uint32(len(value.([]int32)))
	case _INT64_ARRAY:
		p.NElements = uint32(len(value.([]int64)))
	case _UINT8_ARRAY:
		// this one is weird since UINT8s are encoded as char
		// aka int32s... :(
		p.NElements = uint32(len(value.([]uint8)))
		n := int(p.NElements)
		sliceType := reflect.SliceOf(reflect.TypeOf(int32(0)))
		slice := reflect.MakeSlice(sliceType, n, n)
		for i, b := range value.([]uint8) {
			slice.Index(i).SetInt(int64(int8(b)))
		}
		value = slice.Interface()
	case _UINT16_ARRAY:
		p.NElements = uint32(len(value.([]uint16)))
	case _UINT32_ARRAY:
		p.NElements = uint32(len(value.([]uint32)))
	case _UINT64_ARRAY:
		p.NElements = uint32(len(value.([]uint64)))
	case _STRING_ARRAY:
		p.NElements = uint32(len(value.([]string)))
		arrType := reflect.ArrayOf(int(p.NElements), reflect.TypeOf(""))
		arr := reflect.New(arrType).Elem()
		for i, b := range value.([]string) {
			arr.Index(i).SetString(b)
		}
		value = arr.Interface()
	case _NVLIST:
		// nested lists are pre-encoded into vbuf
		if err := encodeList(vbuf, reflect.ValueOf(value)); err != nil {
			return err
		}
		p.data = vbuf.Bytes()
	case _NVLIST_ARRAY:
		p.NElements = uint32(len(value.([]map[string]interface{})))
		for _, l := range value.([]map[string]interface{}) {
			if err := encodeList(vbuf, reflect.ValueOf(l)); err != nil {
				return err
			}
		}
		p.data = vbuf.Bytes()
	}
	// anything not already serialized above (and not a bare BOOLEAN,
	// which has no payload) goes through the generic XDR encoder
	if vbuf.Len() == 0 && p.Type != _BOOLEAN {
		_, err := xdr.NewEncoder(vbuf).Encode(value)
		if err != nil {
			return err
		}
	}
	p.EncodedSize = uint32(p.encodedSize())
	p.DecodedSize = uint32(p.decodedSize())
	// header first, then the value payload
	pbuf := &bytes.Buffer{}
	_, err := xdr.NewEncoder(pbuf).Encode(p)
	if err != nil {
		return err
	}
	_, err = pbuf.WriteTo(w)
	if err != nil {
		return err
	}
	_, err = vbuf.WriteTo(w)
	if err != nil {
		return err
	}
	return nil
}
remove always ignored int return from encodeStruct
package nv
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"reflect"
"sort"
xdr "github.com/davecgh/go-xdr/xdr2"
)
func Encode(i interface{}) ([]byte, error) {
if i == nil {
return nil, errors.New("can not encode a nil pointer")
}
v := reflect.ValueOf(i)
if !v.IsValid() {
return nil, fmt.Errorf("type '%s' is invalid", v.Kind().String())
}
var err error
buff := bytes.NewBuffer(nil)
if err = binary.Write(buff, binary.BigEndian, encoding{Encoding: 1, Endianess: 1}); err != nil {
return nil, err
}
if err = encodeList(buff, v); err != nil {
return nil, err
}
return buff.Bytes(), nil
}
func encodeList(w io.Writer, v reflect.Value) error {
var err error
if err = binary.Write(w, binary.BigEndian, header{Flag: _UNIQUE_NAME}); err != nil {
return err
}
v = deref(v)
switch v.Kind() {
case reflect.Struct:
if err := encodeStruct(v, w); err != nil {
return err
}
case reflect.Map:
keys := make([]string, len(v.MapKeys()))
for i, k := range v.MapKeys() {
keys[i] = k.Interface().(string)
}
sort.Strings(keys)
for _, name := range keys {
v := v.MapIndex(reflect.ValueOf(name))
if err := encodeItem(w, name, nil, v); err != nil {
return err
}
}
err = binary.Write(w, binary.BigEndian, uint64(0))
default:
return fmt.Errorf("invalid type '%s', must be a struct", v.Kind().String())
}
return err
}
// encodeStruct writes one nvpair per settable (exported) field of the
// struct v, then the 8-byte zero terminator that ends an nvlist.
func encodeStruct(v reflect.Value, w io.Writer) error {
	var firstErr error
	forEachField(v, func(i int, field reflect.Value) bool {
		if !field.CanSet() {
			// unexported field: nothing to encode, keep walking
			return true
		}
		// field name defaults to the Go name; a non-empty first tag overrides it
		name := v.Type().Field(i).Name
		tags := getTags(i, v)
		if len(tags) > 0 && tags[0] != "" {
			name = tags[0]
		}
		firstErr = encodeItem(w, name, tags, field)
		return firstErr == nil
	})
	if firstErr != nil {
		return firstErr
	}
	return binary.Write(w, binary.BigEndian, uint64(0))
}
// encodeItem writes a single name/value pair to w. tags comes from the
// struct field (nil for map entries) and may force byte/uint8 handling
// via its second element. field is dereferenced before inspection;
// nested structs and maps recurse through encodeList as embedded
// nvlists.
func encodeItem(w io.Writer, name string, tags []string, field reflect.Value) error {
	field = deref(field)

	// Scalar Go kinds -> nvlist data types.
	var types = map[reflect.Kind]dataType{
		reflect.Bool:    _BOOLEAN_VALUE,
		reflect.Float32: _DOUBLE,
		reflect.Float64: _DOUBLE,
		reflect.Int16:   _INT16,
		reflect.Int32:   _INT32,
		reflect.Int64:   _INT64,
		reflect.Int8:    _INT8,
		reflect.Int:     _INT32,
		reflect.Map:     _NVLIST,
		reflect.String:  _STRING,
		reflect.Struct:  _NVLIST,
		reflect.Uint16:  _UINT16,
		reflect.Uint32:  _UINT32,
		reflect.Uint64:  _UINT64,
		reflect.Uint8:   _UINT8,
		reflect.Uint:    _UINT32,
	}

	// Element kinds of slices/arrays -> nvlist array data types.
	var sliceTypes = map[reflect.Kind]dataType{
		reflect.Bool:   _BOOLEAN_ARRAY,
		reflect.Int16:  _INT16_ARRAY,
		reflect.Int32:  _INT32_ARRAY,
		reflect.Int64:  _INT64_ARRAY,
		reflect.Int8:   _INT8_ARRAY,
		reflect.Int:    _INT32_ARRAY,
		reflect.Map:    _NVLIST_ARRAY,
		reflect.String: _STRING_ARRAY,
		reflect.Struct: _NVLIST_ARRAY,
		reflect.Uint16: _UINT16_ARRAY,
		reflect.Uint32: _UINT32_ARRAY,
		reflect.Uint64: _UINT64_ARRAY,
		reflect.Uint8:  _UINT8_ARRAY,
		reflect.Uint:   _UINT32_ARRAY,
	}

	// tags[1], when present, can force _BYTE or _UINT8 encoding below.
	var tagType dataType
	if len(tags) > 1 {
		if tags[1] == "byte" {
			tagType = _BYTE
		} else if tags[1] == "uint8" {
			tagType = _UINT8
		}
	}

	p := pair{
		Name:      name,
		NElements: 1,
	}

	// Default type from the scalar table; the switch below refines or
	// overrides it for special kinds.
	var ok bool
	p.Type, ok = types[field.Kind()]
	switch field.Kind() {
	case reflect.Bool:
		// A named "Boolean" type is a presence-only flag with no value.
		if field.Type().Name() == "Boolean" {
			p.Type = _BOOLEAN
		}
	case reflect.Interface:
		// Unwrap the interface and retry with the concrete value.
		return encodeItem(w, name, tags, reflect.ValueOf(field.Interface()))
	case reflect.Slice, reflect.Array:
		p.Type, ok = sliceTypes[field.Type().Elem().Kind()]
		switch tagType {
		case _BYTE:
			p.Type = _BYTE_ARRAY
		case _UINT8:
			p.Type = _UINT8_ARRAY
		}
	case reflect.Int64:
		if field.Type().String() == "time.Duration" {
			p.Type = _HRTIME
		}
	case reflect.Uint8:
		switch tagType {
		case _BYTE:
			p.Type = _BYTE
		case _UINT8:
			p.Type = _UINT8
		}
	}
	if !ok {
		return fmt.Errorf("unknown type: %v", field.Kind())
	}

	p.data = field.Interface()
	value := p.data
	// vbuf accumulates the encoded value; it is flushed to w after the
	// pair header below.
	vbuf := &bytes.Buffer{}
	switch p.Type {
	case _BOOLEAN:
		// Presence-only: no value payload follows.
		p.NElements = 0
	case _BYTE:
		value = int8(value.(uint8))
	case _UINT8:
		value = int(int8(value.(uint8)))
	case _BYTE_ARRAY:
		p.NElements = uint32(len(value.([]byte)))
		n := int(p.NElements)
		arrType := reflect.ArrayOf(n, reflect.TypeOf(byte(0)))
		arr := reflect.New(arrType).Elem()
		for i, b := range value.([]byte) {
			arr.Index(i).SetUint(uint64(b))
		}
		value = arr.Interface()
	case _BOOLEAN_ARRAY:
		p.NElements = uint32(len(value.([]bool)))
	case _INT8_ARRAY:
		p.NElements = uint32(len(value.([]int8)))
	case _INT16_ARRAY:
		p.NElements = uint32(len(value.([]int16)))
	case _INT32_ARRAY:
		p.NElements = uint32(len(value.([]int32)))
	case _INT64_ARRAY:
		p.NElements = uint32(len(value.([]int64)))
	case _UINT8_ARRAY:
		// this one is weird since UINT8s are encoded as char
		// aka int32s... :(
		p.NElements = uint32(len(value.([]uint8)))
		n := int(p.NElements)
		sliceType := reflect.SliceOf(reflect.TypeOf(int32(0)))
		slice := reflect.MakeSlice(sliceType, n, n)
		for i, b := range value.([]uint8) {
			slice.Index(i).SetInt(int64(int8(b)))
		}
		value = slice.Interface()
	case _UINT16_ARRAY:
		p.NElements = uint32(len(value.([]uint16)))
	case _UINT32_ARRAY:
		p.NElements = uint32(len(value.([]uint32)))
	case _UINT64_ARRAY:
		p.NElements = uint32(len(value.([]uint64)))
	case _STRING_ARRAY:
		// XDR encodes a fixed-size array without a length prefix, so
		// convert the slice into an array of the exact length.
		p.NElements = uint32(len(value.([]string)))
		arrType := reflect.ArrayOf(int(p.NElements), reflect.TypeOf(""))
		arr := reflect.New(arrType).Elem()
		for i, b := range value.([]string) {
			arr.Index(i).SetString(b)
		}
		value = arr.Interface()
	case _NVLIST:
		// Nested list: pre-encode it so the pair sizes can be computed.
		if err := encodeList(vbuf, reflect.ValueOf(value)); err != nil {
			return err
		}
		p.data = vbuf.Bytes()
	case _NVLIST_ARRAY:
		p.NElements = uint32(len(value.([]map[string]interface{})))
		for _, l := range value.([]map[string]interface{}) {
			if err := encodeList(vbuf, reflect.ValueOf(l)); err != nil {
				return err
			}
		}
		p.data = vbuf.Bytes()
	}

	// Anything not already pre-encoded above (and not a presence-only
	// boolean) is XDR-encoded here.
	if vbuf.Len() == 0 && p.Type != _BOOLEAN {
		_, err := xdr.NewEncoder(vbuf).Encode(value)
		if err != nil {
			return err
		}
	}
	p.EncodedSize = uint32(p.encodedSize())
	p.DecodedSize = uint32(p.decodedSize())

	// Write the pair header first, then the encoded value.
	pbuf := &bytes.Buffer{}
	_, err := xdr.NewEncoder(pbuf).Encode(p)
	if err != nil {
		return err
	}

	_, err = pbuf.WriteTo(w)
	if err != nil {
		return err
	}
	_, err = vbuf.WriteTo(w)
	if err != nil {
		return err
	}
	return nil
}
|
package otto
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"github.com/hashicorp/otto/app"
"github.com/hashicorp/otto/appfile"
"github.com/hashicorp/otto/context"
"github.com/hashicorp/otto/directory"
"github.com/hashicorp/otto/foundation"
"github.com/hashicorp/otto/infrastructure"
"github.com/hashicorp/otto/ui"
"github.com/hashicorp/terraform/dag"
)
// Core is the main struct to use to interact with Otto as a library.
type Core struct {
	appfile         *appfile.File                           // root Appfile (uncompiled view)
	appfileCompiled *appfile.Compiled                       // compiled Appfile with dependency graph
	apps            map[app.Tuple]app.Factory               // available app implementations
	dir             directory.Backend                       // storage backend for Appfile data
	infras          map[string]infrastructure.Factory       // available infrastructure implementations
	foundationMap   map[foundation.Tuple]foundation.Factory // available foundation implementations
	dataDir         string                                  // global data dir shared by all Otto processes
	localDir        string                                  // data local to this single Appfile
	compileDir      string                                  // compile output dir, cleared on each Compile
	ui              ui.Ui                                   // user interface for messages/prompts
}
// CoreConfig is configuration for creating a new core with NewCore.
type CoreConfig struct {
	// DataDir is the directory where local data will be stored that
	// is global to all Otto processes.
	DataDir string

	// LocalDir is the directory where data local to this single Appfile
	// will be stored. This isn't necessarilly cleared for compilation.
	LocalDir string

	// CompileDir is the directory where compiled data will be written.
	// Each compilation will clear this directory.
	CompileDir string

	// Appfile is the appfile that this core will be using for configuration.
	// This must be a compiled Appfile.
	Appfile *appfile.Compiled

	// Directory is the directory where data is stored about this Appfile.
	Directory directory.Backend

	// Apps is the map of available app implementations.
	Apps map[app.Tuple]app.Factory

	// Infrastructures is the map of available infrastructures. The
	// value is a factory that can create the infrastructure impl.
	Infrastructures map[string]infrastructure.Factory

	// Foundations is the map of available foundations. The
	// value is a factory that can create the impl.
	Foundations map[foundation.Tuple]foundation.Factory

	// Ui is the Ui that will be used to communicate with the user.
	Ui ui.Ui
}
// NewCore creates a new core.
//
// Once this function is called, this CoreConfig should not be used again
// or modified, since the Core may use parts of it without deep copying.
//
// The returned error is currently always nil; the error result exists
// so future validation of the config can be added without breaking
// callers.
func NewCore(c *CoreConfig) (*Core, error) {
	return &Core{
		appfile:         c.Appfile.File,
		appfileCompiled: c.Appfile,
		apps:            c.Apps,
		dir:             c.Directory,
		infras:          c.Infrastructures,
		foundationMap:   c.Foundations,
		dataDir:         c.DataDir,
		localDir:        c.LocalDir,
		compileDir:      c.CompileDir,
		ui:              c.Ui,
	}, nil
}
// Compile takes the Appfile and compiles all the resulting data.
//
// It compiles the infrastructure, every foundation, and then walks the
// dependency graph compiling each application (dependencies before the
// root). Any prior contents of the compile directory are removed first.
func (c *Core) Compile() error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// Get all the foundation implementations (which are tied as singletons
	// to the infrastructure).
	foundations, foundationCtxs, err := c.foundations()
	if err != nil {
		return err
	}

	// Delete the prior output directory
	log.Printf("[INFO] deleting prior compilation contents: %s", c.compileDir)
	if err := os.RemoveAll(c.compileDir); err != nil {
		return err
	}

	// Compile the infrastructure for our application
	log.Printf("[INFO] running infra compile...")
	c.ui.Message("Compiling infra...")
	if _, err := infra.Compile(infraCtx); err != nil {
		return err
	}

	// Compile the foundation (not tied to any app). This compilation
	// of the foundation is used for `otto infra` to set everything up.
	log.Printf("[INFO] running foundation compilations")
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		c.ui.Message(fmt.Sprintf(
			"Compiling foundation: %s", ctx.Tuple.Type))
		if _, err := f.Compile(ctx); err != nil {
			return err
		}
	}

	// Walk through the dependencies and compile all of them.
	// We have to compile every dependency for dev building.
	var resultLock sync.Mutex
	results := make([]*app.CompileResult, 0, len(c.appfileCompiled.Graph.Vertices()))
	// Name the callback parameter appImpl (matching Dev) so it doesn't
	// shadow the imported `app` package.
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		if !root {
			c.ui.Header(fmt.Sprintf(
				"Compiling dependency '%s'...",
				ctx.Appfile.Application.Name))
		} else {
			// No format args, so avoid a no-op fmt.Sprintf
			// (flagged by `go vet`).
			c.ui.Header("Compiling main application...")
		}

		// If this is the root, we set the dev dep fragments.
		if root {
			// We grab the lock just in case although if we're the
			// root this should be serialized.
			resultLock.Lock()
			ctx.DevDepFragments = make([]string, 0, len(results))
			for _, result := range results {
				if result.DevDepFragmentPath != "" {
					ctx.DevDepFragments = append(
						ctx.DevDepFragments, result.DevDepFragmentPath)
				}
			}
			resultLock.Unlock()
		}

		// Build the contexts for the foundations. We use this
		// to also compile the list of foundation dirs.
		ctx.FoundationDirs = make([]string, len(foundations))
		for i := range foundations {
			fCtx := foundationCtxs[i]
			fCtx.Dir = filepath.Join(ctx.Dir, fmt.Sprintf("foundation-%s", fCtx.Tuple.Type))
			ctx.FoundationDirs[i] = fCtx.Dir
		}

		// Compile!
		result, err := appImpl.Compile(ctx)
		if err != nil {
			return err
		}

		// Compile the foundations for this app
		subdirs := []string{"app-dev", "app-dev-dep", "app-deploy"}
		for i, f := range foundations {
			fCtx := foundationCtxs[i]
			if result != nil {
				fCtx.AppConfig = &result.FoundationConfig
			}
			if _, err := f.Compile(fCtx); err != nil {
				return err
			}

			// Make sure the subdirs exist
			for _, dir := range subdirs {
				if err := os.MkdirAll(filepath.Join(fCtx.Dir, dir), 0755); err != nil {
					return err
				}
			}
		}

		// Store the compilation result for later
		resultLock.Lock()
		defer resultLock.Unlock()
		results = append(results, result)
		return nil
	})
	return err
}
// walk traverses the compiled Appfile dependency graph, invoking f for
// each vertex with its App implementation, its app context, and whether
// the vertex is the root application. The first error stops any
// remaining disjoint parts of the walk early.
func (c *Core) walk(f func(app.App, *app.Context, bool) error) error {
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return fmt.Errorf(
			"Error loading app: %s", err)
	}

	// Walk the appfile graph.
	var stop int32 = 0
	return c.appfileCompiled.Graph.Walk(func(raw dag.Vertex) (err error) {
		// If we're told to stop (something else had an error), then stop early.
		// Graphs walks by default will complete all disjoint parts of the
		// graph before failing, but Otto doesn't have to do that.
		if atomic.LoadInt32(&stop) != 0 {
			return nil
		}

		// If we exit with an error, then mark the stop atomic.
		// (The named return err lets this deferred func observe the
		// final error value, whichever return path is taken.)
		defer func() {
			if err != nil {
				atomic.StoreInt32(&stop, 1)
			}
		}()

		// Convert to the rich vertex type so that we can access data
		v := raw.(*appfile.CompiledGraphVertex)

		// Get the context and app for this appfile
		appCtx, err := c.appContext(v.File)
		if err != nil {
			return fmt.Errorf(
				"Error loading Appfile for '%s': %s",
				dag.VertexName(raw), err)
		}
		app, err := c.app(appCtx)
		if err != nil {
			return fmt.Errorf(
				"Error loading App implementation for '%s': %s",
				dag.VertexName(raw), err)
		}

		// Call our callback
		return f(app, appCtx, raw == root)
	})
}
// Build builds the deployable artifact for the currently compiled
// Appfile.
//
// Only the root application is built; infrastructure credentials are
// fetched first (prompting the user if needed) and shared with the
// app context.
func (c *Core) Build() error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err := c.creds(infra, infraCtx); err != nil {
		return err
	}

	// We only use the root application for this task, upstream dependencies
	// don't have an effect on the build process.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Just update our shared data so we get the creds
	rootCtx.Shared = infraCtx.Shared

	return rootApp.Build(rootCtx)
}
// Deploy deploys the application.
//
// Deploy supports subactions, which can be specified with action and args.
// Action can be "" to get the default deploy behavior.
//
// Only the root application is deployed. Credentials are skipped for
// the "help" and "info" actions since those don't touch infrastructure.
func (c *Core) Deploy(action string, args []string) error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// Special case: don't try to fetch creds during `help` or `info`
	if action != "help" && action != "info" {
		if err := c.creds(infra, infraCtx); err != nil {
			return err
		}
	}

	// TODO: Verify that upstream dependencies are deployed

	// We only use the root application for this task, upstream dependencies
	// don't have an effect on the build process.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Update our shared data so we get the creds
	rootCtx.Shared = infraCtx.Shared

	// Pass through the requested action
	rootCtx.Action = action
	rootCtx.ActionArgs = args

	return rootApp.Deploy(rootCtx)
}
// Dev starts a dev environment for the current application. For destroying
// and other tasks against the dev environment, use the generic `Execute`
// method.
//
// All dependency dev artifacts are built (or loaded from cache) first,
// then the root application's dev environment is started.
func (c *Core) Dev() error {
	// We need to get the root data separately since we need that for
	// all the function calls into the dependencies.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Go through all the dependencies and build their immutable
	// dev environment pieces for the final configuration.
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		// If it is the root, we just return and do nothing else since
		// the root is a special case where we're building the actual
		// dev environment.
		if root {
			return nil
		}

		// Get the path to where we'd cache the dependency if we have
		// cached it...
		cachePath := filepath.Join(ctx.CacheDir, "dev-dep.json")

		// Check if we've cached this. If so, then use the cache.
		if _, err := app.ReadDevDep(cachePath); err == nil {
			ctx.Ui.Header(fmt.Sprintf(
				"Using cached dev dependency for '%s'",
				ctx.Appfile.Application.Name))
			return nil
		}

		// Build the development dependency
		dep, err := appImpl.DevDep(rootCtx, ctx)
		if err != nil {
			return fmt.Errorf(
				"Error building dependency for dev '%s': %s",
				ctx.Appfile.Application.Name,
				err)
		}

		// If we have a dependency with files, then verify the files
		// and store it in our cache directory so we can retrieve it
		// later.
		if dep != nil && len(dep.Files) > 0 {
			if err := dep.RelFiles(ctx.CacheDir); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}

			if err := app.WriteDevDep(cachePath, dep); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	// All the development dependencies are built/loaded. We now have
	// everything we need to build the complete development environment.
	return rootApp.Dev(rootCtx)
}
// Infra manages the infrastructure for this Appfile.
//
// Infra supports subactions, which can be specified with action and args.
// Infra recognizes two special actions: "" (blank string) and "destroy".
// The former expects to create or update the complete infrastructure,
// and the latter will destroy the infrastructure.
//
// Ordering matters: on create, the main infrastructure runs before the
// foundations; on destroy, the foundations run first and the main
// infrastructure last.
func (c *Core) Infra(action string, args []string) error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err := c.creds(infra, infraCtx); err != nil {
		return err
	}

	// Set the action and action args
	infraCtx.Action = action
	infraCtx.ActionArgs = args

	// If we need the foundations, then get them
	var foundations []foundation.Foundation
	var foundationCtxs []*foundation.Context
	if action == "" || action == "destroy" {
		foundations, foundationCtxs, err = c.foundations()
		if err != nil {
			return err
		}
	}

	// If we're doing anything other than destroying, then
	// run the execution now.
	if action != "destroy" {
		c.ui.Header("Building main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// If we have any foundations, we now run their infra deployment.
	// This should only ever execute if action is to deploy or destroy,
	// since those are the only cases that we load foundations.
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		ctx.Action = action
		ctx.ActionArgs = args
		// Foundations reuse the credentials fetched for the main infra.
		ctx.InfraCreds = infraCtx.InfraCreds

		log.Printf(
			"[INFO] infra action '%s' on foundation '%s'",
			action, ctx.Tuple.Type)
		switch action {
		case "":
			c.ui.Header(fmt.Sprintf(
				"Building infrastructure for foundation: %s",
				ctx.Tuple.Type))
		case "destroy":
			c.ui.Header(fmt.Sprintf(
				"Destroying infrastructure for foundation: %s",
				ctx.Tuple.Type))
		}

		if err := f.Infra(ctx); err != nil {
			return err
		}
	}

	// If the action is destroy, we run the infrastructure execution
	// here. We mirror creation above since in the destruction case
	// we need to first destroy all applications and foundations that
	// are using this infra.
	if action == "destroy" {
		c.ui.Header("Destroying main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// Output the right thing
	switch action {
	case "":
		infraCtx.Ui.Header("[green]Infrastructure successfully created!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to deploy this application\n" +
				"is now available. You can now deploy using `otto deploy`.")
	case "destroy":
		infraCtx.Ui.Header("[green]Infrastructure successfully destroyed!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to run this application and\n" +
				"all other applications in this project has been destroyed.")
	}

	return nil
}
// Status reports the status of this Appfile's deployment.
//
// NOTE(review): currently a stub — it performs no work and always
// returns nil.
func (c *Core) Status() error {
	return nil
}
// Execute executes the given task for this Appfile.
func (c *Core) Execute(opts *ExecuteOpts) error {
	// Dispatch on the requested task; only the dev task is supported.
	if opts.Task == ExecuteTaskDev {
		return c.executeApp(opts)
	}
	return fmt.Errorf("unknown task: %s", opts.Task)
}
// creds reads the credentials if we have them, or queries the user
// for infrastructure credentials using the infrastructure if we
// don't have them.
//
// Credentials are cached encrypted at <localDir>/creds; the user is
// asked for the decryption password when a cache exists, or for the
// raw credentials plus a new password otherwise. On success the creds
// are stored on infraCtx.InfraCreds.
func (c *Core) creds(
	infra infrastructure.Infrastructure,
	infraCtx *infrastructure.Context) error {
	// Output to the user some information about what is about to
	// happen here...
	infraCtx.Ui.Header("Detecting infrastructure credentials...")

	// The path to where we put the encrypted creds
	path := filepath.Join(c.localDir, "creds")

	// Determine whether we believe the creds exist already or not
	var exists bool
	if _, err := os.Stat(path); err == nil {
		exists = true
	} else {
		// Ensure the parent directory exists so we can write the
		// encrypted creds later.
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return err
		}
	}

	var creds map[string]string
	if exists {
		infraCtx.Ui.Message(
			"Cached and encrypted infrastructure credentials found.\n" +
				"Otto will now ask you for the password to decrypt these\n" +
				"credentials.\n\n")

		// If they exist, ask for the password
		value, err := infraCtx.Ui.Input(&ui.InputOpts{
			Id:          "creds_password",
			Query:       "Encrypted Credentials Password",
			Description: strings.TrimSpace(credsQueryPassExists),
			Hide:        true,
			EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
		})
		if err != nil {
			return err
		}

		// If the password is not blank, then just read the credentials.
		// (A blank password deliberately leaves creds nil so the user
		// is re-asked below.)
		if value != "" {
			plaintext, err := cryptRead(path, value)
			if err == nil {
				err = json.Unmarshal(plaintext, &creds)
			}
			if err != nil {
				return fmt.Errorf(
					"error reading encrypted credentials: %s\n\n"+
						"If this error persists, you can force Otto to ask for credentials\n"+
						"again by inputting the empty password as the password.",
					err)
			}
		}
	}

	// If we don't have creds, then we need to query the user via
	// the infrastructure implementation.
	if creds == nil {
		infraCtx.Ui.Message(
			"Existing infrastructure credentials were not found! Otto will\n" +
				"now ask you for infrastructure credentials. These will be encrypted\n" +
				"and saved on disk so this doesn't need to be repeated.\n\n" +
				"IMPORTANT: If you're re-entering new credentials, make sure the\n" +
				"credentials are for the same account, otherwise you may lose\n" +
				"access to your existing infrastructure Otto set up.\n\n")

		var err error
		creds, err = infra.Creds(infraCtx)
		if err != nil {
			return err
		}

		// Now that we have the credentials, we need to ask for the
		// password to encrypt and store them. Loop until the password
		// is non-empty.
		var password string
		for password == "" {
			password, err = infraCtx.Ui.Input(&ui.InputOpts{
				Id:          "creds_password",
				Query:       "Password for Encrypting Credentials",
				Description: strings.TrimSpace(credsQueryPassNew),
				Hide:        true,
				EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
			})
			if err != nil {
				return err
			}
		}

		// With the password, encrypt and write the data
		plaintext, err := json.Marshal(creds)
		if err != nil {
			// creds is a map[string]string, so this shouldn't ever fail
			panic(err)
		}
		if err := cryptWrite(path, password, plaintext); err != nil {
			return fmt.Errorf(
				"error writing encrypted credentials: %s", err)
		}
	}

	// Set the credentials
	infraCtx.InfraCreds = creds
	return nil
}
// executeApp runs the given task against the root application: it
// builds the app context, loads the App implementation, applies the
// requested action/args, and dispatches on the task.
func (c *Core) executeApp(opts *ExecuteOpts) error {
	// Get the infra implementation for this
	appCtx, err := c.appContext(c.appfile)
	if err != nil {
		return err
	}
	app, err := c.app(appCtx)
	if err != nil {
		return err
	}

	// Set the action and action args
	appCtx.Action = opts.Action
	appCtx.ActionArgs = opts.Args

	// Build the infrastructure compilation context
	switch opts.Task {
	case ExecuteTaskDev:
		return app.Dev(appCtx)
	default:
		// Callers (Execute) filter tasks, so reaching here is a
		// programmer error. Fix the "uknown" typo in the message.
		panic(fmt.Sprintf("unknown task: %s", opts.Task))
	}
}
// appContext builds the app.Context for the given Appfile: the tuple
// identifying the app implementation, the compile output directory
// (root app vs. dependency), and the per-app cache directory, which is
// created if necessary.
func (c *Core) appContext(f *appfile.File) (*app.Context, error) {
	// We need the configuration for the active infrastructure
	// so that we can build the tuple below
	config := f.ActiveInfrastructure()
	if config == nil {
		return nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			f.Project.Infrastructure)
	}

	// The tuple we're looking for is the application type, the
	// infrastructure type, and the infrastructure flavor. Build that
	// tuple.
	tuple := app.Tuple{
		App:         f.Application.Type,
		Infra:       f.Project.Infrastructure,
		InfraFlavor: config.Flavor,
	}

	// The output directory for data. This is either the main app so
	// it goes directly into "app" or it is a dependency and goes into
	// a dep folder.
	outputDir := filepath.Join(c.compileDir, "app")
	if id := f.ID; id != c.appfile.ID {
		outputDir = filepath.Join(
			c.compileDir, fmt.Sprintf("dep-%s", id))
	}

	// The cache directory for this app
	cacheDir := filepath.Join(c.dataDir, "cache", f.ID)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return nil, fmt.Errorf(
			"error making cache directory '%s': %s",
			cacheDir, err)
	}

	return &app.Context{
		Dir:         outputDir,
		CacheDir:    cacheDir,
		LocalDir:    c.localDir,
		Tuple:       tuple,
		Appfile:     f,
		Application: f.Application,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// app looks up the App factory matching the context's tuple and
// instantiates the implementation.
func (c *Core) app(ctx *app.Context) (app.App, error) {
	log.Printf("[INFO] Loading app implementation for Tuple: %s", ctx.Tuple)

	// Look for the app impl. factory
	factory := app.TupleMap(c.apps).Lookup(ctx.Tuple)
	if factory == nil {
		return nil, fmt.Errorf(
			"app implementation for tuple not found: %s", ctx.Tuple)
	}

	// Start the impl.
	impl, err := factory()
	if err != nil {
		return nil, fmt.Errorf(
			"app failed to start properly: %s", err)
	}
	return impl, nil
}
// infra looks up the infrastructure factory named by the Appfile's
// project, instantiates it, and builds its context (configuration,
// output directory, shared data).
func (c *Core) infra() (infrastructure.Infrastructure, *infrastructure.Context, error) {
	// Get the infrastructure factory
	f, ok := c.infras[c.appfile.Project.Infrastructure]
	if !ok {
		return nil, nil, fmt.Errorf(
			"infrastructure type not supported: %s",
			c.appfile.Project.Infrastructure)
	}

	// Get the infrastructure configuration
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// Start the infrastructure implementation
	infra, err := f()
	if err != nil {
		return nil, nil, err
	}

	// The output directory for data
	outputDir := filepath.Join(
		c.compileDir, fmt.Sprintf("infra-%s", c.appfile.Project.Infrastructure))

	// Build the context
	return infra, &infrastructure.Context{
		Dir:   outputDir,
		Infra: config,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// foundations instantiates every foundation configured for the active
// infrastructure and builds a parallel slice of contexts for them.
// Returns (nil, nil, nil) when no foundations are configured.
func (c *Core) foundations() ([]foundation.Foundation, []*foundation.Context, error) {
	// Get the infrastructure configuration
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// If there are no foundations, return nothing.
	if len(config.Foundations) == 0 {
		return nil, nil, nil
	}

	// Create the arrays for our list. Both slices grow in lockstep,
	// one entry per configured foundation.
	fs := make([]foundation.Foundation, 0, len(config.Foundations))
	ctxs := make([]*foundation.Context, 0, len(config.Foundations))
	for _, f := range config.Foundations {
		// The tuple we're looking for is the foundation type, the
		// infrastructure type, and the infrastructure flavor. Build that
		// tuple.
		tuple := foundation.Tuple{
			Type:        f.Name,
			Infra:       config.Type,
			InfraFlavor: config.Flavor,
		}

		// Look for the matching foundation
		fun := foundation.TupleMap(c.foundationMap).Lookup(tuple)
		if fun == nil {
			return nil, nil, fmt.Errorf(
				"foundation implementation for tuple not found: %s",
				tuple)
		}

		// Instantiate the implementation
		impl, err := fun()
		if err != nil {
			return nil, nil, err
		}

		// The output directory for data
		outputDir := filepath.Join(
			c.compileDir, fmt.Sprintf("foundation-%s", f.Name))

		// Build the context
		ctx := &foundation.Context{
			Config:  f.Config,
			Dir:     outputDir,
			Tuple:   tuple,
			Appfile: c.appfile,
			Shared: context.Shared{
				InstallDir: filepath.Join(c.dataDir, "binaries"),
				Directory:  c.dir,
				Ui:         c.ui,
			},
		}

		// Add to our results
		fs = append(fs, impl)
		ctxs = append(ctxs, ctx)
	}

	return fs, ctxs, nil
}
// credsQueryPassExists is the prompt description shown when asking for
// the password that decrypts previously saved infrastructure
// credentials (see Core.creds).
const credsQueryPassExists = `
Infrastructure credentials are required for this operation. Otto found
saved credentials that are password protected. Please enter the password
to decrypt these credentials. You may also just hit <enter> and leave
the password blank to force Otto to ask for the credentials again.
`

// credsQueryPassNew is the prompt description shown when asking for a
// password to encrypt newly entered credentials (see Core.creds).
const credsQueryPassNew = `
This password will be used to encrypt and save the credentials so they
don't need to be repeated multiple times.
`
otto: Status
package otto
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"github.com/hashicorp/otto/app"
"github.com/hashicorp/otto/appfile"
"github.com/hashicorp/otto/context"
"github.com/hashicorp/otto/directory"
"github.com/hashicorp/otto/foundation"
"github.com/hashicorp/otto/infrastructure"
"github.com/hashicorp/otto/ui"
"github.com/hashicorp/terraform/dag"
)
// Core is the main struct to use to interact with Otto as a library.
type Core struct {
	appfile         *appfile.File                           // root Appfile (uncompiled view)
	appfileCompiled *appfile.Compiled                       // compiled Appfile with dependency graph
	apps            map[app.Tuple]app.Factory               // available app implementations
	dir             directory.Backend                       // storage backend for Appfile data
	infras          map[string]infrastructure.Factory       // available infrastructure implementations
	foundationMap   map[foundation.Tuple]foundation.Factory // available foundation implementations
	dataDir         string                                  // global data dir shared by all Otto processes
	localDir        string                                  // data local to this single Appfile
	compileDir      string                                  // compile output dir, cleared on each Compile
	ui              ui.Ui                                   // user interface for messages/prompts
}
// CoreConfig is configuration for creating a new core with NewCore.
type CoreConfig struct {
	// DataDir is the directory where local data will be stored that
	// is global to all Otto processes.
	DataDir string

	// LocalDir is the directory where data local to this single Appfile
	// will be stored. This isn't necessarilly cleared for compilation.
	LocalDir string

	// CompileDir is the directory where compiled data will be written.
	// Each compilation will clear this directory.
	CompileDir string

	// Appfile is the appfile that this core will be using for configuration.
	// This must be a compiled Appfile.
	Appfile *appfile.Compiled

	// Directory is the directory where data is stored about this Appfile.
	Directory directory.Backend

	// Apps is the map of available app implementations.
	Apps map[app.Tuple]app.Factory

	// Infrastructures is the map of available infrastructures. The
	// value is a factory that can create the infrastructure impl.
	Infrastructures map[string]infrastructure.Factory

	// Foundations is the map of available foundations. The
	// value is a factory that can create the impl.
	Foundations map[foundation.Tuple]foundation.Factory

	// Ui is the Ui that will be used to communicate with the user.
	Ui ui.Ui
}
// NewCore creates a new core.
//
// Once this function is called, this CoreConfig should not be used again
// or modified, since the Core may use parts of it without deep copying.
//
// The returned error is currently always nil; the error result exists
// so future validation of the config can be added without breaking
// callers.
func NewCore(c *CoreConfig) (*Core, error) {
	return &Core{
		appfile:         c.Appfile.File,
		appfileCompiled: c.Appfile,
		apps:            c.Apps,
		dir:             c.Directory,
		infras:          c.Infrastructures,
		foundationMap:   c.Foundations,
		dataDir:         c.DataDir,
		localDir:        c.LocalDir,
		compileDir:      c.CompileDir,
		ui:              c.Ui,
	}, nil
}
// Compile takes the Appfile and compiles all the resulting data.
//
// It compiles the infrastructure, every foundation, and then walks the
// dependency graph compiling each application (dependencies before the
// root). Any prior contents of the compile directory are removed first.
func (c *Core) Compile() error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// Get all the foundation implementations (which are tied as singletons
	// to the infrastructure).
	foundations, foundationCtxs, err := c.foundations()
	if err != nil {
		return err
	}

	// Delete the prior output directory
	log.Printf("[INFO] deleting prior compilation contents: %s", c.compileDir)
	if err := os.RemoveAll(c.compileDir); err != nil {
		return err
	}

	// Compile the infrastructure for our application
	log.Printf("[INFO] running infra compile...")
	c.ui.Message("Compiling infra...")
	if _, err := infra.Compile(infraCtx); err != nil {
		return err
	}

	// Compile the foundation (not tied to any app). This compilation
	// of the foundation is used for `otto infra` to set everything up.
	log.Printf("[INFO] running foundation compilations")
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		c.ui.Message(fmt.Sprintf(
			"Compiling foundation: %s", ctx.Tuple.Type))
		if _, err := f.Compile(ctx); err != nil {
			return err
		}
	}

	// Walk through the dependencies and compile all of them.
	// We have to compile every dependency for dev building.
	var resultLock sync.Mutex
	results := make([]*app.CompileResult, 0, len(c.appfileCompiled.Graph.Vertices()))
	// Name the callback parameter appImpl (matching Dev) so it doesn't
	// shadow the imported `app` package.
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		if !root {
			c.ui.Header(fmt.Sprintf(
				"Compiling dependency '%s'...",
				ctx.Appfile.Application.Name))
		} else {
			// No format args, so avoid a no-op fmt.Sprintf
			// (flagged by `go vet`).
			c.ui.Header("Compiling main application...")
		}

		// If this is the root, we set the dev dep fragments.
		if root {
			// We grab the lock just in case although if we're the
			// root this should be serialized.
			resultLock.Lock()
			ctx.DevDepFragments = make([]string, 0, len(results))
			for _, result := range results {
				if result.DevDepFragmentPath != "" {
					ctx.DevDepFragments = append(
						ctx.DevDepFragments, result.DevDepFragmentPath)
				}
			}
			resultLock.Unlock()
		}

		// Build the contexts for the foundations. We use this
		// to also compile the list of foundation dirs.
		ctx.FoundationDirs = make([]string, len(foundations))
		for i := range foundations {
			fCtx := foundationCtxs[i]
			fCtx.Dir = filepath.Join(ctx.Dir, fmt.Sprintf("foundation-%s", fCtx.Tuple.Type))
			ctx.FoundationDirs[i] = fCtx.Dir
		}

		// Compile!
		result, err := appImpl.Compile(ctx)
		if err != nil {
			return err
		}

		// Compile the foundations for this app
		subdirs := []string{"app-dev", "app-dev-dep", "app-deploy"}
		for i, f := range foundations {
			fCtx := foundationCtxs[i]
			if result != nil {
				fCtx.AppConfig = &result.FoundationConfig
			}
			if _, err := f.Compile(fCtx); err != nil {
				return err
			}

			// Make sure the subdirs exist
			for _, dir := range subdirs {
				if err := os.MkdirAll(filepath.Join(fCtx.Dir, dir), 0755); err != nil {
					return err
				}
			}
		}

		// Store the compilation result for later
		resultLock.Lock()
		defer resultLock.Unlock()
		results = append(results, result)
		return nil
	})
	return err
}
// walk traverses the compiled Appfile dependency graph, invoking f for
// each vertex with its App implementation, its app context, and whether
// the vertex is the root application. The first error stops any
// remaining disjoint parts of the walk early.
func (c *Core) walk(f func(app.App, *app.Context, bool) error) error {
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return fmt.Errorf(
			"Error loading app: %s", err)
	}

	// Walk the appfile graph.
	var stop int32 = 0
	return c.appfileCompiled.Graph.Walk(func(raw dag.Vertex) (err error) {
		// If we're told to stop (something else had an error), then stop early.
		// Graphs walks by default will complete all disjoint parts of the
		// graph before failing, but Otto doesn't have to do that.
		if atomic.LoadInt32(&stop) != 0 {
			return nil
		}

		// If we exit with an error, then mark the stop atomic.
		// (The named return err lets this deferred func observe the
		// final error value, whichever return path is taken.)
		defer func() {
			if err != nil {
				atomic.StoreInt32(&stop, 1)
			}
		}()

		// Convert to the rich vertex type so that we can access data
		v := raw.(*appfile.CompiledGraphVertex)

		// Get the context and app for this appfile
		appCtx, err := c.appContext(v.File)
		if err != nil {
			return fmt.Errorf(
				"Error loading Appfile for '%s': %s",
				dag.VertexName(raw), err)
		}
		app, err := c.app(appCtx)
		if err != nil {
			return fmt.Errorf(
				"Error loading App implementation for '%s': %s",
				dag.VertexName(raw), err)
		}

		// Call our callback
		return f(app, appCtx, raw == root)
	})
}
// Build builds the deployable artifact for the currently compiled
// Appfile.
func (c *Core) Build() error {
	// Infrastructure implementation and context; credentials are
	// required before any build can talk to the infra.
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err := c.creds(infra, infraCtx); err != nil {
		return err
	}

	// Only the root application matters for building; upstream
	// dependencies have no effect on this process.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	ctx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	impl, err := c.app(ctx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Share the infra data so the app sees the credentials.
	ctx.Shared = infraCtx.Shared
	return impl.Build(ctx)
}
// Deploy deploys the application.
//
// Deploy supports subactions, which can be specified with action and args.
// Action can be "" to get the default deploy behavior.
func (c *Core) Deploy(action string, args []string) error {
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// "help" and "info" never touch the infrastructure, so skip the
	// (potentially interactive) credential lookup for those actions.
	switch action {
	case "help", "info":
		// no creds needed
	default:
		if err := c.creds(infra, infraCtx); err != nil {
			return err
		}
	}

	// TODO: Verify that upstream dependencies are deployed

	// Only the root application is deployed; upstream dependencies
	// have no effect on this process.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	ctx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	impl, err := c.app(ctx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Hand over the creds and the requested subaction.
	ctx.Shared = infraCtx.Shared
	ctx.Action = action
	ctx.ActionArgs = args
	return impl.Deploy(ctx)
}
// Dev starts a dev environment for the current application. For destroying
// and other tasks against the dev environment, use the generic `Execute`
// method.
//
// Dev runs in two phases: every upstream dependency first contributes its
// immutable dev-dep piece (built here or loaded from a per-app cache),
// then the root application builds the actual environment on top of them.
func (c *Core) Dev() error {
	// We need to get the root data separately since we need that for
	// all the function calls into the dependencies.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Go through all the dependencies and build their immutable
	// dev environment pieces for the final configuration.
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		// If it is the root, we just return and do nothing else since
		// the root is a special case where we're building the actual
		// dev environment.
		if root {
			return nil
		}

		// Get the path to where we'd cache the dependency if we have
		// cached it...
		cachePath := filepath.Join(ctx.CacheDir, "dev-dep.json")

		// Check if we've cached this. If so, then use the cache.
		if _, err := app.ReadDevDep(cachePath); err == nil {
			ctx.Ui.Header(fmt.Sprintf(
				"Using cached dev dependency for '%s'",
				ctx.Appfile.Application.Name))
			return nil
		}

		// Build the development dependency
		dep, err := appImpl.DevDep(rootCtx, ctx)
		if err != nil {
			return fmt.Errorf(
				"Error building dependency for dev '%s': %s",
				ctx.Appfile.Application.Name,
				err)
		}

		// If we have a dependency with files, then verify the files
		// and store it in our cache directory so we can retrieve it
		// later.
		if dep != nil && len(dep.Files) > 0 {
			// RelFiles rewrites the file list relative to the cache dir;
			// only after that is the dep worth persisting as JSON.
			if err := dep.RelFiles(ctx.CacheDir); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}
			if err := app.WriteDevDep(cachePath, dep); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	// All the development dependencies are built/loaded. We now have
	// everything we need to build the complete development environment.
	return rootApp.Dev(rootCtx)
}
// Infra manages the infrastructure for this Appfile.
//
// Infra supports subactions, which can be specified with action and args.
// Infra recognizes two special actions: "" (blank string) and "destroy".
// The former expects to create or update the complete infrastructure,
// and the latter will destroy the infrastructure.
func (c *Core) Infra(action string, args []string) error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err := c.creds(infra, infraCtx); err != nil {
		return err
	}

	// Set the action and action args
	infraCtx.Action = action
	infraCtx.ActionArgs = args

	// If we need the foundations, then get them. Foundations are only
	// loaded for full create ("") and destroy runs.
	var foundations []foundation.Foundation
	var foundationCtxs []*foundation.Context
	if action == "" || action == "destroy" {
		foundations, foundationCtxs, err = c.foundations()
		if err != nil {
			return err
		}
	}

	// If we're doing anything other than destroying, then
	// run the execution now.
	if action != "destroy" {
		c.ui.Header("Building main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// If we have any foundations, we now run their infra deployment.
	// This should only ever execute if action is to deploy or destroy,
	// since those are the only cases that we load foundations.
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		ctx.Action = action
		ctx.ActionArgs = args
		ctx.InfraCreds = infraCtx.InfraCreds

		log.Printf(
			"[INFO] infra action '%s' on foundation '%s'",
			action, ctx.Tuple.Type)
		switch action {
		case "":
			c.ui.Header(fmt.Sprintf(
				"Building infrastructure for foundation: %s",
				ctx.Tuple.Type))
		case "destroy":
			c.ui.Header(fmt.Sprintf(
				"Destroying infrastructure for foundation: %s",
				ctx.Tuple.Type))
		}

		if err := f.Infra(ctx); err != nil {
			return err
		}
	}

	// If the action is destroy, we run the infrastructure execution
	// here. We mirror creation above since in the destruction case
	// we need to first destroy all applications and foundations that
	// are using this infra.
	if action == "destroy" {
		c.ui.Header("Destroying main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// Output the right thing
	switch action {
	case "":
		infraCtx.Ui.Header("[green]Infrastructure successfully created!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to deploy this application\n" +
				"is now available. You can now deploy using `otto deploy`.")
	case "destroy":
		infraCtx.Ui.Header("[green]Infrastructure successfully destroyed!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to run this application and\n" +
				"all other applications in this project has been destroyed.")
	}

	return nil
}
// Status outputs to the UI the status of all the stages of this application.
func (c *Core) Status() error {
	infra := c.appfile.ActiveInfrastructure()
	if infra == nil {
		panic("infra not found")
	}

	// We output UI about loading because if the directory is remote
	// (such as Atlas), then this can actually be slow.
	c.ui.Header("Loading status information...")

	// Dev
	c.ui.Message("Loading development status")
	dev, err := c.dir.GetDev(&directory.Dev{Lookup: directory.Lookup{
		AppID: c.appfile.ID}})
	if err != nil {
		return fmt.Errorf(
			"Error loading development status: %s", err)
	}

	// Build
	c.ui.Message("Loading build status")
	build, err := c.dir.GetBuild(&directory.Build{Lookup: directory.Lookup{
		AppID: c.appfile.ID, Infra: infra.Name, InfraFlavor: infra.Flavor}})
	if err != nil {
		return fmt.Errorf(
			"Error loading build status: %s", err)
	}

	// Create the status texts. Guard against a nil dev result the
	// same way the build result is guarded below.
	devStatus := "[red]NOT CREATED"
	if dev != nil && dev.IsReady() {
		devStatus = "[green]CREATED"
	}
	buildStatus := "[red]NO BUILDS"
	if build != nil {
		buildStatus = "[green]BUILD READY"
	}

	c.ui.Header("Status results...")
	c.ui.Message(fmt.Sprintf("Development environment: %s", devStatus))
	// Fix: buildStatus was computed but never reported (an assigned-but-
	// never-read local is also a compile error in Go).
	c.ui.Message(fmt.Sprintf("Application build: %s", buildStatus))
	return nil
}
// Execute executes the given task for this Appfile.
func (c *Core) Execute(opts *ExecuteOpts) error {
	// Only the dev task is currently supported.
	if opts.Task == ExecuteTaskDev {
		return c.executeApp(opts)
	}
	return fmt.Errorf("unknown task: %s", opts.Task)
}
// creds reads the credentials if we have them, or queries the user
// for infrastructure credentials using the infrastructure if we
// don't have them.
//
// Credentials are cached encrypted on disk at <localDir>/creds,
// protected by a user-supplied password. Entering a blank password
// forces a fresh credential prompt. On success the creds are stored
// on infraCtx.InfraCreds.
func (c *Core) creds(
	infra infrastructure.Infrastructure,
	infraCtx *infrastructure.Context) error {
	// Output to the user some information about what is about to
	// happen here...
	infraCtx.Ui.Header("Detecting infrastructure credentials...")

	// The path to where we put the encrypted creds
	path := filepath.Join(c.localDir, "creds")

	// Determine whether we believe the creds exist already or not
	var exists bool
	if _, err := os.Stat(path); err == nil {
		exists = true
	} else {
		// No creds file yet: ensure the parent directory exists so we
		// can write the encrypted file later.
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return err
		}
	}

	var creds map[string]string
	if exists {
		infraCtx.Ui.Message(
			"Cached and encrypted infrastructure credentials found.\n" +
				"Otto will now ask you for the password to decrypt these\n" +
				"credentials.\n\n")

		// If they exist, ask for the password
		value, err := infraCtx.Ui.Input(&ui.InputOpts{
			Id:          "creds_password",
			Query:       "Encrypted Credentials Password",
			Description: strings.TrimSpace(credsQueryPassExists),
			Hide:        true,
			EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
		})
		if err != nil {
			return err
		}

		// If the password is not blank, then just read the credentials.
		// A blank password leaves creds nil, which triggers the fresh
		// credential prompt below.
		if value != "" {
			plaintext, err := cryptRead(path, value)
			if err == nil {
				err = json.Unmarshal(plaintext, &creds)
			}
			if err != nil {
				return fmt.Errorf(
					"error reading encrypted credentials: %s\n\n"+
						"If this error persists, you can force Otto to ask for credentials\n"+
						"again by inputting the empty password as the password.",
					err)
			}
		}
	}

	// If we don't have creds, then we need to query the user via
	// the infrastructure implementation.
	if creds == nil {
		infraCtx.Ui.Message(
			"Existing infrastructure credentials were not found! Otto will\n" +
				"now ask you for infrastructure credentials. These will be encrypted\n" +
				"and saved on disk so this doesn't need to be repeated.\n\n" +
				"IMPORTANT: If you're re-entering new credentials, make sure the\n" +
				"credentials are for the same account, otherwise you may lose\n" +
				"access to your existing infrastructure Otto set up.\n\n")

		var err error
		creds, err = infra.Creds(infraCtx)
		if err != nil {
			return err
		}

		// Now that we have the credentials, we need to ask for the
		// password to encrypt and store them. Loop until a non-empty
		// password is supplied.
		var password string
		for password == "" {
			password, err = infraCtx.Ui.Input(&ui.InputOpts{
				Id:          "creds_password",
				Query:       "Password for Encrypting Credentials",
				Hide:        true,
				Description: strings.TrimSpace(credsQueryPassNew),
				EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
			})
			if err != nil {
				return err
			}
		}

		// With the password, encrypt and write the data
		plaintext, err := json.Marshal(creds)
		if err != nil {
			// creds is a map[string]string, so this shouldn't ever fail
			panic(err)
		}
		if err := cryptWrite(path, password, plaintext); err != nil {
			return fmt.Errorf(
				"error writing encrypted credentials: %s", err)
		}
	}

	// Set the credentials
	infraCtx.InfraCreds = creds
	return nil
}
// executeApp dispatches an ExecuteOpts task to the app implementation
// for the current Appfile.
func (c *Core) executeApp(opts *ExecuteOpts) error {
	// Load the app context and implementation for the current Appfile.
	appCtx, err := c.appContext(c.appfile)
	if err != nil {
		return err
	}
	app, err := c.app(appCtx)
	if err != nil {
		return err
	}

	// Set the action and action args
	appCtx.Action = opts.Action
	appCtx.ActionArgs = opts.Args

	// Dispatch on the task. Reaching the default branch is a programmer
	// error: Execute already rejects unknown tasks with an error.
	switch opts.Task {
	case ExecuteTaskDev:
		return app.Dev(appCtx)
	default:
		// Fixed typo in panic message: "uknown" -> "unknown".
		panic(fmt.Sprintf("unknown task: %s", opts.Task))
	}
}
// appContext builds the app.Context used to invoke the application
// implementation for the given Appfile.
func (c *Core) appContext(f *appfile.File) (*app.Context, error) {
	// The active infrastructure configuration is needed for the tuple.
	config := f.ActiveInfrastructure()
	if config == nil {
		return nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			f.Project.Infrastructure)
	}

	// The tuple of (app type, infra type, infra flavor) identifies
	// which implementation handles this Appfile.
	tuple := app.Tuple{
		App:         f.Application.Type,
		Infra:       f.Project.Infrastructure,
		InfraFlavor: config.Flavor,
	}

	// The main app compiles into "app"; any dependency compiles into
	// its own "dep-<id>" directory.
	outputDir := filepath.Join(c.compileDir, "app")
	if f.ID != c.appfile.ID {
		outputDir = filepath.Join(
			c.compileDir, fmt.Sprintf("dep-%s", f.ID))
	}

	// Per-app cache directory, created eagerly.
	cacheDir := filepath.Join(c.dataDir, "cache", f.ID)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return nil, fmt.Errorf(
			"error making cache directory '%s': %s",
			cacheDir, err)
	}

	return &app.Context{
		Dir:         outputDir,
		CacheDir:    cacheDir,
		LocalDir:    c.localDir,
		Tuple:       tuple,
		Appfile:     f,
		Application: f.Application,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// app instantiates the app implementation matching ctx.Tuple.
func (c *Core) app(ctx *app.Context) (app.App, error) {
	log.Printf("[INFO] Loading app implementation for Tuple: %s", ctx.Tuple)

	// Find the factory registered for this tuple.
	factory := app.TupleMap(c.apps).Lookup(ctx.Tuple)
	if factory == nil {
		return nil, fmt.Errorf(
			"app implementation for tuple not found: %s", ctx.Tuple)
	}

	// Instantiate the implementation.
	impl, err := factory()
	if err != nil {
		return nil, fmt.Errorf(
			"app failed to start properly: %s", err)
	}
	return impl, nil
}
// infra instantiates the infrastructure implementation configured in
// the Appfile and builds the context used to invoke it.
func (c *Core) infra() (infrastructure.Infrastructure, *infrastructure.Context, error) {
	name := c.appfile.Project.Infrastructure

	// Look up the factory for the configured infrastructure type.
	factory, ok := c.infras[name]
	if !ok {
		return nil, nil, fmt.Errorf(
			"infrastructure type not supported: %s",
			name)
	}

	// The active configuration must exist in the Appfile.
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			name)
	}

	// Instantiate the implementation.
	impl, err := factory()
	if err != nil {
		return nil, nil, err
	}

	// Compilation output for the infra lives under "infra-<name>".
	outputDir := filepath.Join(
		c.compileDir, fmt.Sprintf("infra-%s", name))

	return impl, &infrastructure.Context{
		Dir:   outputDir,
		Infra: config,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// foundations instantiates every foundation configured for the active
// infrastructure, returning implementations and contexts in matching
// order. Both results are nil when no foundations are configured.
func (c *Core) foundations() ([]foundation.Foundation, []*foundation.Context, error) {
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// Nothing configured means nothing to build.
	if len(config.Foundations) == 0 {
		return nil, nil, nil
	}

	n := len(config.Foundations)
	impls := make([]foundation.Foundation, 0, n)
	ctxs := make([]*foundation.Context, 0, n)
	for _, f := range config.Foundations {
		// The tuple of (foundation type, infra type, infra flavor)
		// selects the implementation.
		tuple := foundation.Tuple{
			Type:        f.Name,
			Infra:       config.Type,
			InfraFlavor: config.Flavor,
		}

		factory := foundation.TupleMap(c.foundationMap).Lookup(tuple)
		if factory == nil {
			return nil, nil, fmt.Errorf(
				"foundation implementation for tuple not found: %s",
				tuple)
		}

		// Instantiate the implementation
		impl, err := factory()
		if err != nil {
			return nil, nil, err
		}

		// Compilation output lives under "foundation-<name>".
		outputDir := filepath.Join(
			c.compileDir, fmt.Sprintf("foundation-%s", f.Name))

		impls = append(impls, impl)
		ctxs = append(ctxs, &foundation.Context{
			Config:  f.Config,
			Dir:     outputDir,
			Tuple:   tuple,
			Appfile: c.appfile,
			Shared: context.Shared{
				InstallDir: filepath.Join(c.dataDir, "binaries"),
				Directory:  c.dir,
				Ui:         c.ui,
			},
		})
	}

	return impls, ctxs, nil
}
// credsQueryPassExists is the prompt description shown by Core.creds when
// previously saved, password-protected credentials are found on disk.
const credsQueryPassExists = `
Infrastructure credentials are required for this operation. Otto found
saved credentials that are password protected. Please enter the password
to decrypt these credentials. You may also just hit <enter> and leave
the password blank to force Otto to ask for the credentials again.
`
// credsQueryPassNew is the prompt description shown by Core.creds when
// asking for a password to encrypt freshly entered credentials.
const credsQueryPassNew = `
This password will be used to encrypt and save the credentials so they
don't need to be repeated multiple times.
`
|
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package indexer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/couchbase/indexing/secondary/common"
"github.com/couchbase/indexing/secondary/fdb"
"github.com/couchbase/indexing/secondary/logging"
"github.com/couchbase/indexing/secondary/platform"
"os"
"path/filepath"
"sort"
"sync"
"time"
)
var (
	// snapshotMetaListKey is the key in the meta KVStore under which the
	// list of snapshot metadata is persisted.
	snapshotMetaListKey = []byte("snapshots-list")
)
//NewForestDBSlice initiailizes a new slice with forestdb backend.
//Both main and back index gets initialized with default config.
//Slice methods are not thread-safe and application needs to
//handle the synchronization. The only exception being Insert and
//Delete can be called concurrently.
//Returns error in case slice cannot be initialized.
func NewForestDBSlice(path string, sliceId SliceId, idxDefnId common.IndexDefnId,
	idxInstId common.IndexInstId, isPrimary bool,
	sysconf common.Config, idxStats *IndexStats) (*fdbSlice, error) {

	// Best-effort creation of the slice directory. NOTE(review): the
	// condition also fires when path already exists as a directory, in
	// which case Mkdir fails and the error is ignored — confirm intended.
	info, err := os.Stat(path)
	if err != nil || err == nil && info.IsDir() {
		os.Mkdir(path, 0777)
	}

	// Name of the forestdb file backing this slice (newFdbFile is a
	// sibling helper; semantics of the bool flag not visible here).
	filepath := newFdbFile(path, false)
	slice := &fdbSlice{}
	slice.idxStats = idxStats

	// Aligned counters, updated atomically via the platform package.
	slice.get_bytes = platform.NewAlignedInt64(0)
	slice.insert_bytes = platform.NewAlignedInt64(0)
	slice.delete_bytes = platform.NewAlignedInt64(0)
	slice.fragAfterCompaction = platform.NewAlignedInt64(0)
	slice.flushedCount = platform.NewAlignedUint64(0)
	slice.committedCount = platform.NewAlignedUint64(0)

	// File-level forestdb config derived from indexer settings.
	config := forestdb.DefaultConfig()
	config.SetDurabilityOpt(forestdb.DRB_ASYNC)

	memQuota := sysconf["settings.memory_quota"].Uint64()
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)
	config.SetBufferCacheSize(memQuota)
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)

	prob := sysconf["settings.max_writer_lock_prob"].Int()
	config.SetMaxWriterLockProb(uint8(prob))
	logging.Debugf("NewForestDBSlice(): max writer lock prob %d", prob)

	kvconfig := forestdb.DefaultKVStoreConfig()

	// Open the db file. A missing db header is handled by removing the
	// file and retrying via the label. NOTE(review): if os.Remove fails,
	// this goto can loop indefinitely — confirm whether that is acceptable.
retry:
	if slice.dbfile, err = forestdb.Open(filepath, config); err != nil {
		if err == forestdb.RESULT_NO_DB_HEADERS {
			logging.Warnf("NewForestDBSlice(): Open failed with no_db_header error...Resetting the forestdb file")
			os.Remove(filepath)
			goto retry
		}
		return nil, err
	}

	slice.config = config
	slice.sysconf = sysconf

	//open a separate file handle for compaction
	if slice.compactFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	// Read-only handle used for stats queries.
	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	if slice.statFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	// One "main" (forward index) KVStore handle per writer thread.
	slice.numWriters = sysconf["numSliceWriters"].Int()
	slice.main = make([]*forestdb.KVStore, slice.numWriters)
	for i := 0; i < slice.numWriters; i++ {
		if slice.main[i], err = slice.dbfile.OpenKVStore("main", kvconfig); err != nil {
			return nil, err
		}
	}

	//create a separate back-index for non-primary indexes
	if !isPrimary {
		slice.back = make([]*forestdb.KVStore, slice.numWriters)
		for i := 0; i < slice.numWriters; i++ {
			if slice.back[i], err = slice.dbfile.OpenKVStore("back", kvconfig); err != nil {
				return nil, err
			}
		}
	}

	// Make use of default kvstore provided by forestdb (holds slice meta).
	if slice.meta, err = slice.dbfile.OpenKVStore("default", kvconfig); err != nil {
		return nil, err
	}

	// Identity and bookkeeping.
	slice.path = path
	slice.currfile = filepath
	slice.idxInstId = idxInstId
	slice.idxDefnId = idxDefnId
	slice.id = sliceId

	// Buffered mutation channel plus per-writer control channels.
	sliceBufSize := sysconf["settings.sliceBufSize"].Uint64()
	slice.cmdCh = make(chan interface{}, sliceBufSize)
	slice.workerDone = make([]chan bool, slice.numWriters)
	slice.stopCh = make([]DoneChannel, slice.numWriters)

	slice.isPrimary = isPrimary

	// Start one mutation worker goroutine per writer.
	for i := 0; i < slice.numWriters; i++ {
		slice.stopCh[i] = make(DoneChannel)
		slice.workerDone[i] = make(chan bool)
		go slice.handleCommandsWorker(i)
	}

	logging.Debugf("ForestDBSlice:NewForestDBSlice \n\t Created New Slice Id %v IndexInstId %v "+
		"WriterThreads %v", sliceId, idxInstId, slice.numWriters)

	slice.setCommittedCount()
	return slice, nil
}
//kv represents a key/value pair in storage format.
//It is the message type queued on fdbSlice.cmdCh for inserts.
type indexItem struct {
	key   []byte // encoded index key
	docid []byte // source document id
}
//fdbSlice represents a forestdb slice
type fdbSlice struct {
	// byte counters for get/insert/delete traffic, updated atomically
	get_bytes, insert_bytes, delete_bytes platform.AlignedInt64

	//flushed count
	flushedCount platform.AlignedUint64
	// persisted items count
	committedCount platform.AlignedUint64

	// Fragmentation percent computed after last compaction
	fragAfterCompaction platform.AlignedInt64

	// slice directory and the forestdb file currently in use
	path     string
	currfile string

	id SliceId //slice id

	// reference count guarded by lock; see IncrRef/DecrRef
	refCount int
	lock     sync.RWMutex

	dbfile *forestdb.File
	// read-only handle used for stats queries
	statFd *forestdb.File

	//forestdb requires a separate file handle to be used for compaction
	//as we need to allow concurrent db updates to happen on existing file handle
	//while compaction is running in the background.
	compactFd *forestdb.File

	metaLock sync.Mutex
	meta     *forestdb.KVStore   // handle for index meta
	main     []*forestdb.KVStore // handle for forward index
	back     []*forestdb.KVStore // handle for reverse index

	config *forestdb.Config

	idxDefnId common.IndexDefnId
	idxInstId common.IndexInstId

	status   SliceStatus
	isActive bool
	// primary slices index docids directly and keep no back index
	isPrimary bool

	// deferred delete/close requested while references were outstanding;
	// honored in DecrRef when refCount drops to zero
	isSoftDeleted bool
	isSoftClosed  bool

	cmdCh      chan interface{} //internal channel to buffer commands
	stopCh     []DoneChannel    //internal channel to signal shutdown
	workerDone []chan bool      //worker status check channel

	fatalDbErr error //store any fatal DB error

	numWriters int //number of writer threads

	//TODO: Remove this once these stats are
	//captured by the stats library
	totalFlushTime  time.Duration
	totalCommitTime time.Duration

	idxStats *IndexStats
	sysconf  common.Config
	confLock sync.Mutex
}
//IncrRef bumps the slice reference count under the slice lock.
func (fdb *fdbSlice) IncrRef() {
	fdb.lock.Lock()
	fdb.refCount++
	fdb.lock.Unlock()
}
//DecrRef drops one reference. When the count reaches zero, any
//soft close/delete requested earlier is carried out.
func (fdb *fdbSlice) DecrRef() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	fdb.refCount--
	if fdb.refCount != 0 {
		return
	}

	// Last reference gone: honor deferred close/delete requests.
	if fdb.isSoftClosed {
		tryCloseFdbSlice(fdb)
	}
	if fdb.isSoftDeleted {
		tryDeleteFdbSlice(fdb)
	}
}
//Insert queues an async insert of the given key/docid pair into the
//slice. If forestdb has previously hit a fatal error condition, that
//error is returned.
func (fdb *fdbSlice) Insert(key []byte, docid []byte) error {
	stats := fdb.idxStats
	stats.flushQueueSize.Add(1)
	stats.numFlushQueued.Add(1)

	item := &indexItem{key: key, docid: docid}
	fdb.cmdCh <- item
	return fdb.fatalDbErr
}
//Delete queues an async delete of the given document from the slice.
//If forestdb has previously hit a fatal error condition, that error
//is returned.
func (fdb *fdbSlice) Delete(docid []byte) error {
	stats := fdb.idxStats
	stats.flushQueueSize.Add(1)
	stats.numFlushQueued.Add(1)

	fdb.cmdCh <- docid
	return fdb.fatalDbErr
}
//handleCommandsWorker drains the slice command channel and applies each
//buffered mutation (insert for *indexItem, delete for []byte). It exits
//when the worker's stop channel is signaled.
func (fdb *fdbSlice) handleCommandsWorker(workerId int) {
loop:
	for {
		select {
		case cmd := <-fdb.cmdCh:
			switch msg := cmd.(type) {
			case *indexItem:
				start := time.Now()
				fdb.insert(msg.key, msg.docid, workerId)
				fdb.totalFlushTime += time.Since(start)
			case []byte:
				start := time.Now()
				fdb.delete(msg, workerId)
				fdb.totalFlushTime += time.Since(start)
			default:
				logging.Errorf("ForestDBSlice::handleCommandsWorker \n\tSliceId %v IndexInstId %v Received "+
					"Unknown Command %v", fdb.id, fdb.idxInstId, cmd)
			}
			fdb.idxStats.flushQueueSize.Add(-1)

		case <-fdb.stopCh[workerId]:
			fdb.stopCh[workerId] <- true
			break loop

		//worker gets a status check message on this channel, it responds
		//when its not processing any mutation
		case <-fdb.workerDone[workerId]:
			fdb.workerDone[workerId] <- true
		}
	}
}
//insert routes the mutation to the primary- or secondary-index path in
//forestdb, then logs writer stats.
func (fdb *fdbSlice) insert(key []byte, docid []byte, workerId int) {
	switch {
	case fdb.isPrimary:
		fdb.insertPrimaryIndex(key, docid, workerId)
	default:
		fdb.insertSecIndex(key, docid, workerId)
	}
	fdb.logWriterStat()
}
//insertPrimaryIndex sets a primary-index entry: the key is stored with an
//empty value in the main index. An already-existing key is skipped.
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
	var err error

	logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

	//check if the docid exists in the main index
	t0 := time.Now()
	if _, err = fdb.main[workerId].GetKV(key); err == nil {
		fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
		//skip
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
			"Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
	} else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		// Unexpected lookup failure: record/log it and drop the entry.
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"mainindex entry %v", fdb.id, fdb.idxInstId, err)
	} else if err == forestdb.RESULT_KEY_NOT_FOUND {
		//set in main index
		// NOTE(review): the stKVGet timing is only recorded on the hit
		// path above, not on this miss path — confirm whether intended.
		t0 := time.Now()
		if err = fdb.main[workerId].SetKV(key, nil); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
				"Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
		}
		fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
	}
}
//insertSecIndex upserts a secondary-index entry. The back index maps
//docid -> encoded key; the main index stores the encoded key with an
//empty value. An existing entry with a different key is deleted from
//both indexes before the new entry is written. A nil key after the old
//entry is removed acts as a pure delete.
func (fdb *fdbSlice) insertSecIndex(key []byte, docid []byte, workerId int) {
	var err error
	var oldkey []byte

	//logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s "+
	//	"Value - %s", fdb.id, fdb.idxInstId, k, v)

	//check if the docid exists in the back index
	if oldkey, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"backindex entry %v", fdb.id, fdb.idxInstId, err)
		return
	} else if oldkey != nil {
		//If old-key from backindex matches with the new-key
		//in mutation, skip it.
		if bytes.Equal(oldkey, key) {
			logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received Unchanged Key for "+
				"Doc Id %v. Key %v. Skipped.", fdb.id, fdb.idxInstId, string(docid), key)
			return
		}

		//there is already an entry in main index for this docid
		//delete from main index
		t0 := time.Now()
		if err = fdb.main[workerId].DeleteKV(oldkey); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
				"entry from main index %v", fdb.id, fdb.idxInstId, err)
			return
		}
		fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.delete_bytes, int64(len(oldkey)))

		//delete from back index
		t0 = time.Now()
		if err = fdb.back[workerId].DeleteKV(docid); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
				"entry from back index %v", fdb.id, fdb.idxInstId, err)
			return
		}
		fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
	}

	// A nil new key means there is nothing to (re-)index for this doc.
	if key == nil {
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %s. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//set the back index entry <docid, encodedkey>
	t0 := time.Now()
	if err = fdb.back[workerId].SetKV(docid, key); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Back Index Set. "+
			"Skipped Key %s. Value %v. Error %v", fdb.id, fdb.idxInstId, string(docid), key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(docid)+len(key)))

	t0 = time.Now()
	//set in main index
	if err = fdb.main[workerId].SetKV(key, nil); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
			"Skipped Key %v. Error %v", fdb.id, fdb.idxInstId, key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
}
//delete routes the deletion to the primary- or secondary-index path in
//forestdb, then logs writer stats.
func (fdb *fdbSlice) delete(docid []byte, workerId int) {
	switch {
	case fdb.isPrimary:
		fdb.deletePrimaryIndex(docid, workerId)
	default:
		fdb.deleteSecIndex(docid, workerId)
	}
	fdb.logWriterStat()
}
//deletePrimaryIndex removes the primary-index entry for the given docid.
//A nil docid is a programmer error and crashes via common.CrashOnError.
func (fdb *fdbSlice) deletePrimaryIndex(docid []byte, workerId int) {
	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	if docid == nil {
		common.CrashOnError(errors.New("Nil Primary Key"))
		return
	}

	//docid -> key format
	entry, err := NewPrimaryIndexEntry(docid)
	common.CrashOnError(err)

	//delete from main index
	raw := entry.Bytes()
	t0 := time.Now()
	if err = fdb.main[workerId].DeleteKV(raw); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Error %v", fdb.id, fdb.idxInstId,
			docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Since(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(raw)))
}
//deleteSecIndex removes a secondary-index entry: it looks up the old key
//via the back index, then deletes from the main index and finally from
//the back index. A missing back-index entry is a no-op (the doc was
//deleted before the index existed).
func (fdb *fdbSlice) deleteSecIndex(docid []byte, workerId int) {
	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	var olditm []byte
	var err error

	if olditm, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error locating "+
			"backindex entry for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}

	//if the oldkey is nil, nothing needs to be done. This is the case of deletes
	//which happened before index was created.
	if olditm == nil {
		logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %v. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//delete from main index
	t0 := time.Now()
	if err = fdb.main[workerId].DeleteKV(olditm); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Key %v. Error %v", fdb.id, fdb.idxInstId,
			docid, olditm, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(olditm)))

	//delete from the back index
	t0 = time.Now()
	if err = fdb.back[workerId].DeleteKV(docid); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from back index for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
}
//getBackIndexEntry returns the secondary key previously stored for
//docid in the back index, or nil when no entry exists. A missing key
//is not treated as an error.
func (fdb *fdbSlice) getBackIndexEntry(docid []byte, workerId int) ([]byte, error) {
	// logging.Tracef("ForestDBSlice::getBackIndexEntry \n\tSliceId %v IndexInstId %v Get BackIndex Key - %s",
	// 	fdb.id, fdb.idxInstId, docid)

	t0 := time.Now()
	itm, err := fdb.back[workerId].GetKV(docid)
	fdb.idxStats.Timings.stKVGet.Put(time.Since(t0))
	platform.AddInt64(&fdb.get_bytes, int64(len(itm)))

	//forestdb reports a get on a non-existent key as an error;
	//map that case to a nil entry instead of failing
	if err == forestdb.RESULT_KEY_NOT_FOUND {
		return itm, nil
	}
	if err != nil {
		return nil, err
	}
	return itm, nil
}
//checkFatalDbError checks if the error returned from DB
//is fatal and stores it. This error will be returned
//to caller on next DB operation (via Insert/Delete).
//NOTE(review): CrashOnError is invoked first; if it panics on any
//non-nil error (as its name suggests), the switch below is
//unreachable — confirm CrashOnError semantics.
func (fdb *fdbSlice) checkFatalDbError(err error) {
	//panic on all DB errors and recover rather than risk
	//inconsistent db state
	common.CrashOnError(err)

	//fatal errors are matched by message text
	errStr := err.Error()
	switch errStr {
	case "checksum error", "file corruption", "no db instance",
		"alloc fail", "seek fail", "fsync fail":
		fdb.fatalDbErr = err
	}
}
// OpenSnapshot creates an open snapshot handle from snapshot info.
// Snapshot info is obtained from NewSnapshot() or GetSnapshots() API.
// Returns error if snapshot handle cannot be created.
func (fdb *fdbSlice) OpenSnapshot(info SnapshotInfo) (Snapshot, error) {
	snapInfo := info.(*fdbSnapshotInfo)

	// Fields common to primary and secondary index snapshots.
	s := &fdbSnapshot{
		slice:      fdb,
		idxDefnId:  fdb.idxDefnId,
		idxInstId:  fdb.idxInstId,
		main:       fdb.main[0],
		ts:         snapInfo.Timestamp(),
		mainSeqNum: snapInfo.MainSeq,
		committed:  info.IsCommitted(),
	}
	// Secondary indexes additionally carry the back index handle.
	if !fdb.isPrimary {
		s.back = fdb.back[0]
		s.backSeqNum = snapInfo.BackSeq
	}
	// Committed snapshots also expose the meta kvstore.
	if info.IsCommitted() {
		s.meta = fdb.meta
		s.metaSeqNum = snapInfo.MetaSeq
	}

	logging.Debugf("ForestDBSlice::OpenSnapshot \n\tSliceId %v IndexInstId %v Creating New "+
		"Snapshot %v committed:%v", fdb.id, fdb.idxInstId, snapInfo, s.committed)

	return s, s.Create()
}
//setCommittedCount refreshes the cached persisted-item count from the
//main index document count; failures are logged and the cache is left
//unchanged.
func (fdb *fdbSlice) setCommittedCount() {
	info, err := fdb.main[0].Info()
	if err != nil {
		logging.Errorf("ForestDB setCommittedCount failed %v", err)
		return
	}
	platform.StoreUint64(&fdb.committedCount, info.DocCount())
}
//GetCommittedCount returns the cached count of persisted items,
//last refreshed by setCommittedCount.
func (fdb *fdbSlice) GetCommittedCount() uint64 {
	return platform.LoadUint64(&fdb.committedCount)
}
//Rollback slice to given snapshot. Return error if
//not possible. Rollback order matters: meta store first, then main
//index, then back index (non-primary only), so a crash mid-way is
//recoverable from the already rolled-back meta information.
func (fdb *fdbSlice) Rollback(info SnapshotInfo) error {

	//get the seqnum from snapshot
	snapInfo := info.(*fdbSnapshotInfo)

	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	//drop snapshot records newer than the rollback target
	sic := NewSnapshotInfoContainer(infos)
	sic.RemoveRecentThanTS(info.Timestamp())

	//rollback meta-store first, if main/back index rollback fails, recovery
	//will pick up the rolled-back meta information.
	err = fdb.meta.Rollback(snapInfo.MetaSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Meta Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}

	//call forestdb to rollback for each kv store
	err = fdb.main[0].Rollback(snapInfo.MainSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Main Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}
	fdb.setCommittedCount()

	//rollback back-index only for non-primary indexes
	if !fdb.isPrimary {
		err = fdb.back[0].Rollback(snapInfo.BackSeq)
		if err != nil {
			logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
				"Back Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
			return err
		}
	}

	// Update valid snapshot list and commit
	err = fdb.updateSnapshotsMeta(sic.List())
	if err != nil {
		return err
	}

	//persist the rollback with a manual WAL flush commit
	return fdb.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
}
//RollbackToZero rollbacks the slice to initial state (seqnum 0).
//Return error if not possible. Same ordering as Rollback: meta first,
//then main, then back index, so recovery can use the rolled-back meta.
func (fdb *fdbSlice) RollbackToZero() error {

	zeroSeqNum := forestdb.SeqNum(0)
	var err error

	//rollback meta-store first, if main/back index rollback fails, recovery
	//will pick up the rolled-back meta information.
	err = fdb.meta.Rollback(zeroSeqNum)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Meta Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
		return err
	}

	//call forestdb to rollback
	err = fdb.main[0].Rollback(zeroSeqNum)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Main Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
		return err
	}
	fdb.setCommittedCount()

	//rollback back-index only for non-primary indexes
	if !fdb.isPrimary {
		err = fdb.back[0].Rollback(zeroSeqNum)
		if err != nil {
			logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
				"Back Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
			return err
		}
	}

	return nil
}
//slice insert/delete methods are async. There
//can be outstanding mutations in internal queue to flush even
//after insert/delete have returned success to caller.
//This method provides a mechanism to wait till internal
//queue is empty.
func (fdb *fdbSlice) waitPersist() {

	//every SLICE_COMMIT_POLL_INTERVAL milliseconds,
	//check for outstanding mutations. If there are
	//none, proceed with the commit.
	ticker := time.NewTicker(time.Millisecond * SLICE_COMMIT_POLL_INTERVAL)
	//BUG FIX: the ticker was never stopped, leaking its underlying
	//timer on every call (this runs once per NewSnapshot).
	defer ticker.Stop()

	for range ticker.C {
		if fdb.checkAllWorkersDone() {
			break
		}
	}
}
//NewSnapshot waits for all queued mutations to be flushed and captures
//a snapshot info of the current state of the slice. When commit is true,
//it also persists the outstanding writes in the underlying forestdb
//database and records the snapshot in the meta store. If commit returns
//an error, the slice should be rolled back to the previous snapshot.
func (fdb *fdbSlice) NewSnapshot(ts *common.TsVbuuid, commit bool) (SnapshotInfo, error) {

	//block until the internal command queue has drained
	fdb.waitPersist()

	mainDbInfo, err := fdb.main[0].Info()
	if err != nil {
		return nil, err
	}

	newSnapshotInfo := &fdbSnapshotInfo{
		Ts:        ts,
		MainSeq:   mainDbInfo.LastSeqNum(),
		Committed: commit,
	}

	//for non-primary index add info for back-index
	if !fdb.isPrimary {
		backDbInfo, err := fdb.back[0].Info()
		if err != nil {
			return nil, err
		}
		newSnapshotInfo.BackSeq = backDbInfo.LastSeqNum()
	}

	if commit {
		metaDbInfo, err := fdb.meta.Info()
		if err != nil {
			return nil, err
		}
		//the next meta seqno after this update
		newSnapshotInfo.MetaSeq = metaDbInfo.LastSeqNum() + 1

		infos, err := fdb.getSnapshotsMeta()
		if err != nil {
			return nil, err
		}
		sic := NewSnapshotInfoContainer(infos)
		sic.Add(newSnapshotInfo)

		//bound the retained snapshot history by the configured limit
		fdb.confLock.Lock()
		maxRollbacks := fdb.sysconf["settings.recovery.max_rollbacks"].Int()
		fdb.confLock.Unlock()

		if sic.Len() > maxRollbacks {
			sic.RemoveOldest()
		}

		// Meta update should be done before commit
		// Otherwise, metadata will not be atomically updated along with disk commit.
		err = fdb.updateSnapshotsMeta(sic.List())
		if err != nil {
			return nil, err
		}

		// Commit database file
		start := time.Now()
		err = fdb.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
		elapsed := time.Since(start)

		fdb.idxStats.Timings.stCommit.Put(elapsed)
		fdb.totalCommitTime += elapsed
		logging.Debugf("ForestDBSlice::Commit \n\tSliceId %v IndexInstId %v TotalFlushTime %v "+
			"TotalCommitTime %v", fdb.id, fdb.idxInstId, fdb.totalFlushTime, fdb.totalCommitTime)

		if err != nil {
			logging.Errorf("ForestDBSlice::Commit \n\tSliceId %v IndexInstId %v Error in "+
				"Index Commit %v", fdb.id, fdb.idxInstId, err)
			return nil, err
		}

		fdb.setCommittedCount()
	}

	return newSnapshotInfo, nil
}
//checkAllWorkersDone return true if all workers have
//finished processing. It first checks the shared command queue,
//then performs a synchronous ping/ack handshake with each worker
//over its workerDone channel (the worker only answers between
//mutations), proving no mutation is in flight.
func (fdb *fdbSlice) checkAllWorkersDone() bool {

	//if there are mutations in the cmdCh, workers are
	//not yet done
	if len(fdb.cmdCh) > 0 {
		return false
	}

	//worker queue is empty, make sure both workers are done
	//processing the last mutation
	for i := 0; i < fdb.numWriters; i++ {
		fdb.workerDone[i] <- true
		<-fdb.workerDone[i]
	}

	return true
}
//Close stops the worker goroutines and closes the slice's forestdb
//handles. If the slice is still referenced (refCount > 0), the close
//is deferred (soft close) until the last reference is dropped in DecrRef.
func (fdb *fdbSlice) Close() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	logging.Infof("ForestDBSlice::Close \n\tClosing Slice Id %v, IndexInstId %v, "+
		"IndexDefnId %v", fdb.idxInstId, fdb.idxDefnId, fdb.id)

	//signal shutdown for command handler routines and wait for each ack
	for i := 0; i < fdb.numWriters; i++ {
		fdb.stopCh[i] <- true
		<-fdb.stopCh[i]
	}

	if fdb.refCount > 0 {
		fdb.isSoftClosed = true
	} else {
		tryCloseFdbSlice(fdb)
	}
}
//Destroy removes the database file from disk.
//Slice is not recoverable after this. If the slice is still
//referenced (refCount > 0), deletion is deferred (soft delete)
//until the last reference is dropped in DecrRef.
func (fdb *fdbSlice) Destroy() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	if fdb.refCount > 0 {
		logging.Infof("ForestDBSlice::Destroy \n\tSoftdeleted Slice Id %v, IndexInstId %v, "+
			"IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		fdb.isSoftDeleted = true
	} else {
		tryDeleteFdbSlice(fdb)
	}
}
//Id returns the SliceId for this Slice.
func (fdb *fdbSlice) Id() SliceId {
	return fdb.id
}
// Path returns the on-disk directory path for this Slice.
func (fdb *fdbSlice) Path() string {
	return fdb.path
}
//IsActive returns whether the slice is active.
func (fdb *fdbSlice) IsActive() bool {
	return fdb.isActive
}
//SetActive sets the active state of this slice.
func (fdb *fdbSlice) SetActive(isActive bool) {
	fdb.isActive = isActive
}
//Status returns the current SliceStatus for this slice.
func (fdb *fdbSlice) Status() SliceStatus {
	return fdb.status
}
//SetStatus sets a new status for this slice.
func (fdb *fdbSlice) SetStatus(status SliceStatus) {
	fdb.status = status
}
//IndexInstId returns the Index InstanceId this
//slice is associated with.
func (fdb *fdbSlice) IndexInstId() common.IndexInstId {
	return fdb.idxInstId
}
//IndexDefnId returns the Index DefnId this slice
//is associated with.
func (fdb *fdbSlice) IndexDefnId() common.IndexDefnId {
	return fdb.idxDefnId
}
// GetSnapshots returns the list of persisted snapshot infos.
func (fdb *fdbSlice) GetSnapshots() ([]SnapshotInfo, error) {
	// Delegates directly to the persisted snapshot metadata.
	return fdb.getSnapshotsMeta()
}
//Compact compacts the forestdb file up to the oldest retained snapshot,
//swaps the current data file for the compacted one, and records the
//post-compaction fragmentation percentage (used by Statistics to
//compute approximate fragmentation).
func (fdb *fdbSlice) Compact() error {

	fdb.IncrRef()
	defer fdb.DecrRef()

	//get oldest snapshot upto which compaction can be done
	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	sic := NewSnapshotInfoContainer(infos)

	osnap := sic.GetOldest()
	if osnap == nil {
		logging.Infof("ForestDBSlice::Compact No Snapshot Found. Skipped Compaction."+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}

	mainSeq := osnap.(*fdbSnapshotInfo).MainSeq

	//find the db snapshot lower than oldest snapshot
	snap, err := fdb.compactFd.GetAllSnapMarkers()
	if err != nil {
		return err
	}
	defer snap.FreeSnapMarkers()

	var snapMarker *forestdb.SnapMarker
	var compactSeqNum forestdb.SeqNum
snaploop:
	for _, s := range snap.SnapInfoList() {
		cm := s.GetKvsCommitMarkers()
		for _, c := range cm {
			//if seqNum of "main" kvs is less than or equal to oldest snapshot seqnum
			//it is safe to compact upto that snapshot
			if c.GetKvStoreName() == "main" && c.GetSeqNum() <= mainSeq {
				snapMarker = s.GetSnapMarker()
				compactSeqNum = c.GetSeqNum()
				break snaploop
			}
		}
	}

	if snapMarker == nil {
		logging.Infof("ForestDBSlice::Compact No Valid SnapMarker Found. Skipped Compaction."+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}
	logging.Infof("ForestDBSlice::Compact Compacting upto SeqNum %v. "+
		"Slice Id %v, IndexInstId %v, IndexDefnId %v", compactSeqNum, fdb.id,
		fdb.idxInstId, fdb.idxDefnId)

	newpath := newFdbFile(fdb.path, true)
	// Remove any existing files leftover due to a crash during last compaction attempt
	os.Remove(newpath)

	if err = fdb.compactFd.CompactUpto(newpath, snapMarker); err != nil {
		return err
	}

	//remove the pre-compaction file; a failure here only wastes disk
	//space, so log it instead of failing the compaction
	//(the original silently discarded this error)
	if _, e := os.Stat(fdb.currfile); e == nil {
		if rerr := os.Remove(fdb.currfile); rerr != nil {
			logging.Errorf("ForestDBSlice::Compact Error removing old file %v. Error %v",
				fdb.currfile, rerr)
		}
	}
	fdb.currfile = newpath

	//BUG FIX: the error from FileSize was previously ignored, which could
	//leave diskSz as a garbage/zero value used in a division below
	diskSz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return err
	}

	//reopen the read-only stats handle against the compacted file
	config := forestdb.DefaultConfig()
	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	fdb.statFd.Close()
	if fdb.statFd, err = forestdb.Open(fdb.currfile, config); err != nil {
		return err
	}

	dataSz := int64(fdb.statFd.EstimateSpaceUsed())
	//BUG FIX: guard against division by zero for an empty file
	if diskSz > 0 {
		frag := (diskSz - dataSz) * 100 / diskSz
		platform.StoreInt64(&fdb.fragAfterCompaction, frag)
	}

	return nil
}
//Statistics returns storage statistics for this slice: estimated data
//size, on-disk file size, an approximate fragmentation percentage, and
//cumulative get/insert/delete byte counters.
func (fdb *fdbSlice) Statistics() (StorageStatistics, error) {
	var sts StorageStatistics

	sz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return sts, err
	}

	sts.DataSize = int64(fdb.statFd.EstimateSpaceUsed())
	sts.DiskSize = sz

	// Compute approximate fragmentation percentage
	// Since we keep multiple index snapshots after compaction, it is not
	// trivial to compute fragmentation as ration of data size to disk size.
	// Hence we compute approximate fragmentation by removing fragmentation
	// threshold caused as a result of compaction.
	sts.Fragmentation = 0
	if sts.DataSize > 0 {
		sts.Fragmentation = ((sts.DiskSize - sts.DataSize) * 100) / sts.DiskSize
	}
	//subtract the residual fragmentation measured right after the last
	//compaction, clamping at zero
	compactionFrag := platform.LoadInt64(&fdb.fragAfterCompaction)
	sts.Fragmentation -= compactionFrag
	if sts.Fragmentation < 0 {
		sts.Fragmentation = 0
	}

	sts.GetBytes = platform.LoadInt64(&fdb.get_bytes)
	sts.InsertBytes = platform.LoadInt64(&fdb.insert_bytes)
	sts.DeleteBytes = platform.LoadInt64(&fdb.delete_bytes)
	return sts, nil
}
//UpdateConfig replaces the slice's system configuration under the
//config lock; readers (e.g. NewSnapshot) take the same lock.
func (fdb *fdbSlice) UpdateConfig(cfg common.Config) {
	fdb.confLock.Lock()
	defer fdb.confLock.Unlock()
	fdb.sysconf = cfg
}
//String returns a human-readable summary of the slice for logging.
func (fdb *fdbSlice) String() string {
	return fmt.Sprintf("SliceId: %v File: %v Index: %v ",
		fdb.id, fdb.path, fdb.idxInstId)
}
//updateSnapshotsMeta serializes the snapshot info list and stores it in
//the meta kvstore under snapshotMetaListKey, guarded by metaLock.
func (fdb *fdbSlice) updateSnapshotsMeta(infos []SnapshotInfo) error {
	fdb.metaLock.Lock()
	defer fdb.metaLock.Unlock()

	val, err := json.Marshal(infos)
	if err == nil {
		err = fdb.meta.SetKV(snapshotMetaListKey, val)
	}
	if err != nil {
		return errors.New("Failed to update snapshots list -" + err.Error())
	}
	return nil
}
//getSnapshotsMeta loads and deserializes the persisted snapshot info
//list from the meta kvstore. A missing key is not an error: it returns
//a nil list, matching the state before any snapshot was persisted.
func (fdb *fdbSlice) getSnapshotsMeta() ([]SnapshotInfo, error) {
	fdb.metaLock.Lock()
	defer fdb.metaLock.Unlock()

	data, err := fdb.meta.GetKV(snapshotMetaListKey)
	if err == forestdb.RESULT_KEY_NOT_FOUND {
		//no snapshots have ever been persisted
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	var tmp []*fdbSnapshotInfo
	if err = json.Unmarshal(data, &tmp); err != nil {
		return nil, errors.New("Failed to retrieve snapshots list -" + err.Error())
	}

	var snapList []SnapshotInfo
	for _, info := range tmp {
		snapList = append(snapList, info)
	}
	return snapList, nil
}
//tryDeleteFdbSlice destroys the forestdb file and removes the slice's
//disk directory. Errors are logged but not propagated — deletion is
//best-effort.
func tryDeleteFdbSlice(fdb *fdbSlice) {
	logging.Infof("ForestDBSlice::Destroy \n\tDestroying Slice Id %v, IndexInstId %v, "+
		"IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)

	if err := forestdb.Destroy(fdb.currfile, fdb.config); err != nil {
		logging.Errorf("ForestDBSlice::Destroy \n\t Error Destroying Slice Id %v, "+
			"IndexInstId %v, IndexDefnId %v. Error %v", fdb.id, fdb.idxInstId, fdb.idxDefnId, err)
	}

	//cleanup the disk directory
	if err := os.RemoveAll(fdb.path); err != nil {
		logging.Errorf("ForestDBSlice::Destroy \n\t Error Cleaning Up Slice Id %v, "+
			"IndexInstId %v, IndexDefnId %v. Error %v", fdb.id, fdb.idxInstId, fdb.idxDefnId, err)
	}
}
//tryCloseFdbSlice closes every forestdb handle owned by the slice:
//the main/back kvstores (index 0 only; all workers share the same
//underlying stores opened on dbfile), the meta kvstore, and the
//stat/compaction/main file handles.
func tryCloseFdbSlice(fdb *fdbSlice) {
	//close the main index
	if fdb.main[0] != nil {
		fdb.main[0].Close()
	}

	if !fdb.isPrimary {
		//close the back index
		if fdb.back[0] != nil {
			fdb.back[0].Close()
		}
	}

	if fdb.meta != nil {
		fdb.meta.Close()
	}

	fdb.statFd.Close()
	fdb.compactFd.Close()
	fdb.dbfile.Close()
}
//newFdbFile returns the path of the forestdb data file inside dirpath.
//Data files are named data.fdb.<version>. The first file in lexical
//order is treated as the current one; when newVersion is true the
//returned path carries version+1 (used as the compaction target).
//NOTE(review): sort.Strings is lexicographic, so data.fdb.10 sorts
//before data.fdb.2 — fine while versions stay single-digit; confirm
//intended behavior beyond that.
func newFdbFile(dirpath string, newVersion bool) string {
	version := 0

	//fixed glob pattern; no need to build it with Sprintf
	files, _ := filepath.Glob(filepath.Join(dirpath, "data.fdb.*"))
	sort.Strings(files)

	// Pick the first file with least version
	if len(files) > 0 {
		filename := filepath.Base(files[0])
		if _, err := fmt.Sscanf(filename, "data.fdb.%d", &version); err != nil {
			panic(fmt.Sprintf("Invalid data file %s (%v)", files[0], err))
		}
	}

	if newVersion {
		version++
	}

	return filepath.Join(dirpath, fmt.Sprintf("data.fdb.%d", version))
}
//logWriterStat bumps the flushed-mutation counter and periodically
//(first mutation, then every 10000th) logs flush/queue progress.
func (fdb *fdbSlice) logWriterStat() {
	count := platform.AddUint64(&fdb.flushedCount, 1)
	if count == 1 || count%10000 == 0 {
		logging.Infof("logWriterStat:: %v "+
			"FlushedCount %v QueuedCount %v", fdb.idxInstId,
			count, len(fdb.cmdCh))
	}
}
MB-15449 indexer: Avoid backindex delete operation if entry exists

ForestDB insert operation will replace a key if it
exists already. Each delete operation requires a WAL entry
on disk and adds fragmentation. This change aims at improving
write performance for incremental updates.

Change-Id: I4a8bdbc73a1323ecc26502619eefd76037c127af
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package indexer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/couchbase/indexing/secondary/common"
"github.com/couchbase/indexing/secondary/fdb"
"github.com/couchbase/indexing/secondary/logging"
"github.com/couchbase/indexing/secondary/platform"
"os"
"path/filepath"
"sort"
"sync"
"time"
)
var (
	//snapshotMetaListKey is the key under which the serialized list of
	//snapshot infos is stored in the "default" (meta) kvstore.
	snapshotMetaListKey = []byte("snapshots-list")
)
//NewForestDBSlice initializes a new slice with forestdb backend.
//Both main and back index gets initialized with default config.
//Slice methods are not thread-safe and application needs to
//handle the synchronization. The only exception being Insert and
//Delete can be called concurrently.
//Returns error in case slice cannot be initialized.
func NewForestDBSlice(path string, sliceId SliceId, idxDefnId common.IndexDefnId,
	idxInstId common.IndexInstId, isPrimary bool,
	sysconf common.Config, idxStats *IndexStats) (*fdbSlice, error) {

	//ensure the slice directory exists; the Mkdir error is deliberately
	//ignored (it fails harmlessly if the directory already exists)
	info, err := os.Stat(path)
	if err != nil || err == nil && info.IsDir() {
		os.Mkdir(path, 0777)
	}

	//NOTE(review): this local shadows the path/filepath package within
	//this function — rename candidate
	filepath := newFdbFile(path, false)
	slice := &fdbSlice{}
	slice.idxStats = idxStats

	//aligned atomics for per-slice counters
	slice.get_bytes = platform.NewAlignedInt64(0)
	slice.insert_bytes = platform.NewAlignedInt64(0)
	slice.delete_bytes = platform.NewAlignedInt64(0)
	slice.fragAfterCompaction = platform.NewAlignedInt64(0)
	slice.flushedCount = platform.NewAlignedUint64(0)
	slice.committedCount = platform.NewAlignedUint64(0)

	config := forestdb.DefaultConfig()
	config.SetDurabilityOpt(forestdb.DRB_ASYNC)

	memQuota := sysconf["settings.memory_quota"].Uint64()
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)
	config.SetBufferCacheSize(memQuota)
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)

	prob := sysconf["settings.max_writer_lock_prob"].Int()
	config.SetMaxWriterLockProb(uint8(prob))
	logging.Debugf("NewForestDBSlice(): max writer lock prob %d", prob)

	kvconfig := forestdb.DefaultKVStoreConfig()

	//retry once after deleting a file whose headers are unreadable
retry:
	if slice.dbfile, err = forestdb.Open(filepath, config); err != nil {
		if err == forestdb.RESULT_NO_DB_HEADERS {
			logging.Warnf("NewForestDBSlice(): Open failed with no_db_header error...Resetting the forestdb file")
			os.Remove(filepath)
			goto retry
		}
		return nil, err
	}

	slice.config = config
	slice.sysconf = sysconf

	//open a separate file handle for compaction
	if slice.compactFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	//read-only handle used for stats queries
	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	if slice.statFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	//one kvstore handle per writer goroutine for the main index
	slice.numWriters = sysconf["numSliceWriters"].Int()
	slice.main = make([]*forestdb.KVStore, slice.numWriters)
	for i := 0; i < slice.numWriters; i++ {
		if slice.main[i], err = slice.dbfile.OpenKVStore("main", kvconfig); err != nil {
			return nil, err
		}
	}

	//create a separate back-index for non-primary indexes
	if !isPrimary {
		slice.back = make([]*forestdb.KVStore, slice.numWriters)
		for i := 0; i < slice.numWriters; i++ {
			if slice.back[i], err = slice.dbfile.OpenKVStore("back", kvconfig); err != nil {
				return nil, err
			}
		}
	}

	// Make use of default kvstore provided by forestdb
	if slice.meta, err = slice.dbfile.OpenKVStore("default", kvconfig); err != nil {
		return nil, err
	}

	slice.path = path
	slice.currfile = filepath
	slice.idxInstId = idxInstId
	slice.idxDefnId = idxDefnId
	slice.id = sliceId

	sliceBufSize := sysconf["settings.sliceBufSize"].Uint64()
	slice.cmdCh = make(chan interface{}, sliceBufSize)
	slice.workerDone = make([]chan bool, slice.numWriters)
	slice.stopCh = make([]DoneChannel, slice.numWriters)

	slice.isPrimary = isPrimary

	//start one command-handler goroutine per writer
	for i := 0; i < slice.numWriters; i++ {
		slice.stopCh[i] = make(DoneChannel)
		slice.workerDone[i] = make(chan bool)
		go slice.handleCommandsWorker(i)
	}

	logging.Debugf("ForestDBSlice:NewForestDBSlice \n\t Created New Slice Id %v IndexInstId %v "+
		"WriterThreads %v", sliceId, idxInstId, slice.numWriters)

	slice.setCommittedCount()
	return slice, nil
}
//indexItem represents a key/value pair in storage format, queued on
//cmdCh for an insert operation.
type indexItem struct {
	key   []byte // encoded secondary key (nil for "missing" field)
	docid []byte // source document id
}
//fdbSlice represents a forestdb slice
type fdbSlice struct {
	//cumulative byte counters for get/insert/delete operations
	get_bytes, insert_bytes, delete_bytes platform.AlignedInt64

	//flushed count
	flushedCount platform.AlignedUint64

	// persisted items count
	committedCount platform.AlignedUint64

	// Fragmentation percent computed after last compaction
	fragAfterCompaction platform.AlignedInt64

	path     string //slice directory on disk
	currfile string //path of the current data.fdb.N file
	id       SliceId //slice id

	refCount int          //outstanding references; guards soft close/delete
	lock     sync.RWMutex //protects refCount and close/destroy state
	dbfile   *forestdb.File
	statFd   *forestdb.File //read-only handle for stats queries

	//forestdb requires a separate file handle to be used for compaction
	//as we need to allow concurrent db updates to happen on existing file handle
	//while compaction is running in the background.
	compactFd *forestdb.File

	metaLock sync.Mutex          //protects meta kvstore access
	meta     *forestdb.KVStore   // handle for index meta
	main     []*forestdb.KVStore // handle for forward index
	back     []*forestdb.KVStore // handle for reverse index

	config *forestdb.Config

	idxDefnId common.IndexDefnId
	idxInstId common.IndexInstId

	status        SliceStatus
	isActive      bool
	isPrimary     bool
	isSoftDeleted bool //Destroy deferred until refCount drains
	isSoftClosed  bool //Close deferred until refCount drains

	cmdCh  chan interface{} //internal channel to buffer commands
	stopCh []DoneChannel    //internal channel to signal shutdown

	workerDone []chan bool //worker status check channel

	fatalDbErr error //store any fatal DB error

	numWriters int //number of writer threads

	//TODO: Remove this once these stats are
	//captured by the stats library
	totalFlushTime  time.Duration
	totalCommitTime time.Duration

	idxStats *IndexStats
	sysconf  common.Config //system configuration; guarded by confLock
	confLock sync.Mutex
}
//IncrRef takes a reference on the slice, deferring any pending
//soft close/delete until DecrRef drops it.
func (fdb *fdbSlice) IncrRef() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	fdb.refCount++
}
//DecrRef drops a reference on the slice. When the count reaches zero,
//any soft close/delete requested while the slice was referenced is
//carried out.
func (fdb *fdbSlice) DecrRef() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	fdb.refCount--
	if fdb.refCount == 0 {
		if fdb.isSoftClosed {
			tryCloseFdbSlice(fdb)
		}
		if fdb.isSoftDeleted {
			tryDeleteFdbSlice(fdb)
		}
	}
}
//Insert will insert the given key/value pair into the slice.
//Internally the request is buffered and executed async.
//If forestdb has encountered any fatal error condition,
//it will be returned as error (note: this reflects errors from
//earlier operations, not from this buffered request).
func (fdb *fdbSlice) Insert(key []byte, docid []byte) error {
	fdb.idxStats.flushQueueSize.Add(1)
	fdb.idxStats.numFlushQueued.Add(1)
	fdb.cmdCh <- &indexItem{key: key, docid: docid}
	return fdb.fatalDbErr
}
//Delete will delete the given document from slice.
//Internally the request is buffered and executed async.
//If forestdb has encountered any fatal error condition,
//it will be returned as error (note: this reflects errors from
//earlier operations, not from this buffered request).
func (fdb *fdbSlice) Delete(docid []byte) error {
	fdb.idxStats.flushQueueSize.Add(1)
	fdb.idxStats.numFlushQueued.Add(1)
	fdb.cmdCh <- docid
	return fdb.fatalDbErr
}
//handleCommandsWorker keeps listening to buffered write requests for
//the slice and processes them: *indexItem means insert, []byte means
//delete by docid. It shuts itself down when signalled on its stop
//channel and acks status probes on its workerDone channel.
func (fdb *fdbSlice) handleCommandsWorker(workerId int) {

loop:
	for {
		select {
		case c := <-fdb.cmdCh:
			switch cmd := c.(type) {

			case *indexItem:
				start := time.Now()
				fdb.insert(cmd.key, cmd.docid, workerId)
				fdb.totalFlushTime += time.Since(start)

			case []byte:
				start := time.Now()
				fdb.delete(cmd, workerId)
				fdb.totalFlushTime += time.Since(start)

			default:
				logging.Errorf("ForestDBSlice::handleCommandsWorker \n\tSliceId %v IndexInstId %v Received "+
					"Unknown Command %v", fdb.id, fdb.idxInstId, c)
			}
			fdb.idxStats.flushQueueSize.Add(-1)

		case <-fdb.stopCh[workerId]:
			fdb.stopCh[workerId] <- true
			break loop

		//worker gets a status check message on this channel, it responds
		//when its not processing any mutation
		case <-fdb.workerDone[workerId]:
			fdb.workerDone[workerId] <- true
		}
	}
}
//insert does the actual insert in forestdb, dispatching to the
//primary- or secondary-index variant, then records the mutation in
//the flushed-count stat.
func (fdb *fdbSlice) insert(key []byte, docid []byte, workerId int) {

	if fdb.isPrimary {
		fdb.insertPrimaryIndex(key, docid, workerId)
	} else {
		fdb.insertSecIndex(key, docid, workerId)
	}

	fdb.logWriterStat()
}
//insertPrimaryIndex inserts into a primary index, where only the main
//index is used (no back index). The set is skipped if the key already
//exists, since a primary entry carries no value to update.
//NOTE(review): the existence probe uses key while the log messages
//print docid — presumably key encodes the docid for a primary index;
//confirm against NewPrimaryIndexEntry.
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
	var err error

	logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

	//check if the docid exists in the main index
	t0 := time.Now()
	if _, err = fdb.main[workerId].GetKV(key); err == nil {
		fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
		//skip
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
			"Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
	} else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		//unexpected lookup failure
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"mainindex entry %v", fdb.id, fdb.idxInstId, err)
	} else if err == forestdb.RESULT_KEY_NOT_FOUND {
		//set in main index
		t0 := time.Now()
		if err = fdb.main[workerId].SetKV(key, nil); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
				"Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
		}
		fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
	}
}
//insertSecIndex inserts/updates a secondary index entry. The back index
//maps docid -> encoded secondary key; the main index is keyed by the
//secondary key itself. Per MB-15449, the back index entry is overwritten
//via SetKV (no delete first) since forestdb replaces existing keys;
//an explicit back-index delete happens only when the field value
//transitions to "missing" (key == nil).
func (fdb *fdbSlice) insertSecIndex(key []byte, docid []byte, workerId int) {
	var err error
	var oldkey []byte

	//logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s "+
	//	"Value - %s", fdb.id, fdb.idxInstId, k, v)

	//check if the docid exists in the back index
	if oldkey, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"backindex entry %v", fdb.id, fdb.idxInstId, err)
		return
	} else if oldkey != nil {
		//If old-key from backindex matches with the new-key
		//in mutation, skip it.
		if bytes.Equal(oldkey, key) {
			logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received Unchanged Key for "+
				"Doc Id %v. Key %v. Skipped.", fdb.id, fdb.idxInstId, string(docid), key)
			return
		}

		//there is already an entry in main index for this docid
		//delete from main index
		t0 := time.Now()
		if err = fdb.main[workerId].DeleteKV(oldkey); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
				"entry from main index %v", fdb.id, fdb.idxInstId, err)
			return
		}
		fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.delete_bytes, int64(len(oldkey)))

		// If a field value changed from "existing" to "missing" (ie, key = nil),
		// we need to remove back index entry corresponding to the previous "existing" value.
		if key == nil {
			t0 := time.Now()
			if err = fdb.back[workerId].DeleteKV(docid); err != nil {
				fdb.checkFatalDbError(err)
				logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error deleting "+
					"entry from back index %v", fdb.id, fdb.idxInstId, err)
				return
			}
			fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
			platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
		}
	}

	//nil key means the indexed field is missing; nothing to store
	if key == nil {
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %s. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//set the back index entry <docid, encodedkey>
	t0 := time.Now()
	if err = fdb.back[workerId].SetKV(docid, key); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Back Index Set. "+
			"Skipped Key %s. Value %v. Error %v", fdb.id, fdb.idxInstId, string(docid), key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(docid)+len(key)))

	t0 = time.Now()
	//set in main index
	if err = fdb.main[workerId].SetKV(key, nil); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
			"Skipped Key %v. Error %v", fdb.id, fdb.idxInstId, key, err)
		return
	}
	fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
}
//delete does the actual delete in forestdb, dispatching to the
//primary- or secondary-index variant, then records the mutation in
//the flushed-count stat.
func (fdb *fdbSlice) delete(docid []byte, workerId int) {

	if fdb.isPrimary {
		fdb.deletePrimaryIndex(docid, workerId)
	} else {
		fdb.deleteSecIndex(docid, workerId)
	}

	fdb.logWriterStat()
}
//deletePrimaryIndex removes a document from a primary index, where only
//the main index exists. The docid is converted to its entry form via
//NewPrimaryIndexEntry; a nil docid is a programming error and crashes.
func (fdb *fdbSlice) deletePrimaryIndex(docid []byte, workerId int) {
	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	if docid == nil {
		common.CrashOnError(errors.New("Nil Primary Key"))
		return
	}

	//docid -> key format
	entry, err := NewPrimaryIndexEntry(docid)
	common.CrashOnError(err)

	//delete from main index
	t0 := time.Now()
	if err := fdb.main[workerId].DeleteKV(entry.Bytes()); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Error %v", fdb.id, fdb.idxInstId,
			docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(entry.Bytes())))
}
//deleteSecIndex removes all traces of a document from a secondary index:
//the main (forward) index entry keyed by the previously stored secondary
//key, and the back (reverse) index entry keyed by the docid.
//Errors are recorded via checkFatalDbError and logged; nothing is returned.
func (fdb *fdbSlice) deleteSecIndex(docid []byte, workerId int) {
	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	var olditm []byte
	var err error

	//look up the secondary key last stored for this docid
	if olditm, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error locating "+
			"backindex entry for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}

	//if the oldkey is nil, nothing needs to be done. This is the case of deletes
	//which happened before index was created.
	if olditm == nil {
		logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %v. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//delete from main index (keyed by the old secondary key)
	t0 := time.Now()
	if err = fdb.main[workerId].DeleteKV(olditm); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Key %v. Error %v", fdb.id, fdb.idxInstId,
			docid, olditm, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(olditm)))

	//delete from the back index (keyed by docid)
	t0 = time.Now()
	if err = fdb.back[workerId].DeleteKV(docid); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from back index for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Now().Sub(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
}
//getBackIndexEntry returns the secondary key previously stored for
//docid in the back index, or nil when no entry exists. A missing key
//is not treated as an error.
func (fdb *fdbSlice) getBackIndexEntry(docid []byte, workerId int) ([]byte, error) {
	// logging.Tracef("ForestDBSlice::getBackIndexEntry \n\tSliceId %v IndexInstId %v Get BackIndex Key - %s",
	// 	fdb.id, fdb.idxInstId, docid)

	t0 := time.Now()
	itm, err := fdb.back[workerId].GetKV(docid)
	fdb.idxStats.Timings.stKVGet.Put(time.Since(t0))
	platform.AddInt64(&fdb.get_bytes, int64(len(itm)))

	//forestdb reports a get on a non-existent key as an error;
	//map that case to a nil entry instead of failing
	if err == forestdb.RESULT_KEY_NOT_FOUND {
		return itm, nil
	}
	if err != nil {
		return nil, err
	}
	return itm, nil
}
//checkFatalDbError checks if the error returned from DB
//is fatal and stores it. This error will be returned
//to caller on next DB operation
func (fdb *fdbSlice) checkFatalDbError(err error) {

	//panic on all DB errors and recover rather than risk
	//inconsistent db state
	common.CrashOnError(err)

	// NOTE(review): if CrashOnError panics for every non-nil err (as its
	// name suggests), the switch below is unreachable for non-nil errors —
	// confirm CrashOnError's behavior before relying on fatalDbErr.
	errStr := err.Error()
	switch errStr {
	// fatal error strings as reported by forestdb
	case "checksum error", "file corruption", "no db instance",
		"alloc fail", "seek fail", "fsync fail":
		fdb.fatalDbErr = err
	}
}
// OpenSnapshot materializes an open snapshot handle from the given
// snapshot info (as obtained from NewSnapshot() or GetSnapshots()).
// Returns an error if the snapshot handle cannot be created.
func (fdb *fdbSlice) OpenSnapshot(info SnapshotInfo) (Snapshot, error) {
	snapInfo := info.(*fdbSnapshotInfo)

	// Fields common to primary and secondary indexes.
	snap := &fdbSnapshot{
		slice:      fdb,
		idxDefnId:  fdb.idxDefnId,
		idxInstId:  fdb.idxInstId,
		main:       fdb.main[0],
		ts:         snapInfo.Timestamp(),
		mainSeqNum: snapInfo.MainSeq,
		committed:  info.IsCommitted(),
	}

	// Secondary indexes additionally carry the back-index handle.
	if !fdb.isPrimary {
		snap.back = fdb.back[0]
		snap.backSeqNum = snapInfo.BackSeq
	}

	// Committed snapshots also reference the meta store.
	if info.IsCommitted() {
		snap.meta = fdb.meta
		snap.metaSeqNum = snapInfo.MetaSeq
	}

	logging.Debugf("ForestDBSlice::OpenSnapshot \n\tSliceId %v IndexInstId %v Creating New "+
		"Snapshot %v committed:%v", fdb.id, fdb.idxInstId, snapInfo, snap.committed)

	err := snap.Create()
	return snap, err
}
// setCommittedCount refreshes the cached committed-document count
// from the main index's current document count.
func (fdb *fdbSlice) setCommittedCount() {
	mainDbInfo, err := fdb.main[0].Info()
	if err != nil {
		logging.Errorf("ForestDB setCommittedCount failed %v", err)
		return
	}
	platform.StoreUint64(&fdb.committedCount, mainDbInfo.DocCount())
}
// GetCommittedCount returns the cached count of documents in the main
// index as of the last commit (updated by setCommittedCount).
func (fdb *fdbSlice) GetCommittedCount() uint64 {
	return platform.LoadUint64(&fdb.committedCount)
}
//Rollback slice to given snapshot. Return error if
//not possible
func (fdb *fdbSlice) Rollback(info SnapshotInfo) error {

	//get the seqnum from snapshot
	snapInfo := info.(*fdbSnapshotInfo)

	//drop every snapshot newer than the rollback target from the
	//persisted snapshot list
	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	sic := NewSnapshotInfoContainer(infos)
	sic.RemoveRecentThanTS(info.Timestamp())

	//rollback meta-store first, if main/back index rollback fails, recovery
	//will pick up the rolled-back meta information.
	err = fdb.meta.Rollback(snapInfo.MetaSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Meta Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}

	//call forestdb to rollback for each kv store
	err = fdb.main[0].Rollback(snapInfo.MainSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Main Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}

	//refresh the cached committed doc count after the rollback
	fdb.setCommittedCount()

	//rollback back-index only for non-primary indexes
	if !fdb.isPrimary {
		err = fdb.back[0].Rollback(snapInfo.BackSeq)
		if err != nil {
			logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
				"Back Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
			return err
		}
	}

	// Update valid snapshot list and commit
	err = fdb.updateSnapshotsMeta(sic.List())
	if err != nil {
		return err
	}

	return fdb.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
}
//RollbackToZero rollbacks the slice to initial state. Return error if
//not possible
func (fdb *fdbSlice) RollbackToZero() error {

	zeroSeqNum := forestdb.SeqNum(0)
	var err error

	//rollback meta-store first, if main/back index rollback fails, recovery
	//will pick up the rolled-back meta information.
	//(log tags fixed: they previously said "Rollback", hiding which
	//method actually failed)
	err = fdb.meta.Rollback(zeroSeqNum)
	if err != nil {
		logging.Errorf("ForestDBSlice::RollbackToZero \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Meta Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
		return err
	}

	//call forestdb to rollback
	err = fdb.main[0].Rollback(zeroSeqNum)
	if err != nil {
		logging.Errorf("ForestDBSlice::RollbackToZero \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Main Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
		return err
	}

	//refresh the cached committed doc count after the rollback
	fdb.setCommittedCount()

	//rollback back-index only for non-primary indexes
	if !fdb.isPrimary {
		err = fdb.back[0].Rollback(zeroSeqNum)
		if err != nil {
			logging.Errorf("ForestDBSlice::RollbackToZero \n\tSliceId %v IndexInstId %v. Error Rollback "+
				"Back Index to Zero. Error %v", fdb.id, fdb.idxInstId, err)
			return err
		}
	}

	return nil
}
//slice insert/delete methods are async. There
//can be outstanding mutations in internal queue to flush even
//after insert/delete have returned success to caller.
//This method provides a mechanism to wait till internal
//queue is empty.
func (fdb *fdbSlice) waitPersist() {

	//every SLICE_COMMIT_POLL_INTERVAL milliseconds,
	//check for outstanding mutations. If there are
	//none, proceed with the commit.
	ticker := time.NewTicker(time.Millisecond * SLICE_COMMIT_POLL_INTERVAL)
	//the original never stopped the ticker, leaking its timer on
	//every call; Stop releases it once we are done polling
	defer ticker.Stop()

	for range ticker.C {
		if fdb.checkAllWorkersDone() {
			break
		}
	}
}
//NewSnapshot persists the outstanding writes in the underlying
//forestdb database and returns info describing the new snapshot.
//If commit fails, the slice should be rolled back to the previous
//snapshot.
func (fdb *fdbSlice) NewSnapshot(ts *common.TsVbuuid, commit bool) (SnapshotInfo, error) {

	//block until the internal mutation queue has drained
	fdb.waitPersist()

	mainDbInfo, err := fdb.main[0].Info()
	if err != nil {
		return nil, err
	}

	newSnapshotInfo := &fdbSnapshotInfo{
		Ts:        ts,
		MainSeq:   mainDbInfo.LastSeqNum(),
		Committed: commit,
	}

	//for non-primary index add info for back-index
	if !fdb.isPrimary {
		backDbInfo, err := fdb.back[0].Info()
		if err != nil {
			return nil, err
		}
		newSnapshotInfo.BackSeq = backDbInfo.LastSeqNum()
	}

	if commit {
		metaDbInfo, err := fdb.meta.Info()
		if err != nil {
			return nil, err
		}
		//the next meta seqno after this update
		newSnapshotInfo.MetaSeq = metaDbInfo.LastSeqNum() + 1

		//add the new snapshot to the persisted snapshot list
		infos, err := fdb.getSnapshotsMeta()
		if err != nil {
			return nil, err
		}
		sic := NewSnapshotInfoContainer(infos)
		sic.Add(newSnapshotInfo)

		//cap retained snapshots at the configured rollback limit
		fdb.confLock.Lock()
		maxRollbacks := fdb.sysconf["settings.recovery.max_rollbacks"].Int()
		fdb.confLock.Unlock()

		if sic.Len() > maxRollbacks {
			sic.RemoveOldest()
		}

		// Meta update should be done before commit
		// Otherwise, metadata will not be atomically updated along with disk commit.
		err = fdb.updateSnapshotsMeta(sic.List())
		if err != nil {
			return nil, err
		}

		// Commit database file
		start := time.Now()
		err = fdb.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
		elapsed := time.Since(start)

		fdb.idxStats.Timings.stCommit.Put(elapsed)
		fdb.totalCommitTime += elapsed
		logging.Debugf("ForestDBSlice::Commit \n\tSliceId %v IndexInstId %v TotalFlushTime %v "+
			"TotalCommitTime %v", fdb.id, fdb.idxInstId, fdb.totalFlushTime, fdb.totalCommitTime)

		if err != nil {
			logging.Errorf("ForestDBSlice::Commit \n\tSliceId %v IndexInstId %v Error in "+
				"Index Commit %v", fdb.id, fdb.idxInstId, err)
			return nil, err
		}

		fdb.setCommittedCount()
	}

	return newSnapshotInfo, nil
}
//checkAllWorkersDone reports whether every flusher worker has
//finished processing its queued mutations.
func (fdb *fdbSlice) checkAllWorkersDone() bool {

	//pending mutations in the command channel mean the workers
	//cannot possibly be done yet
	if len(fdb.cmdCh) > 0 {
		return false
	}

	//the queue is drained; ping each worker and wait for its ack so
	//we know the last mutation has been fully processed
	for w := 0; w < fdb.numWriters; w++ {
		fdb.workerDone[w] <- true
		<-fdb.workerDone[w]
	}

	return true
}
// Close shuts down the slice: it stops all command-handler routines
// and closes the underlying stores. If references are still held the
// close is deferred (soft close) until the last reference is dropped.
func (fdb *fdbSlice) Close() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	logging.Infof("ForestDBSlice::Close \n\tClosing Slice Id %v, IndexInstId %v, "+
		"IndexDefnId %v", fdb.idxInstId, fdb.idxDefnId, fdb.id)

	// Signal shutdown to each command handler and wait for its ack.
	for w := 0; w < fdb.numWriters; w++ {
		fdb.stopCh[w] <- true
		<-fdb.stopCh[w]
	}

	// With outstanding references only mark for close; the real close
	// happens when the last reference is released.
	if fdb.refCount > 0 {
		fdb.isSoftClosed = true
		return
	}
	tryCloseFdbSlice(fdb)
}
//Destroy removes the database file from disk.
//Slice is not recoverable after this. If references are still held,
//deletion is deferred (soft delete) until the last reference drops.
func (fdb *fdbSlice) Destroy() {
	fdb.lock.Lock()
	defer fdb.lock.Unlock()

	if fdb.refCount > 0 {
		logging.Infof("ForestDBSlice::Destroy \n\tSoftdeleted Slice Id %v, IndexInstId %v, "+
			"IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		fdb.isSoftDeleted = true
		return
	}
	tryDeleteFdbSlice(fdb)
}
//Id returns the SliceId of this slice.
func (fdb *fdbSlice) Id() SliceId {
	return fdb.id
}
// Path returns the filesystem path backing this slice.
func (fdb *fdbSlice) Path() string {
	return fdb.path
}
//IsActive reports whether the slice is currently active.
func (fdb *fdbSlice) IsActive() bool {
	return fdb.isActive
}
//SetActive sets the active state of this slice.
func (fdb *fdbSlice) SetActive(isActive bool) {
	fdb.isActive = isActive
}
//Status returns the current status of this slice.
func (fdb *fdbSlice) Status() SliceStatus {
	return fdb.status
}
//SetStatus sets a new status for this slice.
func (fdb *fdbSlice) SetStatus(status SliceStatus) {
	fdb.status = status
}
//IndexInstId returns the Index InstanceId this
//slice is associated with
func (fdb *fdbSlice) IndexInstId() common.IndexInstId {
	return fdb.idxInstId
}
//IndexDefnId returns the Index DefnId this slice
//is associated with
func (fdb *fdbSlice) IndexDefnId() common.IndexDefnId {
	return fdb.idxDefnId
}
// GetSnapshots returns the list of snapshot infos persisted in the
// meta store for this slice.
func (fdb *fdbSlice) GetSnapshots() ([]SnapshotInfo, error) {
	return fdb.getSnapshotsMeta()
}
// Compact compacts the slice's forestdb file up to the oldest retained
// snapshot, swaps in the compacted file and reopens the read-only stat
// handle on it. The fragmentation removed by compaction is remembered
// so Statistics() can report an adjusted fragmentation percentage.
func (fdb *fdbSlice) Compact() error {

	fdb.IncrRef()
	defer fdb.DecrRef()

	//get oldest snapshot upto which compaction can be done
	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	sic := NewSnapshotInfoContainer(infos)

	osnap := sic.GetOldest()
	if osnap == nil {
		logging.Infof("ForestDBSlice::Compact No Snapshot Found. Skipped Compaction."+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}

	mainSeq := osnap.(*fdbSnapshotInfo).MainSeq

	//find the db snapshot lower than oldest snapshot
	snap, err := fdb.compactFd.GetAllSnapMarkers()
	if err != nil {
		return err
	}
	defer snap.FreeSnapMarkers()

	var snapMarker *forestdb.SnapMarker
	var compactSeqNum forestdb.SeqNum
snaploop:
	for _, s := range snap.SnapInfoList() {

		cm := s.GetKvsCommitMarkers()
		for _, c := range cm {
			//if seqNum of "main" kvs is less than or equal to oldest snapshot seqnum
			//it is safe to compact upto that snapshot
			if c.GetKvStoreName() == "main" && c.GetSeqNum() <= mainSeq {
				snapMarker = s.GetSnapMarker()
				compactSeqNum = c.GetSeqNum()
				break snaploop
			}
		}
	}

	if snapMarker == nil {
		logging.Infof("ForestDBSlice::Compact No Valid SnapMarker Found. Skipped Compaction."+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}
	logging.Infof("ForestDBSlice::Compact Compacting upto SeqNum %v. "+
		"Slice Id %v, IndexInstId %v, IndexDefnId %v", compactSeqNum, fdb.id,
		fdb.idxInstId, fdb.idxDefnId)

	newpath := newFdbFile(fdb.path, true)

	// Remove any existing files leftover due to a crash during last
	// compaction attempt. Best effort; CompactUpto will fail below if
	// the file is genuinely in the way.
	os.Remove(newpath)

	if err = fdb.compactFd.CompactUpto(newpath, snapMarker); err != nil {
		return err
	}

	//remove the pre-compaction file; its removal error was previously
	//assigned to err and silently discarded — log it instead
	if _, e := os.Stat(fdb.currfile); e == nil {
		if rmErr := os.Remove(fdb.currfile); rmErr != nil {
			logging.Errorf("ForestDBSlice::Compact Error Removing Old File %v. Error %v",
				fdb.currfile, rmErr)
		}
	}
	fdb.currfile = newpath

	//previously unchecked: an error here would have poisoned the
	//fragmentation statistic with a garbage size
	diskSz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return err
	}

	//reopen the read-only stat handle on the compacted file
	config := forestdb.DefaultConfig()
	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	fdb.statFd.Close()
	if fdb.statFd, err = forestdb.Open(fdb.currfile, config); err != nil {
		return err
	}

	dataSz := int64(fdb.statFd.EstimateSpaceUsed())
	//guard against division by zero on an empty file
	var frag int64
	if diskSz > 0 {
		frag = (diskSz - dataSz) * 100 / diskSz
	}
	platform.StoreInt64(&fdb.fragAfterCompaction, frag)

	return nil
}
// Statistics reports the storage statistics of this slice: logical data
// size, on-disk size, an approximate fragmentation percentage and the
// cumulative get/insert/delete byte counters.
func (fdb *fdbSlice) Statistics() (StorageStatistics, error) {
	var sts StorageStatistics

	sz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return sts, err
	}

	sts.DataSize = int64(fdb.statFd.EstimateSpaceUsed())
	sts.DiskSize = sz

	// Compute approximate fragmentation percentage
	// Since we keep multiple index snapshots after compaction, it is not
	// trivial to compute fragmentation as ratio of data size to disk size.
	// Hence we compute approximate fragmentation by removing fragmentation
	// threshold caused as a result of compaction.
	sts.Fragmentation = 0
	// also require DiskSize > 0: the original divided by DiskSize while
	// guarding only DataSize, risking a division by zero
	if sts.DataSize > 0 && sts.DiskSize > 0 {
		sts.Fragmentation = ((sts.DiskSize - sts.DataSize) * 100) / sts.DiskSize
	}

	// discount fragmentation that compaction could not remove
	compactionFrag := platform.LoadInt64(&fdb.fragAfterCompaction)
	sts.Fragmentation -= compactionFrag
	if sts.Fragmentation < 0 {
		sts.Fragmentation = 0
	}

	sts.GetBytes = platform.LoadInt64(&fdb.get_bytes)
	sts.InsertBytes = platform.LoadInt64(&fdb.insert_bytes)
	sts.DeleteBytes = platform.LoadInt64(&fdb.delete_bytes)

	return sts, nil
}
// UpdateConfig replaces the slice's system configuration under the
// config lock; readers (e.g. NewSnapshot) take the same lock.
func (fdb *fdbSlice) UpdateConfig(cfg common.Config) {
	fdb.confLock.Lock()
	defer fdb.confLock.Unlock()
	fdb.sysconf = cfg
}
// String returns a human-readable description of the slice
// (id, file path and index instance).
func (fdb *fdbSlice) String() string {
	return fmt.Sprintf("SliceId: %v File: %v Index: %v ",
		fdb.id, fdb.path, fdb.idxInstId)
}
// updateSnapshotsMeta persists the given snapshot info list into the
// meta store (under snapshotMetaListKey), serialized as JSON.
// The goto-based error handling of the original is replaced with
// straight-line returns.
func (fdb *fdbSlice) updateSnapshotsMeta(infos []SnapshotInfo) error {
	fdb.metaLock.Lock()
	defer fdb.metaLock.Unlock()

	val, err := json.Marshal(infos)
	if err == nil {
		err = fdb.meta.SetKV(snapshotMetaListKey, val)
	}
	if err != nil {
		return errors.New("Failed to update snapshots list -" + err.Error())
	}
	return nil
}
// getSnapshotsMeta loads the persisted snapshot info list from the meta
// store. A missing list is not an error and yields a nil slice.
// The goto-based error handling of the original is replaced with
// straight-line returns.
func (fdb *fdbSlice) getSnapshotsMeta() ([]SnapshotInfo, error) {
	fdb.metaLock.Lock()
	defer fdb.metaLock.Unlock()

	data, err := fdb.meta.GetKV(snapshotMetaListKey)
	if err != nil {
		if err == forestdb.RESULT_KEY_NOT_FOUND {
			// No snapshot list stored yet - not an error.
			return []SnapshotInfo(nil), nil
		}
		return nil, err
	}

	var tmp []*fdbSnapshotInfo
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, errors.New("Failed to retrieve snapshots list -" + err.Error())
	}

	// Re-wrap the concrete infos as the SnapshotInfo interface.
	var snapList []SnapshotInfo
	for i := range tmp {
		snapList = append(snapList, tmp[i])
	}
	return snapList, nil
}
// tryDeleteFdbSlice destroys the slice's forestdb file and removes its
// directory from disk. Failures are logged but not propagated.
func tryDeleteFdbSlice(fdb *fdbSlice) {
	logging.Infof("ForestDBSlice::Destroy \n\tDestroying Slice Id %v, IndexInstId %v, "+
		"IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)

	err := forestdb.Destroy(fdb.currfile, fdb.config)
	if err != nil {
		logging.Errorf("ForestDBSlice::Destroy \n\t Error Destroying Slice Id %v, "+
			"IndexInstId %v, IndexDefnId %v. Error %v", fdb.id, fdb.idxInstId, fdb.idxDefnId, err)
	}

	//cleanup the disk directory
	if err = os.RemoveAll(fdb.path); err != nil {
		logging.Errorf("ForestDBSlice::Destroy \n\t Error Cleaning Up Slice Id %v, "+
			"IndexInstId %v, IndexDefnId %v. Error %v", fdb.id, fdb.idxInstId, fdb.idxDefnId, err)
	}
}
// tryCloseFdbSlice closes every store handle held by the slice.
// All handles are nil-guarded for consistency: the original guarded
// main/back/meta but would panic on a nil statFd/compactFd/dbfile.
func tryCloseFdbSlice(fdb *fdbSlice) {
	//close the main index
	if fdb.main[0] != nil {
		fdb.main[0].Close()
	}

	//close the back index (non-primary indexes only)
	if !fdb.isPrimary && fdb.back[0] != nil {
		fdb.back[0].Close()
	}

	if fdb.meta != nil {
		fdb.meta.Close()
	}

	if fdb.statFd != nil {
		fdb.statFd.Close()
	}
	if fdb.compactFd != nil {
		fdb.compactFd.Close()
	}
	if fdb.dbfile != nil {
		fdb.dbfile.Close()
	}
}
func newFdbFile(dirpath string, newVersion bool) string {
var version int = 0
pattern := fmt.Sprintf("data.fdb.*")
files, _ := filepath.Glob(filepath.Join(dirpath, pattern))
sort.Strings(files)
// Pick the first file with least version
if len(files) > 0 {
filename := filepath.Base(files[0])
_, err := fmt.Sscanf(filename, "data.fdb.%d", &version)
if err != nil {
panic(fmt.Sprintf("Invalid data file %s (%v)", files[0], err))
}
}
if newVersion {
version++
}
newFilename := fmt.Sprintf("data.fdb.%d", version)
return filepath.Join(dirpath, newFilename)
}
// logWriterStat bumps the flushed-mutation counter and, on the first
// mutation and every 10000th thereafter, logs flush/queue statistics.
func (fdb *fdbSlice) logWriterStat() {
	count := platform.AddUint64(&fdb.flushedCount, 1)
	if count == 1 || count%10000 == 0 {
		logging.Infof("logWriterStat:: %v "+
			"FlushedCount %v QueuedCount %v", fdb.idxInstId,
			count, len(fdb.cmdCh))
	}
}
|
// Copyright 2014 Volker Dobler. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ht
import (
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/vdobler/ht/third_party/json5"
)
var (
	// DefaultUserAgent is the user agent string to send in http requests
	// if no user agent header is specified explicitly.
	DefaultUserAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36"

	// DefaultAccept is the accept header to be sent if no accept header
	// is set explicitly in the test.
	DefaultAccept = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"

	// DefaultClientTimeout is the timeout used by the http clients.
	DefaultClientTimeout = Duration(2 * time.Second)
)
// URLValues is a url.Values with a fancier JSON unmarshaling
// (see UnmarshalJSON).
type URLValues url.Values
// UnmarshalJSON produces a url.Values (i.e. a map[string][]string) from
// various JSON5 representations. E.g.
//     {
//        a: 12,
//        b: "foo",
//        c: [ 23, "bar"]
//     }
// can be unmarshaled with the expected result. Numbers and strings are
// accepted as scalar values or inside arrays; anything else is an error.
func (v *URLValues) UnmarshalJSON(data []byte) error {
	raw := map[string]json5.RawMessage{}
	if err := json5.Unmarshal(data, &raw); err != nil {
		return err
	}

	vals := make(url.Values)
	for name, msg := range raw {
		var decoded interface{}
		if err := json5.Unmarshal(msg, &decoded); err != nil {
			return err
		}
		switch val := decoded.(type) {
		case float64:
			vals[name] = []string{float64ToString(val)}
		case string:
			vals[name] = []string{val}
		case []interface{}:
			list := []string{}
			for _, elem := range val {
				switch e := elem.(type) {
				case float64:
					list = append(list, float64ToString(e))
				case string:
					list = append(list, e)
				default:
					return fmt.Errorf("ht: illegal url query value %v of type %T for query parameter %s", elem, e, name)
				}
			}
			vals[name] = list
		default:
			return fmt.Errorf("ht: illegal url query value %v of type %T for query parameter %s", decoded, val, name)
		}
	}

	*v = URLValues(vals)
	return nil
}
// float64ToString formats f without a decimal part when it is (within
// 1e-6 of) a whole number, and with %g formatting otherwise.
func float64ToString(f float64) string {
	whole := math.Trunc(f)
	if math.Abs(whole-f) >= 1e-6 {
		return fmt.Sprintf("%g", f)
	}
	return strconv.Itoa(int(whole))
}
// Request is a HTTP request.
type Request struct {
	// Method is the HTTP method to use.
	// An empty method is equivalent to "GET"
	Method string `json:",omitempty"`

	// URL is the URL of the request.
	URL string

	// Params contains the parameters and their values to send in
	// the request.
	//
	// If the parameters are sent as multipart it is possible to include
	// files by letting the parameter values start with "@file:". Two
	// versions are possible: "@file:path/to/file" will send a file read
	// from the given filesystem path while "@file:@name:the-file-data"
	// will use the-file-data as the content.
	Params URLValues `json:",omitempty"`

	// ParamsAs determines how the parameters in the Param field are sent:
	//   "URL" or "": append properly encoded to URL
	//   "body"     : send as application/x-www-form-urlencoded in body.
	//   "multipart": send as multipart in body.
	// The two values "body" and "multipart" must not be used
	// on a GET or HEAD request.
	ParamsAs string `json:",omitempty"`

	// Header contains the specific http headers to be sent in this request.
	// User-Agent and Accept headers are set automatically to the global
	// default values if not set explicitly.
	Header http.Header `json:",omitempty"`

	// Cookies contains the cookies to send in the request.
	Cookies []Cookie `json:",omitempty"`

	// Body is the full body to send in the request. Body must be
	// empty if Params are sent as multipart or form-urlencoded.
	Body string `json:",omitempty"`

	// FollowRedirects determines if automatic following of
	// redirects should be done.
	FollowRedirects bool `json:",omitempty"`

	// Request is the 'real' *http.Request built by prepare/newRequest.
	Request *http.Request // the 'real' request

	// SentBody is the 'real' body as actually transmitted.
	SentBody string // the 'real' body
}
// Response captures information about a http response.
type Response struct {
	// Response is the received HTTP response. Its body has been read and
	// closed already.
	Response *http.Response

	// Duration to receive response and read the whole body.
	Duration Duration

	// The received body and the error got while reading it.
	BodyBytes []byte
	BodyErr   error
}
// Body returns a fresh reader over the already-read response body.
// Each call yields an independent reader positioned at the start.
func (resp *Response) Body() *bytes.Reader {
	return bytes.NewReader(resp.BodyBytes)
}
// Cookie is a name/value pair sent as an HTTP cookie.
type Cookie struct {
	Name  string
	Value string `json:",omitempty"`
}
// Poll determines if and how to redo a test after a failure or if the
// test should be skipped altogether. The zero value of Poll means "Just do
// the test once."
type Poll struct {
	// Maximum number of redos. Both 0 and 1 mean: "Just one try. No redo."
	// Negative values indicate that the test should be skipped.
	Max int `json:",omitempty"`

	// Duration to sleep between redos.
	Sleep Duration `json:",omitempty"`
}
// ClientPool maintains a pool of clients for the given transport
// and cookie jar. ClientPools must not be copied.
type ClientPool struct {
	// Transport will be used as the clients' Transport
	Transport http.RoundTripper

	// Jar will be used as the clients' Jar
	Jar http.CookieJar

	mu sync.Mutex
	// clients are indexed by their timeout. Clients which follow redirects
	// are distinguished by a negative timeout.
	clients map[Duration]*http.Client
}
// ----------------------------------------------------------------------------
// Test

// Test is a single logical test which does one HTTP request and checks
// a number of Checks on the received Response.
type Test struct {
	Name        string
	Description string

	// Request is the HTTP request.
	Request Request

	// Checks contains all checks to perform on the response to the HTTP request.
	Checks CheckList

	// VarEx may be used to populate variables from the response.
	VarEx map[string]Extractor

	Poll      Poll     `json:",omitempty"`
	Timeout   Duration // If zero use DefaultClientTimeout.
	Verbosity int      // Verbosity level in logging.

	// ClientPool allows to inject special http.Transports or a
	// cookie jar to be used by this Test.
	ClientPool *ClientPool `json:"-"`

	Response Response

	// The following results are filled during Run.
	Status       Status        `json:"-"`
	Started      time.Time     `json:"-"`
	Error        error         `json:"-"`
	Duration     Duration      `json:"-"`
	FullDuration Duration      `json:"-"`
	Tries        int           `json:"-"`
	CheckResults []CheckResult `json:"-"` // The individual checks.
	SeqNo        string

	client *http.Client
	checks []Check // prepared checks.
}
// mergeRequest implements the merge strategy described in Merge for the Request.
func mergeRequest(m *Request, r Request) error {
	// sameOrEmpty: a nonempty incoming value must match the already
	// merged one (or the merged one must still be empty).
	sameOrEmpty := func(dst *string, src string) error {
		if src == "" {
			return nil
		}
		if *dst != "" && *dst != src {
			return fmt.Errorf("Cannot merge %q into %q", src, *dst)
		}
		*dst = src
		return nil
	}

	// atMostOne: at most one of the merged tests may set this field.
	atMostOne := func(dst *string, src string) error {
		if src == "" {
			return nil
		}
		if *dst != "" {
			return fmt.Errorf("Won't overwrite %q with %q", *dst, src)
		}
		*dst = src
		return nil
	}

	if err := sameOrEmpty(&m.Method, r.Method); err != nil {
		return err
	}
	if err := atMostOne(&m.URL, r.URL); err != nil {
		return err
	}

	// Params and Header merge by key, last writer wins per key.
	for key, val := range r.Params {
		m.Params[key] = val
	}
	if err := sameOrEmpty(&m.ParamsAs, r.ParamsAs); err != nil {
		return err
	}
	for key, val := range r.Header {
		m.Header[key] = val
	}

	// Cookies merge by name: an incoming cookie overwrites the value of
	// an existing cookie of the same name, otherwise it is appended.
	for _, rc := range r.Cookies {
		merged := false
		for i := range m.Cookies {
			if m.Cookies[i].Name == rc.Name {
				m.Cookies[i].Value = rc.Value
				merged = true
				break
			}
		}
		if !merged {
			m.Cookies = append(m.Cookies, rc)
		}
	}

	if err := atMostOne(&m.Body, r.Body); err != nil {
		return err
	}

	// Last merged request wins for redirect following.
	m.FollowRedirects = r.FollowRedirects

	return nil
}
// Merge merges all tests into one. The individual fields are merged in the
// following way.
//     Name         Join all names
//     Description  Join all descriptions
//     Request
//       Method     All nonempty must be the same
//       URL        Only one may be nonempty
//       Params     Merge by key
//       ParamsAs   All nonempty must be the same
//       Header     Merge by key
//       Cookies    Merge by cookie name
//       Body       Only one may be nonempty
//       FollowRdr  Last wins
//     Checks       Append all checks
//     VarEx        Merge by name; conflicting extractors are an error
//     Poll
//       Max        Use largest
//       Sleep      Use largest
//     Timeout      Use largest
//     Verbosity    Use largest
//     ClientPool   ignore
func Merge(tests ...*Test) (*Test, error) {
	merged := Test{}

	// Join names and descriptions.
	names := make([]string, 0, len(tests))
	descriptions := make([]string, 0, len(tests))
	for _, t := range tests {
		names = append(names, t.Name)
		descriptions = append(descriptions, t.Description)
	}
	merged.Name = "Merge of " + strings.Join(names, ", ")
	merged.Description = strings.Join(descriptions, "\n")

	merged.Request.Params = make(URLValues)
	merged.Request.Header = make(http.Header)
	merged.VarEx = make(map[string]Extractor)

	for _, t := range tests {
		if err := mergeRequest(&merged.Request, t.Request); err != nil {
			return &merged, err
		}
		merged.Checks = append(merged.Checks, t.Checks...)
		if t.Poll.Max > merged.Poll.Max {
			merged.Poll.Max = t.Poll.Max
		}
		if t.Poll.Sleep > merged.Poll.Sleep {
			merged.Poll.Sleep = t.Poll.Sleep
		}
		if t.Timeout > merged.Timeout {
			merged.Timeout = t.Timeout
		}
		if t.Verbosity > merged.Verbosity {
			merged.Verbosity = t.Verbosity
		}
		for name, value := range t.VarEx {
			if old, ok := merged.VarEx[name]; ok && old != value {
				return &merged, fmt.Errorf("wont overwrite extractor for %s", name)
			}
			merged.VarEx[name] = value
		}
	}

	return &merged, nil
}
// Run runs the test t. The actual HTTP request is crafted and executed and
// the checks are performed on the received response. This whole process
// is repeated on failure or skipped entirely according to t.Poll.
//
// The given variables are substituted into the relevant parts of the request
// and the checks.
//
// Normally all checks in t.Checks are executed. If the first check in
// t.Checks is a StatusCode check against 200 and it fails, then the rest of
// the tests are skipped.
//
// Run returns a non-nil error only if the test is bogus; a failing http
// request, problems reading the body or any failing checks do not trigger a
// non-nil return value.
func (t *Test) Run(variables map[string]string) error {
	t.Started = time.Now()

	// Pre-populate per-check bookkeeping with name and JSON dump.
	t.CheckResults = make([]CheckResult, len(t.Checks)) // Zero value is NotRun
	for i, c := range t.Checks {
		t.CheckResults[i].Name = NameOf(c)
		buf, err := json5.Marshal(c)
		if err != nil {
			buf = []byte(err.Error())
		}
		t.CheckResults[i].JSON = string(buf)
	}

	maxTries := t.Poll.Max
	if maxTries == 0 {
		maxTries = 1
	}
	if maxTries < 0 {
		// This test is deliberately skipped. A zero duration is okay.
		t.Status = Skipped
		return nil
	}

	// Try until first success.
	start := time.Now()
	try := 1
	for ; try <= maxTries; try++ {
		t.Tries = try
		if try > 1 {
			// Sleep between polling attempts, not before the first one.
			time.Sleep(time.Duration(t.Poll.Sleep))
		}
		err := t.prepare(variables)
		if err != nil {
			t.Status, t.Error = Bogus, err
			return err
		}
		t.execute()
		if t.Status == Pass {
			break
		}
	}
	t.Duration = Duration(time.Since(start))
	if t.Poll.Max > 1 {
		if t.Status == Pass {
			t.debugf("polling succeded after %d tries", try)
		} else {
			t.debugf("polling failed all %d tries", maxTries)
		}
	}

	t.infof("test %s (%s %s)", t.Status, t.Duration, t.Response.Duration)

	t.FullDuration = Duration(time.Since(t.Started))
	return nil
}
// execute does a single request and checks the response; the outcome is
// recorded in t.Status / t.Error / t.CheckResults.
func (t *Test) execute() {
	if err := t.executeRequest(); err != nil {
		// Transport-level failure: the checks never run.
		t.Status = Error
		t.Error = err
		return
	}
	if len(t.Checks) == 0 {
		// Nothing to verify; a successful request passes.
		t.Status = Pass
		return
	}
	t.executeChecks(t.CheckResults)
}
// prepare the test for execution by substituting the given variables,
// crafting the underlying http request and compiling the checks.
func (t *Test) prepare(variables map[string]string) error {
	// Create appropriate replacer from the caller-supplied and the
	// automatic "now" variables.
	nowVars := t.nowVariables(time.Now())
	allVars := mergeVariables(variables, nowVars)
	repl, err := newReplacer(allVars)
	if err != nil {
		return err
	}

	// Create the request.
	contentType, err := t.newRequest(repl)
	if err != nil {
		err := fmt.Errorf("failed preparing request: %s", err.Error())
		t.errorf("%s", err.Error())
		return err
	}

	// Make a deep copy of the header and set standard stuff and cookies.
	t.Request.Request.Header = make(http.Header)
	for h, v := range t.Request.Header {
		rv := make([]string, len(v))
		for i := range v {
			rv[i] = repl.str.Replace(v[i])
		}
		t.Request.Request.Header[h] = rv
	}

	// Fall back to defaults for Content-Type, Accept and User-Agent
	// unless set explicitly.
	if t.Request.Request.Header.Get("Content-Type") == "" && contentType != "" {
		t.Request.Request.Header.Set("Content-Type", contentType)
	}
	if t.Request.Request.Header.Get("Accept") == "" {
		t.Request.Request.Header.Set("Accept", DefaultAccept)
	}
	if t.Request.Request.Header.Get("User-Agent") == "" {
		t.Request.Request.Header.Set("User-Agent", DefaultUserAgent)
	}
	for _, cookie := range t.Request.Cookies {
		cv := repl.str.Replace(cookie.Value)
		t.Request.Request.AddCookie(&http.Cookie{Name: cookie.Name, Value: cv})
	}

	// Compile the checks; collect indices and messages of all bogus
	// checks so they can be reported together.
	t.checks = make([]Check, len(t.Checks))
	cfc, cfce := []int{}, []string{}
	for i := range t.Checks {
		t.checks[i] = SubstituteVariables(t.Checks[i], repl.str, repl.fn)
		e := t.checks[i].Prepare()
		if e != nil {
			cfc = append(cfc, i)
			cfce = append(cfce, e.Error())
			t.errorf("preparing check %d %q: %s",
				i, NameOf(t.Checks[i]), e.Error())
		}
	}
	if len(cfc) != 0 {
		err := fmt.Errorf("bogus checks %v: %s", cfc, strings.Join(cfce, "; "))
		return err
	}

	// Pick an http.Client: from the pool if one is injected, otherwise a
	// fresh client honoring the redirect policy and timeout.
	to := DefaultClientTimeout
	if t.Timeout > 0 {
		to = t.Timeout
	}
	if t.ClientPool != nil {
		t.tracef("Taking client from pool")
		t.client = t.ClientPool.Get(to, t.Request.FollowRedirects)
	} else if t.Request.FollowRedirects {
		t.client = &http.Client{CheckRedirect: doFollowRedirects, Timeout: time.Duration(to)}
	} else {
		t.client = &http.Client{CheckRedirect: dontFollowRedirects, Timeout: time.Duration(to)}
	}
	return nil
}
// newRequest sets up the request field of t: it substitutes variables
// in URL and parameter values, encodes the parameters according to
// t.Request.ParamsAs and constructs the underlying *http.Request.
// If a special Content-Type header is needed (e.g. because of a
// multipart body) it is returned.
func (t *Test) newRequest(repl replacer) (contentType string, err error) {
	// Prepare request method.
	if t.Request.Method == "" {
		t.Request.Method = "GET"
	}

	rurl := repl.str.Replace(t.Request.URL)

	// Substitute variables in all parameter values.
	urlValues := make(URLValues)
	for param, vals := range t.Request.Params {
		rv := make([]string, len(vals))
		for i, v := range vals {
			rv[i] = repl.str.Replace(v)
		}
		urlValues[param] = rv
	}

	var body io.ReadCloser
	if len(t.Request.Params) > 0 {
		if t.Request.ParamsAs == "body" || t.Request.ParamsAs == "multipart" {
			if t.Request.Method == "GET" || t.Request.Method == "HEAD" {
				err := fmt.Errorf("%s does not allow body or multipart parameters", t.Request.Method)
				return "", err
			}
			if t.Request.Body != "" {
				err := fmt.Errorf("body used with body/multipart parameters")
				return "", err
			}
		}
		switch t.Request.ParamsAs {
		case "URL", "":
			// Append the encoded parameters to the query string.
			// (strings.Contains is the idiomatic form of Index != -1.)
			if strings.Contains(rurl, "?") {
				rurl += "&" + url.Values(urlValues).Encode()
			} else {
				rurl += "?" + url.Values(urlValues).Encode()
			}
		case "body":
			contentType = "application/x-www-form-urlencoded"
			encoded := url.Values(urlValues).Encode()
			t.Request.SentBody = encoded
			body = ioutil.NopCloser(strings.NewReader(encoded))
		case "multipart":
			// NOTE(review): the multipart body is built from the raw
			// (unsubstituted) Params — confirm this is intended.
			b, boundary, err := multipartBody(t.Request.Params)
			if err != nil {
				return "", err
			}
			// Read the whole multipart body so it can be recorded in SentBody.
			bb, err := ioutil.ReadAll(b)
			if err != nil {
				return "", err
			}
			t.Request.SentBody = string(bb)
			body = ioutil.NopCloser(bytes.NewReader(bb))
			contentType = "multipart/form-data; boundary=" + boundary
		default:
			err := fmt.Errorf("unknown parameter method %q", t.Request.ParamsAs)
			return "", err
		}
	}

	// An explicit body overrides; it may contain variables too.
	if t.Request.Body != "" {
		rbody := repl.str.Replace(t.Request.Body)
		t.Request.SentBody = rbody
		body = ioutil.NopCloser(strings.NewReader(rbody))
	}

	t.Request.Request, err = http.NewRequest(t.Request.Method, rurl, body)
	if err != nil {
		return "", err
	}

	return contentType, nil
}
var (
	// redirectNofollow is the sentinel error returned by the
	// CheckRedirect policy of clients that must not follow redirects;
	// executeRequest clears it from transport errors.
	redirectNofollow = errors.New("we do not follow redirects")
)
// executeRequest performs the HTTP request defined in t which must have been
// prepared by Prepare. Executing an unprepared Test will panic.
// The response, its (possibly gunzipped) body and the timing are
// recorded in t.Response; the returned error reflects transport-level
// failures only.
func (t *Test) executeRequest() error {
	t.debugf("requesting %q", t.Request.Request.URL.String())

	var err error
	start := time.Now()

	resp, err := t.client.Do(t.Request.Request)
	if ue, ok := err.(*url.Error); ok && ue.Err == redirectNofollow &&
		!t.Request.FollowRedirects {
		// Clear err if it is just our redirect non-following policy.
		err = nil
	}

	t.Response.Response = resp
	msg := "okay"
	if err == nil {
		// Transparently unzip a gzip-encoded body.
		reader := io.ReadCloser(resp.Body)
		if resp.Header.Get("Content-Encoding") == "gzip" {
			zr, zerr := gzip.NewReader(resp.Body)
			if zerr != nil {
				// Bad gzip stream: record the error and fall back to
				// the raw body. (The original kept the nil gzip reader
				// and panicked in ReadAll below.)
				t.Response.BodyErr = zerr
			} else {
				t.tracef("Unzipping gzip body")
				reader = zr
			}
		}
		bodyBytes, readErr := ioutil.ReadAll(reader)
		reader.Close()
		t.Response.BodyBytes = bodyBytes
		// Keep the gzip setup error if there was one; otherwise record
		// the read error (the original clobbered the gzip error here).
		if t.Response.BodyErr == nil {
			t.Response.BodyErr = readErr
		}
	} else {
		msg = fmt.Sprintf("fail %s", err.Error())
	}

	t.Response.Duration = Duration(time.Since(start))

	t.debugf("request took %s, %s", t.Response.Duration, msg)

	return err
}
// executeChecks applies the checks in t to the HTTP response received during
// executeRequest. A non-nil error is returned for bogus checks and checks
// which have errors: just failing checks do not lead to a non-nil error.
//
// Normally all checks in t.Checks are executed. If the first check in
// t.Checks is a StatusCode check against 200 and it fails, then the rest of
// the checks are skipped.
func (t *Test) executeChecks(result []CheckResult) {
	done := false
	for i, ck := range t.checks {
		start := time.Now()
		err := ck.Execute(t)
		result[i].Duration = Duration(time.Since(start))
		result[i].Error = err
		if err != nil {
			t.debugf("check %d %s failed: %s", i, NameOf(ck), err)
			if _, ok := err.(MalformedCheck); ok {
				result[i].Status = Bogus
			} else {
				result[i].Status = Fail
			}
			// Remember the first check error as the overall test error.
			if t.Error == nil {
				t.Error = err
			}
			// Abort needless checking if all went wrong: a failing
			// leading StatusCode(200) check makes the remaining
			// checks pointless.
			if sc, ok := ck.(StatusCode); ok && i == 0 && sc.Expect == 200 {
				t.tracef("skipping remaining tests")
				// Clear Status and Error fields as these might be
				// populated from a prior try run of the test.
				for j := 1; j < len(result); j++ {
					result[j].Status = Skipped
					result[j].Error = nil
				}
				done = true
			}
		} else {
			result[i].Status = Pass
			t.tracef("check %d %s: Pass", i, NameOf(ck))
		}
		// Keep the numerically largest check status as the test status.
		if result[i].Status > t.Status {
			t.Status = result[i].Status
		}
		if done {
			break
		}
	}
}
// prepared reports whether t has been prepared already, i.e. whether the
// underlying http.Request has been constructed.
func (t *Test) prepared() bool {
	return t.Request.Request != nil
}
// errorf logs format at ERROR level, tagged with the test name.
// It is suppressed only for negative Verbosity.
func (t *Test) errorf(format string, v ...interface{}) {
	if t.Verbosity >= 0 {
		format = "ERROR " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// infof logs format at INFO level (Verbosity >= 1), tagged with the test name.
func (t *Test) infof(format string, v ...interface{}) {
	if t.Verbosity >= 1 {
		format = "INFO " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// debugf logs format at DEBUG level (Verbosity >= 2), tagged with the test name.
func (t *Test) debugf(format string, v ...interface{}) {
	if t.Verbosity >= 2 {
		format = "DEBUG " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// tracef logs format at TRACE level (Verbosity >= 3), tagged with the test name.
func (t *Test) tracef(format string, v ...interface{}) {
	if t.Verbosity >= 3 {
		format = "TRACE " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}
// ----------------------------------------------------------------------------
// Multipart bodies

// quoteEscaper escapes backslashes and double quotes so a string can be
// embedded into a quoted MIME header parameter.
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes returns s with backslashes and double quotes escaped.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

// multipartBody encodes the given parameters into a multipart/form-data
// body. Values starting with "@file:" are sent as file parts: either
// "@file:path/to/file" (content read from the filesystem) or
// "@file:@name:the-file-data" (content given inline). It returns the
// encoded body, the multipart boundary and the first error encountered.
func multipartBody(param map[string][]string) (io.ReadCloser, string, error) {
	body := &bytes.Buffer{}
	mpwriter := multipart.NewWriter(body)

	// All non-file parameters come first.
	for n, v := range param {
		if len(v) > 0 && strings.HasPrefix(v[0], "@file:") {
			continue // Files go at the end.
		}
		if len(v) == 0 {
			// A parameter without values is sent as one empty field.
			v = []string{""}
		}
		for _, vv := range v {
			if err := mpwriter.WriteField(n, vv); err != nil {
				return nil, "", fmt.Errorf(
					"Unable to write field %q: %s", n, err.Error())
			}
		}
	}

	// File parameters at bottom.
	for n, v := range param {
		if !(len(v) > 0 && strings.HasPrefix(v[0], "@file:")) {
			continue // Already written above.
		}
		if err := writeFilePart(mpwriter, n, v[0][len("@file:"):]); err != nil {
			return nil, "", err
		}
	}

	if err := mpwriter.Close(); err != nil {
		return nil, "", err
	}
	return ioutil.NopCloser(body), mpwriter.Boundary(), nil
}

// writeFilePart writes the file parameter n given by filename (the value
// with the "@file:" prefix stripped) to mpwriter. Any opened file is closed
// before returning so no descriptors accumulate while encoding many files.
func writeFilePart(mpwriter *multipart.Writer, n, filename string) error {
	if filename == "" {
		return fmt.Errorf("Missing filename in multipart parameter %q", n)
	}
	var file io.Reader
	var basename string
	if filename[0] == '@' {
		// Inline content of the form "@name:data".
		i := strings.Index(filename, ":")
		if i < 0 {
			return fmt.Errorf(
				"Malformed inline file in multipart parameter %q", n)
		}
		basename = filename[1:i]
		file = strings.NewReader(filename[i+1:])
	} else {
		fsfile, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf(
				"Unable to read file %q for multipart parameter %q: %s",
				filename, n, err.Error())
		}
		// Deferring here (per part) instead of in the caller's loop
		// closes each file as soon as its part is written.
		defer fsfile.Close()
		file = fsfile
		basename = path.Base(filename)
	}

	// Doing fw, err := mpwriter.CreateFormFile(n, basename) would
	// be much simpler but would fix the content type to
	// application/octet-stream. We can do a bit better.
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition",
		fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
			escapeQuotes(n), escapeQuotes(basename)))
	ct := "application/octet-stream"
	if i := strings.LastIndex(basename, "."); i != -1 {
		if mt := mime.TypeByExtension(basename[i:]); mt != "" {
			ct = mt
		}
	}
	h.Set("Content-Type", ct)
	fw, err := mpwriter.CreatePart(h)
	if err != nil {
		return fmt.Errorf(
			"Unable to create part for parameter %q: %s",
			n, err.Error())
	}

	if _, err := io.Copy(fw, file); err != nil {
		return fmt.Errorf(
			"Unable to write part for parameter %q: %s",
			n, err.Error())
	}
	return nil
}
// -------------------------------------------------------------------------
// Methods of Poll and ClientPool

// Skip reports whether the test should be skipped entirely: a negative
// Poll.Max marks a deliberately disabled test.
func (p Poll) Skip() bool {
	return p.Max < 0
}
// Get retrieves a new or existing http.Client for the given timeout and
// redirect following policy. Clients are cached per (timeout, policy)
// pair; redirect-following clients are stored under the negated timeout.
// The pool's Jar and Transport, if set, are installed on new clients.
func (p *ClientPool) Get(timeout Duration, followRedirects bool) *http.Client {
	if timeout == 0 {
		// NOTE(review): Fatalln aborts the whole process; presumably a
		// zero timeout indicates a programming error upstream.
		log.Fatalln("ClientPool.Get called with zero timeout.")
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	// Lazily initialize the cache on first use.
	if len(p.clients) == 0 {
		p.clients = make(map[Duration]*http.Client)
	}

	// Encode the redirect policy into the sign of the map key.
	key := timeout
	if followRedirects {
		key = -key
	}

	if client, ok := p.clients[key]; ok {
		return client
	}

	var client *http.Client
	if followRedirects {
		client = &http.Client{CheckRedirect: doFollowRedirects, Timeout: time.Duration(timeout)}
	} else {
		client = &http.Client{CheckRedirect: dontFollowRedirects, Timeout: time.Duration(timeout)}
	}
	if p.Jar != nil {
		client.Jar = p.Jar
	}
	if p.Transport != nil {
		client.Transport = p.Transport
	}
	p.clients[key] = client
	return client
}
// doFollowRedirects is the CheckRedirect policy of clients which follow
// redirections: it permits a chain of at most 10 consecutive redirects.
func doFollowRedirects(req *http.Request, via []*http.Request) error {
	if len(via) < 10 {
		return nil
	}
	return errors.New("stopped after 10 redirects")
}
// dontFollowRedirects is the CheckRedirect policy of clients which must not
// follow redirections; it always returns the sentinel redirectNofollow.
func dontFollowRedirects(req *http.Request, via []*http.Request) error {
	return redirectNofollow
}
Document how VarEx entries are merged
// Copyright 2014 Volker Dobler. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ht
import (
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/vdobler/ht/third_party/json5"
)
var (
	// DefaultUserAgent is the user agent string to send in http requests
	// if no user agent header is specified explicitly.
	DefaultUserAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36"

	// DefaultAccept is the accept header to be sent if no accept header
	// is set explicitly in the test.
	DefaultAccept = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"

	// DefaultClientTimeout is the timeout used by the http clients.
	DefaultClientTimeout = Duration(2 * time.Second)
)
// URLValues is a url.Values (a map from parameter name to values) with a
// fancier JSON unmarshalling.
type URLValues url.Values

// UnmarshalJSON produces a url.Values (i.e. a map[string][]string) from
// various JSON5 representations. E.g.
//     {
//        a: 12,
//        b: "foo",
//        c: [ 23, "bar"]
//     }
// can be unmarshaled with the expected result: numbers are rendered via
// float64ToString and scalar values become one-element slices.
func (v *URLValues) UnmarshalJSON(data []byte) error {
	vals := make(url.Values)
	// Decode to raw messages first so each value can be inspected
	// individually.
	raw := map[string]json5.RawMessage{}
	err := json5.Unmarshal(data, &raw)
	if err != nil {
		return err
	}
	for name, r := range raw {
		var generic interface{}
		err := json5.Unmarshal(r, &generic)
		if err != nil {
			return err
		}
		switch g := generic.(type) {
		case float64:
			// JSON numbers always arrive as float64.
			vals[name] = []string{float64ToString(g)}
		case string:
			vals[name] = []string{g}
		case []interface{}:
			vals[name] = []string{}
			for _, sub := range g {
				switch gg := sub.(type) {
				case float64:
					vals[name] = append(vals[name], float64ToString(gg))
				case string:
					vals[name] = append(vals[name], gg)
				default:
					return fmt.Errorf("ht: illegal url query value %v of type %T for query parameter %s", sub, gg, name)
				}
			}
		default:
			return fmt.Errorf("ht: illegal url query value %v of type %T for query parameter %s", generic, g, name)
		}
	}
	*v = URLValues(vals)
	return nil
}
// float64ToString formats f for use as a URL parameter value: values which
// are integral (up to an absolute tolerance of 1e-6) are rendered without
// decimals or exponent, all others in compact %g notation.
func float64ToString(f float64) string {
	t := math.Trunc(f)
	// Guard the integer conversion: converting a float64 outside the
	// int64 range to an integer type is undefined in Go and yields
	// garbage, so such values fall back to %g formatting. (Floats that
	// large cannot represent exact integers anyway.)
	const maxExact = float64(1 << 62)
	if math.Abs(t-f) < 1e-6 && math.Abs(t) < maxExact {
		return strconv.FormatInt(int64(t), 10)
	}
	return fmt.Sprintf("%g", f)
}
// Request is a HTTP request.
type Request struct {
	// Method is the HTTP method to use.
	// An empty method is equivalent to "GET".
	Method string `json:",omitempty"`

	// URL is the URL of the request.
	URL string

	// Params contains the parameters and their values to send in
	// the request.
	//
	// If the parameters are sent as multipart it is possible to include
	// files by letting the parameter values start with "@file:". Two
	// versions are possible: "@file:path/to/file" will send a file read
	// from the given filesystem path while "@file:@name:the-file-data"
	// will use the-file-data as the content.
	Params URLValues `json:",omitempty"`

	// ParamsAs determines how the parameters in the Param field are sent:
	//   "URL" or "": append properly encoded to URL
	//   "body"     : send as application/x-www-form-urlencoded in body.
	//   "multipart": send as multipart in body.
	// The two values "body" and "multipart" must not be used
	// on a GET or HEAD request.
	ParamsAs string `json:",omitempty"`

	// Header contains the specific http headers to be sent in this request.
	// User-Agent and Accept headers are set automatically to the global
	// default values if not set explicitly.
	Header http.Header `json:",omitempty"`

	// Cookies contains the cookies to send in the request.
	Cookies []Cookie `json:",omitempty"`

	// Body is the full body to send in the request. Body must be
	// empty if Params are sent as multipart or form-urlencoded.
	Body string `json:",omitempty"`

	// FollowRedirects determines if automatic following of
	// redirects should be done.
	FollowRedirects bool `json:",omitempty"`

	Request  *http.Request // the 'real' request
	SentBody string        // the 'real' body
}
// Response captures information about a http response.
type Response struct {
	// Response is the received HTTP response. Its body has been read
	// and closed already.
	Response *http.Response

	// Duration to receive response and read the whole body.
	Duration Duration

	// The received body and the error got while reading it.
	BodyBytes []byte
	BodyErr   error
}

// Body returns a reader of the response body.
func (resp *Response) Body() *bytes.Reader {
	return bytes.NewReader(resp.BodyBytes)
}
// Cookie is a HTTP cookie to be sent with a request, given by name and value.
type Cookie struct {
	Name  string
	Value string `json:",omitempty"`
}
// Poll determines if and how to redo a test after a failure or if the
// test should be skipped altogether. The zero value of Poll means "Just do
// the test once."
type Poll struct {
	// Max is the maximum number of redos. Both 0 and 1 mean:
	// "Just one try. No redo."
	// Negative values indicate that the test should be skipped.
	Max int `json:",omitempty"`

	// Sleep is the duration to sleep between redos.
	Sleep Duration `json:",omitempty"`
}
// ClientPool maintains a pool of clients for the given transport
// and cookie jar. ClientPools must not be copied.
type ClientPool struct {
	// Transport will be used as the clients' Transport.
	Transport http.RoundTripper

	// Jar will be used as the clients' Jar.
	Jar http.CookieJar

	mu sync.Mutex // guards clients

	// clients are indexed by their timeout. Clients which follow
	// redirects are distinguished by a negative timeout.
	clients map[Duration]*http.Client
}
// ----------------------------------------------------------------------------
// Test

// Test is a single logical test which does one HTTP request and checks
// a number of Checks on the received Response.
type Test struct {
	Name        string
	Description string

	// Request is the HTTP request.
	Request Request

	// Checks contains all checks to perform on the response to the HTTP request.
	Checks CheckList

	// VarEx may be used to populate variables from the response.
	VarEx map[string]Extractor

	Poll      Poll     `json:",omitempty"`
	Timeout   Duration // If zero use DefaultClientTimeout.
	Verbosity int      // Verbosity level in logging.

	// ClientPool allows to inject special http.Transports or a
	// cookie jar to be used by this Test.
	ClientPool *ClientPool `json:"-"`

	Response Response

	// The following results are filled during Run.
	Status       Status        `json:"-"`
	Started      time.Time     `json:"-"`
	Error        error         `json:"-"`
	Duration     Duration      `json:"-"`
	FullDuration Duration      `json:"-"`
	Tries        int           `json:"-"`
	CheckResults []CheckResult `json:"-"` // The individual checks.

	SeqNo string

	client *http.Client // the client used to execute this test
	checks []Check      // prepared checks.
}
// mergeRequest implements the merge strategy described in Merge for the
// Request: Method and ParamsAs must agree where nonempty, URL and Body may
// be set only once, Params and Header merge by key (overwriting), Cookies
// merge by cookie name and FollowRedirects is last-wins.
func mergeRequest(m *Request, r Request) error {
	allNonemptyMustBeSame := func(m *string, s string) error {
		if s != "" {
			if *m != "" && *m != s {
				return fmt.Errorf("Cannot merge %q into %q", s, *m)
			}
			*m = s
		}
		return nil
	}
	onlyOneMayBeNonempty := func(m *string, s string) error {
		if s != "" {
			if *m != "" {
				return fmt.Errorf("Won't overwrite %q with %q", *m, s)
			}
			*m = s
		}
		return nil
	}

	if err := allNonemptyMustBeSame(&(m.Method), r.Method); err != nil {
		return err
	}

	if err := onlyOneMayBeNonempty(&(m.URL), r.URL); err != nil {
		return err
	}

	// Merge by key; m.Params must be allocated by the caller (Merge does).
	for k, v := range r.Params {
		m.Params[k] = v
	}

	if err := allNonemptyMustBeSame(&(m.ParamsAs), r.ParamsAs); err != nil {
		return err
	}

	for k, v := range r.Header {
		m.Header[k] = v
	}

outer:
	for _, rc := range r.Cookies {
		for i := range m.Cookies {
			if m.Cookies[i].Name == rc.Name {
				// Same cookie name: the merged-in value wins.
				m.Cookies[i].Value = rc.Value
				continue outer
			}
		}
		m.Cookies = append(m.Cookies, rc)
	}

	if err := onlyOneMayBeNonempty(&(m.Body), r.Body); err != nil {
		return err
	}

	// The last merged request determines redirect following.
	m.FollowRedirects = r.FollowRedirects

	return nil
}
// Merge merges all tests into one. The individual fields are merged in the
// following way:
//     Name         Join all names
//     Description  Join all descriptions
//     Request
//       Method     All nonempty must be the same
//       URL        Only one may be nonempty
//       Params     Merge by key
//       ParamsAs   All nonempty must be the same
//       Header     Merge by key
//       Cookies    Merge by cookie name
//       Body       Only one may be nonempty
//       FollowRedirects  Last wins
//     Checks       Append all checks
//     VarEx        Merge by variable name; the same name must map to the
//                  same extractor in all tests
//     Poll
//       Max        Use largest
//       Sleep      Use largest
//     Timeout      Use largest
//     Verbosity    Use largest
//     ClientPool   ignore
func Merge(tests ...*Test) (*Test, error) {
	m := Test{}

	// Join all names and descriptions.
	s := []string{}
	for _, t := range tests {
		s = append(s, t.Name)
	}
	m.Name = "Merge of " + strings.Join(s, ", ")
	s = s[:0]
	for _, t := range tests {
		s = append(s, t.Description)
	}
	m.Description = strings.Join(s, "\n")

	m.Request.Params = make(URLValues)
	m.Request.Header = make(http.Header)
	m.VarEx = make(map[string]Extractor)
	for _, t := range tests {
		err := mergeRequest(&m.Request, t.Request)
		if err != nil {
			return &m, err
		}
		m.Checks = append(m.Checks, t.Checks...)
		if t.Poll.Max > m.Poll.Max {
			m.Poll.Max = t.Poll.Max
		}
		if t.Poll.Sleep > m.Poll.Sleep {
			m.Poll.Sleep = t.Poll.Sleep
		}
		if t.Timeout > m.Timeout {
			m.Timeout = t.Timeout
		}
		if t.Verbosity > m.Verbosity {
			m.Verbosity = t.Verbosity
		}
		for name, value := range t.VarEx {
			// Conflicting extractors for the same variable are an error.
			if old, ok := m.VarEx[name]; ok && old != value {
				return &m, fmt.Errorf("wont overwrite extractor for %s", name)
			}
			m.VarEx[name] = value
		}
	}

	return &m, nil
}
// Run runs the test t. The actual HTTP request is crafted and executed and
// the checks are performed on the received response. This whole process
// is repeated on failure or skipped entirely according to t.Poll.
//
// The given variables are substituted into the relevant parts of the request
// and the checks.
//
// Normally all checks in t.Checks are executed. If the first check in
// t.Checks is a StatusCode check against 200 and it fails, then the rest of
// the checks are skipped.
//
// Run returns a non-nil error only if the test is bogus; a failing http
// request, problems reading the body or any failing checks do not trigger a
// non-nil return value.
func (t *Test) Run(variables map[string]string) error {
	t.Started = time.Now()

	// Pre-populate name and JSON dump of each check.
	t.CheckResults = make([]CheckResult, len(t.Checks)) // Zero value is NotRun
	for i, c := range t.Checks {
		t.CheckResults[i].Name = NameOf(c)
		buf, err := json5.Marshal(c)
		if err != nil {
			buf = []byte(err.Error())
		}
		t.CheckResults[i].JSON = string(buf)
	}

	maxTries := t.Poll.Max
	if maxTries == 0 {
		// Zero means no polling: one single try.
		maxTries = 1
	}
	if maxTries < 0 {
		// This test is deliberately skipped. A zero duration is okay.
		t.Status = Skipped
		return nil
	}

	// Try until first success.
	start := time.Now()
	try := 1
	for ; try <= maxTries; try++ {
		t.Tries = try
		if try > 1 {
			time.Sleep(time.Duration(t.Poll.Sleep))
		}
		err := t.prepare(variables)
		if err != nil {
			t.Status, t.Error = Bogus, err
			return err
		}
		t.execute()
		if t.Status == Pass {
			break
		}
	}
	t.Duration = Duration(time.Since(start))
	if t.Poll.Max > 1 {
		if t.Status == Pass {
			t.debugf("polling succeded after %d tries", try)
		} else {
			t.debugf("polling failed all %d tries", maxTries)
		}
	}

	t.infof("test %s (%s %s)", t.Status, t.Duration, t.Response.Duration)

	t.FullDuration = Duration(time.Since(t.Started))
	return nil
}
// execute does a single request and checks the response; the outcome is
// stored in t.Status, t.Error and t.CheckResults.
func (t *Test) execute() {
	var err error
	err = t.executeRequest()
	if err == nil {
		if len(t.Checks) > 0 {
			t.executeChecks(t.CheckResults)
		} else {
			// No checks: a successfully executed request alone passes.
			t.Status = Pass
		}
	} else {
		t.Status = Error
		t.Error = err
	}
}
// prepare readies the test for execution by substituting the given variables
// and crafting the underlying http request as well as compiling the checks.
func (t *Test) prepare(variables map[string]string) error {
	// Create an appropriate replacer from the user-supplied variables
	// plus the automatic "now" variables.
	nowVars := t.nowVariables(time.Now())
	allVars := mergeVariables(variables, nowVars)
	repl, err := newReplacer(allVars)
	if err != nil {
		return err
	}

	// Create the request.
	contentType, err := t.newRequest(repl)
	if err != nil {
		err := fmt.Errorf("failed preparing request: %s", err.Error())
		t.errorf("%s", err.Error())
		return err
	}

	// Make a deep copy of the header and set standard stuff and cookies.
	t.Request.Request.Header = make(http.Header)
	for h, v := range t.Request.Header {
		rv := make([]string, len(v))
		for i := range v {
			rv[i] = repl.str.Replace(v[i])
		}
		t.Request.Request.Header[h] = rv
	}
	if t.Request.Request.Header.Get("Content-Type") == "" && contentType != "" {
		t.Request.Request.Header.Set("Content-Type", contentType)
	}
	if t.Request.Request.Header.Get("Accept") == "" {
		t.Request.Request.Header.Set("Accept", DefaultAccept)
	}
	if t.Request.Request.Header.Get("User-Agent") == "" {
		t.Request.Request.Header.Set("User-Agent", DefaultUserAgent)
	}
	for _, cookie := range t.Request.Cookies {
		cv := repl.str.Replace(cookie.Value)
		t.Request.Request.AddCookie(&http.Cookie{Name: cookie.Name, Value: cv})
	}

	// Compile the checks: substitute variables, Prepare each one and
	// collect index and message of every failing ("bogus") check.
	t.checks = make([]Check, len(t.Checks))
	cfc, cfce := []int{}, []string{}
	for i := range t.Checks {
		t.checks[i] = SubstituteVariables(t.Checks[i], repl.str, repl.fn)
		e := t.checks[i].Prepare()
		if e != nil {
			cfc = append(cfc, i)
			cfce = append(cfce, e.Error())
			t.errorf("preparing check %d %q: %s",
				i, NameOf(t.Checks[i]), e.Error())
		}
	}
	if len(cfc) != 0 {
		err := fmt.Errorf("bogus checks %v: %s", cfc, strings.Join(cfce, "; "))
		return err
	}

	// Pick a client: from the pool if one is configured, otherwise a
	// fresh one honoring the timeout and redirect policy.
	to := DefaultClientTimeout
	if t.Timeout > 0 {
		to = t.Timeout
	}
	if t.ClientPool != nil {
		t.tracef("Taking client from pool")
		t.client = t.ClientPool.Get(to, t.Request.FollowRedirects)
	} else if t.Request.FollowRedirects {
		t.client = &http.Client{CheckRedirect: doFollowRedirects, Timeout: time.Duration(to)}
	} else {
		t.client = &http.Client{CheckRedirect: dontFollowRedirects, Timeout: time.Duration(to)}
	}
	return nil
}
// newRequest sets up the request field of t: variables are replaced in the
// URL and all parameter values, and the parameters are encoded according to
// Request.ParamsAs. If a special Content-Type header is needed (e.g. because
// of a multipart body) it is returned.
func (t *Test) newRequest(repl replacer) (contentType string, err error) {
	// Prepare request method.
	if t.Request.Method == "" {
		t.Request.Method = "GET"
	}

	rurl := repl.str.Replace(t.Request.URL)

	// Apply variable substitution to all parameter values.
	urlValues := make(URLValues)
	for param, vals := range t.Request.Params {
		rv := make([]string, len(vals))
		for i, v := range vals {
			rv[i] = repl.str.Replace(v)
		}
		urlValues[param] = rv
	}

	var body io.ReadCloser
	if len(t.Request.Params) > 0 {
		if t.Request.ParamsAs == "body" || t.Request.ParamsAs == "multipart" {
			if t.Request.Method == "GET" || t.Request.Method == "HEAD" {
				err := fmt.Errorf("%s does not allow body or multipart parameters", t.Request.Method)
				return "", err
			}
			if t.Request.Body != "" {
				err := fmt.Errorf("body used with body/multipart parameters")
				return "", err
			}
		}
		switch t.Request.ParamsAs {
		case "URL", "":
			if strings.Contains(rurl, "?") {
				rurl += "&" + url.Values(urlValues).Encode()
			} else {
				rurl += "?" + url.Values(urlValues).Encode()
			}
		case "body":
			contentType = "application/x-www-form-urlencoded"
			encoded := url.Values(urlValues).Encode()
			t.Request.SentBody = encoded
			body = ioutil.NopCloser(strings.NewReader(encoded))
		case "multipart":
			// Use the variable-substituted urlValues, not the raw
			// t.Request.Params, so that multipart parameters get
			// the same substitution as the URL and body variants.
			b, boundary, err := multipartBody(urlValues)
			if err != nil {
				return "", err
			}
			bb, err := ioutil.ReadAll(b)
			if err != nil {
				return "", err
			}
			t.Request.SentBody = string(bb)
			body = ioutil.NopCloser(bytes.NewReader(bb))
			contentType = "multipart/form-data; boundary=" + boundary
		default:
			err := fmt.Errorf("unknown parameter method %q", t.Request.ParamsAs)
			return "", err
		}
	}

	// An explicitly given body (with variables substituted) wins.
	if t.Request.Body != "" {
		rbody := repl.str.Replace(t.Request.Body)
		t.Request.SentBody = rbody
		body = ioutil.NopCloser(strings.NewReader(rbody))
	}

	t.Request.Request, err = http.NewRequest(t.Request.Method, rurl, body)
	if err != nil {
		return "", err
	}

	return contentType, nil
}
// redirectNofollow is the sentinel error returned by dontFollowRedirects to
// abort automatic redirect following; executeRequest recognizes it and does
// not treat it as a request failure.
var (
	redirectNofollow = errors.New("we do not follow redirects")
)
// executeRequest performs the HTTP request defined in t which must have been
// prepared by Prepare. Executing an unprepared Test will panic.
// It returns a non-nil error if sending the request failed or if the gzip
// body could not be decoded; response duration and body are recorded in
// t.Response in any case.
func (t *Test) executeRequest() error {
	t.debugf("requesting %q", t.Request.Request.URL.String())

	start := time.Now()

	resp, err := t.client.Do(t.Request.Request)
	if ue, ok := err.(*url.Error); ok && ue.Err == redirectNofollow &&
		!t.Request.FollowRedirects {
		// Clear err if it is just our redirect non-following policy.
		err = nil
	}
	t.Response.Response = resp
	msg := "okay"
	if err == nil {
		reader := io.ReadCloser(resp.Body)
		var gzErr error
		if resp.Header.Get("Content-Encoding") == "gzip" {
			t.tracef("Unzipping gzip body")
			var gz io.ReadCloser
			gz, gzErr = gzip.NewReader(resp.Body)
			if gzErr == nil {
				reader = gz
			}
		}
		if gzErr == nil {
			t.Response.BodyBytes, t.Response.BodyErr = ioutil.ReadAll(reader)
		} else {
			// A broken gzip stream: record the error instead of
			// reading from an unusable reader (the original code
			// would have panicked on the nil gzip reader here).
			t.Response.BodyBytes, t.Response.BodyErr = nil, gzErr
			err = gzErr
		}
		reader.Close()
	} else {
		msg = fmt.Sprintf("fail %s", err.Error())
	}

	t.Response.Duration = Duration(time.Since(start))

	t.debugf("request took %s, %s", t.Response.Duration, msg)

	return err
}
// executeChecks applies the checks in t to the HTTP response received during
// executeRequest. A non-nil error is returned for bogus checks and checks
// which have errors: just failing checks do not lead to a non-nil error.
//
// Normally all checks in t.Checks are executed. If the first check in
// t.Checks is a StatusCode check against 200 and it fails, then the rest of
// the checks are skipped.
func (t *Test) executeChecks(result []CheckResult) {
	done := false
	for i, ck := range t.checks {
		start := time.Now()
		err := ck.Execute(t)
		result[i].Duration = Duration(time.Since(start))
		result[i].Error = err
		if err != nil {
			t.debugf("check %d %s failed: %s", i, NameOf(ck), err)
			if _, ok := err.(MalformedCheck); ok {
				result[i].Status = Bogus
			} else {
				result[i].Status = Fail
			}
			// Remember the first check error as the overall test error.
			if t.Error == nil {
				t.Error = err
			}
			// Abort needless checking if all went wrong: a failing
			// leading StatusCode(200) check makes the remaining
			// checks pointless.
			if sc, ok := ck.(StatusCode); ok && i == 0 && sc.Expect == 200 {
				t.tracef("skipping remaining tests")
				// Clear Status and Error fields as these might be
				// populated from a prior try run of the test.
				for j := 1; j < len(result); j++ {
					result[j].Status = Skipped
					result[j].Error = nil
				}
				done = true
			}
		} else {
			result[i].Status = Pass
			t.tracef("check %d %s: Pass", i, NameOf(ck))
		}
		// Keep the numerically largest check status as the test status.
		if result[i].Status > t.Status {
			t.Status = result[i].Status
		}
		if done {
			break
		}
	}
}
// prepared reports whether t has been prepared already, i.e. whether the
// underlying http.Request has been constructed.
func (t *Test) prepared() bool {
	return t.Request.Request != nil
}
// errorf logs format at ERROR level, tagged with the test name.
// It is suppressed only for negative Verbosity.
func (t *Test) errorf(format string, v ...interface{}) {
	if t.Verbosity >= 0 {
		format = "ERROR " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// infof logs format at INFO level (Verbosity >= 1), tagged with the test name.
func (t *Test) infof(format string, v ...interface{}) {
	if t.Verbosity >= 1 {
		format = "INFO " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// debugf logs format at DEBUG level (Verbosity >= 2), tagged with the test name.
func (t *Test) debugf(format string, v ...interface{}) {
	if t.Verbosity >= 2 {
		format = "DEBUG " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}

// tracef logs format at TRACE level (Verbosity >= 3), tagged with the test name.
func (t *Test) tracef(format string, v ...interface{}) {
	if t.Verbosity >= 3 {
		format = "TRACE " + format + " [%q]"
		v = append(v, t.Name)
		log.Printf(format, v...)
	}
}
// ----------------------------------------------------------------------------
// Multipart bodies

// quoteEscaper escapes backslashes and double quotes so a string can be
// embedded into a quoted MIME header parameter.
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes returns s with backslashes and double quotes escaped.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

// multipartBody encodes the given parameters into a multipart/form-data
// body. Values starting with "@file:" are sent as file parts: either
// "@file:path/to/file" (content read from the filesystem) or
// "@file:@name:the-file-data" (content given inline). It returns the
// encoded body, the multipart boundary and the first error encountered.
func multipartBody(param map[string][]string) (io.ReadCloser, string, error) {
	body := &bytes.Buffer{}
	mpwriter := multipart.NewWriter(body)

	// All non-file parameters come first.
	for n, v := range param {
		if len(v) > 0 && strings.HasPrefix(v[0], "@file:") {
			continue // Files go at the end.
		}
		if len(v) == 0 {
			// A parameter without values is sent as one empty field.
			v = []string{""}
		}
		for _, vv := range v {
			if err := mpwriter.WriteField(n, vv); err != nil {
				return nil, "", fmt.Errorf(
					"Unable to write field %q: %s", n, err.Error())
			}
		}
	}

	// File parameters at bottom.
	for n, v := range param {
		if !(len(v) > 0 && strings.HasPrefix(v[0], "@file:")) {
			continue // Already written above.
		}
		if err := writeFilePart(mpwriter, n, v[0][len("@file:"):]); err != nil {
			return nil, "", err
		}
	}

	if err := mpwriter.Close(); err != nil {
		return nil, "", err
	}
	return ioutil.NopCloser(body), mpwriter.Boundary(), nil
}

// writeFilePart writes the file parameter n given by filename (the value
// with the "@file:" prefix stripped) to mpwriter. Any opened file is closed
// before returning so no descriptors accumulate while encoding many files.
func writeFilePart(mpwriter *multipart.Writer, n, filename string) error {
	if filename == "" {
		return fmt.Errorf("Missing filename in multipart parameter %q", n)
	}
	var file io.Reader
	var basename string
	if filename[0] == '@' {
		// Inline content of the form "@name:data".
		i := strings.Index(filename, ":")
		if i < 0 {
			return fmt.Errorf(
				"Malformed inline file in multipart parameter %q", n)
		}
		basename = filename[1:i]
		file = strings.NewReader(filename[i+1:])
	} else {
		fsfile, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf(
				"Unable to read file %q for multipart parameter %q: %s",
				filename, n, err.Error())
		}
		// Deferring here (per part) instead of in the caller's loop
		// closes each file as soon as its part is written.
		defer fsfile.Close()
		file = fsfile
		basename = path.Base(filename)
	}

	// Doing fw, err := mpwriter.CreateFormFile(n, basename) would
	// be much simpler but would fix the content type to
	// application/octet-stream. We can do a bit better.
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition",
		fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
			escapeQuotes(n), escapeQuotes(basename)))
	ct := "application/octet-stream"
	if i := strings.LastIndex(basename, "."); i != -1 {
		if mt := mime.TypeByExtension(basename[i:]); mt != "" {
			ct = mt
		}
	}
	h.Set("Content-Type", ct)
	fw, err := mpwriter.CreatePart(h)
	if err != nil {
		return fmt.Errorf(
			"Unable to create part for parameter %q: %s",
			n, err.Error())
	}

	if _, err := io.Copy(fw, file); err != nil {
		return fmt.Errorf(
			"Unable to write part for parameter %q: %s",
			n, err.Error())
	}
	return nil
}
// -------------------------------------------------------------------------
// Methods of Poll and ClientPool

// Skip reports whether the test should be skipped entirely: a negative
// Poll.Max marks a deliberately disabled test.
func (p Poll) Skip() bool {
	return p.Max < 0
}
// Get retrieves a new or existing http.Client for the given timeout and
// redirect following policy. Clients are cached per (timeout, policy)
// pair; redirect-following clients are stored under the negated timeout.
// The pool's Jar and Transport, if set, are installed on new clients.
func (p *ClientPool) Get(timeout Duration, followRedirects bool) *http.Client {
	if timeout == 0 {
		// NOTE(review): Fatalln aborts the whole process; presumably a
		// zero timeout indicates a programming error upstream.
		log.Fatalln("ClientPool.Get called with zero timeout.")
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	// Lazily initialize the cache on first use.
	if len(p.clients) == 0 {
		p.clients = make(map[Duration]*http.Client)
	}

	// Encode the redirect policy into the sign of the map key.
	key := timeout
	if followRedirects {
		key = -key
	}

	if client, ok := p.clients[key]; ok {
		return client
	}

	var client *http.Client
	if followRedirects {
		client = &http.Client{CheckRedirect: doFollowRedirects, Timeout: time.Duration(timeout)}
	} else {
		client = &http.Client{CheckRedirect: dontFollowRedirects, Timeout: time.Duration(timeout)}
	}
	if p.Jar != nil {
		client.Jar = p.Jar
	}
	if p.Transport != nil {
		client.Transport = p.Transport
	}
	p.clients[key] = client
	return client
}
// doFollowRedirects is the CheckRedirect policy of clients which follow
// redirections: it permits a chain of at most 10 consecutive redirects.
func doFollowRedirects(req *http.Request, via []*http.Request) error {
	if len(via) < 10 {
		return nil
	}
	return errors.New("stopped after 10 redirects")
}
// dontFollowRedirects is the CheckRedirect policy of clients which must not
// follow redirections; it always returns the sentinel redirectNofollow.
func dontFollowRedirects(req *http.Request, via []*http.Request) error {
	return redirectNofollow
}
|
package gorethink
import (
	"math"
	"reflect"

	"github.com/dancannon/gorethink/types"
	test "gopkg.in/check.v1"
)
/* BEGIN FLOAT HELPERS */

// tolerance reports whether a and b differ by less than e; for a != 0 the
// tolerance e is taken relative to a, otherwise it is absolute.
// Adapted from math/all_test.go:
// https://github.com/golang/go/blob/master/src/math/all_test.go#L1723-L1749
func tolerance(a, b, e float64) bool {
	if a != 0 {
		// Scale the tolerance relative to the expected value a.
		e = math.Abs(e * a)
	}
	return math.Abs(a-b) < e
}
// Convenience predicates wrapping tolerance with increasingly strict
// relative error bounds (from ~1% down to 4e-16); soclose takes an
// explicit bound.
func mehclose(a, b float64) bool    { return tolerance(a, b, 1e-2) }
func kindaclose(a, b float64) bool  { return tolerance(a, b, 1e-8) }
func prettyclose(a, b float64) bool { return tolerance(a, b, 1e-14) }
func veryclose(a, b float64) bool   { return tolerance(a, b, 4e-16) }
func soclose(a, b, e float64) bool  { return tolerance(a, b, e) }
// compareCoordinates asserts via c that every point of every line in lines
// matches the expected coordinate table co within kindaclose (1e-8
// relative) tolerance. The point fields (Lon/Lat) are read by reflection;
// the point index selects the expected row and the field index the column.
// NOTE(review): only the point index addresses co, so all lines are
// compared against the same table — fine for single-line geometries.
func compareCoordinates(co [][]float64, lines types.Lines, c *test.C) {
	for _, points := range lines {
		for ip, point := range points {
			v := reflect.ValueOf(point)
			for i := 0; i < v.NumField(); i++ {
				lc := co[ip][i]
				f := v.Field(i).Float()
				if !kindaclose(lc, f) {
					c.Errorf("the deviation between the compared floats is too great [%v:%v]", lc, f)
				}
			}
		}
	}
}

/* END FLOAT HELPERS */
// TestGeospatialDecodeGeometryPseudoType checks that a raw GEOMETRY
// pseudo-type document sent through Expr decodes into types.Geometry with
// the expected shape type and coordinates.
func (s *RethinkSuite) TestGeospatialDecodeGeometryPseudoType(c *test.C) {
	var response types.Geometry

	// Coordinates of a closed polygon ring.
	co := [][]float64{
		{-122.423246, 37.779388},
		{-122.423246, 37.329898},
		{-121.88642, 37.329898},
		{-121.88642, 37.329898},
		{-122.423246, 37.779388},
	}
	gt := "Polygon"
	res, err := Expr(map[string]interface{}{
		"$reql_type$": "GEOMETRY",
		"type":        gt,
		"coordinates": []interface{}{co},
	}).Run(sess)
	c.Assert(err, test.IsNil)

	err = res.One(&response)
	c.Assert(err, test.IsNil)

	// Check the decoded shape type.
	if response.Type != gt {
		c.Errorf("expected [%v], instead [%v]", gt, response.Type)
	}

	// Assert points are within threshold.
	compareCoordinates(co, response.Lines, c)
}
// TestGeospatialEncodeGeometryPseudoType checks that a types.Geometry value
// encodes to the raw GEOMETRY pseudo-type wire representation.
func (s *RethinkSuite) TestGeospatialEncodeGeometryPseudoType(c *test.C) {
	encoded, err := encode(types.Geometry{
		Type: "Polygon",
		Lines: types.Lines{
			types.Line{
				types.Point{Lon: -122.423246, Lat: 37.779388},
				types.Point{Lon: -122.423246, Lat: 37.329898},
				types.Point{Lon: -121.88642, Lat: 37.329898},
				types.Point{Lon: -121.88642, Lat: 37.779388},
				types.Point{Lon: -122.423246, Lat: 37.779388},
			},
		},
	})
	c.Assert(err, test.IsNil)

	// The encoded document must carry the pseudo-type tag and the
	// coordinates as nested []interface{} lon/lat pairs.
	c.Assert(encoded, test.DeepEquals, map[string]interface{}{
		"$reql_type$": "GEOMETRY",
		"type":        "Polygon",
		"coordinates": []interface{}{
			[]interface{}{
				[]interface{}{-122.423246, 37.779388},
				[]interface{}{-122.423246, 37.329898},
				[]interface{}{-121.88642, 37.329898},
				[]interface{}{-121.88642, 37.779388},
				[]interface{}{-122.423246, 37.779388},
			},
		},
	})
}
// TestGeospatialCircle checks that Circle with a coordinate-pair center and
// radius 10 produces the expected 33-point circle approximation.
func (s *RethinkSuite) TestGeospatialCircle(c *test.C) {
	var response types.Geometry
	res, err := Circle([]float64{-122.423246, 37.779388}, 10).Run(sess)
	c.Assert(err, test.IsNil)

	err = res.One(&response)
	c.Assert(err, test.IsNil)

	// Expected approximation points around the center (closed ring).
	co := [][]float64{
		{-122.423246, 37.77929790366427},
		{-122.42326814543915, 37.77929963483801},
		{-122.4232894398445, 37.779304761831504},
		{-122.42330906488651, 37.77931308761787},
		{-122.42332626638755, 37.77932429224285},
		{-122.42334038330416, 37.77933794512014},
		{-122.42335087313059, 37.77935352157849},
		{-122.42335733274696, 37.77937042302436},
		{-122.4233595139113, 37.77938799994533},
		{-122.42335733279968, 37.7794055768704},
		{-122.42335087322802, 37.779422478327966},
		{-122.42334038343147, 37.77943805480385},
		{-122.42332626652532, 37.779451707701796},
		{-122.42330906501378, 37.77946291234741},
		{-122.42328943994191, 37.77947123815131},
		{-122.42326814549187, 37.77947636515649},
		{-122.423246, 37.779478096334365},
		{-122.42322385450814, 37.77947636515649},
		{-122.4232025600581, 37.77947123815131},
		{-122.42318293498623, 37.77946291234741},
		{-122.42316573347469, 37.779451707701796},
		{-122.42315161656855, 37.77943805480385},
		{-122.423141126772, 37.779422478327966},
		{-122.42313466720033, 37.7794055768704},
		{-122.42313248608872, 37.77938799994533},
		{-122.42313466725305, 37.77937042302436},
		{-122.42314112686942, 37.77935352157849},
		{-122.42315161669585, 37.77933794512014},
		{-122.42316573361246, 37.77932429224285},
		{-122.4231829351135, 37.77931308761787},
		{-122.42320256015552, 37.779304761831504},
		{-122.42322385456086, 37.77929963483801},
		{-122.423246, 37.77929790366427},
	}
	compareCoordinates(co, response.Lines, c)
}
// TestGeospatialCirclePoint checks that Circle with a Point term as center
// produces the same 33-point circle approximation as the coordinate-pair
// variant above.
func (s *RethinkSuite) TestGeospatialCirclePoint(c *test.C) {
	var response types.Geometry
	res, err := Circle(Point(-122.423246, 37.779388), 10).Run(sess)
	c.Assert(err, test.IsNil)

	err = res.One(&response)
	c.Assert(err, test.IsNil)

	// Expected approximation points around the center (closed ring).
	co := [][]float64{
		{-122.423246, 37.77929790366427},
		{-122.42326814543915, 37.77929963483801},
		{-122.4232894398445, 37.779304761831504},
		{-122.42330906488651, 37.77931308761787},
		{-122.42332626638755, 37.77932429224285},
		{-122.42334038330416, 37.77933794512014},
		{-122.42335087313059, 37.77935352157849},
		{-122.42335733274696, 37.77937042302436},
		{-122.4233595139113, 37.77938799994533},
		{-122.42335733279968, 37.7794055768704},
		{-122.42335087322802, 37.779422478327966},
		{-122.42334038343147, 37.77943805480385},
		{-122.42332626652532, 37.779451707701796},
		{-122.42330906501378, 37.77946291234741},
		{-122.42328943994191, 37.77947123815131},
		{-122.42326814549187, 37.77947636515649},
		{-122.423246, 37.779478096334365},
		{-122.42322385450814, 37.77947636515649},
		{-122.4232025600581, 37.77947123815131},
		{-122.42318293498623, 37.77946291234741},
		{-122.42316573347469, 37.779451707701796},
		{-122.42315161656855, 37.77943805480385},
		{-122.423141126772, 37.779422478327966},
		{-122.42313466720033, 37.7794055768704},
		{-122.42313248608872, 37.77938799994533},
		{-122.42313466725305, 37.77937042302436},
		{-122.42314112686942, 37.77935352157849},
		{-122.42315161669585, 37.77933794512014},
		{-122.42316573361246, 37.77932429224285},
		{-122.4231829351135, 37.77931308761787},
		{-122.42320256015552, 37.779304761831504},
		{-122.42322385456086, 37.77929963483801},
		{-122.423246, 37.77929790366427},
	}
	compareCoordinates(co, response.Lines, c)
}
func (s *RethinkSuite) TestGeospatialCirclePointFill(c *test.C) {
var response types.Geometry
res, err := Circle(Point(-122.423246, 37.779388), 10, CircleOpts{Fill: true}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
co := [][]float64{
{-122.423246, 37.77929790366427},
{-122.42326814543915, 37.77929963483801},
{-122.4232894398445, 37.779304761831504},
{-122.42330906488651, 37.77931308761787},
{-122.42332626638755, 37.77932429224285},
{-122.42334038330416, 37.77933794512014},
{-122.42335087313059, 37.77935352157849},
{-122.42335733274696, 37.77937042302436},
{-122.4233595139113, 37.77938799994533},
{-122.42335733279968, 37.7794055768704},
{-122.42335087322802, 37.779422478327966},
{-122.42334038343147, 37.77943805480385},
{-122.42332626652532, 37.779451707701796},
{-122.42330906501378, 37.77946291234741},
{-122.42328943994191, 37.77947123815131},
{-122.42326814549187, 37.77947636515649},
{-122.423246, 37.779478096334365},
{-122.42322385450814, 37.77947636515649},
{-122.4232025600581, 37.77947123815131},
{-122.42318293498623, 37.77946291234741},
{-122.42316573347469, 37.779451707701796},
{-122.42315161656855, 37.77943805480385},
{-122.423141126772, 37.779422478327966},
{-122.42313466720033, 37.7794055768704},
{-122.42313248608872, 37.77938799994533},
{-122.42313466725305, 37.77937042302436},
{-122.42314112686942, 37.77935352157849},
{-122.42315161669585, 37.77933794512014},
{-122.42316573361246, 37.77932429224285},
{-122.4231829351135, 37.77931308761787},
{-122.42320256015552, 37.779304761831504},
{-122.42322385456086, 37.77929963483801},
{-122.423246, 37.77929790366427},
}
compareCoordinates(co, response.Lines, c)
}
// TestGeospatialPointDistanceMethod checks the term-method form of
// Distance between two fixed points, comparing against a known value
// within kindaclose (1e-8 relative) tolerance.
func (s *RethinkSuite) TestGeospatialPointDistanceMethod(c *test.C) {
	var response float64
	// Expected distance; in metres — the server default unit, given that
	// the "km" variant of this test expects this value divided by 1000.
	f := 734125.249602186
	res, err := Point(-122.423246, 37.779388).Distance(Point(-117.220406, 32.719464)).Run(sess)
	c.Assert(err, test.IsNil)
	err = res.One(&response)
	c.Assert(err, test.IsNil)
	if !kindaclose(response, f) {
		c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
	}
}
func (s *RethinkSuite) TestGeospatialPointDistanceRoot(c *test.C) {
var response float64
f := 734125.249602186
res, err := Distance(Point(-122.423246, 37.779388), Point(-117.220406, 32.719464)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
if !kindaclose(response, f) {
c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
}
}
func (s *RethinkSuite) TestGeospatialPointDistanceRootKm(c *test.C) {
var response float64
f := 734.125249602186
res, err := Distance(Point(-122.423246, 37.779388), Point(-117.220406, 32.719464), DistanceOpts{Unit: "km"}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
if !kindaclose(response, f) {
c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
}
}
func (s *RethinkSuite) TestGeospatialFill(c *test.C) {
var response types.Geometry
res, err := Line(
[]float64{-122.423246, 37.779388},
[]float64{-122.423246, 37.329898},
[]float64{-121.886420, 37.329898},
[]float64{-121.886420, 37.779388},
).Fill().Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Polygon",
Lines: types.Lines{
types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.779388},
},
},
})
}
// TestGeospatialGeojson feeds a GeoJSON point document to the Geojson
// term and checks that the server round-trips it back as a decoded
// types.Geometry point.
func (s *RethinkSuite) TestGeospatialGeojson(c *test.C) {
	var response types.Geometry
	res, err := Geojson(map[string]interface{}{
		"type":        "Point",
		"coordinates": []interface{}{-122.423246, 37.779388},
	}).Run(sess)
	c.Assert(err, test.IsNil)
	err = res.One(&response)
	c.Assert(err, test.IsNil)
	c.Assert(response, test.DeepEquals, types.Geometry{
		Type:  "Point",
		Point: types.Point{Lon: -122.423246, Lat: 37.779388},
	})
}
func (s *RethinkSuite) TestGeospatialToGeojson(c *test.C) {
var response map[string]interface{}
res, err := Point(-122.423246, 37.779388).ToGeojson().Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, map[string]interface{}{
"type": "Point",
"coordinates": []interface{}{-122.423246, 37.779388},
})
}
// TestGeospatialGetIntersecting builds a geo-indexed table holding
// three circular areas and expects exactly two of them to intersect a
// 100 km circle around the query point.
func (s *RethinkSuite) TestGeospatialGetIntersecting(c *test.C) {
	// Setup table. Errors from these Run calls are deliberately ignored
	// (e.g. dropping a table that does not exist yet); a real setup
	// failure surfaces in the assertions on the query below.
	Db("test").TableDrop("geospatial").Run(sess)
	Db("test").TableCreate("geospatial").Run(sess)
	Db("test").Table("geospatial").IndexCreate("area", IndexCreateOpts{
		Geo: true,
	}).Run(sess)
	Db("test").Table("geospatial").Insert([]interface{}{
		map[string]interface{}{"area": Circle(Point(-117.220406, 32.719464), 100000)},
		map[string]interface{}{"area": Circle(Point(-100.220406, 20.719464), 100000)},
		map[string]interface{}{"area": Circle(Point(-117.200406, 32.723464), 100000)},
	}).Run(sess)
	var response []interface{}
	res, err := Db("test").Table("geospatial").GetIntersecting(
		Circle(Point(-117.220406, 32.719464), 100000),
		GetIntersectingOpts{
			Index: "area",
		},
	).Run(sess)
	c.Assert(err, test.IsNil)
	err = res.All(&response)
	c.Assert(err, test.IsNil)
	// The first and third inserted circles overlap the query circle;
	// the second is far away.
	c.Assert(response, test.HasLen, 2)
}
func (s *RethinkSuite) TestGeospatialGetNearest(c *test.C) {
// Setup table
Db("test").TableDrop("geospatial").Run(sess)
Db("test").TableCreate("geospatial").Run(sess)
Db("test").Table("geospatial").IndexCreate("area", IndexCreateOpts{
Geo: true,
}).Run(sess)
Db("test").Table("geospatial").Insert([]interface{}{
map[string]interface{}{"area": Circle(Point(-117.220406, 32.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-100.220406, 20.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-115.210306, 32.733364), 100000)},
}).Run(sess)
var response []interface{}
res, err := Db("test").Table("geospatial").GetNearest(
Point(-117.220406, 32.719464),
GetNearestOpts{
Index: "area",
MaxDist: 1,
},
).Run(sess)
c.Assert(err, test.IsNil)
err = res.All(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.HasLen, 1)
}
func (s *RethinkSuite) TestGeospatialIncludesTrue(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Includes(Point(-122.3, 37.4)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, true)
}
func (s *RethinkSuite) TestGeospatialIncludesFalse(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Includes(Point(100.3, 37.4)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, false)
}
func (s *RethinkSuite) TestGeospatialIntersectsTrue(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Intersects(Polygon(
Point(-122.3, 37.4),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.4),
)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, true)
}
func (s *RethinkSuite) TestGeospatialIntersectsFalse(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Intersects(Polygon(
Point(-102.4, 37.7),
Point(-102.4, 37.3),
Point(-101.8, 37.3),
Point(-101.8, 37.7),
)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, false)
}
func (s *RethinkSuite) TestGeospatialLineLatLon(c *test.C) {
var response types.Geometry
res, err := Line([]float64{-122.423246, 37.779388}, []float64{-121.886420, 37.329898}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "LineString",
Line: types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -121.886420, Lat: 37.329898},
},
})
}
func (s *RethinkSuite) TestGeospatialLinePoint(c *test.C) {
var response types.Geometry
res, err := Line(Point(-122.423246, 37.779388), Point(-121.886420, 37.329898)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "LineString",
Line: types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -121.886420, Lat: 37.329898},
},
})
}
func (s *RethinkSuite) TestGeospatialPoint(c *test.C) {
var response types.Geometry
res, err := Point(-122.423246, 37.779388).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Point",
Point: types.Point{Lon: -122.423246, Lat: 37.779388},
})
}
func (s *RethinkSuite) TestGeospatialPolygon(c *test.C) {
var response types.Geometry
res, err := Polygon(Point(-122.423246, 37.779388), Point(-122.423246, 37.329898), Point(-121.886420, 37.329898)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Polygon",
Lines: types.Lines{
types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.329898},
types.Point{Lon: -122.423246, Lat: 37.779388},
},
},
})
}
// TestGeospatialPolygonSub cuts an inner rectangle out of an outer one
// via PolygonSub; the result is a polygon with two rings (outer
// boundary plus hole), each ring closed by repeating its first vertex.
func (s *RethinkSuite) TestGeospatialPolygonSub(c *test.C) {
	var response types.Geometry
	res, err := Polygon(
		Point(-122.4, 37.7),
		Point(-122.4, 37.3),
		Point(-121.8, 37.3),
		Point(-121.8, 37.7),
	).PolygonSub(Polygon(
		Point(-122.3, 37.4),
		Point(-122.3, 37.6),
		Point(-122.0, 37.6),
		Point(-122.0, 37.4),
	)).Run(sess)
	c.Assert(err, test.IsNil)
	err = res.One(&response)
	c.Assert(err, test.IsNil)
	c.Assert(response, test.DeepEquals, types.Geometry{
		Type: "Polygon",
		Lines: types.Lines{
			// Outer ring.
			types.Line{
				types.Point{Lon: -122.4, Lat: 37.7},
				types.Point{Lon: -122.4, Lat: 37.3},
				types.Point{Lon: -121.8, Lat: 37.3},
				types.Point{Lon: -121.8, Lat: 37.7},
				types.Point{Lon: -122.4, Lat: 37.7},
			},
			// Hole punched out by PolygonSub.
			types.Line{
				types.Point{Lon: -122.3, Lat: 37.4},
				types.Point{Lon: -122.3, Lat: 37.6},
				types.Point{Lon: -122, Lat: 37.6},
				types.Point{Lon: -122, Lat: 37.4},
				types.Point{Lon: -122.3, Lat: 37.4},
			},
		},
	})
}
Removed reflection from compareCoordinates
package gorethink
import (
"github.com/dancannon/gorethink/types"
test "gopkg.in/check.v1"
)
/* BEGIN FLOAT HELPERS */
// totally ripped off from math/all_test.go
// https://github.com/golang/go/blob/master/src/math/all_test.go#L1723-L1749
// tolerance reports whether a and b differ by less than margin e,
// interpreted as a relative tolerance scaled by |a| (or as an absolute
// tolerance when a is zero). Adapted from math/all_test.go:
// https://github.com/golang/go/blob/master/src/math/all_test.go#L1723-L1749
func tolerance(a, b, e float64) bool {
	delta := a - b
	if delta < 0 {
		delta = -delta
	}
	margin := e
	if a != 0 {
		// Scale by a, then take the magnitude so the sign of a
		// cannot flip the comparison.
		margin *= a
		if margin < 0 {
			margin = -margin
		}
	}
	return delta < margin
}
// Named tolerance levels, from loosest to tightest relative error
// bound; soclose takes an explicit bound.
func mehclose(a, b float64) bool { return tolerance(a, b, 1e-2) }
func kindaclose(a, b float64) bool { return tolerance(a, b, 1e-8) }
func prettyclose(a, b float64) bool { return tolerance(a, b, 1e-14) }
func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
func soclose(a, b, e float64) bool { return tolerance(a, b, e) }
// compareCoordinates asserts that every point of every line in lines
// matches the expected lon/lat pairs in co, within kindaclose (1e-8
// relative) tolerance. Note that each line is compared against the
// same co slice, which matches how the callers use it (single-ring
// geometries).
func compareCoordinates(co [][]float64, lines types.Lines, c *test.C) {
	for _, points := range lines {
		// Fix: guard the index into co. Previously a response line with
		// more points than expected caused an index-out-of-range panic
		// instead of a test failure.
		if len(points) > len(co) {
			c.Errorf("expected at most %d points per line, got %d", len(co), len(points))
			continue
		}
		for i, point := range points {
			lon := co[i][0]
			lat := co[i][1]
			if !kindaclose(lon, point.Lon) {
				c.Errorf("the deviation between the compared floats is too great [%v:%v]", lon, point.Lon)
			}
			if !kindaclose(lat, point.Lat) {
				c.Errorf("the deviation between the compared floats is too great [%v:%v]", lat, point.Lat)
			}
		}
	}
}
/* END FLOAT HELPERS */
// TestGeospatialDecodeGeometryPseudoType sends a raw GEOMETRY
// pseudo-type document through Expr and checks that the driver decodes
// it into a types.Geometry with matching type and coordinates.
func (s *RethinkSuite) TestGeospatialDecodeGeometryPseudoType(c *test.C) {
	var response types.Geometry
	// setup coordinates
	// NOTE(review): the 3rd and 4th vertices are identical
	// ({-121.88642, 37.329898} twice); the encode test's rectangle uses
	// {-121.88642, 37.779388} for the 4th vertex, so this looks like a
	// typo — though the round-trip comparison below passes either way.
	co := [][]float64{
		{-122.423246, 37.779388},
		{-122.423246, 37.329898},
		{-121.88642, 37.329898},
		{-121.88642, 37.329898},
		{-122.423246, 37.779388},
	}
	gt := "Polygon"
	res, err := Expr(map[string]interface{}{
		"$reql_type$": "GEOMETRY",
		"type":        gt,
		"coordinates": []interface{}{co},
	}).Run(sess)
	c.Assert(err, test.IsNil)
	err = res.One(&response)
	c.Assert(err, test.IsNil)
	// test shape
	if response.Type != gt {
		c.Errorf("expected [%v], instead [%v]", gt, response.Type)
	}
	// assert points are within threshold
	compareCoordinates(co, response.Lines, c)
}
// TestGeospatialEncodeGeometryPseudoType checks that the driver
// serializes a types.Geometry polygon into the wire-format GEOMETRY
// pseudo-type map: one closed ring of [lon, lat] pairs (first vertex
// repeated last).
func (s *RethinkSuite) TestGeospatialEncodeGeometryPseudoType(c *test.C) {
	encoded, err := encode(types.Geometry{
		Type: "Polygon",
		Lines: types.Lines{
			types.Line{
				types.Point{Lon: -122.423246, Lat: 37.779388},
				types.Point{Lon: -122.423246, Lat: 37.329898},
				types.Point{Lon: -121.88642, Lat: 37.329898},
				types.Point{Lon: -121.88642, Lat: 37.779388},
				types.Point{Lon: -122.423246, Lat: 37.779388},
			},
		},
	})
	c.Assert(err, test.IsNil)
	c.Assert(encoded, test.DeepEquals, map[string]interface{}{
		"$reql_type$": "GEOMETRY",
		"type":        "Polygon",
		"coordinates": []interface{}{
			[]interface{}{
				[]interface{}{-122.423246, 37.779388},
				[]interface{}{-122.423246, 37.329898},
				[]interface{}{-121.88642, 37.329898},
				[]interface{}{-121.88642, 37.779388},
				[]interface{}{-122.423246, 37.779388},
			},
		},
	})
}
func (s *RethinkSuite) TestGeospatialCircle(c *test.C) {
var response types.Geometry
res, err := Circle([]float64{-122.423246, 37.779388}, 10).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
co := [][]float64{
{-122.423246, 37.77929790366427},
{-122.42326814543915, 37.77929963483801},
{-122.4232894398445, 37.779304761831504},
{-122.42330906488651, 37.77931308761787},
{-122.42332626638755, 37.77932429224285},
{-122.42334038330416, 37.77933794512014},
{-122.42335087313059, 37.77935352157849},
{-122.42335733274696, 37.77937042302436},
{-122.4233595139113, 37.77938799994533},
{-122.42335733279968, 37.7794055768704},
{-122.42335087322802, 37.779422478327966},
{-122.42334038343147, 37.77943805480385},
{-122.42332626652532, 37.779451707701796},
{-122.42330906501378, 37.77946291234741},
{-122.42328943994191, 37.77947123815131},
{-122.42326814549187, 37.77947636515649},
{-122.423246, 37.779478096334365},
{-122.42322385450814, 37.77947636515649},
{-122.4232025600581, 37.77947123815131},
{-122.42318293498623, 37.77946291234741},
{-122.42316573347469, 37.779451707701796},
{-122.42315161656855, 37.77943805480385},
{-122.423141126772, 37.779422478327966},
{-122.42313466720033, 37.7794055768704},
{-122.42313248608872, 37.77938799994533},
{-122.42313466725305, 37.77937042302436},
{-122.42314112686942, 37.77935352157849},
{-122.42315161669585, 37.77933794512014},
{-122.42316573361246, 37.77932429224285},
{-122.4231829351135, 37.77931308761787},
{-122.42320256015552, 37.779304761831504},
{-122.42322385456086, 37.77929963483801},
{-122.423246, 37.77929790366427},
}
compareCoordinates(co, response.Lines, c)
}
func (s *RethinkSuite) TestGeospatialCirclePoint(c *test.C) {
var response types.Geometry
res, err := Circle(Point(-122.423246, 37.779388), 10).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
co := [][]float64{
{-122.423246, 37.77929790366427},
{-122.42326814543915, 37.77929963483801},
{-122.4232894398445, 37.779304761831504},
{-122.42330906488651, 37.77931308761787},
{-122.42332626638755, 37.77932429224285},
{-122.42334038330416, 37.77933794512014},
{-122.42335087313059, 37.77935352157849},
{-122.42335733274696, 37.77937042302436},
{-122.4233595139113, 37.77938799994533},
{-122.42335733279968, 37.7794055768704},
{-122.42335087322802, 37.779422478327966},
{-122.42334038343147, 37.77943805480385},
{-122.42332626652532, 37.779451707701796},
{-122.42330906501378, 37.77946291234741},
{-122.42328943994191, 37.77947123815131},
{-122.42326814549187, 37.77947636515649},
{-122.423246, 37.779478096334365},
{-122.42322385450814, 37.77947636515649},
{-122.4232025600581, 37.77947123815131},
{-122.42318293498623, 37.77946291234741},
{-122.42316573347469, 37.779451707701796},
{-122.42315161656855, 37.77943805480385},
{-122.423141126772, 37.779422478327966},
{-122.42313466720033, 37.7794055768704},
{-122.42313248608872, 37.77938799994533},
{-122.42313466725305, 37.77937042302436},
{-122.42314112686942, 37.77935352157849},
{-122.42315161669585, 37.77933794512014},
{-122.42316573361246, 37.77932429224285},
{-122.4231829351135, 37.77931308761787},
{-122.42320256015552, 37.779304761831504},
{-122.42322385456086, 37.77929963483801},
{-122.423246, 37.77929790366427},
}
compareCoordinates(co, response.Lines, c)
}
func (s *RethinkSuite) TestGeospatialCirclePointFill(c *test.C) {
var response types.Geometry
res, err := Circle(Point(-122.423246, 37.779388), 10, CircleOpts{Fill: true}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
co := [][]float64{
{-122.423246, 37.77929790366427},
{-122.42326814543915, 37.77929963483801},
{-122.4232894398445, 37.779304761831504},
{-122.42330906488651, 37.77931308761787},
{-122.42332626638755, 37.77932429224285},
{-122.42334038330416, 37.77933794512014},
{-122.42335087313059, 37.77935352157849},
{-122.42335733274696, 37.77937042302436},
{-122.4233595139113, 37.77938799994533},
{-122.42335733279968, 37.7794055768704},
{-122.42335087322802, 37.779422478327966},
{-122.42334038343147, 37.77943805480385},
{-122.42332626652532, 37.779451707701796},
{-122.42330906501378, 37.77946291234741},
{-122.42328943994191, 37.77947123815131},
{-122.42326814549187, 37.77947636515649},
{-122.423246, 37.779478096334365},
{-122.42322385450814, 37.77947636515649},
{-122.4232025600581, 37.77947123815131},
{-122.42318293498623, 37.77946291234741},
{-122.42316573347469, 37.779451707701796},
{-122.42315161656855, 37.77943805480385},
{-122.423141126772, 37.779422478327966},
{-122.42313466720033, 37.7794055768704},
{-122.42313248608872, 37.77938799994533},
{-122.42313466725305, 37.77937042302436},
{-122.42314112686942, 37.77935352157849},
{-122.42315161669585, 37.77933794512014},
{-122.42316573361246, 37.77932429224285},
{-122.4231829351135, 37.77931308761787},
{-122.42320256015552, 37.779304761831504},
{-122.42322385456086, 37.77929963483801},
{-122.423246, 37.77929790366427},
}
compareCoordinates(co, response.Lines, c)
}
func (s *RethinkSuite) TestGeospatialPointDistanceMethod(c *test.C) {
var response float64
f := 734125.249602186
res, err := Point(-122.423246, 37.779388).Distance(Point(-117.220406, 32.719464)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
if !kindaclose(response, f) {
c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
}
}
func (s *RethinkSuite) TestGeospatialPointDistanceRoot(c *test.C) {
var response float64
f := 734125.249602186
res, err := Distance(Point(-122.423246, 37.779388), Point(-117.220406, 32.719464)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
if !kindaclose(response, f) {
c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
}
}
func (s *RethinkSuite) TestGeospatialPointDistanceRootKm(c *test.C) {
var response float64
f := 734.125249602186
res, err := Distance(Point(-122.423246, 37.779388), Point(-117.220406, 32.719464), DistanceOpts{Unit: "km"}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
if !kindaclose(response, f) {
c.Errorf("the deviation between the compared floats is too great [%v:%v]", response, f)
}
}
func (s *RethinkSuite) TestGeospatialFill(c *test.C) {
var response types.Geometry
res, err := Line(
[]float64{-122.423246, 37.779388},
[]float64{-122.423246, 37.329898},
[]float64{-121.886420, 37.329898},
[]float64{-121.886420, 37.779388},
).Fill().Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Polygon",
Lines: types.Lines{
types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.779388},
},
},
})
}
func (s *RethinkSuite) TestGeospatialGeojson(c *test.C) {
var response types.Geometry
res, err := Geojson(map[string]interface{}{
"type": "Point",
"coordinates": []interface{}{-122.423246, 37.779388},
}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Point",
Point: types.Point{Lon: -122.423246, Lat: 37.779388},
})
}
func (s *RethinkSuite) TestGeospatialToGeojson(c *test.C) {
var response map[string]interface{}
res, err := Point(-122.423246, 37.779388).ToGeojson().Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, map[string]interface{}{
"type": "Point",
"coordinates": []interface{}{-122.423246, 37.779388},
})
}
func (s *RethinkSuite) TestGeospatialGetIntersecting(c *test.C) {
// Setup table
Db("test").TableDrop("geospatial").Run(sess)
Db("test").TableCreate("geospatial").Run(sess)
Db("test").Table("geospatial").IndexCreate("area", IndexCreateOpts{
Geo: true,
}).Run(sess)
Db("test").Table("geospatial").Insert([]interface{}{
map[string]interface{}{"area": Circle(Point(-117.220406, 32.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-100.220406, 20.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-117.200406, 32.723464), 100000)},
}).Run(sess)
var response []interface{}
res, err := Db("test").Table("geospatial").GetIntersecting(
Circle(Point(-117.220406, 32.719464), 100000),
GetIntersectingOpts{
Index: "area",
},
).Run(sess)
c.Assert(err, test.IsNil)
err = res.All(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.HasLen, 2)
}
func (s *RethinkSuite) TestGeospatialGetNearest(c *test.C) {
// Setup table
Db("test").TableDrop("geospatial").Run(sess)
Db("test").TableCreate("geospatial").Run(sess)
Db("test").Table("geospatial").IndexCreate("area", IndexCreateOpts{
Geo: true,
}).Run(sess)
Db("test").Table("geospatial").Insert([]interface{}{
map[string]interface{}{"area": Circle(Point(-117.220406, 32.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-100.220406, 20.719464), 100000)},
map[string]interface{}{"area": Circle(Point(-115.210306, 32.733364), 100000)},
}).Run(sess)
var response []interface{}
res, err := Db("test").Table("geospatial").GetNearest(
Point(-117.220406, 32.719464),
GetNearestOpts{
Index: "area",
MaxDist: 1,
},
).Run(sess)
c.Assert(err, test.IsNil)
err = res.All(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.HasLen, 1)
}
func (s *RethinkSuite) TestGeospatialIncludesTrue(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Includes(Point(-122.3, 37.4)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, true)
}
func (s *RethinkSuite) TestGeospatialIncludesFalse(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Includes(Point(100.3, 37.4)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, false)
}
func (s *RethinkSuite) TestGeospatialIntersectsTrue(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Intersects(Polygon(
Point(-122.3, 37.4),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.4),
)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, true)
}
func (s *RethinkSuite) TestGeospatialIntersectsFalse(c *test.C) {
var response bool
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).Intersects(Polygon(
Point(-102.4, 37.7),
Point(-102.4, 37.3),
Point(-101.8, 37.3),
Point(-101.8, 37.7),
)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, false)
}
func (s *RethinkSuite) TestGeospatialLineLatLon(c *test.C) {
var response types.Geometry
res, err := Line([]float64{-122.423246, 37.779388}, []float64{-121.886420, 37.329898}).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "LineString",
Line: types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -121.886420, Lat: 37.329898},
},
})
}
func (s *RethinkSuite) TestGeospatialLinePoint(c *test.C) {
var response types.Geometry
res, err := Line(Point(-122.423246, 37.779388), Point(-121.886420, 37.329898)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "LineString",
Line: types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -121.886420, Lat: 37.329898},
},
})
}
func (s *RethinkSuite) TestGeospatialPoint(c *test.C) {
var response types.Geometry
res, err := Point(-122.423246, 37.779388).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Point",
Point: types.Point{Lon: -122.423246, Lat: 37.779388},
})
}
func (s *RethinkSuite) TestGeospatialPolygon(c *test.C) {
var response types.Geometry
res, err := Polygon(Point(-122.423246, 37.779388), Point(-122.423246, 37.329898), Point(-121.886420, 37.329898)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Polygon",
Lines: types.Lines{
types.Line{
types.Point{Lon: -122.423246, Lat: 37.779388},
types.Point{Lon: -122.423246, Lat: 37.329898},
types.Point{Lon: -121.88642, Lat: 37.329898},
types.Point{Lon: -122.423246, Lat: 37.779388},
},
},
})
}
func (s *RethinkSuite) TestGeospatialPolygonSub(c *test.C) {
var response types.Geometry
res, err := Polygon(
Point(-122.4, 37.7),
Point(-122.4, 37.3),
Point(-121.8, 37.3),
Point(-121.8, 37.7),
).PolygonSub(Polygon(
Point(-122.3, 37.4),
Point(-122.3, 37.6),
Point(-122.0, 37.6),
Point(-122.0, 37.4),
)).Run(sess)
c.Assert(err, test.IsNil)
err = res.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.DeepEquals, types.Geometry{
Type: "Polygon",
Lines: types.Lines{
types.Line{
types.Point{Lon: -122.4, Lat: 37.7},
types.Point{Lon: -122.4, Lat: 37.3},
types.Point{Lon: -121.8, Lat: 37.3},
types.Point{Lon: -121.8, Lat: 37.7},
types.Point{Lon: -122.4, Lat: 37.7},
},
types.Line{
types.Point{Lon: -122.3, Lat: 37.4},
types.Point{Lon: -122.3, Lat: 37.6},
types.Point{Lon: -122, Lat: 37.6},
types.Point{Lon: -122, Lat: 37.4},
types.Point{Lon: -122.3, Lat: 37.4},
},
},
})
}
|
//Command to test application without deploy:
//goapp serve app.yaml
//Command to deploy/update application:
//goapp deploy -application golangnode0 -version 0
package main
import (
"bytes"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/smtp"
"appengine"
"appengine/urlfetch"
)
// statusContent holds this node's last reported status. It is read by
// GET /status and overwritten by POST /status, echo, and testEcho.
// NOTE(review): accessed from multiple handlers without synchronization.
var statusContent string = "Default status"

// webPage carries template data for start.html.
type webPage struct {
	Title string
}

// node models a peer node's message.
// NOTE(review): both fields are unexported, so encoding/json would
// serialize this type as "{}" — see the commented-out JSON code below.
type node struct {
	id int
	content string
}

// gmailUser holds SMTP credentials used by sendMail.
type gmailUser struct {
	name string
	pswd string
}
// sendMail delivers msg to a fixed recipient through Gmail's SMTP
// relay using PLAIN auth on port 587.
// NOTE(review): the password is blank and the recipient is a
// placeholder, so this cannot succeed as-is; also log.Fatal terminates
// the whole process on any SMTP error, which is drastic inside a server.
func sendMail(msg string) {
	mailUser := gmailUser{
		"golangapplication@gmail.com",
		"",
	}
	auth := smtp.PlainAuth("",
		mailUser.name,
		mailUser.pswd,
		"smtp.gmail.com",
	)
	err := smtp.SendMail(
		"smtp.gmail.com:587",
		auth,
		mailUser.name,
		[]string{"rec@mail.com"},
		[]byte(msg),
	)
	if err != nil {
		log.Fatal(err)
	}
}
//wrong func for Google App Engine deployment. Need to use appengine libs...=(
// echo POSTs a small JSON payload to the peer node's /status endpoint
// and stores the response body in the global statusContent.
// (As noted above, plain net/http does not work on classic App Engine;
// this is kept for local testing.)
func echo() {
	url := "http://golangappnode1.appspot.com/status"
	var jsonStr = []byte(`{"msg":"Hello!"}`)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// Fix: this error was previously ignored (silently shadowed by
		// the client.Do assignment below); NewRequest fails on bad URLs.
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	statusContent = string(body)
}
func helloWorld(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
}
// startPage serves the control page: GET renders start.html, POST
// echoes back the node command submitted from the web form.
func startPage(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		templatePage, err := template.ParseFiles("start.html")
		if err != nil {
			// Fix: this error was previously discarded; a missing or
			// broken template made Execute dereference a nil template
			// and crash the handler.
			http.Error(w, "template error: "+err.Error(), http.StatusInternalServerError)
			return
		}
		if err := templatePage.Execute(w, &webPage{"simplePage"}); err != nil {
			// The response may be partially written; just record it.
			log.Println("rendering start.html:", err)
		}
	case "POST":
		r.ParseForm()
		//go echo()
		fmt.Fprintf(w, "Successful read command/input from web-interface! Input contains - "+r.FormValue("nodeId")+" "+r.FormValue("echoContent"))
	}
}
// statusServer exposes the node's status string: GET returns the
// current value, POST replaces it with the raw request body.
// NOTE(review): statusContent is package-global and mutated without
// synchronization; concurrent requests race on it.
func statusServer(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		fmt.Fprintf(w, "Get status - "+statusContent)
	case "POST":
		// Slurp the whole body as plain text; JSON decoding into the
		// node struct was abandoned (see commented code) because its
		// fields are unexported.
		buf := new(bytes.Buffer)
		buf.ReadFrom(r.Body)
		newStr := buf.String()
		/*
		nodeSends := node{}
		err := json.Decoder(newStr).Decode(&nodeSends)
		if err != nil {
		panic(err)
		}
		*/
		fmt.Fprintf(w, "Get data by params in POST - OK")
		//statusContent = "POST request handled, " + "Node id: " + string(nodeSends.id) + ", Echo content: " + nodeSends.content
		statusContent = "POST request handled, " + newStr
	}
}
// testEcho forwards the submitted node command as a JSON payload to
// the peer node's /status endpoint using the App Engine urlfetch
// transport, then records the peer's reply in statusContent.
func testEcho(w http.ResponseWriter, r *http.Request) {
	/*nodeOne := node{
	1,
	"Hello",
	}
	jsonNodeOne, err1 := json.Marshal(nodeOne)
	if err1 != nil {
	panic(err1)
	}*/
	r.ParseForm()
	c := appengine.NewContext(r)
	// NOTE(review): form values are spliced into the JSON unescaped; a
	// quote in the input yields invalid JSON.
	var jsonStr = []byte(`{"` + r.FormValue("nodeId") + `":"` + r.FormValue("echoContent") + `", "lol":"lol"}`)
	//bs := []byte{1, 2, 3}
	buf := bytes.NewBuffer(jsonStr)
	client := http.Client{Transport: &urlfetch.Transport{Context: c}}
	resp, err := client.Post("http://goappnode1.appspot.com/status", "application/octet-stream", buf)
	if err != nil {
		statusContent = err.Error()
		fmt.Println(err)
		// Fix: bail out here. resp is nil when err != nil, and the
		// ReadAll below used to dereference it and panic the handler.
		return
	}
	// Fix: close the body so the underlying connection is released.
	defer resp.Body.Close()
	respBody, _ := ioutil.ReadAll(resp.Body)
	statusContent = "Response from node - " + string(respBody)
}
func showInfo(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Information page for test project.")
fmt.Fprintln(w, "Language - Go;")
fmt.Fprintln(w, "Platform - Google Application Engine;")
}
// init wires up the HTTP routes. On classic App Engine the runtime
// owns the listener, so handlers are registered here rather than via
// ListenAndServe in main.
func init() {
	http.HandleFunc("/", startPage)
	http.HandleFunc("/helloworld", helloWorld)
	http.HandleFunc("/showinfo", showInfo)
	http.HandleFunc("/status", statusServer)
	http.HandleFunc("/echo", testEcho)
	// Wrong code for App Engine deployment - the runtime supplies the
	// server, so starting our own listener does not work there:
	//http.ListenAndServe(":80", nil)
}
//this func not needed for deploy on Google App Engine, init() func replace main()
/*
func main() {
//fmt.Println("Hello, test server started on 8080 port.\n - /helloworld - show title page\n - /showinfo - show information about this thing")
//http.ListenAndServe(":8080", nil)
go sender()
}*/
node messages fix
//Command to test application without deploy:
//goapp serve app.yaml
//Command to deploy/update application:
//goapp deploy -application golangnode0 -version 0
package main
import (
"bytes"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/smtp"
"appengine"
"appengine/urlfetch"
)
// statusContent holds the most recent status/echo message; it is read by
// GET /status and overwritten by POST /status and testEcho.
// NOTE(review): shared mutable state with no locking — concurrent requests
// may race; confirm this is acceptable for the test app.
var statusContent string = "Default status"
// webPage carries data for the start.html template.
type webPage struct {
	// Title is the page title rendered by the template.
	Title string
}
// node models a peer-node message (currently only referenced by commented-out code).
type node struct {
	id      int
	content string
}
// gmailUser holds the SMTP credentials used by sendMail.
type gmailUser struct {
	name string
	pswd string
}
// sendMail delivers msg via Gmail's SMTP relay using PLAIN authentication.
// The password is intentionally blank in this checked-in sample.
func sendMail(msg string) {
	sender := gmailUser{
		"golangapplication@gmail.com",
		"",
	}
	auth := smtp.PlainAuth("", sender.name, sender.pswd, "smtp.gmail.com")
	if err := smtp.SendMail(
		"smtp.gmail.com:587",
		auth,
		sender.name,
		[]string{"rec@mail.com"},
		[]byte(msg),
	); err != nil {
		log.Fatal(err)
	}
}
// echo POSTs a fixed JSON message to a peer node's /status endpoint and
// stores the response body in statusContent.
// NOTE: a plain http.Client does not work on classic App Engine (urlfetch is
// required); kept for reference / local runs.
func echo() {
	url := "http://golangappnode1.appspot.com/status"
	var jsonStr = []byte(`{"msg":"Hello!"}`)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was silently discarded, letting a nil request reach Do.
		panic(err)
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	statusContent = string(body)
}
func helloWorld(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
}
// startPage serves the control UI: GET renders start.html, POST echoes the
// submitted form values back to the client.
func startPage(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		templatePage, err := template.ParseFiles("start.html")
		if err != nil {
			// BUG FIX: the parse error was ignored; Execute on a nil template panics.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		templatePage.Execute(w, &webPage{"simplePage"})
	case "POST":
		r.ParseForm()
		//go echo()
		fmt.Fprintf(w, "Successful read command/input from web-interface! Input contains - "+r.FormValue("nodeId")+" "+r.FormValue("echoContent"))
	}
}
// statusServer implements /status: GET reports the current status string,
// POST replaces it with the raw request body.
func statusServer(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		fmt.Fprintf(w, "Get status - "+statusContent)
	case "POST":
		body := new(bytes.Buffer)
		body.ReadFrom(r.Body)
		received := body.String()
		fmt.Fprintf(w, "Get data by params in POST - OK")
		statusContent = "POST request handled, " + received
	}
}
// testEcho forwards the submitted echoContent to the node selected by the
// nodeId form value and records the peer's response in statusContent.
func testEcho(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	c := appengine.NewContext(r)
	var jsonStr = []byte(`{"Message from another node":"` + r.FormValue("echoContent") + `"}`)
	buf := bytes.NewBuffer(jsonStr)
	// urlfetch is the only outbound HTTP transport available on classic App Engine.
	client := http.Client{Transport: &urlfetch.Transport{Context: c}}
	resp, err := client.Post("http://goappnode"+r.FormValue("nodeId")+".appspot.com/status", "application/octet-stream", buf)
	if err != nil {
		statusContent = err.Error()
		fmt.Println(err)
		// BUG FIX: without this return, resp is nil and ReadAll below panics.
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	respBody, _ := ioutil.ReadAll(resp.Body)
	statusContent = "Response from node - " + string(respBody)
}
func showInfo(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Information page for test project.")
fmt.Fprintln(w, "Language - Go;")
fmt.Fprintln(w, "Platform - Google Application Engine;")
}
// init registers all HTTP handlers. On classic Google App Engine the runtime
// owns the listening socket, so init() does the wiring and no main() is needed.
func init() {
	http.HandleFunc("/", startPage)
	http.HandleFunc("/helloworld", helloWorld)
	http.HandleFunc("/showinfo", showInfo)
	http.HandleFunc("/status", statusServer)
	http.HandleFunc("/echo", testEcho)
	// Wrong code for App Engine - the server cannot tell what it needs to show:
	//http.ListenAndServe(":80", nil)
}
//this func is not needed when deploying on Google App Engine; the init() func replaces main()
/*
func main() {
//fmt.Println("Hello, test server started on 8080 port.\n - /helloworld - show title page\n - /showinfo - show information about this thing")
//http.ListenAndServe(":8080", nil)
go sender()
}*/
|
package snapshotfs
import (
"bytes"
"context"
"encoding/json"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"github.com/kopia/kopia/fs"
"github.com/kopia/kopia/fs/ignorefs"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/logging"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy"
)
// copyBufferSize is the size of each pooled buffer used by copyWithProgress.
const copyBufferSize = 128 * 1024
// log returns a context-aware logger for this package.
var log = logging.GetContextLoggerFunc("kopia/upload")
// errCancelled is returned internally when an upload stops early.
var errCancelled = errors.New("canceled")
// Uploader supports efficient uploading files and directories to repository.
type Uploader struct {
	// Progress receives progress callbacks during upload.
	Progress UploadProgress
	// automatically cancel the Upload after certain number of bytes
	MaxUploadBytes int64
	// ignore read errors
	IgnoreReadErrors bool
	// probability (percent) with which cached entries will be ignored, must be [0..100]
	// 0=always use cached object entries if possible
	// 100=never use cached entries
	ForceHashPercentage int
	// Number of files to hash and upload in parallel.
	ParallelUploads int
	// repo is the destination repository.
	repo *repo.Repository
	// stats accumulates counters for the current Upload call.
	stats snapshot.Stats
	// canceled is set to 1 (atomically) by Cancel.
	canceled int32
	// uploadBufPool recycles copy buffers of copyBufferSize bytes.
	uploadBufPool sync.Pool
}
// IsCancelled returns true if the upload is canceled.
// This covers both explicit Cancel() and hitting MaxUploadBytes (see cancelReason).
func (u *Uploader) IsCancelled() bool {
	return u.cancelReason() != ""
}
// cancelReason returns a human-readable reason why the upload should stop
// ("canceled" or "limit reached"), or "" if it should continue.
func (u *Uploader) cancelReason() string {
	if c := atomic.LoadInt32(&u.canceled) != 0; c {
		return "canceled"
	}
	// compare bytes written to the repository against the optional cap
	_, wb := u.repo.Content.Stats.WrittenContent()
	if mub := u.MaxUploadBytes; mub > 0 && wb > mub {
		return "limit reached"
	}
	return ""
}
// uploadFileInternal hashes the contents of f into the repository and returns
// a DirEntry describing it. relativePath is used only for progress reporting.
func (u *Uploader) uploadFileInternal(ctx context.Context, relativePath string, f fs.File, pol *policy.Policy) (*snapshot.DirEntry, error) {
	u.Progress.HashingFile(relativePath)
	defer u.Progress.FinishedHashingFile(relativePath, f.Size())
	file, err := f.Open(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to open file")
	}
	defer file.Close() //nolint:errcheck
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "FILE:" + f.Name(),
		Compressor: pol.CompressionPolicy.CompressorForFile(f),
	})
	defer writer.Close() //nolint:errcheck
	written, err := u.copyWithProgress(writer, file, 0, f.Size())
	if err != nil {
		return nil, err
	}
	// re-read entry metadata after hashing; presumably this captures the
	// file's state as of read time — TODO(review): confirm intent.
	fi2, err := file.Entry()
	if err != nil {
		return nil, err
	}
	r, err := writer.Result()
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(fi2, r)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	// record the number of bytes actually copied, which may differ from the
	// size reported by the directory listing.
	de.FileSize = written
	return de, nil
}
// uploadSymlinkInternal stores the symlink target string as a repository
// object and returns a DirEntry describing the link.
func (u *Uploader) uploadSymlinkInternal(ctx context.Context, relativePath string, f fs.Symlink) (*snapshot.DirEntry, error) {
	u.Progress.HashingFile(relativePath)
	defer u.Progress.FinishedHashingFile(relativePath, f.Size())
	target, err := f.Readlink(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to read symlink")
	}
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "SYMLINK:" + f.Name(),
	})
	defer writer.Close() //nolint:errcheck
	// the "content" of a symlink is its target path
	written, err := u.copyWithProgress(writer, bytes.NewBufferString(target), 0, f.Size())
	if err != nil {
		return nil, err
	}
	r, err := writer.Result()
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(f, r)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.FileSize = written
	return de, nil
}
// copyWithProgress copies src into dst using a pooled buffer while reporting
// hashed bytes to u.Progress. 'completed' is the number of bytes transferred
// before this call and 'length' the expected total (grown when exceeded).
// Returns the number of bytes written by this call.
func (u *Uploader) copyWithProgress(dst io.Writer, src io.Reader, completed, length int64) (int64, error) {
	uploadBufPtr := u.uploadBufPool.Get().(*[]byte)
	defer u.uploadBufPool.Put(uploadBufPtr)
	uploadBuf := *uploadBufPtr
	var written int64
	for {
		// cooperative cancellation check once per buffer
		if u.IsCancelled() {
			// NOTE(review): returns 0 rather than 'written'; callers discard
			// the count on error, so this appears benign — confirm.
			return 0, errCancelled
		}
		readBytes, readErr := src.Read(uploadBuf)
		if readBytes > 0 {
			wroteBytes, writeErr := dst.Write(uploadBuf[0:readBytes])
			if wroteBytes > 0 {
				written += int64(wroteBytes)
				completed += int64(wroteBytes)
				u.Progress.HashedBytes(int64(wroteBytes))
				if length < completed {
					length = completed
				}
			}
			if writeErr != nil {
				return written, writeErr
			}
			if readBytes != wroteBytes {
				return written, io.ErrShortWrite
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			return written, readErr
		}
	}
	return written, nil
}
// newDirEntry builds a snapshot.DirEntry carrying the metadata of md together
// with the object ID of its uploaded content; fails for unsupported entry types.
func newDirEntry(md fs.Entry, oid object.ID) (*snapshot.DirEntry, error) {
	var entryType snapshot.EntryType
	switch md := md.(type) {
	case fs.Directory:
		entryType = snapshot.EntryTypeDirectory
	case fs.Symlink:
		entryType = snapshot.EntryTypeSymlink
	case fs.File:
		entryType = snapshot.EntryTypeFile
	default:
		return nil, errors.Errorf("invalid entry type %T", md)
	}
	return &snapshot.DirEntry{
		Name: md.Name(),
		Type: entryType,
		Permissions: snapshot.Permissions(md.Mode() & os.ModePerm),
		FileSize: md.Size(),
		ModTime: md.ModTime(),
		UserID: md.Owner().UserID,
		GroupID: md.Owner().GroupID,
		ObjectID: oid,
	}, nil
}
// uploadFile uploads the specified File to the repository.
// It wraps uploadFileInternal and attaches a single-file DirSummary so the
// result can serve as a snapshot root.
func (u *Uploader) uploadFile(ctx context.Context, relativePath string, file fs.File, pol *policy.Policy) (*snapshot.DirEntry, error) {
	res, err := u.uploadFileInternal(ctx, relativePath, file, pol)
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(file, res.ObjectID)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.DirSummary = &fs.DirectorySummary{
		TotalFileCount: 1,
		TotalFileSize: res.FileSize,
		MaxModTime: res.ModTime,
	}
	return de, nil
}
// uploadDir uploads the specified Directory to the repository.
// An optional list of previously-uploaded directories may be provided, in
// which case the Uploader uses their cached entries to avoid rehashing
// unchanged files.
func (u *Uploader) uploadDir(ctx context.Context, rootDir fs.Directory, policyTree *policy.Tree, previousDirs []fs.Directory) (*snapshot.DirEntry, error) {
	oid, summ, err := uploadDirInternal(ctx, u, rootDir, policyTree, previousDirs, ".")
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(rootDir, oid)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.DirSummary = &summ
	// err is necessarily nil at this point; return an explicit nil instead
	// of the stale variable so the success path is unmistakable.
	return de, nil
}
// foreachEntryUnlessCancelled invokes cb for every entry using up to
// 'parallel' worker goroutines, stopping at the first error or when the
// upload is canceled. cb may be invoked concurrently from multiple workers.
func (u *Uploader) foreachEntryUnlessCancelled(ctx context.Context, parallel int, relativePath string, entries fs.Entries, cb func(ctx context.Context, entry fs.Entry, entryRelativePath string) error) error {
	if parallel > len(entries) {
		// don't launch more goroutines than needed
		parallel = len(entries)
	}
	if parallel == 0 {
		return nil
	}
	ch := make(chan fs.Entry)
	eg, ctx := errgroup.WithContext(ctx)
	// one goroutine to pump entries into channel until ctx is closed.
	eg.Go(func() error {
		defer close(ch)
		for _, e := range entries {
			select {
			case ch <- e: // sent to channel
			case <-ctx.Done(): // context closed
				return nil
			}
		}
		return nil
	})
	// launch N workers in parallel
	for i := 0; i < parallel; i++ {
		eg.Go(func() error {
			for entry := range ch {
				if u.IsCancelled() {
					return errCancelled
				}
				entryRelativePath := path.Join(relativePath, entry.Name())
				if err := cb(ctx, entry, entryRelativePath); err != nil {
					return err
				}
			}
			return nil
		})
	}
	// Wait returns the first non-nil error from any goroutine.
	return eg.Wait()
}
// populateChildEntries drains 'children', appending each entry to the parent
// manifest while folding file counts/sizes/mod-times into the parent summary,
// then sorts entries (directories first, then by name). It runs on a single
// goroutine, so parent needs no locking.
func (u *Uploader) populateChildEntries(parent *snapshot.DirManifest, children <-chan *snapshot.DirEntry) {
	parentSummary := parent.Summary
	for de := range children {
		switch de.Type {
		case snapshot.EntryTypeFile:
			u.stats.TotalFileCount++
			u.stats.TotalFileSize += de.FileSize
			parentSummary.TotalFileCount++
			parentSummary.TotalFileSize += de.FileSize
			if de.ModTime.After(parentSummary.MaxModTime) {
				parentSummary.MaxModTime = de.ModTime
			}
		case snapshot.EntryTypeDirectory:
			// directory summaries were aggregated recursively; fold them in
			if childSummary := de.DirSummary; childSummary != nil {
				parentSummary.TotalFileCount += childSummary.TotalFileCount
				parentSummary.TotalFileSize += childSummary.TotalFileSize
				parentSummary.TotalDirCount += childSummary.TotalDirCount
				if childSummary.MaxModTime.After(parentSummary.MaxModTime) {
					parentSummary.MaxModTime = childSummary.MaxModTime
				}
			}
		}
		parent.Entries = append(parent.Entries, de)
	}
	// sort the result, directories first, then non-directories, ordered by name
	sort.Slice(parent.Entries, func(i, j int) bool {
		if leftDir, rightDir := isDir(parent.Entries[i]), isDir(parent.Entries[j]); leftDir != rightDir {
			// directories get sorted before non-directories
			return leftDir
		}
		return parent.Entries[i].Name < parent.Entries[j].Name
	})
}
// isDir reports whether the given manifest entry describes a subdirectory.
func isDir(de *snapshot.DirEntry) bool {
	return de.Type == snapshot.EntryTypeDirectory
}
// processChildren uploads all children of a directory — subdirectories first,
// then files/symlinks (possibly in parallel) — and collects the resulting
// entries into dirManifest via the 'output' channel.
func (u *Uploader) processChildren(ctx context.Context, dirManifest *snapshot.DirManifest, relativePath string, entries fs.Entries, policyTree *policy.Tree, previousEntries []fs.Entries) error {
	var wg sync.WaitGroup
	// channel where we will add directory and file entries, possibly in parallel
	output := make(chan *snapshot.DirEntry)
	// goroutine that will drain data from 'output' and update dirManifest
	wg.Add(1)
	go func() {
		defer wg.Done()
		u.populateChildEntries(dirManifest, output)
	}()
	defer func() {
		// before this function returns, close the output channel and wait for the goroutine above to complete.
		close(output)
		wg.Wait()
	}()
	if err := u.processSubdirectories(ctx, output, relativePath, entries, policyTree, previousEntries); err != nil {
		return err
	}
	if err := u.processNonDirectories(ctx, output, relativePath, entries, policyTree, previousEntries); err != nil {
		return err
	}
	return nil
}
// processSubdirectories recursively uploads each subdirectory in 'entries',
// matching it against same-named directories from previous snapshots so
// cached results can be reused; results are sent to 'output'.
func (u *Uploader) processSubdirectories(ctx context.Context, output chan *snapshot.DirEntry, relativePath string, entries fs.Entries, policyTree *policy.Tree, previousEntries []fs.Entries) error {
	// for now don't process subdirectories in parallel, we need a mechanism to
	// prevent explosion of parallelism
	const parallelism = 1
	return u.foreachEntryUnlessCancelled(ctx, parallelism, relativePath, entries, func(ctx context.Context, entry fs.Entry, entryRelativePath string) error {
		dir, ok := entry.(fs.Directory)
		if !ok {
			// skip non-directories
			return nil
		}
		// gather same-named directories from each previous snapshot
		var previousDirs []fs.Directory
		for _, e := range previousEntries {
			if d, _ := e.FindByName(entry.Name()).(fs.Directory); d != nil {
				previousDirs = append(previousDirs, d)
			}
		}
		previousDirs = uniqueDirectories(previousDirs)
		oid, subdirsumm, err := uploadDirInternal(ctx, u, dir, policyTree.Child(entry.Name()), previousDirs, entryRelativePath)
		if err == errCancelled {
			return err
		}
		if err != nil {
			// Note: This only catches errors in subdirectories of the snapshot root, not on the snapshot
			// root itself. The intention is to always fail if the top level directory can't be read,
			// otherwise a meaningless, empty snapshot is created that can't be restored.
			ignoreDirErr := u.shouldIgnoreDirectoryReadErrors(policyTree)
			if _, ok := err.(dirReadError); ok && ignoreDirErr {
				log(ctx).Warningf("unable to read directory %q: %s, ignoring", dir.Name(), err)
				return nil
			}
			return errors.Errorf("unable to process directory %q: %s", entry.Name(), err)
		}
		de, err := newDirEntry(dir, oid)
		if err != nil {
			return errors.Wrap(err, "unable to create dir entry")
		}
		de.DirSummary = &subdirsumm
		output <- de
		return nil
	})
}
// metadataEquals reports whether two entries carry identical metadata
// (modification time, mode, size and owner); used to decide whether a cached
// entry can stand in for re-hashing.
func metadataEquals(e1, e2 fs.Entry) bool {
	return e1.ModTime().Equal(e2.ModTime()) &&
		e1.Mode() == e2.Mode() &&
		e1.Size() == e2.Size() &&
		e1.Owner() == e2.Owner()
}
// findCachedEntry returns an entry with the same name and identical metadata
// from any of the previous snapshots' entry lists, or nil when none matches.
func findCachedEntry(ctx context.Context, entry fs.Entry, prevEntries []fs.Entries) fs.Entry {
	for _, e := range prevEntries {
		if ent := e.FindByName(entry.Name()); ent != nil {
			if metadataEquals(entry, ent) {
				return ent
			}
			log(ctx).Debugf("found non-matching entry for %v: %v %v %v", entry.Name(), ent.Mode(), ent.Size(), ent.ModTime())
		}
	}
	log(ctx).Debugf("could not find cache entry for %v", entry.Name())
	return nil
}
// objectIDPercent deterministically maps a given object ID onto a number in
// [0..99] by hashing its string form; compared against ForceHashPercentage.
func objectIDPercent(obj object.ID) int {
	h := fnv.New32a()
	io.WriteString(h, obj.String()) //nolint:errcheck
	return int(h.Sum32() % 100) //nolint:gomnd
}
// maybeIgnoreCachedEntry returns ent unless ForceHashPercentage dictates that
// this particular object must be re-hashed (or ent carries no object ID), in
// which case it returns nil.
func (u *Uploader) maybeIgnoreCachedEntry(ctx context.Context, ent fs.Entry) fs.Entry {
	h, ok := ent.(object.HasObjectID)
	if !ok {
		return nil
	}
	if objectIDPercent(h.ObjectID()) < u.ForceHashPercentage {
		log(ctx).Debugf("ignoring valid cached object: %v", h.ObjectID())
		return nil
	}
	return ent
}
// processNonDirectories hashes/uploads all files and symlinks in 'entries'
// using up to ParallelUploads workers (defaulting to NumCPU), reusing cached
// object IDs from previous snapshot entries when metadata matches.
func (u *Uploader) processNonDirectories(ctx context.Context, output chan *snapshot.DirEntry, dirRelativePath string, entries fs.Entries, policyTree *policy.Tree, prevEntries []fs.Entries) error {
	workerCount := u.ParallelUploads
	if workerCount == 0 {
		workerCount = runtime.NumCPU()
	}
	return u.foreachEntryUnlessCancelled(ctx, workerCount, dirRelativePath, entries, func(ctx context.Context, entry fs.Entry, entryRelativePath string) error {
		// note this function runs in parallel and updates 'u.stats', which must be done using atomic operations.
		if _, ok := entry.(fs.Directory); ok {
			// skip directories
			return nil
		}
		// See if we had this name during either of previous passes.
		if cachedEntry := u.maybeIgnoreCachedEntry(ctx, findCachedEntry(ctx, entry, prevEntries)); cachedEntry != nil {
			atomic.AddInt32(&u.stats.CachedFiles, 1)
			u.Progress.CachedFile(filepath.Join(dirRelativePath, entry.Name()), entry.Size())
			// compute entryResult now, cachedEntry is short-lived
			cachedDirEntry, err := newDirEntry(entry, cachedEntry.(object.HasObjectID).ObjectID())
			if err != nil {
				return errors.Wrap(err, "unable to create dir entry")
			}
			output <- cachedDirEntry
			return nil
		}
		switch entry := entry.(type) {
		case fs.Symlink:
			de, err := u.uploadSymlinkInternal(ctx, filepath.Join(dirRelativePath, entry.Name()), entry)
			if err != nil {
				return u.maybeIgnoreFileReadError(err, policyTree)
			}
			output <- de
			return nil
		case fs.File:
			atomic.AddInt32(&u.stats.NonCachedFiles, 1)
			de, err := u.uploadFileInternal(ctx, filepath.Join(dirRelativePath, entry.Name()), entry, policyTree.Child(entry.Name()).EffectivePolicy())
			if err != nil {
				return u.maybeIgnoreFileReadError(err, policyTree)
			}
			output <- de
			return nil
		default:
			return errors.Errorf("file type not supported: %v", entry.Mode())
		}
	})
}
// maybeReadDirectoryEntries returns the entries of dir, or nil when dir is
// nil or cannot be read. Read errors are logged and swallowed: a missing
// previous directory only disables caching, it is not fatal.
func maybeReadDirectoryEntries(ctx context.Context, dir fs.Directory) fs.Entries {
	if dir == nil {
		return nil
	}
	entries, err := dir.Readdir(ctx)
	if err == nil {
		return entries
	}
	log(ctx).Warningf("unable to read previous directory entries: %v", err)
	return nil
}
// uniqueDirectories removes duplicate directories (as identified by their
// object IDs), returning the input slice untouched when it is already
// duplicate-free.
func uniqueDirectories(dirs []fs.Directory) []fs.Directory {
	if len(dirs) <= 1 {
		return dirs
	}
	byObjectID := make(map[object.ID]fs.Directory, len(dirs))
	for _, d := range dirs {
		byObjectID[d.(object.HasObjectID).ObjectID()] = d
	}
	if len(byObjectID) == len(dirs) {
		return dirs
	}
	deduped := make([]fs.Directory, 0, len(byObjectID))
	for _, d := range byObjectID {
		deduped = append(deduped, d)
	}
	return deduped
}
// dirReadError distinguishes an error thrown when attempting to read a directory
// (as opposed to errors while processing its children); processSubdirectories
// uses it to decide whether directory-level ignore policies apply.
type dirReadError struct {
	error
}
// uploadDirInternal reads 'directory', uploads all of its children (reusing
// cached entries from previousDirs where possible) and writes the resulting
// directory manifest to the repository, returning its object ID and summary.
func uploadDirInternal(
	ctx context.Context,
	u *Uploader,
	directory fs.Directory,
	policyTree *policy.Tree,
	previousDirs []fs.Directory,
	dirRelativePath string,
) (object.ID, fs.DirectorySummary, error) {
	u.stats.TotalDirectoryCount++
	u.Progress.StartedDirectory(dirRelativePath)
	defer u.Progress.FinishedDirectory(dirRelativePath)
	dirManifest := &snapshot.DirManifest{
		StreamType: directoryStreamType,
		Summary: &fs.DirectorySummary{
			TotalDirCount: 1,
		},
	}
	defer func() {
		// mark the manifest incomplete if the upload got canceled midway
		dirManifest.Summary.IncompleteReason = u.cancelReason()
	}()
	t0 := u.repo.Time()
	entries, direrr := directory.Readdir(ctx)
	log(ctx).Debugf("finished reading directory %v in %v", dirRelativePath, u.repo.Time().Sub(t0))
	if direrr != nil {
		return "", fs.DirectorySummary{}, dirReadError{direrr}
	}
	var prevEntries []fs.Entries
	for _, d := range uniqueDirectories(previousDirs) {
		if ent := maybeReadDirectoryEntries(ctx, d); ent != nil {
			prevEntries = append(prevEntries, ent)
		}
	}
	if err := u.processChildren(ctx, dirManifest, dirRelativePath, entries, policyTree, prevEntries); err != nil && err != errCancelled {
		return "", fs.DirectorySummary{}, err
	}
	if len(dirManifest.Entries) == 0 {
		dirManifest.Summary.MaxModTime = directory.ModTime()
	}
	// at this point dirManifest is ready to go
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "DIR:" + dirRelativePath,
		Prefix: "k",
	})
	// BUG FIX: the writer was never closed here, unlike the defer-Close
	// pattern used by uploadFileInternal/uploadSymlinkInternal.
	defer writer.Close() //nolint:errcheck
	if err := json.NewEncoder(writer).Encode(dirManifest); err != nil {
		return "", fs.DirectorySummary{}, errors.Wrap(err, "unable to encode directory JSON")
	}
	oid, err := writer.Result()
	return oid, *dirManifest.Summary, err
}
// maybeIgnoreFileReadError downgrades err to nil when either the uploader's
// global IgnoreReadErrors flag or the effective error-handling policy says
// file read errors should be ignored.
func (u *Uploader) maybeIgnoreFileReadError(err error, policyTree *policy.Tree) error {
	ehp := policyTree.EffectivePolicy().ErrorHandlingPolicy
	if !u.IgnoreReadErrors && !ehp.IgnoreFileErrorsOrDefault(false) {
		return err
	}
	return nil
}
// shouldIgnoreDirectoryReadErrors reports whether directory read errors are
// tolerated, either via the uploader-wide flag or the effective policy.
func (u *Uploader) shouldIgnoreDirectoryReadErrors(policyTree *policy.Tree) bool {
	ehp := policyTree.EffectivePolicy().ErrorHandlingPolicy
	return u.IgnoreReadErrors || ehp.IgnoreDirectoryErrorsOrDefault(false)
}
// NewUploader creates new Uploader object for a given repository.
// The returned uploader reports no progress (NullUploadProgress) and hashes
// one file at a time until configured otherwise.
func NewUploader(r *repo.Repository) *Uploader {
	return &Uploader{
		repo: r,
		Progress: &NullUploadProgress{},
		IgnoreReadErrors: false,
		ParallelUploads: 1,
		uploadBufPool: sync.Pool{
			New: func() interface{} {
				// store a pointer to the slice so Put/Get do not box the header
				p := make([]byte, copyBufferSize)
				return &p
			},
		},
	}
}
// Cancel requests cancellation of an upload that's in progress. Will typically result in an incomplete snapshot.
// Safe to call from any goroutine; workers observe it via IsCancelled.
func (u *Uploader) Cancel() {
	atomic.StoreInt32(&u.canceled, 1)
}
// maybeOpenDirectoryFromManifest resolves the root directory of a previous
// snapshot manifest, returning nil (after logging) when the manifest is nil,
// its root entry is invalid, or the root is not a directory.
func (u *Uploader) maybeOpenDirectoryFromManifest(ctx context.Context, man *snapshot.Manifest) fs.Directory {
	if man == nil {
		return nil
	}
	ent, err := EntryFromDirEntry(u.repo, man.RootEntry)
	if err != nil {
		log(ctx).Warningf("invalid previous manifest root entry %v: %v", man.RootEntry, err)
		return nil
	}
	dir, ok := ent.(fs.Directory)
	if !ok {
		log(ctx).Debugf("previous manifest root is not a directory (was %T %+v)", ent, man.RootEntry)
		return nil
	}
	return dir
}
// Upload uploads contents of the specified filesystem entry (file or directory) to the repository and returns snapshot.Manifest with statistics.
// Old snapshot manifest, when provided can be used to speed up uploads by utilizing hash cache.
func (u *Uploader) Upload(
	ctx context.Context,
	source fs.Entry,
	policyTree *policy.Tree,
	sourceInfo snapshot.SourceInfo,
	previousManifests ...*snapshot.Manifest,
) (*snapshot.Manifest, error) {
	log(ctx).Debugf("Uploading %v", sourceInfo)
	s := &snapshot.Manifest{
		Source: sourceInfo,
	}
	// use the largest previous counts to seed progress estimation
	maxPreviousTotalFileSize := int64(0)
	maxPreviousFileCount := 0
	for _, m := range previousManifests {
		if s := m.Stats.TotalFileSize; s > maxPreviousTotalFileSize {
			maxPreviousTotalFileSize = s
		}
		if s := m.Stats.TotalFileCount; s > maxPreviousFileCount {
			maxPreviousFileCount = s
		}
	}
	u.Progress.UploadStarted(maxPreviousFileCount, maxPreviousTotalFileSize)
	defer u.Progress.UploadFinished()
	// reset per-upload statistics
	u.stats = snapshot.Stats{}
	var err error
	s.StartTime = u.repo.Time()
	switch entry := source.(type) {
	case fs.Directory:
		// gather root directories of previous snapshots for entry caching
		var previousDirs []fs.Directory
		for _, m := range previousManifests {
			if d := u.maybeOpenDirectoryFromManifest(ctx, m); d != nil {
				previousDirs = append(previousDirs, d)
			}
		}
		// wrap the source so ignore rules are applied and excluded files counted
		entry = ignorefs.New(entry, policyTree, ignorefs.ReportIgnoredFiles(func(_ string, md fs.Entry) {
			u.stats.AddExcluded(md)
		}))
		s.RootEntry, err = u.uploadDir(ctx, entry, policyTree, previousDirs)
	case fs.File:
		s.RootEntry, err = u.uploadFile(ctx, entry.Name(), entry, policyTree.EffectivePolicy())
	default:
		return nil, errors.Errorf("unsupported source: %v", s.Source)
	}
	if err != nil {
		return nil, err
	}
	s.IncompleteReason = u.cancelReason()
	s.EndTime = u.repo.Time()
	s.Stats = u.stats
	return s, nil
}
upload: added missing writer.Close()
package snapshotfs
import (
"bytes"
"context"
"encoding/json"
"hash/fnv"
"io"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"github.com/kopia/kopia/fs"
"github.com/kopia/kopia/fs/ignorefs"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/logging"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy"
)
// copyBufferSize is the size of each pooled buffer used by copyWithProgress.
const copyBufferSize = 128 * 1024
// log returns a context-aware logger for this package.
var log = logging.GetContextLoggerFunc("kopia/upload")
// errCancelled is returned internally when an upload stops early.
var errCancelled = errors.New("canceled")
// Uploader supports efficient uploading files and directories to repository.
type Uploader struct {
	// Progress receives progress callbacks during upload.
	Progress UploadProgress
	// automatically cancel the Upload after certain number of bytes
	MaxUploadBytes int64
	// ignore read errors
	IgnoreReadErrors bool
	// probability (percent) with which cached entries will be ignored, must be [0..100]
	// 0=always use cached object entries if possible
	// 100=never use cached entries
	ForceHashPercentage int
	// Number of files to hash and upload in parallel.
	ParallelUploads int
	// repo is the destination repository.
	repo *repo.Repository
	// stats accumulates counters for the current Upload call.
	stats snapshot.Stats
	// canceled is set to 1 (atomically) by Cancel.
	canceled int32
	// uploadBufPool recycles copy buffers of copyBufferSize bytes.
	uploadBufPool sync.Pool
}
// IsCancelled returns true if the upload is canceled.
// This covers both explicit Cancel() and hitting MaxUploadBytes (see cancelReason).
func (u *Uploader) IsCancelled() bool {
	return u.cancelReason() != ""
}
// cancelReason returns a human-readable reason why the upload should stop
// ("canceled" or "limit reached"), or "" if it should continue.
func (u *Uploader) cancelReason() string {
	if c := atomic.LoadInt32(&u.canceled) != 0; c {
		return "canceled"
	}
	// compare bytes written to the repository against the optional cap
	_, wb := u.repo.Content.Stats.WrittenContent()
	if mub := u.MaxUploadBytes; mub > 0 && wb > mub {
		return "limit reached"
	}
	return ""
}
// uploadFileInternal hashes the contents of f into the repository and returns
// a DirEntry describing it. relativePath is used only for progress reporting.
func (u *Uploader) uploadFileInternal(ctx context.Context, relativePath string, f fs.File, pol *policy.Policy) (*snapshot.DirEntry, error) {
	u.Progress.HashingFile(relativePath)
	defer u.Progress.FinishedHashingFile(relativePath, f.Size())
	file, err := f.Open(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to open file")
	}
	defer file.Close() //nolint:errcheck
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "FILE:" + f.Name(),
		Compressor: pol.CompressionPolicy.CompressorForFile(f),
	})
	defer writer.Close() //nolint:errcheck
	written, err := u.copyWithProgress(writer, file, 0, f.Size())
	if err != nil {
		return nil, err
	}
	// re-read entry metadata after hashing; presumably this captures the
	// file's state as of read time — TODO(review): confirm intent.
	fi2, err := file.Entry()
	if err != nil {
		return nil, err
	}
	r, err := writer.Result()
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(fi2, r)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	// record the number of bytes actually copied, which may differ from the
	// size reported by the directory listing.
	de.FileSize = written
	return de, nil
}
// uploadSymlinkInternal stores the symlink target string as a repository
// object and returns a DirEntry describing the link.
func (u *Uploader) uploadSymlinkInternal(ctx context.Context, relativePath string, f fs.Symlink) (*snapshot.DirEntry, error) {
	u.Progress.HashingFile(relativePath)
	defer u.Progress.FinishedHashingFile(relativePath, f.Size())
	target, err := f.Readlink(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to read symlink")
	}
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "SYMLINK:" + f.Name(),
	})
	defer writer.Close() //nolint:errcheck
	// the "content" of a symlink is its target path
	written, err := u.copyWithProgress(writer, bytes.NewBufferString(target), 0, f.Size())
	if err != nil {
		return nil, err
	}
	r, err := writer.Result()
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(f, r)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.FileSize = written
	return de, nil
}
// copyWithProgress copies src into dst using a pooled buffer while reporting
// hashed bytes to u.Progress. 'completed' is the number of bytes transferred
// before this call and 'length' the expected total (grown when exceeded).
// Returns the number of bytes written by this call.
func (u *Uploader) copyWithProgress(dst io.Writer, src io.Reader, completed, length int64) (int64, error) {
	uploadBufPtr := u.uploadBufPool.Get().(*[]byte)
	defer u.uploadBufPool.Put(uploadBufPtr)
	uploadBuf := *uploadBufPtr
	var written int64
	for {
		// cooperative cancellation check once per buffer
		if u.IsCancelled() {
			// NOTE(review): returns 0 rather than 'written'; callers discard
			// the count on error, so this appears benign — confirm.
			return 0, errCancelled
		}
		readBytes, readErr := src.Read(uploadBuf)
		if readBytes > 0 {
			wroteBytes, writeErr := dst.Write(uploadBuf[0:readBytes])
			if wroteBytes > 0 {
				written += int64(wroteBytes)
				completed += int64(wroteBytes)
				u.Progress.HashedBytes(int64(wroteBytes))
				if length < completed {
					length = completed
				}
			}
			if writeErr != nil {
				return written, writeErr
			}
			if readBytes != wroteBytes {
				return written, io.ErrShortWrite
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			return written, readErr
		}
	}
	return written, nil
}
// newDirEntry builds a snapshot.DirEntry carrying the metadata of md together
// with the object ID of its uploaded content; fails for unsupported entry types.
func newDirEntry(md fs.Entry, oid object.ID) (*snapshot.DirEntry, error) {
	var entryType snapshot.EntryType
	switch md := md.(type) {
	case fs.Directory:
		entryType = snapshot.EntryTypeDirectory
	case fs.Symlink:
		entryType = snapshot.EntryTypeSymlink
	case fs.File:
		entryType = snapshot.EntryTypeFile
	default:
		return nil, errors.Errorf("invalid entry type %T", md)
	}
	return &snapshot.DirEntry{
		Name: md.Name(),
		Type: entryType,
		Permissions: snapshot.Permissions(md.Mode() & os.ModePerm),
		FileSize: md.Size(),
		ModTime: md.ModTime(),
		UserID: md.Owner().UserID,
		GroupID: md.Owner().GroupID,
		ObjectID: oid,
	}, nil
}
// uploadFile uploads the specified File to the repository.
// It wraps uploadFileInternal and attaches a single-file DirSummary so the
// result can serve as a snapshot root.
func (u *Uploader) uploadFile(ctx context.Context, relativePath string, file fs.File, pol *policy.Policy) (*snapshot.DirEntry, error) {
	res, err := u.uploadFileInternal(ctx, relativePath, file, pol)
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(file, res.ObjectID)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.DirSummary = &fs.DirectorySummary{
		TotalFileCount: 1,
		TotalFileSize: res.FileSize,
		MaxModTime: res.ModTime,
	}
	return de, nil
}
// uploadDir uploads the specified Directory to the repository.
// An optional list of previously-uploaded directories may be provided, in
// which case the Uploader uses their cached entries to avoid rehashing
// unchanged files.
func (u *Uploader) uploadDir(ctx context.Context, rootDir fs.Directory, policyTree *policy.Tree, previousDirs []fs.Directory) (*snapshot.DirEntry, error) {
	oid, summ, err := uploadDirInternal(ctx, u, rootDir, policyTree, previousDirs, ".")
	if err != nil {
		return nil, err
	}
	de, err := newDirEntry(rootDir, oid)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create dir entry")
	}
	de.DirSummary = &summ
	// err is necessarily nil at this point; return an explicit nil instead
	// of the stale variable so the success path is unmistakable.
	return de, nil
}
// foreachEntryUnlessCancelled invokes cb for every entry using up to
// 'parallel' worker goroutines, stopping at the first error or when the
// upload is canceled. cb may be invoked concurrently from multiple workers.
func (u *Uploader) foreachEntryUnlessCancelled(ctx context.Context, parallel int, relativePath string, entries fs.Entries, cb func(ctx context.Context, entry fs.Entry, entryRelativePath string) error) error {
	if parallel > len(entries) {
		// don't launch more goroutines than needed
		parallel = len(entries)
	}
	if parallel == 0 {
		return nil
	}
	ch := make(chan fs.Entry)
	eg, ctx := errgroup.WithContext(ctx)
	// one goroutine to pump entries into channel until ctx is closed.
	eg.Go(func() error {
		defer close(ch)
		for _, e := range entries {
			select {
			case ch <- e: // sent to channel
			case <-ctx.Done(): // context closed
				return nil
			}
		}
		return nil
	})
	// launch N workers in parallel
	for i := 0; i < parallel; i++ {
		eg.Go(func() error {
			for entry := range ch {
				if u.IsCancelled() {
					return errCancelled
				}
				entryRelativePath := path.Join(relativePath, entry.Name())
				if err := cb(ctx, entry, entryRelativePath); err != nil {
					return err
				}
			}
			return nil
		})
	}
	// Wait returns the first non-nil error from any goroutine.
	return eg.Wait()
}
// populateChildEntries drains 'children', appending each entry to the parent
// manifest while folding file counts/sizes/mod-times into the parent summary,
// then sorts entries (directories first, then by name). It runs on a single
// goroutine, so parent needs no locking.
func (u *Uploader) populateChildEntries(parent *snapshot.DirManifest, children <-chan *snapshot.DirEntry) {
	parentSummary := parent.Summary
	for de := range children {
		switch de.Type {
		case snapshot.EntryTypeFile:
			u.stats.TotalFileCount++
			u.stats.TotalFileSize += de.FileSize
			parentSummary.TotalFileCount++
			parentSummary.TotalFileSize += de.FileSize
			if de.ModTime.After(parentSummary.MaxModTime) {
				parentSummary.MaxModTime = de.ModTime
			}
		case snapshot.EntryTypeDirectory:
			// directory summaries were aggregated recursively; fold them in
			if childSummary := de.DirSummary; childSummary != nil {
				parentSummary.TotalFileCount += childSummary.TotalFileCount
				parentSummary.TotalFileSize += childSummary.TotalFileSize
				parentSummary.TotalDirCount += childSummary.TotalDirCount
				if childSummary.MaxModTime.After(parentSummary.MaxModTime) {
					parentSummary.MaxModTime = childSummary.MaxModTime
				}
			}
		}
		parent.Entries = append(parent.Entries, de)
	}
	// sort the result, directories first, then non-directories, ordered by name
	sort.Slice(parent.Entries, func(i, j int) bool {
		if leftDir, rightDir := isDir(parent.Entries[i]), isDir(parent.Entries[j]); leftDir != rightDir {
			// directories get sorted before non-directories
			return leftDir
		}
		return parent.Entries[i].Name < parent.Entries[j].Name
	})
}
// isDir reports whether the given manifest entry describes a subdirectory.
func isDir(de *snapshot.DirEntry) bool {
	return de.Type == snapshot.EntryTypeDirectory
}
// processChildren uploads all children of a directory — subdirectories first,
// then files/symlinks (possibly in parallel) — and collects the resulting
// entries into dirManifest via the 'output' channel.
func (u *Uploader) processChildren(ctx context.Context, dirManifest *snapshot.DirManifest, relativePath string, entries fs.Entries, policyTree *policy.Tree, previousEntries []fs.Entries) error {
	var wg sync.WaitGroup
	// channel where we will add directory and file entries, possibly in parallel
	output := make(chan *snapshot.DirEntry)
	// goroutine that will drain data from 'output' and update dirManifest
	wg.Add(1)
	go func() {
		defer wg.Done()
		u.populateChildEntries(dirManifest, output)
	}()
	defer func() {
		// before this function returns, close the output channel and wait for the goroutine above to complete.
		close(output)
		wg.Wait()
	}()
	if err := u.processSubdirectories(ctx, output, relativePath, entries, policyTree, previousEntries); err != nil {
		return err
	}
	if err := u.processNonDirectories(ctx, output, relativePath, entries, policyTree, previousEntries); err != nil {
		return err
	}
	return nil
}
// processSubdirectories recursively uploads each subdirectory in 'entries',
// matching it against same-named directories from previous snapshots so
// cached results can be reused; results are sent to 'output'.
func (u *Uploader) processSubdirectories(ctx context.Context, output chan *snapshot.DirEntry, relativePath string, entries fs.Entries, policyTree *policy.Tree, previousEntries []fs.Entries) error {
	// for now don't process subdirectories in parallel, we need a mechanism to
	// prevent explosion of parallelism
	const parallelism = 1
	return u.foreachEntryUnlessCancelled(ctx, parallelism, relativePath, entries, func(ctx context.Context, entry fs.Entry, entryRelativePath string) error {
		dir, ok := entry.(fs.Directory)
		if !ok {
			// skip non-directories
			return nil
		}
		// gather same-named directories from each previous snapshot
		var previousDirs []fs.Directory
		for _, e := range previousEntries {
			if d, _ := e.FindByName(entry.Name()).(fs.Directory); d != nil {
				previousDirs = append(previousDirs, d)
			}
		}
		previousDirs = uniqueDirectories(previousDirs)
		oid, subdirsumm, err := uploadDirInternal(ctx, u, dir, policyTree.Child(entry.Name()), previousDirs, entryRelativePath)
		if err == errCancelled {
			return err
		}
		if err != nil {
			// Note: This only catches errors in subdirectories of the snapshot root, not on the snapshot
			// root itself. The intention is to always fail if the top level directory can't be read,
			// otherwise a meaningless, empty snapshot is created that can't be restored.
			ignoreDirErr := u.shouldIgnoreDirectoryReadErrors(policyTree)
			if _, ok := err.(dirReadError); ok && ignoreDirErr {
				log(ctx).Warningf("unable to read directory %q: %s, ignoring", dir.Name(), err)
				return nil
			}
			return errors.Errorf("unable to process directory %q: %s", entry.Name(), err)
		}
		de, err := newDirEntry(dir, oid)
		if err != nil {
			return errors.Wrap(err, "unable to create dir entry")
		}
		de.DirSummary = &subdirsumm
		output <- de
		return nil
	})
}
// metadataEquals reports whether two entries have identical metadata:
// modification time, mode, size and owner. Used to decide whether a
// previously-uploaded entry can be reused from cache.
func metadataEquals(e1, e2 fs.Entry) bool {
	return e1.ModTime().Equal(e2.ModTime()) &&
		e1.Mode() == e2.Mode() &&
		e1.Size() == e2.Size() &&
		e1.Owner() == e2.Owner()
}
// findCachedEntry looks up 'entry' by name in the entry lists of previous
// snapshots and returns the first one whose metadata (mod time, mode, size,
// owner) matches exactly; returns nil when no usable cached entry exists.
func findCachedEntry(ctx context.Context, entry fs.Entry, prevEntries []fs.Entries) fs.Entry {
	for _, e := range prevEntries {
		if ent := e.FindByName(entry.Name()); ent != nil {
			if metadataEquals(entry, ent) {
				return ent
			}
			log(ctx).Debugf("found non-matching entry for %v: %v %v %v", entry.Name(), ent.Mode(), ent.Size(), ent.ModTime())
		}
	}
	log(ctx).Debugf("could not find cache entry for %v", entry.Name())
	return nil
}
// objectIDPercent arbitrarily (but deterministically) maps given object ID
// onto a number 0..99, by hashing its string form with FNV-1a.
func objectIDPercent(obj object.ID) int {
	h := fnv.New32a()
	io.WriteString(h, obj.String()) //nolint:errcheck
	return int(h.Sum32() % 100)     //nolint:gomnd
}
// maybeIgnoreCachedEntry returns ent unless it has been deterministically
// selected for forced re-hashing based on u.ForceHashPercentage; entries
// without an object ID are always rejected (nil).
func (u *Uploader) maybeIgnoreCachedEntry(ctx context.Context, ent fs.Entry) fs.Entry {
	h, ok := ent.(object.HasObjectID)
	if !ok {
		return nil
	}
	if objectIDPercent(h.ObjectID()) < u.ForceHashPercentage {
		log(ctx).Debugf("ignoring valid cached object: %v", h.ObjectID())
		return nil
	}
	return ent
}
// processNonDirectories uploads all files and symlinks in 'entries' using a
// pool of workers (u.ParallelUploads, defaulting to NumCPU) and emits one
// DirEntry per item to 'output'. Entries unchanged since a previous snapshot
// are reused from cache instead of being re-uploaded.
func (u *Uploader) processNonDirectories(ctx context.Context, output chan *snapshot.DirEntry, dirRelativePath string, entries fs.Entries, policyTree *policy.Tree, prevEntries []fs.Entries) error {
	workerCount := u.ParallelUploads
	if workerCount == 0 {
		workerCount = runtime.NumCPU()
	}
	return u.foreachEntryUnlessCancelled(ctx, workerCount, dirRelativePath, entries, func(ctx context.Context, entry fs.Entry, entryRelativePath string) error {
		// note this function runs in parallel and updates 'u.stats', which must be done using atomic operations.
		if _, ok := entry.(fs.Directory); ok {
			// skip directories
			return nil
		}
		// See if we had this name during either of previous passes.
		if cachedEntry := u.maybeIgnoreCachedEntry(ctx, findCachedEntry(ctx, entry, prevEntries)); cachedEntry != nil {
			atomic.AddInt32(&u.stats.CachedFiles, 1)
			u.Progress.CachedFile(filepath.Join(dirRelativePath, entry.Name()), entry.Size())
			// compute entryResult now, cachedEntry is short-lived
			cachedDirEntry, err := newDirEntry(entry, cachedEntry.(object.HasObjectID).ObjectID())
			if err != nil {
				return errors.Wrap(err, "unable to create dir entry")
			}
			output <- cachedDirEntry
			return nil
		}
		// not cached - upload depending on the concrete entry type
		switch entry := entry.(type) {
		case fs.Symlink:
			de, err := u.uploadSymlinkInternal(ctx, filepath.Join(dirRelativePath, entry.Name()), entry)
			if err != nil {
				return u.maybeIgnoreFileReadError(err, policyTree)
			}
			output <- de
			return nil
		case fs.File:
			atomic.AddInt32(&u.stats.NonCachedFiles, 1)
			de, err := u.uploadFileInternal(ctx, filepath.Join(dirRelativePath, entry.Name()), entry, policyTree.Child(entry.Name()).EffectivePolicy())
			if err != nil {
				return u.maybeIgnoreFileReadError(err, policyTree)
			}
			output <- de
			return nil
		default:
			return errors.Errorf("file type not supported: %v", entry.Mode())
		}
	})
}
// maybeReadDirectoryEntries lists the entries of dir, returning nil when dir
// is nil or when listing fails; failures are logged, never propagated.
func maybeReadDirectoryEntries(ctx context.Context, dir fs.Directory) fs.Entries {
	if dir == nil {
		return nil
	}
	entries, err := dir.Readdir(ctx)
	if err == nil {
		return entries
	}
	log(ctx).Warningf("unable to read previous directory entries: %v", err)
	return nil
}
// uniqueDirectories deduplicates the given directories by their object IDs,
// returning the input slice unchanged when no duplicates exist.
func uniqueDirectories(dirs []fs.Directory) []fs.Directory {
	if len(dirs) <= 1 {
		return dirs
	}
	byOID := make(map[object.ID]fs.Directory, len(dirs))
	for _, d := range dirs {
		byOID[d.(object.HasObjectID).ObjectID()] = d
	}
	if len(byOID) == len(dirs) {
		// already unique - avoid rebuilding the slice
		return dirs
	}
	result := make([]fs.Directory, 0, len(byOID))
	for _, d := range byOID {
		result = append(result, d)
	}
	return result
}
// dirReadError distinguishes an error thrown when attempting to read a directory
// from other upload failures, so callers can apply directory-level
// error-handling policy (see processSubdirectories).
type dirReadError struct {
	error
}
// uploadDirInternal reads the given directory, uploads all of its children
// (reusing entries from previousDirs where possible), serializes the resulting
// DirManifest as JSON and writes it to the repository. It returns the object
// ID of the written manifest plus the directory's aggregated summary.
// A failure to read the directory itself is wrapped in dirReadError.
func uploadDirInternal(
	ctx context.Context,
	u *Uploader,
	directory fs.Directory,
	policyTree *policy.Tree,
	previousDirs []fs.Directory,
	dirRelativePath string,
) (object.ID, fs.DirectorySummary, error) {
	u.stats.TotalDirectoryCount++
	u.Progress.StartedDirectory(dirRelativePath)
	defer u.Progress.FinishedDirectory(dirRelativePath)
	dirManifest := &snapshot.DirManifest{
		StreamType: directoryStreamType,
		Summary: &fs.DirectorySummary{
			TotalDirCount: 1,
		},
	}
	// record the cancellation reason (if any) after children were processed
	defer func() {
		dirManifest.Summary.IncompleteReason = u.cancelReason()
	}()
	t0 := u.repo.Time()
	entries, direrr := directory.Readdir(ctx)
	log(ctx).Debugf("finished reading directory %v in %v", dirRelativePath, u.repo.Time().Sub(t0))
	if direrr != nil {
		// wrap so callers can distinguish directory-read failures
		return "", fs.DirectorySummary{}, dirReadError{direrr}
	}
	var prevEntries []fs.Entries
	for _, d := range uniqueDirectories(previousDirs) {
		if ent := maybeReadDirectoryEntries(ctx, d); ent != nil {
			prevEntries = append(prevEntries, ent)
		}
	}
	// errCancelled is not fatal here - we still persist the partial manifest
	if err := u.processChildren(ctx, dirManifest, dirRelativePath, entries, policyTree, prevEntries); err != nil && err != errCancelled {
		return "", fs.DirectorySummary{}, err
	}
	if len(dirManifest.Entries) == 0 {
		dirManifest.Summary.MaxModTime = directory.ModTime()
	}
	// at this point dirManifest is ready to go
	writer := u.repo.Objects.NewWriter(ctx, object.WriterOptions{
		Description: "DIR:" + dirRelativePath,
		Prefix:      "k",
	})
	defer writer.Close() //nolint:errcheck
	if err := json.NewEncoder(writer).Encode(dirManifest); err != nil {
		return "", fs.DirectorySummary{}, errors.Wrap(err, "unable to encode directory JSON")
	}
	oid, err := writer.Result()
	return oid, *dirManifest.Summary, err
}
// maybeIgnoreFileReadError suppresses (returns nil for) a file read error
// when either the uploader or the effective policy allows ignoring it.
func (u *Uploader) maybeIgnoreFileReadError(err error, policyTree *policy.Tree) error {
	ehp := policyTree.EffectivePolicy().ErrorHandlingPolicy
	ignore := u.IgnoreReadErrors || ehp.IgnoreFileErrorsOrDefault(false)
	if ignore {
		return nil
	}
	return err
}
// shouldIgnoreDirectoryReadErrors reports whether directory read errors may
// be ignored, per the uploader flag or the effective error-handling policy.
func (u *Uploader) shouldIgnoreDirectoryReadErrors(policyTree *policy.Tree) bool {
	ehp := policyTree.EffectivePolicy().ErrorHandlingPolicy
	return u.IgnoreReadErrors || ehp.IgnoreDirectoryErrorsOrDefault(false)
}
// NewUploader creates new Uploader object for a given repository, with
// progress reporting disabled (NullUploadProgress) and sequential uploads.
func NewUploader(r *repo.Repository) *Uploader {
	return &Uploader{
		repo:             r,
		Progress:         &NullUploadProgress{},
		IgnoreReadErrors: false,
		ParallelUploads:  1,
		// pool of reusable copy buffers; stores *[]byte so Get/Put do not
		// re-allocate the slice header
		uploadBufPool: sync.Pool{
			New: func() interface{} {
				p := make([]byte, copyBufferSize)
				return &p
			},
		},
	}
}
// Cancel requests cancellation of an upload that's in progress. Will typically result in an incomplete snapshot.
// The flag is set atomically and observed by the upload loops via cancelReason().
func (u *Uploader) Cancel() {
	atomic.StoreInt32(&u.canceled, 1)
}
// maybeOpenDirectoryFromManifest resolves the root entry of a previous
// snapshot manifest to an fs.Directory; returns nil (with a log message)
// when the manifest is absent, invalid, or its root is not a directory.
func (u *Uploader) maybeOpenDirectoryFromManifest(ctx context.Context, man *snapshot.Manifest) fs.Directory {
	if man == nil {
		return nil
	}
	ent, err := EntryFromDirEntry(u.repo, man.RootEntry)
	if err != nil {
		log(ctx).Warningf("invalid previous manifest root entry %v: %v", man.RootEntry, err)
		return nil
	}
	if dir, ok := ent.(fs.Directory); ok {
		return dir
	}
	log(ctx).Debugf("previous manifest root is not a directory (was %T %+v)", ent, man.RootEntry)
	return nil
}
// Upload uploads contents of the specified filesystem entry (file or directory) to the repository and returns snapshot.Manifest with statistics.
// Old snapshot manifest, when provided can be used to speed up uploads by utilizing hash cache.
func (u *Uploader) Upload(
	ctx context.Context,
	source fs.Entry,
	policyTree *policy.Tree,
	sourceInfo snapshot.SourceInfo,
	previousManifests ...*snapshot.Manifest,
) (*snapshot.Manifest, error) {
	log(ctx).Debugf("Uploading %v", sourceInfo)
	s := &snapshot.Manifest{
		Source: sourceInfo,
	}
	// seed progress estimates from the largest of the previous snapshots
	maxPreviousTotalFileSize := int64(0)
	maxPreviousFileCount := 0
	for _, m := range previousManifests {
		if s := m.Stats.TotalFileSize; s > maxPreviousTotalFileSize {
			maxPreviousTotalFileSize = s
		}
		if s := m.Stats.TotalFileCount; s > maxPreviousFileCount {
			maxPreviousFileCount = s
		}
	}
	u.Progress.UploadStarted(maxPreviousFileCount, maxPreviousTotalFileSize)
	defer u.Progress.UploadFinished()
	u.stats = snapshot.Stats{}
	var err error
	s.StartTime = u.repo.Time()
	switch entry := source.(type) {
	case fs.Directory:
		// open the root directory of each previous manifest for cache reuse
		var previousDirs []fs.Directory
		for _, m := range previousManifests {
			if d := u.maybeOpenDirectoryFromManifest(ctx, m); d != nil {
				previousDirs = append(previousDirs, d)
			}
		}
		// wrap the source with ignore rules from policy; excluded entries
		// are counted in stats instead of uploaded
		entry = ignorefs.New(entry, policyTree, ignorefs.ReportIgnoredFiles(func(_ string, md fs.Entry) {
			u.stats.AddExcluded(md)
		}))
		s.RootEntry, err = u.uploadDir(ctx, entry, policyTree, previousDirs)
	case fs.File:
		s.RootEntry, err = u.uploadFile(ctx, entry.Name(), entry, policyTree.EffectivePolicy())
	default:
		return nil, errors.Errorf("unsupported source: %v", s.Source)
	}
	if err != nil {
		return nil, err
	}
	s.IncompleteReason = u.cancelReason()
	s.EndTime = u.repo.Time()
	s.Stats = u.stats
	return s, nil
}
|
package nsqd
import (
"bytes"
"container/heap"
"errors"
"log"
"math"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/bitly/nsq/util"
"github.com/bitly/nsq/util/pqueue"
)
// the amount of time a worker will wait when idle
const defaultWorkerWait = 100 * time.Millisecond
// Consumer is the interface a client must implement to receive messages
// from a Channel and to participate in its lifecycle (pause, stats, teardown).
type Consumer interface {
	UnPause()
	Pause()
	Close() error
	TimedOutMessage()
	Stats() ClientStats
	Empty()
}
// Channel represents the concrete type for a NSQ channel (and also
// implements the Queue interface)
//
// There can be multiple channels per topic, each with their own unique set
// of subscribers (clients).
//
// Channels maintain all client and message metadata, orchestrating in-flight
// messages, timeouts, requeuing, etc.
type Channel struct {
	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
	requeueCount uint64 // messages requeued (immediate or deferred)
	messageCount uint64 // total messages put to this channel
	timeoutCount uint64 // in-flight messages that timed out

	sync.RWMutex // guards clients and the close of incomingMsgChan

	topicName string
	name      string
	context   *context

	backend BackendQueue // durable overflow storage (disk queue; no-op for ephemeral channels)

	incomingMsgChan chan *Message // entry point, drained by router()
	memoryMsgChan   chan *Message // bounded in-memory queue (MemQueueSize)
	clientMsgChan   chan *Message // handed to consumers; closed by messagePump
	exitChan        chan int
	waitGroup       util.WaitGroupWrapper
	exitFlag        int32 // set atomically to 1 when exit() begins

	// state tracking
	clients          map[int64]Consumer
	paused           int32
	ephemeralChannel bool // ephemeral channels self-delete when the last client leaves
	deleteCallback   func(*Channel)
	deleter          sync.Once

	// Stats tracking
	e2eProcessingLatencyStream *util.Quantile

	// TODO: these can be DRYd up
	deferredMessages map[MessageID]*pqueue.Item
	deferredPQ       pqueue.PriorityQueue
	deferredMutex    sync.Mutex
	inFlightMessages map[MessageID]*Message
	inFlightPQ       inFlightPqueue
	inFlightMutex    sync.Mutex

	// stat counters
	bufferedCount int32 // 1 while messagePump holds a message not yet delivered
}
// NewChannel creates a new instance of the Channel type and returns a pointer.
// It wires up the backend queue (disk-backed, or a dummy for "#ephemeral"
// channels), starts the message pump and worker goroutines, and notifies
// nsqd so lookupd registration happens asynchronously.
func NewChannel(topicName string, channelName string, context *context,
	deleteCallback func(*Channel)) *Channel {
	c := &Channel{
		topicName:       topicName,
		name:            channelName,
		incomingMsgChan: make(chan *Message, 1),
		memoryMsgChan:   make(chan *Message, context.nsqd.options.MemQueueSize),
		clientMsgChan:   make(chan *Message),
		exitChan:        make(chan int),
		clients:         make(map[int64]Consumer),
		deleteCallback:  deleteCallback,
		context:         context,
	}
	// only track end-to-end latency when percentiles were configured
	if len(context.nsqd.options.E2EProcessingLatencyPercentiles) > 0 {
		c.e2eProcessingLatencyStream = util.NewQuantile(
			context.nsqd.options.E2EProcessingLatencyWindowTime,
			context.nsqd.options.E2EProcessingLatencyPercentiles,
		)
	}
	c.initPQ()
	if strings.HasSuffix(channelName, "#ephemeral") {
		c.ephemeralChannel = true
		c.backend = newDummyBackendQueue()
	} else {
		// backend names, for uniqueness, automatically include the topic...
		backendName := getBackendName(topicName, channelName)
		c.backend = newDiskQueue(backendName,
			context.nsqd.options.DataPath,
			context.nsqd.options.MaxBytesPerFile,
			context.nsqd.options.SyncEvery,
			context.nsqd.options.SyncTimeout)
	}
	// messagePump is not in the wait group; it exits via exitChan/exitFlag
	go c.messagePump()
	c.waitGroup.Wrap(func() { c.router() })
	c.waitGroup.Wrap(func() { c.deferredWorker() })
	c.waitGroup.Wrap(func() { c.inFlightWorker() })
	go c.context.nsqd.Notify(c)
	return c
}
// initPQ (re)creates the in-flight and deferred bookkeeping structures.
// Called at construction and from Empty() to discard pending messages.
// Priority queues are sized to a fraction of MemQueueSize (minimum 1).
func (c *Channel) initPQ() {
	pqSize := int(math.Max(1, float64(c.context.nsqd.options.MemQueueSize)/10))
	c.inFlightMessages = make(map[MessageID]*Message)
	c.deferredMessages = make(map[MessageID]*pqueue.Item)
	c.inFlightMutex.Lock()
	c.inFlightPQ = newInFlightPqueue(pqSize)
	c.inFlightMutex.Unlock()
	c.deferredMutex.Lock()
	c.deferredPQ = pqueue.New(pqSize)
	c.deferredMutex.Unlock()
}
// Exiting returns a boolean indicating if this channel is closed/exiting
// (i.e. exit() has begun); reads exitFlag atomically.
func (c *Channel) Exiting() bool {
	return atomic.LoadInt32(&c.exitFlag) == 1
}
// Delete empties the channel and closes it, removing backend data as well.
func (c *Channel) Delete() error {
	return c.exit(true)
}
// Close cleanly closes the Channel, flushing buffered messages to the backend.
func (c *Channel) Close() error {
	return c.exit(false)
}
// exit performs the shared shutdown sequence for Close (deleted=false) and
// Delete (deleted=true): flip the exit flag exactly once, close all client
// connections, stop the worker goroutines, then either delete or flush the
// backend. Returns an error if shutdown was already in progress.
func (c *Channel) exit(deleted bool) error {
	// CAS ensures exit() runs at most once
	if !atomic.CompareAndSwapInt32(&c.exitFlag, 0, 1) {
		return errors.New("exiting")
	}
	if deleted {
		log.Printf("CHANNEL(%s): deleting", c.name)
		// since we are explicitly deleting a channel (not just at system exit time)
		// de-register this from the lookupd
		go c.context.nsqd.Notify(c)
	} else {
		log.Printf("CHANNEL(%s): closing", c.name)
	}
	// this forceably closes client connections
	c.RLock()
	for _, client := range c.clients {
		client.Close()
	}
	c.RUnlock()
	close(c.exitChan)
	// handle race condition w/ things writing into incomingMsgChan
	c.Lock()
	close(c.incomingMsgChan)
	c.Unlock()
	// synchronize the close of router() and pqWorkers (2)
	c.waitGroup.Wait()
	if deleted {
		// empty the queue (deletes the backend files, too)
		c.Empty()
		return c.backend.Delete()
	}
	// write anything leftover to disk
	c.flush()
	return c.backend.Close()
}
// Empty discards all pending messages: resets in-flight/deferred state,
// tells every client to empty its buffers, drains the in-memory channels,
// and finally empties the backend queue.
func (c *Channel) Empty() error {
	c.Lock()
	defer c.Unlock()
	c.initPQ()
	for _, client := range c.clients {
		client.Empty()
	}
	clientMsgChan := c.clientMsgChan
	// non-blocking drain of both memory channels until nothing is left
	for {
		select {
		case _, ok := <-clientMsgChan:
			if !ok {
				// c.clientMsgChan may be closed while in this loop
				// so just remove it from the select so we can make progress
				clientMsgChan = nil
			}
		case <-c.memoryMsgChan:
		default:
			goto finish
		}
	}
finish:
	return c.backend.Empty()
}
// flush persists all the messages in internal memory buffers to the backend
// it does not drain inflight/deferred because it is only called in Close()
//
// Fix: the clientMsgChan drain loop previously discarded the error returned
// by writeMessageToBackend; it is now logged, consistent with every other
// call site in this function.
func (c *Channel) flush() error {
	var msgBuf bytes.Buffer
	// messagePump is responsible for closing the channel it writes to
	// this will read until its closed (exited)
	for msg := range c.clientMsgChan {
		log.Printf("CHANNEL(%s): recovered buffered message from clientMsgChan", c.name)
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	if len(c.memoryMsgChan) > 0 || len(c.inFlightMessages) > 0 || len(c.deferredMessages) > 0 {
		log.Printf("CHANNEL(%s): flushing %d memory %d in-flight %d deferred messages to backend",
			c.name, len(c.memoryMsgChan), len(c.inFlightMessages), len(c.deferredMessages))
	}
	// non-blocking drain of the in-memory queue
	for {
		select {
		case msg := <-c.memoryMsgChan:
			err := writeMessageToBackend(&msgBuf, msg, c.backend)
			if err != nil {
				log.Printf("ERROR: failed to write message to backend - %s", err.Error())
			}
		default:
			goto finish
		}
	}
finish:
	// persist in-flight and deferred messages so they are redelivered on restart
	for _, msg := range c.inFlightMessages {
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	for _, item := range c.deferredMessages {
		msg := item.Value.(*Message)
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	return nil
}
// Depth returns the number of messages queued for this channel:
// in-memory + backend + the one possibly buffered inside messagePump.
func (c *Channel) Depth() int64 {
	return int64(len(c.memoryMsgChan)) + c.backend.Depth() + int64(atomic.LoadInt32(&c.bufferedCount))
}
// Pause pauses message delivery for all clients of this channel.
func (c *Channel) Pause() error {
	return c.doPause(true)
}
// UnPause resumes message delivery for all clients of this channel.
func (c *Channel) UnPause() error {
	return c.doPause(false)
}
// doPause flips the paused flag, propagates the state to every connected
// client, and persists nsqd metadata so the state survives restarts.
func (c *Channel) doPause(pause bool) error {
	if pause {
		atomic.StoreInt32(&c.paused, 1)
	} else {
		atomic.StoreInt32(&c.paused, 0)
	}
	c.RLock()
	for _, client := range c.clients {
		if pause {
			client.Pause()
		} else {
			client.UnPause()
		}
	}
	c.RUnlock()
	c.context.nsqd.Lock()
	defer c.context.nsqd.Unlock()
	// pro-actively persist metadata so in case of process failure
	// nsqd won't suddenly (un)pause a channel
	return c.context.nsqd.PersistMetadata()
}
// IsPaused reports whether the channel is currently paused (atomic read).
func (c *Channel) IsPaused() bool {
	return atomic.LoadInt32(&c.paused) == 1
}
// PutMessage writes to the appropriate incoming message channel
// (which will be routed asynchronously)
//
// The read lock prevents a race with exit(), which closes incomingMsgChan
// under the write lock.
func (c *Channel) PutMessage(msg *Message) error {
	c.RLock()
	defer c.RUnlock()
	if atomic.LoadInt32(&c.exitFlag) == 1 {
		return errors.New("exiting")
	}
	c.incomingMsgChan <- msg
	atomic.AddUint64(&c.messageCount, 1)
	return nil
}
// TouchMessage resets the timeout for an in-flight message by extending its
// deadline by MsgTimeout, clamped so total in-flight time never exceeds
// MaxMsgTimeout from the original delivery.
func (c *Channel) TouchMessage(clientID int64, id MessageID) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)
	currentTimeout := time.Unix(0, msg.pri)
	newTimeout := currentTimeout.Add(c.context.nsqd.options.MsgTimeout)
	if newTimeout.Add(c.context.nsqd.options.MsgTimeout).Sub(msg.deliveryTS) >=
		c.context.nsqd.options.MaxMsgTimeout {
		// we would have gone over, set to the max
		newTimeout = msg.deliveryTS.Add(c.context.nsqd.options.MaxMsgTimeout)
	}
	msg.pri = newTimeout.UnixNano()
	// re-register with the refreshed deadline
	err = c.pushInFlightMessage(msg)
	if err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// FinishMessage successfully discards an in-flight message, recording
// end-to-end processing latency when latency tracking is enabled.
func (c *Channel) FinishMessage(clientID int64, id MessageID) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)
	if c.e2eProcessingLatencyStream != nil {
		c.e2eProcessingLatencyStream.Insert(msg.Timestamp)
	}
	return nil
}
// RequeueMessage requeues a message based on `time.Duration`, ie:
//
// `timeoutMs` == 0 - requeue a message immediately
// `timeoutMs` > 0 - asynchronously wait for the specified timeout
// and requeue a message (aka "deferred requeue")
//
func (c *Channel) RequeueMessage(clientID int64, id MessageID, timeout time.Duration) error {
	// remove from inflight first
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)
	if timeout == 0 {
		return c.doRequeue(msg)
	}
	// deferred requeue
	return c.StartDeferredTimeout(msg, timeout)
}
// AddClient adds a client to the Channel's client list; it is a no-op when
// the client ID is already registered.
func (c *Channel) AddClient(clientID int64, client Consumer) {
	c.Lock()
	defer c.Unlock()
	if _, found := c.clients[clientID]; found {
		return
	}
	c.clients[clientID] = client
}
// RemoveClient removes a client from the Channel's client list.
//
// When the last client of an ephemeral channel goes away, the channel
// schedules its own deletion; c.deleter guarantees the callback fires once.
func (c *Channel) RemoveClient(clientID int64) {
	c.Lock()
	defer c.Unlock()
	if _, ok := c.clients[clientID]; !ok {
		return
	}
	delete(c.clients, clientID)
	// fix: compare the bool directly instead of the non-idiomatic `== true`
	if len(c.clients) == 0 && c.ephemeralChannel {
		go c.deleter.Do(func() { c.deleteCallback(c) })
	}
}
// StartInFlightTimeout records delivery of msg to clientID and registers it
// in the in-flight map and priority queue with a deadline of now+timeout.
func (c *Channel) StartInFlightTimeout(msg *Message, clientID int64, timeout time.Duration) error {
	now := time.Now()
	msg.clientID = clientID
	msg.deliveryTS = now
	// pri is the absolute deadline in nanoseconds, used as PQ priority
	msg.pri = now.Add(timeout).UnixNano()
	err := c.pushInFlightMessage(msg)
	if err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// StartDeferredTimeout schedules msg to be requeued after the given timeout
// by placing it in the deferred map and priority queue.
func (c *Channel) StartDeferredTimeout(msg *Message, timeout time.Duration) error {
	absTs := time.Now().Add(timeout).UnixNano()
	item := &pqueue.Item{Value: msg, Priority: absTs}
	err := c.pushDeferredMessage(item)
	if err != nil {
		return err
	}
	c.addToDeferredPQ(item)
	return nil
}
// doRequeue performs the low level operations to requeue a message
// by pushing it back through incomingMsgChan; the read lock guards against
// exit() closing that channel concurrently.
func (c *Channel) doRequeue(msg *Message) error {
	c.RLock()
	defer c.RUnlock()
	if atomic.LoadInt32(&c.exitFlag) == 1 {
		return errors.New("exiting")
	}
	c.incomingMsgChan <- msg
	atomic.AddUint64(&c.requeueCount, 1)
	return nil
}
// pushInFlightMessage atomically adds a message to the in-flight dictionary,
// failing if the message ID is already tracked.
func (c *Channel) pushInFlightMessage(msg *Message) error {
	c.Lock()
	defer c.Unlock()
	if _, found := c.inFlightMessages[msg.ID]; found {
		return errors.New("ID already in flight")
	}
	c.inFlightMessages[msg.ID] = msg
	return nil
}
// popInFlightMessage atomically removes a message from the in-flight
// dictionary, verifying that the requesting client owns it.
func (c *Channel) popInFlightMessage(clientID int64, id MessageID) (*Message, error) {
	c.Lock()
	defer c.Unlock()
	msg, found := c.inFlightMessages[id]
	if !found {
		return nil, errors.New("ID not in flight")
	}
	if msg.clientID != clientID {
		return nil, errors.New("client does not own message")
	}
	delete(c.inFlightMessages, id)
	return msg, nil
}
// addToInFlightPQ inserts msg into the in-flight priority queue
// (keyed on msg.pri) under the PQ mutex.
func (c *Channel) addToInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	c.inFlightPQ.Push(msg)
	c.inFlightMutex.Unlock()
}
// removeFromInFlightPQ removes msg from the in-flight priority queue,
// tolerating messages already popped by the timeout worker (index == -1).
func (c *Channel) removeFromInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	if msg.index == -1 {
		// this item has already been popped off the pqueue
		c.inFlightMutex.Unlock()
		return
	}
	c.inFlightPQ.Remove(msg.index)
	c.inFlightMutex.Unlock()
}
// pushDeferredMessage atomically adds an item to the deferred dictionary,
// failing if the wrapped message ID is already deferred.
func (c *Channel) pushDeferredMessage(item *pqueue.Item) error {
	c.Lock()
	defer c.Unlock()
	// TODO: these map lookups are costly
	msgID := item.Value.(*Message).ID
	if _, exists := c.deferredMessages[msgID]; exists {
		return errors.New("ID already deferred")
	}
	c.deferredMessages[msgID] = item
	return nil
}
// popDeferredMessage atomically removes and returns the deferred item for
// the given message ID, or an error when it is not deferred.
func (c *Channel) popDeferredMessage(id MessageID) (*pqueue.Item, error) {
	c.Lock()
	defer c.Unlock()
	// TODO: these map lookups are costly
	item, exists := c.deferredMessages[id]
	if !exists {
		return nil, errors.New("ID not deferred")
	}
	delete(c.deferredMessages, id)
	return item, nil
}
// addToDeferredPQ inserts the item into the deferred priority queue
// (keyed on Priority, the absolute requeue time) under the PQ mutex.
func (c *Channel) addToDeferredPQ(item *pqueue.Item) {
	c.deferredMutex.Lock()
	defer c.deferredMutex.Unlock()
	heap.Push(&c.deferredPQ, item)
}
// router handles the muxing of incoming Channel messages, either writing
// to the in-memory channel or to the backend.
// It exits when incomingMsgChan is closed (by exit()).
func (c *Channel) router() {
	var msgBuf bytes.Buffer
	for msg := range c.incomingMsgChan {
		select {
		case c.memoryMsgChan <- msg:
		default:
			// memory queue full - spill to backend
			err := writeMessageToBackend(&msgBuf, msg, c.backend)
			if err != nil {
				log.Printf("CHANNEL(%s) ERROR: failed to write message to backend - %s", c.name, err.Error())
				// theres not really much we can do at this point, you're certainly
				// going to lose messages...
			}
		}
	}
	log.Printf("CHANNEL(%s): closing ... router", c.name)
}
// messagePump reads messages from either memory or backend and writes
// to the client output go channel
//
// It also performs in-flight accounting and initiates the auto-requeue
// goroutine. It is the sole closer of clientMsgChan.
func (c *Channel) messagePump() {
	var msg *Message
	var buf []byte
	var err error
	for {
		// do an extra check for closed exit before we select on all the memory/backend/exitChan
		// this solves the case where we are closed and something else is draining clientMsgChan into
		// backend. we don't want to reverse that
		if atomic.LoadInt32(&c.exitFlag) == 1 {
			goto exit
		}
		select {
		case msg = <-c.memoryMsgChan:
		case buf = <-c.backend.ReadChan():
			msg, err = decodeMessage(buf)
			if err != nil {
				log.Printf("ERROR: failed to decode message - %s", err.Error())
				continue
			}
		case <-c.exitChan:
			goto exit
		}
		msg.Attempts++
		// bufferedCount marks the message held here but not yet delivered,
		// so Depth() stays accurate across the blocking send below
		atomic.StoreInt32(&c.bufferedCount, 1)
		c.clientMsgChan <- msg
		atomic.StoreInt32(&c.bufferedCount, 0)
		// the client will call back to mark as in-flight w/ it's info
	}
exit:
	log.Printf("CHANNEL(%s): closing ... messagePump", c.name)
	close(c.clientMsgChan)
}
// deferredWorker periodically pops due items from the deferred priority
// queue and requeues their messages for delivery.
func (c *Channel) deferredWorker() {
	c.pqWorker(&c.deferredPQ, &c.deferredMutex, func(item *pqueue.Item) {
		msg := item.Value.(*Message)
		_, err := c.popDeferredMessage(msg.ID)
		if err != nil {
			// already removed (e.g. via Empty) - nothing to requeue
			return
		}
		c.doRequeue(msg)
	})
}
// inFlightWorker periodically scans the in-flight priority queue for
// messages whose deadline has passed, notifies the owning client of the
// timeout, and requeues the message.
//
// Fix: the c.clients map is mutated by AddClient/RemoveClient under c.Lock,
// but was previously read here with no lock at all - a data race. The read
// is now guarded by c.RLock (held only for the lookup so we never call into
// the client or doRequeue, which takes RLock itself, while holding it).
func (c *Channel) inFlightWorker() {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			c.inFlightMutex.Lock()
			msg, _ := c.inFlightPQ.PeekAndShift(now)
			c.inFlightMutex.Unlock()
			if msg == nil {
				break
			}
			_, err := c.popInFlightMessage(msg.clientID, msg.ID)
			if err != nil {
				break
			}
			atomic.AddUint64(&c.timeoutCount, 1)
			c.RLock()
			client, ok := c.clients[msg.clientID]
			c.RUnlock()
			if ok {
				client.TimedOutMessage()
			}
			c.doRequeue(msg)
		}
	}
exit:
	log.Printf("CHANNEL(%s): closing ... inFlightWorker", c.name)
	ticker.Stop()
}
// pqWorker is a generic loop (executed in a goroutine) that periodically
// wakes up to walk the priority queue, popping every item whose priority
// (absolute deadline, ns) has passed and handing it to the callback.
// Exits when exitChan is closed.
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			mutex.Lock()
			item, _ := pq.PeekAndShift(now)
			mutex.Unlock()
			if item == nil {
				break
			}
			callback(item)
		}
	}
exit:
	log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
	ticker.Stop()
}
nsqd: fix missing lock around client map
package nsqd
import (
"bytes"
"container/heap"
"errors"
"log"
"math"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/bitly/nsq/util"
"github.com/bitly/nsq/util/pqueue"
)
// the amount of time a worker will wait when idle
const defaultWorkerWait = 100 * time.Millisecond
// Consumer is the interface a client must implement to receive messages
// from a Channel and to participate in its lifecycle (pause, stats, teardown).
type Consumer interface {
	UnPause()
	Pause()
	Close() error
	TimedOutMessage()
	Stats() ClientStats
	Empty()
}
// Channel represents the concrete type for a NSQ channel (and also
// implements the Queue interface)
//
// There can be multiple channels per topic, each with their own unique set
// of subscribers (clients).
//
// Channels maintain all client and message metadata, orchestrating in-flight
// messages, timeouts, requeuing, etc.
type Channel struct {
	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
	requeueCount uint64 // messages requeued (immediate or deferred)
	messageCount uint64 // total messages put to this channel
	timeoutCount uint64 // in-flight messages that timed out

	sync.RWMutex // guards clients and the close of incomingMsgChan

	topicName string
	name      string
	context   *context

	backend BackendQueue // durable overflow storage (disk queue; no-op for ephemeral channels)

	incomingMsgChan chan *Message // entry point, drained by router()
	memoryMsgChan   chan *Message // bounded in-memory queue (MemQueueSize)
	clientMsgChan   chan *Message // handed to consumers; closed by messagePump
	exitChan        chan int
	waitGroup       util.WaitGroupWrapper
	exitFlag        int32 // set atomically to 1 when exit() begins

	// state tracking
	clients          map[int64]Consumer
	paused           int32
	ephemeralChannel bool // ephemeral channels self-delete when the last client leaves
	deleteCallback   func(*Channel)
	deleter          sync.Once

	// Stats tracking
	e2eProcessingLatencyStream *util.Quantile

	// TODO: these can be DRYd up
	deferredMessages map[MessageID]*pqueue.Item
	deferredPQ       pqueue.PriorityQueue
	deferredMutex    sync.Mutex
	inFlightMessages map[MessageID]*Message
	inFlightPQ       inFlightPqueue
	inFlightMutex    sync.Mutex

	// stat counters
	bufferedCount int32 // 1 while messagePump holds a message not yet delivered
}
// NewChannel creates a new instance of the Channel type and returns a pointer.
// It wires up the backend queue (disk-backed, or a dummy for "#ephemeral"
// channels), starts the message pump and worker goroutines, and notifies
// nsqd so lookupd registration happens asynchronously.
func NewChannel(topicName string, channelName string, context *context,
	deleteCallback func(*Channel)) *Channel {
	c := &Channel{
		topicName:       topicName,
		name:            channelName,
		incomingMsgChan: make(chan *Message, 1),
		memoryMsgChan:   make(chan *Message, context.nsqd.options.MemQueueSize),
		clientMsgChan:   make(chan *Message),
		exitChan:        make(chan int),
		clients:         make(map[int64]Consumer),
		deleteCallback:  deleteCallback,
		context:         context,
	}
	// only track end-to-end latency when percentiles were configured
	if len(context.nsqd.options.E2EProcessingLatencyPercentiles) > 0 {
		c.e2eProcessingLatencyStream = util.NewQuantile(
			context.nsqd.options.E2EProcessingLatencyWindowTime,
			context.nsqd.options.E2EProcessingLatencyPercentiles,
		)
	}
	c.initPQ()
	if strings.HasSuffix(channelName, "#ephemeral") {
		c.ephemeralChannel = true
		c.backend = newDummyBackendQueue()
	} else {
		// backend names, for uniqueness, automatically include the topic...
		backendName := getBackendName(topicName, channelName)
		c.backend = newDiskQueue(backendName,
			context.nsqd.options.DataPath,
			context.nsqd.options.MaxBytesPerFile,
			context.nsqd.options.SyncEvery,
			context.nsqd.options.SyncTimeout)
	}
	// messagePump is not in the wait group; it exits via exitChan/exitFlag
	go c.messagePump()
	c.waitGroup.Wrap(func() { c.router() })
	c.waitGroup.Wrap(func() { c.deferredWorker() })
	c.waitGroup.Wrap(func() { c.inFlightWorker() })
	go c.context.nsqd.Notify(c)
	return c
}
// initPQ (re)creates the in-flight and deferred bookkeeping structures.
// Called at construction and from Empty() to discard pending messages.
// Priority queues are sized to a fraction of MemQueueSize (minimum 1).
func (c *Channel) initPQ() {
	pqSize := int(math.Max(1, float64(c.context.nsqd.options.MemQueueSize)/10))
	c.inFlightMessages = make(map[MessageID]*Message)
	c.deferredMessages = make(map[MessageID]*pqueue.Item)
	c.inFlightMutex.Lock()
	c.inFlightPQ = newInFlightPqueue(pqSize)
	c.inFlightMutex.Unlock()
	c.deferredMutex.Lock()
	c.deferredPQ = pqueue.New(pqSize)
	c.deferredMutex.Unlock()
}
// Exiting returns a boolean indicating if this channel is closed/exiting
// (i.e. exit() has begun); reads exitFlag atomically.
func (c *Channel) Exiting() bool {
	return atomic.LoadInt32(&c.exitFlag) == 1
}
// Delete empties the channel and closes it, removing backend data as well.
func (c *Channel) Delete() error {
	return c.exit(true)
}
// Close cleanly closes the Channel, flushing buffered messages to the backend.
func (c *Channel) Close() error {
	return c.exit(false)
}
// exit performs the shared shutdown sequence for Close (deleted=false) and
// Delete (deleted=true): flip the exit flag exactly once, close all client
// connections, stop the worker goroutines, then either delete or flush the
// backend. Returns an error if shutdown was already in progress.
func (c *Channel) exit(deleted bool) error {
	// CAS ensures exit() runs at most once
	if !atomic.CompareAndSwapInt32(&c.exitFlag, 0, 1) {
		return errors.New("exiting")
	}
	if deleted {
		log.Printf("CHANNEL(%s): deleting", c.name)
		// since we are explicitly deleting a channel (not just at system exit time)
		// de-register this from the lookupd
		go c.context.nsqd.Notify(c)
	} else {
		log.Printf("CHANNEL(%s): closing", c.name)
	}
	// this forceably closes client connections
	c.RLock()
	for _, client := range c.clients {
		client.Close()
	}
	c.RUnlock()
	close(c.exitChan)
	// handle race condition w/ things writing into incomingMsgChan
	c.Lock()
	close(c.incomingMsgChan)
	c.Unlock()
	// synchronize the close of router() and pqWorkers (2)
	c.waitGroup.Wait()
	if deleted {
		// empty the queue (deletes the backend files, too)
		c.Empty()
		return c.backend.Delete()
	}
	// write anything leftover to disk
	c.flush()
	return c.backend.Close()
}
// Empty discards all pending messages: resets in-flight/deferred state,
// tells every client to empty its buffers, drains the in-memory channels,
// and finally empties the backend queue.
func (c *Channel) Empty() error {
	c.Lock()
	defer c.Unlock()
	c.initPQ()
	for _, client := range c.clients {
		client.Empty()
	}
	clientMsgChan := c.clientMsgChan
	// non-blocking drain of both memory channels until nothing is left
	for {
		select {
		case _, ok := <-clientMsgChan:
			if !ok {
				// c.clientMsgChan may be closed while in this loop
				// so just remove it from the select so we can make progress
				clientMsgChan = nil
			}
		case <-c.memoryMsgChan:
		default:
			goto finish
		}
	}
finish:
	return c.backend.Empty()
}
// flush persists all the messages in internal memory buffers to the backend
// it does not drain inflight/deferred because it is only called in Close()
//
// Fix: the clientMsgChan drain loop previously discarded the error returned
// by writeMessageToBackend; it is now logged, consistent with every other
// call site in this function.
func (c *Channel) flush() error {
	var msgBuf bytes.Buffer
	// messagePump is responsible for closing the channel it writes to
	// this will read until its closed (exited)
	for msg := range c.clientMsgChan {
		log.Printf("CHANNEL(%s): recovered buffered message from clientMsgChan", c.name)
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	if len(c.memoryMsgChan) > 0 || len(c.inFlightMessages) > 0 || len(c.deferredMessages) > 0 {
		log.Printf("CHANNEL(%s): flushing %d memory %d in-flight %d deferred messages to backend",
			c.name, len(c.memoryMsgChan), len(c.inFlightMessages), len(c.deferredMessages))
	}
	// non-blocking drain of the in-memory queue
	for {
		select {
		case msg := <-c.memoryMsgChan:
			err := writeMessageToBackend(&msgBuf, msg, c.backend)
			if err != nil {
				log.Printf("ERROR: failed to write message to backend - %s", err.Error())
			}
		default:
			goto finish
		}
	}
finish:
	// persist in-flight and deferred messages so they are redelivered on restart
	for _, msg := range c.inFlightMessages {
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	for _, item := range c.deferredMessages {
		msg := item.Value.(*Message)
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			log.Printf("ERROR: failed to write message to backend - %s", err.Error())
		}
	}
	return nil
}
// Depth returns the number of messages queued for this channel:
// in-memory + backend + the one possibly buffered inside messagePump.
func (c *Channel) Depth() int64 {
	return int64(len(c.memoryMsgChan)) + c.backend.Depth() + int64(atomic.LoadInt32(&c.bufferedCount))
}
// Pause pauses message delivery for all clients of this channel.
func (c *Channel) Pause() error {
	return c.doPause(true)
}
// UnPause resumes message delivery for all clients of this channel.
func (c *Channel) UnPause() error {
	return c.doPause(false)
}
// doPause records the (un)paused state, propagates it to every connected
// client, and persists nsqd metadata so the state survives a restart.
func (c *Channel) doPause(pause bool) error {
	var flag int32
	if pause {
		flag = 1
	}
	atomic.StoreInt32(&c.paused, flag)

	c.RLock()
	for _, client := range c.clients {
		if pause {
			client.Pause()
		} else {
			client.UnPause()
		}
	}
	c.RUnlock()

	c.context.nsqd.Lock()
	defer c.context.nsqd.Unlock()
	// pro-actively persist metadata so in case of process failure
	// nsqd won't suddenly (un)pause a channel
	return c.context.nsqd.PersistMetadata()
}
// IsPaused reports whether the channel is currently paused.
func (c *Channel) IsPaused() bool {
	state := atomic.LoadInt32(&c.paused)
	return state == 1
}
// PutMessage writes to the appropriate incoming message channel
// (which will be routed asynchronously)
func (c *Channel) PutMessage(msg *Message) error {
	c.RLock()
	defer c.RUnlock()
	if exiting := atomic.LoadInt32(&c.exitFlag) == 1; exiting {
		return errors.New("exiting")
	}
	c.incomingMsgChan <- msg
	atomic.AddUint64(&c.messageCount, 1)
	return nil
}
// TouchMessage resets the timeout for an in-flight message.
//
// The message's deadline (msg.pri) is extended by MsgTimeout, capped so
// that the total time since delivery never exceeds MaxMsgTimeout.
func (c *Channel) TouchMessage(clientID int64, id MessageID) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)

	currentTimeout := time.Unix(0, msg.pri)
	newTimeout := currentTimeout.Add(c.context.nsqd.options.MsgTimeout)
	// BUG FIX: the cap check previously added MsgTimeout a *second* time
	// (newTimeout.Add(MsgTimeout).Sub(deliveryTS)), which forced the cap one
	// full MsgTimeout earlier than MaxMsgTimeout allows. Compare the new
	// deadline itself against the max window since delivery.
	if newTimeout.Sub(msg.deliveryTS) >= c.context.nsqd.options.MaxMsgTimeout {
		// we would have gone over, set to the max
		newTimeout = msg.deliveryTS.Add(c.context.nsqd.options.MaxMsgTimeout)
	}

	msg.pri = newTimeout.UnixNano()
	err = c.pushInFlightMessage(msg)
	if err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// FinishMessage successfully discards an in-flight message
func (c *Channel) FinishMessage(clientID int64, id MessageID) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)
	// record end-to-end processing latency when the stream is configured
	if stream := c.e2eProcessingLatencyStream; stream != nil {
		stream.Insert(msg.Timestamp)
	}
	return nil
}
// RequeueMessage requeues a message based on `time.Duration`, ie:
//
// `timeoutMs` == 0 - requeue a message immediately
// `timeoutMs` > 0 - asynchronously wait for the specified timeout
// and requeue a message (aka "deferred requeue")
//
func (c *Channel) RequeueMessage(clientID int64, id MessageID, timeout time.Duration) error {
	// take the message out of in-flight bookkeeping first
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)

	if timeout != 0 {
		// deferred requeue
		return c.StartDeferredTimeout(msg, timeout)
	}
	return c.doRequeue(msg)
}
// AddClient adds a client to the Channel's client list
// (a no-op if the clientID is already registered)
func (c *Channel) AddClient(clientID int64, client Consumer) {
	c.Lock()
	defer c.Unlock()
	if _, exists := c.clients[clientID]; exists {
		return
	}
	c.clients[clientID] = client
}
// RemoveClient removes a client from the Channel's client list.
//
// When the last client of an ephemeral channel disconnects, the channel
// schedules its own deletion (exactly once, via c.deleter).
func (c *Channel) RemoveClient(clientID int64) {
	c.Lock()
	defer c.Unlock()

	if _, ok := c.clients[clientID]; !ok {
		return
	}
	delete(c.clients, clientID)

	// idiom fix: compare the bool directly rather than "== true"
	if len(c.clients) == 0 && c.ephemeralChannel {
		go c.deleter.Do(func() { c.deleteCallback(c) })
	}
}
// StartInFlightTimeout records ownership and the delivery timestamp on the
// message, stamps its timeout deadline, and registers it as in-flight.
func (c *Channel) StartInFlightTimeout(msg *Message, clientID int64, timeout time.Duration) error {
	deliveredAt := time.Now()
	msg.clientID = clientID
	msg.deliveryTS = deliveredAt
	msg.pri = deliveredAt.Add(timeout).UnixNano()
	if err := c.pushInFlightMessage(msg); err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// StartDeferredTimeout queues the message for redelivery after the given
// timeout by placing it on the deferred priority queue.
func (c *Channel) StartDeferredTimeout(msg *Message, timeout time.Duration) error {
	deadline := time.Now().Add(timeout).UnixNano()
	item := &pqueue.Item{Value: msg, Priority: deadline}
	if err := c.pushDeferredMessage(item); err != nil {
		return err
	}
	c.addToDeferredPQ(item)
	return nil
}
// doRequeue performs the low level operations to requeue a message
func (c *Channel) doRequeue(msg *Message) error {
	c.RLock()
	defer c.RUnlock()
	if exiting := atomic.LoadInt32(&c.exitFlag) == 1; exiting {
		return errors.New("exiting")
	}
	c.incomingMsgChan <- msg
	atomic.AddUint64(&c.requeueCount, 1)
	return nil
}
// pushInFlightMessage atomically adds a message to the in-flight dictionary.
// Returns an error if the message ID is already tracked.
func (c *Channel) pushInFlightMessage(msg *Message) error {
	c.Lock()
	// idiom fix: defer replaces the duplicated Unlock on each exit path
	defer c.Unlock()

	if _, ok := c.inFlightMessages[msg.ID]; ok {
		return errors.New("ID already in flight")
	}
	c.inFlightMessages[msg.ID] = msg
	return nil
}
// popInFlightMessage atomically removes a message from the in-flight
// dictionary, verifying that the requesting client owns it.
func (c *Channel) popInFlightMessage(clientID int64, id MessageID) (*Message, error) {
	c.Lock()
	// idiom fix: defer replaces the duplicated Unlock on each exit path
	defer c.Unlock()

	msg, ok := c.inFlightMessages[id]
	if !ok {
		return nil, errors.New("ID not in flight")
	}
	if msg.clientID != clientID {
		return nil, errors.New("client does not own message")
	}
	delete(c.inFlightMessages, id)
	return msg, nil
}
// addToInFlightPQ pushes the message onto the in-flight priority queue
// under its dedicated mutex.
func (c *Channel) addToInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()
	c.inFlightPQ.Push(msg)
}
// removeFromInFlightPQ removes the message from the in-flight priority
// queue; an index of -1 means it was already popped, so nothing to do.
func (c *Channel) removeFromInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()
	if msg.index != -1 {
		c.inFlightPQ.Remove(msg.index)
	}
}
// pushDeferredMessage records the item in the deferred dictionary, keyed by
// its message ID; duplicate IDs are rejected.
func (c *Channel) pushDeferredMessage(item *pqueue.Item) error {
	c.Lock()
	defer c.Unlock()

	// TODO: these map lookups are costly
	id := item.Value.(*Message).ID
	if _, dup := c.deferredMessages[id]; dup {
		return errors.New("ID already deferred")
	}
	c.deferredMessages[id] = item
	return nil
}
// popDeferredMessage removes and returns the deferred item for the given
// message ID, erroring when no such item is tracked.
func (c *Channel) popDeferredMessage(id MessageID) (*pqueue.Item, error) {
	c.Lock()
	defer c.Unlock()

	// TODO: these map lookups are costly
	item, found := c.deferredMessages[id]
	if !found {
		return nil, errors.New("ID not deferred")
	}
	delete(c.deferredMessages, id)
	return item, nil
}
// addToDeferredPQ pushes the item onto the deferred priority queue under
// its dedicated mutex.
func (c *Channel) addToDeferredPQ(item *pqueue.Item) {
	c.deferredMutex.Lock()
	heap.Push(&c.deferredPQ, item)
	c.deferredMutex.Unlock()
}
// Router handles the muxing of incoming Channel messages, either writing
// to the in-memory channel or to the backend
func (c *Channel) router() {
	var msgBuf bytes.Buffer
	for msg := range c.incomingMsgChan {
		select {
		case c.memoryMsgChan <- msg:
		default:
			// the in-memory buffer is full; spill to the backend queue
			err := writeMessageToBackend(&msgBuf, msg, c.backend)
			if err != nil {
				log.Printf("CHANNEL(%s) ERROR: failed to write message to backend - %s", c.name, err.Error())
				// theres not really much we can do at this point, you're certainly
				// going to lose messages...
			}
		}
	}
	// the range loop ends when incomingMsgChan is closed (shutdown)
	log.Printf("CHANNEL(%s): closing ... router", c.name)
}
// messagePump reads messages from either memory or backend and writes
// to the client output go channel
//
// it is also performs in-flight accounting and initiates the auto-requeue
// goroutine
func (c *Channel) messagePump() {
	var msg *Message
	var buf []byte
	var err error

	for {
		// do an extra check for closed exit before we select on all the memory/backend/exitChan
		// this solves the case where we are closed and something else is draining clientMsgChan into
		// backend. we don't want to reverse that
		if atomic.LoadInt32(&c.exitFlag) == 1 {
			goto exit
		}

		select {
		case msg = <-c.memoryMsgChan:
		case buf = <-c.backend.ReadChan():
			msg, err = decodeMessage(buf)
			if err != nil {
				log.Printf("ERROR: failed to decode message - %s", err.Error())
				continue
			}
		case <-c.exitChan:
			goto exit
		}

		msg.Attempts++

		// bufferedCount marks the single message "in transit" to a client so
		// Depth() does not under-count while this send blocks
		atomic.StoreInt32(&c.bufferedCount, 1)
		c.clientMsgChan <- msg
		atomic.StoreInt32(&c.bufferedCount, 0)
		// the client will call back to mark as in-flight w/ it's info
	}

exit:
	log.Printf("CHANNEL(%s): closing ... messagePump", c.name)
	// this pump is the sole sender on clientMsgChan, so it owns the close
	close(c.clientMsgChan)
}
// deferredWorker drives the deferred priority queue: each due item is
// removed from the deferred dictionary and requeued for delivery.
func (c *Channel) deferredWorker() {
	c.pqWorker(&c.deferredPQ, &c.deferredMutex, func(item *pqueue.Item) {
		msg := item.Value.(*Message)
		if _, err := c.popDeferredMessage(msg.ID); err != nil {
			return
		}
		c.doRequeue(msg)
	})
}
// inFlightWorker periodically scans the in-flight priority queue for
// messages whose deadline (msg.pri) has passed, counts them as timed out,
// notifies the owning client, and requeues them.
func (c *Channel) inFlightWorker() {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			// pop the next expired message (if any) under the PQ lock
			c.inFlightMutex.Lock()
			msg, _ := c.inFlightPQ.PeekAndShift(now)
			c.inFlightMutex.Unlock()

			if msg == nil {
				break
			}

			_, err := c.popInFlightMessage(msg.clientID, msg.ID)
			if err != nil {
				// NOTE(review): pop can fail if the message was concurrently
				// removed by a client — presumably why the pass stops here;
				// confirm.
				break
			}
			atomic.AddUint64(&c.timeoutCount, 1)

			c.RLock()
			client, ok := c.clients[msg.clientID]
			c.RUnlock()
			if ok {
				client.TimedOutMessage()
			}
			c.doRequeue(msg)
		}
	}

exit:
	log.Printf("CHANNEL(%s): closing ... inFlightWorker", c.name)
	ticker.Stop()
}
// generic loop (executed in a goroutine) that periodically wakes up to walk
// the priority queue and call the callback
//
// Each tick pops every item whose Priority (an absolute UnixNano deadline,
// see StartDeferredTimeout) is <= now and hands it to callback. The loop
// exits when exitChan is closed.
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			// pop the next due item (if any) under the queue's lock
			mutex.Lock()
			item, _ := pq.PeekAndShift(now)
			mutex.Unlock()

			if item == nil {
				break
			}
			callback(item)
		}
	}

exit:
	log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
	ticker.Stop()
}
|
// Package filters 为过滤器定义.
package filters
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
)
// init installs the session-based Authorize filter on every protected
// route prefix.
func init() {
	// Authorize redirects any request whose session lacks an integer "uid"
	// (i.e. the user is not logged in) to the login page.
	Authorize := func(ctx *context.Context) {
		if _, ok := ctx.Input.Session("uid").(int); !ok {
			ctx.Redirect(302, "/login")
		}
	}

	// idiom fix: drop the stray semicolons / inconsistent spacing and
	// register the shared filter from a single list (original order kept).
	for _, pattern := range []string{
		"/member/*",
		"/member",
		"/",
		"/server",
		"/server/*",
		"/hook",
		"/hook/*",
	} {
		beego.InsertFilter(pattern, beego.BeforeRouter, Authorize)
	}
}
修复个人中心不登录返回错误的BUG
// Package filters 为过滤器定义.
package filters
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
)
// init installs the session-based Authorize filter on every protected
// route prefix (including /my, the personal-center routes).
func init() {
	// Authorize redirects any request whose session lacks an integer "uid"
	// (i.e. the user is not logged in) to the login page.
	Authorize := func(ctx *context.Context) {
		if _, ok := ctx.Input.Session("uid").(int); !ok {
			ctx.Redirect(302, "/login")
		}
	}

	// idiom fix: drop the stray semicolons / inconsistent spacing and
	// register the shared filter from a single list (original order kept).
	for _, pattern := range []string{
		"/member/*",
		"/member",
		"/",
		"/server",
		"/server/*",
		"/hook",
		"/hook/*",
		"/my",
	} {
		beego.InsertFilter(pattern, beego.BeforeRouter, Authorize)
	}
}
|
package host
import (
"sync/atomic"
"testing"
"time"
"github.com/NebulousLabs/Sia/modules"
)
// blockingPortForward is a dependency set that causes the host port forward
// call at startup to block for 10 seconds, simulating the amount of blocking
// that can occur in production.
//
// blockingPortForward will also cause managedClearPort to always return an
// error.
type blockingPortForward struct {
productionDependencies
}
// disrupt will cause the port forward call to block for 10 seconds, but still
// complete normally. disrupt will also cause managedClearPort to return an
// error.
func (blockingPortForward) disrupt(s string) bool {
// Return an error when clearing the port.
if s == "managedClearPort return error" {
return true
}
// Block during port forwarding.
if s == "managedForwardPort" {
time.Sleep(time.Second * 3)
}
return false
}
// TestPortFowardBlocking checks that the host does not accidentally call a
// write on a closed logger due to a long-running port forward call.
func TestPortForwardBlocking(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
ht, err := newMockHostTester(blockingPortForward{}, "TestPortForwardBlocking")
if err != nil {
t.Fatal(err)
}
// The close operation would previously fail here because of improper
// thread control regarding upnp and shutdown.
err = ht.Close()
if err != nil {
t.Fatal(err)
}
// The trailing sleep is needed to catch the previously existing error
// where the host was not shutting down correctly. Currently, the extra
// sleep does nothing, but in the regression a logging panic would occur.
time.Sleep(time.Second * 4)
}
// TestHostWorkingStatus checks that the host properly updates its working
// state
func TestHostWorkingStatus(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
ht, err := newHostTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer ht.Close()
if ht.host.WorkingStatus() != modules.HostWorkingStatusChecking {
t.Fatal("expected working state to initially be modules.HostWorkingStatusChecking")
}
for i := 0; i < 5; i++ {
// Simulate some setting calls, and see if the host picks up on it.
atomic.AddUint64(&ht.host.atomicSettingsCalls, workingStatusThreshold+1)
success := false
for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
if ht.host.WorkingStatus() == modules.HostWorkingStatusWorking {
success = true
break
}
}
if !success {
t.Fatal("expected working state to flip to HostWorkingStatusWorking after incrementing settings calls")
}
// make no settins calls, host should flip back to NotWorking
success = false
for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
if ht.host.WorkingStatus() == modules.HostWorkingStatusNotWorking {
success = true
break
}
}
if !success {
t.Fatal("expected working state to flip to HostStatusNotWorking if no settings calls occur")
}
}
}
// TestHostConnectabilityStatus checks that the host properly updates its connectable
// state
func TestHostConnectabilityStatus(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
ht, err := newHostTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer ht.Close()
if ht.host.ConnectabilityStatus() != modules.HostConnectabilityStatusChecking {
t.Fatal("expected connectability state to initially be ConnectablityStateChecking")
}
success := false
for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
if ht.host.ConnectabilityStatus() == modules.HostConnectabilityStatusConnectable {
success = true
break
}
}
if !success {
t.Fatal("expected connectability state to flip to HostConnectabilityStatusConnectable")
}
}
Comment out flaky initial-status checks and fix host test NDFs (non-deterministic failures)
package host
import (
"sync/atomic"
"testing"
"time"
"github.com/NebulousLabs/Sia/modules"
)
// blockingPortForward is a dependency set that causes the host port forward
// call at startup to block for several seconds (3s, per disrupt below),
// simulating the amount of blocking that can occur in production.
//
// blockingPortForward will also cause managedClearPort to always return an
// error.
type blockingPortForward struct {
	productionDependencies
}
// disrupt will cause the port forward call to block for 3 seconds, but still
// complete normally. disrupt will also cause managedClearPort to return an
// error. (The string literals below are the disrupt hooks checked by the
// host implementation.)
func (blockingPortForward) disrupt(s string) bool {
	// Return an error when clearing the port.
	if s == "managedClearPort return error" {
		return true
	}
	// Block during port forwarding.
	if s == "managedForwardPort" {
		time.Sleep(time.Second * 3)
	}
	return false
}
// TestPortForwardBlocking checks that the host does not accidentally call a
// write on a closed logger due to a long-running port forward call.
func TestPortForwardBlocking(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newMockHostTester(blockingPortForward{}, "TestPortForwardBlocking")
	if err != nil {
		t.Fatal(err)
	}
	// The close operation would previously fail here because of improper
	// thread control regarding upnp and shutdown.
	err = ht.Close()
	if err != nil {
		t.Fatal(err)
	}
	// The trailing sleep is needed to catch the previously existing error
	// where the host was not shutting down correctly. Currently, the extra
	// sleep does nothing, but in the regression a logging panic would occur.
	// (4s: longer than the 3s disrupt sleep above.)
	time.Sleep(time.Second * 4)
}
// TestHostWorkingStatus checks that the host properly updates its working
// state
func TestHostWorkingStatus(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// this causes an NDF (non-deterministic failure), because it relies on
	// the host tester starting up and fully returning faster than the first
	// check, which isn't always the case. Disabled for now.
	// if ht.host.WorkingStatus() != modules.HostWorkingStatusChecking {
	// t.Fatal("expected working state to initially be modules.HostWorkingStatusChecking")
	// }

	for i := 0; i < 5; i++ {
		// Simulate some setting calls, and see if the host picks up on it.
		atomic.AddUint64(&ht.host.atomicSettingsCalls, workingStatusThreshold+1)
		success := false
		// poll (up to 30s, every 10ms) for the status to flip to Working
		for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
			if ht.host.WorkingStatus() == modules.HostWorkingStatusWorking {
				success = true
				break
			}
		}
		if !success {
			t.Fatal("expected working state to flip to HostWorkingStatusWorking after incrementing settings calls")
		}

		// make no settings calls; host should flip back to NotWorking
		success = false
		for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
			if ht.host.WorkingStatus() == modules.HostWorkingStatusNotWorking {
				success = true
				break
			}
		}
		if !success {
			t.Fatal("expected working state to flip to HostStatusNotWorking if no settings calls occur")
		}
	}
}
// TestHostConnectabilityStatus checks that the host properly updates its
// connectable state
func TestHostConnectabilityStatus(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// this causes an NDF (non-deterministic failure), because it relies on
	// the host tester starting up and fully returning faster than the first
	// check, which isn't always the case. Disabled for now.
	// if ht.host.ConnectabilityStatus() != modules.HostConnectabilityStatusChecking {
	// t.Fatal("expected connectability state to initially be ConnectablityStateChecking")
	// }

	// poll (up to 30s, every 10ms) for the status to become Connectable
	success := false
	for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {
		if ht.host.ConnectabilityStatus() == modules.HostConnectabilityStatusConnectable {
			success = true
			break
		}
	}
	if !success {
		t.Fatal("expected connectability state to flip to HostConnectabilityStatusConnectable")
	}
}
|
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/juju/loggo"
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
"github.com/juju/utils"
"github.com/juju/utils/arch"
"github.com/juju/utils/featureflag"
jujuos "github.com/juju/utils/os"
"github.com/juju/utils/series"
gc "gopkg.in/check.v1"
"github.com/juju/juju/juju/osenv"
"github.com/juju/juju/network"
"github.com/juju/juju/wrench"
)
var logger = loggo.GetLogger("juju.testing")
// JujuOSEnvSuite isolates the tests from Juju environment variables.
// This is intended to be only used by existing suites, usually embedded in
// BaseSuite and in FakeJujuXDGDataHomeSuite. Eventually the tests relying on
// JujuOSEnvSuite will be converted to use the IsolationSuite in
// github.com/juju/testing, and this suite will be removed.
// Do not use JujuOSEnvSuite when writing new tests.
type JujuOSEnvSuite struct {
oldJujuXDGDataHome string
oldHomeEnv string
oldEnvironment map[string]string
initialFeatureFlags string
regKeyExisted bool
regEntryExisted bool
oldRegEntryValue string
}
func (s *JujuOSEnvSuite) SetUpTest(c *gc.C) {
s.oldEnvironment = make(map[string]string)
for _, name := range []string{
osenv.JujuXDGDataHomeEnvKey,
osenv.JujuModelEnvKey,
osenv.JujuLoggingConfigEnvKey,
osenv.JujuFeatureFlagEnvKey,
osenv.XDGDataHome,
} {
s.oldEnvironment[name] = os.Getenv(name)
os.Setenv(name, "")
}
s.oldHomeEnv = utils.Home()
s.oldJujuXDGDataHome = osenv.SetJujuXDGDataHome("")
err := utils.SetHome("")
c.Assert(err, jc.ErrorIsNil)
// Update the feature flag set to be the requested initial set.
// This works for both windows and unix, even though normally
// the feature flags on windows are determined using the registry.
// For tests, setting with the environment variable isolates us
// from a single resource that was hitting contention during parallel
// test runs.
os.Setenv(osenv.JujuFeatureFlagEnvKey, s.initialFeatureFlags)
featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
func (s *JujuOSEnvSuite) TearDownTest(c *gc.C) {
for name, value := range s.oldEnvironment {
os.Setenv(name, value)
}
err := utils.SetHome(s.oldHomeEnv)
c.Assert(err, jc.ErrorIsNil)
osenv.SetJujuXDGDataHome(s.oldJujuXDGDataHome)
}
// SkipIfPPC64EL skips the test if the arch is PPC64EL and the
// compiler is gccgo.
func SkipIfPPC64EL(c *gc.C, bugID string) {
if runtime.Compiler == "gccgo" &&
arch.NormaliseArch(runtime.GOARCH) == arch.PPC64EL {
c.Skip(fmt.Sprintf("Test disabled on PPC64EL until fixed - see bug %s", bugID))
}
}
// SkipIfI386 skips the test if the arch is I386.
func SkipIfI386(c *gc.C, bugID string) {
if arch.NormaliseArch(runtime.GOARCH) == arch.I386 {
c.Skip(fmt.Sprintf("Test disabled on I386 until fixed - see bug %s", bugID))
}
}
// SkipIfWindowsBug skips the test if the OS is Windows.
func SkipIfWindowsBug(c *gc.C, bugID string) {
if runtime.GOOS == "windows" {
c.Skip(fmt.Sprintf("Test disabled on Windows until fixed - see bug %s", bugID))
}
}
// SetInitialFeatureFlags sets the feature flags to be in effect for
// the next call to SetUpTest.
func (s *JujuOSEnvSuite) SetInitialFeatureFlags(flags ...string) {
s.initialFeatureFlags = strings.Join(flags, ",")
}
func (s *JujuOSEnvSuite) SetFeatureFlags(flag ...string) {
flags := strings.Join(flag, ",")
if err := os.Setenv(osenv.JujuFeatureFlagEnvKey, flags); err != nil {
panic(err)
}
logger.Debugf("setting feature flags: %s", flags)
featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
// BaseSuite provides required functionality for all test suites
// when embedded in a gocheck suite type:
// - logger redirect
// - no outgoing network access
// - protection of user's home directory
// - scrubbing of env vars
// TODO (frankban) 2014-06-09: switch to using IsolationSuite.
// NOTE: there will be many tests that fail when you try to change
// to the IsolationSuite that rely on external things in PATH.
type BaseSuite struct {
oldLtsForTesting string
testing.CleanupSuite
testing.LoggingSuite
JujuOSEnvSuite
}
func (s *BaseSuite) SetUpSuite(c *gc.C) {
wrench.SetEnabled(false)
s.CleanupSuite.SetUpSuite(c)
s.LoggingSuite.SetUpSuite(c)
// JujuOSEnvSuite does not have a suite setup.
s.PatchValue(&utils.OutgoingAccessAllowed, false)
// LTS-dependent requires new entry upon new LTS release.
s.oldLtsForTesting = series.SetLatestLtsForTesting("xenial")
}
func (s *BaseSuite) TearDownSuite(c *gc.C) {
// JujuOSEnvSuite does not have a suite teardown.
_ = series.SetLatestLtsForTesting(s.oldLtsForTesting)
s.LoggingSuite.TearDownSuite(c)
s.CleanupSuite.TearDownSuite(c)
}
func (s *BaseSuite) SetUpTest(c *gc.C) {
s.CleanupSuite.SetUpTest(c)
s.LoggingSuite.SetUpTest(c)
s.JujuOSEnvSuite.SetUpTest(c)
c.Assert(utils.OutgoingAccessAllowed, gc.Equals, false)
// We do this to isolate invocations of bash from pulling in the
// ambient user environment, and potentially affecting the tests.
// We can't always just use IsolationSuite because we still need
// PATH and possibly a couple other envars.
s.PatchEnvironment("BASH_ENV", "")
network.SetPreferIPv6(false)
}
func (s *BaseSuite) TearDownTest(c *gc.C) {
s.JujuOSEnvSuite.TearDownTest(c)
s.LoggingSuite.TearDownTest(c)
s.CleanupSuite.TearDownTest(c)
}
// CheckString compares two strings. If they do not match then the spot
// where they do not match is logged.
func CheckString(c *gc.C, value, expected string) {
if !c.Check(value, gc.Equals, expected) {
diffStrings(c, value, expected)
}
}
func diffStrings(c *gc.C, value, expected string) {
// If only Go had a diff library.
vlines := strings.Split(value, "\n")
elines := strings.Split(expected, "\n")
vsize := len(vlines)
esize := len(elines)
if vsize < 2 || esize < 2 {
return
}
smaller := elines
if vsize < esize {
smaller = vlines
}
for i := range smaller {
vline := vlines[i]
eline := elines[i]
if vline != eline {
c.Logf("first mismatched line (%d/%d):", i, len(smaller))
c.Log("expected: " + eline)
c.Log("got: " + vline)
break
}
}
}
// TestCleanup is used to allow DumpTestLogsAfter to take any test suite
// that supports the standard cleanup function.
type TestCleanup interface {
AddCleanup(func(*gc.C))
}
// DumpTestLogsAfter will write the test logs to stdout if the timeout
// is reached.
func DumpTestLogsAfter(timeout time.Duration, c *gc.C, cleaner TestCleanup) {
	done := make(chan interface{})
	go func() {
		select {
		case <-time.After(timeout):
			// BUG FIX: the log text was previously passed to fmt.Printf as
			// the format string (a go vet violation); any '%' in the test
			// log would be misinterpreted as a verb. Print it verbatim.
			fmt.Print(c.GetTestLog())
		case <-done:
		}
	}()
	// cancel the dump when the test finishes before the timeout
	cleaner.AddCleanup(func(_ *gc.C) {
		close(done)
	})
}
type PackageManagerStruct struct {
PackageManager string
RepositoryManager string
PackageQuery string
}
func GetPackageManager() (s PackageManagerStruct, err error) {
switch jujuos.HostOS() {
case jujuos.CentOS:
s.PackageManager = "yum"
s.PackageQuery = "yum"
s.RepositoryManager = "yum-config-manager --add-repo"
case jujuos.Ubuntu:
s.PackageManager = "apt-get"
s.PackageQuery = "dpkg-query"
s.RepositoryManager = "add-apt-repository"
default:
s.PackageManager = "apt-get"
s.PackageQuery = "dpkg-query"
s.RepositoryManager = "add-apt-repository"
}
return s, nil
}
testing: Do not call network.SetPreferIPv6() in BaseSuite.SetUpTest()
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/juju/loggo"
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
"github.com/juju/utils"
"github.com/juju/utils/arch"
"github.com/juju/utils/featureflag"
jujuos "github.com/juju/utils/os"
"github.com/juju/utils/series"
gc "gopkg.in/check.v1"
"github.com/juju/juju/juju/osenv"
"github.com/juju/juju/wrench"
)
var logger = loggo.GetLogger("juju.testing")
// JujuOSEnvSuite isolates the tests from Juju environment variables.
// This is intended to be only used by existing suites, usually embedded in
// BaseSuite and in FakeJujuXDGDataHomeSuite. Eventually the tests relying on
// JujuOSEnvSuite will be converted to use the IsolationSuite in
// github.com/juju/testing, and this suite will be removed.
// Do not use JujuOSEnvSuite when writing new tests.
type JujuOSEnvSuite struct {
	oldJujuXDGDataHome  string            // previous Juju data home, restored on teardown
	oldHomeEnv          string            // previous $HOME, restored on teardown
	oldEnvironment      map[string]string // saved values of the scrubbed env vars
	initialFeatureFlags string            // flags applied at the next SetUpTest
	// NOTE(review): the reg* fields are not referenced in this file —
	// presumably Windows-registry state used elsewhere; confirm before
	// removing.
	regKeyExisted    bool
	regEntryExisted  bool
	oldRegEntryValue string
}
// SetUpTest scrubs the Juju-related environment variables (saving the old
// values for TearDownTest), clears $HOME and the Juju data home, and applies
// the feature flags requested via SetInitialFeatureFlags.
func (s *JujuOSEnvSuite) SetUpTest(c *gc.C) {
	s.oldEnvironment = make(map[string]string)
	for _, name := range []string{
		osenv.JujuXDGDataHomeEnvKey,
		osenv.JujuModelEnvKey,
		osenv.JujuLoggingConfigEnvKey,
		osenv.JujuFeatureFlagEnvKey,
		osenv.XDGDataHome,
	} {
		s.oldEnvironment[name] = os.Getenv(name)
		os.Setenv(name, "")
	}
	s.oldHomeEnv = utils.Home()
	s.oldJujuXDGDataHome = osenv.SetJujuXDGDataHome("")
	err := utils.SetHome("")
	c.Assert(err, jc.ErrorIsNil)

	// Update the feature flag set to be the requested initial set.
	// This works for both windows and unix, even though normally
	// the feature flags on windows are determined using the registry.
	// For tests, setting with the environment variable isolates us
	// from a single resource that was hitting contention during parallel
	// test runs.
	os.Setenv(osenv.JujuFeatureFlagEnvKey, s.initialFeatureFlags)
	featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
// TearDownTest restores the environment variables, $HOME, and the Juju data
// home saved by SetUpTest.
func (s *JujuOSEnvSuite) TearDownTest(c *gc.C) {
	for name, value := range s.oldEnvironment {
		os.Setenv(name, value)
	}
	c.Assert(utils.SetHome(s.oldHomeEnv), jc.ErrorIsNil)
	osenv.SetJujuXDGDataHome(s.oldJujuXDGDataHome)
}
// SkipIfPPC64EL skips the test if the arch is PPC64EL and the
// compiler is gccgo.
func SkipIfPPC64EL(c *gc.C, bugID string) {
	onGccgo := runtime.Compiler == "gccgo"
	onPPC64EL := arch.NormaliseArch(runtime.GOARCH) == arch.PPC64EL
	if onGccgo && onPPC64EL {
		c.Skip(fmt.Sprintf("Test disabled on PPC64EL until fixed - see bug %s", bugID))
	}
}
// SkipIfI386 skips the test if the arch is I386.
func SkipIfI386(c *gc.C, bugID string) {
	if arch.NormaliseArch(runtime.GOARCH) != arch.I386 {
		return
	}
	c.Skip(fmt.Sprintf("Test disabled on I386 until fixed - see bug %s", bugID))
}
// SkipIfWindowsBug skips the test if the OS is Windows.
func SkipIfWindowsBug(c *gc.C, bugID string) {
	if runtime.GOOS != "windows" {
		return
	}
	c.Skip(fmt.Sprintf("Test disabled on Windows until fixed - see bug %s", bugID))
}
// SetInitialFeatureFlags sets the feature flags to be in effect for
// the next call to SetUpTest.
func (s *JujuOSEnvSuite) SetInitialFeatureFlags(flags ...string) {
	// stored comma-separated; applied via the env var in SetUpTest
	s.initialFeatureFlags = strings.Join(flags, ",")
}
// SetFeatureFlags immediately applies the given feature flags via the
// environment variable (panicking if the variable cannot be set).
func (s *JujuOSEnvSuite) SetFeatureFlags(flag ...string) {
	flags := strings.Join(flag, ",")
	err := os.Setenv(osenv.JujuFeatureFlagEnvKey, flags)
	if err != nil {
		panic(err)
	}
	logger.Debugf("setting feature flags: %s", flags)
	featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
// BaseSuite provides required functionality for all test suites
// when embedded in a gocheck suite type:
// - logger redirect
// - no outgoing network access
// - protection of user's home directory
// - scrubbing of env vars
// TODO (frankban) 2014-06-09: switch to using IsolationSuite.
// NOTE: there will be many tests that fail when you try to change
// to the IsolationSuite that rely on external things in PATH.
type BaseSuite struct {
	oldLtsForTesting string // LTS series saved by SetUpSuite, restored in TearDownSuite
	testing.CleanupSuite
	testing.LoggingSuite
	JujuOSEnvSuite
}
// SetUpSuite disables the wrench package, sets up the embedded suites,
// blocks outgoing network access for the suite's duration, and pins the
// latest LTS series for testing.
func (s *BaseSuite) SetUpSuite(c *gc.C) {
	wrench.SetEnabled(false)
	s.CleanupSuite.SetUpSuite(c)
	s.LoggingSuite.SetUpSuite(c)
	// JujuOSEnvSuite does not have a suite setup.
	s.PatchValue(&utils.OutgoingAccessAllowed, false)
	// LTS-dependent requires new entry upon new LTS release.
	s.oldLtsForTesting = series.SetLatestLtsForTesting("xenial")
}
// TearDownSuite restores the LTS series saved by SetUpSuite and tears the
// embedded suites down in reverse setup order.
func (s *BaseSuite) TearDownSuite(c *gc.C) {
	// JujuOSEnvSuite does not have a suite teardown.
	_ = series.SetLatestLtsForTesting(s.oldLtsForTesting)
	s.LoggingSuite.TearDownSuite(c)
	s.CleanupSuite.TearDownSuite(c)
}
// SetUpTest sets up the embedded suites in order, verifies outgoing network
// access is still blocked, and clears BASH_ENV for subprocesses.
func (s *BaseSuite) SetUpTest(c *gc.C) {
	s.CleanupSuite.SetUpTest(c)
	s.LoggingSuite.SetUpTest(c)
	s.JujuOSEnvSuite.SetUpTest(c)
	c.Assert(utils.OutgoingAccessAllowed, gc.Equals, false)
	// We do this to isolate invocations of bash from pulling in the
	// ambient user environment, and potentially affecting the tests.
	// We can't always just use IsolationSuite because we still need
	// PATH and possibly a couple other envars.
	s.PatchEnvironment("BASH_ENV", "")
}
// TearDownTest tears the embedded suites down in reverse setup order.
func (s *BaseSuite) TearDownTest(c *gc.C) {
	s.JujuOSEnvSuite.TearDownTest(c)
	s.LoggingSuite.TearDownTest(c)
	s.CleanupSuite.TearDownTest(c)
}
// CheckString compares two strings. If they do not match then the spot
// where they do not match is logged.
func CheckString(c *gc.C, value, expected string) {
	if c.Check(value, gc.Equals, expected) {
		return
	}
	diffStrings(c, value, expected)
}
// diffStrings logs the first line at which value and expected diverge.
// Inputs with fewer than two lines are not diffed.
func diffStrings(c *gc.C, value, expected string) {
	// If only Go had a diff library.
	vlines := strings.Split(value, "\n")
	elines := strings.Split(expected, "\n")
	if len(vlines) < 2 || len(elines) < 2 {
		return
	}
	// walk the shorter of the two line lists
	n := len(elines)
	if len(vlines) < n {
		n = len(vlines)
	}
	for i := 0; i < n; i++ {
		if vlines[i] == elines[i] {
			continue
		}
		c.Logf("first mismatched line (%d/%d):", i, n)
		c.Log("expected: " + elines[i])
		c.Log("got: " + vlines[i])
		return
	}
}
// TestCleanup is used to allow DumpTestLogsAfter to take any test suite
// that supports the standard cleanup function.
type TestCleanup interface {
	// AddCleanup registers a function to run when the test completes.
	AddCleanup(func(*gc.C))
}
// DumpTestLogsAfter will write the test logs to stdout if the timeout
// is reached.
func DumpTestLogsAfter(timeout time.Duration, c *gc.C, cleaner TestCleanup) {
	done := make(chan interface{})
	go func() {
		select {
		case <-time.After(timeout):
			// BUG FIX: the log text was previously passed to fmt.Printf as
			// the format string (a go vet violation); any '%' in the test
			// log would be misinterpreted as a verb. Print it verbatim.
			fmt.Print(c.GetTestLog())
		case <-done:
		}
	}()
	// cancel the dump when the test finishes before the timeout
	cleaner.AddCleanup(func(_ *gc.C) {
		close(done)
	})
}
// PackageManagerStruct bundles the command names used on the host OS to
// install packages, add repositories, and query installed packages.
type PackageManagerStruct struct {
	PackageManager    string // e.g. "apt-get" or "yum"
	RepositoryManager string // command used to add a repository
	PackageQuery      string // command used to query installed packages
}
// GetPackageManager returns the package-management command names for the
// host OS: yum for CentOS; the apt toolchain for Ubuntu and any
// unrecognized OS. The error result is always nil (kept for API
// compatibility).
func GetPackageManager() (s PackageManagerStruct, err error) {
	switch jujuos.HostOS() {
	case jujuos.CentOS:
		s.PackageManager = "yum"
		s.PackageQuery = "yum"
		s.RepositoryManager = "yum-config-manager --add-repo"
	default:
		// the Ubuntu case and the fallback were verbatim duplicates;
		// collapsed into the single default (apt) branch
		s.PackageManager = "apt-get"
		s.PackageQuery = "dpkg-query"
		s.RepositoryManager = "add-apt-repository"
	}
	return s, nil
}
|
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package middlewares
import "net/url"
// flashes enumerates all the flash types
const (
SuccessFlash = "SuccessMsg"
ErrorFlash = "ErrorMsg"
WarnFlash = "WarningMsg"
InfoFlash = "InfoMsg"
)
var (
// FlashNow FIXME:
FlashNow bool
)
// Flash represents a one time data transfer between two requests.
type Flash struct {
	DataStore
	// Embedded url.Values holds the queued flash messages.
	// NOTE(review): nothing here initializes this map; callers of the
	// embedded Set would panic on a nil map — verify all write paths.
	url.Values
	ErrorMsg, WarningMsg, InfoMsg, SuccessMsg string
}
// set records a flash message under name. When the message should show
// in the current request (explicit current=true, or the FlashNow global
// when no flag is given) it is attached to the request data directly;
// otherwise it is queued in the url.Values for the next request.
func (f *Flash) set(name, msg string, current ...bool) {
	// The embedded url.Values starts out nil, and assigning into a nil
	// map panics; initialize it lazily before any Set call.
	if f.Values == nil {
		f.Values = make(map[string][]string)
	}
	isShow := false
	if (len(current) == 0 && FlashNow) ||
		(len(current) > 0 && current[0]) {
		isShow = true
	}
	if isShow {
		f.GetData()["Flash"] = f
	} else {
		f.Set(name, msg)
	}
}
// Error sets error message; the optional current flag shows it in the
// current request instead of queuing it for the next one.
func (f *Flash) Error(msg string, current ...bool) {
	f.ErrorMsg = msg
	f.set("error", msg, current...)
}

// Warning sets warning message (see Error for the current flag).
func (f *Flash) Warning(msg string, current ...bool) {
	f.WarningMsg = msg
	f.set("warning", msg, current...)
}

// Info sets info message (see Error for the current flag).
func (f *Flash) Info(msg string, current ...bool) {
	f.InfoMsg = msg
	f.set("info", msg, current...)
}

// Success sets success message (see Error for the current flag).
func (f *Flash) Success(msg string, current ...bool) {
	f.SuccessMsg = msg
	f.set("success", msg, current...)
}
Fix: url.Values map was not initialized (#14485)
Values map was not initialized, leading to error 500 on submission of initial configuration
Co-authored-by: 6543 <38d17c89bbb30dc9e2665c7359356aaa08c63a4d@obermui.de>
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package middlewares
import "net/url"
// flashes enumerates all the flash types
const (
SuccessFlash = "SuccessMsg"
ErrorFlash = "ErrorMsg"
WarnFlash = "WarningMsg"
InfoFlash = "InfoMsg"
)
var (
// FlashNow FIXME:
FlashNow bool
)
// Flash represents a one time data transfer between two requests.
type Flash struct {
DataStore
url.Values
ErrorMsg, WarningMsg, InfoMsg, SuccessMsg string
}
// set records a flash message under name. When the message should show
// in the current request (explicit current=true, or the FlashNow global
// when no flag is given) it is attached to the request data directly;
// otherwise it is queued in the url.Values for the next request.
func (f *Flash) set(name, msg string, current ...bool) {
	// The embedded url.Values starts out nil; writing into a nil map
	// panics, so initialize it lazily before any Set call.
	if f.Values == nil {
		f.Values = make(map[string][]string)
	}
	isShow := false
	if (len(current) == 0 && FlashNow) ||
		(len(current) > 0 && current[0]) {
		isShow = true
	}
	if isShow {
		f.GetData()["Flash"] = f
	} else {
		f.Set(name, msg)
	}
}
// Error sets error message
func (f *Flash) Error(msg string, current ...bool) {
f.ErrorMsg = msg
f.set("error", msg, current...)
}
// Warning sets warning message
func (f *Flash) Warning(msg string, current ...bool) {
f.WarningMsg = msg
f.set("warning", msg, current...)
}
// Info sets info message
func (f *Flash) Info(msg string, current ...bool) {
f.InfoMsg = msg
f.set("info", msg, current...)
}
// Success sets success message
func (f *Flash) Success(msg string, current ...bool) {
f.SuccessMsg = msg
f.set("success", msg, current...)
}
|
package bahn // import "github.com/octo/icestat/bahn"
import (
"encoding/json"
"net/http"
"time"
)
// PositionURL is the URL of JSONP-encoded information about the train's location and speed.
const PositionURL = "http://www.ombord.info/api/jsonp/position/"
// Position holds the position of the train as well as some other GPS related
// data (speed, #satellites, ...).
type Position struct {
	Version    string
	Time       time.Time
	Latitude   float64
	Longitude  float64
	Altitude   float64
	// Speed in km/h.
	Speed      float64
	Satellites int
}

// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
// The feed encodes every numeric field as a JSON string and reports
// speed in m/s, so the payload is decoded into an intermediate struct
// first and then converted (Unix seconds -> time.Time, m/s -> km/h).
func (p *Position) UnmarshalJSON(b []byte) error {
	var raw struct {
		Version    string
		Time       int `json:",string"`
		Age        string
		Latitude   float64 `json:",string"`
		Longitude  float64 `json:",string"`
		Altitude   float64 `json:",string"`
		// Speed in m/s.
		Speed float64 `json:",string"`
		// Gyroskop?
		CMG        string
		Satellites int `json:",string"`
		Mode       string
	}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	p.Version = raw.Version
	p.Time = time.Unix(int64(raw.Time), 0)
	p.Latitude = raw.Latitude
	p.Longitude = raw.Longitude
	p.Altitude = raw.Altitude
	// Convert speed from m/s to km/h (same operation order as before to
	// keep floating-point results bit-identical).
	p.Speed = raw.Speed * 3600.0 / 1000.0
	p.Satellites = raw.Satellites
	return nil
}
// PositionInfo returns information about the train's position and speed,
// fetched from PositionURL and decoded from its JSONP payload.
func PositionInfo() (*Position, error) {
	res, err := http.Get(PositionURL)
	if err != nil {
		return nil, err
	}
	// Always release the body so the underlying connection can be
	// reused; previously it was never closed and leaked on every call.
	defer res.Body.Close()

	var p Position
	if err := unmarshalJSONP(res.Body, &p); err != nil {
		return nil, err
	}
	return &p, nil
}
Package bahn: Better error messages.
package bahn // import "github.com/octo/icestat/bahn"
import (
"encoding/json"
"fmt"
"net/http"
"time"
)
// PositionURL is the URL of JSONP-encoded information about the train's location and speed.
const PositionURL = "http://www.ombord.info/api/jsonp/position/"
// Position holds the position of the train as well as some other GPS related
// data (speed, #satellites, ...).
type Position struct {
Version string
Time time.Time
Latitude float64
Longitude float64
Altitude float64
// Speed in km/h.
Speed float64
Satellites int
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (p *Position) UnmarshalJSON(b []byte) error {
var parsed struct {
Version string
Time int `json:",string"`
Age string
Latitude float64 `json:",string"`
Longitude float64 `json:",string"`
Altitude float64 `json:",string"`
// Speed in m/s.
Speed float64 `json:",string"`
// Gyroskop?
CMG string
Satellites int `json:",string"`
Mode string
}
if err := json.Unmarshal(b, &parsed); err != nil {
return err
}
*p = Position{
Version: parsed.Version,
Time: time.Unix(int64(parsed.Time), 0),
Latitude: float64(parsed.Latitude),
Longitude: float64(parsed.Longitude),
Altitude: float64(parsed.Altitude),
// convert speed from m/s to km/h
Speed: float64(parsed.Speed) * 3600.0 / 1000.0,
Satellites: int(parsed.Satellites),
}
return nil
}
// PositionInfo returns information about the train's position and speed,
// fetched from PositionURL and decoded from its JSONP payload.
func PositionInfo() (*Position, error) {
	res, err := http.Get(PositionURL)
	if err != nil {
		return nil, fmt.Errorf("GET %s: %v", PositionURL, err)
	}
	// Close the body even on decode errors; previously it was never
	// closed, leaking the connection on every call.
	defer res.Body.Close()

	var p Position
	if err := unmarshalJSONP(res.Body, &p); err != nil {
		return nil, fmt.Errorf("unmarshal %s: %v", PositionURL, err)
	}
	return &p, nil
}
|
// Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
package oci
import (
"log"
)
// Version is the release version of the terraform-provider-oci plugin.
const Version = "3.56.0"

// PrintVersion logs the provider version at INFO level on startup.
func PrintVersion() {
	log.Printf("[INFO] terraform-provider-oci %s\n", Version)
}
Finalize changelog and release for version v3.57.0
// Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
package oci
import (
"log"
)
const Version = "3.57.0"
func PrintVersion() {
log.Printf("[INFO] terraform-provider-oci %s\n", Version)
}
|
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package upgrades
import (
"fmt"
"launchpad.net/juju-core/environs/config"
"launchpad.net/juju-core/state"
)
// updateRsyslogPort is an upgrade step that resets the environment's
// syslog-port setting to the current default.
func updateRsyslogPort(context Context) error {
	agentConfig := context.AgentConfig()
	info, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("Failed to get StateInfo")
	}
	// Re-open state directly rather than going through the API,
	// presumably so config validation can be bypassed (syslog-port is
	// normally immutable) — TODO confirm.
	st, err := state.Open(info, state.DefaultDialOpts(), nil)
	if err != nil {
		return err
	}
	defer st.Close()
	attrs := map[string]interface{}{
		"syslog-port": config.DefaultSyslogPort,
	}
	return st.UpdateEnvironConfig(attrs, nil, nil)
}
Add comment
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package upgrades
import (
"fmt"
"launchpad.net/juju-core/environs/config"
"launchpad.net/juju-core/state"
)
func updateRsyslogPort(context Context) error {
agentConfig := context.AgentConfig()
info, ok := agentConfig.StateInfo()
if !ok {
return fmt.Errorf("Failed to get StateInfo")
}
// we need to re-open state so we can bypass validation
// as the syslog-port is normally immutable
st, err := state.Open(info, state.DefaultDialOpts(), nil)
if err != nil {
return err
}
defer st.Close()
attrs := map[string]interface{}{
"syslog-port": config.DefaultSyslogPort,
}
return st.UpdateEnvironConfig(attrs, nil, nil)
}
|
package file
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/cosiner/gohper/errors"
"github.com/cosiner/gohper/io2"
)
// ErrDestIsFile reports that a destination path is a regular file.
// (Fixes the misspelled "destnation" in the error text.)
const ErrDestIsFile = errors.Err("destination is a file")
// FileOpFunc accept a file descriptor, return an error or nil
type FileOpFunc func(*os.File) error
// Open file use given flag. The file is opened with FilePerm as the
// creation mode, fn (if non-nil) is applied to the descriptor, and the
// file is always closed afterwards.
func Open(fname string, flags int, fn FileOpFunc) error {
	fd, err := os.OpenFile(fname, flags, FilePerm)
	if err != nil {
		return err
	}
	if fn != nil {
		err = fn(fd)
	}
	// Prefer fn's error, but don't lose a Close failure when fn succeeded.
	if e := fd.Close(); e != nil && err == nil {
		err = e
	}
	// io2.NonEOF presumably strips io.EOF so filters can use it as a
	// deliberate early-stop signal (see FirstLine) — confirm in io2.
	return io2.NonEOF(err)
}
// WriteFlag selects the write-mode flag: os.O_TRUNC when existing
// content should be discarded, os.O_APPEND when it should be kept.
func WriteFlag(trunc bool) int {
	flag := os.O_APPEND
	if trunc {
		flag = os.O_TRUNC
	}
	return flag
}
// FirstLine read first line from file
func FirstLine(src string) (line string, err error) {
	// Capture the first line, then abort the scan by returning io.EOF,
	// which is presumably treated as a normal stop (not an error) by
	// io2.NonEOF inside Open — confirm in io2.
	err = Filter(src, func(_ int, l []byte) ([]byte, error) {
		line = string(l)
		return nil, io.EOF
	})
	return
}
// Filter file content with given filter, file is in ReadOnly mode
func Filter(src string, filter io2.LineFilterFunc) error {
	return Read(src, func(fd *os.File) (err error) {
		// nil writer / false flag: presumably the filtered output is
		// discarded rather than written anywhere (contrast FilterTo).
		return io2.Filter(fd, nil, false, filter)
	})
}
// FilterTo filter file content with given filter, then write result
// to dest file. The destination is created if missing; trunc controls
// whether its existing content is discarded or appended to.
func FilterTo(dst, src string, trunc bool, filter io2.LineFilterFunc) error {
	return Read(src, func(sfd *os.File) (err error) {
		return OpenOrCreate(dst, trunc, func(dfd *os.File) error {
			return io2.Filter(sfd, dfd, true, filter)
		})
	})
}
// Copy src file to dest file, truncating dst first.
// NOTE(review): io2.NopLineFilte looks misspelled (NopLineFilter?) but
// it is an identifier declared in the external io2 package, so it must
// match that package's spelling — fix it there, not here.
func Copy(dst, src string) error {
	return FilterTo(dst, src, true, io2.NopLineFilte)
}
// CopyDir recursively copies the directory tree rooted at src into dst,
// creating dst (and any parents) first. Copying stops at the first
// error, which is returned.
func CopyDir(dst, src string) error {
	if err := os.MkdirAll(dst, 0755); err != nil {
		return err
	}
	entries, err := ioutil.ReadDir(src)
	// A ReadDir error skips the loop and is returned unchanged.
	for i := 0; err == nil && i < len(entries); i++ {
		name := entries[i].Name()
		srcPath := filepath.Join(src, name)
		dstPath := filepath.Join(dst, name)
		if IsFile(srcPath) {
			err = Copy(dstPath, srcPath)
		} else {
			err = CopyDir(dstPath, srcPath)
		}
	}
	return err
}
// Overwrite delete all content in file, and write new content to it.
// The file must already exist (Trunc does not create it).
func Overwrite(src string, content string) error {
	return Trunc(src, func(fd *os.File) error {
		_, err := fd.WriteString(content)
		return err
	})
}
// for Read

// Read opens fname read-only and applies fn.
func Read(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_RDONLY, fn)
}

// ReadWrite opens fname for both reading and writing and applies fn.
func ReadWrite(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_RDWR, fn)
}

// for Write

// Write opens fname write-only (content preserved) and applies fn.
func Write(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_WRONLY, fn)
}

// Trunc opens fname write-only, discarding existing content.
func Trunc(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_WRONLY|os.O_TRUNC, fn)
}

// Create creates fname for writing; it fails if the file exists (O_EXCL).
func Create(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fn)
}

// Append opens fname write-only in append mode.
func Append(fname string, fn FileOpFunc) error {
	return Open(fname, os.O_WRONLY|os.O_APPEND, fn)
}

// OpenOrCreate opens (or creates) fname for writing; trunc selects
// truncate vs. append via WriteFlag.
func OpenOrCreate(fname string, trunc bool, fn FileOpFunc) error {
	return Open(fname, os.O_CREATE|os.O_WRONLY|WriteFlag(trunc), fn)
}
Fix typo
package file
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/cosiner/gohper/errors"
"github.com/cosiner/gohper/io2"
)
const ErrDestIsFile = errors.Err("destination is a file")
// FileOpFunc accept a file descriptor, return an error or nil
type FileOpFunc func(*os.File) error
// Open file use given flag
func Open(fname string, flags int, fn FileOpFunc) error {
fd, err := os.OpenFile(fname, flags, FilePerm)
if err != nil {
return err
}
if fn != nil {
err = fn(fd)
}
if e := fd.Close(); e != nil && err == nil {
err = e
}
return io2.NonEOF(err)
}
// WriteFlag return os.O_APPEND if not delete content, else os.O_TRUNC
func WriteFlag(trunc bool) int {
if trunc {
return os.O_TRUNC
}
return os.O_APPEND
}
// FirstLine read first line from file
func FirstLine(src string) (line string, err error) {
err = Filter(src, func(_ int, l []byte) ([]byte, error) {
line = string(l)
return nil, io.EOF
})
return
}
// Filter file content with given filter, file is in ReadOnly mode
func Filter(src string, filter io2.LineFilterFunc) error {
return Read(src, func(fd *os.File) (err error) {
return io2.Filter(fd, nil, false, filter)
})
}
// FilterTo filter file content with given filter, then write result
// to dest file
func FilterTo(dst, src string, trunc bool, filter io2.LineFilterFunc) error {
return Read(src, func(sfd *os.File) (err error) {
return OpenOrCreate(dst, trunc, func(dfd *os.File) error {
return io2.Filter(sfd, dfd, true, filter)
})
})
}
// Copy src file to dest file
func Copy(dst, src string) error {
return FilterTo(dst, src, true, io2.NopLineFilte)
}
// CopyDir copy directory from source to destination
func CopyDir(dst, src string) error {
err := os.MkdirAll(dst, 0755)
if err != nil {
return err
}
files, err := ioutil.ReadDir(src)
for i := 0; i < len(files) && err == nil; i++ {
file := files[i].Name()
df := filepath.Join(dst, file)
sf := filepath.Join(src, file)
if IsFile(sf) {
err = Copy(df, sf)
} else {
err = CopyDir(df, sf)
}
}
return err
}
// Overwrite delete all content in file, and write new content to it
func Overwrite(src string, content string) error {
return Trunc(src, func(fd *os.File) error {
_, err := fd.WriteString(content)
return err
})
}
// for Read
func Read(fname string, fn FileOpFunc) error {
return Open(fname, os.O_RDONLY, fn)
}
func ReadWrite(fname string, fn FileOpFunc) error {
return Open(fname, os.O_RDWR, fn)
}
// for Write
func Write(fname string, fn FileOpFunc) error {
return Open(fname, os.O_WRONLY, fn)
}
func Trunc(fname string, fn FileOpFunc) error {
return Open(fname, os.O_WRONLY|os.O_TRUNC, fn)
}
func Create(fname string, fn FileOpFunc) error {
return Open(fname, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fn)
}
func Append(fname string, fn FileOpFunc) error {
return Open(fname, os.O_WRONLY|os.O_APPEND, fn)
}
func OpenOrCreate(fname string, trunc bool, fn FileOpFunc) error {
return Open(fname, os.O_CREATE|os.O_WRONLY|WriteFlag(trunc), fn)
}
|
// Copyright 2018 The ACH Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package ach
import (
"testing"
)
// mockBatchSHRHeader creates a BatchSHR BatchHeader
func mockBatchSHRHeader() *BatchHeader {
bh := NewBatchHeader()
bh.ServiceClassCode = 225
bh.StandardEntryClassCode = "SHR"
bh.CompanyName = "Payee Name"
bh.CompanyIdentification = "121042882"
bh.CompanyEntryDescription = "ACH SHR"
bh.ODFIIdentification = "12104288"
return bh
}
// mockSHREntryDetail creates a BatchSHR EntryDetail
func mockSHREntryDetail() *EntryDetail {
entry := NewEntryDetail()
entry.TransactionCode = 27
entry.SetRDFI("231380104")
entry.DFIAccountNumber = "744-5678-99"
entry.Amount = 25000
entry.SetSHRCardExpirationDate("0718")
entry.SetSHRDocumentReferenceNumber(12345678910)
entry.SetSHRIndividualCardAccountNumber(12345678910123456)
entry.SetTraceNumber(mockBatchSHRHeader().ODFIIdentification, 123)
entry.DiscretionaryData = "01"
entry.Category = CategoryForward
return entry
}
// mockBatchSHR creates a fully-built BatchSHR fixture with one entry
// and one Addenda02 record. It panics on Create failure, which is
// acceptable for a test fixture: a broken mock should abort the run.
func mockBatchSHR() *BatchSHR {
	mockBatch := NewBatchSHR(mockBatchSHRHeader())
	mockBatch.AddEntry(mockSHREntryDetail())
	mockBatch.GetEntries()[0].AddAddenda(mockAddenda02())
	if err := mockBatch.Create(); err != nil {
		panic(err)
	}
	return mockBatch
}
// testBatchSHRHeader verifies that NewBatch returns the SHR-specific
// batcher for a header whose StandardEntryClassCode is "SHR".
func testBatchSHRHeader(t testing.TB) {
	batch, _ := NewBatch(mockBatchSHRHeader())
	_, ok := batch.(*BatchSHR)
	if !ok {
		// Report the actual dynamic type of batch; the old code printed
		// the failed assertion target, which is always a nil *BatchSHR.
		t.Errorf("Expecting BatchSHR got %T", batch)
	}
}
// TestBatchSHRHeader tests validating BatchSHR BatchHeader
func TestBatchSHRHeader(t *testing.T) {
testBatchSHRHeader(t)
}
// BenchmarkBatchSHRHeader benchmarks validating BatchSHR BatchHeader
func BenchmarkBatchSHRHeader(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRHeader(b)
}
}
// testBatchSHRCreate validates BatchSHR create and the resulting batch.
func testBatchSHRCreate(t testing.TB) {
	mockBatch := mockBatchSHR()
	// Check the Create error instead of discarding it; a silent build
	// failure would make the Validate result below meaningless.
	if err := mockBatch.Create(); err != nil {
		t.Errorf("%T: %s", err, err)
	}
	if err := mockBatch.Validate(); err != nil {
		t.Errorf("%T: %s", err, err)
	}
}
// TestBatchSHRCreate tests validating BatchSHR create
func TestBatchSHRCreate(t *testing.T) {
testBatchSHRCreate(t)
}
// BenchmarkBatchSHRCreate benchmarks validating BatchSHR create
func BenchmarkBatchSHRCreate(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRCreate(b)
}
}
// testBatchSHRStandardEntryClassCode validates BatchSHR create for an invalid StandardEntryClassCode
func testBatchSHRStandardEntryClassCode(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.StandardEntryClassCode = "WEB"
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "StandardEntryClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRStandardEntryClassCode tests validating BatchSHR create for an invalid StandardEntryClassCode
func TestBatchSHRStandardEntryClassCode(t *testing.T) {
testBatchSHRStandardEntryClassCode(t)
}
// BenchmarkBatchSHRStandardEntryClassCode benchmarks validating BatchSHR create for an invalid StandardEntryClassCode
func BenchmarkBatchSHRStandardEntryClassCode(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRStandardEntryClassCode(b)
}
}
// testBatchSHRServiceClassCodeEquality validates service class code equality
func testBatchSHRServiceClassCodeEquality(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetControl().ServiceClassCode = 200
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClassCodeEquality tests validating service class code equality
func TestBatchSHRServiceClassCodeEquality(t *testing.T) {
testBatchSHRServiceClassCodeEquality(t)
}
// BenchmarkBatchSHRServiceClassCodeEquality benchmarks validating service class code equality
func BenchmarkBatchSHRServiceClassCodeEquality(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClassCodeEquality(b)
}
}
// testBatchSHRServiceClass200 validates BatchSHR create for an invalid ServiceClassCode 200
func testBatchSHRServiceClass200(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.ServiceClassCode = 200
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClass200 tests validating BatchSHR create for an invalid ServiceClassCode 200
func TestBatchSHRServiceClass200(t *testing.T) {
testBatchSHRServiceClass200(t)
}
// BenchmarkBatchSHRServiceClass200 benchmarks validating BatchSHR create for an invalid ServiceClassCode 200
func BenchmarkBatchSHRServiceClass200(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClass200(b)
}
}
// testBatchSHRServiceClass220 validates BatchSHR create for an invalid ServiceClassCode 220
func testBatchSHRServiceClass220(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.ServiceClassCode = 220
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClass220 tests validating BatchSHR create for an invalid ServiceClassCode 220
func TestBatchSHRServiceClass220(t *testing.T) {
testBatchSHRServiceClass220(t)
}
// BenchmarkBatchSHRServiceClass220 benchmarks validating BatchSHR create for an invalid ServiceClassCode 220
func BenchmarkBatchSHRServiceClass220(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClass220(b)
}
}
// testBatchSHRServiceClass280 validates BatchSHR create for an invalid ServiceClassCode 280
func testBatchSHRServiceClass280(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.ServiceClassCode = 280
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClass280 tests validating BatchSHR create for an invalid ServiceClassCode 280
func TestBatchSHRServiceClass280(t *testing.T) {
testBatchSHRServiceClass280(t)
}
// BenchmarkBatchSHRServiceClass280 benchmarks validating BatchSHR create for an invalid ServiceClassCode 280
func BenchmarkBatchSHRServiceClass280(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClass280(b)
}
}
// testBatchSHRTransactionCode validates BatchSHR TransactionCode is not a credit
func testBatchSHRTransactionCode(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetEntries()[0].TransactionCode = 22
if err := mockBatch.Create(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "TransactionCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRTransactionCode tests validating BatchSHR TransactionCode is not a credit
func TestBatchSHRTransactionCode(t *testing.T) {
testBatchSHRTransactionCode(t)
}
// BenchmarkBatchSHRTransactionCode benchmarks validating BatchSHR TransactionCode is not a credit
func BenchmarkBatchSHRTransactionCode(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRTransactionCode(b)
}
}
// testBatchSHRAddendaCount validates BatchSHR Addendum count of 2
func testBatchSHRAddendaCount(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetEntries()[0].AddAddenda(mockAddenda02())
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "Addendum" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRAddendaCount tests validating BatchSHR Addendum count of 2
func TestBatchSHRAddendaCount(t *testing.T) {
testBatchSHRAddendaCount(t)
}
// BenchmarkBatchSHRAddendaCount benchmarks validating BatchSHR Addendum count of 2
func BenchmarkBatchSHRAddendaCount(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRAddendaCount(b)
}
}
// testBatchSHRAddendaCountZero validates Addendum count of 0
func testBatchSHRAddendaCountZero(t testing.TB) {
mockBatch := NewBatchSHR(mockBatchSHRHeader())
mockBatch.AddEntry(mockSHREntryDetail())
if err := mockBatch.Create(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "Addendum" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRAddendaCountZero tests validating Addendum count of 0
func TestBatchSHRAddendaCountZero(t *testing.T) {
testBatchSHRAddendaCountZero(t)
}
// BenchmarkBatchSHRAddendaCountZero benchmarks validating Addendum count of 0
func BenchmarkBatchSHRAddendaCountZero(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRAddendaCountZero(b)
}
}
// testBatchSHRInvalidAddendum validates Addendum must be Addenda02
func testBatchSHRInvalidAddendum(t testing.TB) {
mockBatch := NewBatchSHR(mockBatchSHRHeader())
mockBatch.AddEntry(mockSHREntryDetail())
mockBatch.GetEntries()[0].AddAddenda(mockAddenda05())
if err := mockBatch.Create(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "Addendum" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRInvalidAddendum tests validating Addendum must be Addenda02
func TestBatchSHRInvalidAddendum(t *testing.T) {
testBatchSHRInvalidAddendum(t)
}
// BenchmarkBatchSHRInvalidAddendum benchmarks validating Addendum must be Addenda02
func BenchmarkBatchSHRInvalidAddendum(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRInvalidAddendum(b)
}
}
// testBatchSHRInvalidAddenda validates Addendum must be Addenda02
func testBatchSHRInvalidAddenda(t testing.TB) {
mockBatch := NewBatchSHR(mockBatchSHRHeader())
mockBatch.AddEntry(mockSHREntryDetail())
addenda02 := mockAddenda02()
addenda02.recordType = "63"
mockBatch.GetEntries()[0].AddAddenda(addenda02)
if err := mockBatch.Create(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "recordType" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRInvalidAddenda tests validating Addendum must be Addenda02
func TestBatchSHRInvalidAddenda(t *testing.T) {
testBatchSHRInvalidAddenda(t)
}
// BenchmarkBatchSHRInvalidAddenda benchmarks validating Addendum must be Addenda02
func BenchmarkBatchSHRInvalidAddenda(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRInvalidAddenda(b)
}
}
// ToDo: Using a FieldError may need to add a BatchError and use *BatchError
// testBatchSHRInvalidBuild validates an invalid batch build
func testBatchSHRInvalidBuild(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetHeader().recordType = "3"
if err := mockBatch.Create(); err != nil {
if e, ok := err.(*FieldError); ok {
if e.FieldName != "recordType" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRInvalidBuild tests validating an invalid batch build
func TestBatchSHRInvalidBuild(t *testing.T) {
testBatchSHRInvalidBuild(t)
}
// BenchmarkBatchSHRInvalidBuild benchmarks validating an invalid batch build
func BenchmarkBatchSHRInvalidBuild(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRInvalidBuild(b)
}
}
// testBatchSHRCardTransactionType validates BatchSHR create for an invalid CardTransactionType
func testBatchSHRCardTransactionType(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetEntries()[0].DiscretionaryData = "555"
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "CardTransactionType" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRCardTransactionType tests validating BatchSHR create for an invalid CardTransactionType
func TestBatchSHRCardTransactionType(t *testing.T) {
testBatchSHRCardTransactionType(t)
}
// BenchmarkBatchSHRCardTransactionType benchmarks validating BatchSHR create for an invalid CardTransactionType
func BenchmarkBatchSHRCardTransactionType(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRCardTransactionType(b)
}
}
#207 code coverage
#207 code coverage
// Copyright 2018 The ACH Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package ach
import (
"testing"
)
// mockBatchSHRHeader creates a BatchSHR BatchHeader
func mockBatchSHRHeader() *BatchHeader {
bh := NewBatchHeader()
bh.ServiceClassCode = 225
bh.StandardEntryClassCode = "SHR"
bh.CompanyName = "Payee Name"
bh.CompanyIdentification = "121042882"
bh.CompanyEntryDescription = "ACH SHR"
bh.ODFIIdentification = "12104288"
return bh
}
// mockSHREntryDetail creates a BatchSHR EntryDetail
func mockSHREntryDetail() *EntryDetail {
entry := NewEntryDetail()
entry.TransactionCode = 27
entry.SetRDFI("231380104")
entry.DFIAccountNumber = "744-5678-99"
entry.Amount = 25000
entry.SetSHRCardExpirationDate("0718")
entry.SetSHRDocumentReferenceNumber(12345678910)
entry.SetSHRIndividualCardAccountNumber(12345678910123456)
entry.SetTraceNumber(mockBatchSHRHeader().ODFIIdentification, 123)
entry.DiscretionaryData = "01"
entry.Category = CategoryForward
return entry
}
// mockBatchSHR creates a BatchSHR
func mockBatchSHR() *BatchSHR {
mockBatch := NewBatchSHR(mockBatchSHRHeader())
mockBatch.AddEntry(mockSHREntryDetail())
mockBatch.GetEntries()[0].AddAddenda(mockAddenda02())
if err := mockBatch.Create(); err != nil {
panic(err)
}
return mockBatch
}
// testBatchSHRHeader verifies that NewBatch returns the SHR-specific
// batcher for a header whose StandardEntryClassCode is "SHR".
func testBatchSHRHeader(t testing.TB) {
	batch, _ := NewBatch(mockBatchSHRHeader())
	_, ok := batch.(*BatchSHR)
	if !ok {
		// Report the actual dynamic type of batch; the old code printed
		// the failed assertion target, which is always a nil *BatchSHR.
		t.Errorf("Expecting BatchSHR got %T", batch)
	}
}
// TestBatchSHRHeader tests validating BatchSHR BatchHeader
func TestBatchSHRHeader(t *testing.T) {
testBatchSHRHeader(t)
}
// BenchmarkBatchSHRHeader benchmarks validating BatchSHR BatchHeader
func BenchmarkBatchSHRHeader(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRHeader(b)
}
}
// testBatchSHRCreate validates BatchSHR create and the resulting batch.
func testBatchSHRCreate(t testing.TB) {
	mockBatch := mockBatchSHR()
	// Check the Create error instead of discarding it; a silent build
	// failure would make the Validate result below meaningless.
	if err := mockBatch.Create(); err != nil {
		t.Errorf("%T: %s", err, err)
	}
	if err := mockBatch.Validate(); err != nil {
		t.Errorf("%T: %s", err, err)
	}
}
// TestBatchSHRCreate tests validating BatchSHR create
func TestBatchSHRCreate(t *testing.T) {
testBatchSHRCreate(t)
}
// BenchmarkBatchSHRCreate benchmarks validating BatchSHR create
func BenchmarkBatchSHRCreate(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRCreate(b)
}
}
// testBatchSHRStandardEntryClassCode validates BatchSHR create for an invalid StandardEntryClassCode
func testBatchSHRStandardEntryClassCode(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.StandardEntryClassCode = "WEB"
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "StandardEntryClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRStandardEntryClassCode tests validating BatchSHR create for an invalid StandardEntryClassCode
func TestBatchSHRStandardEntryClassCode(t *testing.T) {
testBatchSHRStandardEntryClassCode(t)
}
// BenchmarkBatchSHRStandardEntryClassCode benchmarks validating BatchSHR create for an invalid StandardEntryClassCode
func BenchmarkBatchSHRStandardEntryClassCode(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRStandardEntryClassCode(b)
}
}
// testBatchSHRServiceClassCodeEquality validates service class code equality
func testBatchSHRServiceClassCodeEquality(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.GetControl().ServiceClassCode = 200
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClassCodeEquality tests validating service class code equality
func TestBatchSHRServiceClassCodeEquality(t *testing.T) {
testBatchSHRServiceClassCodeEquality(t)
}
// BenchmarkBatchSHRServiceClassCodeEquality benchmarks validating service class code equality
func BenchmarkBatchSHRServiceClassCodeEquality(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClassCodeEquality(b)
}
}
// testBatchSHRServiceClass200 validates BatchSHR create for an invalid ServiceClassCode 200
func testBatchSHRServiceClass200(t testing.TB) {
mockBatch := mockBatchSHR()
mockBatch.Header.ServiceClassCode = 200
mockBatch.Create()
if err := mockBatch.Validate(); err != nil {
if e, ok := err.(*BatchError); ok {
if e.FieldName != "ServiceClassCode" {
t.Errorf("%T: %s", err, err)
}
} else {
t.Errorf("%T: %s", err, err)
}
}
}
// TestBatchSHRServiceClass200 tests validating BatchSHR create for an invalid ServiceClassCode 200
func TestBatchSHRServiceClass200(t *testing.T) {
testBatchSHRServiceClass200(t)
}
// BenchmarkBatchSHRServiceClass200 benchmarks validating BatchSHR create for an invalid ServiceClassCode 200
func BenchmarkBatchSHRServiceClass200(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testBatchSHRServiceClass200(b)
}
}
// testBatchSHRServiceClass220 validates BatchSHR create for an invalid ServiceClassCode 220
func testBatchSHRServiceClass220(t testing.TB) {
	batch := mockBatchSHR()
	batch.Header.ServiceClassCode = 220
	batch.Create()
	err := batch.Validate()
	if err == nil {
		return
	}
	// only a *BatchError on ServiceClassCode is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "ServiceClassCode" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRServiceClass220 tests validating BatchSHR create for an invalid ServiceClassCode 220
func TestBatchSHRServiceClass220(t *testing.T) {
	testBatchSHRServiceClass220(t)
}

// BenchmarkBatchSHRServiceClass220 benchmarks validating BatchSHR create for an invalid ServiceClassCode 220
func BenchmarkBatchSHRServiceClass220(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRServiceClass220(b)
	}
}
// testBatchSHRServiceClass280 validates BatchSHR create for an invalid ServiceClassCode 280
func testBatchSHRServiceClass280(t testing.TB) {
	batch := mockBatchSHR()
	batch.Header.ServiceClassCode = 280
	batch.Create()
	err := batch.Validate()
	if err == nil {
		return
	}
	// only a *BatchError on ServiceClassCode is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "ServiceClassCode" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRServiceClass280 tests validating BatchSHR create for an invalid ServiceClassCode 280
func TestBatchSHRServiceClass280(t *testing.T) {
	testBatchSHRServiceClass280(t)
}

// BenchmarkBatchSHRServiceClass280 benchmarks validating BatchSHR create for an invalid ServiceClassCode 280
func BenchmarkBatchSHRServiceClass280(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRServiceClass280(b)
	}
}
// testBatchSHRTransactionCode validates BatchSHR TransactionCode is not a credit
func testBatchSHRTransactionCode(t testing.TB) {
	batch := mockBatchSHR()
	batch.GetEntries()[0].TransactionCode = 22
	err := batch.Create()
	if err == nil {
		return
	}
	// only a *BatchError on TransactionCode is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "TransactionCode" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRTransactionCode tests validating BatchSHR TransactionCode is not a credit
func TestBatchSHRTransactionCode(t *testing.T) {
	testBatchSHRTransactionCode(t)
}

// BenchmarkBatchSHRTransactionCode benchmarks validating BatchSHR TransactionCode is not a credit
func BenchmarkBatchSHRTransactionCode(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRTransactionCode(b)
	}
}
// testBatchSHRAddendaCount validates BatchSHR Addendum count of 2
func testBatchSHRAddendaCount(t testing.TB) {
	batch := mockBatchSHR()
	batch.GetEntries()[0].AddAddenda(mockAddenda02())
	batch.Create()
	err := batch.Validate()
	if err == nil {
		return
	}
	// only a *BatchError on Addendum is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "Addendum" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRAddendaCount tests validating BatchSHR Addendum count of 2
func TestBatchSHRAddendaCount(t *testing.T) {
	testBatchSHRAddendaCount(t)
}

// BenchmarkBatchSHRAddendaCount benchmarks validating BatchSHR Addendum count of 2
func BenchmarkBatchSHRAddendaCount(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRAddendaCount(b)
	}
}
// testBatchSHRAddendaCountZero validates Addendum count of 0
func testBatchSHRAddendaCountZero(t testing.TB) {
	batch := NewBatchSHR(mockBatchSHRHeader())
	batch.AddEntry(mockSHREntryDetail())
	err := batch.Create()
	if err == nil {
		return
	}
	// only a *BatchError on Addendum is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "Addendum" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRAddendaCountZero tests validating Addendum count of 0
func TestBatchSHRAddendaCountZero(t *testing.T) {
	testBatchSHRAddendaCountZero(t)
}

// BenchmarkBatchSHRAddendaCountZero benchmarks validating Addendum count of 0
func BenchmarkBatchSHRAddendaCountZero(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRAddendaCountZero(b)
	}
}
// testBatchSHRInvalidAddendum validates Addendum must be Addenda02
func testBatchSHRInvalidAddendum(t testing.TB) {
	batch := NewBatchSHR(mockBatchSHRHeader())
	batch.AddEntry(mockSHREntryDetail())
	batch.GetEntries()[0].AddAddenda(mockAddenda05())
	err := batch.Create()
	if err == nil {
		return
	}
	// only a *BatchError on Addendum is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "Addendum" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRInvalidAddendum tests validating Addendum must be Addenda02
func TestBatchSHRInvalidAddendum(t *testing.T) {
	testBatchSHRInvalidAddendum(t)
}

// BenchmarkBatchSHRInvalidAddendum benchmarks validating Addendum must be Addenda02
func BenchmarkBatchSHRInvalidAddendum(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRInvalidAddendum(b)
	}
}
// testBatchSHRInvalidAddenda validates Addendum must be Addenda02
func testBatchSHRInvalidAddenda(t testing.TB) {
	batch := NewBatchSHR(mockBatchSHRHeader())
	batch.AddEntry(mockSHREntryDetail())
	addenda02 := mockAddenda02()
	addenda02.recordType = "63"
	batch.GetEntries()[0].AddAddenda(addenda02)
	err := batch.Create()
	if err == nil {
		return
	}
	// only a *BatchError on recordType is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "recordType" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRInvalidAddenda tests validating Addendum must be Addenda02
func TestBatchSHRInvalidAddenda(t *testing.T) {
	testBatchSHRInvalidAddenda(t)
}

// BenchmarkBatchSHRInvalidAddenda benchmarks validating Addendum must be Addenda02
func BenchmarkBatchSHRInvalidAddenda(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRInvalidAddenda(b)
	}
}
// ToDo: Using a FieldError may need to add a BatchError and use *BatchError

// testBatchSHRInvalidBuild validates an invalid batch build
func testBatchSHRInvalidBuild(t testing.TB) {
	batch := mockBatchSHR()
	batch.GetHeader().recordType = "3"
	err := batch.Create()
	if err == nil {
		return
	}
	// note: this path surfaces a *FieldError rather than a *BatchError
	if fieldErr, ok := err.(*FieldError); !ok || fieldErr.FieldName != "recordType" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRInvalidBuild tests validating an invalid batch build
func TestBatchSHRInvalidBuild(t *testing.T) {
	testBatchSHRInvalidBuild(t)
}

// BenchmarkBatchSHRInvalidBuild benchmarks validating an invalid batch build
func BenchmarkBatchSHRInvalidBuild(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRInvalidBuild(b)
	}
}
// testBatchSHRCardTransactionType validates BatchSHR create for an invalid CardTransactionType
func testBatchSHRCardTransactionType(t testing.TB) {
	batch := mockBatchSHR()
	batch.GetEntries()[0].DiscretionaryData = "555"
	err := batch.Validate()
	if err == nil {
		return
	}
	// only a *BatchError on CardTransactionType is expected here
	if batchErr, ok := err.(*BatchError); !ok || batchErr.FieldName != "CardTransactionType" {
		t.Errorf("%T: %s", err, err)
	}
}

// TestBatchSHRCardTransactionType tests validating BatchSHR create for an invalid CardTransactionType
func TestBatchSHRCardTransactionType(t *testing.T) {
	testBatchSHRCardTransactionType(t)
}

// BenchmarkBatchSHRCardTransactionType benchmarks validating BatchSHR create for an invalid CardTransactionType
func BenchmarkBatchSHRCardTransactionType(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRCardTransactionType(b)
	}
}
// testBatchSHRCardExpirationDateField validates SHRCardExpirationDate
// characters 0-4 of underlying IdentificationNumber
func testBatchSHRCardExpirationDateField(t testing.TB) {
	batch := mockBatchSHR()
	if got := batch.Entries[0].SHRCardExpirationDateField(); got != "0718" {
		t.Error("Card Expiration Date is invalid")
	}
}

// TestBatchSHRCardExpirationDateField tests validatingSHRCardExpirationDate
// characters 0-4 of underlying IdentificationNumber
func TestBatchSHRCardExpirationDateField(t *testing.T) {
	testBatchSHRCardExpirationDateField(t)
}

// BenchmarkBatchSHRCardExpirationDateField benchmarks validating SHRCardExpirationDate
// characters 0-4 of underlying IdentificationNumber
func BenchmarkBatchSHRCardExpirationDateField(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRCardExpirationDateField(b)
	}
}
// testBatchSHRDocumentReferenceNumberField validates SHRDocumentReferenceNumberField
// characters 5-15 of underlying IdentificationNumber
func testBatchSHRDocumentReferenceNumberField(t testing.TB) {
	batch := mockBatchSHR()
	if got := batch.Entries[0].SHRDocumentReferenceNumberField(); got != 12345678910 {
		t.Error("Document Reference Number is invalid")
	}
}

// TestBatchSHRDocumentReferenceNumberField tests validating SHRDocumentReferenceNumberField
// characters 5-15 of underlying IdentificationNumber
func TestBatchSHRDocumentReferenceNumberField(t *testing.T) {
	testBatchSHRDocumentReferenceNumberField(t)
}

// BenchmarkSHRDocumentReferenceNumberField benchmarks validating SHRDocumentReferenceNumberField
// characters 5-15 of underlying IdentificationNumber
func BenchmarkSHRDocumentReferenceNumberField(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		testBatchSHRDocumentReferenceNumberField(b)
	}
}
// testBatchSHRIndividualCardAccountNumberField validates SHRIndividualCardAccountNumberField
// underlying IndividualName
func testBatchSHRIndividualCardAccountNumberField(t testing.TB) {
	mockBatch := mockBatchSHR()
	ts := mockBatch.Entries[0].SHRIndividualCardAccountNumberField()
	if ts != 12345678910123456 {
		t.Error("Individual Card Account Number is invalid")
	}
}

// TestBatchSHRIndividualCardAccountNumberField tests validating SHRIndividualCardAccountNumberField
// underlying IndividualName
func TestBatchSHRIndividualCardAccountNumberField(t *testing.T) {
	testBatchSHRIndividualCardAccountNumberField(t)
}

// BenchmarkBatchSHRIndividualCardAccountNumberField benchmarks validating SHRIndividualCardAccountNumberField
// underlying IndividualName
//
// Fix: the benchmark was misnamed BenchmarkBatchSHRDocumentReferenceNumberField
// (copy-paste from the previous block), contradicting its doc comment and the
// helper it actually exercises. Renamed to match.
func BenchmarkBatchSHRIndividualCardAccountNumberField(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		testBatchSHRIndividualCardAccountNumberField(b)
	}
}
|
package jo
// Parsing events.
//
// Parse and Eof report one Event per call; *Start/*End pairs bracket each
// JSON value encountered in the input.
type Event int

const (
	None = iota // no event available (e.g. all input consumed so far)
	SyntaxError // malformed input; details recorded on the Parser
	ObjectStart
	ObjectEnd
	KeyStart
	KeyEnd
	ArrayStart
	ArrayEnd
	StringStart
	StringEnd
	NumberStart
	NumberEnd
	BoolStart
	BoolEnd
	NullStart
	NullEnd
)

// Parser states.
//
// The trailing comment on each state shows the input prefix that leads to
// it. Declaration order is load-bearing: Parse increments p.state to walk
// the unicode-escape states into _StateString/_StateKey, and relies on
// _State{String,Key} + 1 being the matching *Escaped state.
const (
	_StateValue = iota
	_StateObjectKeyOrEnd      // {
	_StateObjectColon         // {"foo"
	_StateObjectCommaOrEnd    // {"foo":"bar"
	_StateArrayValueOrEnd     // [
	_StateArrayCommaOrEnd     // ["any value"
	_StateArrayValue          // ["any value",
	_StateStringUnicode       // "\u
	_StateStringUnicode2      // "\u1
	_StateStringUnicode3      // "\u12
	_StateStringUnicode4      // "\u123
	_StateString              // "
	_StateStringEscaped       // "\
	_StateKeyUnicode          // "\u
	_StateKeyUnicode2         // "\u1
	_StateKeyUnicode3         // "\u12
	_StateKeyUnicode4         // "\u123
	_StateKey                 // "
	_StateKeyEscaped          // "\
	_StateNumberNegative      // -
	_StateNumberZero          // 0
	_StateNumber              // 123
	_StateNumberDotFirstDigit // 123.
	_StateNumberDotDigit      // 123.4
	_StateNumberExpSign       // 123e
	_StateNumberExpFirstDigit // 123e+
	_StateNumberExpDigit      // 123e+1
	_StateTrue                // t
	_StateTrue2               // tr
	_StateTrue3               // tru
	_StateFalse               // f
	_StateFalse2              // fa
	_StateFalse3              // fal
	_StateFalse4              // fals
	_StateNull                // n
	_StateNull2               // nu
	_StateNull3               // nul
	_StateDone
	_StateSyntaxError
)

// Our own little implementation of the `error` interface.
type syntaxError string

func (e syntaxError) Error() string {
	return string(e)
}

// Parser state machine.
type Parser struct {
	state int   // current state (one of the _State* constants)
	queue []int // stack of states to resume once the current value ends
	err   error // set when a SyntaxError event has been returned
}
// Parses a byte slice containing JSON data. Returns the number of bytes
// read and an appropriate Event. Call repeatedly, advancing past the
// returned offset, to stream events out of the input.
func (p *Parser) Parse(input []byte) (int, Event) {
	for i := 0; i < len(input); i++ {
		b := input[i]

		switch p.state {
		case _StateValue:
			switch {
			case b == '{':
				p.state = _StateObjectKeyOrEnd
				return i + 1, ObjectStart
			case b == '[':
				p.state = _StateArrayValueOrEnd
				return i + 1, ArrayStart
			case b == '"':
				p.state = _StateString
				return i + 1, StringStart
			case b == '-':
				p.state = _StateNumberNegative
				return i + 1, NumberStart
			case b == '0':
				p.state = _StateNumberZero
				return i + 1, NumberStart
			case '1' <= b && b <= '9':
				p.state = _StateNumber
				return i + 1, NumberStart
			case b == 't':
				p.state = _StateTrue
				return i + 1, BoolStart
			case b == 'f':
				p.state = _StateFalse
				return i + 1, BoolStart
			case b == 'n':
				p.state = _StateNull
				return i + 1, NullStart
			default:
				return i, p.error(`_StateValue: @todo`)
			}
		case _StateObjectKeyOrEnd:
			if b == '}' {
				p.state = p.next()
				return i + 1, ObjectEnd
			}
			if b != '"' {
				return i, p.error(`_StateObjectKeyOrEnd: @todo`)
			}
			p.state = _StateKey
			return i + 1, KeyStart
		case _StateObjectColon:
			if b != ':' {
				// fix: report the state we are actually in (the message was
				// copy-pasted from _StateObjectKeyOrEnd)
				return i, p.error(`_StateObjectColon: @todo`)
			}
			p.push(_StateObjectCommaOrEnd)
			p.state = _StateValue
		case _StateObjectCommaOrEnd:
			switch b {
			case '}':
				p.state = p.next()
				return i + 1, ObjectEnd
			case ',':
				// fix: a comma inside an object must be followed by another
				// key, not a bare value. The old transition to _StateValue
				// made any object with two or more members unparseable: the
				// second key was emitted as StringStart/StringEnd and the
				// following ':' raised a syntax error. _StateObjectColon
				// pushes _StateObjectCommaOrEnd itself, so no push here.
				// @todo this also tolerates a trailing comma before '}'.
				p.state = _StateObjectKeyOrEnd
			default:
				return i, p.error(`_StateObjectCommaOrEnd: @todo`)
			}
		case _StateArrayValueOrEnd:
			if b == ']' {
				p.state = p.next()
				return i + 1, ArrayEnd
			}
			p.push(_StateArrayCommaOrEnd)
			p.state = _StateValue
			i-- // rewind and let _StateValue do the parsing
		case _StateArrayCommaOrEnd:
			switch b {
			case ']':
				p.state = p.next()
				return i + 1, ArrayEnd
			case ',':
				p.push(_StateArrayCommaOrEnd)
				p.state = _StateValue
			default:
				return i, p.error(`_StateArrayCommaOrEnd: @todo`)
			}
		case _StateStringUnicode, _StateKeyUnicode,
			_StateStringUnicode2, _StateKeyUnicode2,
			_StateStringUnicode3, _StateKeyUnicode3,
			_StateStringUnicode4, _StateKeyUnicode4:
			switch {
			case '0' <= b && b <= '9':
			case 'a' <= b && b <= 'f':
			case 'A' <= b && b <= 'F':
			default:
				return i, p.error(`_StateStringUnicodeX: @todo`)
			}
			// note that _State{String,Key}Unicode4 + 1 == _State{String/Key}
			p.state++
		case _StateString, _StateKey:
			switch {
			case b == '"':
				var ev Event
				if p.state == _StateKey {
					ev = KeyEnd
					p.state = _StateObjectColon
				} else {
					ev = StringEnd
					p.state = p.next()
				}
				return i + 1, ev
			case b == '\\':
				p.state++ // go to _State{String,Key}Escaped
			case b < 0x20:
				return i, p.error(`_StateString: @todo`)
			}
		case _StateStringEscaped, _StateKeyEscaped:
			switch b {
			case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
				p.state-- // back to _State{String,Key}
			case 'u':
				// fix: a "\u" escape inside an object key must resume key
				// parsing. The old code unconditionally jumped to
				// _StateStringUnicode, so the closing quote of such a key
				// emitted StringEnd instead of KeyEnd and skipped the
				// colon state entirely.
				if p.state == _StateKeyEscaped {
					p.state = _StateKeyUnicode
				} else {
					p.state = _StateStringUnicode
				}
			default:
				return i, p.error(`_StateStringEscaped: @todo`)
			}
		case _StateNumberNegative:
			switch {
			case b == '0':
				p.state = _StateNumberZero
			case '1' <= b && b <= '9':
				p.state = _StateNumber
			default:
				return i, p.error(`_StateNumberNegative: @todo`)
			}
		case _StateNumber:
			if '0' <= b && b <= '9' {
				break
			}
			fallthrough
		case _StateNumberZero:
			switch b {
			case '.':
				p.state = _StateNumberDotFirstDigit
			case 'e', 'E':
				p.state = _StateNumberExpSign
			default:
				p.state = p.next()
				return i, NumberEnd // rewind (note: `i` instead of `i + 1`)
			}
		case _StateNumberDotFirstDigit:
			if b < '0' || b > '9' {
				return i, p.error(`_StateNumberDot: @todo`)
			}
			p.state++
		case _StateNumberDotDigit:
			switch {
			case b == 'e', b == 'E':
				p.state = _StateNumberExpSign
			case b < '0' || b > '9':
				return i, p.error(`_StateNumberDotDigit: @todo`)
			}
		case _StateNumberExpSign:
			p.state++
			if b == '+' || b == '-' {
				break
			}
			fallthrough
		case _StateNumberExpFirstDigit:
			if b < '0' || b > '9' {
				return i, p.error(`_StateNumberAfterExp: @todo`)
			}
			p.state++
		case _StateNumberExpDigit:
			if b < '0' || b > '9' {
				p.state = p.next()
				return i + 1, NumberEnd
			}
		case _StateTrue:
			if b != 'r' {
				return i, p.error(`_StateTrue: @todo`)
			}
			p.state++
		case _StateTrue2:
			if b != 'u' {
				return i, p.error(`_StateTrue2: @todo`)
			}
			p.state++
		case _StateTrue3:
			if b != 'e' {
				return i, p.error(`_StateTrue3: @todo`)
			}
			p.state = p.next()
			return i + 1, BoolEnd
		case _StateFalse:
			if b != 'a' {
				return i, p.error(`_StateFalse: @todo`)
			}
			p.state++
		case _StateFalse2:
			if b != 'l' {
				return i, p.error(`_StateFalse2: @todo`)
			}
			p.state++
		case _StateFalse3:
			if b != 's' {
				return i, p.error(`_StateFalse3: @todo`)
			}
			p.state++
		case _StateFalse4:
			if b != 'e' {
				return i, p.error(`_StateFalse4: @todo`)
			}
			p.state = p.next()
			return i + 1, BoolEnd
		case _StateNull:
			if b != 'u' {
				return i, p.error(`_StateNull: @todo`)
			}
			p.state++
		case _StateNull2:
			if b != 'l' {
				return i, p.error(`_StateNull2: @todo`)
			}
			p.state++
		case _StateNull3:
			if b != 'l' {
				return i, p.error(`_StateNull3: @todo`)
			}
			p.state = p.next()
			return i + 1, NullEnd
		case _StateDone:
			return i, p.error(`_StateDone: @todo`)
		default:
			panic(`invalid state`)
		}
	}

	return len(input), None
}
// Informs the parser not to expect any further input. Returns
// pending NumberEnd events if there are any, or a SyntaxError
// if EOF was not expected -- otherwise None.
func (p *Parser) Eof() Event {
	if p.state == _StateDone {
		return None
	}
	switch p.state {
	case _StateNumberZero, _StateNumber, _StateNumberDotDigit, _StateNumberExpDigit:
		// a number may legitimately be terminated by end of input
		p.state = _StateDone
		return NumberEnd
	}
	return p.error(`.Eof(): @todo`)
}
// Pops the most recently pushed state off the parser's queue, or
// _StateDone when the queue is empty (nothing left to parse but EOF).
func (p *Parser) next() int {
	if len(p.queue) == 0 {
		return _StateDone
	}
	last := len(p.queue) - 1
	state := p.queue[last]
	p.queue = p.queue[:last]
	return state
}

// Saves a state to resume once the value currently being parsed ends.
func (p *Parser) push(state int) {
	p.queue = append(p.queue, state)
}

// Records a syntax error on the parser and reports it as a SyntaxError event.
func (p *Parser) error(message string) Event {
	p.err = syntaxError(message)
	return SyntaxError
}
Add support for whitespace between tokens
package jo
// Parsing events.
//
// Parse and Eof report one Event per call; *Start/*End pairs bracket each
// JSON value encountered in the input.
type Event int

const (
	None = iota // no event available (e.g. all input consumed so far)
	SyntaxError // malformed input; details recorded on the Parser
	ObjectStart
	ObjectEnd
	KeyStart
	KeyEnd
	ArrayStart
	ArrayEnd
	StringStart
	StringEnd
	NumberStart
	NumberEnd
	BoolStart
	BoolEnd
	NullStart
	NullEnd
)

// Parser states.
//
// The trailing comment on each state shows the input prefix that leads to
// it. Declaration order is load-bearing in two ways: _AllowWhitespace is a
// sentinel, not a real state -- Parse strips leading whitespace only for
// states declared above it -- and Parse increments p.state to walk the
// unicode-escape states into _StateString/_StateKey, relying on
// _State{String,Key} + 1 being the matching *Escaped state.
const (
	_StateValue = iota
	_StateDone
	_StateObjectKeyOrEnd   // {
	_StateObjectColon      // {"foo"
	_StateObjectCommaOrEnd // {"foo":"bar"
	_StateArrayValueOrEnd  // [
	_StateArrayCommaOrEnd  // ["any value"
	_StateArrayValue       // ["any value",

	// leading whitespace should be consumed from all states
	// above this line
	_AllowWhitespace

	_StateStringUnicode       // "\u
	_StateStringUnicode2      // "\u1
	_StateStringUnicode3      // "\u12
	_StateStringUnicode4      // "\u123
	_StateString              // "
	_StateStringEscaped       // "\
	_StateKeyUnicode          // "\u
	_StateKeyUnicode2         // "\u1
	_StateKeyUnicode3         // "\u12
	_StateKeyUnicode4         // "\u123
	_StateKey                 // "
	_StateKeyEscaped          // "\
	_StateNumberNegative      // -
	_StateNumberZero          // 0
	_StateNumber              // 123
	_StateNumberDotFirstDigit // 123.
	_StateNumberDotDigit      // 123.4
	_StateNumberExpSign       // 123e
	_StateNumberExpFirstDigit // 123e+
	_StateNumberExpDigit      // 123e+1
	_StateTrue                // t
	_StateTrue2               // tr
	_StateTrue3               // tru
	_StateFalse               // f
	_StateFalse2              // fa
	_StateFalse3              // fal
	_StateFalse4              // fals
	_StateNull                // n
	_StateNull2               // nu
	_StateNull3               // nul
	_StateSyntaxError
)

// Our own little implementation of the `error` interface.
type syntaxError string

func (e syntaxError) Error() string {
	return string(e)
}

// Parser state machine.
type Parser struct {
	state int   // current state (one of the _State* constants)
	queue []int // stack of states to resume once the current value ends
	err   error // set when a SyntaxError event has been returned
}
// Parses a byte slice containing JSON data. Returns the number of bytes
// read and an appropriate Event. Call repeatedly, advancing past the
// returned offset, to stream events out of the input.
func (p *Parser) Parse(input []byte) (int, Event) {
	for i := 0; i < len(input); i++ {
		b := input[i]

		// optionally trim leading whitespace
		if p.state < _AllowWhitespace &&
			(b == ' ' || b == '\t' || b == '\n' || b == '\r') {
			continue
		}

		switch p.state {
		case _StateValue:
			switch {
			case b == '{':
				p.state = _StateObjectKeyOrEnd
				return i + 1, ObjectStart
			case b == '[':
				p.state = _StateArrayValueOrEnd
				return i + 1, ArrayStart
			case b == '"':
				p.state = _StateString
				return i + 1, StringStart
			case b == '-':
				p.state = _StateNumberNegative
				return i + 1, NumberStart
			case b == '0':
				p.state = _StateNumberZero
				return i + 1, NumberStart
			case '1' <= b && b <= '9':
				p.state = _StateNumber
				return i + 1, NumberStart
			case b == 't':
				p.state = _StateTrue
				return i + 1, BoolStart
			case b == 'f':
				p.state = _StateFalse
				return i + 1, BoolStart
			case b == 'n':
				p.state = _StateNull
				return i + 1, NullStart
			default:
				return i, p.error(`_StateValue: @todo`)
			}
		case _StateObjectKeyOrEnd:
			if b == '}' {
				p.state = p.next()
				return i + 1, ObjectEnd
			}
			if b != '"' {
				return i, p.error(`_StateObjectKeyOrEnd: @todo`)
			}
			p.state = _StateKey
			return i + 1, KeyStart
		case _StateObjectColon:
			if b != ':' {
				// fix: report the state we are actually in (the message was
				// copy-pasted from _StateObjectKeyOrEnd)
				return i, p.error(`_StateObjectColon: @todo`)
			}
			p.push(_StateObjectCommaOrEnd)
			p.state = _StateValue
		case _StateObjectCommaOrEnd:
			switch b {
			case '}':
				p.state = p.next()
				return i + 1, ObjectEnd
			case ',':
				// fix: a comma inside an object must be followed by another
				// key, not a bare value. The old transition to _StateValue
				// made any object with two or more members unparseable: the
				// second key was emitted as StringStart/StringEnd and the
				// following ':' raised a syntax error. _StateObjectColon
				// pushes _StateObjectCommaOrEnd itself, so no push here.
				// @todo this also tolerates a trailing comma before '}'.
				p.state = _StateObjectKeyOrEnd
			default:
				return i, p.error(`_StateObjectCommaOrEnd: @todo`)
			}
		case _StateArrayValueOrEnd:
			if b == ']' {
				p.state = p.next()
				return i + 1, ArrayEnd
			}
			p.push(_StateArrayCommaOrEnd)
			p.state = _StateValue
			i-- // rewind and let _StateValue do the parsing
		case _StateArrayCommaOrEnd:
			switch b {
			case ']':
				p.state = p.next()
				return i + 1, ArrayEnd
			case ',':
				p.push(_StateArrayCommaOrEnd)
				p.state = _StateValue
			default:
				return i, p.error(`_StateArrayCommaOrEnd: @todo`)
			}
		case _StateStringUnicode, _StateKeyUnicode,
			_StateStringUnicode2, _StateKeyUnicode2,
			_StateStringUnicode3, _StateKeyUnicode3,
			_StateStringUnicode4, _StateKeyUnicode4:
			switch {
			case '0' <= b && b <= '9':
			case 'a' <= b && b <= 'f':
			case 'A' <= b && b <= 'F':
			default:
				return i, p.error(`_StateStringUnicodeX: @todo`)
			}
			// note that _State{String,Key}Unicode4 + 1 == _State{String/Key}
			p.state++
		case _StateString, _StateKey:
			switch {
			case b == '"':
				var ev Event
				if p.state == _StateKey {
					ev = KeyEnd
					p.state = _StateObjectColon
				} else {
					ev = StringEnd
					p.state = p.next()
				}
				return i + 1, ev
			case b == '\\':
				p.state++ // go to _State{String,Key}Escaped
			case b < 0x20:
				return i, p.error(`_StateString: @todo`)
			}
		case _StateStringEscaped, _StateKeyEscaped:
			switch b {
			case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
				p.state-- // back to _State{String,Key}
			case 'u':
				// fix: a "\u" escape inside an object key must resume key
				// parsing. The old code unconditionally jumped to
				// _StateStringUnicode, so the closing quote of such a key
				// emitted StringEnd instead of KeyEnd and skipped the
				// colon state entirely.
				if p.state == _StateKeyEscaped {
					p.state = _StateKeyUnicode
				} else {
					p.state = _StateStringUnicode
				}
			default:
				return i, p.error(`_StateStringEscaped: @todo`)
			}
		case _StateNumberNegative:
			switch {
			case b == '0':
				p.state = _StateNumberZero
			case '1' <= b && b <= '9':
				p.state = _StateNumber
			default:
				return i, p.error(`_StateNumberNegative: @todo`)
			}
		case _StateNumber:
			if '0' <= b && b <= '9' {
				break
			}
			fallthrough
		case _StateNumberZero:
			switch b {
			case '.':
				p.state = _StateNumberDotFirstDigit
			case 'e', 'E':
				p.state = _StateNumberExpSign
			default:
				p.state = p.next()
				return i, NumberEnd // rewind (note: `i` instead of `i + 1`)
			}
		case _StateNumberDotFirstDigit:
			if b < '0' || b > '9' {
				return i, p.error(`_StateNumberDot: @todo`)
			}
			p.state++
		case _StateNumberDotDigit:
			switch {
			case b == 'e', b == 'E':
				p.state = _StateNumberExpSign
			case b < '0' || b > '9':
				return i, p.error(`_StateNumberDotDigit: @todo`)
			}
		case _StateNumberExpSign:
			p.state++
			if b == '+' || b == '-' {
				break
			}
			fallthrough
		case _StateNumberExpFirstDigit:
			if b < '0' || b > '9' {
				return i, p.error(`_StateNumberAfterExp: @todo`)
			}
			p.state++
		case _StateNumberExpDigit:
			if b < '0' || b > '9' {
				p.state = p.next()
				return i + 1, NumberEnd
			}
		case _StateTrue:
			if b != 'r' {
				return i, p.error(`_StateTrue: @todo`)
			}
			p.state++
		case _StateTrue2:
			if b != 'u' {
				return i, p.error(`_StateTrue2: @todo`)
			}
			p.state++
		case _StateTrue3:
			if b != 'e' {
				return i, p.error(`_StateTrue3: @todo`)
			}
			p.state = p.next()
			return i + 1, BoolEnd
		case _StateFalse:
			if b != 'a' {
				return i, p.error(`_StateFalse: @todo`)
			}
			p.state++
		case _StateFalse2:
			if b != 'l' {
				return i, p.error(`_StateFalse2: @todo`)
			}
			p.state++
		case _StateFalse3:
			if b != 's' {
				return i, p.error(`_StateFalse3: @todo`)
			}
			p.state++
		case _StateFalse4:
			if b != 'e' {
				return i, p.error(`_StateFalse4: @todo`)
			}
			p.state = p.next()
			return i + 1, BoolEnd
		case _StateNull:
			if b != 'u' {
				return i, p.error(`_StateNull: @todo`)
			}
			p.state++
		case _StateNull2:
			if b != 'l' {
				return i, p.error(`_StateNull2: @todo`)
			}
			p.state++
		case _StateNull3:
			if b != 'l' {
				return i, p.error(`_StateNull3: @todo`)
			}
			p.state = p.next()
			return i + 1, NullEnd
		case _StateDone:
			return i, p.error(`_StateDone: @todo`)
		default:
			panic(`invalid state`)
		}
	}

	return len(input), None
}
// Informs the parser not to expect any further input. Returns
// pending NumberEnd events if there are any, or a SyntaxError
// if EOF was not expected -- otherwise None.
func (p *Parser) Eof() Event {
	if p.state == _StateDone {
		return None
	}
	switch p.state {
	case _StateNumberZero, _StateNumber, _StateNumberDotDigit, _StateNumberExpDigit:
		// a number may legitimately be terminated by end of input
		p.state = _StateDone
		return NumberEnd
	}
	return p.error(`.Eof(): @todo`)
}
// Pops the most recently pushed state off the parser's queue, or
// _StateDone when the queue is empty (nothing left to parse but EOF).
func (p *Parser) next() int {
	if len(p.queue) == 0 {
		return _StateDone
	}
	last := len(p.queue) - 1
	state := p.queue[last]
	p.queue = p.queue[:last]
	return state
}

// Saves a state to resume once the value currently being parsed ends.
func (p *Parser) push(state int) {
	p.queue = append(p.queue, state)
}

// Records a syntax error on the parser and reports it as a SyntaxError event.
func (p *Parser) error(message string) Event {
	p.err = syntaxError(message)
	return SyntaxError
}
|
/*
(c) Copyright [2021] Hewlett Packard Enterprise Development LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ov
package ov
import (
"encoding/json"
"errors"
"fmt"
"os"
"github.com/HewlettPackard/oneview-golang/rest"
"github.com/HewlettPackard/oneview-golang/utils"
"github.com/docker/machine/libmachine/log"
)
// FirmwareOption structure for firware settings
type FirmwareOption struct {
	ComplianceControl        string        `json:"complianceControl,omitempty"`        // complianceControl
	ConsistencyState         string        `json:"consistencyState,omitempty"`         //Consistency state of the firmware component.
	FirmwareActivationType   string        `json:"firmwareActivationType,omitempty"`   // when/how the firmware update is activated
	FirmwareBaselineUri      utils.Nstring `json:"firmwareBaselineUri,omitempty"`      // "firmwareBaselineUri": null,
	FirmwareInstallType      string        `json:"firmwareInstallType,omitempty"`      // Specifies the way a Service Pack for ProLiant (SPP) is installed. This field is used if the 'manageFirmware' field is true. Possible values are
	FirmwareScheduleDateTime string        `json:"firmwareScheduleDateTime,omitempty"` // Identifies the date and time the Service Pack for Proliant (SPP) will be activated.
	ForceInstallFirmware     bool          `json:"forceInstallFirmware"`               // "forceInstallFirmware": false,
	ManageFirmware           bool          `json:"manageFirmware"`                     // "manageFirmware": false
	ReapplyState             string        `json:"reapplyState,omitempty"`             //Current reapply state of the firmware component.
}

// BootModeOption mode option
type BootModeOption struct {
	ComplianceControl string        `json:"complianceControl,omitempty"` // complianceControl
	ManageMode        *bool         `json:"manageMode"`                  // "manageMode": true,
	Mode              string        `json:"mode,omitempty"`              // "mode": "BIOS",
	PXEBootPolicy     utils.Nstring `json:"pxeBootPolicy,omitempty"`     // "pxeBootPolicy": null
	SecureBoot        string        `json:"secureBoot,omitempty"`        // Enable or disable UEFI Secure Boot
}

// BootManagement management
type BootManagement struct {
	ComplianceControl string   `json:"complianceControl,omitempty"` // complianceControl
	ManageBoot        bool     `json:"manageBoot,omitempty"`        // "manageBoot": true,
	Order             []string `json:"order,omitempty"`             // "order": ["CD","USB","HardDisk","PXE"]
}

// BiosSettings structure: a single BIOS setting override as an id/value pair.
type BiosSettings struct {
	ID    string `json:"id,omitempty"`    // id
	Value string `json:"value,omitempty"` // value
}

// BiosOption - bios options
type BiosOption struct {
	ComplianceControl  string         `json:"complianceControl,omitempty"`  // complianceControl
	ConsistencyState   string         `json:"consistencyState,omitempty"`   //Consistency state of the BIOS component
	ManageBios         *bool          `json:"manageBios"`                   // "manageBios": false,
	OverriddenSettings []BiosSettings `json:"overriddenSettings,omitempty"` // "overriddenSettings": []
	ReapplyState       string         `json:"reapplyState,omitempty"`       //Current reapply state of the BIOS component.
}

// ConnectionSettings groups the profile's network connections and whether
// they are managed by the profile.
type ConnectionSettings struct {
	ComplianceControl string       `json:"complianceControl,omitempty"` // "complianceControl": "Checked",
	ManageConnections bool         `json:"manageConnections,omitempty"` // "manageConnections": false,
	Connections       []Connection `json:"connections,omitempty"`       // downlink connections managed by this profile
	ReapplyState      string       `json:"reapplyState,omitempty"`      //Current reapply state of the connection downlinks associated with the server profile
}

// Options describes a single JSON-Patch style operation (op/path/value),
// e.g. used when patching a profile's template compliance.
type Options struct {
	Op    string `json:"op,omitempty"`    // "op": "replace",
	Path  string `json:"path,omitempty"`  // "path": "/templateCompliance",
	Value string `json:"value,omitempty"` // "value": "Compliant",
}
type Servers struct {
EnclosureGroupName string `json:"enclosureGroupName, omitempty"`
EnclosureName string `json:"enclosureGroupName, omitempty"`
EnclosureUri string `json:"enclosureGroupName, omitempty"`
EnclosureBay int `json:"enclosureBay, omitempty"`
ServerHardwareName string `json:"serverHardwareName, omitempty"`
ServerHardwareUri string `json:"serverHardwareUri, omitempty"`
ServerHardwareTypeName string `json:"serverHardwareTypeName, omitempty"`
ServerHardwareTypeUri string `json:"serverHardwareTypeUri, omitempty"`
EnclosureGroupUri string `json:"enclosuregroupUri, omitempty"`
PowerState string `json:"powerState, omitempty"`
FormFactor []string `json:"formFactor, omitempty"`
ServerHardwareStatus string `json:"serverHardwareStatus, omitempty"`
}
// AvailableTarget lists the server hardware targets available for profile
// assignment.
//
// Fix: the original tags contained a space before "omitempty"
// (", omitempty"), which encoding/json does not recognize as an option,
// silently disabling omitempty; corrected below.
type AvailableTarget struct {
	Type    string    `json:"type,omitempty"`
	Members []Servers `json:"targets,omitempty"`
}
// ManagementProcessor describes management-processor (e.g. iLO) settings
// managed by the server profile.
type ManagementProcessor struct {
	ComplianceControl string       `json:"complianceControl,omitempty"` // complianceControl
	ManageMp          bool         `json:"manageMp,omitempty"`          // whether the profile manages the management processor
	MpSettings        []mpSettings `json:"mpSettings,omitempty"`        // individual management-processor setting entries
	ReapplyState      string       `json:"reapplyState,omitempty"`      // current reapply state of the management-processor component
}
type mpSettings struct {
Args string `json:"args, omitempty"`
SettingType string `json:"settingType, omitempty"`
}
// ServerProfile - server profile object for ov
//
// Mirrors the OneView REST server-profiles resource; field tags map
// directly onto the API's JSON attributes, so tag names must not change
// without a corresponding API version change.
type ServerProfile struct {
	Affinity                   string               `json:"affinity,omitempty"`                   // "affinity": "Bay",
	AssociatedServer           utils.Nstring        `json:"associatedServer,omitempty"`           // "associatedServer": null,
	Bios                       *BiosOption          `json:"bios,omitempty"`                       // "bios": { },
	Boot                       BootManagement       `json:"boot,omitempty"`                       // "boot": { },
	BootMode                   BootModeOption       `json:"bootMode,omitempty"`                   // "bootMode": {},
	Category                   string               `json:"category,omitempty"`                   // "category": "server-profiles",
	ConnectionSettings         ConnectionSettings   `json:"connectionSettings,omitempty"`         // network connections managed by this profile
	Created                    string               `json:"created,omitempty"`                    // "created": "20150831T154835.250Z",
	Description                string               `json:"description,omitempty"`                // "description": "Docker Machine Bay 16",
	ETAG                       string               `json:"eTag,omitempty"`                       // "eTag": "1441036118675/8"
	EnclosureBay               int                  `json:"enclosureBay,omitempty"`               // "enclosureBay": 16,
	EnclosureGroupURI          utils.Nstring        `json:"enclosureGroupUri,omitempty"`          // "enclosureGroupUri": "/rest/enclosure-groups/56ad0069-8362-42fd-b4e3-f5c5a69af039",
	EnclosureURI               utils.Nstring        `json:"enclosureUri,omitempty"`               // "enclosureUri": "/rest/enclosures/092SN51207RR",
	Firmware                   FirmwareOption       `json:"firmware,omitempty"`                   // "firmware": { },
	HideUnusedFlexNics         bool                 `json:"hideUnusedFlexNics"`                   // "hideUnusedFlexNics": false,
	InProgress                 bool                 `json:"inProgress,omitempty"`                 // "inProgress": false,
	InitialScopeUris           []utils.Nstring      `json:"initialScopeUris,omitempty"`           // "initialScopeUris":[],
	IscsiInitiatorName         string               `json:"iscsiInitiatorName,omitempty"`         //When iscsiInitatorNameType is set to UserDefined
	IscsiInitiatorNameType     string               `json:"iscsiInitiatorNameType,omitempty"`     //When set to UserDefined, the value of iscsiInitatorName is used as provided
	LocalStorage               LocalStorageOptions  `json:"localStorage,omitempty"`               // "localStorage": {},
	MACType                    string               `json:"macType,omitempty"`                    // "macType": "Physical",
	ManagementProcessor        *ManagementProcessor `json:"managementProcessor,omitempty"`        // management-processor (iLO) settings
	Modified                   string               `json:"modified,omitempty"`                   // "modified": "20150902T175611.657Z",
	Name                       string               `json:"name,omitempty"`                       // "name": "Server_Profile_scs79",
	OSDeploymentSettings       OSDeploymentSettings `json:"osDeploymentSettings,omitempty"`       // "osDeploymentSettings": {...},
	ProfileUUID                utils.Nstring        `json:"profileUUID,omitempty"`                //The automatically generated 36-byte Universally Unique ID of the server profile.
	RefreshState               string               `json:"refreshState,omitempty"`               //Current refresh State of this Server Profile
	SanStorage                 SanStorageOptions    `json:"sanStorage,omitempty"`                 // "sanStorage": {},
	ScopesUri                  utils.Nstring        `json:"scopesUri,omitempty"`                  // "scopesUri": "/rest/scopes/resources/rest/server-profiles/DB7726F7-F601-4EA8-B4A6-D1EE1B32C07C",
	SerialNumber               utils.Nstring        `json:"serialNumber,omitempty"`               // "serialNumber": "2M25090RMW",
	SerialNumberType           string               `json:"serialNumberType,omitempty"`           // "serialNumberType": "Physical",
	ServerHardwareReapplyState string               `json:"serverHardwareReapplyState,omitempty"` //Current reapply state of the server that is associated with this server profile
	ServerHardwareTypeURI      utils.Nstring        `json:"serverHardwareTypeUri,omitempty"`      // "serverHardwareTypeUri": "/rest/server-hardware-types/DB7726F7-F601-4EA8-B4A6-D1EE1B32C07C",
	ServerHardwareURI          utils.Nstring        `json:"serverHardwareUri,omitempty"`          // "serverHardwareUri": "/rest/server-hardware/30373237-3132-4D32-3235-303930524D57",
	ServerProfileDescription   string               `json:"serverProfileDescription,omitempty"`   // "serverProfileDescription":
	ServerProfileTemplateURI   utils.Nstring        `json:"serverProfileTemplateUri,omitempty"`   // undocmented option
	ServiceManager             string               `json:"serviceManager,omitempty"`             //Name of a service manager that is designated owner of the profile
	State                      string               `json:"state,omitempty"`                      // "state": "Normal",
	Status                     string               `json:"status,omitempty"`                     // "status": "Critical",
	TaskURI                    utils.Nstring        `json:"taskUri,omitempty"`                    // "taskUri": "/rest/tasks/6F0DF438-7D30-41A2-A36D-62AB866BC7E8",
	TemplateCompliance         string               `json:"templateCompliance,omitempty"`         // v2 Compliant, NonCompliant, Unknown
	Type                       string               `json:"type,omitempty"`                       // "type": "ServerProfileV4",
	URI                        utils.Nstring        `json:"uri,omitempty"`                        // "uri": "/rest/server-profiles/9979b3a4-646a-4c3e-bca6-80ca0b403a93",
	UUID                       utils.Nstring        `json:"uuid,omitempty"`                       // "uuid": "30373237-3132-4D32-3235-303930524D57",
	WWNType                    string               `json:"wwnType,omitempty"`                    // "wwnType": "Physical",
}
// GetConnectionByName gets the connection from a profile with a given name.
// Returns an error when no connection in the profile's ConnectionSettings
// carries that name.
func (s ServerProfile) GetConnectionByName(name string) (Connection, error) {
	for _, conn := range s.ConnectionSettings.Connections {
		if conn.Name == name {
			return conn, nil
		}
	}
	var empty Connection
	return empty, errors.New("Error connection not found on server profile, please try a different connection name.")
}
// ServerProfileList a list of ServerProfile objects
// TODO: missing properties, need to think how we can make a higher lvl structure like an OVList
// Then things like Members are inherited
type ServerProfileList struct {
	Total       int             `json:"total,omitempty"`       // "total": 1,
	Count       int             `json:"count,omitempty"`       // "count": 1,
	Start       int             `json:"start,omitempty"`       // "start": 0,
	PrevPageURI utils.Nstring   `json:"prevPageUri,omitempty"` // "prevPageUri": null,
	NextPageURI utils.Nstring   `json:"nextPageUri,omitempty"` // "nextPageUri": null,
	URI         utils.Nstring   `json:"uri,omitempty"`         // "uri": "/rest/server-profiles?filter=serialNumber%20matches%20%272M25090RMW%27&sort=name:asc"
	Members     []ServerProfile `json:"members,omitempty"`     // "members":[]
}
// GetProfileByName gets a server profile by name.
// Returns the zero-value ServerProfile (and a nil error) when no profile
// matches; returns early on a transport/decode error instead of handing the
// caller a possibly partial result as the original did.
func (c *OVClient) GetProfileByName(name string) (ServerProfile, error) {
	var profile ServerProfile
	profiles, err := c.GetProfiles("", "", fmt.Sprintf("name matches '%s'", name), "name:asc", "")
	if err != nil {
		return profile, err
	}
	if profiles.Total > 0 {
		return profiles.Members[0], nil
	}
	return profile, nil
}
// GetProfileBySN accepts a serial number and returns the matching server
// profile. Returns the zero-value ServerProfile (and a nil error) when no
// profile matches; returns early on a transport/decode error.
func (c *OVClient) GetProfileBySN(serialnum string) (ServerProfile, error) {
	var profile ServerProfile
	profiles, err := c.GetProfiles("", "", fmt.Sprintf("serialNumber matches '%s'", serialnum), "name:asc", "")
	if err != nil {
		return profile, err
	}
	if profiles.Total > 0 {
		return profiles.Members[0], nil
	}
	return profile, nil
}
// GetProfiles - get a server profiles. All of start/count/filter/sort/scopeUris
// are optional; empty strings are omitted from the query string.
func (c *OVClient) GetProfiles(start string, count string, filter string, sort string, scopeUris string) (ServerProfileList, error) {
	const uri = "/rest/server-profiles"
	var profiles ServerProfileList

	// Collect only the non-empty query parameters.
	query := make(map[string]interface{})
	for key, value := range map[string]string{
		"filter":    filter,
		"sort":      sort,
		"start":     start,
		"count":     count,
		"scopeUris": scopeUris,
	} {
		if value != "" {
			query[key] = value
		}
	}

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())
	// Setup query
	if len(query) > 0 {
		c.SetQueryString(query)
	}

	body, err := c.RestAPICall(rest.GET, uri, nil)
	if err != nil {
		return profiles, err
	}
	log.Debugf("GetProfiles %s", body)
	if err := json.Unmarshal([]byte(body), &profiles); err != nil {
		return profiles, err
	}
	return profiles, nil
}
// GetProfileByURI - get the profile from a uri.
func (c *OVClient) GetProfileByURI(uri utils.Nstring) (ServerProfile, error) {
	var profile ServerProfile

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	body, err := c.RestAPICall(rest.GET, uri.String(), nil)
	if err != nil {
		return profile, err
	}
	log.Debugf("GetProfileByURI %s", body)
	if err := json.Unmarshal([]byte(body), &profile); err != nil {
		return profile, err
	}
	return profile, nil
}
// GetAvailableServers - To fetch available server hardwares. Reports whether
// the hardware at ServerHardwareUri appears in the available-targets list.
func (c *OVClient) GetAvailableServers(ServerHardwareUri string) (bool, error) {
	const availableTargetsURI = "/rest/server-profiles/available-targets"
	var targets AvailableTarget

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	body, err := c.RestAPICall(rest.GET, availableTargetsURI, nil)
	if err != nil {
		return false, err
	}
	if err := json.Unmarshal([]byte(body), &targets); err != nil {
		return false, err
	}
	for _, member := range targets.Members {
		if member.ServerHardwareUri == ServerHardwareUri {
			return true, nil
		}
	}
	return false, nil
}
// SubmitNewProfile - submit new profile template. Verifies the requested
// hardware is available, powers it off, POSTs the profile, and waits for the
// resulting async task to complete.
func (c *OVClient) SubmitNewProfile(p ServerProfile) (err error) {
	log.Infof("Initializing creation of server profile for %s.", p.Name)
	var (
		uri    = "/rest/server-profiles"
		server ServerHardware
		t      *Task
	)
	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	// When a target server is requested, make sure it is in the list of
	// available hardware before attempting the assignment.
	if p.ServerHardwareURI != "" {
		isHardwareAvailable, herr := c.GetAvailableServers(p.ServerHardwareURI.String())
		if herr != nil || !isHardwareAvailable {
			log.Errorf("Error getting available Hardware: %s", p.ServerHardwareURI.String())
			if herr != nil {
				log.Warnf("Error: %s", herr)
			}
			// NOTE(review): exiting the process from library code is drastic;
			// kept for backward compatibility, but returning an error would
			// be preferable.
			os.Exit(1)
		}
	}
	// Look the hardware up once; the original fetched it twice (the first
	// result was discarded because of a shadowed err).
	server, err = c.GetServerHardwareByUri(p.ServerHardwareURI)
	if err != nil {
		log.Warnf("Problem getting server hardware, %s", err)
	}
	// power off the server so that we can add to SP
	if server.Name != "" {
		server.PowerOff()
	}

	data, err := c.RestAPICall(rest.POST, uri, p)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting new profile request: %s", err)
		return err
	}
	log.Debugf("Response New Profile %s", data)
	if err := json.Unmarshal([]byte(data), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	// Block until the async create task finishes.
	return t.Wait()
}
// CreateProfileFromTemplate creates a new server profile named name from the
// given profile template and assigns it to the given server hardware (blade).
func (c *OVClient) CreateProfileFromTemplate(name string, template ServerProfile, blade ServerHardware) error {
	log.Debugf("TEMPLATE : %+v\n", template)

	//GET on /rest/server-profile-templates/{id}new-profile
	log.Debugf("getting profile by URI %+v, v2", template.URI)
	new_template, err := c.GetProfileByURI(template.URI)
	if err != nil {
		return err
	}

	// Pick the profile payload type matching the negotiated API version
	// (switch replaces the original long if/else-if chain).
	switch {
	case c.APIVersion == 200:
		new_template.Type = "ServerProfileV5"
	case c.APIVersion == 300:
		new_template.Type = "ServerProfileV6"
	case c.APIVersion == 500:
		new_template.Type = "ServerProfileV7"
	case c.APIVersion == 600:
		new_template.Type = "ServerProfileV8"
	case c.APIVersion == 800:
		new_template.Type = "ServerProfileV9"
	case c.APIVersion == 1000:
		new_template.Type = "ServerProfileV10"
	case c.APIVersion == 1200:
		new_template.Type = "ServerProfileV11"
	case c.APIVersion >= 1600:
		new_template.Type = "ServerProfileV12"
	}

	new_template.ServerProfileTemplateURI = template.URI // create relationship
	new_template.ConnectionSettings = ConnectionSettings{
		Connections: template.ConnectionSettings.Connections,
	}
	log.Debugf("new_template -> %+v", new_template)
	new_template.ServerHardwareURI = blade.URI
	new_template.Description += " " + name
	new_template.Name = name
	log.Debugf("new_template -> %+v", new_template)

	return c.SubmitNewProfile(new_template)
}
// SubmitDeleteProfile posts an asynchronous delete for the given server
// profile and returns the task that tracks it. A profile without a URI is a
// no-op: the task is marked done and returned.
func (c *OVClient) SubmitDeleteProfile(p ServerProfile) (t *Task, err error) {
	uri := p.URI.String()

	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	// Nothing to delete without a URI.
	if uri == "" {
		log.Warn("Unable to post delete, no uri found.")
		t.TaskIsDone = true
		return t, err
	}

	data, err := c.RestAPICall(rest.DELETE, uri, nil)
	if err != nil {
		log.Errorf("Error submitting new profile request: %s", err)
		t.TaskIsDone = true
		return t, err
	}
	log.Debugf("Response delete profile %s", data)
	if uerr := json.Unmarshal(data, &t); uerr != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", uerr)
		return t, uerr
	}
	return t, err
}
// DeleteProfile unassigns the server and removes the named profile from the
// system. A missing profile is logged and skipped, not treated as an error.
func (c *OVClient) DeleteProfile(name string) error {
	// get the profile for this server
	var (
		servernamemsg string
		server        ServerHardware
		profile       ServerProfile
		err           error
	)

	servernamemsg = "'no server'"
	profile, err = c.GetProfileByName(name)
	if err != nil {
		return err
	}
	if profile.Name == "" {
		log.Infof("Profile could not be found to delete, %s, skipping delete ...", name)
		return nil
	}

	if profile.ServerHardwareURI != "" {
		server, err = c.GetServerHardwareByUri(profile.ServerHardwareURI)
		if err != nil {
			log.Warnf("Problem getting server hardware, %s", err)
		} else if server.Name != "" {
			servernamemsg = server.Name
		}
	}
	log.Infof("Delete server profile %s from oneview, %s will be unassigned.", profile.Name, servernamemsg)

	// power off the server so that we can remove it
	if server.Name != "" {
		server.PowerOff()
	}

	// submit delete task; the original silently discarded this error
	t, err := c.SubmitDeleteProfile(profile)
	if err != nil {
		return err
	}
	// check for task execution (newer API versions do not wait)
	if c.APIVersion < 1000 {
		if err := t.Wait(); err != nil {
			return err
		}
	}
	return nil
}
// UpdateServerProfile PUTs the given profile to its own URI and waits for the
// resulting async task to complete.
func (c *OVClient) UpdateServerProfile(p ServerProfile) error {
	log.Infof("Initializing update of server profile for %s.", p.Name)
	uri := p.URI.String()

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	var t *Task
	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	response, err := c.RestAPICall(rest.PUT, uri, p)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting update server profile request: %s", err)
		return err
	}
	log.Debugf("Response update ServerProfile %s", response)
	if err := json.Unmarshal([]byte(response), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	return t.Wait()
}
// PatchServerProfile PATCHes the given profile with the supplied patch
// operations and waits for the resulting async task to complete.
func (c *OVClient) PatchServerProfile(p ServerProfile, request []Options) error {
	log.Infof("Initializing update of server profile for %s.", p.Name)
	uri := p.URI.String()

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	var t *Task
	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, request)
	log.Debugf("task -> %+v", t)

	response, err := c.RestAPICall(rest.PATCH, uri, request)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting update server profile request: %s", err)
		return err
	}
	log.Debugf("Response update ServerProfile %s", response)
	if err := json.Unmarshal([]byte(response), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	return t.Wait()
}
// updated struct for mpSettings
/*
(c) Copyright [2021] Hewlett Packard Enterprise Development LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ov
package ov
import (
"encoding/json"
"errors"
"fmt"
"os"
"github.com/HewlettPackard/oneview-golang/rest"
"github.com/HewlettPackard/oneview-golang/utils"
"github.com/docker/machine/libmachine/log"
)
// FirmwareOption structure for firmware settings
type FirmwareOption struct {
	ComplianceControl        string        `json:"complianceControl,omitempty"`        // complianceControl
	ConsistencyState         string        `json:"consistencyState,omitempty"`         //Consistency state of the firmware component.
	FirmwareActivationType   string        `json:"firmwareActivationType,omitempty"`   // how/when the firmware is activated
	FirmwareBaselineUri      utils.Nstring `json:"firmwareBaselineUri,omitempty"`      // "firmwareBaselineUri": null,
	FirmwareInstallType      string        `json:"firmwareInstallType,omitempty"`      // Specifies the way a Service Pack for ProLiant (SPP) is installed. This field is used if the 'manageFirmware' field is true. Possible values are
	FirmwareScheduleDateTime string        `json:"firmwareScheduleDateTime,omitempty"` // Identifies the date and time the Service Pack for Proliant (SPP) will be activated.
	ForceInstallFirmware     bool          `json:"forceInstallFirmware"`               // "forceInstallFirmware": false,
	ManageFirmware           bool          `json:"manageFirmware"`                     // "manageFirmware": false
	ReapplyState             string        `json:"reapplyState,omitempty"`             //Current reapply state of the firmware component.
}
// BootModeOption mode option (boot mode configuration of a server profile)
type BootModeOption struct {
	ComplianceControl string        `json:"complianceControl,omitempty"` // complianceControl
	ManageMode        *bool         `json:"manageMode"`                  // "manageMode": true, (pointer so an unset value can be distinguished from false)
	Mode              string        `json:"mode,omitempty"`              // "mode": "BIOS",
	PXEBootPolicy     utils.Nstring `json:"pxeBootPolicy,omitempty"`     // "pxeBootPolicy": null
	SecureBoot        string        `json:"secureBoot,omitempty"`        // Enable or disable UEFI Secure Boot
}
// BootManagement management (boot-order configuration of a server profile)
type BootManagement struct {
	ComplianceControl string   `json:"complianceControl,omitempty"` // complianceControl
	ManageBoot        bool     `json:"manageBoot,omitempty"`        // "manageBoot": true,
	Order             []string `json:"order,omitempty"`             // "order": ["CD","USB","HardDisk","PXE"]
}
// BiosSettings structure: a single BIOS setting override as an id/value pair
type BiosSettings struct {
	ID    string `json:"id,omitempty"`    // id
	Value string `json:"value,omitempty"` // value
}
// BiosOption - bios options
type BiosOption struct {
	ComplianceControl  string         `json:"complianceControl,omitempty"` // complianceControl
	ConsistencyState   string         `json:"consistencyState,omitempty"`  //Consistency state of the BIOS component
	ManageBios         *bool          `json:"manageBios"`                  // "manageBios": false, (pointer so an unset value can be distinguished from false)
	OverriddenSettings []BiosSettings `json:"overriddenSettings,omitempty"` // "overriddenSettings": []
	ReapplyState       string         `json:"reapplyState,omitempty"`      //Current reapply state of the BIOS component.
}
// ConnectionSettings holds the network connections of a server profile.
type ConnectionSettings struct {
	ComplianceControl string       `json:"complianceControl,omitempty"` // "complianceControl": "Checked",
	ManageConnections bool         `json:"manageConnections,omitempty"` // "manageConnections": false,
	Connections       []Connection `json:"connections,omitempty"`
	ReapplyState      string       `json:"reapplyState,omitempty"` //Current reapply state of the connection downlinks associated with the server profile
}
// Options is a single JSON-patch style operation (op/path/value) used by
// PatchServerProfile.
type Options struct {
	Op    string `json:"op,omitempty"`    // "op": "replace",
	Path  string `json:"path,omitempty"`  // "path": "/templateCompliance",
	Value string `json:"value,omitempty"` // "value": "Compliant",
}
type Servers struct {
EnclosureGroupName string `json:"enclosureGroupName, omitempty"`
EnclosureName string `json:"enclosureGroupName, omitempty"`
EnclosureUri string `json:"enclosureGroupName, omitempty"`
EnclosureBay int `json:"enclosureBay, omitempty"`
ServerHardwareName string `json:"serverHardwareName, omitempty"`
ServerHardwareUri string `json:"serverHardwareUri, omitempty"`
ServerHardwareTypeName string `json:"serverHardwareTypeName, omitempty"`
ServerHardwareTypeUri string `json:"serverHardwareTypeUri, omitempty"`
EnclosureGroupUri string `json:"enclosuregroupUri, omitempty"`
PowerState string `json:"powerState, omitempty"`
FormFactor []string `json:"formFactor, omitempty"`
ServerHardwareStatus string `json:"serverHardwareStatus, omitempty"`
}
// AvailableTarget is the response envelope of
// /rest/server-profiles/available-targets.
//
// Fix: the original tags contained a space before "omitempty"
// (`json:"type, omitempty"`), which makes encoding/json ignore the option.
type AvailableTarget struct {
	Type    string    `json:"type,omitempty"`
	Members []Servers `json:"targets,omitempty"`
}
// ManagementProcessor holds the management-processor (iLO) settings of a
// server profile.
type ManagementProcessor struct {
	ComplianceControl string       `json:"complianceControl,omitempty"` // complianceControl
	ManageMp          bool         `json:"manageMp,omitempty"`
	MpSettings        []mpSettings `json:"mpSettings,omitempty"`
	ReapplyState      string       `json:"reapplyState,omitempty"`
}
type mpSettings struct {
Args map[string]interface{} `json:"args, omitempty"`
SettingType string `json:"settingType, omitempty"`
}
// ServerProfile - server profile object for ov
type ServerProfile struct {
	Affinity                   string               `json:"affinity,omitempty"`                   // "affinity": "Bay",
	AssociatedServer           utils.Nstring        `json:"associatedServer,omitempty"`           // "associatedServer": null,
	Bios                       *BiosOption          `json:"bios,omitempty"`                       // "bios": { },
	Boot                       BootManagement       `json:"boot,omitempty"`                       // "boot": { },
	BootMode                   BootModeOption       `json:"bootMode,omitempty"`                   // "bootMode": {},
	Category                   string               `json:"category,omitempty"`                   // "category": "server-profiles",
	ConnectionSettings         ConnectionSettings   `json:"connectionSettings,omitempty"`
	Created                    string               `json:"created,omitempty"`                    // "created": "20150831T154835.250Z",
	Description                string               `json:"description,omitempty"`                // "description": "Docker Machine Bay 16",
	ETAG                       string               `json:"eTag,omitempty"`                       // "eTag": "1441036118675/8"
	EnclosureBay               int                  `json:"enclosureBay,omitempty"`               // "enclosureBay": 16,
	EnclosureGroupURI          utils.Nstring        `json:"enclosureGroupUri,omitempty"`          // "enclosureGroupUri": "/rest/enclosure-groups/56ad0069-8362-42fd-b4e3-f5c5a69af039",
	EnclosureURI               utils.Nstring        `json:"enclosureUri,omitempty"`               // "enclosureUri": "/rest/enclosures/092SN51207RR",
	Firmware                   FirmwareOption       `json:"firmware,omitempty"`                   // "firmware": { },
	HideUnusedFlexNics         bool                 `json:"hideUnusedFlexNics"`                   // "hideUnusedFlexNics": false,
	InProgress                 bool                 `json:"inProgress,omitempty"`                 // "inProgress": false,
	InitialScopeUris           []utils.Nstring      `json:"initialScopeUris,omitempty"`           // "initialScopeUris":[],
	IscsiInitiatorName         string               `json:"iscsiInitiatorName,omitempty"`         //When iscsiInitatorNameType is set to UserDefined
	IscsiInitiatorNameType     string               `json:"iscsiInitiatorNameType,omitempty"`     //When set to UserDefined, the value of iscsiInitatorName is used as provided
	LocalStorage               LocalStorageOptions  `json:"localStorage,omitempty"`               // "localStorage": {},
	MACType                    string               `json:"macType,omitempty"`                    // "macType": "Physical",
	ManagementProcessor        *ManagementProcessor `json:"managementProcessor,omitempty"`        // management-processor (iLO) settings
	Modified                   string               `json:"modified,omitempty"`                   // "modified": "20150902T175611.657Z",
	Name                       string               `json:"name,omitempty"`                       // "name": "Server_Profile_scs79",
	OSDeploymentSettings       OSDeploymentSettings `json:"osDeploymentSettings,omitempty"`       // "osDeploymentSettings": {...},
	ProfileUUID                utils.Nstring        `json:"profileUUID,omitempty"`                //The automatically generated 36-byte Universally Unique ID of the server profile.
	RefreshState               string               `json:"refreshState,omitempty"`               //Current refresh State of this Server Profile
	SanStorage                 SanStorageOptions    `json:"sanStorage,omitempty"`                 // "sanStorage": {},
	ScopesUri                  utils.Nstring        `json:"scopesUri,omitempty"`                  // "scopesUri": "/rest/scopes/resources/rest/server-profiles/DB7726F7-F601-4EA8-B4A6-D1EE1B32C07C",
	SerialNumber               utils.Nstring        `json:"serialNumber,omitempty"`               // "serialNumber": "2M25090RMW",
	SerialNumberType           string               `json:"serialNumberType,omitempty"`           // "serialNumberType": "Physical",
	ServerHardwareReapplyState string               `json:"serverHardwareReapplyState,omitempty"` //Current reapply state of the server that is associated with this server profile
	ServerHardwareTypeURI      utils.Nstring        `json:"serverHardwareTypeUri,omitempty"`      // "serverHardwareTypeUri": "/rest/server-hardware-types/DB7726F7-F601-4EA8-B4A6-D1EE1B32C07C",
	ServerHardwareURI          utils.Nstring        `json:"serverHardwareUri,omitempty"`          // "serverHardwareUri": "/rest/server-hardware/30373237-3132-4D32-3235-303930524D57",
	ServerProfileDescription   string               `json:"serverProfileDescription,omitempty"`   // "serverProfileDescription":
	ServerProfileTemplateURI   utils.Nstring        `json:"serverProfileTemplateUri,omitempty"`   // undocumented option
	ServiceManager             string               `json:"serviceManager,omitempty"`             //Name of a service manager that is designated owner of the profile
	State                      string               `json:"state,omitempty"`                      // "state": "Normal",
	Status                     string               `json:"status,omitempty"`                     // "status": "Critical",
	TaskURI                    utils.Nstring        `json:"taskUri,omitempty"`                    // "taskUri": "/rest/tasks/6F0DF438-7D30-41A2-A36D-62AB866BC7E8",
	TemplateCompliance         string               `json:"templateCompliance,omitempty"`         // v2 Compliant, NonCompliant, Unknown
	Type                       string               `json:"type,omitempty"`                       // "type": "ServerProfileV4",
	URI                        utils.Nstring        `json:"uri,omitempty"`                        // "uri": "/rest/server-profiles/9979b3a4-646a-4c3e-bca6-80ca0b403a93",
	UUID                       utils.Nstring        `json:"uuid,omitempty"`                       // "uuid": "30373237-3132-4D32-3235-303930524D57",
	WWNType                    string               `json:"wwnType,omitempty"`                    // "wwnType": "Physical",
}
// GetConnectionByName gets the connection from a profile with a given name.
// Returns an error when no connection in the profile's ConnectionSettings
// carries that name.
func (s ServerProfile) GetConnectionByName(name string) (Connection, error) {
	for _, conn := range s.ConnectionSettings.Connections {
		if conn.Name == name {
			return conn, nil
		}
	}
	var empty Connection
	return empty, errors.New("Error connection not found on server profile, please try a different connection name.")
}
// ServerProfileList a list of ServerProfile objects
// TODO: missing properties, need to think how we can make a higher lvl structure like an OVList
// Then things like Members are inherited
type ServerProfileList struct {
	Total       int             `json:"total,omitempty"`       // "total": 1,
	Count       int             `json:"count,omitempty"`       // "count": 1,
	Start       int             `json:"start,omitempty"`       // "start": 0,
	PrevPageURI utils.Nstring   `json:"prevPageUri,omitempty"` // "prevPageUri": null,
	NextPageURI utils.Nstring   `json:"nextPageUri,omitempty"` // "nextPageUri": null,
	URI         utils.Nstring   `json:"uri,omitempty"`         // "uri": "/rest/server-profiles?filter=serialNumber%20matches%20%272M25090RMW%27&sort=name:asc"
	Members     []ServerProfile `json:"members,omitempty"`     // "members":[]
}
// GetProfileByName gets a server profile by name.
// Returns the zero-value ServerProfile (and a nil error) when no profile
// matches; returns early on a transport/decode error instead of handing the
// caller a possibly partial result as the original did.
func (c *OVClient) GetProfileByName(name string) (ServerProfile, error) {
	var profile ServerProfile
	profiles, err := c.GetProfiles("", "", fmt.Sprintf("name matches '%s'", name), "name:asc", "")
	if err != nil {
		return profile, err
	}
	if profiles.Total > 0 {
		return profiles.Members[0], nil
	}
	return profile, nil
}
// GetProfileBySN accepts a serial number and returns the matching server
// profile. Returns the zero-value ServerProfile (and a nil error) when no
// profile matches; returns early on a transport/decode error.
func (c *OVClient) GetProfileBySN(serialnum string) (ServerProfile, error) {
	var profile ServerProfile
	profiles, err := c.GetProfiles("", "", fmt.Sprintf("serialNumber matches '%s'", serialnum), "name:asc", "")
	if err != nil {
		return profile, err
	}
	if profiles.Total > 0 {
		return profiles.Members[0], nil
	}
	return profile, nil
}
// GetProfiles - get a server profiles. All of start/count/filter/sort/scopeUris
// are optional; empty strings are omitted from the query string.
func (c *OVClient) GetProfiles(start string, count string, filter string, sort string, scopeUris string) (ServerProfileList, error) {
	const uri = "/rest/server-profiles"
	var profiles ServerProfileList

	// Collect only the non-empty query parameters.
	query := make(map[string]interface{})
	for key, value := range map[string]string{
		"filter":    filter,
		"sort":      sort,
		"start":     start,
		"count":     count,
		"scopeUris": scopeUris,
	} {
		if value != "" {
			query[key] = value
		}
	}

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())
	// Setup query
	if len(query) > 0 {
		c.SetQueryString(query)
	}

	body, err := c.RestAPICall(rest.GET, uri, nil)
	if err != nil {
		return profiles, err
	}
	log.Debugf("GetProfiles %s", body)
	if err := json.Unmarshal([]byte(body), &profiles); err != nil {
		return profiles, err
	}
	return profiles, nil
}
// GetProfileByURI - get the profile from a uri.
func (c *OVClient) GetProfileByURI(uri utils.Nstring) (ServerProfile, error) {
	var profile ServerProfile

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	body, err := c.RestAPICall(rest.GET, uri.String(), nil)
	if err != nil {
		return profile, err
	}
	log.Debugf("GetProfileByURI %s", body)
	if err := json.Unmarshal([]byte(body), &profile); err != nil {
		return profile, err
	}
	return profile, nil
}
// GetAvailableServers - To fetch available server hardwares. Reports whether
// the hardware at ServerHardwareUri appears in the available-targets list.
func (c *OVClient) GetAvailableServers(ServerHardwareUri string) (bool, error) {
	const availableTargetsURI = "/rest/server-profiles/available-targets"
	var targets AvailableTarget

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	body, err := c.RestAPICall(rest.GET, availableTargetsURI, nil)
	if err != nil {
		return false, err
	}
	if err := json.Unmarshal([]byte(body), &targets); err != nil {
		return false, err
	}
	for _, member := range targets.Members {
		if member.ServerHardwareUri == ServerHardwareUri {
			return true, nil
		}
	}
	return false, nil
}
// SubmitNewProfile - submit new profile template. Verifies the requested
// hardware is available, powers it off, POSTs the profile, and waits for the
// resulting async task to complete.
func (c *OVClient) SubmitNewProfile(p ServerProfile) (err error) {
	log.Infof("Initializing creation of server profile for %s.", p.Name)
	var (
		uri    = "/rest/server-profiles"
		server ServerHardware
		t      *Task
	)
	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	// When a target server is requested, make sure it is in the list of
	// available hardware before attempting the assignment.
	if p.ServerHardwareURI != "" {
		isHardwareAvailable, herr := c.GetAvailableServers(p.ServerHardwareURI.String())
		if herr != nil || !isHardwareAvailable {
			log.Errorf("Error getting available Hardware: %s", p.ServerHardwareURI.String())
			if herr != nil {
				log.Warnf("Error: %s", herr)
			}
			// NOTE(review): exiting the process from library code is drastic;
			// kept for backward compatibility, but returning an error would
			// be preferable.
			os.Exit(1)
		}
	}
	// Look the hardware up once; the original fetched it twice (the first
	// result was discarded because of a shadowed err).
	server, err = c.GetServerHardwareByUri(p.ServerHardwareURI)
	if err != nil {
		log.Warnf("Problem getting server hardware, %s", err)
	}
	// power off the server so that we can add to SP
	if server.Name != "" {
		server.PowerOff()
	}

	data, err := c.RestAPICall(rest.POST, uri, p)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting new profile request: %s", err)
		return err
	}
	log.Debugf("Response New Profile %s", data)
	if err := json.Unmarshal([]byte(data), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	// Block until the async create task finishes.
	return t.Wait()
}
// CreateProfileFromTemplate creates a new server profile named name from the
// given profile template and assigns it to the given server hardware (blade).
func (c *OVClient) CreateProfileFromTemplate(name string, template ServerProfile, blade ServerHardware) error {
	log.Debugf("TEMPLATE : %+v\n", template)

	//GET on /rest/server-profile-templates/{id}new-profile
	log.Debugf("getting profile by URI %+v, v2", template.URI)
	new_template, err := c.GetProfileByURI(template.URI)
	if err != nil {
		return err
	}

	// Pick the profile payload type matching the negotiated API version
	// (switch replaces the original long if/else-if chain).
	switch {
	case c.APIVersion == 200:
		new_template.Type = "ServerProfileV5"
	case c.APIVersion == 300:
		new_template.Type = "ServerProfileV6"
	case c.APIVersion == 500:
		new_template.Type = "ServerProfileV7"
	case c.APIVersion == 600:
		new_template.Type = "ServerProfileV8"
	case c.APIVersion == 800:
		new_template.Type = "ServerProfileV9"
	case c.APIVersion == 1000:
		new_template.Type = "ServerProfileV10"
	case c.APIVersion == 1200:
		new_template.Type = "ServerProfileV11"
	case c.APIVersion >= 1600:
		new_template.Type = "ServerProfileV12"
	}

	new_template.ServerProfileTemplateURI = template.URI // create relationship
	new_template.ConnectionSettings = ConnectionSettings{
		Connections: template.ConnectionSettings.Connections,
	}
	log.Debugf("new_template -> %+v", new_template)
	new_template.ServerHardwareURI = blade.URI
	new_template.Description += " " + name
	new_template.Name = name
	log.Debugf("new_template -> %+v", new_template)

	return c.SubmitNewProfile(new_template)
}
// SubmitDeleteProfile posts an asynchronous delete for the given server
// profile and returns the task that tracks it. A profile without a URI is a
// no-op: the task is marked done and returned.
func (c *OVClient) SubmitDeleteProfile(p ServerProfile) (t *Task, err error) {
	uri := p.URI.String()

	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	// Nothing to delete without a URI.
	if uri == "" {
		log.Warn("Unable to post delete, no uri found.")
		t.TaskIsDone = true
		return t, err
	}

	data, err := c.RestAPICall(rest.DELETE, uri, nil)
	if err != nil {
		log.Errorf("Error submitting new profile request: %s", err)
		t.TaskIsDone = true
		return t, err
	}
	log.Debugf("Response delete profile %s", data)
	if uerr := json.Unmarshal(data, &t); uerr != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", uerr)
		return t, uerr
	}
	return t, err
}
// DeleteProfile unassigns the server and removes the named profile from the
// system. A missing profile is logged and skipped, not treated as an error.
func (c *OVClient) DeleteProfile(name string) error {
	// get the profile for this server
	var (
		servernamemsg string
		server        ServerHardware
		profile       ServerProfile
		err           error
	)

	servernamemsg = "'no server'"
	profile, err = c.GetProfileByName(name)
	if err != nil {
		return err
	}
	if profile.Name == "" {
		log.Infof("Profile could not be found to delete, %s, skipping delete ...", name)
		return nil
	}

	if profile.ServerHardwareURI != "" {
		server, err = c.GetServerHardwareByUri(profile.ServerHardwareURI)
		if err != nil {
			log.Warnf("Problem getting server hardware, %s", err)
		} else if server.Name != "" {
			servernamemsg = server.Name
		}
	}
	log.Infof("Delete server profile %s from oneview, %s will be unassigned.", profile.Name, servernamemsg)

	// power off the server so that we can remove it
	if server.Name != "" {
		server.PowerOff()
	}

	// submit delete task; the original silently discarded this error
	t, err := c.SubmitDeleteProfile(profile)
	if err != nil {
		return err
	}
	// check for task execution (newer API versions do not wait)
	if c.APIVersion < 1000 {
		if err := t.Wait(); err != nil {
			return err
		}
	}
	return nil
}
// UpdateServerProfile PUTs the given profile to its own URI and waits for the
// resulting async task to complete.
func (c *OVClient) UpdateServerProfile(p ServerProfile) error {
	log.Infof("Initializing update of server profile for %s.", p.Name)
	uri := p.URI.String()

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	var t *Task
	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, p)
	log.Debugf("task -> %+v", t)

	response, err := c.RestAPICall(rest.PUT, uri, p)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting update server profile request: %s", err)
		return err
	}
	log.Debugf("Response update ServerProfile %s", response)
	if err := json.Unmarshal([]byte(response), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	return t.Wait()
}
// PatchServerProfile PATCHes the given profile with the supplied patch
// operations and waits for the resulting async task to complete.
func (c *OVClient) PatchServerProfile(p ServerProfile, request []Options) error {
	log.Infof("Initializing update of server profile for %s.", p.Name)
	uri := p.URI.String()

	// refresh login
	c.RefreshLogin()
	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())

	var t *Task
	t = t.NewProfileTask(c)
	t.ResetTask()
	log.Debugf("REST : %s \n %+v\n", uri, request)
	log.Debugf("task -> %+v", t)

	response, err := c.RestAPICall(rest.PATCH, uri, request)
	if err != nil {
		t.TaskIsDone = true
		log.Errorf("Error submitting update server profile request: %s", err)
		return err
	}
	log.Debugf("Response update ServerProfile %s", response)
	if err := json.Unmarshal([]byte(response), &t); err != nil {
		t.TaskIsDone = true
		log.Errorf("Error with task un-marshal: %s", err)
		return err
	}
	return t.Wait()
}
package datadog
import (
"container/ring"
"context"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/sirupsen/logrus"
vhttp "github.com/stripe/veneur/http"
"github.com/stripe/veneur/protocol"
"github.com/stripe/veneur/samplers"
"github.com/stripe/veneur/sinks"
"github.com/stripe/veneur/ssf"
"github.com/stripe/veneur/trace"
)
// DatadogResourceKey is the "resource" tag key.
const DatadogResourceKey = "resource"

// datadogNameKey is the "name" tag key.
const datadogNameKey = "name"
// DatadogMetricSink sends metrics to the Datadog API over HTTP.
type DatadogMetricSink struct {
	HTTPClient      *http.Client
	APIKey          string
	DDHostname      string
	hostname        string
	flushMaxPerBody int // maximum number of metrics per POST body; flushes are chunked to stay under it
	statsd          *statsd.Client
	tags            []string // tags appended to every event/check
	interval        float64
	traceClient     *trace.Client
	log             *logrus.Logger
}
// DDMetric is a data structure that represents the JSON that Datadog
// wants when posting to the API
type DDMetric struct {
	Name       string        `json:"metric"`
	Value      [1][2]float64 `json:"points"` // single [timestamp, value] pair — presumed Datadog series format; confirm against the API docs
	Tags       []string      `json:"tags,omitempty"`
	MetricType string        `json:"type"`
	Hostname   string        `json:"host,omitempty"`
	DeviceName string        `json:"device_name,omitempty"`
	Interval   int32         `json:"interval,omitempty"`
}
// NewDatadogMetricSink creates a new Datadog sink for trace spans.
func NewDatadogMetricSink(interval float64, flushMaxPerBody int, hostname string, tags []string, ddHostname string, apiKey string, httpClient *http.Client, stats *statsd.Client, log *logrus.Logger) (*DatadogMetricSink, error) {
	sink := &DatadogMetricSink{
		APIKey:          apiKey,
		DDHostname:      ddHostname,
		HTTPClient:      httpClient,
		hostname:        hostname,
		tags:            tags,
		interval:        interval,
		flushMaxPerBody: flushMaxPerBody,
		statsd:          stats,
		log:             log,
	}
	return sink, nil
}
// Name returns the name of this sink.
func (dd *DatadogMetricSink) Name() string {
	const sinkName = "datadog"
	return sinkName
}
// Start sets the sink up.
// It stores the trace client used to report the sink's own spans.
func (dd *DatadogMetricSink) Start(cl *trace.Client) error {
	dd.traceClient = cl
	return nil
}
// Flush sends the given metrics to Datadog, split across parallel workers so
// that no single POST body exceeds flushMaxPerBody metrics.
func (dd *DatadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {
	span, _ := trace.StartSpanFromContext(ctx, "")
	defer span.ClientFinish(dd.traceClient)

	metrics := dd.finalizeMetrics(interMetrics)
	// Nothing to send: the original still spawned one worker with an empty
	// chunk and posted an empty body; skip that.
	if len(metrics) == 0 {
		dd.log.Info("Completed flush to Datadog (no metrics)")
		return nil
	}

	// break the metrics into chunks of approximately equal size, such that
	// each chunk is less than the limit
	// we compute the chunks using rounding-up integer division
	workers := ((len(metrics) - 1) / dd.flushMaxPerBody) + 1
	chunkSize := ((len(metrics) - 1) / workers) + 1
	dd.log.WithField("workers", workers).Debug("Worker count chosen")
	dd.log.WithField("chunkSize", chunkSize).Debug("Chunk size chosen")

	var wg sync.WaitGroup
	flushStart := time.Now()
	for i := 0; i < workers; i++ {
		chunk := metrics[i*chunkSize:]
		if i < workers-1 {
			// trim to chunk size unless this is the last one
			chunk = chunk[:chunkSize]
		}
		wg.Add(1)
		go dd.flushPart(span.Attach(ctx), chunk, &wg)
	}
	wg.Wait()
	dd.statsd.TimeInMilliseconds(sinks.MetricKeyMetricFlushDuration, float64(time.Since(flushStart).Nanoseconds()), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	dd.statsd.Count(sinks.MetricKeyTotalMetricsFlushed, int64(len(metrics)), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	dd.log.WithField("metrics", len(metrics)).Info("Completed flush to Datadog")
	return nil
}
// FlushEventsChecks posts accumulated events and service checks to Datadog.
// Both slices are mutated in place: missing hostnames are defaulted and the
// sink-wide tags are appended before sending.
// NOTE(review): the HTTP calls use context.TODO() rather than the ctx
// argument, so they do not respect caller cancellation — confirm intent.
func (dd *DatadogMetricSink) FlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck) {
	span, _ := trace.StartSpanFromContext(ctx, "")
	defer span.ClientFinish(dd.traceClient)
	// fill in the default hostname for packets that didn't set it
	for i := range events {
		if events[i].Hostname == "" {
			events[i].Hostname = dd.hostname
		}
		events[i].Tags = append(events[i].Tags, dd.tags...)
	}
	for i := range checks {
		if checks[i].Hostname == "" {
			checks[i].Hostname = dd.hostname
		}
		checks[i].Tags = append(checks[i].Tags, dd.tags...)
	}
	if len(events) != 0 {
		// this endpoint is not documented at all, its existence is only known from
		// the official dd-agent
		// we don't actually pass all the body keys that dd-agent passes here... but
		// it still works
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/intake?api_key=%s", dd.DDHostname, dd.APIKey), map[string]map[string][]samplers.UDPEvent{
			"events": {
				"api": events,
			},
		}, "flush_events", true, dd.log)
		if err == nil {
			dd.log.WithField("events", len(events)).Info("Completed flushing events to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"events":        len(events),
				logrus.ErrorKey: err}).Warn("Error flushing events to Datadog")
		}
	}
	if len(checks) != 0 {
		// this endpoint is not documented to take an array... but it does
		// another curious constraint of this endpoint is that it does not
		// support "Content-Encoding: deflate"
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/check_run?api_key=%s", dd.DDHostname, dd.APIKey), checks, "flush_checks", false, dd.log)
		if err == nil {
			dd.log.WithField("checks", len(checks)).Info("Completed flushing service checks to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"checks":        len(checks),
				logrus.ErrorKey: err}).Warn("Error flushing checks to Datadog")
		}
	}
}
// finalizeMetrics converts the interchange metrics into Datadog's wire
// format: counters become per-second rates, sink-wide tags are prepended,
// and the "host:"/"device:" magic tags are hoisted into metric fields.
// Unknown metric types are logged and dropped.
func (dd *DatadogMetricSink) finalizeMetrics(metrics []samplers.InterMetric) []DDMetric {
	ddMetrics := make([]DDMetric, 0, len(metrics))
	for _, m := range metrics {
		if !sinks.IsAcceptableMetric(m, dd) {
			continue
		}
		// Defensively copy tags since we're gonna mutate it
		tags := make([]string, len(dd.tags))
		copy(tags, dd.tags)

		metricType := ""
		value := m.Value
		switch m.Type {
		case samplers.CounterMetric:
			// We convert counters into rates for Datadog
			metricType = "rate"
			value = m.Value / dd.interval
		case samplers.GaugeMetric:
			metricType = "gauge"
		default:
			dd.log.WithField("metric_type", m.Type).Warn("Encountered an unknown metric type")
			continue
		}

		ddMetric := DDMetric{
			Name: m.Name,
			// A single [timestamp, value] point, as Datadog expects.
			Value:      [1][2]float64{{float64(m.Timestamp), value}},
			Tags:       tags,
			MetricType: metricType,
			Interval:   int32(dd.interval),
		}

		// Let's look for "magic tags" that override metric fields host and device.
		// Using TrimPrefix instead of hand-counted index slicing (tag[5:], tag[7:])
		// keeps the prefix string and its length in one place.
		for _, tag := range m.Tags {
			if strings.HasPrefix(tag, "host:") {
				// Override the hostname with the tag, trimming off the prefix.
				ddMetric.Hostname = strings.TrimPrefix(tag, "host:")
			} else if strings.HasPrefix(tag, "device:") {
				// Same as above, but device this time
				ddMetric.DeviceName = strings.TrimPrefix(tag, "device:")
			} else {
				// Add it, no reason to exclude it.
				ddMetric.Tags = append(ddMetric.Tags, tag)
			}
		}
		if ddMetric.Hostname == "" {
			// No magic tag, fall back to the sink's configured hostname.
			ddMetric.Hostname = dd.hostname
		}
		ddMetrics = append(ddMetrics, ddMetric)
	}
	return ddMetrics
}
// flushPart POSTs one chunk of metrics to the Datadog series endpoint.
// It is run as a goroutine by Flush; wg is marked done on return.
// NOTE(review): the PostHelper error is intentionally dropped here —
// presumably PostHelper logs/instruments failures itself; confirm.
func (dd *DatadogMetricSink) flushPart(ctx context.Context, metricSlice []DDMetric, wg *sync.WaitGroup) {
	defer wg.Done()
	vhttp.PostHelper(ctx, dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/series?api_key=%s", dd.DDHostname, dd.APIKey), map[string][]DDMetric{
		"series": metricSlice,
	}, "flush", true, dd.log)
}

// DatadogTraceSpan represents a trace span as JSON for the
// Datadog tracing API.
type DatadogTraceSpan struct {
	Duration int64              `json:"duration"`
	Error    int64              `json:"error"`
	Meta     map[string]string  `json:"meta"`
	Metrics  map[string]float64 `json:"metrics"`
	Name     string             `json:"name"`
	ParentID int64              `json:"parent_id,omitempty"` // 0 (omitted) for root spans
	Resource string             `json:"resource,omitempty"`
	Service  string             `json:"service"`
	SpanID   int64              `json:"span_id"`
	Start    int64              `json:"start"`
	TraceID  int64              `json:"trace_id"`
	Type     string             `json:"type"`
}
// DatadogSpanSink is a sink for sending spans to a Datadog trace agent.
// Ingested spans accumulate in a fixed-size ring buffer (oldest overwritten)
// until Flush ships them.
type DatadogSpanSink struct {
	HTTPClient   *http.Client
	buffer       *ring.Ring // fixed-size span buffer; guarded by mutex
	bufferSize   int
	mutex        *sync.Mutex // guards buffer between Ingest and Flush
	stats        *statsd.Client
	commonTags   map[string]string // merged onto every span at flush time
	traceAddress string            // base URL of the local trace agent
	traceClient  *trace.Client
	log          *logrus.Logger
}

// NewDatadogSpanSink creates a new Datadog sink for trace spans.
// The error return is always nil; it exists for interface symmetry.
func NewDatadogSpanSink(address string, bufferSize int, stats *statsd.Client, httpClient *http.Client, commonTags map[string]string, log *logrus.Logger) (*DatadogSpanSink, error) {
	return &DatadogSpanSink{
		HTTPClient:   httpClient,
		bufferSize:   bufferSize,
		buffer:       ring.New(bufferSize),
		mutex:        &sync.Mutex{},
		stats:        stats,
		commonTags:   commonTags,
		traceAddress: address,
		log:          log,
	}, nil
}

// Name returns the name of this sink ("datadog").
func (dd *DatadogSpanSink) Name() string {
	return "datadog"
}

// Start performs final adjustments on the sink, storing the trace client.
// It never fails.
func (dd *DatadogSpanSink) Start(cl *trace.Client) error {
	dd.traceClient = cl
	return nil
}

// Ingest validates the span and adds it to the ringbuffer, overwriting the
// oldest entry if the buffer is full.
func (dd *DatadogSpanSink) Ingest(span *ssf.SSFSpan) error {
	if err := protocol.ValidateTrace(span); err != nil {
		return err
	}
	dd.mutex.Lock()
	defer dd.mutex.Unlock()
	dd.buffer.Value = span
	dd.buffer = dd.buffer.Next()
	return nil
}
// Flush signals the sink to send its spans to their destination. For this
// sink it means we'll be making an HTTP request to send them along. We assume
// it's beneficial to performance to defer these until the normal 10s flush.
func (dd *DatadogSpanSink) Flush() {
	dd.mutex.Lock()
	flushStart := time.Now()
	ssfSpans := make([]*ssf.SSFSpan, 0, dd.buffer.Len())
	dd.buffer.Do(func(t interface{}) {
		// Sanity bounds for start timestamps; out-of-range spans are counted
		// as errors but still flushed.
		const tooEarly = 1497
		const tooLate = 1497629343000000
		if t != nil {
			ssfSpan, ok := t.(*ssf.SSFSpan)
			if !ok {
				dd.log.Error("Got an unknown object in tracing ring!")
				// BUG FIX: the original called dd.mutex.Unlock() here, but this
				// `return` only exits the ring.Do callback — Flush keeps running
				// and unlocks again below, which would panic with "unlock of
				// unlocked mutex". Just skip the bad entry; the lock is released
				// once, after the ring walk.
				return
			}
			var timeErr string
			if ssfSpan.StartTimestamp < tooEarly {
				timeErr = "type:tooEarly"
			}
			if ssfSpan.StartTimestamp > tooLate {
				timeErr = "type:tooLate"
			}
			if timeErr != "" {
				dd.stats.Incr("worker.trace.sink.timestamp_error", []string{timeErr}, 1) // TODO tag as dd?
			}
			if ssfSpan.Tags == nil {
				ssfSpan.Tags = make(map[string]string)
			}
			// Add common tags from veneur's config
			// this will overwrite tags already present on the span
			for k, v := range dd.commonTags {
				ssfSpan.Tags[k] = v
			}
			ssfSpans = append(ssfSpans, ssfSpan)
		}
	})
	// Reset the ring.
	dd.buffer = ring.New(dd.bufferSize)
	// We're done manipulating stuff, let Ingest loose again.
	dd.mutex.Unlock()
	serviceCount := make(map[string]int64)
	// Datadog wants the spans for each trace in an array, so make a map.
	traceMap := map[int64][]*DatadogTraceSpan{}
	// Convert the SSFSpans into Datadog Spans
	for _, span := range ssfSpans {
		// -1 is a canonical way of passing in invalid info in Go
		// so we should support that too
		parentID := span.ParentId
		// check if this is the root span
		if parentID <= 0 {
			// we need parentId to be zero for json:omitempty to work
			parentID = 0
		}
		resource := span.Tags[DatadogResourceKey]
		if resource == "" {
			resource = "unknown"
		}
		tags := map[string]string{}
		// Get the span's existing tags
		for k, v := range span.Tags {
			tags[k] = v
		}
		name := span.Name
		if name == "" {
			name = "unknown"
		}
		// The resource tag is promoted to a field, not sent as a tag.
		delete(tags, DatadogResourceKey)
		var errorCode int64
		if span.Error {
			errorCode = 2
		}
		ddspan := &DatadogTraceSpan{
			TraceID:  span.TraceId,
			SpanID:   span.Id,
			ParentID: parentID,
			Service:  span.Service,
			Name:     name,
			Resource: resource,
			Start:    span.StartTimestamp,
			Duration: span.EndTimestamp - span.StartTimestamp,
			// TODO don't hardcode
			Type:  "web",
			Error: errorCode,
			Meta:  tags,
		}
		serviceCount[span.Service]++
		if _, ok := traceMap[span.TraceId]; !ok {
			traceMap[span.TraceId] = []*DatadogTraceSpan{}
		}
		traceMap[span.TraceId] = append(traceMap[span.TraceId], ddspan)
	}
	// Smush the spans into a two-dimensional array now that they are grouped by trace id.
	finalTraces := make([][]*DatadogTraceSpan, len(traceMap))
	idx := 0
	for _, val := range traceMap {
		finalTraces[idx] = val
		idx++
	}
	if len(finalTraces) != 0 {
		// this endpoint is not documented to take an array... but it does
		// another curious constraint of this endpoint is that it does not
		// support "Content-Encoding: deflate"
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.stats, dd.traceClient, http.MethodPut, fmt.Sprintf("%s/v0.3/traces", dd.traceAddress), finalTraces, "flush_traces", false, dd.log)
		if err == nil {
			dd.log.WithField("traces", len(finalTraces)).Info("Completed flushing traces to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"traces":        len(finalTraces),
				logrus.ErrorKey: err}).Warn("Error flushing traces to Datadog")
		}
		for service, count := range serviceCount {
			dd.stats.Count(sinks.MetricKeyTotalSpansFlushed, count, []string{fmt.Sprintf("sink:%s", dd.Name()), fmt.Sprintf("service:%s", service)}, 1)
		}
		dd.stats.TimeInMilliseconds(sinks.MetricKeySpanFlushDuration, float64(time.Since(flushStart).Nanoseconds()), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	} else {
		dd.log.Info("No traces to flush to Datadog, skipping.")
	}
}
Rearrange delete
package datadog
import (
"container/ring"
"context"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/sirupsen/logrus"
vhttp "github.com/stripe/veneur/http"
"github.com/stripe/veneur/protocol"
"github.com/stripe/veneur/samplers"
"github.com/stripe/veneur/sinks"
"github.com/stripe/veneur/ssf"
"github.com/stripe/veneur/trace"
)
// DatadogResourceKey is the "magic" span tag whose value overrides the
// Datadog trace "resource" field (consumed in DatadogSpanSink.Flush).
const DatadogResourceKey = "resource"

// datadogNameKey is the corresponding "name" magic-tag key; unused in this
// chunk of the file.
const datadogNameKey = "name"

// DatadogMetricSink posts Veneur metrics to the Datadog HTTP API.
type DatadogMetricSink struct {
	HTTPClient      *http.Client
	APIKey          string
	DDHostname      string
	hostname        string // default host value when no "host:" magic tag is present
	flushMaxPerBody int    // max metrics per POST body
	statsd          *statsd.Client
	tags            []string // sink-wide tags copied onto every flushed metric
	interval        float64  // flush interval in seconds; converts counters to rates
	traceClient     *trace.Client
	log             *logrus.Logger
}

// DDMetric is a data structure that represents the JSON that Datadog
// wants when posting to the API
type DDMetric struct {
	Name       string        `json:"metric"`
	Value      [1][2]float64 `json:"points"` // a single [timestamp, value] pair
	Tags       []string      `json:"tags,omitempty"`
	MetricType string        `json:"type"` // "rate" or "gauge"
	Hostname   string        `json:"host,omitempty"`
	DeviceName string        `json:"device_name,omitempty"`
	Interval   int32         `json:"interval,omitempty"`
}
// NewDatadogMetricSink creates a new Datadog sink for metrics (the doc
// comment's "trace spans" is a copy-paste slip — this is the metric sink).
// The error return is always nil; it exists for interface symmetry.
func NewDatadogMetricSink(interval float64, flushMaxPerBody int, hostname string, tags []string, ddHostname string, apiKey string, httpClient *http.Client, stats *statsd.Client, log *logrus.Logger) (*DatadogMetricSink, error) {
	return &DatadogMetricSink{
		HTTPClient:      httpClient,
		APIKey:          apiKey,
		DDHostname:      ddHostname,
		statsd:          stats,
		interval:        interval,
		flushMaxPerBody: flushMaxPerBody,
		hostname:        hostname,
		tags:            tags,
		log:             log,
	}, nil
}

// Name returns the name of this sink ("datadog").
func (dd *DatadogMetricSink) Name() string {
	return "datadog"
}

// Start sets the sink up by storing the trace client. It never fails.
func (dd *DatadogMetricSink) Start(cl *trace.Client) error {
	dd.traceClient = cl
	return nil
}
// Flush finalizes the given metrics and POSTs them to Datadog, splitting the
// payload across parallel workers of at most flushMaxPerBody metrics each.
func (dd *DatadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {
	span, _ := trace.StartSpanFromContext(ctx, "")
	defer span.ClientFinish(dd.traceClient)
	metrics := dd.finalizeMetrics(interMetrics)
	// break the metrics into chunks of approximately equal size, such that
	// each chunk is less than the limit
	// we compute the chunks using rounding-up integer division
	// NOTE(review): flushMaxPerBody must be > 0 or this divides by zero.
	workers := ((len(metrics) - 1) / dd.flushMaxPerBody) + 1
	chunkSize := ((len(metrics) - 1) / workers) + 1
	dd.log.WithField("workers", workers).Debug("Worker count chosen")
	dd.log.WithField("chunkSize", chunkSize).Debug("Chunk size chosen")
	var wg sync.WaitGroup
	flushStart := time.Now()
	for i := 0; i < workers; i++ {
		chunk := metrics[i*chunkSize:]
		if i < workers-1 {
			// trim to chunk size unless this is the last one
			chunk = chunk[:chunkSize]
		}
		wg.Add(1)
		go dd.flushPart(span.Attach(ctx), chunk, &wg)
	}
	wg.Wait()
	dd.statsd.TimeInMilliseconds(sinks.MetricKeyMetricFlushDuration, float64(time.Since(flushStart).Nanoseconds()), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	dd.statsd.Count(sinks.MetricKeyTotalMetricsFlushed, int64(len(metrics)), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	dd.log.WithField("metrics", len(metrics)).Info("Completed flush to Datadog")
	return nil
}
// FlushEventsChecks posts accumulated events and service checks to Datadog,
// defaulting missing hostnames and appending the sink-wide tags in place.
// NOTE(review): the HTTP calls use context.TODO() rather than ctx, so they
// do not respect caller cancellation — confirm intent.
func (dd *DatadogMetricSink) FlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck) {
	span, _ := trace.StartSpanFromContext(ctx, "")
	defer span.ClientFinish(dd.traceClient)
	// fill in the default hostname for packets that didn't set it
	for i := range events {
		if events[i].Hostname == "" {
			events[i].Hostname = dd.hostname
		}
		events[i].Tags = append(events[i].Tags, dd.tags...)
	}
	for i := range checks {
		if checks[i].Hostname == "" {
			checks[i].Hostname = dd.hostname
		}
		checks[i].Tags = append(checks[i].Tags, dd.tags...)
	}
	if len(events) != 0 {
		// this endpoint is not documented at all, its existence is only known from
		// the official dd-agent
		// we don't actually pass all the body keys that dd-agent passes here... but
		// it still works
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/intake?api_key=%s", dd.DDHostname, dd.APIKey), map[string]map[string][]samplers.UDPEvent{
			"events": {
				"api": events,
			},
		}, "flush_events", true, dd.log)
		if err == nil {
			dd.log.WithField("events", len(events)).Info("Completed flushing events to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"events":        len(events),
				logrus.ErrorKey: err}).Warn("Error flushing events to Datadog")
		}
	}
	if len(checks) != 0 {
		// this endpoint is not documented to take an array... but it does
		// another curious constraint of this endpoint is that it does not
		// support "Content-Encoding: deflate"
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/check_run?api_key=%s", dd.DDHostname, dd.APIKey), checks, "flush_checks", false, dd.log)
		if err == nil {
			dd.log.WithField("checks", len(checks)).Info("Completed flushing service checks to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"checks":        len(checks),
				logrus.ErrorKey: err}).Warn("Error flushing checks to Datadog")
		}
	}
}
// finalizeMetrics converts the interchange metrics into Datadog's wire
// format: counters become per-second rates, sink-wide tags are prepended,
// and the "host:"/"device:" magic tags are hoisted into metric fields.
// Unknown metric types are logged and dropped.
func (dd *DatadogMetricSink) finalizeMetrics(metrics []samplers.InterMetric) []DDMetric {
	ddMetrics := make([]DDMetric, 0, len(metrics))
	for _, m := range metrics {
		if !sinks.IsAcceptableMetric(m, dd) {
			continue
		}
		// Defensively copy tags since we're gonna mutate it
		tags := make([]string, len(dd.tags))
		copy(tags, dd.tags)

		metricType := ""
		value := m.Value
		switch m.Type {
		case samplers.CounterMetric:
			// We convert counters into rates for Datadog
			metricType = "rate"
			value = m.Value / dd.interval
		case samplers.GaugeMetric:
			metricType = "gauge"
		default:
			dd.log.WithField("metric_type", m.Type).Warn("Encountered an unknown metric type")
			continue
		}

		ddMetric := DDMetric{
			Name: m.Name,
			// A single [timestamp, value] point, as Datadog expects.
			Value:      [1][2]float64{{float64(m.Timestamp), value}},
			Tags:       tags,
			MetricType: metricType,
			Interval:   int32(dd.interval),
		}

		// Let's look for "magic tags" that override metric fields host and device.
		// Using TrimPrefix instead of hand-counted index slicing (tag[5:], tag[7:])
		// keeps the prefix string and its length in one place.
		for _, tag := range m.Tags {
			if strings.HasPrefix(tag, "host:") {
				// Override the hostname with the tag, trimming off the prefix.
				ddMetric.Hostname = strings.TrimPrefix(tag, "host:")
			} else if strings.HasPrefix(tag, "device:") {
				// Same as above, but device this time
				ddMetric.DeviceName = strings.TrimPrefix(tag, "device:")
			} else {
				// Add it, no reason to exclude it.
				ddMetric.Tags = append(ddMetric.Tags, tag)
			}
		}
		if ddMetric.Hostname == "" {
			// No magic tag, fall back to the sink's configured hostname.
			ddMetric.Hostname = dd.hostname
		}
		ddMetrics = append(ddMetrics, ddMetric)
	}
	return ddMetrics
}
// flushPart POSTs one chunk of metrics to the Datadog series endpoint.
// Run as a goroutine by Flush; wg is marked done on return.
// NOTE(review): the PostHelper error is intentionally dropped here —
// presumably PostHelper logs/instruments failures itself; confirm.
func (dd *DatadogMetricSink) flushPart(ctx context.Context, metricSlice []DDMetric, wg *sync.WaitGroup) {
	defer wg.Done()
	vhttp.PostHelper(ctx, dd.HTTPClient, dd.statsd, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/series?api_key=%s", dd.DDHostname, dd.APIKey), map[string][]DDMetric{
		"series": metricSlice,
	}, "flush", true, dd.log)
}

// DatadogTraceSpan represents a trace span as JSON for the
// Datadog tracing API.
type DatadogTraceSpan struct {
	Duration int64              `json:"duration"`
	Error    int64              `json:"error"`
	Meta     map[string]string  `json:"meta"`
	Metrics  map[string]float64 `json:"metrics"`
	Name     string             `json:"name"`
	ParentID int64              `json:"parent_id,omitempty"` // 0 (omitted) for root spans
	Resource string             `json:"resource,omitempty"`
	Service  string             `json:"service"`
	SpanID   int64              `json:"span_id"`
	Start    int64              `json:"start"`
	TraceID  int64              `json:"trace_id"`
	Type     string             `json:"type"`
}
// DatadogSpanSink is a sink for sending spans to a Datadog trace agent.
// Ingested spans accumulate in a fixed-size ring buffer (oldest overwritten)
// until Flush ships them.
type DatadogSpanSink struct {
	HTTPClient   *http.Client
	buffer       *ring.Ring // fixed-size span buffer; guarded by mutex
	bufferSize   int
	mutex        *sync.Mutex // guards buffer between Ingest and Flush
	stats        *statsd.Client
	commonTags   map[string]string // merged onto every span at flush time
	traceAddress string            // base URL of the local trace agent
	traceClient  *trace.Client
	log          *logrus.Logger
}

// NewDatadogSpanSink creates a new Datadog sink for trace spans.
// The error return is always nil; it exists for interface symmetry.
func NewDatadogSpanSink(address string, bufferSize int, stats *statsd.Client, httpClient *http.Client, commonTags map[string]string, log *logrus.Logger) (*DatadogSpanSink, error) {
	return &DatadogSpanSink{
		HTTPClient:   httpClient,
		bufferSize:   bufferSize,
		buffer:       ring.New(bufferSize),
		mutex:        &sync.Mutex{},
		stats:        stats,
		commonTags:   commonTags,
		traceAddress: address,
		log:          log,
	}, nil
}

// Name returns the name of this sink ("datadog").
func (dd *DatadogSpanSink) Name() string {
	return "datadog"
}

// Start performs final adjustments on the sink, storing the trace client.
// It never fails.
func (dd *DatadogSpanSink) Start(cl *trace.Client) error {
	dd.traceClient = cl
	return nil
}

// Ingest validates the span and adds it to the ringbuffer, overwriting the
// oldest entry if the buffer is full.
func (dd *DatadogSpanSink) Ingest(span *ssf.SSFSpan) error {
	if err := protocol.ValidateTrace(span); err != nil {
		return err
	}
	dd.mutex.Lock()
	defer dd.mutex.Unlock()
	dd.buffer.Value = span
	dd.buffer = dd.buffer.Next()
	return nil
}
// Flush signals the sink to send its spans to their destination. For this
// sink it means we'll be making an HTTP request to send them along. We assume
// it's beneficial to performance to defer these until the normal 10s flush.
func (dd *DatadogSpanSink) Flush() {
	dd.mutex.Lock()
	flushStart := time.Now()
	ssfSpans := make([]*ssf.SSFSpan, 0, dd.buffer.Len())
	dd.buffer.Do(func(t interface{}) {
		// Sanity bounds for start timestamps; out-of-range spans are counted
		// as errors but still flushed.
		const tooEarly = 1497
		const tooLate = 1497629343000000
		if t != nil {
			ssfSpan, ok := t.(*ssf.SSFSpan)
			if !ok {
				dd.log.Error("Got an unknown object in tracing ring!")
				// BUG FIX: the original called dd.mutex.Unlock() here, but this
				// `return` only exits the ring.Do callback — Flush keeps running
				// and unlocks again below, which would panic with "unlock of
				// unlocked mutex". Just skip the bad entry; the lock is released
				// once, after the ring walk.
				return
			}
			var timeErr string
			if ssfSpan.StartTimestamp < tooEarly {
				timeErr = "type:tooEarly"
			}
			if ssfSpan.StartTimestamp > tooLate {
				timeErr = "type:tooLate"
			}
			if timeErr != "" {
				dd.stats.Incr("worker.trace.sink.timestamp_error", []string{timeErr}, 1) // TODO tag as dd?
			}
			if ssfSpan.Tags == nil {
				ssfSpan.Tags = make(map[string]string)
			}
			// Add common tags from veneur's config
			// this will overwrite tags already present on the span
			for k, v := range dd.commonTags {
				ssfSpan.Tags[k] = v
			}
			ssfSpans = append(ssfSpans, ssfSpan)
		}
	})
	// Reset the ring.
	dd.buffer = ring.New(dd.bufferSize)
	// We're done manipulating stuff, let Ingest loose again.
	dd.mutex.Unlock()
	serviceCount := make(map[string]int64)
	// Datadog wants the spans for each trace in an array, so make a map.
	traceMap := map[int64][]*DatadogTraceSpan{}
	// Convert the SSFSpans into Datadog Spans
	for _, span := range ssfSpans {
		// -1 is a canonical way of passing in invalid info in Go
		// so we should support that too
		parentID := span.ParentId
		// check if this is the root span
		if parentID <= 0 {
			// we need parentId to be zero for json:omitempty to work
			parentID = 0
		}
		tags := map[string]string{}
		// Get the span's existing tags
		for k, v := range span.Tags {
			tags[k] = v
		}
		resource := span.Tags[DatadogResourceKey]
		if resource == "" {
			resource = "unknown"
		}
		// The resource tag is promoted to a field, not sent as a tag.
		delete(tags, DatadogResourceKey)
		name := span.Name
		if name == "" {
			name = "unknown"
		}
		var errorCode int64
		if span.Error {
			errorCode = 2
		}
		ddspan := &DatadogTraceSpan{
			TraceID:  span.TraceId,
			SpanID:   span.Id,
			ParentID: parentID,
			Service:  span.Service,
			Name:     name,
			Resource: resource,
			Start:    span.StartTimestamp,
			Duration: span.EndTimestamp - span.StartTimestamp,
			// TODO don't hardcode
			Type:  "web",
			Error: errorCode,
			Meta:  tags,
		}
		serviceCount[span.Service]++
		if _, ok := traceMap[span.TraceId]; !ok {
			traceMap[span.TraceId] = []*DatadogTraceSpan{}
		}
		traceMap[span.TraceId] = append(traceMap[span.TraceId], ddspan)
	}
	// Smush the spans into a two-dimensional array now that they are grouped by trace id.
	finalTraces := make([][]*DatadogTraceSpan, len(traceMap))
	idx := 0
	for _, val := range traceMap {
		finalTraces[idx] = val
		idx++
	}
	if len(finalTraces) != 0 {
		// this endpoint is not documented to take an array... but it does
		// another curious constraint of this endpoint is that it does not
		// support "Content-Encoding: deflate"
		err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.stats, dd.traceClient, http.MethodPut, fmt.Sprintf("%s/v0.3/traces", dd.traceAddress), finalTraces, "flush_traces", false, dd.log)
		if err == nil {
			dd.log.WithField("traces", len(finalTraces)).Info("Completed flushing traces to Datadog")
		} else {
			dd.log.WithFields(logrus.Fields{
				"traces":        len(finalTraces),
				logrus.ErrorKey: err}).Warn("Error flushing traces to Datadog")
		}
		for service, count := range serviceCount {
			dd.stats.Count(sinks.MetricKeyTotalSpansFlushed, count, []string{fmt.Sprintf("sink:%s", dd.Name()), fmt.Sprintf("service:%s", service)}, 1)
		}
		dd.stats.TimeInMilliseconds(sinks.MetricKeySpanFlushDuration, float64(time.Since(flushStart).Nanoseconds()), []string{fmt.Sprintf("sink:%s", dd.Name())}, 1.0)
	} else {
		dd.log.Info("No traces to flush to Datadog, skipping.")
	}
}
|
/*
Copyright 2015 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package httplib implements common utility functions for writing
// classic HTTP handlers
package httplib
import (
"net/http"
"strings"
)
// SetNoCacheHeaders tells proxies and browsers not to cache the content.
func SetNoCacheHeaders(h http.Header) {
	noCache := map[string]string{
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	}
	for name, value := range noCache {
		h.Set(name, value)
	}
}
// SetStaticFileHeaders sets security header flags for static non-html
// resources: same-origin framing plus nosniff. Cache headers are left
// alone so static assets stay cacheable.
func SetStaticFileHeaders(h http.Header) {
	SetSameOriginIFrame(h)
	SetNoSniff(h)
}
// SetIndexHTMLHeaders sets the full security header set for the main
// index.html page: no caching, same-origin framing, nosniff, a strict
// referrer policy, HSTS, legacy XSS protection, and a Content-Security-Policy.
func SetIndexHTMLHeaders(h http.Header) {
	SetNoCacheHeaders(h)
	SetSameOriginIFrame(h)
	SetNoSniff(h)
	// The original additionally re-set X-Frame-Options and
	// X-Content-Type-Options here with the exact values the two helpers
	// above already set; those redundant Set calls are removed — the final
	// header state is identical.
	// Only send the origin of the document as the referrer in all cases.
	// The document https://example.com/page.html will send the referrer https://example.com/.
	h.Set("Referrer-Policy", "origin")
	// X-XSS-Protection is a feature of Internet Explorer, Chrome and Safari that stops pages
	// from loading when they detect reflected cross-site scripting (XSS) attacks.
	h.Set("X-XSS-Protection", "1; mode=block")
	// Once a supported browser receives this header that browser will prevent any communications from
	// being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
	// It also prevents HTTPS click through prompts on browsers
	h.Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains")
	// Set content policy flags
	var cspValue = strings.Join([]string{
		// enterprise version uses stripe.com to update billing information
		"script-src 'self' https://js.stripe.com",
		// 'unsafe-inline' needed for reactjs inline styles
		"style-src 'self' 'unsafe-inline'",
		"object-src 'none'",
		"img-src 'self' data: blob:",
		"base-uri 'self'",
	}, ";")
	h.Set("Content-Security-Policy", cspValue)
}
// SetSameOriginIFrame marks the page as frameable only from its own origin
// by setting the X-Frame-Options header to SAMEORIGIN.
func SetSameOriginIFrame(h http.Header) {
	const frameOptions = "SAMEORIGIN"
	h.Set("X-Frame-Options", frameOptions)
}
// SetNoSniff disables browser MIME-type sniffing by setting the
// X-Content-Type-Options header to nosniff.
func SetNoSniff(h http.Header) {
	const contentTypeOptions = "nosniff"
	h.Set("X-Content-Type-Options", contentTypeOptions)
}
// SetWebConfigHeaders sets headers for webConfig.js: the standard static-file
// security headers plus an explicit JavaScript content type.
func SetWebConfigHeaders(h http.Header) {
	SetStaticFileHeaders(h)
	h.Set("Content-Type", "application/javascript")
}
Make CSP more strict (#7390)
closes https://gravitational.zendesk.com/agent/tickets/3062
/*
Copyright 2015 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package httplib implements common utility functions for writing
// classic HTTP handlers
package httplib
import (
"net/http"
"strings"
)
// SetNoCacheHeaders tells proxies and browsers not to cache the content.
func SetNoCacheHeaders(h http.Header) {
	h.Set("Cache-Control", "no-cache, no-store, must-revalidate")
	h.Set("Pragma", "no-cache")
	h.Set("Expires", "0")
}

// SetStaticFileHeaders sets security header flags for static non-html
// resources; cache headers are left alone so assets stay cacheable.
func SetStaticFileHeaders(h http.Header) {
	SetSameOriginIFrame(h)
	SetNoSniff(h)
}
// SetIndexHTMLHeaders sets the full security header set for the main
// index.html page: no caching, same-origin framing, nosniff, a strict
// referrer policy, HSTS, legacy XSS protection, and a strict
// Content-Security-Policy.
func SetIndexHTMLHeaders(h http.Header) {
	SetNoCacheHeaders(h)
	SetSameOriginIFrame(h)
	SetNoSniff(h)
	// The original additionally re-set X-Frame-Options and
	// X-Content-Type-Options here with the exact values the two helpers
	// above already set; those redundant Set calls are removed — the final
	// header state is identical.
	// Only send the origin of the document as the referrer in all cases.
	// The document https://example.com/page.html will send the referrer https://example.com/.
	h.Set("Referrer-Policy", "origin")
	// X-XSS-Protection is a feature of Internet Explorer, Chrome and Safari that stops pages
	// from loading when they detect reflected cross-site scripting (XSS) attacks.
	h.Set("X-XSS-Protection", "1; mode=block")
	// Once a supported browser receives this header that browser will prevent any communications from
	// being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
	// It also prevents HTTPS click through prompts on browsers
	h.Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains")
	// Set content policy flags
	var cspValue = strings.Join([]string{
		"default-src 'self'",
		// cloud version uses stripe.com to update billing information
		"script-src 'self' https://js.stripe.com",
		"frame-src https://js.stripe.com",
		"frame-ancestors 'none'",
		// 'unsafe-inline' is required by CSS-in-JS to work
		"style-src 'self' 'unsafe-inline'",
		"object-src 'none'",
		"img-src 'self' data: blob:",
		"font-src 'self' data:",
		"base-uri 'self'",
		"form-action 'self'",
	}, ";")
	h.Set("Content-Security-Policy", cspValue)
}
// SetSameOriginIFrame marks the page as frameable only from its own origin
// via the X-Frame-Options header.
func SetSameOriginIFrame(h http.Header) {
	// X-Frame-Options indicates that the page can only be displayed in iframe on the same origin as the page itself
	h.Set("X-Frame-Options", "SAMEORIGIN")
}

// SetNoSniff disables browser MIME-type sniffing via the
// X-Content-Type-Options header.
func SetNoSniff(h http.Header) {
	// Prevent web browsers from using content sniffing to discover a file’s MIME type
	h.Set("X-Content-Type-Options", "nosniff")
}

// SetWebConfigHeaders sets headers for webConfig.js: the standard static-file
// security headers plus an explicit JavaScript content type.
func SetWebConfigHeaders(h http.Header) {
	SetStaticFileHeaders(h)
	h.Set("Content-Type", "application/javascript")
}
|
package packer
import (
"fmt"
"sort"
multierror "github.com/hashicorp/go-multierror"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/packer/template"
"github.com/hashicorp/packer/template/interpolate"
)
// Core is the main executor of Packer. If Packer is being used as a
// library, this is the struct you'll want to instantiate to get anything done.
type Core struct {
	Template   *template.Template
	components ComponentFinder
	variables  map[string]string
	builds     map[string]*template.Builder // keyed by interpolated build name
	version    string
	secrets    []string // values registered with LogSecretFilter in NewCore
	except     []string // build names excluded via the -except CLI flag
	only       []string // build names selected via the -only CLI flag
}

// CoreConfig is the structure for initializing a new Core. Once a CoreConfig
// is used to initialize a Core, it shouldn't be re-used or modified again.
type CoreConfig struct {
	Components         ComponentFinder
	Template           *template.Template
	Variables          map[string]string
	SensitiveVariables []string
	Version            string

	// These are set by command-line flags
	Except []string
	Only   []string
}

// BuilderFunc is the function type used to lookup Builder implementations
// by name.
type BuilderFunc func(name string) (Builder, error)

// HookFunc is the function type used to lookup Hook implementations by name.
type HookFunc func(name string) (Hook, error)

// PostProcessorFunc is the function type used to lookup PostProcessor
// implementations by name.
type PostProcessorFunc func(name string) (PostProcessor, error)

// ProvisionerFunc is the function type used to lookup Provisioner
// implementations by name.
type ProvisionerFunc func(name string) (Provisioner, error)

// ComponentFinder is a struct that contains the various function
// pointers necessary to look up components of Packer such as builders,
// commands, etc.
type ComponentFinder struct {
	Builder       BuilderFunc
	Hook          HookFunc
	PostProcessor PostProcessorFunc
	Provisioner   ProvisionerFunc
}
// NewCore creates a new Core from the given config, validates and
// initializes it (validate/init are defined elsewhere in this file),
// registers secrets with the log filter, and interpolates all build names.
// Returns an error if validation, initialization, or name interpolation
// fails.
func NewCore(c *CoreConfig) (*Core, error) {
	result := &Core{
		Template:   c.Template,
		components: c.Components,
		variables:  c.Variables,
		version:    c.Version,
		only:       c.Only,
		except:     c.Except,
	}
	if err := result.validate(); err != nil {
		return nil, err
	}
	if err := result.init(); err != nil {
		return nil, err
	}
	// NOTE(review): result.secrets is presumably populated by init() from
	// SensitiveVariables — confirm; it is not set directly in this function.
	for _, secret := range result.secrets {
		LogSecretFilter.Set(secret)
	}
	// Go through and interpolate all the build names. We should be able
	// to do this at this point with the variables.
	result.builds = make(map[string]*template.Builder)
	for _, b := range c.Template.Builders {
		v, err := interpolate.Render(b.Name, result.Context())
		if err != nil {
			return nil, fmt.Errorf(
				"Error interpolating builder '%s': %s",
				b.Name, err)
		}
		result.builds[v] = b
	}
	return result, nil
}
// BuildNames returns the builds that are available in this configured
// core, sorted lexicographically.
func (c *Core) BuildNames() []string {
	names := make([]string, 0, len(c.builds))
	for name := range c.builds {
		names = append(names, name)
	}
	sort.Strings(names)

	return names
}
// Build returns the Build object for the given name. The name must be one
// of the interpolated build names produced by NewCore. The returned build
// has its builder, provisioners, and post-processor chains resolved
// through the component finder, with only/except filtering applied.
func (c *Core) Build(n string) (Build, error) {
	// Setup the builder
	configBuilder, ok := c.builds[n]
	if !ok {
		return nil, fmt.Errorf("no such build found: %s", n)
	}
	builder, err := c.components.Builder(configBuilder.Type)
	if err != nil {
		return nil, fmt.Errorf(
			"error initializing builder '%s': %s",
			configBuilder.Type, err)
	}
	if builder == nil {
		return nil, fmt.Errorf(
			"builder type not found: %s", configBuilder.Type)
	}

	// rawName is the uninterpolated name that we use for various lookups
	rawName := configBuilder.Name

	// Setup the provisioners for this build
	provisioners := make([]coreBuildProvisioner, 0, len(c.Template.Provisioners))
	for _, rawP := range c.Template.Provisioners {
		// If we're skipping this, then ignore it
		if rawP.OnlyExcept.Skip(rawName) {
			continue
		}

		// Get the provisioner
		provisioner, err := c.components.Provisioner(rawP.Type)
		if err != nil {
			return nil, fmt.Errorf(
				"error initializing provisioner '%s': %s",
				rawP.Type, err)
		}
		if provisioner == nil {
			return nil, fmt.Errorf(
				"provisioner type not found: %s", rawP.Type)
		}

		// Get the configuration: the base config plus an optional
		// per-build override.
		config := make([]interface{}, 1, 2)
		config[0] = rawP.Config
		if rawP.Override != nil {
			if override, ok := rawP.Override[rawName]; ok {
				config = append(config, override)
			}
		}

		// If we're pausing, we wrap the provisioner in a special pauser.
		if rawP.PauseBefore > 0 {
			provisioner = &PausedProvisioner{
				PauseBefore: rawP.PauseBefore,
				Provisioner: provisioner,
			}
		}

		provisioners = append(provisioners, coreBuildProvisioner{
			pType:       rawP.Type,
			provisioner: provisioner,
			config:      config,
		})
	}

	// Setup the post-processors
	postProcessors := make([][]coreBuildPostProcessor, 0, len(c.Template.PostProcessors))
	for _, rawPs := range c.Template.PostProcessors {
		current := make([]coreBuildPostProcessor, 0, len(rawPs))
		for _, rawP := range rawPs {
			if rawP.Skip(rawName) {
				continue
			}

			// -except skips post-processor & build.
			// FIX: ignore empty -except entries; otherwise an empty string
			// in c.except matches every unnamed post-processor and
			// silently drops it from the chain.
			foundExcept := false
			for _, except := range c.except {
				if except != "" && except == rawP.Name {
					foundExcept = true
				}
			}
			if foundExcept {
				continue
			}

			// Get the post-processor
			postProcessor, err := c.components.PostProcessor(rawP.Type)
			if err != nil {
				return nil, fmt.Errorf(
					"error initializing post-processor '%s': %s",
					rawP.Type, err)
			}
			if postProcessor == nil {
				return nil, fmt.Errorf(
					"post-processor type not found: %s", rawP.Type)
			}

			current = append(current, coreBuildPostProcessor{
				processor:         postProcessor,
				processorType:     rawP.Type,
				config:            rawP.Config,
				keepInputArtifact: rawP.KeepInputArtifact,
			})
		}

		// If we have no post-processors in this chain, just continue.
		if len(current) == 0 {
			continue
		}

		postProcessors = append(postProcessors, current)
	}

	// TODO hooks one day

	return &coreBuild{
		name:           n,
		builder:        builder,
		builderConfig:  configBuilder.Config,
		builderType:    configBuilder.Type,
		postProcessors: postProcessors,
		provisioners:   provisioners,
		templatePath:   c.Template.Path,
		variables:      c.variables,
	}, nil
}
// Context returns an interpolation context populated with the template
// path and the currently resolved user variables.
func (c *Core) Context() *interpolate.Context {
	return &interpolate.Context{
		TemplatePath:  c.Template.Path,
		UserVariables: c.variables,
	}
}
// validate does a full validation of the template.
//
// This will automatically call template.validate() in addition to doing
// richer semantic checks around variables and so on.
func (c *Core) validate() error {
	// First validate the template in general, we can't do anything else
	// unless the template itself is valid.
	if err := c.Template.Validate(); err != nil {
		return err
	}

	// Validate the minimum version is satisfied
	if c.Template.MinVersion != "" {
		versionActual, err := version.NewVersion(c.version)
		if err != nil {
			// This shouldn't happen since we set it via the compiler
			panic(err)
		}

		versionMin, err := version.NewVersion(c.Template.MinVersion)
		if err != nil {
			return fmt.Errorf(
				"min_version is invalid: %s", err)
		}

		if versionActual.LessThan(versionMin) {
			return fmt.Errorf(
				"This template requires Packer version %s or higher; using %s",
				versionMin,
				versionActual)
		}
	}

	// Validate variables are set. Missing required variables are
	// accumulated with multierror so all of them are reported at once;
	// err stays nil when nothing is missing.
	var err error
	for n, v := range c.Template.Variables {
		if v.Required {
			if _, ok := c.variables[n]; !ok {
				err = multierror.Append(err, fmt.Errorf(
					"required variable not set: %s", n))
			}
		}
	}

	// TODO: validate all builders exist
	// TODO: ^^ provisioner
	// TODO: ^^ post-processor

	return err
}
// init resolves default values for non-required variables (with
// environment-variable interpolation enabled), collects interpolated
// sensitive values into c.secrets, and interpolates the push
// configuration. It must run after validate.
func (c *Core) init() error {
	if c.variables == nil {
		c.variables = make(map[string]string)
	}

	// Go through the variables and interpolate the environment variables.
	// UserVariables is cleared so defaults can't reference other user
	// variables while being rendered.
	ctx := c.Context()
	ctx.EnableEnv = true
	ctx.UserVariables = nil
	for k, v := range c.Template.Variables {
		// Ignore variables that are required
		if v.Required {
			continue
		}

		// Ignore variables that have a value
		if _, ok := c.variables[k]; ok {
			continue
		}

		// Interpolate the default
		def, err := interpolate.Render(v.Default, ctx)
		if err != nil {
			return fmt.Errorf(
				"error interpolating default value for '%s': %s",
				k, err)
		}

		c.variables[k] = def
	}
	// Interpolated sensitive values are remembered so NewCore can
	// register them with the log filter.
	for _, v := range c.Template.SensitiveVariables {
		def, err := interpolate.Render(v.Default, ctx)
		if err != nil {
			return fmt.Errorf(
				"error interpolating default value for '%#v': %s",
				v, err)
		}
		c.secrets = append(c.secrets, def)
	}

	// Interpolate the push configuration
	if _, err := interpolate.RenderInterface(&c.Template.Push, c.Context()); err != nil {
		return fmt.Errorf("Error interpolating 'push': %s", err)
	}

	return nil
}
post-processor except: don't match empty names
package packer
import (
"fmt"
"sort"
multierror "github.com/hashicorp/go-multierror"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/packer/template"
"github.com/hashicorp/packer/template/interpolate"
)
// Core is the main executor of Packer. If Packer is being used as a
// library, this is the struct you'll want to instantiate to get anything done.
type Core struct {
	Template *template.Template // parsed template that this Core executes

	components ComponentFinder              // lookup functions for builders, provisioners, post-processors
	variables  map[string]string            // resolved user variables (defaults filled in by init)
	builds     map[string]*template.Builder // builders keyed by their interpolated name (built in NewCore)
	version    string                       // Packer version, checked against the template's min_version
	secrets    []string                     // interpolated sensitive values, registered with LogSecretFilter
	except     []string                     // -except command-line values
	only       []string                     // -only command-line values
}
// CoreConfig is the structure for initializing a new Core. Once a CoreConfig
// is used to initialize a Core, it shouldn't be re-used or modified again.
type CoreConfig struct {
	Components ComponentFinder    // component lookup functions
	Template   *template.Template // parsed template to execute
	Variables  map[string]string  // user-supplied variable values
	// SensitiveVariables lists variable names to keep out of logs.
	// NOTE(review): not read anywhere in this file — init uses
	// Template.SensitiveVariables instead; confirm whether this field is
	// consumed elsewhere.
	SensitiveVariables []string
	Version            string // Packer version string

	// These are set by command-line flags
	Except []string
	Only   []string
}
// BuilderFunc is the function type used to look up a Builder
// implementation by name.
type BuilderFunc func(name string) (Builder, error)

// HookFunc is the function type used to look up a Hook implementation
// by name.
type HookFunc func(name string) (Hook, error)

// PostProcessorFunc is the function type used to look up a PostProcessor
// implementation by name.
type PostProcessorFunc func(name string) (PostProcessor, error)

// ProvisionerFunc is the function type used to look up a Provisioner
// implementation by name.
type ProvisionerFunc func(name string) (Provisioner, error)
// ComponentFinder is a struct that contains the various function
// pointers necessary to look up components of Packer such as builders,
// commands, etc.
type ComponentFinder struct {
	Builder       BuilderFunc       // resolves builder names to implementations
	Hook          HookFunc          // resolves hook names to implementations
	PostProcessor PostProcessorFunc // resolves post-processor names to implementations
	Provisioner   ProvisionerFunc   // resolves provisioner names to implementations
}
// NewCore creates a new Core from the given configuration. It validates
// the template, resolves variable defaults, registers sensitive values
// with the log filter, and indexes builders by their interpolated names.
func NewCore(c *CoreConfig) (*Core, error) {
	core := &Core{
		Template:   c.Template,
		components: c.Components,
		variables:  c.Variables,
		version:    c.Version,
		only:       c.Only,
		except:     c.Except,
	}

	// Validation must succeed before defaults are resolved.
	if err := core.validate(); err != nil {
		return nil, err
	}
	if err := core.init(); err != nil {
		return nil, err
	}

	// Every sensitive value is scrubbed from future log output.
	for _, secret := range core.secrets {
		LogSecretFilter.Set(secret)
	}

	// Go through and interpolate all the build names. We should be able
	// to do this at this point with the variables.
	core.builds = make(map[string]*template.Builder)
	for _, builder := range c.Template.Builders {
		name, err := interpolate.Render(builder.Name, core.Context())
		if err != nil {
			return nil, fmt.Errorf(
				"Error interpolating builder '%s': %s",
				builder.Name, err)
		}

		core.builds[name] = builder
	}

	return core, nil
}
// BuildNames returns the builds that are available in this configured
// core, sorted lexicographically.
func (c *Core) BuildNames() []string {
	names := make([]string, 0, len(c.builds))
	for name := range c.builds {
		names = append(names, name)
	}
	sort.Strings(names)

	return names
}
// Build returns the Build object for the given name. The name must be one
// of the interpolated build names produced by NewCore. The returned build
// has its builder, provisioners, and post-processor chains resolved
// through the component finder, with only/except filtering applied.
func (c *Core) Build(n string) (Build, error) {
	// Setup the builder
	configBuilder, ok := c.builds[n]
	if !ok {
		return nil, fmt.Errorf("no such build found: %s", n)
	}
	builder, err := c.components.Builder(configBuilder.Type)
	if err != nil {
		return nil, fmt.Errorf(
			"error initializing builder '%s': %s",
			configBuilder.Type, err)
	}
	if builder == nil {
		return nil, fmt.Errorf(
			"builder type not found: %s", configBuilder.Type)
	}

	// rawName is the uninterpolated name that we use for various lookups
	rawName := configBuilder.Name

	// Setup the provisioners for this build
	provisioners := make([]coreBuildProvisioner, 0, len(c.Template.Provisioners))
	for _, rawP := range c.Template.Provisioners {
		// If we're skipping this, then ignore it
		if rawP.OnlyExcept.Skip(rawName) {
			continue
		}

		// Get the provisioner
		provisioner, err := c.components.Provisioner(rawP.Type)
		if err != nil {
			return nil, fmt.Errorf(
				"error initializing provisioner '%s': %s",
				rawP.Type, err)
		}
		if provisioner == nil {
			return nil, fmt.Errorf(
				"provisioner type not found: %s", rawP.Type)
		}

		// Get the configuration: the base config plus an optional
		// per-build override.
		config := make([]interface{}, 1, 2)
		config[0] = rawP.Config
		if rawP.Override != nil {
			if override, ok := rawP.Override[rawName]; ok {
				config = append(config, override)
			}
		}

		// If we're pausing, we wrap the provisioner in a special pauser.
		if rawP.PauseBefore > 0 {
			provisioner = &PausedProvisioner{
				PauseBefore: rawP.PauseBefore,
				Provisioner: provisioner,
			}
		}

		provisioners = append(provisioners, coreBuildProvisioner{
			pType:       rawP.Type,
			provisioner: provisioner,
			config:      config,
		})
	}

	// Setup the post-processors
	postProcessors := make([][]coreBuildPostProcessor, 0, len(c.Template.PostProcessors))
	for _, rawPs := range c.Template.PostProcessors {
		current := make([]coreBuildPostProcessor, 0, len(rawPs))
		for _, rawP := range rawPs {
			if rawP.Skip(rawName) {
				continue
			}

			// -except skips post-processor & build. Empty -except entries
			// are ignored so unnamed post-processors are never matched.
			foundExcept := false
			for _, except := range c.except {
				if except != "" && except == rawP.Name {
					foundExcept = true
				}
			}
			if foundExcept {
				continue
			}

			// Get the post-processor
			postProcessor, err := c.components.PostProcessor(rawP.Type)
			if err != nil {
				return nil, fmt.Errorf(
					"error initializing post-processor '%s': %s",
					rawP.Type, err)
			}
			if postProcessor == nil {
				return nil, fmt.Errorf(
					"post-processor type not found: %s", rawP.Type)
			}

			current = append(current, coreBuildPostProcessor{
				processor:         postProcessor,
				processorType:     rawP.Type,
				config:            rawP.Config,
				keepInputArtifact: rawP.KeepInputArtifact,
			})
		}

		// If we have no post-processors in this chain, just continue.
		if len(current) == 0 {
			continue
		}

		postProcessors = append(postProcessors, current)
	}

	// TODO hooks one day

	return &coreBuild{
		name:           n,
		builder:        builder,
		builderConfig:  configBuilder.Config,
		builderType:    configBuilder.Type,
		postProcessors: postProcessors,
		provisioners:   provisioners,
		templatePath:   c.Template.Path,
		variables:      c.variables,
	}, nil
}
// Context returns an interpolation context populated with the template
// path and the currently resolved user variables.
func (c *Core) Context() *interpolate.Context {
	return &interpolate.Context{
		TemplatePath:  c.Template.Path,
		UserVariables: c.variables,
	}
}
// validate does a full validation of the template.
//
// This will automatically call template.validate() in addition to doing
// richer semantic checks around variables and so on.
func (c *Core) validate() error {
	// First validate the template in general, we can't do anything else
	// unless the template itself is valid.
	if err := c.Template.Validate(); err != nil {
		return err
	}

	// Validate the minimum version is satisfied
	if c.Template.MinVersion != "" {
		versionActual, err := version.NewVersion(c.version)
		if err != nil {
			// This shouldn't happen since we set it via the compiler
			panic(err)
		}

		versionMin, err := version.NewVersion(c.Template.MinVersion)
		if err != nil {
			return fmt.Errorf(
				"min_version is invalid: %s", err)
		}

		if versionActual.LessThan(versionMin) {
			return fmt.Errorf(
				"This template requires Packer version %s or higher; using %s",
				versionMin,
				versionActual)
		}
	}

	// Validate variables are set. Missing required variables are
	// accumulated with multierror so all of them are reported at once;
	// err stays nil when nothing is missing.
	var err error
	for n, v := range c.Template.Variables {
		if v.Required {
			if _, ok := c.variables[n]; !ok {
				err = multierror.Append(err, fmt.Errorf(
					"required variable not set: %s", n))
			}
		}
	}

	// TODO: validate all builders exist
	// TODO: ^^ provisioner
	// TODO: ^^ post-processor

	return err
}
// init resolves default values for non-required variables (with
// environment-variable interpolation enabled), collects interpolated
// sensitive values into c.secrets, and interpolates the push
// configuration. It must run after validate.
func (c *Core) init() error {
	if c.variables == nil {
		c.variables = make(map[string]string)
	}

	// Go through the variables and interpolate the environment variables.
	// UserVariables is cleared so defaults can't reference other user
	// variables while being rendered.
	ctx := c.Context()
	ctx.EnableEnv = true
	ctx.UserVariables = nil
	for k, v := range c.Template.Variables {
		// Ignore variables that are required
		if v.Required {
			continue
		}

		// Ignore variables that have a value
		if _, ok := c.variables[k]; ok {
			continue
		}

		// Interpolate the default
		def, err := interpolate.Render(v.Default, ctx)
		if err != nil {
			return fmt.Errorf(
				"error interpolating default value for '%s': %s",
				k, err)
		}

		c.variables[k] = def
	}
	// Interpolated sensitive values are remembered so NewCore can
	// register them with the log filter.
	for _, v := range c.Template.SensitiveVariables {
		def, err := interpolate.Render(v.Default, ctx)
		if err != nil {
			return fmt.Errorf(
				"error interpolating default value for '%#v': %s",
				v, err)
		}
		c.secrets = append(c.secrets, def)
	}

	// Interpolate the push configuration
	if _, err := interpolate.RenderInterface(&c.Template.Push, c.Context()); err != nil {
		return fmt.Errorf("Error interpolating 'push': %s", err)
	}

	return nil
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/elazarl/goproxy"
"github.com/ghodss/yaml"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
uexec "k8s.io/kubernetes/pkg/util/exec"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Images, label selectors, timeouts, and manifest filenames shared by the
// kubectl e2e tests below.
const (
	nautilusImage            = "gcr.io/google_containers/update-demo:nautilus"
	kittenImage              = "gcr.io/google_containers/update-demo:kitten"
	updateDemoSelector       = "name=update-demo"
	updateDemoContainer      = "update-demo"
	frontendSelector         = "app=guestbook,tier=frontend"
	redisMasterSelector      = "app=redis,role=master"
	redisSlaveSelector       = "app=redis,role=slave"
	goproxyContainer         = "goproxy"
	goproxyPodSelector       = "name=goproxy"
	netexecContainer         = "netexec"
	netexecPodSelector       = "name=netexec"
	kubectlProxyPort         = 8011
	guestbookStartupTimeout  = 10 * time.Minute
	guestbookResponseTimeout = 3 * time.Minute
	simplePodSelector        = "name=nginx"
	simplePodName            = "nginx"
	nginxDefaultOutput       = "Welcome to nginx!"
	simplePodPort            = 80
	pausePodSelector         = "name=pause"
	pausePodName             = "pause"
	runJobTimeout            = 5 * time.Minute
	busyboxImage             = "gcr.io/google_containers/busybox:1.24"
	nginxImage               = "gcr.io/google_containers/nginx-slim:0.7"
	newNginxImage            = "gcr.io/google_containers/nginx-slim:0.8"
	kubeCtlManifestPath      = "test/e2e/testing-manifests/kubectl"
	redisControllerFilename  = "redis-master-controller.json"
	redisServiceFilename     = "redis-master-service.json"
	nginxDeployment1Filename = "nginx-deployment1.yaml"
	nginxDeployment2Filename = "nginx-deployment2.yaml"
	nginxDeployment3Filename = "nginx-deployment3.yaml"
	redisImage               = "gcr.io/k8s-testimages/redis:e2e"
)
// Server/client version gates and shared fixtures for the kubectl tests.
var (
	// proxyRegexp extracts the port that `kubectl proxy` reports on stdout.
	proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")

	// Extended pod logging options were introduced in #13780 (v1.1.0) so we don't expect tests
	// that rely on extended pod logging options to work on clusters before that.
	//
	// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
	extendedPodLogFilterVersion = utilversion.MustParseSemantic("v1.1.0")

	// NodePorts were made optional in #12831 (v1.1.0) so we don't expect tests that used to
	// require NodePorts but no longer include them to work on clusters before that.
	//
	// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
	nodePortsOptionalVersion = utilversion.MustParseSemantic("v1.1.0")

	// Jobs were introduced in v1.1, so we don't expect tests that rely on jobs to work on
	// clusters before that.
	//
	// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
	jobsVersion = utilversion.MustParseSemantic("v1.1.0")

	// Deployments were introduced by default in v1.2, so we don't expect tests that rely on
	// deployments to work on clusters before that.
	//
	// TODO(ihmccreery): remove once we don't care about v1.1 anymore, (tentatively in v1.4).
	deploymentsVersion = utilversion.MustParseSemantic("v1.2.0-alpha.7.726")

	// Pod probe parameters were introduced in #15967 (v1.2) so we don't expect tests that use
	// these probe parameters to work on clusters before that.
	//
	// TODO(ihmccreery): remove once we don't care about v1.1 anymore, (tentatively in v1.4).
	podProbeParametersVersion = utilversion.MustParseSemantic("v1.2.0-alpha.4")

	// 'kubectl create quota' was introduced in #28351 (v1.4) so we don't expect tests that use
	// 'kubectl create quota' to work on kubectl clients before that.
	kubectlCreateQuotaVersion = utilversion.MustParseSemantic("v1.4.0-alpha.2")

	// Returning container command exit codes in kubectl run/exec was introduced in #26541 (v1.4)
	// so we don't expect tests that verifies return code to work on kubectl clients before that.
	kubectlContainerExitCodeVersion = utilversion.MustParseSemantic("v1.4.0-alpha.3")

	// Group/version/resource identifiers for the batch/v2alpha1 CronJob and
	// its deprecated ScheduledJob alias, used with SkipIfMissingResource.
	CronJobGroupVersionResource      = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
	ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
)
// cleanupKubectlInputs deletes everything described by fileContents from
// namespace ns and verifies that nothing matching the given selectors
// remains in that namespace. Aware of the kubectl example files map.
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
	By("using delete to clean up resources")

	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}

	// The raw manifest is piped to kubectl on stdin ("-f -") so the test
	// keeps no on-disk file dependency; force-delete with no grace period.
	framework.RunKubectlOrDieInput(fileContents, "delete", "--grace-period=0", "--force", "-f", "-", nsArg)
	framework.AssertCleanup(ns, selectors...)
}
func readTestFileOrDie(file string) []byte {
return generated.ReadOrDie(path.Join(kubeCtlManifestPath, file))
}
// runKubectlRetryOrDie runs kubectl with the given arguments, retrying up
// to five times (one second apart) on optimistic-lock conflicts, and fails
// the test if an error remains after the retries.
func runKubectlRetryOrDie(args ...string) string {
	var (
		output string
		err    error
	)
	for attempt := 0; attempt < 5; attempt++ {
		output, err = framework.RunKubectl(args...)
		if err == nil {
			break
		}
		// Only conflict-style errors are worth retrying.
		retryable := strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) ||
			strings.Contains(err.Error(), "Operation cannot be fulfilled")
		if !retryable {
			break
		}
		time.Sleep(time.Second)
	}
	// Expect no errors to be present after retries are finished
	// Copied from framework #ExecOrDie
	framework.Logf("stdout: %q", output)
	Expect(err).NotTo(HaveOccurred())
	return output
}
// duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
var _ = kubectlDescribe("Kubectl alpha client", func() {
	defer GinkgoRecover()
	f := framework.NewDefaultFramework("kubectl")

	// Client and namespace are refreshed from the framework before each spec.
	var c clientset.Interface
	var ns string
	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	framework.KubeDescribe("Kubectl run ScheduledJob", func() {
		var nsFlag string
		var sjName string

		BeforeEach(func() {
			nsFlag = fmt.Sprintf("--namespace=%v", ns)
			sjName = "e2e-test-echo-scheduledjob"
		})

		AfterEach(func() {
			// ScheduledJobs are served through the cronjobs resource.
			framework.RunKubectlOrDie("delete", "cronjobs", sjName, nsFlag)
		})

		It("should create a ScheduledJob", func() {
			framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)

			schedule := "*/5 * * * ?"
			framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
			By("verifying the ScheduledJob " + sjName + " was created")
			sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
			}
			if sj.Spec.Schedule != schedule {
				framework.Failf("Failed creating a ScheduledJob with correct schedule %s, but got %s", schedule, sj.Spec.Schedule)
			}
			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
				framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
			}
			restartPolicy := sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy
			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
				framework.Failf("Failed creating a ScheduledJob with correct restart policy %s, but got %s", v1.RestartPolicyOnFailure, restartPolicy)
			}
		})
	})

	framework.KubeDescribe("Kubectl run CronJob", func() {
		var nsFlag string
		var cjName string

		BeforeEach(func() {
			nsFlag = fmt.Sprintf("--namespace=%v", ns)
			cjName = "e2e-test-echo-cronjob"
		})

		AfterEach(func() {
			framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag)
		})

		It("should create a CronJob", func() {
			framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)

			schedule := "*/5 * * * ?"
			framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
			By("verifying the CronJob " + cjName + " was created")
			sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting CronJob %s: %v", cjName, err)
			}
			if sj.Spec.Schedule != schedule {
				framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
			}
			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
				framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
			}
			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
				framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
			}
		})
	})
})
var _ = kubectlDescribe("Kubectl client", func() {
defer GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
// Reustable cluster state function. This won't be adversly affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{"app": "redis"},
ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
// test files.
// Print debug info if atLeast Pods are not found before the timeout
waitForOrFailWithDebug := func(atLeast int) {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
}
framework.KubeDescribe("Update Demo", func() {
var nautilus, kitten []byte
BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
nautilus = generated.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml"))
kitten = generated.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml"))
})
It("should create and stop a replication controller [Conformance]", func() {
defer cleanupKubectlInputs(string(nautilus), ns, updateDemoSelector)
By("creating a replication controller")
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
It("should scale a replication controller [Conformance]", func() {
defer cleanupKubectlInputs(string(nautilus[:]), ns, updateDemoSelector)
By("creating a replication controller")
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
It("should do a rolling update of a replication controller [Conformance]", func() {
By("creating the initial replication controller")
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("rolling-update to new replication controller")
framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns)
// Everything will hopefully be cleaned up when the namespace is deleted.
})
})
framework.KubeDescribe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
for _, gbAppFile := range []string{
"examples/guestbook/frontend-deployment.yaml",
"examples/guestbook/frontend-service.yaml",
"examples/guestbook/redis-master-deployment.yaml",
"examples/guestbook/redis-master-service.yaml",
"examples/guestbook/redis-slave-deployment.yaml",
"examples/guestbook/redis-slave-service.yaml",
} {
contents := generated.ReadOrDie(gbAppFile)
run(string(contents))
}
}
It("should create and stop a working application [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
})
By("validating guestbook app")
validateGuestbookApp(c, ns)
})
})
framework.KubeDescribe("Simple pod", func() {
var podPath []byte
BeforeEach(func() {
podPath = generated.ReadOrDie(path.Join(kubeCtlManifestPath, "pod-with-readiness-probe.yaml"))
By(fmt.Sprintf("creating the pod from %v", string(podPath)))
framework.RunKubectlOrDieInput(string(podPath[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue())
})
AfterEach(func() {
cleanupKubectlInputs(string(podPath[:]), ns, simplePodSelector)
})
It("should support exec", func() {
By("executing a command in the container")
execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
By("executing a very long command in the container")
veryLongData := make([]rune, 20000)
for i := 0; i < len(veryLongData); i++ {
veryLongData[i] = 'a'
}
execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData))
Expect(string(veryLongData)).To(Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output")
By("executing a command in the container with noninteractive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat").
WithStdinData("abcd1234").
ExecOrDie()
if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
// pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil {
framework.Failf("Error creating blocking reader: %v", err)
}
// NOTE this is solely for test cleanup!
defer closer.Close()
By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash").
WithStdinReader(r).
ExecOrDie()
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
// Verifies kubectl exec works when routed through an HTTP(S) proxy, for both
// the lower- and upper-case proxy environment variable spellings.
It("should support exec through an HTTP proxy", func() {
	// Note: We are skipping local since we want to verify an apiserver with HTTPS.
	// At this time local only supports plain HTTP.
	framework.SkipIfProviderIs("local")
	// Fail if the variable isn't set
	if framework.TestContext.Host == "" {
		framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
	}

	By("Starting goproxy")
	testSrv, proxyLogs := startLocalProxy()
	defer testSrv.Close()
	proxyAddr := testSrv.URL

	for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
		proxyLogs.Reset()
		By("Running kubectl via an HTTP proxy using " + proxyVar)
		output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container").
			WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
			ExecOrDie()

		// Verify we got the normal output captured by the exec server
		expectedExecOutput := "running in container\n"
		if output != expectedExecOutput {
			framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
		}

		// Verify the proxy server logs saw the connection.
		// BUGFIX: strings.TrimLeft/TrimRight treat their second argument as a
		// *cutset* of characters, not a literal prefix/suffix, so the original
		// could strip arbitrary leading 'h','t','p','s',':','/' characters from
		// the host. TrimPrefix/TrimSuffix remove the exact scheme and "/api".
		expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api"))

		proxyLog := proxyLogs.String()
		if !strings.Contains(proxyLog, expectedProxyLog) {
			framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
		}
	}
})
// Verifies that kubectl exec and kubectl run propagate the container command's
// exit status back to the caller, across the --restart / --rm /
// --leave-stdin-open permutations.
It("should return command exit codes", func() {
	framework.SkipUnlessKubectlVersionGTE(kubectlContainerExitCodeVersion)
	nsFlag := fmt.Sprintf("--namespace=%v", ns)

	By("execing into a container with a successful command")
	_, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec()
	framework.ExpectNoError(err)

	By("execing into a container with a failing command")
	_, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec()
	// The failure must surface as a uexec.ExitError carrying the container's
	// exit status verbatim.
	ee, ok := err.(uexec.ExitError)
	Expect(ok).To(Equal(true))
	Expect(ee.ExitStatus()).To(Equal(42))

	By("running a successful command")
	_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
	framework.ExpectNoError(err)

	By("running a failing command")
	_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
	ee, ok = err.(uexec.ExitError)
	Expect(ok).To(Equal(true))
	Expect(ee.ExitStatus()).To(Equal(42))

	By("running a failing command without --restart=Never")
	// With --restart=OnFailure, kubectl itself is expected to exit without
	// error even though the container command fails (the pod will be retried).
	_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
		WithStdinData("abcd1234").
		Exec()
	framework.ExpectNoError(err)

	By("running a failing command without --restart=Never, but with --rm")
	_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
		WithStdinData("abcd1234").
		Exec()
	framework.ExpectNoError(err)
	// --rm should delete the pod once it terminates; wait for that to happen.
	framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)

	By("running a failing command with --leave-stdin-open")
	_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
		WithStdinData("abcd1234").
		Exec()
	framework.ExpectNoError(err)
})
// Verifies `kubectl run --attach` semantics: stdin forwarding, attach without
// stdin, and that --leave-stdin-open keeps the container running.
It("should support inline execution and attach", func() {
	framework.SkipIfContainerRuntimeIs("rkt") // #23335
	framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
	nsFlag := fmt.Sprintf("--namespace=%v", ns)

	By("executing a command with run and attach with stdin")
	runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		WithStdinData("abcd1234").
		ExecOrDie()
	Expect(runOutput).To(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Batch().Jobs(ns).Delete("run-test", nil)).To(BeNil())

	By("executing a command with run and attach without stdin")
	runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
		WithStdinData("abcd1234").
		ExecOrDie()
	// Without --stdin the supplied data must NOT reach the container.
	Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
	Expect(runOutput).To(ContainSubstring("stdin closed"))
	Expect(c.Batch().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())

	By("executing a command with run and attach with stdin with open stdin should remain running")
	runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
		WithStdinData("abcd1234\n").
		ExecOrDie()
	Expect(runOutput).ToNot(ContainSubstring("stdin closed"))

	g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
	runTestPod, _, err := util.GetFirstPod(f.InternalClientset.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
	// BUGFIX: the original called os.Exit(1) here, which aborts the entire
	// test binary and bypasses all framework/namespace cleanup. Report the
	// failure through the framework instead so teardown still runs.
	if err != nil {
		framework.Failf("Error getting pod for run-test-3: %v", err)
	}
	if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
		framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
	}

	// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
	// to loop test.
	err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
			framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
		}
		logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
		Expect(logOutput).ToNot(ContainSubstring("stdin closed"))
		return strings.Contains(logOutput, "abcd1234"), nil
	})
	// BUGFIX: likewise, the original wrapped this in `if err != nil { os.Exit(1) }`
	// before the assertion; the gomega assertion alone reports the error properly.
	Expect(err).To(BeNil())
	Expect(c.Batch().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
})
// Verifies `kubectl port-forward` by forwarding the test pod's port to a
// local port and curling it for the expected nginx default page.
It("should support port-forward", func() {
	By("forwarding the container port to a local port")
	pf := runPortForward(ns, simplePodName, simplePodPort)
	defer pf.Stop()

	By("curling local port output")
	endpoint := fmt.Sprintf("http://localhost:%d", pf.port)
	resp, curlErr := curl(endpoint)
	framework.Logf("got: %s", resp)
	if curlErr != nil {
		framework.Failf("Failed http.Get of forwarded port (%s): %v", endpoint, curlErr)
	}
	if !strings.Contains(resp, nginxDefaultOutput) {
		framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, resp)
	}
})
// Verifies kubectl's in-cluster-config behavior by copying the kubectl binary
// into a pod and running it there with various overrides (flags, kubeconfig,
// invalid token/server/namespace).
It("should handle in-cluster config", func() {
	By("adding rbac permissions")
	// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
	framework.BindClusterRole(f.ClientSet.Rbac(), "view", f.Namespace.Name,
		rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})

	err := framework.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
		serviceaccount.MakeUsername(f.Namespace.Name, "default"),
		f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
	framework.ExpectNoError(err)

	By("overriding icc with values provided by flags")
	kubectlPath := framework.TestContext.KubectlPath
	// we need the actual kubectl binary, not the script wrapper
	kubectlPathNormalizer := exec.Command("which", kubectlPath)
	if strings.HasSuffix(kubectlPath, "kubectl.sh") {
		kubectlPathNormalizer = exec.Command(kubectlPath, "path")
	}
	kubectlPathNormalized, err := kubectlPathNormalizer.Output()
	framework.ExpectNoError(err)
	kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))

	inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
	inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
	framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
	framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")

	// Build a kubeconfig file that will make use of the injected ca and token,
	// but point at the DNS host and the default namespace
	tmpDir, err := ioutil.TempDir("", "icc-override")
	overrideKubeconfigName := "icc-override.kubeconfig"
	framework.ExpectNoError(err)
	// BUGFIX: os.Remove fails on a non-empty directory (and its error was
	// silently dropped), leaking the temp dir and the files written below.
	// os.RemoveAll deletes the directory and its contents.
	defer func() { os.RemoveAll(tmpDir) }()
	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
    api-version: v1
    server: https://kubernetes.default.svc:443
    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  name: kubeconfig-cluster
contexts:
- context:
    cluster: kubeconfig-cluster
    namespace: default
    user: kubeconfig-user
  name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
	framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
	framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")

	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap with namespace and invalid name"
  namespace: configmap-namespace
`), os.FileMode(0755)))
	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
	framework.Logf("copying configmap manifests to the %s pod", simplePodName)
	framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
	framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")

	By("getting pods with in-cluster configs")
	execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=7 2>&1")
	Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running"))
	Expect(execOutput).To(ContainSubstring("Using in-cluster namespace"))
	Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))

	By("creating an object containing a namespace with in-cluster config")
	// NOTE(review): these matchers run against the error value returned by
	// RunHostCmd — presumably its message embeds the command output; confirm
	// gomega's ContainSubstring accepts this actual type.
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=7 2>&1")
	Expect(err).To(ContainSubstring("Using in-cluster namespace"))
	Expect(err).To(ContainSubstring("Using in-cluster configuration"))
	Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort)))

	By("creating an object not containing a namespace with in-cluster config")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=7 2>&1")
	Expect(err).To(ContainSubstring("Using in-cluster namespace"))
	Expect(err).To(ContainSubstring("Using in-cluster configuration"))
	Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name)))

	By("trying to use kubectl with invalid token")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
	framework.Logf("got err %v", err)
	Expect(err).To(HaveOccurred())
	Expect(err).To(ContainSubstring("Using in-cluster namespace"))
	Expect(err).To(ContainSubstring("Using in-cluster configuration"))
	Expect(err).To(ContainSubstring("Authorization: Bearer invalid"))
	Expect(err).To(ContainSubstring("Response Status: 401 Unauthorized"))

	By("trying to use kubectl with invalid server")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
	framework.Logf("got err %v", err)
	Expect(err).To(HaveOccurred())
	Expect(err).To(ContainSubstring("Unable to connect to the server"))
	Expect(err).To(ContainSubstring("GET http://invalid/api"))

	By("trying to use kubectl with invalid namespace")
	execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
	Expect(execOutput).To(ContainSubstring("No resources found"))
	Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
	Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
	Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))

	By("trying to use kubectl with kubeconfig")
	execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
	Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
	Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration"))
	Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
})
})
// Conformance: `kubectl api-versions` must list the core "v1" API.
framework.KubeDescribe("Kubectl api-versions", func() {
	It("should check if v1 is in available api versions [Conformance]", func() {
		// Fixed typo in the progress message: "verions" -> "versions".
		By("validating api versions")
		output := framework.RunKubectlOrDie("api-versions")
		if !strings.Contains(output, "v1") {
			framework.Failf("No v1 in kubectl api-versions")
		}
	})
})
// Exercises `kubectl apply`: re-applying configuration to an existing RC,
// port stability on re-apply to a service, and set/view-last-applied.
framework.KubeDescribe("Kubectl apply", func() {
	It("should apply a new configuration to an existing RC", func() {
		controllerJson := readTestFileOrDie(redisControllerFilename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		By("creating Redis RC")
		framework.RunKubectlOrDieInput(string(controllerJson), "create", "-f", "-", nsFlag)
		By("applying a modified configuration")
		stdin := modifyReplicationControllerConfiguration(string(controllerJson))
		framework.NewKubectlCommand("apply", "-f", "-", nsFlag).
			WithStdinReader(stdin).
			ExecOrDie()
		By("checking the result")
		forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration)
	})

	It("should reuse port when apply to an existing SVC", func() {
		serviceJson := readTestFileOrDie(redisServiceFilename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		By("creating Redis SVC")
		framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
		By("getting the original port")
		originalNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
		By("applying the same configuration")
		framework.RunKubectlOrDieInput(string(serviceJson[:]), "apply", "-f", "-", nsFlag)
		By("getting the port after applying configuration")
		currentNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
		By("checking the result")
		// Re-applying an unchanged service must not change its port.
		if originalNodePort != currentNodePort {
			framework.Failf("port should keep the same")
		}
	})

	It("apply set/view last-applied", func() {
		deployment1Yaml := readTestFileOrDie(nginxDeployment1Filename)
		deployment2Yaml := readTestFileOrDie(nginxDeployment2Filename)
		deployment3Yaml := readTestFileOrDie(nginxDeployment3Filename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)

		By("deployment replicas number is 2")
		framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "-f", "-", nsFlag)

		By("check the last-applied matches expectations annotations")
		output := framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
		requiredString := "\"replicas\": 2"
		if !strings.Contains(output, requiredString) {
			framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
		}

		By("apply file doesn't have replicas")
		framework.RunKubectlOrDieInput(string(deployment2Yaml[:]), "apply", "set-last-applied", "-f", "-", nsFlag)

		By("check last-applied has been updated, annotations doesn't replicas")
		output = framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
		requiredString = "\"replicas\": 2"
		if strings.Contains(output, requiredString) {
			// BUGFIX: this branch fires when the string is unexpectedly PRESENT
			// after set-last-applied; the original message incorrectly said
			// "Missing", which made the failure misleading to diagnose.
			framework.Failf("Unexpected %s in kubectl view-last-applied", requiredString)
		}

		By("scale set replicas to 3")
		nginxDeploy := "nginx-deployment"
		framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag)

		By("apply file doesn't have replicas but image changed")
		framework.RunKubectlOrDieInput(string(deployment3Yaml[:]), "apply", "-f", "-", nsFlag)

		By("verify replicas still is 3 and image has been updated")
		output = framework.RunKubectlOrDieInput(string(deployment3Yaml[:]), "get", "-f", "-", nsFlag, "-o", "json")
		requiredItems := []string{"\"replicas\": 3", "nginx-slim:0.7"}
		for _, item := range requiredItems {
			if !strings.Contains(output, item) {
				framework.Failf("Missing %s in kubectl apply", item)
			}
		}
	})
})
// Conformance: `kubectl cluster-info` must mention the master services
// (plus KubeDNS/Heapster on GCE/GKE providers).
framework.KubeDescribe("Kubectl cluster-info", func() {
	It("should check if Kubernetes master services is included in cluster-info [Conformance]", func() {
		By("validating cluster-info")
		out := framework.RunKubectlOrDie("cluster-info")
		// Can't check exact strings due to terminal control commands (colors)
		expected := []string{"Kubernetes master", "is running at"}
		if framework.ProviderIs("gce", "gke") {
			expected = append(expected, "KubeDNS", "Heapster")
		}
		for _, want := range expected {
			if !strings.Contains(out, want) {
				framework.Failf("Missing %s in kubectl cluster-info", want)
			}
		}
	})
})
// Verifies that `kubectl describe` output for pods, RCs, services, nodes, and
// namespaces contains the expected fields, using the Redis fixtures.
framework.KubeDescribe("Kubectl describe", func() {
	It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() {
		framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c.Discovery())
		// Also require the server to be at least the local kubectl's version,
		// so describe output matches what this kubectl expects.
		kv, err := framework.KubectlVersion()
		Expect(err).NotTo(HaveOccurred())
		framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
		controllerJson := readTestFileOrDie(redisControllerFilename)
		serviceJson := readTestFileOrDie(redisServiceFilename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		framework.RunKubectlOrDieInput(string(controllerJson[:]), "create", "-f", "-", nsFlag)
		framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
		By("Waiting for Redis master to start.")
		waitForOrFailWithDebug(1)
		// Pod
		forEachPod(func(pod v1.Pod) {
			output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag)
			// Each inner slice holds substrings expected in the describe output
			// (presumably together on one line — see checkOutput for semantics).
			requiredStrings := [][]string{
				{"Name:", "redis-master-"},
				{"Namespace:", ns},
				{"Node:"},
				{"Labels:", "app=redis"},
				{"role=master"},
				{"Annotations:"},
				{"Status:", "Running"},
				{"IP:"},
				{"Created By:", "ReplicationController/redis-master"},
				{"Controlled By:", "ReplicationController/redis-master"},
				{"Image:", redisImage},
				{"State:", "Running"},
				{"QoS Class:", "BestEffort"},
			}
			checkOutput(output, requiredStrings)
		})
		// Rc
		requiredStrings := [][]string{
			{"Name:", "redis-master"},
			{"Namespace:", ns},
			{"Selector:", "app=redis,role=master"},
			{"Labels:", "app=redis"},
			{"role=master"},
			{"Annotations:"},
			{"Replicas:", "1 current", "1 desired"},
			{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
			{"Pod Template:"},
			{"Image:", redisImage},
			{"Events:"}}
		// RC status may lag pod creation, so this check retries.
		checkKubectlOutputWithRetry(requiredStrings, "describe", "rc", "redis-master", nsFlag)
		// Service
		output := framework.RunKubectlOrDie("describe", "service", "redis-master", nsFlag)
		requiredStrings = [][]string{
			{"Name:", "redis-master"},
			{"Namespace:", ns},
			{"Labels:", "app=redis"},
			{"role=master"},
			{"Annotations:"},
			{"Selector:", "app=redis", "role=master"},
			{"Type:", "ClusterIP"},
			{"IP:"},
			{"Port:", "<unset>", "6379/TCP"},
			{"Endpoints:"},
			{"Session Affinity:", "None"}}
		checkOutput(output, requiredStrings)
		// Node
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		node := nodes.Items[0]
		output = framework.RunKubectlOrDie("describe", "node", node.Name)
		requiredStrings = [][]string{
			{"Name:", node.Name},
			{"Labels:"},
			{"Annotations:"},
			{"CreationTimestamp:"},
			{"Conditions:"},
			{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
			{"Addresses:"},
			{"Capacity:"},
			{"Version:"},
			{"Kernel Version:"},
			{"OS Image:"},
			{"Container Runtime Version:"},
			{"Kubelet Version:"},
			{"Kube-Proxy Version:"},
			{"Pods:"}}
		checkOutput(output, requiredStrings)
		// Namespace
		output = framework.RunKubectlOrDie("describe", "namespace", ns)
		requiredStrings = [][]string{
			{"Name:", ns},
			{"Labels:"},
			{"Annotations:"},
			{"Status:", "Active"}}
		checkOutput(output, requiredStrings)
		// Quota and limitrange are skipped for now.
	})
})
// Verifies `kubectl expose` for an RC and for an existing service, checking
// that the resulting endpoints and service ports match the requested values.
framework.KubeDescribe("Kubectl expose", func() {
	It("should create services for rc [Conformance]", func() {
		controllerJson := readTestFileOrDie(redisControllerFilename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)
		redisPort := 6379
		By("creating Redis RC")
		framework.Logf("namespace %v", ns)
		framework.RunKubectlOrDieInput(string(controllerJson[:]), "create", "-f", "-", nsFlag)
		// It may take a while for the pods to get registered in some cases, wait to be sure.
		By("Waiting for Redis master to start.")
		waitForOrFailWithDebug(1)
		forEachPod(func(pod v1.Pod) {
			framework.Logf("wait on redis-master startup in %v ", ns)
			framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout)
		})
		// validateService polls until the named service has exactly one
		// endpoint exposing redisPort, then checks the service's declared
		// port and targetPort against the expected values.
		validateService := func(name string, servicePort int, timeout time.Duration) {
			err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
				endpoints, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
				if err != nil {
					// log the real error
					framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
					// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
					if apierrs.IsNotFound(err) ||
						apierrs.IsUnauthorized(err) ||
						apierrs.IsServerTimeout(err) {
						err = nil
					}
					return false, err
				}
				uidToPort := framework.GetContainerPortsByPodUID(endpoints)
				if len(uidToPort) == 0 {
					framework.Logf("No endpoint found, retrying")
					return false, nil
				}
				if len(uidToPort) > 1 {
					framework.Failf("Too many endpoints found")
				}
				for _, port := range uidToPort {
					if port[0] != redisPort {
						framework.Failf("Wrong endpoint port: %d", port[0])
					}
				}
				return true, nil
			})
			Expect(err).NotTo(HaveOccurred())
			service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			if len(service.Spec.Ports) != 1 {
				framework.Failf("1 port is expected")
			}
			port := service.Spec.Ports[0]
			if port.Port != int32(servicePort) {
				framework.Failf("Wrong service port: %d", port.Port)
			}
			if port.TargetPort.IntValue() != redisPort {
				framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
			}
		}
		By("exposing RC")
		framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
		framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
		validateService("rm2", 1234, framework.ServiceStartTimeout)
		By("exposing service")
		// Expose the service created above, chaining rm2 -> rm3.
		framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
		framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
		validateService("rm3", 2345, framework.ServiceStartTimeout)
	})
})
// Conformance: `kubectl label` must be able to add and then remove a label on
// a pod, observable via `kubectl get pod -L`.
framework.KubeDescribe("Kubectl label", func() {
	var podYaml []byte
	var nsFlag string

	BeforeEach(func() {
		podYaml = readTestFileOrDie("pause-pod.yaml")
		By("creating the pod")
		nsFlag = fmt.Sprintf("--namespace=%v", ns)
		framework.RunKubectlOrDieInput(string(podYaml), "create", "-f", "-", nsFlag)
		Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue())
	})
	AfterEach(func() {
		cleanupKubectlInputs(string(podYaml), ns, pausePodSelector)
	})

	It("should update the label on a resource [Conformance]", func() {
		const labelName = "testing-label"
		const labelValue = "testing-label-value"

		By("adding the label " + labelName + " with value " + labelValue + " to a pod")
		framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag)
		By("verifying the pod has the label " + labelName + " with the value " + labelValue)
		out := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
		if !strings.Contains(out, labelValue) {
			framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
		}

		By("removing the label " + labelName + " of a pod")
		framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag)
		By("verifying the pod doesn't have the label " + labelName)
		out = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
		if strings.Contains(out, labelValue) {
			framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
		}
	})
})
// Exercises `kubectl log` filtering flags (--tail, --limit-bytes,
// --timestamps, --since) against the Redis master container.
framework.KubeDescribe("Kubectl logs", func() {
	var rc []byte
	var nsFlag string
	containerName := "redis-master"
	BeforeEach(func() {
		rc = readTestFileOrDie(redisControllerFilename)
		By("creating an rc")
		nsFlag = fmt.Sprintf("--namespace=%v", ns)
		framework.RunKubectlOrDieInput(string(rc[:]), "create", "-f", "-", nsFlag)
	})
	AfterEach(func() {
		cleanupKubectlInputs(string(rc[:]), ns, simplePodSelector)
	})
	It("should be able to retrieve and filter logs [Conformance]", func() {
		framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c.Discovery())
		// Split("something\n", "\n") returns ["something", ""], so
		// strip trailing newline first
		lines := func(out string) []string {
			return strings.Split(strings.TrimRight(out, "\n"), "\n")
		}
		By("Waiting for Redis master to start.")
		waitForOrFailWithDebug(1)
		forEachPod(func(pod v1.Pod) {
			By("checking for a matching strings")
			_, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
			Expect(err).NotTo(HaveOccurred())
			By("limiting log lines")
			out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1")
			Expect(len(out)).NotTo(BeZero())
			Expect(len(lines(out))).To(Equal(1))
			By("limiting log bytes")
			// --limit-bytes=1 must yield exactly one byte (and hence one line).
			out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1")
			Expect(len(lines(out))).To(Equal(1))
			Expect(len(out)).To(Equal(1))
			By("exposing timestamps")
			out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps")
			l := lines(out)
			Expect(len(l)).To(Equal(1))
			// The first space-separated token must parse as RFC3339 or RFC3339Nano.
			words := strings.Split(l[0], " ")
			Expect(len(words)).To(BeNumerically(">", 1))
			if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
				if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
					framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
				}
			}
			By("restricting to a time range")
			// Note: we must wait at least two seconds,
			// because the granularity is only 1 second and
			// it could end up rounding the wrong way.
			time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
			recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s")
			recent := len(strings.Split(recent_out, "\n"))
			older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h")
			older := len(strings.Split(older_out, "\n"))
			Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out)
		})
	})
})
// Conformance: `kubectl patch` must be able to add an annotation to every pod
// of an RC, and the annotation must then be visible on the pod objects.
framework.KubeDescribe("Kubectl patch", func() {
	It("should add annotations for pods in rc [Conformance]", func() {
		controllerJson := readTestFileOrDie(redisControllerFilename)
		nsFlag := fmt.Sprintf("--namespace=%v", ns)

		By("creating Redis RC")
		framework.RunKubectlOrDieInput(string(controllerJson), "create", "-f", "-", nsFlag)
		By("Waiting for Redis master to start.")
		waitForOrFailWithDebug(1)

		By("patching all pods")
		forEachPod(func(pod v1.Pod) {
			framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
		})

		By("checking annotations")
		forEachPod(func(pod v1.Pod) {
			// A direct map lookup is equivalent to scanning for the key/value
			// pair, since annotation keys are unique.
			if val, ok := pod.Annotations["x"]; !ok || val != "y" {
				framework.Failf("Added annotation not found")
			}
		})
	})
})
// Conformance: `kubectl version` must print both client and server version
// details, including major/minor numbers and the git commit.
framework.KubeDescribe("Kubectl version", func() {
	It("should check is all data is printed [Conformance]", func() {
		versionOutput := framework.RunKubectlOrDie("version")
		for _, want := range []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} {
			if !strings.Contains(versionOutput, want) {
				framework.Failf("Required item %s not found in %s", want, versionOutput)
			}
		}
	})
})
// Verifies that plain `kubectl run` creates a deployment on servers that
// support deployments, or an RC on older servers, and that the resulting pod
// runs the requested image.
framework.KubeDescribe("Kubectl run default", func() {
	var nsFlag string
	var name string
	var cleanUp func()
	BeforeEach(func() {
		nsFlag = fmt.Sprintf("--namespace=%v", ns)
		gte, err := framework.ServerVersionGTE(deploymentsVersion, c.Discovery())
		if err != nil {
			framework.Failf("Failed to get server version: %v", err)
		}
		// Pick the resource kind `run` will generate on this server version,
		// so AfterEach deletes the matching object.
		if gte {
			name = "e2e-test-nginx-deployment"
			cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) }
		} else {
			name = "e2e-test-nginx-rc"
			cleanUp = func() { framework.RunKubectlOrDie("delete", "rc", name, nsFlag) }
		}
	})
	AfterEach(func() {
		cleanUp()
	})
	It("should create an rc or deployment from an image [Conformance]", func() {
		By("running the image " + nginxImage)
		framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag)
		By("verifying the pod controlled by " + name + " gets created")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
		podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
		if err != nil {
			framework.Failf("Failed getting pod controlled by %s: %v", name, err)
		}
		pods := podlist.Items
		// Expect exactly one pod with exactly one container running nginxImage;
		// dump the pod list on failure to aid debugging.
		if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
			framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
			framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
		}
	})
})
// Verifies `kubectl run --generator=run/v1` creates an RC whose pod runs the
// requested image, and that `kubectl logs rc/<name>` can reach its pods.
framework.KubeDescribe("Kubectl run rc", func() {
	var nsFlag string
	var rcName string
	BeforeEach(func() {
		nsFlag = fmt.Sprintf("--namespace=%v", ns)
		rcName = "e2e-test-nginx-rc"
	})
	AfterEach(func() {
		framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
	})
	It("should create an rc from an image [Conformance]", func() {
		By("running the image " + nginxImage)
		framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
		By("verifying the rc " + rcName + " was created")
		rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting rc %s: %v", rcName, err)
		}
		// The RC template must carry exactly one container with the nginx image.
		containers := rc.Spec.Template.Spec.Containers
		if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
			framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
		}
		By("verifying the pod controlled by rc " + rcName + " was created")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
		podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
		if err != nil {
			framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
		}
		pods := podlist.Items
		// Expect exactly one pod with one container running nginxImage;
		// dump the pod list on failure to aid debugging.
		if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
			framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
			framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
		}
		By("confirm that you can get logs from an rc")
		podNames := []string{}
		for _, pod := range pods {
			podNames = append(podNames, pod.Name)
		}
		if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
			framework.Failf("Pods for rc %s were not ready", rcName)
		}
		_, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
		// a non-nil error is fine as long as we actually found a pod.
		if err != nil && !strings.Contains(err.Error(), " in pod ") {
			framework.Failf("Failed getting logs by rc %s: %v", rcName, err)
		}
	})
})
// Verifies `kubectl rolling-update` succeeds when updating an RC to the same
// image it already runs.
framework.KubeDescribe("Kubectl rolling-update", func() {
	var nsFlag string
	var rcName string
	var c clientset.Interface
	BeforeEach(func() {
		c = f.ClientSet
		nsFlag = fmt.Sprintf("--namespace=%v", ns)
		rcName = "e2e-test-nginx-rc"
	})
	AfterEach(func() {
		framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
	})
	It("should support rolling-update to same image [Conformance]", func() {
		By("running the image " + nginxImage)
		framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
		By("verifying the rc " + rcName + " was created")
		rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting rc %s: %v", rcName, err)
		}
		// The RC template must carry exactly one container with the nginx image.
		containers := rc.Spec.Template.Spec.Containers
		if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
			framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
		}
		// Let the RC's pods settle before starting the rolling update.
		framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
		By("rolling-update to same image controller")
		// runKubectlRetryOrDie retries the command — presumably to tolerate
		// transient kubectl failures; see its definition.
		runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
		framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
	})
})
// Verifies `kubectl run --generator=deployment/v1beta1`: the deployment and
// its single pod must exist with the requested image.
framework.KubeDescribe("Kubectl run deployment", func() {
var nsFlag string
var dName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
dName = "e2e-test-nginx-deployment"
})
AfterEach(func() {
// Deletion is polled because it can transiently fail.
err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag)
if err != nil {
if strings.Contains(err.Error(), "could not find default credentials") {
err = nil
}
// NOTE(review): returning a non-nil error here aborts wait.Poll even in
// the credentials case where err was just cleared — confirm intended.
return false, fmt.Errorf("kubectl delete failed output: %s, err: %v", out, err)
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
})
It("should create a deployment from an image [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
By("verifying the deployment " + dName + " was created")
d, err := c.Extensions().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", dName, err)
}
containers := d.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
}
By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
// Dump the pod list to the log before failing, to aid debugging.
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
}
})
})
// Verifies `kubectl run --restart=OnFailure --generator=job/v1` creates a Job
// with the requested image and the OnFailure restart policy.
framework.KubeDescribe("Kubectl run job", func() {
var nsFlag string
var jobName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
jobName = "e2e-test-nginx-job"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag)
})
It("should create a job from an image when restart is OnFailure [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created")
job, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
}
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
}
})
})
// Verifies `kubectl run --restart=Never --generator=run-pod/v1` creates a
// bare pod with the requested image and the Never restart policy.
framework.KubeDescribe("Kubectl run pod", func() {
var nsFlag string
var podName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podName = "e2e-test-nginx-pod"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
})
It("should create a pod from an image when restart is Never [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag)
By("verifying the pod " + podName + " was created")
pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
}
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
}
})
})
// Verifies `kubectl replace`: fetch a running pod's JSON, swap its image
// (nginx -> busybox) in the serialized form, replace it, and confirm the
// stored pod now carries the new image.
framework.KubeDescribe("Kubectl replace", func() {
var nsFlag string
var podName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podName = "e2e-test-nginx-pod"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
})
It("should update a single-container pod's image [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag)
By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
By("verifying the pod " + podName + " was created")
podJson := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json")
if !strings.Contains(podJson, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJson)
}
By("replace the image in the pod")
// Textual substitution on the JSON; replaces only the first occurrence.
podJson = strings.Replace(podJson, nginxImage, busyboxImage, 1)
framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag)
By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err)
}
containers := pod.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
framework.Failf("Failed creating pod with expected image %s", busyboxImage)
}
})
})
// Verifies `kubectl run --rm --attach --stdin`: stdin is echoed back through
// the attached job and the job object is removed once the run finishes.
framework.KubeDescribe("Kubectl run --rm job", func() {
jobName := "e2e-test-rm-busybox-job"
It("should create a job from an image, then delete the job [Conformance]", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
// The rkt runtime doesn't support attach, see #23335
framework.SkipIfContainerRuntimeIs("rkt")
framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("executing a command with run --rm and attach with stdin")
// Bound the whole attached run with a timer so a hung attach fails fast.
t := time.NewTimer(runJobTimeout)
defer t.Stop()
runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
WithTimeout(t.C).
ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
By("verifying the job " + jobName + " was deleted")
_, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(apierrs.IsNotFound(err)).To(BeTrue())
})
})
// Verifies `kubectl proxy` both on a random TCP port (--port 0) and on a
// unix-domain socket, by curling /api/ through each.
framework.KubeDescribe("Proxy server", func() {
// TODO: test proxy options (static, prefix, etc)
It("should support proxy with --port 0 [Conformance]", func() {
By("starting the proxy server")
port, cmd, err := startProxyServer()
// Kill the proxy process even if startProxyServer returned an error.
if cmd != nil {
defer framework.TryKill(cmd)
}
if err != nil {
framework.Failf("Failed to start proxy server: %v", err)
}
By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr)
if err != nil {
framework.Failf("Expected at least one supported apiversion, got error %v", err)
}
if len(apiVersions.Versions) < 1 {
framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
}
})
It("should support --unix-socket=/path [Conformance]", func() {
By("Starting the proxy")
tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
if err != nil {
framework.Failf("Failed to create temporary directory: %v", err)
}
path := filepath.Join(tmpdir, "test")
defer os.Remove(path)
defer os.Remove(tmpdir)
cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
framework.Failf("Failed to start kubectl command: %v", err)
}
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
// Wait for the proxy's startup banner before hitting the socket.
buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil {
framework.Failf("Expected output from kubectl proxy: %v", err)
}
By("retrieving proxy /api/ output")
// The URL host is ignored: curlUnix dials the unix socket directly.
_, err = curlUnix("http://unused/api", path)
if err != nil {
framework.Failf("Failed get of /api at %s: %v", path, err)
}
})
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
framework.KubeDescribe("Kubectl taint [Serial]", func() {
// Add a NoSchedule taint via kubectl, verify it in `kubectl describe node`,
// then remove it with the key:effect- syntax and verify it is gone.
It("should update the taint on a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
// Belt-and-braces cleanup in case the kubectl removal below fails.
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
By("removing the taint " + testTaint.ToString() + " of a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
}
})
// Stack three taints that share one key (NoSchedule, PreferNoSchedule,
// NoExecute), then remove them all at once with `kubectl taint <key>-`.
It("should remove all the taints with the same key off a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
newTestTaint := v1.Taint{
Key: testTaint.Key,
Value: "another-testing-taint-value",
Effect: v1.TaintEffectPreferNoSchedule,
}
By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{newTestTaint.ToString()},
}
checkOutput(output, requiredStrings)
noExecuteTaint := v1.Taint{
Key: testTaint.Key,
Value: "testing-taint-value-no-execute",
Effect: v1.TaintEffectNoExecute,
}
By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{noExecuteTaint.ToString()},
}
checkOutput(output, requiredStrings)
By("removing all taints that have the same key " + testTaint.Key + " of the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-")
By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
}
})
})
// Verifies `kubectl create quota`: without scopes, with valid scopes, and
// rejection of an invalid scope.
framework.KubeDescribe("Kubectl create quota", func() {
It("should create a quota without scopes", func() {
framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "million"
By("calling kubectl quota")
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag)
By("verifying that the quota was created")
quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 0 {
framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
}
if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
}
r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r)
}
r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r)
}
})
It("should create a quota with scopes", func() {
framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "scopes"
By("calling kubectl quota")
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag)
By("verifying that the quota was created")
quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
}
// Collect scopes into a set since their order is not guaranteed.
scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{}
}
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
}
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
}
})
It("should reject quota with invalid scopes", func() {
framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "scopes"
By("calling kubectl quota")
out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag)
if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
}
})
})
})
// checkOutputReturnError verifies that output, split into lines, satisfies
// each requirement in order. A requirement is a list of substrings: the
// first substring anchors a line (searched forward from the previous
// requirement's anchor line), and every remaining substring must appear on
// that same line. Returns an error describing the first unmet requirement,
// or nil when all are satisfied.
func checkOutputReturnError(output string, required [][]string) error {
	lines := strings.Split(output, "\n")
	idx := 0
	for _, req := range required {
		anchor := req[0]
		for idx < len(lines) && !strings.Contains(lines[idx], anchor) {
			idx++
		}
		if idx >= len(lines) {
			return fmt.Errorf("failed to find %s in %s", anchor, output)
		}
		for _, want := range req[1:] {
			if !strings.Contains(lines[idx], want) {
				return fmt.Errorf("failed to find %s in %s", want, lines[idx])
			}
		}
	}
	return nil
}
// checkOutput fails the current test if output does not satisfy the
// ordered line/substring expectations (see checkOutputReturnError).
func checkOutput(output string, required [][]string) {
	if err := checkOutputReturnError(output, required); err != nil {
		framework.Failf("%v", err)
	}
}
// checkKubectlOutputWithRetry polls kubectl with args (immediately, then
// once per second for up to a minute) until its output satisfies the
// required expectations; if it never does, the test fails with the last
// observed mismatch.
func checkKubectlOutputWithRetry(required [][]string, args ...string) {
	var lastErr error
	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		out := framework.RunKubectlOrDie(args...)
		if err := checkOutputReturnError(out, required); err != nil {
			lastErr = err
			return false, nil
		}
		lastErr = nil
		return true, nil
	})
	if lastErr != nil {
		framework.Failf("%v", lastErr)
	}
}
// getAPIVersions GETs apiEndpoint and decodes the response body into a
// metav1.APIVersions object.
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
	body, err := curl(apiEndpoint)
	if err != nil {
		return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
	}
	versions := metav1.APIVersions{}
	if err := json.Unmarshal([]byte(body), &versions); err != nil {
		return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err)
	}
	return &versions, nil
}
// startProxyServer launches `kubectl proxy` on an OS-assigned port and
// parses the chosen port from the process's startup banner. On success the
// caller owns the returned *exec.Cmd and must kill it; the cmd is also
// returned (non-nil) on parse/read failures so the caller can clean up.
// Returns -1 as the port on any failure.
func startProxyServer() (int, *exec.Cmd, error) {
// Specifying port 0 indicates we want the os to pick a random port.
cmd := framework.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
return -1, nil, err
}
// Only the first banner read is needed; the streams are closed on return
// while the proxy process itself keeps running.
defer stdout.Close()
defer stderr.Close()
buf := make([]byte, 128)
var n int
if n, err = stdout.Read(buf); err != nil {
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
}
output := string(buf[:n])
// proxyRegexp captures the port from "Starting to serve on 127.0.0.1:<port>".
match := proxyRegexp.FindStringSubmatch(output)
if len(match) == 2 {
if port, err := strconv.Atoi(match[1]); err == nil {
return port, cmd, nil
}
}
return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
}
// curlUnix GETs url over the unix-domain socket at path. The URL's host is
// ignored: the custom dialer always connects to the socket.
func curlUnix(url string, path string) (string, error) {
	transport := utilnet.SetTransportDefaults(&http.Transport{
		Dial: func(proto, addr string) (net.Conn, error) {
			return net.Dial("unix", path)
		},
	})
	return curlTransport(url, transport)
}
// curlTransport GETs url using the supplied transport and returns the full
// response body as a string.
func curlTransport(url string, transport *http.Transport) (string, error) {
	client := &http.Client{Transport: transport}
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// curl GETs url with a default (proxy-aware) transport and returns the body.
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
// validateGuestbookApp checks the guestbook example end to end: waits for
// the frontend pods to run, for the frontend to serve content, then writes
// an entry and reads it back through the service proxy. Any failure aborts
// the current test via framework.Failf.
func validateGuestbookApp(c clientset.Interface, ns string) {
framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for frontend to serve content.")
// An empty "get" response means the app is up but has no entries yet.
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
}
framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
}
framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
}
}
// waitForGuestbookResponse repeatedly issues cmd/arg to the guestbook
// frontend (retrying every 5 seconds until timeout) and reports whether the
// expected response was observed in time.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		res, err := makeRequestToGuestbook(c, cmd, arg, ns)
		if err == nil && res == expectedResponse {
			return true
		}
		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
		time.Sleep(5 * time.Second)
	}
	return false
}
// makeRequestToGuestbook issues a single request to the guestbook frontend
// through the API server's services proxy, hitting /guestbook.php with the
// given cmd ("get"/"set") and value against the fixed "messages" key. The
// raw response body is returned along with any request error; the call is
// bounded by framework.SingleCallTimeout.
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
return "", errProxy
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := proxyRequest.Namespace(ns).
Context(ctx).
Name("frontend").
Suffix("/guestbook.php").
Param("cmd", cmd).
Param("key", "messages").
Param("value", value).
Do().
Raw()
return string(result), err
}
// updateDemoData mirrors the JSON served by the update-demo container's
// data.json endpoint (see getUDData).
type updateDemoData struct {
Image string
}
// applyTestLabel is the label/selector key added by the `kubectl apply` tests.
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readBytesFromFile(filename string) []byte {
file, err := os.Open(filename)
if err != nil {
framework.Failf(err.Error())
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
framework.Failf(err.Error())
}
return data
}
// readReplicationControllerFromString decodes a YAML (or JSON) manifest into
// a ReplicationController, aborting the current test on parse failure.
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
framework.Failf(err.Error())
}
return &rc
}
// modifyReplicationControllerConfiguration adds the applyTestLabel marker to
// an RC manifest's labels, selector, and pod-template labels, and returns
// the modified manifest re-serialized as JSON.
// NOTE(review): assumes Labels, Spec.Selector, and Template.Labels maps are
// non-nil in the input manifest — writing to a nil map panics; confirm the
// fixtures used by callers always set them.
func modifyReplicationControllerConfiguration(contents string) io.Reader {
rc := readReplicationControllerFromString(contents)
rc.Labels[applyTestLabel] = "ADDED"
rc.Spec.Selector[applyTestLabel] = "ADDED"
rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
data, err := json.Marshal(rc)
if err != nil {
framework.Failf("json marshal failed: %s\n", err)
}
return bytes.NewReader(data)
}
// forEachReplicationController lists RCs matching selectorKey=selectorValue
// in ns (polling until at least one appears or framework.PodListTimeout
// elapses) and invokes fn on each. Fails the test if none are found.
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
var rcs *v1.ReplicationControllerList
var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.Core().ReplicationControllers(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(rcs.Items) > 0 {
break
}
}
if rcs == nil || len(rcs.Items) == 0 {
framework.Failf("No replication controllers found")
}
for _, rc := range rcs.Items {
fn(rc)
}
}
// validateReplicationControllerConfiguration checks that the redis-master RC
// (other RCs are ignored) carries the last-applied-configuration annotation
// and the "ADDED" test label written by the `kubectl apply` tests.
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
	if rc.Name != "redis-master" {
		return
	}
	if _, found := rc.Annotations[v1.LastAppliedConfigAnnotation]; !found {
		framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
	}
	if value, found := rc.Labels[applyTestLabel]; !found || value != "ADDED" {
		framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
	}
}
// getUDData creates a validator function based on the input string (i.e. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
	// The returned closure validates data.json served by the update-demo pod
	// (returns nil if the served image name contains jpgExpected).
	return func(c clientset.Interface, podID string) error {
		framework.Logf("validating pod %s", podID)
		// Newer servers expose pod proxying as a subresource; older ones use
		// the legacy "proxy" prefix — pick the matching request shape.
		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
		if err != nil {
			return err
		}
		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()
		var body []byte
		if subResourceProxyAvailable {
			body, err = c.Core().RESTClient().Get().
				Namespace(ns).
				Resource("pods").
				SubResource("proxy").
				Name(podID).
				Suffix("data.json").
				Do().
				Raw()
		} else {
			body, err = c.Core().RESTClient().Get().
				Prefix("proxy").
				Namespace(ns).
				Resource("pods").
				Name(podID).
				Suffix("data.json").
				Do().
				Raw()
		}
		if err != nil {
			// A context error means the single-call timeout expired; that is
			// fatal for the test rather than a retryable validation failure.
			if ctx.Err() != nil {
				framework.Failf("Failed to retrieve data from container: %v", err)
			}
			return err
		}
		framework.Logf("got data: %s", body)
		var data updateDemoData
		if err := json.Unmarshal(body, &data); err != nil {
			return err
		}
		framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
		// Early return instead of the original else-after-return.
		if strings.Contains(data.Image, jpgExpected) {
			return nil
		}
		return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
	}
}
// noOpValidatorFn is a validator that accepts any pod; used where
// framework.ValidateController requires a validator but no per-pod check is needed.
func noOpValidatorFn(c clientset.Interface, podID string) error { return nil }
// newBlockingReader returns a reader that yields the given string and then
// blocks until the returned closer is closed.
//
// The reader and closer are returned separately rather than as one
// ReadCloser because the closer must be the *os.File obtained from
// os.Pipe(): kubectl's exec can then hand the raw file descriptor straight
// to the exec syscall instead of creating another os.Pipe and blocking on
// the io.Copy between the source (e.g. stdin) and the pipe's write half.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	writeEnd.Write([]byte(s))
	return readEnd, writeEnd, nil
}
// newStreamingUpload creates a pipe reader and multipart writer that
// stream-POST the file at filePath. The returned *io.PipeReader is suitable
// as an http.Request body; the *multipart.Writer supplies the form's content
// type. The copy itself runs asynchronously in streamingUpload.
func newStreamingUpload(filePath string) (*io.PipeReader, *multipart.Writer, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, err
	}
	// Do NOT close the file here: the original `defer file.Close()` raced
	// with the streamingUpload goroutine, which is still reading from the
	// file. streamingUpload closes the file itself when the copy completes.
	r, w := io.Pipe()
	postBodyWriter := multipart.NewWriter(w)
	go streamingUpload(file, filepath.Base(filePath), postBodyWriter, w)
	return r, postBodyWriter, nil
}
// streamingUpload streams a file via a pipe through a multipart.Writer.
// Generally one should use newStreamingUpload instead of calling this directly.
// Intended to run as a goroutine: it closes both the source file and the
// pipe's write half when done, and fails the test on any copy/close error.
func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
// Recover ginkgo panics raised by Failf inside this goroutine.
defer GinkgoRecover()
defer file.Close()
defer w.Close()
// Set up the form file
fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
if err != nil {
framework.Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err)
}
// Copy kubectl binary into the file writer
if _, err := io.Copy(fileWriter, file); err != nil {
framework.Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err)
}
// Nothing more should be written to this instance of the postBodyWriter
if err := postBodyWriter.Close(); err != nil {
framework.Failf("Unable to close the writer for file upload. Error: %s", err)
}
}
// startLocalProxy starts a verbose goproxy HTTP proxy wrapped in a local
// httptest server and returns the server together with the buffer that
// captures the proxy's log output.
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
	logs = new(bytes.Buffer)
	proxy := goproxy.NewProxyHttpServer()
	proxy.Verbose = true
	proxy.Logger = log.New(logs, "", 0)
	srv = httptest.NewServer(proxy)
	return srv, logs
}
Correcting two spelling mistakes: Reustable -> Reusable, adversly -> adversely.
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/elazarl/goproxy"
"github.com/ghodss/yaml"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
uexec "k8s.io/kubernetes/pkg/util/exec"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// Container images and label selectors shared by the kubectl e2e specs.
nautilusImage = "gcr.io/google_containers/update-demo:nautilus"
kittenImage = "gcr.io/google_containers/update-demo:kitten"
updateDemoSelector = "name=update-demo"
updateDemoContainer = "update-demo"
frontendSelector = "app=guestbook,tier=frontend"
redisMasterSelector = "app=redis,role=master"
redisSlaveSelector = "app=redis,role=slave"
goproxyContainer = "goproxy"
goproxyPodSelector = "name=goproxy"
netexecContainer = "netexec"
netexecPodSelector = "name=netexec"
kubectlProxyPort = 8011
// Timeouts for the guestbook example app to start and to respond.
guestbookStartupTimeout = 10 * time.Minute
guestbookResponseTimeout = 3 * time.Minute
simplePodSelector = "name=nginx"
simplePodName = "nginx"
nginxDefaultOutput = "Welcome to nginx!"
simplePodPort = 80
pausePodSelector = "name=pause"
pausePodName = "pause"
runJobTimeout = 5 * time.Minute
busyboxImage = "gcr.io/google_containers/busybox:1.24"
nginxImage = "gcr.io/google_containers/nginx-slim:0.7"
newNginxImage = "gcr.io/google_containers/nginx-slim:0.8"
// Manifest files loaded via readTestFileOrDie.
kubeCtlManifestPath = "test/e2e/testing-manifests/kubectl"
redisControllerFilename = "redis-master-controller.json"
redisServiceFilename = "redis-master-service.json"
nginxDeployment1Filename = "nginx-deployment1.yaml"
nginxDeployment2Filename = "nginx-deployment2.yaml"
nginxDeployment3Filename = "nginx-deployment3.yaml"
redisImage = "gcr.io/k8s-testimages/redis:e2e"
)
var (
// proxyRegexp extracts the chosen port from `kubectl proxy`'s startup banner.
proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
// Extended pod logging options were introduced in #13780 (v1.1.0) so we don't expect tests
// that rely on extended pod logging options to work on clusters before that.
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
extendedPodLogFilterVersion = utilversion.MustParseSemantic("v1.1.0")
// NodePorts were made optional in #12831 (v1.1.0) so we don't expect tests that used to
// require NodePorts but no longer include them to work on clusters before that.
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
nodePortsOptionalVersion = utilversion.MustParseSemantic("v1.1.0")
// Jobs were introduced in v1.1, so we don't expect tests that rely on jobs to work on
// clusters before that.
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively in v1.3).
jobsVersion = utilversion.MustParseSemantic("v1.1.0")
// Deployments were introduced by default in v1.2, so we don't expect tests that rely on
// deployments to work on clusters before that.
//
// TODO(ihmccreery): remove once we don't care about v1.1 anymore, (tentatively in v1.4).
deploymentsVersion = utilversion.MustParseSemantic("v1.2.0-alpha.7.726")
// Pod probe parameters were introduced in #15967 (v1.2) so we don't expect tests that use
// these probe parameters to work on clusters before that.
//
// TODO(ihmccreery): remove once we don't care about v1.1 anymore, (tentatively in v1.4).
podProbeParametersVersion = utilversion.MustParseSemantic("v1.2.0-alpha.4")
// 'kubectl create quota' was introduced in #28351 (v1.4) so we don't expect tests that use
// 'kubectl create quota' to work on kubectl clients before that.
kubectlCreateQuotaVersion = utilversion.MustParseSemantic("v1.4.0-alpha.2")
// Returning container command exit codes in kubectl run/exec was introduced in #26541 (v1.4)
// so we don't expect tests that verifies return code to work on kubectl clients before that.
kubectlContainerExitCodeVersion = utilversion.MustParseSemantic("v1.4.0-alpha.3")
// GroupVersionResources for the v2alpha1 batch API, used by cronjob specs.
CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
)
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
// Aware of the kubectl example files map.
// cleanupKubectlInputs deletes everything described by fileContents from namespace ns and
// then asserts that nothing matching the given selectors is left behind in that namespace.
// Aware of the kubectl example files map.
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
	By("using delete to clean up resources")
	nsArg := ""
	if len(ns) > 0 {
		nsArg = "--namespace=" + ns
	}
	// Feed the raw manifest over stdin rather than by file path - since we are removing
	// file path dependencies from this test.
	framework.RunKubectlOrDieInput(fileContents, "delete", "--grace-period=0", "--force", "-f", "-", nsArg)
	framework.AssertCleanup(ns, selectors...)
}
func readTestFileOrDie(file string) []byte {
return generated.ReadOrDie(path.Join(kubeCtlManifestPath, file))
}
// runKubectlRetryOrDie invokes kubectl with the given arguments, retrying up to five
// times (one second apart) when the failure looks like a transient optimistic-concurrency
// conflict. Any error still present after the retries fails the test.
func runKubectlRetryOrDie(args ...string) string {
	var (
		output string
		err    error
	)
	for attempt := 0; attempt < 5; attempt++ {
		output, err = framework.RunKubectl(args...)
		if err == nil {
			break
		}
		// Only conflict-style errors are worth retrying; anything else is final.
		retriable := strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) ||
			strings.Contains(err.Error(), "Operation cannot be fulfilled")
		if !retriable {
			break
		}
		time.Sleep(time.Second)
	}
	// Expect no errors to be present after retries are finished
	// Copied from framework #ExecOrDie
	framework.Logf("stdout: %q", output)
	Expect(err).NotTo(HaveOccurred())
	return output
}
// duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
var _ = kubectlDescribe("Kubectl alpha client", func() {
	defer GinkgoRecover()
	f := framework.NewDefaultFramework("kubectl")
	var c clientset.Interface
	var ns string
	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})
	// Exercises `kubectl run` against the legacy ScheduledJob alpha resource name.
	framework.KubeDescribe("Kubectl run ScheduledJob", func() {
		var nsFlag string
		var sjName string
		BeforeEach(func() {
			nsFlag = fmt.Sprintf("--namespace=%v", ns)
			sjName = "e2e-test-echo-scheduledjob"
		})
		AfterEach(func() {
			// ScheduledJobs are served under the cronjobs resource, so delete by that name.
			framework.RunKubectlOrDie("delete", "cronjobs", sjName, nsFlag)
		})
		It("should create a ScheduledJob", func() {
			// Skip on clusters that do not serve the batch/v2alpha1 scheduledjobs resource.
			framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)
			schedule := "*/5 * * * ?"
			framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
			By("verifying the ScheduledJob " + sjName + " was created")
			// ScheduledJobs are read back through the CronJobs client of the same group/version.
			sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
			}
			if sj.Spec.Schedule != schedule {
				framework.Failf("Failed creating a ScheduledJob with correct schedule %s, but got %s", schedule, sj.Spec.Schedule)
			}
			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
				framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
			}
			restartPolicy := sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy
			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
				framework.Failf("Failed creating a ScheduledJob with correct restart policy %s, but got %s", v1.RestartPolicyOnFailure, restartPolicy)
			}
		})
	})
	// Same coverage as above, via the newer cronjob generator and resource name.
	framework.KubeDescribe("Kubectl run CronJob", func() {
		var nsFlag string
		var cjName string
		BeforeEach(func() {
			nsFlag = fmt.Sprintf("--namespace=%v", ns)
			cjName = "e2e-test-echo-cronjob"
		})
		AfterEach(func() {
			framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag)
		})
		It("should create a CronJob", func() {
			framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
			schedule := "*/5 * * * ?"
			framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
			By("verifying the CronJob " + cjName + " was created")
			sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting CronJob %s: %v", cjName, err)
			}
			if sj.Spec.Schedule != schedule {
				framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
			}
			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
				framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
			}
			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
				framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
			}
		})
	})
})
var _ = kubectlDescribe("Kubectl client", func() {
defer GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
	// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
	// clusterState scopes verification to running redis pods ("app": "redis") in the test namespace.
	clusterState := func() *framework.ClusterVerification {
		return f.NewClusterVerification(
			f.Namespace,
			framework.PodStateVerification{
				Selectors:   map[string]string{"app": "redis"},
				ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
			})
	}
	// forEachPod runs podFunc against every pod currently matching clusterState above.
	forEachPod := func(podFunc func(p v1.Pod)) {
		clusterState().ForEach(podFunc)
	}
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
	// Customized Wait / ForEach wrapper for this test. These demonstrate the
	// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
	// test files.
	// Print debug info if atLeast Pods are not found before the timeout
	waitForOrFailWithDebug := func(atLeast int) {
		pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
		if err != nil || len(pods) < atLeast {
			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
			// Dump the whole namespace state before failing so flakes are diagnosable from logs.
			framework.DumpAllNamespaceInfo(f.ClientSet, ns)
			framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
		}
	}
	// Create / scale / rolling-update lifecycle tests for the update-demo RCs.
	framework.KubeDescribe("Update Demo", func() {
		var nautilus, kitten []byte
		BeforeEach(func() {
			// Manifests come from generated bindata, not the filesystem.
			updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
			nautilus = generated.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml"))
			kitten = generated.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml"))
		})
		It("should create and stop a replication controller [Conformance]", func() {
			defer cleanupKubectlInputs(string(nautilus), ns, updateDemoSelector)
			By("creating a replication controller")
			framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
		})
		It("should scale a replication controller [Conformance]", func() {
			defer cleanupKubectlInputs(string(nautilus[:]), ns, updateDemoSelector)
			By("creating a replication controller")
			framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
			By("scaling down the replication controller")
			framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
			By("scaling up the replication controller")
			framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
		})
		It("should do a rolling update of a replication controller [Conformance]", func() {
			By("creating the initial replication controller")
			framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
			By("rolling-update to new replication controller")
			framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns)
			// Everything will hopefully be cleaned up when the namespace is deleted.
		})
	})
	// End-to-end deployment of the multi-component guestbook example application.
	framework.KubeDescribe("Guestbook application", func() {
		// forEachGBFile invokes run with the contents of every guestbook manifest, in order.
		forEachGBFile := func(run func(s string)) {
			for _, gbAppFile := range []string{
				"examples/guestbook/frontend-deployment.yaml",
				"examples/guestbook/frontend-service.yaml",
				"examples/guestbook/redis-master-deployment.yaml",
				"examples/guestbook/redis-master-service.yaml",
				"examples/guestbook/redis-slave-deployment.yaml",
				"examples/guestbook/redis-slave-service.yaml",
			} {
				contents := generated.ReadOrDie(gbAppFile)
				run(string(contents))
			}
		}
		It("should create and stop a working application [Conformance]", func() {
			// Guestbook uses Deployments, which older servers do not support.
			framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
			defer forEachGBFile(func(contents string) {
				cleanupKubectlInputs(contents, ns)
			})
			By("creating all guestbook components")
			forEachGBFile(func(contents string) {
				framework.Logf(contents)
				framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			})
			By("validating guestbook app")
			validateGuestbookApp(c, ns)
		})
	})
framework.KubeDescribe("Simple pod", func() {
var podPath []byte
		// Create the readiness-probe nginx pod before each test and wait until it is Ready.
		BeforeEach(func() {
			podPath = generated.ReadOrDie(path.Join(kubeCtlManifestPath, "pod-with-readiness-probe.yaml"))
			By(fmt.Sprintf("creating the pod from %v", string(podPath)))
			framework.RunKubectlOrDieInput(string(podPath[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
			Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue())
		})
		// Delete the pod and verify nothing matching its selector is left behind.
		AfterEach(func() {
			cleanupKubectlInputs(string(podPath[:]), ns, simplePodSelector)
		})
		It("should support exec", func() {
			By("executing a command in the container")
			execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container")
			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}
			By("executing a very long command in the container")
			// 20000 'a's: verifies exec does not truncate long argv/output streams.
			veryLongData := make([]rune, 20000)
			for i := 0; i < len(veryLongData); i++ {
				veryLongData[i] = 'a'
			}
			execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData))
			Expect(string(veryLongData)).To(Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output")
			By("executing a command in the container with noninteractive stdin")
			execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat").
				WithStdinData("abcd1234").
				ExecOrDie()
			if e, a := "abcd1234", execOutput; e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}
			// pretend that we're a user in an interactive shell
			r, closer, err := newBlockingReader("echo hi\nexit\n")
			if err != nil {
				framework.Failf("Error creating blocking reader: %v", err)
			}
			// NOTE this is solely for test cleanup!
			defer closer.Close()
			By("executing a command in the container with pseudo-interactive stdin")
			execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash").
				WithStdinReader(r).
				ExecOrDie()
			if e, a := "hi", strings.TrimSpace(execOutput); e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}
		})
		It("should support exec through an HTTP proxy", func() {
			// Note: We are skipping local since we want to verify an apiserver with HTTPS.
			// At this time local only supports plain HTTP.
			framework.SkipIfProviderIs("local")
			// Fail if the variable isn't set
			if framework.TestContext.Host == "" {
				framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
			}
			By("Starting goproxy")
			testSrv, proxyLogs := startLocalProxy()
			defer testSrv.Close()
			proxyAddr := testSrv.URL
			// Try both spellings of the proxy env var; kubectl must honor either.
			for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
				proxyLogs.Reset()
				By("Running kubectl via an HTTP proxy using " + proxyVar)
				output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container").
					WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
					ExecOrDie()
				// Verify we got the normal output captured by the exec server
				expectedExecOutput := "running in container\n"
				if output != expectedExecOutput {
					framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
				}
				// Verify the proxy server logs saw the connection
				expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimRight(strings.TrimLeft(framework.TestContext.Host, "https://"), "/api"))
				proxyLog := proxyLogs.String()
				if !strings.Contains(proxyLog, expectedProxyLog) {
					framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
				}
			}
		})
		It("should return command exit codes", func() {
			// Exit-code plumbing for run/exec needs a sufficiently new kubectl client.
			framework.SkipUnlessKubectlVersionGTE(kubectlContainerExitCodeVersion)
			nsFlag := fmt.Sprintf("--namespace=%v", ns)
			By("execing into a container with a successful command")
			_, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec()
			framework.ExpectNoError(err)
			By("execing into a container with a failing command")
			_, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec()
			// The container's exit status must surface as the kubectl process exit status.
			ee, ok := err.(uexec.ExitError)
			Expect(ok).To(Equal(true))
			Expect(ee.ExitStatus()).To(Equal(42))
			By("running a successful command")
			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
			framework.ExpectNoError(err)
			By("running a failing command")
			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
			ee, ok = err.(uexec.ExitError)
			Expect(ok).To(Equal(true))
			Expect(ee.ExitStatus()).To(Equal(42))
			By("running a failing command without --restart=Never")
			// With --restart=OnFailure the pod is retried, so kubectl itself should not fail.
			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
				WithStdinData("abcd1234").
				Exec()
			framework.ExpectNoError(err)
			By("running a failing command without --restart=Never, but with --rm")
			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
				WithStdinData("abcd1234").
				Exec()
			framework.ExpectNoError(err)
			framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
			By("running a failing command with --leave-stdin-open")
			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
				WithStdinData("abcd1234").
				Exec()
			framework.ExpectNoError(err)
		})
It("should support inline execution and attach", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #23335
framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("executing a command with run and attach with stdin")
runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Batch().Jobs(ns).Delete("run-test", nil)).To(BeNil())
By("executing a command with run and attach without stdin")
runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Batch().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())
By("executing a command with run and attach with stdin with open stdin should remain running")
runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234\n").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := util.GetFirstPod(f.InternalClientset.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
// to loop test.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
Expect(logOutput).ToNot(ContainSubstring("stdin closed"))
return strings.Contains(logOutput, "abcd1234"), nil
})
if err != nil {
os.Exit(1)
}
Expect(err).To(BeNil())
Expect(c.Batch().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
})
		It("should support port-forward", func() {
			By("forwarding the container port to a local port")
			cmd := runPortForward(ns, simplePodName, simplePodPort)
			defer cmd.Stop()
			By("curling local port output")
			// cmd.port is the ephemeral local port chosen by `kubectl port-forward`.
			localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
			body, err := curl(localAddr)
			framework.Logf("got: %s", body)
			if err != nil {
				framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
			}
			if !strings.Contains(body, nginxDefaultOutput) {
				framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body)
			}
		})
		// Runs the real kubectl binary inside the test pod to verify in-cluster
		// configuration (service-account token, injected env vars) and its overrides.
		It("should handle in-cluster config", func() {
			By("adding rbac permissions")
			// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
			framework.BindClusterRole(f.ClientSet.Rbac(), "view", f.Namespace.Name,
				rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
			err := framework.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
				serviceaccount.MakeUsername(f.Namespace.Name, "default"),
				f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
			framework.ExpectNoError(err)
			By("overriding icc with values provided by flags")
			kubectlPath := framework.TestContext.KubectlPath
			// we need the actual kubectl binary, not the script wrapper
			kubectlPathNormalizer := exec.Command("which", kubectlPath)
			if strings.HasSuffix(kubectlPath, "kubectl.sh") {
				kubectlPathNormalizer = exec.Command(kubectlPath, "path")
			}
			kubectlPathNormalized, err := kubectlPathNormalizer.Output()
			framework.ExpectNoError(err)
			kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
			// The injected env vars tell us which host:port in-cluster requests should hit.
			inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
			inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
			framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
			framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
			// Build a kubeconfig file that will make use of the injected ca and token,
			// but point at the DNS host and the default namespace
			tmpDir, err := ioutil.TempDir("", "icc-override")
			overrideKubeconfigName := "icc-override.kubeconfig"
			framework.ExpectNoError(err)
			defer func() { os.Remove(tmpDir) }()
			framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
    api-version: v1
    server: https://kubernetes.default.svc:443
    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  name: kubeconfig-cluster
contexts:
- context:
    cluster: kubeconfig-cluster
    namespace: default
    user: kubeconfig-user
  name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
			framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
			// Deliberately invalid configmaps: creation fails, but the verbose client
			// output still reveals which namespace/config kubectl resolved.
			framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap with namespace and invalid name"
  namespace: configmap-namespace
`), os.FileMode(0755)))
			framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
			framework.Logf("copying configmap manifests to the %s pod", simplePodName)
			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
			By("getting pods with in-cluster configs")
			execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=7 2>&1")
			Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running"))
			Expect(execOutput).To(ContainSubstring("Using in-cluster namespace"))
			Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
			By("creating an object containing a namespace with in-cluster config")
			// RunHostCmd returns the command output inside err when the command fails,
			// so the assertions below match against err.
			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=7 2>&1")
			Expect(err).To(ContainSubstring("Using in-cluster namespace"))
			Expect(err).To(ContainSubstring("Using in-cluster configuration"))
			Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort)))
			By("creating an object not containing a namespace with in-cluster config")
			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=7 2>&1")
			Expect(err).To(ContainSubstring("Using in-cluster namespace"))
			Expect(err).To(ContainSubstring("Using in-cluster configuration"))
			Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name)))
			By("trying to use kubectl with invalid token")
			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
			framework.Logf("got err %v", err)
			Expect(err).To(HaveOccurred())
			Expect(err).To(ContainSubstring("Using in-cluster namespace"))
			Expect(err).To(ContainSubstring("Using in-cluster configuration"))
			Expect(err).To(ContainSubstring("Authorization: Bearer invalid"))
			Expect(err).To(ContainSubstring("Response Status: 401 Unauthorized"))
			By("trying to use kubectl with invalid server")
			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
			framework.Logf("got err %v", err)
			Expect(err).To(HaveOccurred())
			Expect(err).To(ContainSubstring("Unable to connect to the server"))
			Expect(err).To(ContainSubstring("GET http://invalid/api"))
			By("trying to use kubectl with invalid namespace")
			execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
			Expect(execOutput).To(ContainSubstring("No resources found"))
			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
			Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
			Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
			By("trying to use kubectl with kubeconfig")
			execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration"))
			Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
		})
})
framework.KubeDescribe("Kubectl api-versions", func() {
It("should check if v1 is in available api versions [Conformance]", func() {
By("validating api verions")
output := framework.RunKubectlOrDie("api-versions")
if !strings.Contains(output, "v1") {
framework.Failf("No v1 in kubectl api-versions")
}
})
})
framework.KubeDescribe("Kubectl apply", func() {
It("should apply a new configuration to an existing RC", func() {
controllerJson := readTestFileOrDie(redisControllerFilename)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating Redis RC")
framework.RunKubectlOrDieInput(string(controllerJson), "create", "-f", "-", nsFlag)
By("applying a modified configuration")
stdin := modifyReplicationControllerConfiguration(string(controllerJson))
framework.NewKubectlCommand("apply", "-f", "-", nsFlag).
WithStdinReader(stdin).
ExecOrDie()
By("checking the result")
forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration)
})
It("should reuse port when apply to an existing SVC", func() {
serviceJson := readTestFileOrDie(redisServiceFilename)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating Redis SVC")
framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
By("getting the original port")
originalNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
By("applying the same configuration")
framework.RunKubectlOrDieInput(string(serviceJson[:]), "apply", "-f", "-", nsFlag)
By("getting the port after applying configuration")
currentNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
By("checking the result")
if originalNodePort != currentNodePort {
framework.Failf("port should keep the same")
}
})
It("apply set/view last-applied", func() {
deployment1Yaml := readTestFileOrDie(nginxDeployment1Filename)
deployment2Yaml := readTestFileOrDie(nginxDeployment2Filename)
deployment3Yaml := readTestFileOrDie(nginxDeployment3Filename)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("deployment replicas number is 2")
framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "-f", "-", nsFlag)
By("check the last-applied matches expectations annotations")
output := framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString := "\"replicas\": 2"
if !strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
}
By("apply file doesn't have replicas")
framework.RunKubectlOrDieInput(string(deployment2Yaml[:]), "apply", "set-last-applied", "-f", "-", nsFlag)
By("check last-applied has been updated, annotations doesn't replicas")
output = framework.RunKubectlOrDieInput(string(deployment1Yaml[:]), "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString = "\"replicas\": 2"
if strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
}
By("scale set replicas to 3")
nginxDeploy := "nginx-deployment"
framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag)
By("apply file doesn't have replicas but image changed")
framework.RunKubectlOrDieInput(string(deployment3Yaml[:]), "apply", "-f", "-", nsFlag)
By("verify replicas still is 3 and image has been updated")
output = framework.RunKubectlOrDieInput(string(deployment3Yaml[:]), "get", "-f", "-", nsFlag, "-o", "json")
requiredItems := []string{"\"replicas\": 3", "nginx-slim:0.7"}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item)
}
}
})
})
	// Verifies `kubectl cluster-info` reports the master and (on GCE/GKE) addons.
	framework.KubeDescribe("Kubectl cluster-info", func() {
		It("should check if Kubernetes master services is included in cluster-info [Conformance]", func() {
			By("validating cluster-info")
			output := framework.RunKubectlOrDie("cluster-info")
			// Can't check exact strings due to terminal control commands (colors)
			requiredItems := []string{"Kubernetes master", "is running at"}
			if framework.ProviderIs("gce", "gke") {
				requiredItems = append(requiredItems, "KubeDNS", "Heapster")
			}
			for _, item := range requiredItems {
				if !strings.Contains(output, item) {
					framework.Failf("Missing %s in kubectl cluster-info", item)
				}
			}
		})
	})
	// Verifies `kubectl describe` output for pod, rc, service, node, and namespace.
	framework.KubeDescribe("Kubectl describe", func() {
		It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() {
			framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c.Discovery())
			// Also require the server to be at least as new as the kubectl client.
			kv, err := framework.KubectlVersion()
			Expect(err).NotTo(HaveOccurred())
			framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
			controllerJson := readTestFileOrDie(redisControllerFilename)
			serviceJson := readTestFileOrDie(redisServiceFilename)
			nsFlag := fmt.Sprintf("--namespace=%v", ns)
			framework.RunKubectlOrDieInput(string(controllerJson[:]), "create", "-f", "-", nsFlag)
			framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
			By("Waiting for Redis master to start.")
			waitForOrFailWithDebug(1)
			// Pod: each inner slice lists substrings that must appear together on one line.
			forEachPod(func(pod v1.Pod) {
				output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag)
				requiredStrings := [][]string{
					{"Name:", "redis-master-"},
					{"Namespace:", ns},
					{"Node:"},
					{"Labels:", "app=redis"},
					{"role=master"},
					{"Annotations:"},
					{"Status:", "Running"},
					{"IP:"},
					{"Created By:", "ReplicationController/redis-master"},
					{"Controlled By:", "ReplicationController/redis-master"},
					{"Image:", redisImage},
					{"State:", "Running"},
					{"QoS Class:", "BestEffort"},
				}
				checkOutput(output, requiredStrings)
			})
			// Rc: describe output may lag pod creation, so this check retries.
			requiredStrings := [][]string{
				{"Name:", "redis-master"},
				{"Namespace:", ns},
				{"Selector:", "app=redis,role=master"},
				{"Labels:", "app=redis"},
				{"role=master"},
				{"Annotations:"},
				{"Replicas:", "1 current", "1 desired"},
				{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
				{"Pod Template:"},
				{"Image:", redisImage},
				{"Events:"}}
			checkKubectlOutputWithRetry(requiredStrings, "describe", "rc", "redis-master", nsFlag)
			// Service
			output := framework.RunKubectlOrDie("describe", "service", "redis-master", nsFlag)
			requiredStrings = [][]string{
				{"Name:", "redis-master"},
				{"Namespace:", ns},
				{"Labels:", "app=redis"},
				{"role=master"},
				{"Annotations:"},
				{"Selector:", "app=redis", "role=master"},
				{"Type:", "ClusterIP"},
				{"IP:"},
				{"Port:", "<unset>", "6379/TCP"},
				{"Endpoints:"},
				{"Session Affinity:", "None"}}
			checkOutput(output, requiredStrings)
			// Node
			// It should be OK to list unschedulable Nodes here.
			nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
			Expect(err).NotTo(HaveOccurred())
			node := nodes.Items[0]
			output = framework.RunKubectlOrDie("describe", "node", node.Name)
			requiredStrings = [][]string{
				{"Name:", node.Name},
				{"Labels:"},
				{"Annotations:"},
				{"CreationTimestamp:"},
				{"Conditions:"},
				{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
				{"Addresses:"},
				{"Capacity:"},
				{"Version:"},
				{"Kernel Version:"},
				{"OS Image:"},
				{"Container Runtime Version:"},
				{"Kubelet Version:"},
				{"Kube-Proxy Version:"},
				{"Pods:"}}
			checkOutput(output, requiredStrings)
			// Namespace
			output = framework.RunKubectlOrDie("describe", "namespace", ns)
			requiredStrings = [][]string{
				{"Name:", ns},
				{"Labels:"},
				{"Annotations:"},
				{"Status:", "Active"}}
			checkOutput(output, requiredStrings)
			// Quota and limitrange are skipped for now.
		})
	})
// Kubectl expose: expose the redis-master RC (and then the resulting
// service) and verify the created services wire the expected ports to the
// redis endpoint.
framework.KubeDescribe("Kubectl expose", func() {
    It("should create services for rc [Conformance]", func() {
        controllerJson := readTestFileOrDie(redisControllerFilename)
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        redisPort := 6379
        By("creating Redis RC")
        framework.Logf("namespace %v", ns)
        framework.RunKubectlOrDieInput(string(controllerJson[:]), "create", "-f", "-", nsFlag)
        // It may take a while for the pods to get registered in some cases, wait to be sure.
        By("Waiting for Redis master to start.")
        waitForOrFailWithDebug(1)
        forEachPod(func(pod v1.Pod) {
            framework.Logf("wait on redis-master startup in %v ", ns)
            framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout)
        })
        // validateService polls until the named service has exactly one
        // endpoint on redisPort, then checks the service's port mapping.
        validateService := func(name string, servicePort int, timeout time.Duration) {
            err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
                endpoints, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
                if err != nil {
                    // log the real error
                    framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
                    // if the error is API not found or could not find default credentials or TLS handshake timeout, try again
                    if apierrs.IsNotFound(err) ||
                        apierrs.IsUnauthorized(err) ||
                        apierrs.IsServerTimeout(err) {
                        err = nil
                    }
                    return false, err
                }
                uidToPort := framework.GetContainerPortsByPodUID(endpoints)
                if len(uidToPort) == 0 {
                    framework.Logf("No endpoint found, retrying")
                    return false, nil
                }
                if len(uidToPort) > 1 {
                    framework.Failf("Too many endpoints found")
                }
                for _, port := range uidToPort {
                    if port[0] != redisPort {
                        framework.Failf("Wrong endpoint port: %d", port[0])
                    }
                }
                return true, nil
            })
            Expect(err).NotTo(HaveOccurred())
            service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{})
            Expect(err).NotTo(HaveOccurred())
            if len(service.Spec.Ports) != 1 {
                framework.Failf("1 port is expected")
            }
            port := service.Spec.Ports[0]
            if port.Port != int32(servicePort) {
                framework.Failf("Wrong service port: %d", port.Port)
            }
            if port.TargetPort.IntValue() != redisPort {
                framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
            }
        }
        By("exposing RC")
        framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
        framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
        validateService("rm2", 1234, framework.ServiceStartTimeout)
        By("exposing service")
        framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
        framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
        validateService("rm3", 2345, framework.ServiceStartTimeout)
    })
})
// Kubectl label: add a label to a pause pod, verify it via `get -L`, then
// remove it and verify it is gone.
framework.KubeDescribe("Kubectl label", func() {
    var pod []byte
    var nsFlag string
    BeforeEach(func() {
        pod = readTestFileOrDie("pause-pod.yaml")
        By("creating the pod")
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        framework.RunKubectlOrDieInput(string(pod), "create", "-f", "-", nsFlag)
        Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue())
    })
    AfterEach(func() {
        cleanupKubectlInputs(string(pod[:]), ns, pausePodSelector)
    })
    It("should update the label on a resource [Conformance]", func() {
        labelName := "testing-label"
        labelValue := "testing-label-value"
        By("adding the label " + labelName + " with value " + labelValue + " to a pod")
        framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag)
        By("verifying the pod has the label " + labelName + " with the value " + labelValue)
        output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
        if !strings.Contains(output, labelValue) {
            framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
        }
        By("removing the label " + labelName + " of a pod")
        // The trailing "-" is kubectl's syntax for removing a label.
        framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag)
        By("verifying the pod doesn't have the label " + labelName)
        output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
        if strings.Contains(output, labelValue) {
            framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
        }
    })
})
// Kubectl logs: exercise `kubectl log` filters (--tail, --limit-bytes,
// --timestamps, --since) against the redis-master container.
framework.KubeDescribe("Kubectl logs", func() {
    var rc []byte
    var nsFlag string
    containerName := "redis-master"
    BeforeEach(func() {
        rc = readTestFileOrDie(redisControllerFilename)
        By("creating an rc")
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        framework.RunKubectlOrDieInput(string(rc[:]), "create", "-f", "-", nsFlag)
    })
    AfterEach(func() {
        cleanupKubectlInputs(string(rc[:]), ns, simplePodSelector)
    })
    It("should be able to retrieve and filter logs [Conformance]", func() {
        framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c.Discovery())
        // Split("something\n", "\n") returns ["something", ""], so
        // strip trailing newline first
        lines := func(out string) []string {
            return strings.Split(strings.TrimRight(out, "\n"), "\n")
        }
        By("Waiting for Redis master to start.")
        waitForOrFailWithDebug(1)
        forEachPod(func(pod v1.Pod) {
            By("checking for a matching strings")
            _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
            Expect(err).NotTo(HaveOccurred())
            By("limiting log lines")
            out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1")
            Expect(len(out)).NotTo(BeZero())
            Expect(len(lines(out))).To(Equal(1))
            By("limiting log bytes")
            out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1")
            Expect(len(lines(out))).To(Equal(1))
            Expect(len(out)).To(Equal(1))
            By("exposing timestamps")
            out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps")
            l := lines(out)
            Expect(len(l)).To(Equal(1))
            // With --timestamps the first space-separated word must be an
            // RFC3339 or RFC3339Nano timestamp.
            words := strings.Split(l[0], " ")
            Expect(len(words)).To(BeNumerically(">", 1))
            if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
                if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
                    framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
                }
            }
            By("restricting to a time range")
            // Note: we must wait at least two seconds,
            // because the granularity is only 1 second and
            // it could end up rounding the wrong way.
            time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
            recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s")
            recent := len(strings.Split(recent_out, "\n"))
            older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h")
            older := len(strings.Split(older_out, "\n"))
            Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out)
        })
    })
})
// Kubectl patch: patch every pod in the redis RC with an annotation and
// verify the annotation appears on re-read.
framework.KubeDescribe("Kubectl patch", func() {
    It("should add annotations for pods in rc [Conformance]", func() {
        controllerJson := readTestFileOrDie(redisControllerFilename)
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        By("creating Redis RC")
        framework.RunKubectlOrDieInput(string(controllerJson[:]), "create", "-f", "-", nsFlag)
        By("Waiting for Redis master to start.")
        waitForOrFailWithDebug(1)
        By("patching all pods")
        forEachPod(func(pod v1.Pod) {
            framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
        })
        By("checking annotations")
        forEachPod(func(pod v1.Pod) {
            found := false
            for key, val := range pod.Annotations {
                if key == "x" && val == "y" {
                    found = true
                    break
                }
            }
            if !found {
                framework.Failf("Added annotation not found")
            }
        })
    })
})
// Kubectl version: verify `kubectl version` reports both client and server
// version fields.
framework.KubeDescribe("Kubectl version", func() {
    It("should check is all data is printed [Conformance]", func() {
        version := framework.RunKubectlOrDie("version")
        requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"}
        for _, item := range requiredItems {
            if !strings.Contains(version, item) {
                framework.Failf("Required item %s not found in %s", item, version)
            }
        }
    })
})
// Kubectl run default: `kubectl run` with no generator creates a deployment
// on servers that support deployments and an rc otherwise; verify the pod it
// controls comes up with the right image.
framework.KubeDescribe("Kubectl run default", func() {
    var nsFlag string
    var name string
    var cleanUp func()
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        gte, err := framework.ServerVersionGTE(deploymentsVersion, c.Discovery())
        if err != nil {
            framework.Failf("Failed to get server version: %v", err)
        }
        // Pick the expected resource kind (and matching cleanup) based on
        // whether the server supports deployments.
        if gte {
            name = "e2e-test-nginx-deployment"
            cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) }
        } else {
            name = "e2e-test-nginx-rc"
            cleanUp = func() { framework.RunKubectlOrDie("delete", "rc", name, nsFlag) }
        }
    })
    AfterEach(func() {
        cleanUp()
    })
    It("should create an rc or deployment from an image [Conformance]", func() {
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag)
        By("verifying the pod controlled by " + name + " gets created")
        label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
        podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
        if err != nil {
            framework.Failf("Failed getting pod controlled by %s: %v", name, err)
        }
        pods := podlist.Items
        if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
            framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
            framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
        }
    })
})
// Kubectl run rc: `kubectl run --generator=run/v1` creates a replication
// controller; verify the rc, its pod, and that `kubectl logs rc/<name>`
// works.
framework.KubeDescribe("Kubectl run rc", func() {
    var nsFlag string
    var rcName string
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        rcName = "e2e-test-nginx-rc"
    })
    AfterEach(func() {
        framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
    })
    It("should create an rc from an image [Conformance]", func() {
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
        By("verifying the rc " + rcName + " was created")
        rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting rc %s: %v", rcName, err)
        }
        containers := rc.Spec.Template.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
            framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
        }
        By("verifying the pod controlled by rc " + rcName + " was created")
        label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
        podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
        if err != nil {
            framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
        }
        pods := podlist.Items
        if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
            framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
            framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
        }
        By("confirm that you can get logs from an rc")
        podNames := []string{}
        for _, pod := range pods {
            podNames = append(podNames, pod.Name)
        }
        if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
            framework.Failf("Pods for rc %s were not ready", rcName)
        }
        _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
        // a non-nil error is fine as long as we actually found a pod.
        if err != nil && !strings.Contains(err.Error(), " in pod ") {
            framework.Failf("Failed getting logs by rc %s: %v", rcName, err)
        }
    })
})
// Kubectl rolling-update: create an rc and roll it over to the same image,
// verifying the controller stays valid throughout.
framework.KubeDescribe("Kubectl rolling-update", func() {
    var nsFlag string
    var rcName string
    var c clientset.Interface
    BeforeEach(func() {
        c = f.ClientSet
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        rcName = "e2e-test-nginx-rc"
    })
    AfterEach(func() {
        framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
    })
    It("should support rolling-update to same image [Conformance]", func() {
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
        By("verifying the rc " + rcName + " was created")
        rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting rc %s: %v", rcName, err)
        }
        containers := rc.Spec.Template.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
            framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
        }
        // Wait for the rc to settle before starting the rolling update.
        framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
        By("rolling-update to same image controller")
        runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
        framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
    })
})
// Kubectl run deployment: `kubectl run --generator=deployment/v1beta1`
// creates a deployment; verify the deployment and its controlled pod.
framework.KubeDescribe("Kubectl run deployment", func() {
    var nsFlag string
    var dName string
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        dName = "e2e-test-nginx-deployment"
    })
    AfterEach(func() {
        // Deletion is retried because the credential plugin can transiently
        // fail with "could not find default credentials".
        err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
            out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag)
            if err != nil {
                if strings.Contains(err.Error(), "could not find default credentials") {
                    err = nil
                }
                return false, fmt.Errorf("kubectl delete failed output: %s, err: %v", out, err)
            }
            return true, nil
        })
        Expect(err).NotTo(HaveOccurred())
    })
    It("should create a deployment from an image [Conformance]", func() {
        framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
        By("verifying the deployment " + dName + " was created")
        d, err := c.Extensions().Deployments(ns).Get(dName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting deployment %s: %v", dName, err)
        }
        containers := d.Spec.Template.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
            framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
        }
        By("verifying the pod controlled by deployment " + dName + " was created")
        label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
        podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
        if err != nil {
            framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
        }
        pods := podlist.Items
        if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
            framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
            framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
        }
    })
})
// Kubectl run job: `kubectl run --restart=OnFailure --generator=job/v1`
// creates a Job with the matching restart policy; verify both.
framework.KubeDescribe("Kubectl run job", func() {
    var nsFlag string
    var jobName string
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        jobName = "e2e-test-nginx-job"
    })
    AfterEach(func() {
        framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag)
    })
    It("should create a job from an image when restart is OnFailure [Conformance]", func() {
        framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
        By("verifying the job " + jobName + " was created")
        job, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting job %s: %v", jobName, err)
        }
        containers := job.Spec.Template.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
            framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
        }
        if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
            framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
        }
    })
})
// Kubectl run pod: `kubectl run --restart=Never --generator=run-pod/v1`
// creates a bare pod with RestartPolicy Never; verify both.
framework.KubeDescribe("Kubectl run pod", func() {
    var nsFlag string
    var podName string
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        podName = "e2e-test-nginx-pod"
    })
    AfterEach(func() {
        framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
    })
    It("should create a pod from an image when restart is Never [Conformance]", func() {
        framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag)
        By("verifying the pod " + podName + " was created")
        pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting pod %s: %v", podName, err)
        }
        containers := pod.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
            framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
        }
        if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
            framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
        }
    })
})
// Kubectl replace: fetch a running pod's JSON, swap its image, `kubectl
// replace` it, and verify the new image took effect.
framework.KubeDescribe("Kubectl replace", func() {
    var nsFlag string
    var podName string
    BeforeEach(func() {
        nsFlag = fmt.Sprintf("--namespace=%v", ns)
        podName = "e2e-test-nginx-pod"
    })
    AfterEach(func() {
        framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
    })
    It("should update a single-container pod's image [Conformance]", func() {
        framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
        By("running the image " + nginxImage)
        framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag)
        By("verifying the pod " + podName + " is running")
        label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
        err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
        if err != nil {
            framework.Failf("Failed getting pod %s: %v", podName, err)
        }
        By("verifying the pod " + podName + " was created")
        podJson := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json")
        if !strings.Contains(podJson, podName) {
            framework.Failf("Failed to find pod %s in [%s]", podName, podJson)
        }
        By("replace the image in the pod")
        // Textual replacement of the image string inside the pod JSON; only
        // the first occurrence is swapped.
        podJson = strings.Replace(podJson, nginxImage, busyboxImage, 1)
        framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag)
        By("verifying the pod " + podName + " has the right image " + busyboxImage)
        pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting deployment %s: %v", podName, err)
        }
        containers := pod.Spec.Containers
        if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
            framework.Failf("Failed creating pod with expected image %s", busyboxImage)
        }
    })
})
// Kubectl run --rm job: run an attached job that echoes stdin, then verify
// kubectl deleted the job after it completed.
framework.KubeDescribe("Kubectl run --rm job", func() {
    jobName := "e2e-test-rm-busybox-job"
    It("should create a job from an image, then delete the job [Conformance]", func() {
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        // The rkt runtime doesn't support attach, see #23335
        framework.SkipIfContainerRuntimeIs("rkt")
        framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
        By("executing a command with run --rm and attach with stdin")
        t := time.NewTimer(runJobTimeout)
        defer t.Stop()
        runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
            WithStdinData("abcd1234").
            WithTimeout(t.C).
            ExecOrDie()
        Expect(runOutput).To(ContainSubstring("abcd1234"))
        Expect(runOutput).To(ContainSubstring("stdin closed"))
        By("verifying the job " + jobName + " was deleted")
        // --rm must have removed the job, so the Get should 404.
        _, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
        Expect(err).To(HaveOccurred())
        Expect(apierrs.IsNotFound(err)).To(BeTrue())
    })
})
// Proxy server: run `kubectl proxy` on a random TCP port and on a unix
// socket, and verify /api is reachable through each.
framework.KubeDescribe("Proxy server", func() {
    // TODO: test proxy options (static, prefix, etc)
    It("should support proxy with --port 0 [Conformance]", func() {
        By("starting the proxy server")
        port, cmd, err := startProxyServer()
        // cmd may be non-nil even on error; always try to kill it.
        if cmd != nil {
            defer framework.TryKill(cmd)
        }
        if err != nil {
            framework.Failf("Failed to start proxy server: %v", err)
        }
        By("curling proxy /api/ output")
        localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
        apiVersions, err := getAPIVersions(localAddr)
        if err != nil {
            framework.Failf("Expected at least one supported apiversion, got error %v", err)
        }
        if len(apiVersions.Versions) < 1 {
            framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
        }
    })
    It("should support --unix-socket=/path [Conformance]", func() {
        By("Starting the proxy")
        tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
        if err != nil {
            framework.Failf("Failed to create temporary directory: %v", err)
        }
        path := filepath.Join(tmpdir, "test")
        defer os.Remove(path)
        defer os.Remove(tmpdir)
        cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
        stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
        if err != nil {
            framework.Failf("Failed to start kubectl command: %v", err)
        }
        defer stdout.Close()
        defer stderr.Close()
        defer framework.TryKill(cmd)
        // Block until the proxy prints something, indicating it is ready.
        buf := make([]byte, 128)
        if _, err = stdout.Read(buf); err != nil {
            framework.Failf("Expected output from kubectl proxy: %v", err)
        }
        By("retrieving proxy /api/ output")
        // The host in the URL is ignored; traffic goes over the socket.
        _, err = curlUnix("http://unused/api", path)
        if err != nil {
            framework.Failf("Failed get of /api at %s: %v", path, err)
        }
    })
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
framework.KubeDescribe("Kubectl taint [Serial]", func() {
    It("should update the taint on a node", func() {
        // Unique key per run so leftovers from earlier runs can't match.
        testTaint := v1.Taint{
            Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
            Value:  "testing-taint-value",
            Effect: v1.TaintEffectNoSchedule,
        }
        nodeName := scheduling.GetNodeThatCanRunPod(f)
        By("adding the taint " + testTaint.ToString() + " to a node")
        runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
        defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
        By("verifying the node has the taint " + testTaint.ToString())
        output := runKubectlRetryOrDie("describe", "node", nodeName)
        requiredStrings := [][]string{
            {"Name:", nodeName},
            {"Taints:"},
            {testTaint.ToString()},
        }
        checkOutput(output, requiredStrings)
        By("removing the taint " + testTaint.ToString() + " of a node")
        // "<key>:<effect>-" removes the specific taint with that effect.
        runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
        By("verifying the node doesn't have the taint " + testTaint.Key)
        output = runKubectlRetryOrDie("describe", "node", nodeName)
        if strings.Contains(output, testTaint.Key) {
            framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
        }
    })
    It("should remove all the taints with the same key off a node", func() {
        testTaint := v1.Taint{
            Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
            Value:  "testing-taint-value",
            Effect: v1.TaintEffectNoSchedule,
        }
        nodeName := scheduling.GetNodeThatCanRunPod(f)
        By("adding the taint " + testTaint.ToString() + " to a node")
        runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
        defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
        By("verifying the node has the taint " + testTaint.ToString())
        output := runKubectlRetryOrDie("describe", "node", nodeName)
        requiredStrings := [][]string{
            {"Name:", nodeName},
            {"Taints:"},
            {testTaint.ToString()},
        }
        checkOutput(output, requiredStrings)
        // Second taint: same key, different value and effect.
        newTestTaint := v1.Taint{
            Key:    testTaint.Key,
            Value:  "another-testing-taint-value",
            Effect: v1.TaintEffectPreferNoSchedule,
        }
        By("adding another taint " + newTestTaint.ToString() + " to the node")
        runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString())
        defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
        By("verifying the node has the taint " + newTestTaint.ToString())
        output = runKubectlRetryOrDie("describe", "node", nodeName)
        requiredStrings = [][]string{
            {"Name:", nodeName},
            {"Taints:"},
            {newTestTaint.ToString()},
        }
        checkOutput(output, requiredStrings)
        // Third taint: same key again, NoExecute effect.
        noExecuteTaint := v1.Taint{
            Key:    testTaint.Key,
            Value:  "testing-taint-value-no-execute",
            Effect: v1.TaintEffectNoExecute,
        }
        By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
        runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString())
        defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
        By("verifying the node has the taint " + noExecuteTaint.ToString())
        output = runKubectlRetryOrDie("describe", "node", nodeName)
        requiredStrings = [][]string{
            {"Name:", nodeName},
            {"Taints:"},
            {noExecuteTaint.ToString()},
        }
        checkOutput(output, requiredStrings)
        By("removing all taints that have the same key " + testTaint.Key + " of the node")
        // "<key>-" (no effect) removes every taint sharing the key.
        runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-")
        By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
        output = runKubectlRetryOrDie("describe", "node", nodeName)
        if strings.Contains(output, testTaint.Key) {
            framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
        }
    })
})
// Kubectl create quota: verify `kubectl create quota` for plain quotas,
// scoped quotas, and rejection of invalid scopes.
framework.KubeDescribe("Kubectl create quota", func() {
    It("should create a quota without scopes", func() {
        framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        quotaName := "million"
        By("calling kubectl quota")
        framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag)
        By("verifying that the quota was created")
        quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting quota %s: %v", quotaName, err)
        }
        if len(quota.Spec.Scopes) != 0 {
            framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
        }
        if len(quota.Spec.Hard) != 2 {
            framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
        }
        r, found := quota.Spec.Hard[v1.ResourcePods]
        if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
            framework.Failf("Expected pods=1000000, got %v", r)
        }
        r, found = quota.Spec.Hard[v1.ResourceServices]
        if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
            framework.Failf("Expected services=1000000, got %v", r)
        }
    })
    It("should create a quota with scopes", func() {
        framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        quotaName := "scopes"
        By("calling kubectl quota")
        framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag)
        By("verifying that the quota was created")
        quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
        if err != nil {
            framework.Failf("Failed getting quota %s: %v", quotaName, err)
        }
        if len(quota.Spec.Scopes) != 2 {
            framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
        }
        // Scope order in the spec is not guaranteed; compare as a set.
        scopes := make(map[v1.ResourceQuotaScope]struct{})
        for _, scope := range quota.Spec.Scopes {
            scopes[scope] = struct{}{}
        }
        if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
            framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
        }
        if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
            framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
        }
    })
    It("should reject quota with invalid scopes", func() {
        framework.SkipUnlessKubectlVersionGTE(kubectlCreateQuotaVersion)
        nsFlag := fmt.Sprintf("--namespace=%v", ns)
        quotaName := "scopes"
        By("calling kubectl quota")
        out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag)
        if err == nil {
            framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
        }
    })
})
})
// checkOutputReturnError verifies that every requirement group in required
// can be matched, in order, against the lines of output. The first element
// of a group anchors a line (scanning forward from the previous match); the
// remaining elements must all appear on that anchored line. The first
// unsatisfied requirement is returned as an error; nil means all matched.
func checkOutputReturnError(output string, required [][]string) error {
    outLines := strings.Split(output, "\n")
    cursor := 0
    for _, group := range required {
        anchor := group[0]
        // Advance to the next line containing the anchor. The cursor is not
        // moved past a matched line, so consecutive groups may anchor the
        // same line.
        for cursor < len(outLines) && !strings.Contains(outLines[cursor], anchor) {
            cursor++
        }
        if cursor == len(outLines) {
            return fmt.Errorf("failed to find %s in %s", anchor, output)
        }
        for _, want := range group[1:] {
            if !strings.Contains(outLines[cursor], want) {
                return fmt.Errorf("failed to find %s in %s", want, outLines[cursor])
            }
        }
    }
    return nil
}
// checkOutput asserts that output satisfies the required string groups,
// failing the test on the first mismatch.
func checkOutput(output string, required [][]string) {
    if err := checkOutputReturnError(output, required); err != nil {
        framework.Failf("%v", err)
    }
}
// checkKubectlOutputWithRetry polls the given kubectl command for up to a
// minute until its output satisfies the required string groups (see
// checkOutputReturnError), failing the test with the last mismatch if it
// never does.
func checkKubectlOutputWithRetry(required [][]string, args ...string) {
	var pollErr error
	// The wait.PollImmediate return value is deliberately ignored: on
	// timeout pollErr still holds the most recent mismatch, which is a more
	// useful failure message than the generic timeout error.
	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		output := framework.RunKubectlOrDie(args...)
		if err := checkOutputReturnError(output, required); err != nil {
			pollErr = err
			return false, nil
		}
		pollErr = nil
		return true, nil
	})
	if pollErr != nil {
		framework.Failf("%v", pollErr)
	}
}
// getAPIVersions performs a GET against apiEndpoint and decodes the
// response body into a metav1.APIVersions document.
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
    body, err := curl(apiEndpoint)
    if err != nil {
        return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
    }
    apiVersions := &metav1.APIVersions{}
    if unmarshalErr := json.Unmarshal([]byte(body), apiVersions); unmarshalErr != nil {
        return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, unmarshalErr)
    }
    return apiVersions, nil
}
// startProxyServer launches `kubectl proxy -p 0`, letting the OS choose a
// free port, reads the first chunk of the proxy's stdout, and parses the
// bound port from it. The *exec.Cmd is returned even on failure so the
// caller can kill the process.
func startProxyServer() (int, *exec.Cmd, error) {
	// Specifying port 0 indicates we want the os to pick a random port.
	cmd := framework.KubectlCmd("proxy", "-p", "0")
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		return -1, nil, err
	}
	// NOTE(review): both pipes are closed when this function returns even
	// though the proxy keeps running; callers interact with it over HTTP.
	defer stdout.Close()
	defer stderr.Close()
	// A single read of the startup banner is expected to contain the port.
	buf := make([]byte, 128)
	var n int
	if n, err = stdout.Read(buf); err != nil {
		return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
	}
	output := string(buf[:n])
	// proxyRegexp is defined elsewhere in this file; presumably its first
	// capture group is the port number — match[1] is parsed as an int.
	match := proxyRegexp.FindStringSubmatch(output)
	if len(match) == 2 {
		if port, err := strconv.Atoi(match[1]); err == nil {
			return port, cmd, nil
		}
	}
	return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
}
// curlUnix issues an HTTP GET for url over the unix-domain socket at path.
// The host portion of url is ignored; every connection is dialed to the
// socket.
func curlUnix(url string, path string) (string, error) {
    transport := utilnet.SetTransportDefaults(&http.Transport{
        Dial: func(proto, addr string) (net.Conn, error) {
            // Route all connections to the unix socket regardless of addr.
            return net.Dial("unix", path)
        },
    })
    return curlTransport(url, transport)
}
func curlTransport(url string, transport *http.Transport) (string, error) {
client := &http.Client{Transport: transport}
resp, err := client.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body[:]), nil
}
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
// validateGuestbookApp waits for the guestbook frontend pods to run, then
// exercises the app end to end: an empty read, a write, and a read-back of
// the written entry. Any failure aborts the test via framework.Failf.
func validateGuestbookApp(c clientset.Interface, ns string) {
	framework.Logf("Waiting for all frontend pods to be Running.")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
	err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for frontend to serve content.")
	if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
		framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
	}

	framework.Logf("Trying to add a new entry to the guestbook.")
	if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
		// Grammar fixed in the failure message ("Cannot added" -> "Cannot add").
		framework.Failf("Cannot add new entry in %v seconds.", guestbookResponseTimeout.Seconds())
	}

	framework.Logf("Verifying that added entry can be retrieved.")
	if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
	}
}
// waitForGuestbookResponse polls the guestbook every 5s until it answers cmd
// with expectedResponse or timeout elapses; it reports whether it succeeded.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
	start := time.Now()
	for time.Since(start) < timeout {
		res, err := makeRequestToGuestbook(c, cmd, arg, ns)
		if err == nil && res == expectedResponse {
			return true
		}
		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
		time.Sleep(5 * time.Second)
	}
	return false
}
// makeRequestToGuestbook issues one request to the guestbook "frontend"
// service through the apiserver service proxy, hitting /guestbook.php with
// the given cmd (e.g. "get"/"set") and value under the fixed key "messages",
// and returns the raw response body.
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
	if errProxy != nil {
		return "", errProxy
	}
	// Bound the call so a hung frontend cannot stall the test indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()
	result, err := proxyRequest.Namespace(ns).
		Context(ctx).
		Name("frontend").
		Suffix("/guestbook.php").
		Param("cmd", cmd).
		Param("key", "messages").
		Param("value", value).
		Do().
		Raw()
	return string(result), err
}
// updateDemoData mirrors the JSON served by the update-demo pods' data.json
// endpoint; only the image name is of interest to the validator.
type updateDemoData struct {
	Image string
}

// applyTestLabel is the label/selector key the apply tests inject to verify
// that `kubectl apply` merges user-added metadata correctly.
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readBytesFromFile(filename string) []byte {
file, err := os.Open(filename)
if err != nil {
framework.Failf(err.Error())
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
framework.Failf(err.Error())
}
return data
}
// readReplicationControllerFromString decodes a YAML/JSON manifest into a
// v1.ReplicationController, aborting the test if it cannot be parsed.
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
	var rc v1.ReplicationController
	err := yaml.Unmarshal([]byte(contents), &rc)
	if err != nil {
		framework.Failf(err.Error())
	}
	return &rc
}
// modifyReplicationControllerConfiguration adds the applyTestLabel to an RC
// manifest's labels, selector, and pod-template labels, and returns the
// modified object re-serialized as JSON.
func modifyReplicationControllerConfiguration(contents string) io.Reader {
	rc := readReplicationControllerFromString(contents)
	// Guard against manifests that omit these maps: assigning into a nil map
	// panics. (Assumes rc.Spec.Template itself is non-nil, as the original
	// code did — TODO confirm against the fixtures used by callers.)
	if rc.Labels == nil {
		rc.Labels = map[string]string{}
	}
	if rc.Spec.Selector == nil {
		rc.Spec.Selector = map[string]string{}
	}
	if rc.Spec.Template.Labels == nil {
		rc.Spec.Template.Labels = map[string]string{}
	}
	rc.Labels[applyTestLabel] = "ADDED"
	rc.Spec.Selector[applyTestLabel] = "ADDED"
	rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
	data, err := json.Marshal(rc)
	if err != nil {
		framework.Failf("json marshal failed: %s\n", err)
	}
	return bytes.NewReader(data)
}
// forEachReplicationController polls (up to framework.PodListTimeout) for
// replication controllers in ns matching selectorKey=selectorValue, then
// invokes fn once per RC found. It fails the test if none ever appear.
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
	var rcs *v1.ReplicationControllerList
	var err error
	// Poll until at least one matching RC exists or the timeout elapses.
	for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
		label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
		options := metav1.ListOptions{LabelSelector: label.String()}
		rcs, err = c.Core().ReplicationControllers(ns).List(options)
		Expect(err).NotTo(HaveOccurred())
		if len(rcs.Items) > 0 {
			break
		}
	}
	// rcs can still be nil if the loop body never ran (zero timeout).
	if rcs == nil || len(rcs.Items) == 0 {
		framework.Failf("No replication controllers found")
	}
	for _, rc := range rcs.Items {
		fn(rc)
	}
}
// validateReplicationControllerConfiguration checks that the redis-master RC
// carries both the last-applied-configuration annotation and the ADDED test
// label; RCs with any other name are ignored.
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
	if rc.Name != "redis-master" {
		return
	}
	if _, found := rc.Annotations[v1.LastAppliedConfigAnnotation]; !found {
		framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
	}
	if value, found := rc.Labels[applyTestLabel]; !found || value != "ADDED" {
		framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
	}
}
// getUDData creates a validator function based on the input string (i.e. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
	// getUDData validates data.json in the update-demo (returns nil if data is ok).
	return func(c clientset.Interface, podID string) error {
		framework.Logf("validating pod %s", podID)
		// Older apiservers predate the pods/proxy subresource; choose the
		// proxy URL form the server actually supports.
		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
		if err != nil {
			return err
		}
		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()
		var body []byte
		if subResourceProxyAvailable {
			// Modern form: GET pods/{podID}/proxy/data.json.
			body, err = c.Core().RESTClient().Get().
				Namespace(ns).
				Resource("pods").
				SubResource("proxy").
				Name(podID).
				Suffix("data.json").
				Do().
				Raw()
		} else {
			// Legacy form: GET proxy/pods/{podID}/data.json.
			body, err = c.Core().RESTClient().Get().
				Prefix("proxy").
				Namespace(ns).
				Resource("pods").
				Name(podID).
				Suffix("data.json").
				Do().
				Raw()
		}
		if err != nil {
			// A context error means the call timed out rather than the pod
			// serving bad data — treat that as a hard test failure.
			if ctx.Err() != nil {
				framework.Failf("Failed to retrieve data from container: %v", err)
			}
			return err
		}
		framework.Logf("got data: %s", body)
		var data updateDemoData
		if err := json.Unmarshal(body, &data); err != nil {
			return err
		}
		framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
		// Substring match: the served image name only has to contain the
		// expected fragment, not equal it.
		if strings.Contains(data.Image, jpgExpected) {
			return nil
		} else {
			return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
		}
	}
}
// noOpValidatorFn is a pod validator that accepts any pod unconditionally.
func noOpValidatorFn(c clientset.Interface, podID string) error { return nil }
// newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer.
//
// We're explicitly returning the reader and closer separately, because
// the closer needs to be the *os.File we get from os.Pipe(). This is required
// so the exec of kubectl can pass the underlying file descriptor to the exec
// syscall, instead of creating another os.Pipe and blocking on the io.Copy
// between the source (e.g. stdin) and the write half of the pipe.
//
// NOTE: the write below is synchronous, so s must fit in the kernel pipe
// buffer (typically 64KiB) or this call itself would block.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	// Surface write failures instead of silently dropping them (the original
	// ignored this error), and release both pipe ends on failure.
	if _, err := w.Write([]byte(s)); err != nil {
		r.Close()
		w.Close()
		return nil, nil, err
	}
	return r, w, nil
}
// newStreamingUpload creates a new http.Request body that will stream POST
// a file to a URI: it returns the read half of a pipe whose write half is
// fed by a goroutine multipart-encoding the file.
func newStreamingUpload(filePath string) (*io.PipeReader, *multipart.Writer, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, err
	}
	// Do NOT close file here. The streaming goroutine below reads from it and
	// closes it itself; the original `defer file.Close()` closed the file as
	// soon as this function returned, racing the upload goroutine.
	r, w := io.Pipe()
	postBodyWriter := multipart.NewWriter(w)
	go streamingUpload(file, filepath.Base(filePath), postBodyWriter, w)
	// err is known nil here; return nil explicitly rather than the stale var.
	return r, postBodyWriter, nil
}
// streamingUpload streams a file via a pipe through a multipart.Writer.
// Generally one should use newStreamingUpload instead of calling this directly.
// It owns and closes both file and w; errors abort the test via framework.Failf.
func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
	defer GinkgoRecover()
	defer file.Close()
	defer w.Close()

	// Set up the form file
	fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
	if err != nil {
		// Message fixed: "Unable to to write" -> "Unable to write".
		framework.Failf("Unable to write file at %s to buffer. Error: %s", fileName, err)
	}

	// Copy kubectl binary into the file writer
	if _, err := io.Copy(fileWriter, file); err != nil {
		// Message fixed: "Unable to to copy" -> "Unable to copy".
		framework.Failf("Unable to copy file at %s into the file writer. Error: %s", fileName, err)
	}

	// Nothing more should be written to this instance of the postBodyWriter
	if err := postBodyWriter.Close(); err != nil {
		framework.Failf("Unable to close the writer for file upload. Error: %s", err)
	}
}
// startLocalProxy runs an in-process verbose goproxy HTTP proxy whose log
// output is captured in the returned buffer. The caller must Close the server.
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
	logs = new(bytes.Buffer)
	proxy := goproxy.NewProxyHttpServer()
	proxy.Verbose = true
	proxy.Logger = log.New(logs, "", 0)
	srv = httptest.NewServer(proxy)
	return srv, logs
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// godoc: Go Documentation Server
// Web server tree:
//
// http://godoc/ main landing page
// http://godoc/doc/ serve from $GOROOT/doc - spec, mem, tutorial, etc.
// http://godoc/src/ serve files from $GOROOT/src; .go gets pretty-printed
// http://godoc/cmd/ serve documentation about commands (TODO)
// http://godoc/pkg/ serve documentation about packages
// (idea is if you say import "compress/zlib", you go to
// http://godoc/pkg/compress/zlib)
//
// Command-line interface:
//
// godoc packagepath [name ...]
//
// godoc compress/zlib
// - prints doc for package proto
// godoc compress/zlib Cipher NewCMAC
// - prints doc for Cipher and NewCMAC in package crypto/block
package main
import (
"container/vector";
"flag";
"fmt";
"go/ast";
"go/doc";
"go/parser";
"go/token";
"http";
"io";
"log";
"net";
"os";
pathutil "path";
"sort";
"strings";
"tabwriter";
"template";
"time";
"astprinter";
"comment";
)
const Pkg = "/pkg/" // name for auto-generated package documentation tree
var (
verbose = flag.Bool("v", false, "verbose mode");
// file system roots
launchdir string; // directory from which godoc was launched
goroot string;
pkgroot = flag.String("pkgroot", "src/lib", "root package source directory (if unrooted, relative to goroot)");
tmplroot = flag.String("tmplroot", "usr/gri/pretty", "root template directory (if unrooted, relative to goroot)");
// layout control
tabwidth = flag.Int("tabwidth", 4, "tab width");
usetabs = flag.Bool("tabs", false, "align with tabs instead of spaces");
html = flag.Bool("html", false, "print HTML in command-line mode");
// server control
httpaddr = flag.String("http", "", "HTTP service address (e.g., ':6060')");
)
func init() {
var err os.Error;
goroot, err = os.Getenv("GOROOT");
if err != nil {
goroot = "/home/r/go-release/go";
}
flag.StringVar(&goroot, "goroot", goroot, "Go root directory");
}
// ----------------------------------------------------------------------------
// Support
func isGoFile(dir *os.Dir) bool {
return dir.IsRegular() && pathutil.Ext(dir.Name) == ".go";
}
func isDir(name string) bool {
d, err := os.Stat(name);
return err == nil && d.IsDirectory();
}
func makeTabwriter(writer io.Writer) *tabwriter.Writer {
padchar := byte(' ');
if *usetabs {
padchar = '\t';
}
return tabwriter.NewWriter(writer, *tabwidth, 1, padchar, tabwriter.FilterHTML);
}
// TODO(rsc): this belongs in a library somewhere, maybe os
func ReadFile(name string) ([]byte, os.Error) {
f, err := os.Open(name, os.O_RDONLY, 0);
if err != nil {
return nil, err;
}
defer f.Close();
var buf io.ByteBuffer;
if n, err := io.Copy(f, &buf); err != nil {
return nil, err;
}
return buf.Data(), nil;
}
// ----------------------------------------------------------------------------
// Parsing
// A single error in the parsed file.
type parseError struct {
src []byte; // source before error
line int; // line number of error
msg string; // error message
}
// All the errors in the parsed file, plus surrounding source code.
// Each error has a slice giving the source text preceding it
// (starting where the last error occurred). The final element in list[]
// has msg = "", to give the remainder of the source code.
// This data structure is handed to the templates parseerror.txt and parseerror.html.
//
type parseErrors struct {
filename string; // path to file
list []parseError; // the errors
src []byte; // the file's entire source code
}
// Parses a file (path) and returns the corresponding AST and
// a sorted list (by file position) of errors, if any.
//
func parse(path string, mode uint) (*ast.Program, *parseErrors) {
src, err := ReadFile(path);
if err != nil {
log.Stderrf("ReadFile %s: %v", path, err);
errs := []parseError{parseError{nil, 0, err.String()}};
return nil, &parseErrors{path, errs, nil};
}
prog, err := parser.Parse(src, mode);
if err != nil {
// sort and convert error list
if errors, ok := err.(parser.ErrorList); ok {
sort.Sort(errors);
errs := make([]parseError, len(errors) + 1); // +1 for final fragment of source
offs := 0;
for i, r := range errors {
// Should always be true, but check for robustness.
if 0 <= r.Pos.Offset && r.Pos.Offset <= len(src) {
errs[i].src = src[offs : r.Pos.Offset];
offs = r.Pos.Offset;
}
errs[i].line = r.Pos.Line;
errs[i].msg = r.Msg;
}
errs[len(errors)].src = src[offs : len(src)];
return nil, &parseErrors{path, errs, src};
} else {
// TODO should have some default handling here to be more robust
panic("unreachable");
}
}
return prog, nil;
}
// ----------------------------------------------------------------------------
// Templates
// Return text for decl.
func DeclText(d ast.Decl) []byte {
var buf io.ByteBuffer;
var p astPrinter.Printer;
p.Init(&buf, nil, nil, false);
d.Visit(&p);
return buf.Data();
}
// Return text for expr.
func ExprText(d ast.Expr) []byte {
var buf io.ByteBuffer;
var p astPrinter.Printer;
p.Init(&buf, nil, nil, false);
d.Visit(&p);
return buf.Data();
}
// Convert x, whatever it is, to text form.
func toText(x interface{}) []byte {
type String interface { String() string }
switch v := x.(type) {
case []byte:
return v;
case string:
return io.StringBytes(v);
case String:
return io.StringBytes(v.String());
case ast.Decl:
return DeclText(v);
case ast.Expr:
return ExprText(v);
}
var buf io.ByteBuffer;
fmt.Fprint(&buf, x);
return buf.Data();
}
// Template formatter for "html" format.
func htmlFmt(w io.Writer, x interface{}, format string) {
// Can do better than text in some cases.
switch v := x.(type) {
case ast.Decl:
var p astPrinter.Printer;
tw := makeTabwriter(w);
p.Init(tw, nil, nil, true);
v.Visit(&p);
tw.Flush();
case ast.Expr:
var p astPrinter.Printer;
tw := makeTabwriter(w);
p.Init(tw, nil, nil, true);
v.Visit(&p);
tw.Flush();
default:
template.HtmlEscape(w, toText(x));
}
}
// Template formatter for "html-comment" format.
func htmlCommentFmt(w io.Writer, x interface{}, format string) {
comment.ToHtml(w, toText(x));
}
// Template formatter for "" (default) format.
func textFmt(w io.Writer, x interface{}, format string) {
w.Write(toText(x));
}
var fmap = template.FormatterMap{
"": textFmt,
"html": htmlFmt,
"html-comment": htmlCommentFmt,
}
// readTemplate parses the template file name under *tmplroot; any failure
// exits the process. (Pre-1.0 Go dialect, preserved.)
func readTemplate(name string) *template.Template {
	path := pathutil.Join(*tmplroot, name);
	data, err := ReadFile(path);
	if err != nil {
		log.Exitf("ReadFile %s: %v", path, err);
	}
	t, err1 := template.Parse(string(data), fmap);
	if err1 != nil {
		// Bug fix: report the parse error err1, not err (which is nil here).
		log.Exitf("%s: %v", name, err1);
	}
	return t;
}
var godocHtml *template.Template
var packageHtml *template.Template
var packageText *template.Template
var dirlistHtml *template.Template;
var dirlistText *template.Template;
var parseerrorHtml *template.Template;
var parseerrorText *template.Template;
func readTemplates() {
// have to delay until after flags processing,
// so that main has chdir'ed to goroot.
godocHtml = readTemplate("godoc.html");
packageHtml = readTemplate("package.html");
packageText = readTemplate("package.txt");
dirlistHtml = readTemplate("dirlist.html");
dirlistText = readTemplate("dirlist.txt");
parseerrorHtml = readTemplate("parseerror.html");
parseerrorText = readTemplate("parseerror.txt");
}
// ----------------------------------------------------------------------------
// Generic HTML wrapper
func servePage(c *http.Conn, title, content interface{}) {
type Data struct {
title interface{};
header interface{};
timestamp string;
content interface{};
}
var d Data;
d.title = title;
d.header = title;
d.timestamp = time.UTC().String();
d.content = content;
godocHtml.Execute(&d, c);
}
func serveText(c *http.Conn, text []byte) {
c.SetHeader("content-type", "text/plain; charset=utf-8");
c.Write(text);
}
// ----------------------------------------------------------------------------
// Files
func serveParseErrors(c *http.Conn, errors *parseErrors) {
// format errors
var buf io.ByteBuffer;
parseerrorHtml.Execute(errors, &buf);
servePage(c, errors.filename + " - Parse Errors", buf.Data());
}
func serveGoSource(c *http.Conn, name string) {
prog, errors := parse(name, parser.ParseComments);
if errors != nil {
serveParseErrors(c, errors);
return;
}
var buf io.ByteBuffer;
fmt.Fprintln(&buf, "<pre>");
var p astPrinter.Printer;
writer := makeTabwriter(&buf); // for nicely formatted output
p.Init(writer, nil, nil, true);
p.DoProgram(prog);
writer.Flush(); // ignore errors
fmt.Fprintln(&buf, "</pre>");
servePage(c, name + " - Go source", buf.Data());
}
var fileServer = http.FileServer(".", "");
func serveFile(c *http.Conn, req *http.Request) {
// pick off special cases and hand the rest to the standard file server
switch {
case req.Url.Path == "/":
// serve landing page.
// TODO: hide page from ordinary file serving.
// writing doc/index.html will take care of that.
http.ServeFile(c, req, "doc/root.html");
case req.Url.Path == "/doc/root.html":
// hide landing page from its real name
// TODO why - there is no reason for this (remove eventually)
http.NotFound(c, req);
case pathutil.Ext(req.Url.Path) == ".go":
serveGoSource(c, req.Url.Path[1 : len(req.Url.Path)]); // strip leading '/' from name
default:
// TODO not good enough - don't want to download files
// want to see them
fileServer.ServeHTTP(c, req);
}
}
// ----------------------------------------------------------------------------
// Packages
type pakDesc struct {
dirname string; // relative to goroot
pakname string; // same as last component of importpath
importpath string; // import "___"
filenames map[string] bool; // set of file (names) belonging to this package
}
// TODO if we don't plan to use the directory information, simplify to []string
type dirList []*os.Dir
func (d dirList) Len() int { return len(d) }
func (d dirList) Less(i, j int) bool { return d[i].Name < d[j].Name }
func (d dirList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// isPackageFile reports whether dirname/filename is a non-test Go file whose
// package clause names pakname. (Pre-1.0 Go dialect, preserved.)
func isPackageFile(dirname, filename, pakname string) bool {
	// ignore test files
	if strings.HasSuffix(filename, "_test.go") {
		return false;
	}
	// determine package name
	prog, errors := parse(dirname + "/" + filename, parser.PackageClauseOnly);
	if prog == nil {
		return false;
	}
	// prog is known non-nil here; the original re-checked it redundantly.
	return prog.Name.Value == pakname;
}
// Returns the package denoted by path and the list of
// sub-directories in the corresponding package directory.
// If there is no such package, the first result is nil. If
// there are no sub-directories, that list is nil.
func findPackage(path string) (*pakDesc, dirList) {
// get directory contents, if possible
importpath := pathutil.Clean(path); // no trailing '/'
dirname := pathutil.Join(*pkgroot, importpath);
if !isDir(dirname) {
return nil, nil;
}
fd, err1 := os.Open(dirname, os.O_RDONLY, 0);
if err1 != nil {
log.Stderrf("open %s: %v", dirname, err1);
return nil, nil;
}
list, err2 := fd.Readdir(-1);
if err2 != nil {
log.Stderrf("readdir %s: %v", dirname, err2);
return nil, nil;
}
// the package name is is the directory name within its parent
_, pakname := pathutil.Split(dirname);
// collect all files belonging to the package and count the
// number of sub-directories
filenames := make(map[string]bool);
nsub := 0;
for i, entry := range list {
switch {
case isGoFile(&entry) && isPackageFile(dirname, entry.Name, pakname):
// add file to package desc
if tmp, found := filenames[entry.Name]; found {
panic("internal error: same file added more than once: " + entry.Name);
}
filenames[entry.Name] = true;
case entry.IsDirectory():
nsub++;
}
}
// make the list of sub-directories, if any
var subdirs dirList;
if nsub > 0 {
subdirs = make(dirList, nsub);
nsub = 0;
for i, entry := range list {
if entry.IsDirectory() {
// make a copy here so sorting (and other code) doesn't
// have to make one every time an entry is moved
copy := new(os.Dir);
*copy = entry;
subdirs[nsub] = copy;
nsub++;
}
}
sort.Sort(subdirs);
}
// if there are no package files, then there is no package
if len(filenames) == 0 {
return nil, subdirs;
}
return &pakDesc{dirname, pakname, importpath, filenames}, subdirs;
}
func (p *pakDesc) Doc() (*doc.PackageDoc, *parseErrors) {
// compute documentation
var r doc.DocReader;
i := 0;
for filename := range p.filenames {
prog, err := parse(p.dirname + "/" + filename, parser.ParseComments);
if err != nil {
return nil, err;
}
if i == 0 {
// first file - initialize doc
r.Init(prog.Name.Value, p.importpath);
}
i++;
r.AddProgram(prog);
}
return r.Doc(), nil;
}
func servePackage(c *http.Conn, desc *pakDesc) {
doc, errors := desc.Doc();
if errors != nil {
serveParseErrors(c, errors);
return;
}
var buf io.ByteBuffer;
if false { // TODO req.Params["format"] == "text"
err := packageText.Execute(doc, &buf);
if err != nil {
log.Stderrf("packageText.Execute: %s", err);
}
serveText(c, buf.Data());
return;
}
err := packageHtml.Execute(doc, &buf);
if err != nil {
log.Stderrf("packageHtml.Execute: %s", err);
}
servePage(c, doc.ImportPath + " - Go package documentation", buf.Data());
}
func serveDirList(c *http.Conn, path string, dirs dirList) {
var buf io.ByteBuffer;
err := dirlistHtml.Execute(dirs, &buf);
if err != nil {
log.Stderrf("dirlist.Execute: %s", err);
}
servePage(c, path + " - Directories", buf.Data());
}
func servePkg(c *http.Conn, r *http.Request) {
path := r.Url.Path;
path = path[len(Pkg) : len(path)];
desc, dirs := findPackage(path);
/*
// TODO do we still need this?
if r.Url.Path != Pkg + info.Path {
http.Redirect(c, info.Path, http.StatusMovedPermanently);
return;
}
*/
if desc != nil {
servePackage(c, desc);
// TODO should also serve sub-directories if there are any
} else {
// make sure path is not empty otherwise html links become rooted
// and won't work correctly
if path == "" {
path = ".";
}
serveDirList(c, path, dirs);
}
}
// ----------------------------------------------------------------------------
// Server
func loggingHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(c *http.Conn, req *http.Request) {
log.Stderrf("%s\t%s", req.Host, req.Url.Path);
h.ServeHTTP(c, req);
})
}
// restartGodoc forks and execs a fresh copy of the running godoc binary
// (the /debug/restart handler), then exits the current process on success.
// (Pre-1.0 Go dialect, preserved.)
func restartGodoc(c *http.Conn, r *http.Request) {
	binary := os.Args[0];
	// Bug fix: the condition used "||", which (a) indexed binary[0] even when
	// the string was empty and (b) rewrote absolute paths too. A binary path
	// needs the launchdir prefix only when it is non-empty AND relative.
	if len(binary) > 0 && binary[0] != '/' {
		binary = pathutil.Join(launchdir, binary);
	}
	pid, err := os.ForkExec(binary, os.Args, os.Environ(), "", []*os.File{os.Stdin, os.Stdout, os.Stderr});
	if err != nil {
		log.Stderrf("os.ForkExec(%s): %v", binary, err);
		return; // do not terminate
	}
	log.Stderrf("restarted %s, pid = %d\n", binary, pid);
	os.Exit(0);
}
func usage() {
fmt.Fprintf(os.Stderr,
"usage: godoc package [name ...]\n"
" godoc -http=:6060\n"
);
flag.PrintDefaults();
os.Exit(1);
}
func main() {
flag.Parse();
// Check usage first; get usage message out early.
switch {
case *httpaddr != "":
if flag.NArg() != 0 {
usage();
}
default:
if flag.NArg() == 0 {
usage();
}
}
var err os.Error;
if launchdir, err = os.Getwd(); err != nil {
log.Stderrf("unable to determine current working directory - restart may fail");
launchdir = "";
}
if err := os.Chdir(goroot); err != nil {
log.Exitf("chdir %s: %v", goroot, err);
}
readTemplates();
if *httpaddr != "" {
var handler http.Handler = http.DefaultServeMux;
if *verbose {
log.Stderrf("Go Documentation Server\n");
log.Stderrf("address = %s\n", *httpaddr);
log.Stderrf("goroot = %s\n", goroot);
log.Stderrf("pkgroot = %s\n", *pkgroot);
log.Stderrf("tmplroot = %s\n", *tmplroot);
handler = loggingHandler(handler);
}
http.Handle(Pkg, http.HandlerFunc(servePkg));
http.Handle("/debug/restart", http.HandlerFunc(restartGodoc));
http.Handle("/", http.HandlerFunc(serveFile));
// The server may have been restarted; always wait 1sec to
// give the forking server a chance to shut down and release
// the http port. (This is necessary because under OS X Exec
// won't work if there are more than one thread running.)
time.Sleep(1e9);
if err := http.ListenAndServe(*httpaddr, handler); err != nil {
log.Exitf("ListenAndServe %s: %v", *httpaddr, err)
}
return;
}
if *html {
packageText = packageHtml;
dirlistText = dirlistHtml;
parseerrorText = parseerrorHtml;
}
desc, dirs := findPackage(flag.Arg(0));
if desc == nil {
err := dirlistText.Execute(dirs, os.Stdout);
if err != nil {
log.Stderrf("dirlistText.Execute: %s", err);
}
os.Exit(0);
}
doc, errors := desc.Doc();
if errors != nil {
err := parseerrorText.Execute(errors, os.Stderr);
if err != nil {
log.Stderrf("parseerrorText.Execute: %s", err);
}
os.Exit(1);
}
if flag.NArg() > 1 {
args := flag.Args();
doc.Filter(args[1 : len(args)]);
}
packageText.Execute(doc, os.Stdout);
}
Fix typo.

TBR=rsc
OCL=29010
CL=29010
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// godoc: Go Documentation Server
// Web server tree:
//
// http://godoc/ main landing page
// http://godoc/doc/ serve from $GOROOT/doc - spec, mem, tutorial, etc.
// http://godoc/src/ serve files from $GOROOT/src; .go gets pretty-printed
// http://godoc/cmd/ serve documentation about commands (TODO)
// http://godoc/pkg/ serve documentation about packages
// (idea is if you say import "compress/zlib", you go to
// http://godoc/pkg/compress/zlib)
//
// Command-line interface:
//
// godoc packagepath [name ...]
//
// godoc compress/zlib
// - prints doc for package proto
// godoc compress/zlib Cipher NewCMAC
// - prints doc for Cipher and NewCMAC in package crypto/block
package main
import (
"container/vector";
"flag";
"fmt";
"go/ast";
"go/doc";
"go/parser";
"go/token";
"http";
"io";
"log";
"net";
"os";
pathutil "path";
"sort";
"strings";
"tabwriter";
"template";
"time";
"astprinter";
"comment";
)
const Pkg = "/pkg/" // name for auto-generated package documentation tree
var (
verbose = flag.Bool("v", false, "verbose mode");
// file system roots
launchdir string; // directory from which godoc was launched
goroot string;
pkgroot = flag.String("pkgroot", "src/lib", "root package source directory (if unrooted, relative to goroot)");
tmplroot = flag.String("tmplroot", "usr/gri/pretty", "root template directory (if unrooted, relative to goroot)");
// layout control
tabwidth = flag.Int("tabwidth", 4, "tab width");
usetabs = flag.Bool("tabs", false, "align with tabs instead of spaces");
html = flag.Bool("html", false, "print HTML in command-line mode");
// server control
httpaddr = flag.String("http", "", "HTTP service address (e.g., ':6060')");
)
func init() {
var err os.Error;
goroot, err = os.Getenv("GOROOT");
if err != nil {
goroot = "/home/r/go-release/go";
}
flag.StringVar(&goroot, "goroot", goroot, "Go root directory");
}
// ----------------------------------------------------------------------------
// Support
func isGoFile(dir *os.Dir) bool {
return dir.IsRegular() && pathutil.Ext(dir.Name) == ".go";
}
func isDir(name string) bool {
d, err := os.Stat(name);
return err == nil && d.IsDirectory();
}
func makeTabwriter(writer io.Writer) *tabwriter.Writer {
padchar := byte(' ');
if *usetabs {
padchar = '\t';
}
return tabwriter.NewWriter(writer, *tabwidth, 1, padchar, tabwriter.FilterHTML);
}
// TODO(rsc): this belongs in a library somewhere, maybe os
func ReadFile(name string) ([]byte, os.Error) {
f, err := os.Open(name, os.O_RDONLY, 0);
if err != nil {
return nil, err;
}
defer f.Close();
var buf io.ByteBuffer;
if n, err := io.Copy(f, &buf); err != nil {
return nil, err;
}
return buf.Data(), nil;
}
// ----------------------------------------------------------------------------
// Parsing
// A single error in the parsed file.
type parseError struct {
src []byte; // source before error
line int; // line number of error
msg string; // error message
}
// All the errors in the parsed file, plus surrounding source code.
// Each error has a slice giving the source text preceding it
// (starting where the last error occurred). The final element in list[]
// has msg = "", to give the remainder of the source code.
// This data structure is handed to the templates parseerror.txt and parseerror.html.
//
type parseErrors struct {
filename string; // path to file
list []parseError; // the errors
src []byte; // the file's entire source code
}
// Parses a file (path) and returns the corresponding AST and
// a sorted list (by file position) of errors, if any.
//
func parse(path string, mode uint) (*ast.Program, *parseErrors) {
src, err := ReadFile(path);
if err != nil {
log.Stderrf("ReadFile %s: %v", path, err);
errs := []parseError{parseError{nil, 0, err.String()}};
return nil, &parseErrors{path, errs, nil};
}
prog, err := parser.Parse(src, mode);
if err != nil {
// sort and convert error list
if errors, ok := err.(parser.ErrorList); ok {
sort.Sort(errors);
errs := make([]parseError, len(errors) + 1); // +1 for final fragment of source
offs := 0;
for i, r := range errors {
// Should always be true, but check for robustness.
if 0 <= r.Pos.Offset && r.Pos.Offset <= len(src) {
errs[i].src = src[offs : r.Pos.Offset];
offs = r.Pos.Offset;
}
errs[i].line = r.Pos.Line;
errs[i].msg = r.Msg;
}
errs[len(errors)].src = src[offs : len(src)];
return nil, &parseErrors{path, errs, src};
} else {
// TODO should have some default handling here to be more robust
panic("unreachable");
}
}
return prog, nil;
}
// ----------------------------------------------------------------------------
// Templates
// Return text for decl.
func DeclText(d ast.Decl) []byte {
var buf io.ByteBuffer;
var p astPrinter.Printer;
p.Init(&buf, nil, nil, false);
d.Visit(&p);
return buf.Data();
}
// Return text for expr.
func ExprText(d ast.Expr) []byte {
var buf io.ByteBuffer;
var p astPrinter.Printer;
p.Init(&buf, nil, nil, false);
d.Visit(&p);
return buf.Data();
}
// Convert x, whatever it is, to text form.
func toText(x interface{}) []byte {
type String interface { String() string }
switch v := x.(type) {
case []byte:
return v;
case string:
return io.StringBytes(v);
case String:
return io.StringBytes(v.String());
case ast.Decl:
return DeclText(v);
case ast.Expr:
return ExprText(v);
}
var buf io.ByteBuffer;
fmt.Fprint(&buf, x);
return buf.Data();
}
// Template formatter for "html" format.
func htmlFmt(w io.Writer, x interface{}, format string) {
// Can do better than text in some cases.
switch v := x.(type) {
case ast.Decl:
var p astPrinter.Printer;
tw := makeTabwriter(w);
p.Init(tw, nil, nil, true);
v.Visit(&p);
tw.Flush();
case ast.Expr:
var p astPrinter.Printer;
tw := makeTabwriter(w);
p.Init(tw, nil, nil, true);
v.Visit(&p);
tw.Flush();
default:
template.HtmlEscape(w, toText(x));
}
}
// Template formatter for "html-comment" format.
func htmlCommentFmt(w io.Writer, x interface{}, format string) {
comment.ToHtml(w, toText(x));
}
// Template formatter for "" (default) format.
func textFmt(w io.Writer, x interface{}, format string) {
w.Write(toText(x));
}
var fmap = template.FormatterMap{
"": textFmt,
"html": htmlFmt,
"html-comment": htmlCommentFmt,
}
// readTemplate parses the template file name under *tmplroot; any failure
// exits the process. (Pre-1.0 Go dialect, preserved.)
func readTemplate(name string) *template.Template {
	path := pathutil.Join(*tmplroot, name);
	data, err := ReadFile(path);
	if err != nil {
		log.Exitf("ReadFile %s: %v", path, err);
	}
	t, err1 := template.Parse(string(data), fmap);
	if err1 != nil {
		// Bug fix: report the parse error err1, not err (which is nil here).
		log.Exitf("%s: %v", name, err1);
	}
	return t;
}
var godocHtml *template.Template
var packageHtml *template.Template
var packageText *template.Template
var dirlistHtml *template.Template;
var dirlistText *template.Template;
var parseerrorHtml *template.Template;
var parseerrorText *template.Template;
func readTemplates() {
// have to delay until after flags processing,
// so that main has chdir'ed to goroot.
godocHtml = readTemplate("godoc.html");
packageHtml = readTemplate("package.html");
packageText = readTemplate("package.txt");
dirlistHtml = readTemplate("dirlist.html");
dirlistText = readTemplate("dirlist.txt");
parseerrorHtml = readTemplate("parseerror.html");
parseerrorText = readTemplate("parseerror.txt");
}
// ----------------------------------------------------------------------------
// Generic HTML wrapper

// servePage renders the standard godoc page frame (godoc.html template)
// around the given content and writes it to the connection.
func servePage(c *http.Conn, title, content interface{}) {
	// Data's fields correspond to the slots referenced by godoc.html.
	type Data struct {
		title interface{};
		header interface{};
		timestamp string;
		content interface{};
	}
	var d Data;
	d.title = title;
	d.header = title;	// the page header mirrors the title
	d.timestamp = time.UTC().String();	// page generation time
	d.content = content;
	godocHtml.Execute(&d, c);	// NOTE(review): Execute error is ignored
}
// serveText writes text as a plain-text UTF-8 HTTP response.
func serveText(c *http.Conn, text []byte) {
	c.SetHeader("content-type", "text/plain; charset=utf-8");
	c.Write(text);	// NOTE(review): Write error is ignored
}
// ----------------------------------------------------------------------------
// Files

// serveParseErrors renders the parse errors for a file as an HTML page
// using the parseerror.html template.
func serveParseErrors(c *http.Conn, errors *parseErrors) {
	// format errors
	var buf io.ByteBuffer;
	parseerrorHtml.Execute(errors, &buf);	// NOTE(review): Execute error is ignored
	servePage(c, errors.filename + " - Parse Errors", buf.Data());
}
// serveGoSource parses the named Go source file and serves it as a
// pretty-printed page inside a <pre> block; if parsing fails, the
// parse errors are served instead.
// NOTE(review): the printed source is not HTML-escaped inside the
// <pre> block — verify this is acceptable for the files being served.
func serveGoSource(c *http.Conn, name string) {
	prog, errors := parse(name, parser.ParseComments);
	if errors != nil {
		serveParseErrors(c, errors);
		return;
	}
	var buf io.ByteBuffer;
	fmt.Fprintln(&buf, "<pre>");
	var p astPrinter.Printer;
	writer := makeTabwriter(&buf); // for nicely formatted output
	p.Init(writer, nil, nil, true);
	p.DoProgram(prog);
	writer.Flush(); // ignore errors
	fmt.Fprintln(&buf, "</pre>");
	servePage(c, name + " - Go source", buf.Data());
}
// fileServer serves static files rooted at the current directory
// (goroot, after main has chdir'ed there).
var fileServer = http.FileServer(".", "");
// serveFile is the catch-all handler: it serves the landing page for "/",
// pretty-prints .go files, and delegates everything else to fileServer.
func serveFile(c *http.Conn, req *http.Request) {
	// pick off special cases and hand the rest to the standard file server
	switch {
	case req.Url.Path == "/":
		// serve landing page.
		// TODO: hide page from ordinary file serving.
		// writing doc/index.html will take care of that.
		http.ServeFile(c, req, "doc/root.html");
	case req.Url.Path == "/doc/root.html":
		// hide landing page from its real name
		// TODO why - there is no reason for this (remove eventually)
		http.NotFound(c, req);
	case pathutil.Ext(req.Url.Path) == ".go":
		serveGoSource(c, req.Url.Path[1 : len(req.Url.Path)]); // strip leading '/' from name
	default:
		// TODO not good enough - don't want to download files
		// want to see them
		fileServer.ServeHTTP(c, req);
	}
}
// ----------------------------------------------------------------------------
// Packages

// pakDesc describes one package found under *pkgroot.
type pakDesc struct {
	dirname string; // relative to goroot
	pakname string; // same as last component of importpath
	importpath string; // import "___"
	filenames map[string] bool; // set of file (names) belonging to this package
}
// TODO if we don't plan to use the directory information, simplify to []string
// dirList implements sort.Interface, ordering directory entries by name.
type dirList []*os.Dir
func (d dirList) Len() int { return len(d) }
func (d dirList) Less(i, j int) bool { return d[i].Name < d[j].Name }
func (d dirList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// isPackageFile reports whether dirname/filename is a Go source file
// that belongs to package pakname. Test files (*_test.go) never count.
func isPackageFile(dirname, filename, pakname string) bool {
	// ignore test files
	if strings.HasSuffix(filename, "_test.go") {
		return false;
	}
	// determine package name; parse errors are deliberately ignored -
	// a file that cannot be parsed simply is not a package file
	prog, _ := parse(dirname + "/" + filename, parser.PackageClauseOnly);
	// (previously the nil check was duplicated: an early return on
	// prog == nil followed by a redundant prog != nil in the result)
	return prog != nil && prog.Name.Value == pakname;
}
// Returns the package denoted by path and the list of
// sub-directories in the corresponding package directory.
// If there is no such package, the first result is nil. If
// there are no sub-directories, that list is nil.
func findPackage(path string) (*pakDesc, dirList) {
	// get directory contents, if possible
	importpath := pathutil.Clean(path); // no trailing '/'
	dirname := pathutil.Join(*pkgroot, importpath);
	if !isDir(dirname) {
		return nil, nil;
	}
	fd, err1 := os.Open(dirname, os.O_RDONLY, 0);
	if err1 != nil {
		log.Stderrf("open %s: %v", dirname, err1);
		return nil, nil;
	}
	defer fd.Close(); // fix: the directory handle was previously never closed (fd leak)
	list, err2 := fd.Readdir(-1); // -1: read all entries
	if err2 != nil {
		log.Stderrf("readdir %s: %v", dirname, err2);
		return nil, nil;
	}
	// the package name is the directory name within its parent
	_, pakname := pathutil.Split(dirname);
	// collect all files belonging to the package and count the
	// number of sub-directories
	filenames := make(map[string]bool);
	nsub := 0;
	for _, entry := range list { // index was unused
		switch {
		case isGoFile(&entry) && isPackageFile(dirname, entry.Name, pakname):
			// add file to package desc
			if _, found := filenames[entry.Name]; found { // value was unused
				panic("internal error: same file added more than once: " + entry.Name);
			}
			filenames[entry.Name] = true;
		case entry.IsDirectory():
			nsub++;
		}
	}
	// make the list of sub-directories, if any
	var subdirs dirList;
	if nsub > 0 {
		subdirs = make(dirList, nsub);
		nsub = 0;
		for _, entry := range list {
			if entry.IsDirectory() {
				// make a copy here so sorting (and other code) doesn't
				// have to make one every time an entry is moved
				copy := new(os.Dir);
				*copy = entry;
				subdirs[nsub] = copy;
				nsub++;
			}
		}
		sort.Sort(subdirs); // order by name (dirList.Less)
	}
	// if there are no package files, then there is no package
	if len(filenames) == 0 {
		return nil, subdirs;
	}
	return &pakDesc{dirname, pakname, importpath, filenames}, subdirs;
}
// Doc computes the documentation for all files of the package.
// It returns the parse errors of the first file that fails to parse.
func (p *pakDesc) Doc() (*doc.PackageDoc, *parseErrors) {
	// compute documentation
	var r doc.DocReader;
	i := 0;	// counts files processed; only the first initializes the reader
	for filename := range p.filenames {
		prog, err := parse(p.dirname + "/" + filename, parser.ParseComments);
		if err != nil {
			return nil, err;
		}
		if i == 0 {
			// first file - initialize doc
			r.Init(prog.Name.Value, p.importpath);
		}
		i++;
		r.AddProgram(prog);
	}
	return r.Doc(), nil;
}
// servePackage renders the HTML documentation page for the given
// package; parse errors are served instead if documentation cannot
// be computed.
func servePackage(c *http.Conn, desc *pakDesc) {
	doc, errors := desc.Doc();
	if errors != nil {
		serveParseErrors(c, errors);
		return;
	}
	var buf io.ByteBuffer;
	if false { // TODO req.Params["format"] == "text"
		// dead branch kept for the TODO: plain-text package output
		err := packageText.Execute(doc, &buf);
		if err != nil {
			log.Stderrf("packageText.Execute: %s", err);
		}
		serveText(c, buf.Data());
		return;
	}
	err := packageHtml.Execute(doc, &buf);
	if err != nil {
		log.Stderrf("packageHtml.Execute: %s", err);
	}
	servePage(c, doc.ImportPath + " - Go package documentation", buf.Data());
}
// serveDirList renders a directory-listing page for path using the
// dirlist.html template.
func serveDirList(c *http.Conn, path string, dirs dirList) {
	var buf io.ByteBuffer;
	err := dirlistHtml.Execute(dirs, &buf);
	if err != nil {
		log.Stderrf("dirlist.Execute: %s", err);
	}
	servePage(c, path + " - Directories", buf.Data());
}
// servePkg handles requests under the Pkg URL prefix: it serves package
// documentation when the path denotes a package, otherwise a directory
// listing of the sub-directories found.
func servePkg(c *http.Conn, r *http.Request) {
	path := r.Url.Path;
	path = path[len(Pkg) : len(path)];	// strip the Pkg prefix
	desc, dirs := findPackage(path);
	/*
	// TODO do we still need this?
	if r.Url.Path != Pkg + info.Path {
		http.Redirect(c, info.Path, http.StatusMovedPermanently);
		return;
	}
	*/
	if desc != nil {
		servePackage(c, desc);
		// TODO should also serve sub-directories if there are any
	} else {
		// make sure path is not empty otherwise html links become rooted
		// and won't work correctly
		if path == "" {
			path = ".";
		}
		serveDirList(c, path, dirs);
	}
}
// ----------------------------------------------------------------------------
// Server

// loggingHandler wraps h so that each request's host and URL path are
// logged to stderr before the request is served.
func loggingHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(c *http.Conn, req *http.Request) {
		log.Stderrf("%s\t%s", req.Host, req.Url.Path);
		h.ServeHTTP(c, req);
	})
}
// restartGodoc re-executes the current binary (handler for
// /debug/restart); on a successful fork+exec the current process exits.
func restartGodoc(c *http.Conn, r *http.Request) {
	binary := os.Args[0];
	if len(binary) > 0 && binary[0] != '/' {
		// relative path: resolve against the directory godoc was launched from
		binary = pathutil.Join(launchdir, binary);
	}
	pid, err := os.ForkExec(binary, os.Args, os.Environ(), "", []*os.File{os.Stdin, os.Stdout, os.Stderr});
	if err != nil {
		log.Stderrf("os.ForkExec(%s): %v", binary, err);
		return; // do not terminate
	}
	log.Stderrf("restarted %s, pid = %d\n", binary, pid);
	os.Exit(0);
}
// usage prints command-line usage plus flag defaults to stderr and
// exits with status 1.
func usage() {
	fmt.Fprintf(os.Stderr,
		"usage: godoc package [name ...]\n"
		" godoc -http=:6060\n"
	);
	flag.PrintDefaults();
	os.Exit(1);
}
// main runs godoc either as an HTTP documentation server (-http set)
// or as a command-line tool that prints documentation for the package
// named by the first argument.
func main() {
	flag.Parse();
	// Check usage first; get usage message out early.
	switch {
	case *httpaddr != "":
		// server mode takes no positional arguments
		if flag.NArg() != 0 {
			usage();
		}
	default:
		// command-line mode requires at least a package path
		if flag.NArg() == 0 {
			usage();
		}
	}
	var err os.Error;
	// remember the launch directory before chdir, so restartGodoc can
	// resolve a relative binary path later
	if launchdir, err = os.Getwd(); err != nil {
		log.Stderrf("unable to determine current working directory - restart may fail");
		launchdir = "";
	}
	if err := os.Chdir(goroot); err != nil {
		log.Exitf("chdir %s: %v", goroot, err);
	}
	readTemplates();	// must happen after the chdir to goroot
	if *httpaddr != "" {
		// web server mode
		var handler http.Handler = http.DefaultServeMux;
		if *verbose {
			log.Stderrf("Go Documentation Server\n");
			log.Stderrf("address = %s\n", *httpaddr);
			log.Stderrf("goroot = %s\n", goroot);
			log.Stderrf("pkgroot = %s\n", *pkgroot);
			log.Stderrf("tmplroot = %s\n", *tmplroot);
			handler = loggingHandler(handler);
		}
		http.Handle(Pkg, http.HandlerFunc(servePkg));
		http.Handle("/debug/restart", http.HandlerFunc(restartGodoc));
		http.Handle("/", http.HandlerFunc(serveFile));
		// The server may have been restarted; always wait 1sec to
		// give the forking server a chance to shut down and release
		// the http port. (This is necessary because under OS X Exec
		// won't work if there are more than one thread running.)
		time.Sleep(1e9);	// 1e9 ns = 1 second
		if err := http.ListenAndServe(*httpaddr, handler); err != nil {
			log.Exitf("ListenAndServe %s: %v", *httpaddr, err)
		}
		return;
	}
	// command-line mode
	if *html {
		// -html: reuse the HTML templates for stdout output
		packageText = packageHtml;
		dirlistText = dirlistHtml;
		parseerrorText = parseerrorHtml;
	}
	desc, dirs := findPackage(flag.Arg(0));
	if desc == nil {
		// no package found - print the directory listing instead
		err := dirlistText.Execute(dirs, os.Stdout);
		if err != nil {
			log.Stderrf("dirlistText.Execute: %s", err);
		}
		os.Exit(0);
	}
	doc, errors := desc.Doc();
	if errors != nil {
		err := parseerrorText.Execute(errors, os.Stderr);
		if err != nil {
			log.Stderrf("parseerrorText.Execute: %s", err);
		}
		os.Exit(1);
	}
	if flag.NArg() > 1 {
		// remaining arguments select which names to show
		args := flag.Args();
		doc.Filter(args[1 : len(args)]);
	}
	packageText.Execute(doc, os.Stdout);	// NOTE(review): Execute error is ignored
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/elazarl/goproxy"
openapi_v2 "github.com/googleapis/gnostic/openapiv2"
"sigs.k8s.io/yaml"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubernetes/pkg/controller"
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Shared constants for the kubectl e2e tests: label selectors, pod and
// resource names, manifest file names, and timeouts.
const (
	updateDemoSelector = "name=update-demo"
	guestbookStartupTimeout = 10 * time.Minute
	guestbookResponseTimeout = 3 * time.Minute
	simplePodSelector = "name=httpd"
	simplePodName = "httpd"
	simplePodResourceName = "pod/httpd"
	httpdDefaultOutput = "It works!"
	simplePodPort = 80
	pausePodSelector = "name=pause"
	pausePodName = "pause"
	busyboxPodSelector = "app=busybox1"
	busyboxPodName = "busybox1"
	kubeCtlManifestPath = "test/e2e/testing-manifests/kubectl"
	agnhostControllerFilename = "agnhost-primary-controller.json.in"
	agnhostServiceFilename = "agnhost-primary-service.json"
	httpdDeployment1Filename = "httpd-deployment1.yaml.in"
	httpdDeployment2Filename = "httpd-deployment2.yaml.in"
	httpdDeployment3Filename = "httpd-deployment3.yaml.in"
	httpdRCFilename = "httpd-rc.yaml.in"
	// metaPattern is a fmt pattern for minimal resource metadata JSON:
	// kind, group, version, name.
	metaPattern = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)
// Container images used by the tests, resolved through the e2e image registry.
var (
	nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
	httpdImage = imageutils.GetE2EImage(imageutils.Httpd)
	busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
	agnhostImage = imageutils.GetE2EImage(imageutils.Agnhost)
)
var (
	// proxyRegexp extracts the local port from "kubectl proxy" startup output.
	proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
	// GroupVersionResources for the alpha and beta CronJob APIs.
	cronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
	cronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
)
// schemaFoo is the OpenAPI v3 validation schema (YAML) for the Foo test CRD.
// NOTE(review): the YAML indentation below appears to have been flattened
// during extraction - verify the literal against the original manifest.
var schemaFoo = []byte(`description: Foo CRD for Testing
type: object
properties:
spec:
type: object
description: Specification of Foo
properties:
bars:
description: List of Bars and their specs.
type: array
items:
type: object
required:
- name
properties:
name:
description: Name of Bar.
type: string
age:
description: Age of Bar.
type: string
bazs:
description: List of Bazs.
items:
type: string
type: array
status:
description: Status of Foo
type: object
properties:
bars:
description: List of Bars and their statuses.
type: array
items:
type: object
properties:
name:
description: Name of Bar.
type: string
available:
description: Whether the Bar is installed.
type: boolean
quxType:
description: Indicates to external qux type.
pattern: in-tree|out-of-tree
type: string`)
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
// Aware of the kubectl example files map.
// cleanupKubectlInputs force-deletes the resources defined in fileContents
// (fed to kubectl on stdin) and then waits until everything matching the
// selectors is gone (see assertCleanup).
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
	ginkgo.By("using delete to clean up resources")
	// support backward compatibility : file paths or raw json - since we are removing file path
	// dependencies from this test.
	framework.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
	assertCleanup(ns, selectors...)
}
// assertCleanup asserts that cleanup of a namespace wrt selectors occurred.
// It polls every 500ms for up to one minute until no rc/svc resources and
// no non-terminating pods matching any selector remain, and fails the test
// with the last observed leftovers on timeout.
func assertCleanup(ns string, selectors ...string) {
	var e error
	verifyCleanupFunc := func() (bool, error) {
		e = nil
		for _, selector := range selectors {
			resources := framework.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
			if resources != "" {
				e = fmt.Errorf("Resources left running after stop:\n%s", resources)
				return false, nil
			}
			// list only pods that are not already terminating
			pods := framework.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
			if pods != "" {
				e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
				return false, nil
			}
		}
		return true, nil
	}
	err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
	if err != nil {
		// Fix: use a constant format string. e's message embeds kubectl
		// output, which may contain '%' characters that Failf would
		// otherwise misinterpret as formatting directives.
		framework.Failf("%v", e)
	}
}
// readTestFileOrDie reads a manifest file from kubeCtlManifestPath,
// failing the test immediately if it cannot be read.
func readTestFileOrDie(file string) []byte {
	data, err := e2etestfiles.Read(path.Join(kubeCtlManifestPath, file))
	if err != nil {
		framework.Fail(err.Error(), 1)
	}
	return data
}
// runKubectlRetryOrDie runs kubectl with the given arguments, retrying up
// to five times (one second apart) when the failure is an optimistic-lock
// conflict; any other outcome stops the retries. The final error, if any,
// fails the test; the last stdout is logged and returned.
func runKubectlRetryOrDie(ns string, args ...string) string {
	var (
		output string
		err    error
	)
	for attempt := 0; attempt < 5; attempt++ {
		output, err = framework.RunKubectl(ns, args...)
		if err == nil {
			break
		}
		msg := err.Error()
		retriable := strings.Contains(msg, genericregistry.OptimisticLockErrorMsg) ||
			strings.Contains(msg, "Operation cannot be fulfilled")
		if !retriable {
			break
		}
		time.Sleep(time.Second)
	}
	// Expect no errors to be present after retries are finished
	// Copied from framework #ExecOrDie
	framework.Logf("stdout: %q", output)
	framework.ExpectNoError(err)
	return output
}
var _ = SIGDescribe("Kubectl client", func() {
defer ginkgo.GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{"app": "agnhost"},
ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
// test files.
// Print debug info if atLeast Pods are not found before the timeout
waitForOrFailWithDebug := func(atLeast int) {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := filepath.SplitList(subpath)
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := ioutil.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
ginkgo.Describe("Update Demo", func() {
var nautilus string
ginkgo.BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
data, err := e2etestfiles.Read(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))
if err != nil {
framework.Fail(err.Error())
}
nautilus = commonutils.SubstituteImageName(string(data))
})
/*
Release: v1.9
Testname: Kubectl, replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
*/
framework.ConformanceIt("should create and stop a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
/*
Release: v1.9
Testname: Kubectl, scale replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
*/
framework.ConformanceIt("should scale a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
})
ginkgo.Describe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
guestbookRoot := "test/e2e/testing-manifests/guestbook"
for _, gbAppFile := range []string{
"agnhost-replica-service.yaml",
"agnhost-primary-service.yaml",
"frontend-service.yaml",
"frontend-deployment.yaml.in",
"agnhost-primary-deployment.yaml.in",
"agnhost-replica-deployment.yaml.in",
} {
data, err := e2etestfiles.Read(filepath.Join(guestbookRoot, gbAppFile))
if err != nil {
framework.Fail(err.Error())
}
contents := commonutils.SubstituteImageName(string(data))
run(contents)
}
}
/*
Release: v1.9
Testname: Kubectl, guestbook application
Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read.
*/
framework.ConformanceIt("should create and stop a working application ", func() {
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
ginkgo.By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
})
ginkgo.By("validating guestbook app")
validateGuestbookApp(c, ns)
})
})
ginkgo.Describe("Simple pod", func() {
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector)
})
ginkgo.It("should support exec", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
ginkgo.By("executing a very long command in the container")
veryLongData := make([]rune, 20000)
for i := 0; i < len(veryLongData); i++ {
veryLongData[i] = 'a'
}
execOutput = framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", string(veryLongData))
framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output")
ginkgo.By("executing a command in the container with noninteractive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "cat").
WithStdinData("abcd1234").
ExecOrDie(ns)
if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
// pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil {
framework.Failf("Error creating blocking reader: %v", err)
}
// NOTE this is solely for test cleanup!
defer closer.Close()
ginkgo.By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "sh").
WithStdinReader(r).
ExecOrDie(ns)
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec using resource/name", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodResourceName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec through an HTTP proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting goproxy")
testSrv, proxyLogs := startLocalProxy()
defer testSrv.Close()
proxyAddr := testSrv.URL
for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
proxyLogs.Reset()
ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
output := framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container").
WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
// Verify the proxy server logs saw the connection
expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api"))
proxyLog := proxyLogs.String()
if !strings.Contains(proxyLog, expectedProxyLog) {
framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
}
}
})
ginkgo.It("should support exec through kubectl proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting kubectl proxy")
port, proxyCmd, err := startProxyServer(ns)
framework.ExpectNoError(err)
defer framework.TryKill(proxyCmd)
//proxyLogs.Reset()
host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
ginkgo.By("Running kubectl via kubectl proxy using " + host)
output := framework.NewKubectlCommand(
ns, host, fmt.Sprintf("--namespace=%s", ns),
"exec", "httpd", "echo", "running", "in", "container",
).ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
})
ginkgo.It("should return command exit codes", func() {
ginkgo.By("execing into a container with a successful command")
_, err := framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
ginkgo.By("execing into a container with a failing command")
_, err = framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok := err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
framework.ExpectEqual(ee.ExitStatus(), 42)
ginkgo.By("running a successful command")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
ginkgo.By("running a failing command")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
framework.ExpectEqual(ee.ExitStatus(), 42)
ginkgo.By("running a failing command without --restart=Never")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
ginkgo.By("running a failing command without --restart=Never, but with --rm")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
ginkgo.By("running a failing command with --leave-stdin-open")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
})
// Exercises `kubectl run` with --attach and various --stdin /
// --leave-stdin-open combinations, verifying via the container logs what
// the attached process actually received on stdin.
ginkgo.It("should support inline execution and attach", func() {
// waitForStdinContent polls the pod's logs (every 10s, up to 5m) until
// they contain `content`, returning the last log output fetched.
waitForStdinContent := func(pod, content string) string {
var logOutput string
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
logOutput = framework.RunKubectlOrDie(ns, "logs", pod)
return strings.Contains(logOutput, content), nil
})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("unexpected error waiting for '%v' output", content))
return logOutput
}
ginkgo.By("executing a command with run and attach with stdin")
// We wait for a non-empty line so we know kubectl has attached
framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
WithStdinData("value\nabcd1234").
ExecOrDie(ns)
// With --stdin, the piped data must have reached the container.
runOutput := waitForStdinContent("run-test", "stdin closed")
gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach without stdin")
// There is a race on this scenario described in #73099
// It fails if we are not able to attach before the container prints
// "stdin closed", but hasn't exited yet.
// We wait 10 seconds before printing to give time to kubectl to attach
// to the container, this does not solve the race though.
framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie(ns)
// Without --stdin the supplied data must NOT reach the container.
runOutput = waitForStdinContent("run-test-2", "stdin closed")
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234\n").
ExecOrDie(ns)
// With stdin left open, `cat` keeps running: the echoed data must show
// up in the logs but the "stdin closed" sentinel must not.
runOutput = waitForStdinContent("run-test-3", "abcd1234")
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
// Sort pods newest-first so GetFirstPod picks the most recent match.
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
framework.ExpectNoError(err)
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
})
// Verifies that `kubectl logs -f` delivers the pod's complete output,
// including the final "EOF" line emitted just before the container exits.
ginkgo.It("should contain last line of the log", func() {
podName := "run-log-test"
ginkgo.By("executing a command with run")
// The container sleeps first (so the log follow can attach), then prints
// 100 numbered lines followed by the EOF sentinel.
framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready")
}
logOutput := framework.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
})
// Verifies `kubectl port-forward` by forwarding the httpd pod's port to a
// local port and curling it, expecting the default httpd landing page.
ginkgo.It("should support port-forward", func() {
ginkgo.By("forwarding the container port to a local port")
cmd := runPortForward(ns, simplePodName, simplePodPort)
// Tear down the port-forward subprocess when the test ends.
defer cmd.Stop()
ginkgo.By("curling local port output")
localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
body, err := curl(localAddr)
framework.Logf("got: %s", body)
if err != nil {
framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
}
if !strings.Contains(body, httpdDefaultOutput) {
framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
}
})
// Verifies kubectl's in-cluster-config handling: the real kubectl binary is
// copied into a running pod and exercised against the injected
// service-account credentials, both with defaults and with overrides via
// flags, --namespace, and an explicit kubeconfig.
ginkgo.It("should handle in-cluster config", func() {
// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
// for now this only works on amd64
e2eskipper.SkipUnlessNodeOSArchIs("amd64")
ginkgo.By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err)
err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
ginkgo.By("overriding icc with values provided by flags")
kubectlPath := framework.TestContext.KubectlPath
// we need the actual kubectl binary, not the script wrapper
kubectlPathNormalizer := exec.Command("which", kubectlPath)
if strings.HasSuffix(kubectlPath, "kubectl.sh") {
kubectlPathNormalizer = exec.Command(kubectlPath, "path")
}
kubectlPathNormalized, err := kubectlPathNormalizer.Output()
framework.ExpectNoError(err)
kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
// JoinHostPort brackets IPv6 hosts correctly.
inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
// Build a kubeconfig file that will make use of the injected ca and token,
// but point at the DNS host and the default namespace
tmpDir, err := ioutil.TempDir("", "icc-override")
framework.ExpectNoError(err)
// Several manifest files are written into tmpDir below, so RemoveAll is
// required here: os.Remove fails (silently, since its error was ignored)
// on a non-empty directory and would leak the temp dir on every run.
defer os.RemoveAll(tmpDir)
overrideKubeconfigName := "icc-override.kubeconfig"
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap with namespace and invalid name"
namespace: configmap-namespace
`), os.FileMode(0755)))
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
framework.Logf("copying configmap manifests to the %s pod", simplePodName)
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
ginkgo.By("getting pods with in-cluster configs")
execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
ginkgo.By("creating an object containing a namespace with in-cluster config")
// The invalid manifests make the create calls fail; the verbose (--v=6)
// error output is inspected to confirm which config/namespace was used.
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))
ginkgo.By("creating an object not containing a namespace with in-cluster config")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))
ginkgo.By("trying to use kubectl with invalid token")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
framework.Logf("got err %v", err)
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))
ginkgo.By("trying to use kubectl with invalid server")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
framework.Logf("got err %v", err)
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))
ginkgo.By("trying to use kubectl with invalid namespace")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
ginkgo.By("trying to use kubectl with kubeconfig")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
})
})
ginkgo.Describe("Kubectl api-versions", func() {
/*
Release: v1.9
Testname: Kubectl, check version v1
Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed.
*/
framework.ConformanceIt("should check if v1 is in available api versions ", func() {
ginkgo.By("validating api versions")
output := framework.RunKubectlOrDie(ns, "api-versions")
// The core group's "v1" must always be advertised by the server.
if !strings.Contains(output, "v1") {
framework.Failf("No v1 in kubectl api-versions")
}
})
})
// Smoke-tests `kubectl get componentstatuses`: lists the component names and
// then fetches each one individually; any kubectl failure aborts the test.
ginkgo.Describe("Kubectl get componentstatuses", func() {
ginkgo.It("should get componentstatuses", func() {
ginkgo.By("getting list of componentstatuses")
output := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
// jsonpath emits the names space-separated on a single line.
components := strings.Split(output, " ")
ginkgo.By("getting details of componentstatuses")
for _, component := range components {
ginkgo.By("getting status of " + component)
framework.RunKubectlOrDie(ns, "get", "componentstatuses", component)
}
})
})
ginkgo.Describe("Kubectl apply", func() {
// Applies a modified RC config over an existing one and checks that the
// change is reflected in the live object.
ginkgo.It("should apply a new configuration to an existing RC", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
ginkgo.By("creating Agnhost RC")
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
ginkgo.By("applying a modified configuration")
stdin := modifyReplicationControllerConfiguration(controllerJSON)
framework.NewKubectlCommand(ns, "apply", "-f", "-").
WithStdinReader(stdin).
ExecOrDie(ns)
ginkgo.By("checking the result")
forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
})
// Re-applying an identical service config must leave the port unchanged.
ginkgo.It("should reuse port when apply to an existing SVC", func() {
serviceJSON := readTestFileOrDie(agnhostServiceFilename)
ginkgo.By("creating Agnhost SVC")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
ginkgo.By("getting the original port")
originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
ginkgo.By("applying the same configuration")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
ginkgo.By("getting the port after applying configuration")
currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
ginkgo.By("checking the result")
if originalNodePort != currentNodePort {
framework.Failf("port should keep the same")
}
})
// Exercises `kubectl apply set-last-applied`/`view-last-applied`: the
// last-applied annotation must track what was applied, and a later apply
// without a replicas field must not clobber a manual scale.
ginkgo.It("apply set/view last-applied", func() {
deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename)))
deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename)))
deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
ginkgo.By("deployment replicas number is 2")
framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
ginkgo.By("check the last-applied matches expectations annotations")
output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
requiredString := "\"replicas\": 2"
if !strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
}
ginkgo.By("apply file doesn't have replicas")
framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
ginkgo.By("check last-applied has been updated, annotations doesn't have replicas")
output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
requiredString = "\"replicas\": 2"
if strings.Contains(output, requiredString) {
framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
}
ginkgo.By("scale set replicas to 3")
httpdDeploy := "httpd-deployment"
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
ginkgo.By("apply file doesn't have replicas but image changed")
framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
ginkgo.By("verify replicas still is 3 and image has been updated")
output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item)
}
}
})
})
ginkgo.Describe("Kubectl diff", func() {
/*
Release: v1.19
Testname: Kubectl, diff Deployment
Description: Create a Deployment with httpd image. Declare the same Deployment with a different image, busybox. Diff of live Deployment with declared Deployment MUST include the difference between live and declared image.
*/
framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() {
ginkgo.By("create deployment with httpd image")
deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
framework.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
ginkgo.By("verify diff finds difference between live and declared image")
// Swap the image in the declared manifest so it differs from the live object.
deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
if !strings.Contains(deployment, busyboxImage) {
framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
}
output, err := framework.RunKubectlInput(ns, deployment, "diff", "-f", "-")
// NOTE(review): `kubectl diff` exits 1 when a difference is found, which is
// the expected outcome here — yet this condition fails the test precisely
// when the exit code IS 1, and the message then prints "Expected ... 1,
// but got 1". The intent is presumably to fail on any exit status other
// than 1. Confirm how framework.RunKubectlInput wraps errors (the
// *exec.ExitError assertion may never match, making this check dead code)
// before changing the condition.
if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 {
framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
}
requiredItems := []string{httpdImage, busyboxImage}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
}
}
framework.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
})
})
ginkgo.Describe("Kubectl server-side dry-run", func() {
/*
Release: v1.19
Testname: Kubectl, server-side dry-run Pod
Description: The command 'kubectl run' must create a pod with the specified image name. After, the command 'kubectl replace --dry-run=server' should update the Pod with the new image name and server-side dry-run enabled. The image name must not change.
*/
framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() {
ginkgo.By("running the image " + httpdImage)
podName := "e2e-test-httpd-pod"
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)
ginkgo.By("replace the image in the pod with server-side dry-run")
podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
if !strings.Contains(podJSON, busyboxImage) {
framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, podJSON)
}
// Server-side dry-run: the replace must be validated but not persisted.
framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-", "--dry-run", "server")
ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
// checkContainersImage presumably returns true when the image does NOT
// match the expectation — TODO(review): confirm the helper's semantics.
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating pod with expected image %s", httpdImage)
}
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
})
// definitionMatchesGVK returns true if the specified GVK is listed as an
// x-kubernetes-group-version-kind extension on the OpenAPI definition.
definitionMatchesGVK := func(extensions []*openapi_v2.NamedAny, desiredGVK schema.GroupVersionKind) bool {
for _, ext := range extensions {
rawYaml := ext.GetValue().GetYaml()
// Only non-empty x-kubernetes-group-version-kind extensions are relevant.
if ext.GetName() != "x-kubernetes-group-version-kind" || rawYaml == "" {
continue
}
var gvkEntries []map[string]string
if unmarshalErr := yaml.Unmarshal([]byte(rawYaml), &gvkEntries); unmarshalErr != nil {
framework.Logf("%v\n%s", unmarshalErr, rawYaml)
continue
}
for _, entry := range gvkEntries {
matches := entry["group"] == desiredGVK.Group &&
entry["version"] == desiredGVK.Version &&
entry["kind"] == desiredGVK.Kind
if matches {
return true
}
}
}
return false
}
// schemaForGVK returns the OpenAPI v2 schema definition for the specified
// GVK, or nil when the discovery document has no matching entry.
schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
doc, err := f.ClientSet.Discovery().OpenAPISchema()
if err != nil {
framework.Failf("%v", err)
}
if doc == nil || doc.Definitions == nil {
return nil
}
// Scan every definition for a matching group-version-kind extension.
for _, prop := range doc.Definitions.AdditionalProperties {
if prop == nil || prop.Value == nil {
continue
}
if definitionMatchesGVK(prop.Value.VendorExtension, desiredGVK) {
return prop.Value
}
}
return nil
}
// Exercises kubectl's client-side validation against CRDs with no schema, a
// full validation schema, and a partially-specified schema that preserves
// unknown fields.
ginkgo.Describe("Kubectl client-side validation", func() {
ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func() {
ginkgo.By("create CRD with no validation schema")
crd, err := crd.CreateTestCRD(f)
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
// Arbitrary nested fields must be accepted when no schema is declared.
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err)
}
})
ginkgo.It("should create/apply a valid CR for CRD with validation schema", func() {
ginkgo.By("prepare CRD with validation schema")
crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
props := &apiextensionsv1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err)
}
// Install the schema on every served version of the CRD.
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
}
})
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err)
}
})
ginkgo.It("should create/apply a valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func() {
ginkgo.By("prepare CRD with partially-specified validation schema")
crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
props := &apiextensionsv1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err)
}
// Allow for arbitrary-extra properties.
props.XPreserveUnknownFields = pointer.BoolPtr(true)
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
}
})
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
// Make sure the schema actually got published before relying on it.
schema := schemaForGVK(schema.GroupVersionKind{Group: crd.Crd.Spec.Group, Version: crd.Crd.Spec.Versions[0].Name, Kind: crd.Crd.Spec.Names.Kind})
framework.ExpectNotEqual(schema, nil, "retrieving a schema for the crd")
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta)
err = createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd)
framework.ExpectNoError(err, "creating custom resource")
})
})
ginkgo.Describe("Kubectl cluster-info", func() {
/*
Release: v1.9
Testname: Kubectl, cluster info
Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running.
*/
framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() {
ginkgo.By("validating cluster-info")
output := framework.RunKubectlOrDie(ns, "cluster-info")
// Can't check exact strings due to terminal control commands (colors)
requiredItems := []string{"Kubernetes control plane", "is running at"}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl cluster-info", item)
}
}
})
})
// Smoke test: `kubectl cluster-info dump` must complete without error; the
// dump contents themselves are not inspected.
ginkgo.Describe("Kubectl cluster-info dump", func() {
ginkgo.It("should check if cluster-info dump succeeds", func() {
ginkgo.By("running cluster-info dump")
framework.RunKubectlOrDie(ns, "cluster-info", "dump")
})
})
ginkgo.Describe("Kubectl describe", func() {
/*
Release: v1.9
Testname: Kubectl, describe pod or rc
Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
*/
framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
serviceJSON := readTestFileOrDie(agnhostServiceFilename)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
// Pod
// Each inner slice lists substrings that must appear together on one
// line of the describe output (checked by checkOutput).
forEachPod(func(pod v1.Pod) {
output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
requiredStrings := [][]string{
{"Name:", "agnhost-primary-"},
{"Namespace:", ns},
{"Node:"},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Status:", "Running"},
{"IP:"},
{"Controlled By:", "ReplicationController/agnhost-primary"},
{"Image:", agnhostImage},
{"State:", "Running"},
{"QoS Class:", "BestEffort"},
}
checkOutput(output, requiredStrings)
})
// Rc
// Retried because the RC's status lines settle asynchronously.
requiredStrings := [][]string{
{"Name:", "agnhost-primary"},
{"Namespace:", ns},
{"Selector:", "app=agnhost,role=primary"},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Replicas:", "1 current", "1 desired"},
{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
{"Pod Template:"},
{"Image:", agnhostImage},
{"Events:"}}
checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
// Service
output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
requiredStrings = [][]string{
{"Name:", "agnhost-primary"},
{"Namespace:", ns},
{"Labels:", "app=agnhost"},
{"role=primary"},
{"Annotations:"},
{"Selector:", "app=agnhost", "role=primary"},
{"Type:", "ClusterIP"},
{"IP:"},
{"Port:", "<unset>", "6379/TCP"},
{"Endpoints:"},
{"Session Affinity:", "None"}}
checkOutput(output, requiredStrings)
// Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
node := nodes.Items[0]
output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name)
requiredStrings = [][]string{
{"Name:", node.Name},
{"Labels:"},
{"Annotations:"},
{"CreationTimestamp:"},
{"Conditions:"},
{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
{"Addresses:"},
{"Capacity:"},
{"Version:"},
{"Kernel Version:"},
{"OS Image:"},
{"Container Runtime Version:"},
{"Kubelet Version:"},
{"Kube-Proxy Version:"},
{"Pods:"}}
checkOutput(output, requiredStrings)
// Namespace
output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns)
requiredStrings = [][]string{
{"Name:", ns},
{"Labels:"},
{"Annotations:"},
{"Status:", "Active"}}
checkOutput(output, requiredStrings)
// Quota and limitrange are skipped for now.
})
// Verifies `kubectl describe cronjob` output for a freshly created
// CronJob once it is visible through the batch/v1beta1 API.
ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() {
ginkgo.By("creating a cronjob")
cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")
ginkgo.By("waiting for cronjob to start.")
err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
cj, err := c.BatchV1beta1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err)
}
return len(cj.Items) > 0, nil
})
framework.ExpectNoError(err)
ginkgo.By("verifying kubectl describe prints")
output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
requiredStrings := [][]string{
{"Name:", "cronjob-test"},
{"Namespace:", ns},
{"Labels:"},
{"Annotations:"},
{"Schedule:", "*/1 * * * *"},
{"Concurrency Policy:", "Allow"},
{"Suspend:", "False"},
{"Successful Job History Limit:", "3"},
{"Failed Job History Limit:", "1"},
{"Starting Deadline Seconds:", "30s"},
{"Selector:"},
{"Parallelism:"},
{"Completions:"},
}
checkOutput(output, requiredStrings)
})
})
ginkgo.Describe("Kubectl expose", func() {
/*
Release: v1.9
Testname: Kubectl, create service, replication controller
Description: Create a Pod running agnhost listening to port 6379. Using kubectl expose the agnhost primary replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that agnhost primary is listening. Using kubectl expose the agnhost primary as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that agnhost primary is listening.
*/
framework.ConformanceIt("should create services for rc ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
agnhostPort := 6379
ginkgo.By("creating Agnhost RC")
framework.Logf("namespace %v", ns)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
// It may take a while for the pods to get registered in some cases, wait to be sure.
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) {
framework.Logf("wait on agnhost-primary startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
})
// validateService polls until the named service has exactly one
// endpoint targeting agnhostPort, then checks the service's declared
// port and target port.
validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
// log the real error
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
// NotFound / Unauthorized / ServerTimeout are treated as
// transient: clear the error so the poll retries.
if apierrors.IsNotFound(err) ||
apierrors.IsUnauthorized(err) ||
apierrors.IsServerTimeout(err) {
err = nil
}
return false, err
}
uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
if len(uidToPort) == 0 {
framework.Logf("No endpoint found, retrying")
return false, nil
}
if len(uidToPort) > 1 {
framework.Failf("Too many endpoints found")
}
for _, port := range uidToPort {
if port[0] != agnhostPort {
framework.Failf("Wrong endpoint port: %d", port[0])
}
}
return true, nil
})
framework.ExpectNoError(err)
e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err)
if len(e2eservice.Spec.Ports) != 1 {
framework.Failf("1 port is expected")
}
port := e2eservice.Spec.Ports[0]
if port.Port != int32(servicePort) {
framework.Failf("Wrong service port: %d", port.Port)
}
if port.TargetPort.IntValue() != agnhostPort {
framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
}
}
ginkgo.By("exposing RC")
framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm2", 1234, framework.ServiceStartTimeout)
ginkgo.By("exposing service")
// Expose the just-created service again to verify expose-on-service.
framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm3", 2345, framework.ServiceStartTimeout)
})
})
ginkgo.Describe("Kubectl label", func() {
// podYaml is shared between BeforeEach (create) and AfterEach (cleanup).
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, pausePodSelector)
})
/*
Release: v1.9
Testname: Kubectl, label update
Description: When a Pod is running, update a Label using 'kubectl label' command. The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the container MUST verify that the label can be read back. Use 'kubectl label label-' to remove the label. 'kubectl get pod' with -l option SHOULD not list the deleted label as the label is removed.
*/
framework.ConformanceIt("should update the label on a resource ", func() {
labelName := "testing-label"
labelValue := "testing-label-value"
ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
// -L adds a column for the label, so its value appears in the output.
output := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if !strings.Contains(output, labelValue) {
framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
}
ginkgo.By("removing the label " + labelName + " of a pod")
// The trailing "-" is kubectl's syntax for deleting a label.
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if strings.Contains(output, labelValue) {
framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
}
})
})
// Verifies `kubectl cp` by copying a known file out of a running busybox
// pod and comparing its contents to the expected value.
ginkgo.Describe("Kubectl copy", func() {
// podYaml is shared between BeforeEach (create) and AfterEach (cleanup).
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
})
/*
Release: v1.12
Testname: Kubectl, copy
Description: When a Pod is running, copy a known file from it to a temporary local destination.
*/
ginkgo.It("should copy a file from a running Pod", func() {
remoteContents := "foobar\n"
podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar")
if err != nil {
framework.Failf("Failed creating temporary destination file: %v", err)
}
// Close the handle and remove the scratch file when done so repeated
// runs don't leak file descriptors or temp files (the original did
// neither). Defers run LIFO: Close first, then Remove.
defer os.Remove(tempDestination.Name())
defer tempDestination.Close()
ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := ioutil.ReadAll(tempDestination)
if err != nil {
framework.Failf("Failed reading temporary local file: %v", err)
}
if string(localData) != remoteContents {
framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
}
})
})
// Exercises 'kubectl logs' against a log-generator pod: string matching,
// --tail, --limit-bytes, --timestamps, and --since filtering.
ginkgo.Describe("Kubectl logs", func() {
podName := "logs-generator"
containerName := "logs-generator"
ginkgo.BeforeEach(func() {
ginkgo.By("creating an pod")
// Agnhost image generates logs for a total of 100 lines over 20s.
framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pod", podName)
})
/*
Release: v1.9
Testname: Kubectl, logs
Description: When a Pod is running then it MUST generate logs.
Starting a Pod should have a expected log line. Also log command options MUST work as expected and described below.
'kubectl logs -tail=1' should generate a output of one line, the last line in the log.
'kubectl --limit-bytes=1' should generate a single byte output.
'kubectl --tail=1 --timestamp should generate one line with timestamp in RFC3339 format
'kubectl --since=1s' should output logs that are only 1 second older from now
'kubectl --since=24h' should output logs that are only 1 day older from now
*/
framework.ConformanceIt("should be able to retrieve and filter logs ", func() {
// Split("something\n", "\n") returns ["something", ""], so
// strip trailing newline first
lines := func(out string) []string {
return strings.Split(strings.TrimRight(out, "\n"), "\n")
}
ginkgo.By("Waiting for log generator to start.")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod %s was not ready", podName)
}
ginkgo.By("checking for a matching strings")
_, err := framework.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout)
framework.ExpectNoError(err)
ginkgo.By("limiting log lines")
out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1")
framework.Logf("got output %q", out)
gomega.Expect(len(out)).NotTo(gomega.BeZero())
framework.ExpectEqual(len(lines(out)), 1)
ginkgo.By("limiting log bytes")
out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1")
framework.Logf("got output %q", out)
framework.ExpectEqual(len(lines(out)), 1)
framework.ExpectEqual(len(out), 1)
ginkgo.By("exposing timestamps")
out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps")
framework.Logf("got output %q", out)
l := lines(out)
framework.ExpectEqual(len(l), 1)
// With --timestamps the first space-separated word of the line must be an
// RFC3339 or RFC3339Nano timestamp.
words := strings.Split(l[0], " ")
gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1))
if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
}
}
ginkgo.By("restricting to a time range")
// Note: we must wait at least two seconds,
// because the granularity is only 1 second and
// it could end up rounding the wrong way.
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s")
recent := len(strings.Split(recentOut, "\n"))
olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h")
older := len(strings.Split(olderOut, "\n"))
gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut)
})
})
// Exercises 'kubectl patch': annotates every pod of a replication controller
// and verifies the annotation is visible on each pod object.
ginkgo.Describe("Kubectl patch", func() {
/*
Release: v1.9
Testname: Kubectl, patch to annotate
Description: Start running agnhost and a replication controller. When the pod is running, using 'kubectl patch' command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller.
*/
framework.ConformanceIt("should add annotations for pods in rc ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
ginkgo.By("creating Agnhost RC")
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
ginkgo.By("patching all pods")
forEachPod(func(pod v1.Pod) {
// Strategic-merge patch adding annotation x=y.
framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
})
ginkgo.By("checking annotations")
forEachPod(func(pod v1.Pod) {
found := false
for key, val := range pod.Annotations {
if key == "x" && val == "y" {
found = true
break
}
}
if !found {
framework.Failf("Added annotation not found")
}
})
})
})
// Exercises 'kubectl version': the output must mention both client and
// server version details.
ginkgo.Describe("Kubectl version", func() {
/*
Release: v1.9
Testname: Kubectl, version
Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to.
*/
framework.ConformanceIt("should check is all data is printed ", func() {
version := framework.RunKubectlOrDie(ns, "version")
requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"}
for _, item := range requiredItems {
if !strings.Contains(version, item) {
framework.Failf("Required item %s not found in %s", item, version)
}
}
})
})
// Exercises 'kubectl run --restart=Never': the command must create a pod
// with the requested image and RestartPolicy.
ginkgo.Describe("Kubectl run pod", func() {
var podName string
ginkgo.BeforeEach(func() {
podName = "e2e-test-httpd-pod"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
Release: v1.9
Testname: Kubectl, run pod
Description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
*/
framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--image="+httpdImage)
ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
// checkContainersImage returns true when the pod does NOT have exactly one
// container with the expected image.
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
}
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
}
})
})
// Exercises 'kubectl replace': fetches the running pod's JSON, swaps the
// image string, replaces the pod and verifies the new image took effect.
ginkgo.Describe("Kubectl replace", func() {
var podName string
ginkgo.BeforeEach(func() {
podName = "e2e-test-httpd-pod"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
Release: v1.9
Testname: Kubectl, replace
Description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image.
*/
framework.ConformanceIt("should update a single-container pod's image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)
ginkgo.By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
ginkgo.By("verifying the pod " + podName + " was created")
podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
if !strings.Contains(podJSON, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
}
ginkgo.By("replace the image in the pod")
// Textual substitution of the image name inside the pod's JSON spec.
podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err)
}
containers := pod.Spec.Containers
// checkContainersImage returns true on mismatch.
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating pod with expected image %s", busyboxImage)
}
})
})
// Exercises 'kubectl proxy' over both a random TCP port (--port 0) and a
// unix domain socket (--unix-socket), asserting /api is reachable.
ginkgo.Describe("Proxy server", func() {
// TODO: test proxy options (static, prefix, etc)
/*
Release: v1.9
Testname: Kubectl, proxy port zero
Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string.
*/
framework.ConformanceIt("should support proxy with --port 0 ", func() {
ginkgo.By("starting the proxy server")
port, cmd, err := startProxyServer(ns)
// Kill the proxy even when startProxyServer returned an error but a
// partially started command.
if cmd != nil {
defer framework.TryKill(cmd)
}
if err != nil {
framework.Failf("Failed to start proxy server: %v", err)
}
ginkgo.By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr)
if err != nil {
framework.Failf("Expected at least one supported apiversion, got error %v", err)
}
if len(apiVersions.Versions) < 1 {
framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
}
})
/*
Release: v1.9
Testname: Kubectl, proxy socket
Description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=<some path>. Call the proxy server by requesting api versions from http://localhost:0/api. The proxy server MUST provide at least one version string
*/
framework.ConformanceIt("should support --unix-socket=/path ", func() {
ginkgo.By("Starting the proxy")
tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
if err != nil {
framework.Failf("Failed to create temporary directory: %v", err)
}
path := filepath.Join(tmpdir, "test")
defer os.Remove(path)
defer os.Remove(tmpdir)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
framework.Failf("Failed to start kubectl command: %v", err)
}
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
// Wait for the proxy to announce itself on stdout before dialing.
buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil {
framework.Failf("Expected output from kubectl proxy: %v", err)
}
ginkgo.By("retrieving proxy /api/ output")
// The host part of the URL is ignored when dialing a unix socket.
_, err = curlUnix("http://unused/api", path)
if err != nil {
framework.Failf("Failed get of /api at %s: %v", path, err)
}
})
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
// Exercises 'kubectl taint': adding, verifying and removing node taints,
// including removing all taints sharing one key.
ginkgo.Describe("Kubectl taint [Serial]", func() {
ginkgo.It("should update the taint on a node", func() {
testTaint := v1.Taint{
// Unique key per run so leftovers from earlier runs cannot collide.
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
ginkgo.By("removing the taint " + testTaint.ToString() + " of a node")
// "key:Effect-" removes that specific taint from the node.
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
}
})
ginkgo.It("should remove all the taints with the same key off a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
// Second taint: same key, different value and effect.
newTestTaint := v1.Taint{
Key: testTaint.Key,
Value: "another-testing-taint-value",
Effect: v1.TaintEffectPreferNoSchedule,
}
ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{newTestTaint.ToString()},
}
checkOutput(output, requiredStrings)
// Third taint: same key again, NoExecute effect.
noExecuteTaint := v1.Taint{
Key: testTaint.Key,
Value: "testing-taint-value-no-execute",
Effect: v1.TaintEffectNoExecute,
}
ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{noExecuteTaint.ToString()},
}
checkOutput(output, requiredStrings)
ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node")
// "key-" (no effect) removes every taint with that key at once.
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-")
ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
}
})
})
// Exercises 'kubectl create quota': with no scopes, with valid scopes, and
// rejection of an invalid scope.
ginkgo.Describe("Kubectl create quota", func() {
ginkgo.It("should create a quota without scopes", func() {
quotaName := "million"
ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")
ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 0 {
framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
}
if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
}
r, found := quota.Spec.Hard[v1.ResourcePods]
// resource.Quantity comparison must go through Cmp, not ==.
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r)
}
r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r)
}
})
ginkgo.It("should create a quota with scopes", func() {
quotaName := "scopes"
ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")
ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
}
// Build a set so the scope check is order-independent.
scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{}
}
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
}
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
}
})
ginkgo.It("should reject quota with invalid scopes", func() {
quotaName := "scopes"
ginkgo.By("calling kubectl quota")
out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo")
if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
}
})
})
})
// checkOutputReturnError verifies that output, split into lines, satisfies
// every requirement in order. Each requirement is a slice of strings: the
// first element anchors a line (scanning forward from the previously matched
// line), and every remaining element must also appear on that anchored line.
// A non-nil error describes the first requirement that could not be found.
func checkOutputReturnError(output string, required [][]string) error {
	outLines := strings.Split(output, "\n")
	idx := 0
	for _, req := range required {
		anchor := req[0]
		matched := false
		// Advance idx until a line containing the anchor is found; subsequent
		// requirements continue scanning from this same line.
		for ; idx < len(outLines); idx++ {
			if strings.Contains(outLines[idx], anchor) {
				matched = true
				break
			}
		}
		if !matched {
			return fmt.Errorf("failed to find %s in %s", anchor, output)
		}
		for _, item := range req[1:] {
			if !strings.Contains(outLines[idx], item) {
				return fmt.Errorf("failed to find %s in %s", item, outLines[idx])
			}
		}
	}
	return nil
}
// checkOutput fails the test (via framework.Failf) when output does not
// satisfy the ordered requirements; see checkOutputReturnError for the
// matching semantics.
func checkOutput(output string, required [][]string) {
err := checkOutputReturnError(output, required)
if err != nil {
framework.Failf("%v", err)
}
}
// checkKubectlOutputWithRetry runs the given kubectl command once per second
// for up to a minute until its output satisfies the ordered requirements
// (see checkOutputReturnError). If the requirements are never met, the test
// is failed with the last mismatch error.
func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
	var pollErr error
	// The poll callback never returns an error, so PollImmediate's return
	// value only signals timeout; the interesting error is kept in pollErr.
	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		output := framework.RunKubectlOrDie(namespace, args...)
		err := checkOutputReturnError(output, required)
		if err != nil {
			pollErr = err
			return false, nil
		}
		pollErr = nil
		return true, nil
	})
	if pollErr != nil {
		framework.Failf("%v", pollErr)
	}
}
// checkContainersImage reports true when the container list does NOT consist
// of exactly one container whose image equals expectImage (i.e. true means
// the check failed). Callers use it as `if checkContainersImage(...) { Failf }`.
func checkContainersImage(containers []v1.Container, expectImage string) bool {
	// len(nil) == 0, so the previous explicit nil check was redundant.
	return len(containers) != 1 || containers[0].Image != expectImage
}
// getAPIVersions performs a GET against apiEndpoint (typically a kubectl
// proxy's /api/ URL) and decodes the JSON response into metav1.APIVersions.
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
	body, err := curl(apiEndpoint)
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying HTTP error.
		return nil, fmt.Errorf("http.Get of %s failed: %w", apiEndpoint, err)
	}
	var apiVersions metav1.APIVersions
	if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
		return nil, fmt.Errorf("parsing /api output %s: %w", body, err)
	}
	return &apiVersions, nil
}
// startProxyServer starts "kubectl proxy" on a random local port and returns
// the port it bound to, the running command (so the caller can kill it), and
// an error. The port is parsed from the proxy's first stdout line using
// proxyRegexp. On a non-nil error with a non-nil cmd, the caller is still
// responsible for killing cmd.
func startProxyServer(ns string) (int, *exec.Cmd, error) {
	// Specifying port 0 indicates we want the os to pick a random port.
	tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
	cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter")
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		return -1, nil, err
	}
	// Closing the pipes does not stop the proxy process itself.
	defer stdout.Close()
	defer stderr.Close()
	buf := make([]byte, 128)
	var n int
	if n, err = stdout.Read(buf); err != nil {
		return -1, cmd, fmt.Errorf("reading from kubectl proxy stdout: %w", err)
	}
	output := string(buf[:n])
	match := proxyRegexp.FindStringSubmatch(output)
	if len(match) == 2 {
		if port, err := strconv.Atoi(match[1]); err == nil {
			return port, cmd, nil
		}
	}
	return -1, cmd, fmt.Errorf("parsing port from proxy stdout: %s", output)
}
// curlUnix issues a GET request for url over the unix domain socket at path.
// The host portion of url is ignored; every connection is dialed to path.
func curlUnix(url string, path string) (string, error) {
	dialer := &net.Dialer{}
	transport := utilnet.SetTransportDefaults(&http.Transport{
		// Ignore the requested proto/addr and always dial the unix socket.
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return dialer.DialContext(ctx, "unix", path)
		},
	})
	return curlTransport(url, transport)
}
func curlTransport(url string, transport *http.Transport) (string, error) {
client := &http.Client{Transport: transport}
resp, err := client.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body[:]), nil
}
// curl issues a GET request to url over a default HTTP transport and returns
// the response body as a string.
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
// validateGuestbookApp waits for the guestbook frontend pods to run and then
// verifies end-to-end behavior: the frontend serves an empty data set, a new
// entry can be added, and the added entry can be read back.
func validateGuestbookApp(c clientset.Interface, ns string) {
	framework.Logf("Waiting for all frontend pods to be Running.")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
	err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
	framework.ExpectNoError(err)
	framework.Logf("Waiting for frontend to serve content.")
	if !waitForGuestbookResponse(c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
		framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
	}
	framework.Logf("Trying to add a new entry to the guestbook.")
	if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
		// Fixed message grammar: "Cannot added" -> "Cannot add".
		framework.Failf("Cannot add new entry in %v seconds.", guestbookResponseTimeout.Seconds())
	}
	framework.Logf("Verifying that added entry can be retrieved.")
	if !waitForGuestbookResponse(c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
	}
}
// waitForGuestbookResponse polls the guestbook frontend every 5 seconds until
// it returns expectedResponse or timeout elapses. It reports whether the
// expected response was received in time.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		res, err := makeRequestToGuestbook(c, cmd, arg, ns)
		if err == nil && res == expectedResponse {
			return true
		}
		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
		time.Sleep(5 * time.Second)
	}
	return false
}
// makeRequestToGuestbook calls the guestbook "frontend" service through the
// apiserver's services proxy, issuing /guestbook?cmd=<cmd>&key=messages&value=<value>.
// It returns the raw response body and any request error.
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
return "", errProxy
}
// Bound the proxied call so a hung frontend cannot stall the test forever.
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := proxyRequest.Namespace(ns).
Name("frontend").
Suffix("/guestbook").
Param("cmd", cmd).
Param("key", "messages").
Param("value", value).
Do(ctx).
Raw()
return string(result), err
}
// updateDemoData mirrors the JSON served by the update-demo container's
// data.json endpoint (see getUDData).
type updateDemoData struct {
Image string
}
// applyTestLabel is the label/selector key injected by
// modifyReplicationControllerConfiguration and checked by
// validateReplicationControllerConfiguration.
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
// readReplicationControllerFromString parses contents as YAML into a
// v1.ReplicationController, failing the test on a parse error.
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
	rc := v1.ReplicationController{}
	if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
		// Use a constant format string; Failf(err.Error()) trips go vet's
		// printf check and would misbehave if the error contained '%'.
		framework.Failf("%v", err)
	}
	return &rc
}
// modifyReplicationControllerConfiguration parses an RC manifest, stamps the
// applyTestLabel ("ADDED") into its labels, selector and pod-template labels,
// and returns the modified object re-serialized as JSON.
func modifyReplicationControllerConfiguration(contents string) io.Reader {
	rc := readReplicationControllerFromString(contents)
	// Assigning into a nil map panics; initialize any map the manifest omitted.
	if rc.Labels == nil {
		rc.Labels = map[string]string{}
	}
	if rc.Spec.Selector == nil {
		rc.Spec.Selector = map[string]string{}
	}
	if rc.Spec.Template != nil && rc.Spec.Template.Labels == nil {
		rc.Spec.Template.Labels = map[string]string{}
	}
	rc.Labels[applyTestLabel] = "ADDED"
	rc.Spec.Selector[applyTestLabel] = "ADDED"
	rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
	data, err := json.Marshal(rc)
	if err != nil {
		framework.Failf("json marshal failed: %s\n", err)
	}
	return bytes.NewReader(data)
}
// forEachReplicationController lists RCs in ns matching selectorKey=selectorValue,
// retrying until at least one appears or framework.PodListTimeout elapses,
// then invokes fn on each. Fails the test when none are found.
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
var rcs *v1.ReplicationControllerList
var err error
// Poll: the RC may not be visible immediately after creation.
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
if len(rcs.Items) > 0 {
break
}
}
if rcs == nil || len(rcs.Items) == 0 {
framework.Failf("No replication controllers found")
}
for _, rc := range rcs.Items {
fn(rc)
}
}
// validateReplicationControllerConfiguration checks that an RC named
// "agnhost-primary" carries the last-applied-configuration annotation and the
// applyTestLabel added by modifyReplicationControllerConfiguration; RCs with
// other names are ignored.
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "agnhost-primary" {
if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
}
if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
}
}
}
// getUDData creates a validator function based on the input string (i.e. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
	// getUDData validates data.json in the update-demo (returns nil if data is ok).
	return func(c clientset.Interface, podID string) error {
		framework.Logf("validating pod %s", podID)
		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()
		body, err := c.CoreV1().RESTClient().Get().
			Namespace(ns).
			Resource("pods").
			SubResource("proxy").
			Name(podID).
			Suffix("data.json").
			// Bug fix: pass the timeout context to the request. Previously
			// context.TODO() was used here, so the SingleCallTimeout above
			// never actually bounded the call.
			Do(ctx).
			Raw()
		if err != nil {
			if ctx.Err() != nil {
				framework.Failf("Failed to retrieve data from container: %v", err)
			}
			return err
		}
		framework.Logf("got data: %s", body)
		var data updateDemoData
		if err := json.Unmarshal(body, &data); err != nil {
			return err
		}
		framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
		if strings.Contains(data.Image, jpgExpected) {
			return nil
		}
		return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
	}
}
// newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer.
//
// We're explicitly returning the reader and closer separately, because
// the closer needs to be the *os.File we get from os.Pipe(). This is required
// so the exec of kubectl can pass the underlying file descriptor to the exec
// syscall, instead of creating another os.Pipe and blocking on the io.Copy
// between the source (e.g. stdin) and the write half of the pipe.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
w.Write([]byte(s))
return r, w, nil
}
// startLocalProxy starts an in-process HTTP forward proxy (goproxy) wrapped
// in an httptest.Server, with its verbose log captured into the returned
// buffer. The caller owns srv and must Close() it.
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
logs = &bytes.Buffer{}
p := goproxy.NewProxyHttpServer()
p.Verbose = true
p.Logger = log.New(logs, "", 0)
return httptest.NewServer(p), logs
}
// createApplyCustomResource asserts that given CustomResource be created and applied
// without being rejected by client-side validation
// It runs both "kubectl create --validate=true" and "kubectl apply
// --validate=true" against the resource manifest, deleting the CR between the
// two so the apply also starts from a clean state.
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
ginkgo.By("successfully create CR")
if _, err := framework.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err)
}
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
}
ginkgo.By("successfully apply CR")
if _, err := framework.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err)
}
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
}
return nil
}
// trimDockerRegistry strips the implicit "docker.io/" registry and "library/"
// namespace prefixes that a community docker install omits from image names,
// so image-name comparisons work regardless of which runtime reported them.
// Each substring is removed at most once (first occurrence).
func trimDockerRegistry(imagename string) string {
	withoutRegistry := strings.Replace(imagename, "docker.io/", "", 1)
	return strings.Replace(withoutRegistry, "library/", "", 1)
}
// validatorFn is the function which is individual tests will implement.
// we may want it to return more than just an error, at some point.
// Implementations receive a client and a pod name and return nil when the
// pod's served content is valid (see getUDData for an example).
type validatorFn func(c clientset.Interface, podID string) error
// validateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func validateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
containerImage = trimDockerRegistry(containerImage)
// Go templates evaluated server-side by kubectl to extract pod names,
// per-container running state, and the container image.
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
// Retry the whole pod set every 5s; any single failing check restarts the
// outer loop via `continue waitLoop`.
waitLoop:
for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
if running != "true" {
framework.Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage {
framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
framework.Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
framework.Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
framework.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", framework.PodStartTimeout.Seconds(), testname)
}
Check whether the required kubectl version values are empty.
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/elazarl/goproxy"
openapi_v2 "github.com/googleapis/gnostic/openapiv2"
"sigs.k8s.io/yaml"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubernetes/pkg/controller"
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Fixtures shared across the kubectl e2e tests: label selectors, pod and
// resource names, timeouts, manifest filenames, and expected outputs.
const (
	// Update Demo replication controller tests.
	updateDemoSelector = "name=update-demo"

	// Guestbook application tests.
	guestbookStartupTimeout  = 10 * time.Minute
	guestbookResponseTimeout = 3 * time.Minute

	// "Simple pod" tests run against a single httpd pod.
	simplePodSelector     = "name=httpd"
	simplePodName         = "httpd"
	simplePodResourceName = "pod/httpd"
	httpdDefaultOutput    = "It works!" // default httpd index page body
	simplePodPort         = 80

	pausePodSelector = "name=pause"
	pausePodName     = "pause"

	busyboxPodSelector = "app=busybox1"
	busyboxPodName     = "busybox1"

	// Locations and names of the test manifests used by the suite.
	kubeCtlManifestPath       = "test/e2e/testing-manifests/kubectl"
	agnhostControllerFilename = "agnhost-primary-controller.json.in"
	agnhostServiceFilename    = "agnhost-primary-service.json"
	httpdDeployment1Filename  = "httpd-deployment1.yaml.in"
	httpdDeployment2Filename  = "httpd-deployment2.yaml.in"
	httpdDeployment3Filename  = "httpd-deployment3.yaml.in"
	httpdRCFilename           = "httpd-rc.yaml.in"

	// metaPattern is a printf pattern producing the metadata fragment of a
	// JSON manifest: kind, apiVersion (group/version), and object name.
	metaPattern = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)
// Container images used by the tests, resolved via the e2e image utilities.
var (
	nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
	httpdImage    = imageutils.GetE2EImage(imageutils.Httpd)
	busyboxImage  = imageutils.GetE2EImage(imageutils.BusyBox)
	agnhostImage  = imageutils.GetE2EImage(imageutils.Agnhost)
)

var (
	// proxyRegexp extracts the local port from `kubectl proxy` startup output.
	proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
	// GroupVersionResources for CronJobs in the alpha and beta batch APIs.
	cronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
	cronJobGroupVersionResourceBeta  = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
)
// schemaFoo is an OpenAPI v3 validation schema (as YAML) for the "Foo" custom
// resource used in the CRD tests: a spec listing Bars (name required) and a
// status reporting per-Bar availability.
// NOTE(review): YAML nesting inside this raw string reconstructed to
// conventional indentation — confirm against the original file bytes.
var schemaFoo = []byte(`description: Foo CRD for Testing
type: object
properties:
  spec:
    type: object
    description: Specification of Foo
    properties:
      bars:
        description: List of Bars and their specs.
        type: array
        items:
          type: object
          required:
          - name
          properties:
            name:
              description: Name of Bar.
              type: string
            age:
              description: Age of Bar.
              type: string
            bazs:
              description: List of Bazs.
              items:
                type: string
              type: array
  status:
    description: Status of Foo
    type: object
    properties:
      bars:
        description: List of Bars and their statuses.
        type: array
        items:
          type: object
          properties:
            name:
              description: Name of Bar.
              type: string
            available:
              description: Whether the Bar is installed.
              type: boolean
            quxType:
              description: Indicates to external qux type.
              pattern: in-tree|out-of-tree
              type: string`)
// cleanupKubectlInputs force-deletes everything described by fileContents
// from namespace ns, then asserts that nothing matching the given selectors
// remains. Aware of the kubectl example files map.
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
	ginkgo.By("using delete to clean up resources")
	// Manifests are fed on stdin (raw contents rather than file paths) for
	// backward compatibility, since file-path dependencies are being removed
	// from this test.
	deleteArgs := []string{"delete", "--grace-period=0", "--force", "-f", "-"}
	framework.RunKubectlOrDieInput(ns, fileContents, deleteArgs...)
	assertCleanup(ns, selectors...)
}
// assertCleanup asserts that cleanup of a namespace wrt selectors occurred:
// no replication controllers, services, or non-terminating pods matching any
// selector may remain. It polls every 500ms for up to one minute before
// failing the test with the last observed leftover resources.
func assertCleanup(ns string, selectors ...string) {
	var e error
	verifyCleanupFunc := func() (bool, error) {
		e = nil
		for _, selector := range selectors {
			resources := framework.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
			if resources != "" {
				e = fmt.Errorf("Resources left running after stop:\n%s", resources)
				return false, nil
			}
			// List only pods that are not already terminating.
			pods := framework.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
			if pods != "" {
				e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
				return false, nil
			}
		}
		return true, nil
	}
	err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
	if err != nil {
		// Use an explicit format verb: the captured error embeds raw kubectl
		// output, and any '%' in it would otherwise be misinterpreted as a
		// formatting directive by Failf.
		framework.Failf("%v", e)
	}
}
// readTestFileOrDie loads a test manifest relative to kubeCtlManifestPath,
// failing the test immediately on any read error.
func readTestFileOrDie(file string) []byte {
	contents, err := e2etestfiles.Read(path.Join(kubeCtlManifestPath, file))
	if err != nil {
		framework.Fail(err.Error(), 1)
	}
	return contents
}
// runKubectlRetryOrDie invokes kubectl with the given arguments, retrying up
// to five times (pausing one second between attempts) when the failure is an
// optimistic-concurrency conflict. Any error remaining after the retries
// fails the test; on success the command's stdout is returned.
func runKubectlRetryOrDie(ns string, args ...string) string {
	var (
		output string
		err    error
	)
	for attempt := 0; attempt < 5; attempt++ {
		output, err = framework.RunKubectl(ns, args...)
		// Only conflict-style errors are worth retrying; anything else
		// (including success) ends the loop immediately.
		retryable := err != nil &&
			(strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) ||
				strings.Contains(err.Error(), "Operation cannot be fulfilled"))
		if !retryable {
			break
		}
		time.Sleep(time.Second)
	}
	// Expect no errors to be present after retries are finished
	// Copied from framework #ExecOrDie
	framework.Logf("stdout: %q", output)
	framework.ExpectNoError(err)
	return output
}
var _ = SIGDescribe("Kubectl client", func() {
defer ginkgo.GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{"app": "agnhost"},
ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
// test files.
// Print debug info if atLeast Pods are not found before the timeout
waitForOrFailWithDebug := func(atLeast int) {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := filepath.SplitList(subpath)
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := ioutil.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
ginkgo.Describe("Update Demo", func() {
var nautilus string
ginkgo.BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
data, err := e2etestfiles.Read(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))
if err != nil {
framework.Fail(err.Error())
}
nautilus = commonutils.SubstituteImageName(string(data))
})
/*
Release: v1.9
Testname: Kubectl, replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
*/
framework.ConformanceIt("should create and stop a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
/*
Release: v1.9
Testname: Kubectl, scale replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
*/
framework.ConformanceIt("should scale a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
ginkgo.By("creating a replication controller")
framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
ginkgo.By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
})
ginkgo.Describe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
guestbookRoot := "test/e2e/testing-manifests/guestbook"
for _, gbAppFile := range []string{
"agnhost-replica-service.yaml",
"agnhost-primary-service.yaml",
"frontend-service.yaml",
"frontend-deployment.yaml.in",
"agnhost-primary-deployment.yaml.in",
"agnhost-replica-deployment.yaml.in",
} {
data, err := e2etestfiles.Read(filepath.Join(guestbookRoot, gbAppFile))
if err != nil {
framework.Fail(err.Error())
}
contents := commonutils.SubstituteImageName(string(data))
run(contents)
}
}
/*
Release: v1.9
Testname: Kubectl, guestbook application
Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read.
*/
framework.ConformanceIt("should create and stop a working application ", func() {
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
ginkgo.By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
})
ginkgo.By("validating guestbook app")
validateGuestbookApp(c, ns)
})
})
ginkgo.Describe("Simple pod", func() {
var podYaml string
ginkgo.BeforeEach(func() {
ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector)
})
ginkgo.It("should support exec", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
ginkgo.By("executing a very long command in the container")
veryLongData := make([]rune, 20000)
for i := 0; i < len(veryLongData); i++ {
veryLongData[i] = 'a'
}
execOutput = framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", string(veryLongData))
framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output")
ginkgo.By("executing a command in the container with noninteractive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "cat").
WithStdinData("abcd1234").
ExecOrDie(ns)
if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
// pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil {
framework.Failf("Error creating blocking reader: %v", err)
}
// NOTE this is solely for test cleanup!
defer closer.Close()
ginkgo.By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "sh").
WithStdinReader(r).
ExecOrDie(ns)
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec using resource/name", func() {
ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodResourceName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
ginkgo.It("should support exec through an HTTP proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting goproxy")
testSrv, proxyLogs := startLocalProxy()
defer testSrv.Close()
proxyAddr := testSrv.URL
for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
proxyLogs.Reset()
ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
output := framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container").
WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
// Verify the proxy server logs saw the connection
expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api"))
proxyLog := proxyLogs.String()
if !strings.Contains(proxyLog, expectedProxyLog) {
framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
}
}
})
ginkgo.It("should support exec through kubectl proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
ginkgo.By("Starting kubectl proxy")
port, proxyCmd, err := startProxyServer(ns)
framework.ExpectNoError(err)
defer framework.TryKill(proxyCmd)
//proxyLogs.Reset()
host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
ginkgo.By("Running kubectl via kubectl proxy using " + host)
output := framework.NewKubectlCommand(
ns, host, fmt.Sprintf("--namespace=%s", ns),
"exec", "httpd", "echo", "running", "in", "container",
).ExecOrDie(ns)
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
})
ginkgo.It("should return command exit codes", func() {
ginkgo.By("execing into a container with a successful command")
_, err := framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
ginkgo.By("execing into a container with a failing command")
_, err = framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok := err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
framework.ExpectEqual(ee.ExitStatus(), 42)
ginkgo.By("running a successful command")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
ginkgo.By("running a failing command")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
framework.ExpectEqual(ee.ExitStatus(), 42)
ginkgo.By("running a failing command without --restart=Never")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
ginkgo.By("running a failing command without --restart=Never, but with --rm")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
ee, ok = err.(uexec.ExitError)
framework.ExpectEqual(ok, true)
if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
}
e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
ginkgo.By("running a failing command with --leave-stdin-open")
_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
})
ginkgo.It("should support inline execution and attach", func() {
waitForStdinContent := func(pod, content string) string {
var logOutput string
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
logOutput = framework.RunKubectlOrDie(ns, "logs", pod)
return strings.Contains(logOutput, content), nil
})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("unexpected error waiting for '%v' output", content))
return logOutput
}
ginkgo.By("executing a command with run and attach with stdin")
// We wait for a non-empty line so we know kubectl has attached
framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
WithStdinData("value\nabcd1234").
ExecOrDie(ns)
runOutput := waitForStdinContent("run-test", "stdin closed")
gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach without stdin")
// There is a race on this scenario described in #73099
// It fails if we are not able to attach before the container prints
// "stdin closed", but hasn't exited yet.
// We wait 10 seconds before printing to give time to kubectl to attach
// to the container, this does not solve the race though.
framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie(ns)
runOutput = waitForStdinContent("run-test-2", "stdin closed")
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234\n").
ExecOrDie(ns)
runOutput = waitForStdinContent("run-test-3", "abcd1234")
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
framework.ExpectNoError(err)
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
})
ginkgo.It("should contain last line of the log", func() {
podName := "run-log-test"
ginkgo.By("executing a command with run")
framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready")
}
logOutput := framework.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
})
ginkgo.It("should support port-forward", func() {
ginkgo.By("forwarding the container port to a local port")
cmd := runPortForward(ns, simplePodName, simplePodPort)
defer cmd.Stop()
ginkgo.By("curling local port output")
localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
body, err := curl(localAddr)
framework.Logf("got: %s", body)
if err != nil {
framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
}
if !strings.Contains(body, httpdDefaultOutput) {
framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
}
})
ginkgo.It("should handle in-cluster config", func() {
	// Verifies that a kubectl running *inside* a pod picks up the in-cluster
	// service-account configuration, and that flags/kubeconfig correctly
	// override it.
	// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
	// for now this only works on amd64
	e2eskipper.SkipUnlessNodeOSArchIs("amd64")

	ginkgo.By("adding rbac permissions")
	// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
	err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
		rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
	framework.ExpectNoError(err)

	err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
		serviceaccount.MakeUsername(f.Namespace.Name, "default"),
		f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
	framework.ExpectNoError(err)

	ginkgo.By("overriding icc with values provided by flags")
	kubectlPath := framework.TestContext.KubectlPath
	// we need the actual kubectl binary, not the script wrapper
	kubectlPathNormalizer := exec.Command("which", kubectlPath)
	if strings.HasSuffix(kubectlPath, "kubectl.sh") {
		kubectlPathNormalizer = exec.Command(kubectlPath, "path")
	}
	kubectlPathNormalized, err := kubectlPathNormalizer.Output()
	framework.ExpectNoError(err)
	kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))

	// Discover the in-cluster API endpoint as seen from inside the test pod.
	inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
	inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
	inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
	framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
	framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")

	// Build a kubeconfig file that will make use of the injected ca and token,
	// but point at the DNS host and the default namespace
	tmpDir, err := ioutil.TempDir("", "icc-override")
	overrideKubeconfigName := "icc-override.kubeconfig"
	framework.ExpectNoError(err)
	// BUGFIX: files are written into tmpDir below, and os.Remove cannot delete a
	// non-empty directory (the error was also ignored), so the temp dir was
	// leaked; os.RemoveAll removes the directory and its contents.
	defer func() { os.RemoveAll(tmpDir) }()

	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
    api-version: v1
    server: https://kubernetes.default.svc:443
    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  name: kubeconfig-cluster
contexts:
- context:
    cluster: kubeconfig-cluster
    namespace: default
    user: kubeconfig-user
  name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
	framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
	framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")

	// The invalid resource names below make `create` fail server-side, after
	// the client has already logged which namespace/config was selected.
	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap with namespace and invalid name"
  namespace: configmap-namespace
`), os.FileMode(0755)))
	framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
	framework.Logf("copying configmap manifests to the %s pod", simplePodName)
	framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
	framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")

	ginkgo.By("getting pods with in-cluster configs")
	execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
	gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
	gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))

	// NOTE(review): the matchers below run against the error value returned by
	// RunHostCmd; this relies on that error's string form containing the
	// command output — confirm against the framework helper if this flakes.
	ginkgo.By("creating an object containing a namespace with in-cluster config")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
	gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))

	ginkgo.By("creating an object not containing a namespace with in-cluster config")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
	gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))

	ginkgo.By("trying to use kubectl with invalid token")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
	framework.Logf("got err %v", err)
	framework.ExpectError(err)
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
	gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))

	ginkgo.By("trying to use kubectl with invalid server")
	_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
	framework.Logf("got err %v", err)
	framework.ExpectError(err)
	gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
	gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))

	ginkgo.By("trying to use kubectl with invalid namespace")
	execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
	gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
	gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
	gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))

	ginkgo.By("trying to use kubectl with kubeconfig")
	execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
	gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
	gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
	gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
})
})
ginkgo.Describe("Kubectl api-versions", func() {
	/*
		Release: v1.9
		Testname: Kubectl, check version v1
		Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed.
	*/
	framework.ConformanceIt("should check if v1 is in available api versions ", func() {
		ginkgo.By("validating api versions")
		// The core "v1" group/version must always be served by the apiserver.
		versions := framework.RunKubectlOrDie(ns, "api-versions")
		if !strings.Contains(versions, "v1") {
			framework.Failf("No v1 in kubectl api-versions")
		}
	})
})
ginkgo.Describe("Kubectl get componentstatuses", func() {
	ginkgo.It("should get componentstatuses", func() {
		ginkgo.By("getting list of componentstatuses")
		// Fetch only the names so each component can be queried individually.
		names := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
		ginkgo.By("getting details of componentstatuses")
		for _, name := range strings.Split(names, " ") {
			ginkgo.By("getting status of " + name)
			framework.RunKubectlOrDie(ns, "get", "componentstatuses", name)
		}
	})
})
// Exercises `kubectl apply`: updating an existing RC, idempotent re-apply of a
// service, and the set-last-applied/view-last-applied subcommands.
ginkgo.Describe("Kubectl apply", func() {
	ginkgo.It("should apply a new configuration to an existing RC", func() {
		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
		ginkgo.By("creating Agnhost RC")
		framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
		ginkgo.By("applying a modified configuration")
		stdin := modifyReplicationControllerConfiguration(controllerJSON)
		framework.NewKubectlCommand(ns, "apply", "-f", "-").
			WithStdinReader(stdin).
			ExecOrDie(ns)
		ginkgo.By("checking the result")
		// Every pod selected by app=agnhost must reflect the applied change.
		forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
	})
	ginkgo.It("should reuse port when apply to an existing SVC", func() {
		serviceJSON := readTestFileOrDie(agnhostServiceFilename)
		ginkgo.By("creating Agnhost SVC")
		framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
		ginkgo.By("getting the original port")
		originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
		ginkgo.By("applying the same configuration")
		framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
		ginkgo.By("getting the port after applying configuration")
		currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
		ginkgo.By("checking the result")
		// Re-applying an unchanged manifest must not re-allocate the port.
		if originalNodePort != currentNodePort {
			framework.Failf("port should keep the same")
		}
	})
	ginkgo.It("apply set/view last-applied", func() {
		deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename)))
		deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename)))
		deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
		ginkgo.By("deployment replicas number is 2")
		framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
		ginkgo.By("check the last-applied matches expectations annotations")
		output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
		requiredString := "\"replicas\": 2"
		if !strings.Contains(output, requiredString) {
			framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
		}
		ginkgo.By("apply file doesn't have replicas")
		framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
		ginkgo.By("check last-applied has been updated, annotations doesn't have replicas")
		// After set-last-applied with a replicas-free manifest, the annotation
		// must no longer mention replicas.
		output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
		requiredString = "\"replicas\": 2"
		if strings.Contains(output, requiredString) {
			framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
		}
		ginkgo.By("scale set replicas to 3")
		httpdDeploy := "httpd-deployment"
		debugDiscovery()
		framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
		ginkgo.By("apply file doesn't have replicas but image changed")
		framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
		ginkgo.By("verify replicas still is 3 and image has been updated")
		// Because replicas is absent from last-applied, apply must preserve the
		// scaled replica count while still updating the image.
		output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
		requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
		for _, item := range requiredItems {
			if !strings.Contains(output, item) {
				framework.Failf("Missing %s in kubectl apply", item)
			}
		}
	})
})
ginkgo.Describe("Kubectl diff", func() {
	/*
		Release: v1.19
		Testname: Kubectl, diff Deployment
		Description: Create a Deployment with httpd image. Declare the same Deployment with a different image, busybox. Diff of live Deployment with declared Deployment MUST include the difference between live and declared image.
	*/
	framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() {
		ginkgo.By("create deployment with httpd image")
		deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
		framework.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")

		ginkgo.By("verify diff finds difference between live and declared image")
		deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
		if !strings.Contains(deployment, busyboxImage) {
			framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
		}
		output, err := framework.RunKubectlInput(ns, deployment, "diff", "-f", "-")
		// `kubectl diff` exits 0 when there is no difference and 1 when a
		// difference is found; anything else is a real error. The declared image
		// differs from the live one, so exit code 1 is the expected outcome.
		// BUGFIX: the condition previously read `== 1`, which failed the test on
		// the expected exit code while printing a message claiming 1 was
		// expected; fail only when the exit code is something other than 1.
		if err, ok := err.(*exec.ExitError); ok && err.ExitCode() != 1 {
			framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
		}
		// Both the live and declared image names must appear in the diff output.
		requiredItems := []string{httpdImage, busyboxImage}
		for _, item := range requiredItems {
			if !strings.Contains(output, item) {
				framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
			}
		}
		framework.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
	})
})
ginkgo.Describe("Kubectl server-side dry-run", func() {
	/*
		Release: v1.19
		Testname: Kubectl, server-side dry-run Pod
		Description: The command 'kubectl run' must create a pod with the specified image name. After, the command 'kubectl replace --dry-run=server' should update the Pod with the new image name and server-side dry-run enabled. The image name must not change.
	*/
	framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() {
		ginkgo.By("running the image " + httpdImage)
		podName := "e2e-test-httpd-pod"
		framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)
		ginkgo.By("replace the image in the pod with server-side dry-run")
		// Fetch the live pod JSON and swap in the busybox image reference.
		podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
		podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
		if !strings.Contains(podJSON, busyboxImage) {
			framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, podJSON)
		}
		// With --dry-run server the replace is validated server-side but must
		// not be persisted.
		framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-", "--dry-run", "server")
		ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting pod %s: %v", podName, err)
		}
		// The live object must still carry the original httpd image.
		containers := pod.Spec.Containers
		if checkContainersImage(containers, httpdImage) {
			framework.Failf("Failed creating pod with expected image %s", httpdImage)
		}
		framework.RunKubectlOrDie(ns, "delete", "pods", podName)
	})
})
// definitionMatchesGVK returns true if the specified GVK is listed as an
// x-kubernetes-group-version-kind extension on the definition.
definitionMatchesGVK := func(extensions []*openapi_v2.NamedAny, desiredGVK schema.GroupVersionKind) bool {
	for _, ext := range extensions {
		rawYaml := ext.GetValue().GetYaml()
		// Skip extensions that are empty or are not the GVK marker.
		if rawYaml == "" || ext.GetName() != "x-kubernetes-group-version-kind" {
			continue
		}
		var gvkEntries []map[string]string
		if err := yaml.Unmarshal([]byte(rawYaml), &gvkEntries); err != nil {
			// Log and keep scanning; a single malformed entry should not abort
			// the whole search.
			framework.Logf("%v\n%s", err, string(rawYaml))
			continue
		}
		for _, entry := range gvkEntries {
			if entry["group"] == desiredGVK.Group &&
				entry["version"] == desiredGVK.Version &&
				entry["kind"] == desiredGVK.Kind {
				return true
			}
		}
	}
	return false
}
// schemaForGVK returns a schema (if defined) for the specified GVK, or nil
// when the discovery document has no matching definition.
schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
	doc, err := f.ClientSet.Discovery().OpenAPISchema()
	if err != nil {
		framework.Failf("%v", err)
	}
	if doc == nil || doc.Definitions == nil {
		return nil
	}
	// Scan every definition for one whose vendor extensions name the GVK.
	for _, prop := range doc.Definitions.AdditionalProperties {
		if prop == nil || prop.Value == nil {
			continue
		}
		if definitionMatchesGVK(prop.Value.VendorExtension, desiredGVK) {
			return prop.Value
		}
	}
	return nil
}
// Validates kubectl's client-side schema validation against CRDs with no
// schema, a full schema, and a partially-specified (x-preserve-unknown-fields)
// schema.
ginkgo.Describe("Kubectl client-side validation", func() {
	ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func() {
		ginkgo.By("create CRD with no validation schema")
		crd, err := crd.CreateTestCRD(f)
		if err != nil {
			framework.Failf("failed to create test CRD: %v", err)
		}
		defer crd.CleanUp()
		ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
		time.Sleep(10 * time.Second)
		// Without a schema, a CR with arbitrary unknown fields must be accepted.
		meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
		randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
		if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
			framework.Failf("%v", err)
		}
	})
	ginkgo.It("should create/apply a valid CR for CRD with validation schema", func() {
		ginkgo.By("prepare CRD with validation schema")
		crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
			props := &apiextensionsv1.JSONSchemaProps{}
			if err := yaml.Unmarshal(schemaFoo, props); err != nil {
				framework.Failf("failed to unmarshal schema: %v", err)
			}
			// Attach the same validation schema to every served version.
			for i := range crd.Spec.Versions {
				crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
			}
		})
		if err != nil {
			framework.Failf("failed to create test CRD: %v", err)
		}
		defer crd.CleanUp()
		ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
		time.Sleep(10 * time.Second)
		meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
		validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
		if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
			framework.Failf("%v", err)
		}
	})
	ginkgo.It("should create/apply a valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func() {
		ginkgo.By("prepare CRD with partially-specified validation schema")
		crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
			props := &apiextensionsv1.JSONSchemaProps{}
			if err := yaml.Unmarshal(schemaFoo, props); err != nil {
				framework.Failf("failed to unmarshal schema: %v", err)
			}
			// Allow for arbitrary-extra properties.
			props.XPreserveUnknownFields = pointer.BoolPtr(true)
			for i := range crd.Spec.Versions {
				crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
			}
		})
		if err != nil {
			framework.Failf("failed to create test CRD: %v", err)
		}
		defer crd.CleanUp()
		ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
		time.Sleep(10 * time.Second)
		// Ensure the schema has actually been published before attempting the
		// apply, so the client-side validation path is exercised.
		schema := schemaForGVK(schema.GroupVersionKind{Group: crd.Crd.Spec.Group, Version: crd.Crd.Spec.Versions[0].Name, Kind: crd.Crd.Spec.Names.Kind})
		framework.ExpectNotEqual(schema, nil, "retrieving a schema for the crd")
		meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
		validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta)
		err = createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd)
		framework.ExpectNoError(err, "creating custom resource")
	})
})
ginkgo.Describe("Kubectl cluster-info", func() {
	/*
		Release: v1.9
		Testname: Kubectl, cluster info
		Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running.
	*/
	framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() {
		ginkgo.By("validating cluster-info")
		info := framework.RunKubectlOrDie(ns, "cluster-info")
		// Can't check exact strings due to terminal control commands (colors)
		for _, want := range []string{"Kubernetes control plane", "is running at"} {
			if !strings.Contains(info, want) {
				framework.Failf("Missing %s in kubectl cluster-info", want)
			}
		}
	})
})
ginkgo.Describe("Kubectl cluster-info dump", func() {
	ginkgo.It("should check if cluster-info dump succeeds", func() {
		ginkgo.By("running cluster-info dump")
		// Success of the dump command itself is the assertion; RunKubectlOrDie
		// fails the test on a non-zero exit.
		framework.RunKubectlOrDie(ns, "cluster-info", "dump")
	})
})
ginkgo.Describe("Kubectl describe", func() {
	/*
		Release: v1.9
		Testname: Kubectl, describe pod or rc
		Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
	*/
	framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
		serviceJSON := readTestFileOrDie(agnhostServiceFilename)
		framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
		framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
		ginkgo.By("Waiting for Agnhost primary to start.")
		waitForOrFailWithDebug(1)
		// Pod
		// Each inner slice is a set of substrings that must co-occur on a line
		// of the describe output (see checkOutput).
		forEachPod(func(pod v1.Pod) {
			output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
			requiredStrings := [][]string{
				{"Name:", "agnhost-primary-"},
				{"Namespace:", ns},
				{"Node:"},
				{"Labels:", "app=agnhost"},
				{"role=primary"},
				{"Annotations:"},
				{"Status:", "Running"},
				{"IP:"},
				{"Controlled By:", "ReplicationController/agnhost-primary"},
				{"Image:", agnhostImage},
				{"State:", "Running"},
				{"QoS Class:", "BestEffort"},
			}
			checkOutput(output, requiredStrings)
		})
		// Rc
		requiredStrings := [][]string{
			{"Name:", "agnhost-primary"},
			{"Namespace:", ns},
			{"Selector:", "app=agnhost,role=primary"},
			{"Labels:", "app=agnhost"},
			{"role=primary"},
			{"Annotations:"},
			{"Replicas:", "1 current", "1 desired"},
			{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
			{"Pod Template:"},
			{"Image:", agnhostImage},
			{"Events:"}}
		// RC status is eventually consistent, so retry the describe until the
		// expected strings appear.
		checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
		// Service
		output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
		requiredStrings = [][]string{
			{"Name:", "agnhost-primary"},
			{"Namespace:", ns},
			{"Labels:", "app=agnhost"},
			{"role=primary"},
			{"Annotations:"},
			{"Selector:", "app=agnhost", "role=primary"},
			{"Type:", "ClusterIP"},
			{"IP:"},
			{"Port:", "<unset>", "6379/TCP"},
			{"Endpoints:"},
			{"Session Affinity:", "None"}}
		checkOutput(output, requiredStrings)
		// Node
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		node := nodes.Items[0]
		output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name)
		requiredStrings = [][]string{
			{"Name:", node.Name},
			{"Labels:"},
			{"Annotations:"},
			{"CreationTimestamp:"},
			{"Conditions:"},
			{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
			{"Addresses:"},
			{"Capacity:"},
			{"Version:"},
			{"Kernel Version:"},
			{"OS Image:"},
			{"Container Runtime Version:"},
			{"Kubelet Version:"},
			{"Kube-Proxy Version:"},
			{"Pods:"}}
		checkOutput(output, requiredStrings)
		// Namespace
		output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns)
		requiredStrings = [][]string{
			{"Name:", ns},
			{"Labels:"},
			{"Annotations:"},
			{"Status:", "Active"}}
		checkOutput(output, requiredStrings)
		// Quota and limitrange are skipped for now.
	})
	ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() {
		ginkgo.By("creating a cronjob")
		cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
		framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")
		ginkgo.By("waiting for cronjob to start.")
		// Poll until at least one CronJob exists in the namespace (the describe
		// below would otherwise race the create).
		err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
			cj, err := c.BatchV1beta1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err)
			}
			return len(cj.Items) > 0, nil
		})
		framework.ExpectNoError(err)
		ginkgo.By("verifying kubectl describe prints")
		output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
		requiredStrings := [][]string{
			{"Name:", "cronjob-test"},
			{"Namespace:", ns},
			{"Labels:"},
			{"Annotations:"},
			{"Schedule:", "*/1 * * * *"},
			{"Concurrency Policy:", "Allow"},
			{"Suspend:", "False"},
			{"Successful Job History Limit:", "3"},
			{"Failed Job History Limit:", "1"},
			{"Starting Deadline Seconds:", "30s"},
			{"Selector:"},
			{"Parallelism:"},
			{"Completions:"},
		}
		checkOutput(output, requiredStrings)
	})
})
ginkgo.Describe("Kubectl expose", func() {
	/*
		Release: v1.9
		Testname: Kubectl, create service, replication controller
		Description: Create a Pod running agnhost listening to port 6379. Using kubectl expose the agnhost primary replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that agnhost primary is listening. Using kubectl expose the agnhost primary as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that agnhost primary is listening.
	*/
	framework.ConformanceIt("should create services for rc ", func() {
		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
		agnhostPort := 6379
		ginkgo.By("creating Agnhost RC")
		framework.Logf("namespace %v", ns)
		framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
		// It may take a while for the pods to get registered in some cases, wait to be sure.
		ginkgo.By("Waiting for Agnhost primary to start.")
		waitForOrFailWithDebug(1)
		forEachPod(func(pod v1.Pod) {
			framework.Logf("wait on agnhost-primary startup in %v ", ns)
			framework.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
		})
		// validateService polls until the named service has exactly one
		// endpoint on agnhostPort, then checks the service exposes servicePort
		// with agnhostPort as its target.
		validateService := func(name string, servicePort int, timeout time.Duration) {
			err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
				ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
				if err != nil {
					// log the real error
					framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
					// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
					if apierrors.IsNotFound(err) ||
						apierrors.IsUnauthorized(err) ||
						apierrors.IsServerTimeout(err) {
						err = nil
					}
					return false, err
				}
				uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
				if len(uidToPort) == 0 {
					framework.Logf("No endpoint found, retrying")
					return false, nil
				}
				if len(uidToPort) > 1 {
					framework.Failf("Too many endpoints found")
				}
				// The single endpoint must point at the agnhost container port.
				for _, port := range uidToPort {
					if port[0] != agnhostPort {
						framework.Failf("Wrong endpoint port: %d", port[0])
					}
				}
				return true, nil
			})
			framework.ExpectNoError(err)
			e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			if len(e2eservice.Spec.Ports) != 1 {
				framework.Failf("1 port is expected")
			}
			port := e2eservice.Spec.Ports[0]
			if port.Port != int32(servicePort) {
				framework.Failf("Wrong service port: %d", port.Port)
			}
			if port.TargetPort.IntValue() != agnhostPort {
				framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
			}
		}
		ginkgo.By("exposing RC")
		framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
		e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
		validateService("rm2", 1234, framework.ServiceStartTimeout)
		ginkgo.By("exposing service")
		// Exposing the service created above must also work, mapping a new
		// port onto the same target port.
		framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
		e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
		validateService("rm3", 2345, framework.ServiceStartTimeout)
	})
})
ginkgo.Describe("Kubectl label", func() {
	var podYaml string
	ginkgo.BeforeEach(func() {
		ginkgo.By("creating the pod")
		podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
		framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
		framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
	})
	ginkgo.AfterEach(func() {
		cleanupKubectlInputs(podYaml, ns, pausePodSelector)
	})
	/*
		Release: v1.9
		Testname: Kubectl, label update
		Description: When a Pod is running, update a Label using 'kubectl label' command. The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the container MUST verify that the label can be read back. Use 'kubectl label label-' to remove the label. 'kubectl get pod' with -l option SHOULD not list the deleted label as the label is removed.
	*/
	framework.ConformanceIt("should update the label on a resource ", func() {
		const (
			labelName  = "testing-label"
			labelValue = "testing-label-value"
		)
		ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
		framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
		ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
		// -L adds a column for the label, so the value must show up in the output.
		out := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
		if !strings.Contains(out, labelValue) {
			framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
		}
		ginkgo.By("removing the label " + labelName + " of a pod")
		// The trailing "-" on the label key removes it.
		framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
		ginkgo.By("verifying the pod doesn't have the label " + labelName)
		out = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
		if strings.Contains(out, labelValue) {
			framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
		}
	})
})
ginkgo.Describe("Kubectl copy", func() {
	var podYaml string
	ginkgo.BeforeEach(func() {
		ginkgo.By("creating the pod")
		podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
		framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
		framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
	})
	ginkgo.AfterEach(func() {
		cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
	})
	/*
		Release: v1.12
		Testname: Kubectl, copy
		Description: When a Pod is running, copy a known file from it to a temporary local destination.
	*/
	ginkgo.It("should copy a file from a running Pod", func() {
		remoteContents := "foobar\n"
		podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
		tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar")
		if err != nil {
			framework.Failf("Failed creating temporary destination file: %v", err)
		}
		// BUGFIX: the temporary file was previously neither closed nor removed,
		// leaking a file descriptor and a file on every run.
		defer func() {
			tempDestination.Close()
			os.Remove(tempDestination.Name())
		}()
		ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
		framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
		ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
		localData, err := ioutil.ReadAll(tempDestination)
		if err != nil {
			framework.Failf("Failed reading temporary local file: %v", err)
		}
		if string(localData) != remoteContents {
			framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
		}
	})
})
ginkgo.Describe("Kubectl logs", func() {
	podName := "logs-generator"
	containerName := "logs-generator"
	ginkgo.BeforeEach(func() {
		ginkgo.By("creating an pod")
		// Agnhost image generates logs for a total of 100 lines over 20s.
		framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
	})
	ginkgo.AfterEach(func() {
		framework.RunKubectlOrDie(ns, "delete", "pod", podName)
	})
	/*
		Release: v1.9
		Testname: Kubectl, logs
		Description: When a Pod is running then it MUST generate logs.
		Starting a Pod should have a expected log line. Also log command options MUST work as expected and described below.
		'kubectl logs -tail=1' should generate a output of one line, the last line in the log.
		'kubectl --limit-bytes=1' should generate a single byte output.
		'kubectl --tail=1 --timestamp should generate one line with timestamp in RFC3339 format
		'kubectl --since=1s' should output logs that are only 1 second older from now
		'kubectl --since=24h' should output logs that are only 1 day older from now
	*/
	framework.ConformanceIt("should be able to retrieve and filter logs ", func() {
		// Split("something\n", "\n") returns ["something", ""], so
		// strip trailing newline first
		lines := func(out string) []string {
			return strings.Split(strings.TrimRight(out, "\n"), "\n")
		}
		ginkgo.By("Waiting for log generator to start.")
		if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
			framework.Failf("Pod %s was not ready", podName)
		}
		ginkgo.By("checking for a matching strings")
		_, err := framework.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout)
		framework.ExpectNoError(err)
		ginkgo.By("limiting log lines")
		// --tail=1 must yield exactly one (non-empty) line.
		out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1")
		framework.Logf("got output %q", out)
		gomega.Expect(len(out)).NotTo(gomega.BeZero())
		framework.ExpectEqual(len(lines(out)), 1)
		ginkgo.By("limiting log bytes")
		// --limit-bytes=1 must yield exactly one byte (and hence one line).
		out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1")
		framework.Logf("got output %q", out)
		framework.ExpectEqual(len(lines(out)), 1)
		framework.ExpectEqual(len(out), 1)
		ginkgo.By("exposing timestamps")
		// With --timestamps, the first whitespace-separated token of the line
		// must parse as an RFC3339 or RFC3339Nano timestamp.
		out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps")
		framework.Logf("got output %q", out)
		l := lines(out)
		framework.ExpectEqual(len(l), 1)
		words := strings.Split(l[0], " ")
		gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1))
		if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
			if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
				framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
			}
		}
		ginkgo.By("restricting to a time range")
		// Note: we must wait at least two seconds,
		// because the granularity is only 1 second and
		// it could end up rounding the wrong way.
		time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
		recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s")
		recent := len(strings.Split(recentOut, "\n"))
		olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h")
		older := len(strings.Split(olderOut, "\n"))
		gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut)
	})
})
// Kubectl patch: creates an agnhost replication controller, then verifies that
// 'kubectl patch' can add an annotation to each running pod and that the
// annotation is visible when the pods are re-read.
ginkgo.Describe("Kubectl patch", func() {
	/*
		Release: v1.9
		Testname: Kubectl, patch to annotate
		Description: Start running agnhost and a replication controller. When the pod is running, using 'kubectl patch' command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller.
	*/
	framework.ConformanceIt("should add annotations for pods in rc ", func() {
		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
		ginkgo.By("creating Agnhost RC")
		framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
		ginkgo.By("Waiting for Agnhost primary to start.")
		waitForOrFailWithDebug(1)

		ginkgo.By("patching all pods")
		forEachPod(func(pod v1.Pod) {
			framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
		})

		ginkgo.By("checking annotations")
		forEachPod(func(pod v1.Pod) {
			// The patch above set metadata.annotations.x = "y"; require it on every pod.
			found := false
			for key, val := range pod.Annotations {
				if key == "x" && val == "y" {
					found = true
					break
				}
			}
			if !found {
				framework.Failf("Added annotation not found")
			}
		})
	})
})
// Kubectl version: asserts that 'kubectl version' prints Major, Minor,
// GitVersion and GitCommit for both the client and the server.
ginkgo.Describe("Kubectl version", func() {
	/*
		Release: v1.9
		Testname: Kubectl, version
		Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to.
	*/
	framework.ConformanceIt("should check is all data is printed ", func() {
		versionString := framework.RunKubectlOrDie(ns, "version")
		// we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric
		requiredItems := []string{"Client Version: ", "Server Version: "}
		for _, item := range requiredItems {
			if matched, _ := regexp.MatchString(item+`version.Info\{Major:"\d", Minor:"\d+\+?", GitVersion:"v\d\.\d+\.[\d\w\-\.\+]+", GitCommit:"[0-9a-f]+"`, versionString); !matched {
				framework.Failf("Item %s value is not valid in %s\n", item, versionString)
			}
		}
	})
})
// Kubectl run pod: verifies that 'kubectl run --restart=Never' creates a pod
// with the requested image and RestartPolicy. The pod is deleted in AfterEach.
ginkgo.Describe("Kubectl run pod", func() {
	var podName string

	ginkgo.BeforeEach(func() {
		podName = "e2e-test-httpd-pod"
	})
	ginkgo.AfterEach(func() {
		framework.RunKubectlOrDie(ns, "delete", "pods", podName)
	})

	/*
		Release: v1.9
		Testname: Kubectl, run pod
		Description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
	*/
	framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
		ginkgo.By("running the image " + httpdImage)
		framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--image="+httpdImage)
		ginkgo.By("verifying the pod " + podName + " was created")
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting pod %s: %v", podName, err)
		}
		containers := pod.Spec.Containers
		// checkContainersImage returns true when the pod does NOT have exactly
		// one container running the expected image (i.e. true == failure).
		if checkContainersImage(containers, httpdImage) {
			framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
		}
		if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
			framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
		}
	})
})
// Kubectl replace: verifies that 'kubectl replace -f -' on an existing pod's
// JSON with a swapped image recreates the pod running the new image.
// The pod is deleted in AfterEach.
ginkgo.Describe("Kubectl replace", func() {
	var podName string

	ginkgo.BeforeEach(func() {
		podName = "e2e-test-httpd-pod"
	})
	ginkgo.AfterEach(func() {
		framework.RunKubectlOrDie(ns, "delete", "pods", podName)
	})

	/*
		Release: v1.9
		Testname: Kubectl, replace
		Description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image.
	*/
	framework.ConformanceIt("should update a single-container pod's image ", func() {
		ginkgo.By("running the image " + httpdImage)
		framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)

		ginkgo.By("verifying the pod " + podName + " is running")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
		err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
		if err != nil {
			framework.Failf("Failed getting pod %s: %v", podName, err)
		}

		ginkgo.By("verifying the pod " + podName + " was created")
		podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
		if !strings.Contains(podJSON, podName) {
			framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
		}

		ginkgo.By("replace the image in the pod")
		podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
		framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")

		ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			// Fixed: this is a Pod lookup, not a Deployment lookup; the old
			// message said "Failed getting deployment".
			framework.Failf("Failed getting pod %s: %v", podName, err)
		}
		containers := pod.Spec.Containers
		// checkContainersImage returns true when the pod does NOT have exactly
		// one container running the expected image (i.e. true == failure).
		if checkContainersImage(containers, busyboxImage) {
			framework.Failf("Failed creating pod with expected image %s", busyboxImage)
		}
	})
})
// Proxy server: verifies 'kubectl proxy' over both a TCP port (--port=0,
// OS-assigned) and a unix domain socket (--unix-socket), in each case
// requiring at least one API version from /api/.
ginkgo.Describe("Proxy server", func() {
	// TODO: test proxy options (static, prefix, etc)
	/*
		Release: v1.9
		Testname: Kubectl, proxy port zero
		Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string.
	*/
	framework.ConformanceIt("should support proxy with --port 0 ", func() {
		ginkgo.By("starting the proxy server")
		port, cmd, err := startProxyServer(ns)
		// Kill the proxy process even when startProxyServer returned an error,
		// since the command may still have been started.
		if cmd != nil {
			defer framework.TryKill(cmd)
		}
		if err != nil {
			framework.Failf("Failed to start proxy server: %v", err)
		}
		ginkgo.By("curling proxy /api/ output")
		localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
		apiVersions, err := getAPIVersions(localAddr)
		if err != nil {
			framework.Failf("Expected at least one supported apiversion, got error %v", err)
		}
		if len(apiVersions.Versions) < 1 {
			framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
		}
	})

	/*
		Release: v1.9
		Testname: Kubectl, proxy socket
		Description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=<some path>. Call the proxy server by requesting api versions from http://localhost:0/api. The proxy server MUST provide at least one version string
	*/
	framework.ConformanceIt("should support --unix-socket=/path ", func() {
		ginkgo.By("Starting the proxy")
		tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
		if err != nil {
			framework.Failf("Failed to create temporary directory: %v", err)
		}
		path := filepath.Join(tmpdir, "test")
		defer os.Remove(path)
		defer os.Remove(tmpdir)
		tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
		cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
		stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
		if err != nil {
			framework.Failf("Failed to start kubectl command: %v", err)
		}
		defer stdout.Close()
		defer stderr.Close()
		defer framework.TryKill(cmd)
		// Wait for the proxy to write something to stdout before dialing the socket.
		buf := make([]byte, 128)
		if _, err = stdout.Read(buf); err != nil {
			framework.Failf("Expected output from kubectl proxy: %v", err)
		}
		ginkgo.By("retrieving proxy /api/ output")
		_, err = curlUnix("http://unused/api", path)
		if err != nil {
			framework.Failf("Failed get of /api at %s: %v", path, err)
		}
	})
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
ginkgo.Describe("Kubectl taint [Serial]", func() {
	// Adds a unique NoSchedule taint, verifies it via 'kubectl describe node',
	// removes it with the '<key>:<effect>-' syntax, and verifies the removal.
	ginkgo.It("should update the taint on a node", func() {
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		nodeName := scheduling.GetNodeThatCanRunPod(f)

		ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
		// Best-effort cleanup in case the kubectl removal below never runs.
		defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)

		ginkgo.By("verifying the node has the taint " + testTaint.ToString())
		output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		requiredStrings := [][]string{
			{"Name:", nodeName},
			{"Taints:"},
			{testTaint.ToString()},
		}
		checkOutput(output, requiredStrings)

		ginkgo.By("removing the taint " + testTaint.ToString() + " of a node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
		ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
		output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		if strings.Contains(output, testTaint.Key) {
			framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
		}
	})

	// Adds three taints sharing one key (NoSchedule, PreferNoSchedule,
	// NoExecute) and verifies that the '<key>-' syntax removes all of them.
	ginkgo.It("should remove all the taints with the same key off a node", func() {
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		nodeName := scheduling.GetNodeThatCanRunPod(f)

		ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
		defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)

		ginkgo.By("verifying the node has the taint " + testTaint.ToString())
		output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		requiredStrings := [][]string{
			{"Name:", nodeName},
			{"Taints:"},
			{testTaint.ToString()},
		}
		checkOutput(output, requiredStrings)

		// Second taint: same key, different value and effect.
		newTestTaint := v1.Taint{
			Key:    testTaint.Key,
			Value:  "another-testing-taint-value",
			Effect: v1.TaintEffectPreferNoSchedule,
		}
		ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
		defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)

		ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
		output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		requiredStrings = [][]string{
			{"Name:", nodeName},
			{"Taints:"},
			{newTestTaint.ToString()},
		}
		checkOutput(output, requiredStrings)

		// Third taint: same key again, NoExecute effect.
		noExecuteTaint := v1.Taint{
			Key:    testTaint.Key,
			Value:  "testing-taint-value-no-execute",
			Effect: v1.TaintEffectNoExecute,
		}
		ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
		defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)

		ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
		output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		requiredStrings = [][]string{
			{"Name:", nodeName},
			{"Taints:"},
			{noExecuteTaint.ToString()},
		}
		checkOutput(output, requiredStrings)

		// A single '<key>-' must remove all three taints at once.
		ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node")
		runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-")
		ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
		output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
		if strings.Contains(output, testTaint.Key) {
			framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
		}
	})
})
// Kubectl create quota: covers 'kubectl create quota' with no scopes, with
// valid scopes, and with an invalid scope (which must be rejected).
ginkgo.Describe("Kubectl create quota", func() {
	ginkgo.It("should create a quota without scopes", func() {
		quotaName := "million"

		ginkgo.By("calling kubectl quota")
		framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")

		ginkgo.By("verifying that the quota was created")
		quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting quota %s: %v", quotaName, err)
		}

		if len(quota.Spec.Scopes) != 0 {
			framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
		}
		if len(quota.Spec.Hard) != 2 {
			framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
		}
		r, found := quota.Spec.Hard[v1.ResourcePods]
		if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
			framework.Failf("Expected pods=1000000, got %v", r)
		}
		r, found = quota.Spec.Hard[v1.ResourceServices]
		if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
			framework.Failf("Expected services=1000000, got %v", r)
		}
	})

	ginkgo.It("should create a quota with scopes", func() {
		quotaName := "scopes"

		ginkgo.By("calling kubectl quota")
		framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")

		ginkgo.By("verifying that the quota was created")
		quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			framework.Failf("Failed getting quota %s: %v", quotaName, err)
		}

		if len(quota.Spec.Scopes) != 2 {
			framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
		}
		// Collect the scopes into a set so the check is order-independent.
		scopes := make(map[v1.ResourceQuotaScope]struct{})
		for _, scope := range quota.Spec.Scopes {
			scopes[scope] = struct{}{}
		}
		if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
			framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
		}
		if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
			framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
		}
	})

	ginkgo.It("should reject quota with invalid scopes", func() {
		quotaName := "scopes"

		ginkgo.By("calling kubectl quota")
		out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo")
		if err == nil {
			framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
		}
	})
})
})
// checkOutputReturnError scans output line by line and verifies that the
// required groups appear in order: for each group, the first element must be
// found on some line at or after the previous group's line, and every further
// element of that group must appear on that same line. Returns a descriptive
// error on the first unmet requirement, nil otherwise.
func checkOutputReturnError(output string, required [][]string) error {
	outputLines := strings.Split(output, "\n")
	line := 0
	for _, requirement := range required {
		anchor := requirement[0]
		// Advance to the next line containing the group's anchor string.
		for ; line < len(outputLines); line++ {
			if strings.Contains(outputLines[line], anchor) {
				break
			}
		}
		if line == len(outputLines) {
			return fmt.Errorf("failed to find %s in %s", anchor, output)
		}
		// The remaining items of the group must share the anchor's line.
		for _, item := range requirement[1:] {
			if !strings.Contains(outputLines[line], item) {
				return fmt.Errorf("failed to find %s in %s", item, outputLines[line])
			}
		}
	}
	return nil
}
// checkOutput fails the current test when output does not satisfy the
// required string groups (see checkOutputReturnError for the matching rules).
func checkOutput(output string, required [][]string) {
	if err := checkOutputReturnError(output, required); err != nil {
		framework.Failf("%v", err)
	}
}
// checkKubectlOutputWithRetry runs the given kubectl command once per second
// for up to a minute, until its output satisfies the required string groups
// (see checkOutputReturnError). If the output never satisfies them, the test
// fails with the last mismatch.
//
// Fixed: dropped the redundant bare `return` at the end of the function
// (staticcheck S1023).
func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
	var pollErr error
	// The condition func never returns an error, so PollImmediate can only
	// stop on success or timeout; pollErr holds the last mismatch so the
	// timeout case can report something useful.
	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		output := framework.RunKubectlOrDie(namespace, args...)
		if err := checkOutputReturnError(output, required); err != nil {
			pollErr = err
			return false, nil
		}
		pollErr = nil
		return true, nil
	})
	if pollErr != nil {
		framework.Failf("%v", pollErr)
	}
}
// checkContainersImage reports whether the containers list FAILS the image
// check: it returns true when there is not exactly one container, or when
// that single container does not run expectImage. Callers treat a true
// result as a test failure.
func checkContainersImage(containers []v1.Container, expectImage string) bool {
	// len(nil) == 0, so the previous explicit `containers == nil` check was
	// redundant and has been removed.
	return len(containers) != 1 || containers[0].Image != expectImage
}
// getAPIVersions fetches apiEndpoint (typically a proxy's /api/ URL) and
// unmarshals the JSON response body into a metav1.APIVersions value.
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
	body, err := curl(apiEndpoint)
	if err != nil {
		return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
	}
	var apiVersions metav1.APIVersions
	if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
		return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err)
	}
	return &apiVersions, nil
}
// startProxyServer launches 'kubectl proxy -p 0 --disable-filter' and parses
// the OS-assigned port out of the first chunk of the command's stdout.
// It returns the port and the running command; the caller is responsible for
// killing cmd (which is returned even on a post-start error so it can be
// cleaned up).
func startProxyServer(ns string) (int, *exec.Cmd, error) {
	// Specifying port 0 indicates we want the os to pick a random port.
	tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
	cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter")
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		return -1, nil, err
	}
	defer stdout.Close()
	defer stderr.Close()
	// A single 128-byte read is assumed to capture the proxy's startup line
	// announcing its listen address, which proxyRegexp then matches.
	buf := make([]byte, 128)
	var n int
	if n, err = stdout.Read(buf); err != nil {
		return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
	}
	output := string(buf[:n])
	match := proxyRegexp.FindStringSubmatch(output)
	if len(match) == 2 {
		if port, err := strconv.Atoi(match[1]); err == nil {
			return port, cmd, nil
		}
	}
	return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
}
// curlUnix performs a GET of url over a unix domain socket at path by
// installing a dialer that ignores the URL's host and dials the socket
// directly.
func curlUnix(url string, path string) (string, error) {
	transport := utilnet.SetTransportDefaults(&http.Transport{
		// proto/addr from the URL are intentionally ignored; every
		// connection goes to the unix socket at path.
		DialContext: func(ctx context.Context, proto, addr string) (net.Conn, error) {
			var dialer net.Dialer
			return dialer.DialContext(ctx, "unix", path)
		},
	})
	return curlTransport(url, transport)
}
// curlTransport performs a GET of url using the supplied transport and
// returns the full response body as a string.
func curlTransport(url string, transport *http.Transport) (string, error) {
	client := &http.Client{Transport: transport}
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Fixed: string(body[:]) re-sliced the whole slice for no effect;
	// string(body) is equivalent.
	return string(body), nil
}
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
// validateGuestbookApp waits for the guestbook frontend pods to run, then
// exercises the app end-to-end through the service proxy: an empty read, a
// write of "TestEntry", and a read-back of the written entry, each within its
// respective timeout.
func validateGuestbookApp(c clientset.Interface, ns string) {
	framework.Logf("Waiting for all frontend pods to be Running.")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
	err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
	framework.ExpectNoError(err)

	framework.Logf("Waiting for frontend to serve content.")
	if !waitForGuestbookResponse(c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
		framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
	}

	framework.Logf("Trying to add a new entry to the guestbook.")
	if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
	}

	framework.Logf("Verifying that added entry can be retrieved.")
	if !waitForGuestbookResponse(c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
	}
}
// waitForGuestbookResponse polls the guestbook every 5 seconds until it
// returns expectedResponse for the given command/argument, or until timeout
// elapses. It reports whether the expected response was ever received.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		res, err := makeRequestToGuestbook(c, cmd, arg, ns)
		if err == nil && res == expectedResponse {
			return true
		}
		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
		time.Sleep(5 * time.Second)
	}
	return false
}
// makeRequestToGuestbook issues one request to the guestbook "frontend"
// service through the API server's services proxy, passing cmd (e.g. "get" or
// "set") and value as query parameters against the fixed "messages" key.
// It returns the raw response body, bounded by framework.SingleCallTimeout.
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
	if errProxy != nil {
		return "", errProxy
	}

	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()

	result, err := proxyRequest.Namespace(ns).
		Name("frontend").
		Suffix("/guestbook").
		Param("cmd", cmd).
		Param("key", "messages").
		Param("value", value).
		Do(ctx).
		Raw()
	return string(result), err
}
// updateDemoData mirrors the JSON document served by update-demo pods at
// data.json; only the image name is inspected (see getUDData).
type updateDemoData struct {
	Image string
}

// applyTestLabel is the label/selector key that the apply tests stamp onto
// modified replication controller configurations.
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
// readReplicationControllerFromString decodes a YAML manifest into a
// ReplicationController, failing the current test on any decode error.
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
	var controller v1.ReplicationController
	if err := yaml.Unmarshal([]byte(contents), &controller); err != nil {
		framework.Failf(err.Error())
	}
	return &controller
}
// modifyReplicationControllerConfiguration parses an RC manifest, stamps the
// applyTestLabel key with the value "ADDED" onto its labels, selector, and
// pod template labels, and returns the modified object re-encoded as JSON.
func modifyReplicationControllerConfiguration(contents string) io.Reader {
	const marker = "ADDED"
	rc := readReplicationControllerFromString(contents)
	rc.Labels[applyTestLabel] = marker
	rc.Spec.Selector[applyTestLabel] = marker
	rc.Spec.Template.Labels[applyTestLabel] = marker

	encoded, err := json.Marshal(rc)
	if err != nil {
		framework.Failf("json marshal failed: %s\n", err)
	}
	return bytes.NewReader(encoded)
}
// forEachReplicationController polls for up to framework.PodListTimeout until
// at least one ReplicationController in ns matches selectorKey=selectorValue,
// then invokes fn on every matching RC. Fails the test if none are ever found.
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
	var rcs *v1.ReplicationControllerList
	var err error
	for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
		label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
		options := metav1.ListOptions{LabelSelector: label.String()}
		rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options)
		framework.ExpectNoError(err)
		if len(rcs.Items) > 0 {
			break
		}
	}

	// rcs stays nil only if the loop body never ran; otherwise an empty Items
	// list means the poll timed out without finding a match.
	if rcs == nil || len(rcs.Items) == 0 {
		framework.Failf("No replication controllers found")
	}

	for _, rc := range rcs.Items {
		fn(rc)
	}
}
// validateReplicationControllerConfiguration checks that the agnhost-primary
// RC carries both the last-applied-configuration annotation and the
// applyTestLabel="ADDED" label set by modifyReplicationControllerConfiguration.
// RCs with any other name are ignored.
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
	if rc.Name != "agnhost-primary" {
		return
	}
	if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
		framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
	}
	if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
		framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
	}
}
// getUDData creates a validator function based on the input string (i.e. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {

	// getUDData validates data.json in the update-demo (returns nil if data is ok).
	return func(c clientset.Interface, podID string) error {
		framework.Logf("validating pod %s", podID)

		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()

		body, err := c.CoreV1().RESTClient().Get().
			Namespace(ns).
			Resource("pods").
			SubResource("proxy").
			Name(podID).
			Suffix("data.json").
			// Fixed: pass the timeout context instead of context.TODO();
			// previously SingleCallTimeout was created but never applied to
			// the request, so the ctx.Err() check below could never trip.
			Do(ctx).
			Raw()

		if err != nil {
			// A non-nil ctx.Err() means the deadline expired (or was
			// cancelled) rather than the pod serving bad data.
			if ctx.Err() != nil {
				framework.Failf("Failed to retrieve data from container: %v", err)
			}
			return err
		}
		framework.Logf("got data: %s", body)
		var data updateDemoData
		if err := json.Unmarshal(body, &data); err != nil {
			return err
		}
		framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
		if strings.Contains(data.Image, jpgExpected) {
			return nil
		}
		return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
	}
}
// newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer.
//
// We're explicitly returning the reader and closer separately, because
// the closer needs to be the *os.File we get from os.Pipe(). This is required
// so the exec of kubectl can pass the underlying file descriptor to the exec
// syscall, instead of creating another os.Pipe and blocking on the io.Copy
// between the source (e.g. stdin) and the write half of the pipe.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
w.Write([]byte(s))
return r, w, nil
}
// startLocalProxy runs a local goproxy HTTP proxy wrapped in an httptest
// server. It returns the server and a buffer that accumulates the proxy's
// verbose log output; the caller is responsible for closing the server.
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
	logs = &bytes.Buffer{}
	p := goproxy.NewProxyHttpServer()
	p.Verbose = true
	p.Logger = log.New(logs, "", 0)
	return httptest.NewServer(p), logs
}
// createApplyCustomResource asserts that the given CustomResource can be both
// created and applied with client-side validation enabled, deleting the CR
// after each operation so the second one starts from a clean slate.
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
	// Shared cleanup: both the create and apply paths delete the CR by name.
	deleteCR := func() error {
		if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
			return fmt.Errorf("failed to delete CR %s: %v", name, err)
		}
		return nil
	}

	ginkgo.By("successfully create CR")
	if _, err := framework.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
		return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err)
	}
	if err := deleteCR(); err != nil {
		return err
	}

	ginkgo.By("successfully apply CR")
	if _, err := framework.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
		return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err)
	}
	return deleteCR()
}
// trimDockerRegistry strips the implicit "docker.io/" and "library/" name
// segments that Docker-based runtimes may prepend to image names, so that
// image comparisons succeed regardless of which runtime reported the name.
// Each segment's first occurrence is removed independently.
func trimDockerRegistry(imagename string) string {
	// Remove only the first occurrence of each segment, mirroring how the
	// default registry and namespace are prepended at most once.
	for _, segment := range []string{"docker.io/", "library/"} {
		imagename = strings.Replace(imagename, segment, "", 1)
	}
	return imagename
}
// validatorFn is the per-test validation hook that individual tests implement;
// validateController invokes it with the client and a pod name for each
// running pod, and a non-nil error marks the pod as not yet valid.
// We may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error
// validateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func validateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
	containerImage = trimDockerRegistry(containerImage)
	// kubectl go-templates used below:
	//   getPodsTemplate            - space-separated pod names
	//   getContainerStateTemplate  - "true" iff the named container is running
	//   getImageTemplate           - the image of the named container
	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"

	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)

	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)

	ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
	// Poll every 5s until PodStartTimeout: all replicas must exist, be
	// running the expected image, and pass the validator in a single pass.
	// Any failing check restarts the outer loop via `continue waitLoop`.
	for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
		getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
		pods := strings.Fields(getPodsOutput)
		if numPods := len(pods); numPods != replicas {
			ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
			continue
		}
		var runningPods []string
		for _, podID := range pods {
			running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
			if running != "true" {
				framework.Logf("%s is created but not running", podID)
				continue waitLoop
			}

			currentImage := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
			currentImage = trimDockerRegistry(currentImage)
			if currentImage != containerImage {
				framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
				continue waitLoop
			}

			// Call the generic validator function here.
			// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
			if err := validator(c, podID); err != nil {
				framework.Logf("%s is running right image but validator function failed: %v", podID, err)
				continue waitLoop
			}

			framework.Logf("%s is verified up and running", podID)
			runningPods = append(runningPods, podID)
		}
		// If we reach here, then all our checks passed.
		if len(runningPods) == replicas {
			return
		}
	}
	// Reaching here means that one of more checks failed multiple times.  Assuming its not a race condition, something is broken.
	framework.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", framework.PodStartTimeout.Seconds(), testname)
}
|
package waveform
import (
"fmt"
"image/color"
"testing"
)
// TestOptionsError verifies that the format of OptionsError.Error does
// not change: it must always be "<option>: <reason>".
func TestOptionsError(t *testing.T) {
	cases := []struct {
		option string
		reason string
	}{
		{option: "foo", reason: "bar"},
		{option: "baz", reason: "qux"},
		{option: "one", reason: "two"},
	}

	for _, c := range cases {
		err := &OptionsError{
			Option: c.option,
			Reason: c.reason,
		}

		want := fmt.Sprintf("%s: %s", c.option, c.reason)
		if got := err.Error(); got != want {
			t.Fatalf("unexpected Error string: %v", got)
		}
	}
}
// TestOptionColorsOK verifies that Colors returns no error when all three
// colors are non-nil.
func TestOptionColorsOK(t *testing.T) {
	opt := Colors(color.Black, color.Black, color.Black)
	testWaveformOptionFunc(t, opt, nil)
}
// TestOptionColorsNilForeground verifies that Colors rejects a nil
// foreground color with errColorsNilForeground.
func TestOptionColorsNilForeground(t *testing.T) {
	opt := Colors(nil, color.Black, color.Black)
	testWaveformOptionFunc(t, opt, errColorsNilForeground)
}
// TestOptionColorsNilBackground verifies that Colors rejects a nil
// background color with errColorsNilBackground.
func TestOptionColorsNilBackground(t *testing.T) {
	opt := Colors(color.Black, nil, color.Black)
	testWaveformOptionFunc(t, opt, errColorsNilBackground)
}
// TestOptionFunctionOK verifies that Function accepts a valid
// SampleReduceFunc without error.
func TestOptionFunctionOK(t *testing.T) {
	opt := Function(RMSF64Samples)
	testWaveformOptionFunc(t, opt, nil)
}
// TestOptionFunctionNil verifies that Function rejects a nil
// SampleReduceFunc with errFunctionNil.
func TestOptionFunctionNil(t *testing.T) {
	opt := Function(nil)
	testWaveformOptionFunc(t, opt, errFunctionNil)
}
// TestOptionResolutionOK verifies that Resolution accepts a positive value
// without error.
func TestOptionResolutionOK(t *testing.T) {
	opt := Resolution(1)
	testWaveformOptionFunc(t, opt, nil)
}
// TestOptionResolutionZero verifies that Resolution rejects the value 0
// with errResolutionZero.
func TestOptionResolutionZero(t *testing.T) {
	opt := Resolution(0)
	testWaveformOptionFunc(t, opt, errResolutionZero)
}
// TestOptionScaleOK verifies that Scale accepts positive X and Y values
// without error.
func TestOptionScaleOK(t *testing.T) {
	opt := Scale(1, 1)
	testWaveformOptionFunc(t, opt, nil)
}
// TestOptionScaleXZero verifies that Scale rejects an X value of 0 with
// errScaleXZero.
func TestOptionScaleXZero(t *testing.T) {
	opt := Scale(0, 1)
	testWaveformOptionFunc(t, opt, errScaleXZero)
}
// TestOptionScaleYZero verifies that Scale rejects a Y value of 0 with
// errScaleYZero.
func TestOptionScaleYZero(t *testing.T) {
	opt := Scale(1, 0)
	testWaveformOptionFunc(t, opt, errScaleYZero)
}
// TestOptionScaleClippingOK verifies that ScaleClipping returns no error.
func TestOptionScaleClippingOK(t *testing.T) {
	opt := ScaleClipping()
	testWaveformOptionFunc(t, opt, nil)
}
// TestOptionSharpnessOK verifies that Sharpness accepts a zero value
// without error.
func TestOptionSharpnessOK(t *testing.T) {
	opt := Sharpness(0)
	testWaveformOptionFunc(t, opt, nil)
}
// TestWaveformSetColors verifies that Waveform.SetColors stores the supplied
// foreground, background, and alternate colors on the struct.
func TestWaveformSetColors(t *testing.T) {
	fg, bg, alt := color.Black, color.White, color.White

	// Apply the colors to a zero-valued Waveform.
	w := &Waveform{}
	if err := w.SetColors(fg, bg, alt); err != nil {
		t.Fatal(err)
	}

	// Each struct member must hold exactly the color passed in.
	if w.fg != fg {
		t.Fatalf("unexpected foreground color: %v != %v", w.fg, fg)
	}
	if w.bg != bg {
		t.Fatalf("unexpected background color: %v != %v", w.bg, bg)
	}
	if w.alt != alt {
		t.Fatalf("unexpected alternate color: %v != %v", w.alt, alt)
	}
}
// TestWaveformSetFunction verifies that Waveform.SetFunction stores a
// non-nil sample reduction function.
func TestWaveformSetFunction(t *testing.T) {
	w := new(Waveform)
	err := w.SetFunction(RMSF64Samples)
	if err != nil {
		t.Fatal(err)
	}
	// The function member must now be set
	if w.function == nil {
		t.Fatalf("SetFunction failed, nil function member")
	}
}
// TestWaveformSetResolution verifies that Waveform.SetResolution stores
// the provided resolution value.
func TestWaveformSetResolution(t *testing.T) {
	const res uint = 1
	w := new(Waveform)
	if err := w.SetResolution(res); err != nil {
		t.Fatal(err)
	}
	if w.resolution != res {
		t.Fatalf("unexpected resolution: %v != %v", w.resolution, res)
	}
}
// TestWaveformSetScale verifies that Waveform.SetScale stores the
// provided X and Y scaling factors.
func TestWaveformSetScale(t *testing.T) {
	const x, y uint = 1, 1
	w := new(Waveform)
	if err := w.SetScale(x, y); err != nil {
		t.Fatal(err)
	}
	if w.scaleX != x {
		t.Fatalf("unexpected scale X: %v != %v", w.scaleX, x)
	}
	if w.scaleY != y {
		t.Fatalf("unexpected scale Y: %v != %v", w.scaleY, y)
	}
}
// TestWaveformSetScaleClipping verifies that Waveform.SetScaleClipping
// enables the scaleClipping flag.
func TestWaveformSetScaleClipping(t *testing.T) {
	w := new(Waveform)
	if err := w.SetScaleClipping(); err != nil {
		t.Fatal(err)
	}
	if !w.scaleClipping {
		t.Fatalf("SetScaleClipping failed, false scaleClipping member")
	}
}
// TestWaveformSetSharpness verifies that Waveform.SetSharpness stores
// the provided sharpness value.
func TestWaveformSetSharpness(t *testing.T) {
	const sharpness uint = 1
	w := new(Waveform)
	if err := w.SetSharpness(sharpness); err != nil {
		t.Fatal(err)
	}
	if w.sharpness != sharpness {
		t.Fatalf("unexpected sharpness: %v != %v", w.sharpness, sharpness)
	}
}
// testWaveformOptionFunc applies fn to a fresh Waveform via New and
// checks that the returned error matches err exactly.
func testWaveformOptionFunc(t *testing.T, fn OptionsFunc, err error) {
	_, wErr := New(nil, fn)
	if wErr != err {
		t.Fatalf("unexpected error: %v != %v", wErr, err)
	}
}
option_test: add TestWaveformSetOptionsNil
package waveform
import (
"fmt"
"image/color"
"testing"
)
// TestOptionsError verifies that OptionsError.Error renders as
// "option: reason" and that this format does not change.
func TestOptionsError(t *testing.T) {
	tests := []struct {
		option string
		reason string
	}{
		{"foo", "bar"},
		{"baz", "qux"},
		{"one", "two"},
	}
	for _, tt := range tests {
		e := &OptionsError{Option: tt.option, Reason: tt.reason}
		// The documented format is "option: reason"
		want := fmt.Sprintf("%s: %s", tt.option, tt.reason)
		if got := e.Error(); got != want {
			t.Fatalf("unexpected Error string: %v", got)
		}
	}
}
// TestOptionColorsOK verifies that Colors returns no error with acceptable input.
func TestOptionColorsOK(t *testing.T) {
testWaveformOptionFunc(t, Colors(color.Black, color.Black, color.Black), nil)
}
// TestOptionColorsNilForeground verifies that Colors does not accept a nil
// foreground color.
func TestOptionColorsNilForeground(t *testing.T) {
testWaveformOptionFunc(t, Colors(nil, color.Black, color.Black), errColorsNilForeground)
}
// TestOptionColorsNilBackground verifies that Colors does not accept a nil
// backround color.
func TestOptionColorsNilBackground(t *testing.T) {
testWaveformOptionFunc(t, Colors(color.Black, nil, color.Black), errColorsNilBackground)
}
// TestOptionFunctionOK verifies that Function returns no error with acceptable input.
func TestOptionFunctionOK(t *testing.T) {
testWaveformOptionFunc(t, Function(RMSF64Samples), nil)
}
// TestOptionFunctionNil verifies that Function does not accept a nil SampleReduceFunc.
func TestOptionFunctionNil(t *testing.T) {
testWaveformOptionFunc(t, Function(nil), errFunctionNil)
}
// TestOptionResolutionOK verifies that Resolution returns no error with acceptable input.
func TestOptionResolutionOK(t *testing.T) {
testWaveformOptionFunc(t, Resolution(1), nil)
}
// TestOptionResolutionZero verifies that Resolution does not accept integer 0.
func TestOptionResolutionZero(t *testing.T) {
testWaveformOptionFunc(t, Resolution(0), errResolutionZero)
}
// TestOptionScaleOK verifies that Scale returns no error with acceptable input.
func TestOptionScaleOK(t *testing.T) {
testWaveformOptionFunc(t, Scale(1, 1), nil)
}
// TestOptionScaleXZero verifies that Scale does not accept an X value integer 0.
func TestOptionScaleXZero(t *testing.T) {
testWaveformOptionFunc(t, Scale(0, 1), errScaleXZero)
}
// TestOptionScaleYZero verifies that Scale does not accept an Y value integer 0.
func TestOptionScaleYZero(t *testing.T) {
testWaveformOptionFunc(t, Scale(1, 0), errScaleYZero)
}
// TestOptionScaleClippingOK verifies that ScaleClipping returns no error.
func TestOptionScaleClippingOK(t *testing.T) {
testWaveformOptionFunc(t, ScaleClipping(), nil)
}
// TestOptionSharpnessOK verifies that Sharpness returns no error.
func TestOptionSharpnessOK(t *testing.T) {
testWaveformOptionFunc(t, Sharpness(0), nil)
}
// TestWaveformSetOptionsNil verifies that Waveform.SetOptions ignores any
// nil OptionsFunc arguments.
func TestWaveformSetOptionsNil(t *testing.T) {
testWaveformOptionFunc(t, nil, nil)
}
// TestWaveformSetColors verifies that Waveform.SetColors stores the
// provided foreground, background, and alternate colors.
func TestWaveformSetColors(t *testing.T) {
	// Input colors to apply
	fg, bg, alt := color.Black, color.White, color.White
	// Apply parameters to a zero-value Waveform
	w := new(Waveform)
	err := w.SetColors(fg, bg, alt)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm each struct member was assigned
	if w.fg != fg {
		t.Fatalf("unexpected foreground color: %v != %v", w.fg, fg)
	}
	if w.bg != bg {
		t.Fatalf("unexpected background color: %v != %v", w.bg, bg)
	}
	if w.alt != alt {
		t.Fatalf("unexpected alternate color: %v != %v", w.alt, alt)
	}
}
// TestWaveformSetFunction verifies that Waveform.SetFunction stores a
// non-nil sample reduction function.
func TestWaveformSetFunction(t *testing.T) {
	w := new(Waveform)
	err := w.SetFunction(RMSF64Samples)
	if err != nil {
		t.Fatal(err)
	}
	// The function member must now be set
	if w.function == nil {
		t.Fatalf("SetFunction failed, nil function member")
	}
}
// TestWaveformSetResolution verifies that Waveform.SetResolution stores
// the provided resolution value.
func TestWaveformSetResolution(t *testing.T) {
	const res uint = 1
	w := new(Waveform)
	if err := w.SetResolution(res); err != nil {
		t.Fatal(err)
	}
	if w.resolution != res {
		t.Fatalf("unexpected resolution: %v != %v", w.resolution, res)
	}
}
// TestWaveformSetScale verifies that Waveform.SetScale stores the
// provided X and Y scaling factors.
func TestWaveformSetScale(t *testing.T) {
	const x, y uint = 1, 1
	w := new(Waveform)
	if err := w.SetScale(x, y); err != nil {
		t.Fatal(err)
	}
	if w.scaleX != x {
		t.Fatalf("unexpected scale X: %v != %v", w.scaleX, x)
	}
	if w.scaleY != y {
		t.Fatalf("unexpected scale Y: %v != %v", w.scaleY, y)
	}
}
// TestWaveformSetScaleClipping verifies that Waveform.SetScaleClipping
// enables the scaleClipping flag.
func TestWaveformSetScaleClipping(t *testing.T) {
	w := new(Waveform)
	if err := w.SetScaleClipping(); err != nil {
		t.Fatal(err)
	}
	if !w.scaleClipping {
		t.Fatalf("SetScaleClipping failed, false scaleClipping member")
	}
}
// TestWaveformSetSharpness verifies that Waveform.SetSharpness stores
// the provided sharpness value.
func TestWaveformSetSharpness(t *testing.T) {
	const sharpness uint = 1
	w := new(Waveform)
	if err := w.SetSharpness(sharpness); err != nil {
		t.Fatal(err)
	}
	if w.sharpness != sharpness {
		t.Fatalf("unexpected sharpness: %v != %v", w.sharpness, sharpness)
	}
}
// testWaveformOptionFunc applies fn to a fresh Waveform via New and
// checks that the returned error matches err exactly.
func testWaveformOptionFunc(t *testing.T, fn OptionsFunc, err error) {
	_, wErr := New(nil, fn)
	if wErr != err {
		t.Fatalf("unexpected error: %v != %v", wErr, err)
	}
}
|
package osc
import (
"bufio"
"bytes"
"io"
"net"
"reflect"
"sync"
"testing"
"time"
)
// TestMessage_Append verifies that NewMessage records the OSC address and
// that Append grows the argument list.
func TestMessage_Append(t *testing.T) {
	const oscAddress = "/address"
	message := NewMessage(oscAddress)
	if message.Address != oscAddress {
		t.Errorf("OSC address should be \"%s\" and is \"%s\"", oscAddress, message.Address)
	}
	// Append one argument of each basic kind
	for _, arg := range []interface{}{"string argument", 123456789, true} {
		message.Append(arg)
	}
	if message.CountArguments() != 3 {
		t.Errorf("Number of arguments should be %d and is %d", 3, message.CountArguments())
	}
}
// TestMessage_Equals verifies that two messages with identical addresses
// and arguments compare equal.
func TestMessage_Equals(t *testing.T) {
	msg1, msg2 := NewMessage("/address"), NewMessage("/address")
	for _, m := range []*Message{msg1, msg2} {
		m.Append(1234)
		m.Append("test string")
	}
	if !msg1.Equals(msg2) {
		t.Error("Messages should be equal")
	}
}
// TestMessage_TypeTags verifies the type tag string produced for each
// supported argument type, and that unsupported inputs return an error.
func TestMessage_TypeTags(t *testing.T) {
	cases := []struct {
		desc string
		msg  *Message
		tags string
		ok   bool
	}{
		{"addr_only", NewMessage("/"), ",", true},
		{"nil", NewMessage("/", nil), ",N", true},
		{"bool_true", NewMessage("/", true), ",T", true},
		{"bool_false", NewMessage("/", false), ",F", true},
		{"int32", NewMessage("/", int32(1)), ",i", true},
		{"int64", NewMessage("/", int64(2)), ",h", true},
		{"float32", NewMessage("/", float32(3.0)), ",f", true},
		{"float64", NewMessage("/", float64(4.0)), ",d", true},
		{"string", NewMessage("/", "5"), ",s", true},
		{"[]byte", NewMessage("/", []byte{'6'}), ",b", true},
		{"two_args", NewMessage("/", "123", int32(456)), ",si", true},
		{"invalid_msg", nil, "", false},
		{"invalid_arg", NewMessage("/foo/bar", 789), "", false},
	}
	for _, tc := range cases {
		tags, err := tc.msg.TypeTags()
		switch {
		case err != nil && tc.ok:
			t.Errorf("%s: TypeTags() unexpected error: %s", tc.desc, err)
			continue
		case err == nil && !tc.ok:
			t.Errorf("%s: TypeTags() expected an error", tc.desc)
			continue
		case !tc.ok:
			continue
		}
		if tags != tc.tags {
			t.Errorf("%s: TypeTags() = '%s', want = '%s'", tc.desc, tags, tc.tags)
		}
	}
}
// TestMessage_String verifies the human-readable rendering of messages,
// including the nil-message case.
func TestMessage_String(t *testing.T) {
	cases := []struct {
		desc string
		msg  *Message
		str  string
	}{
		{"nil", nil, ""},
		{"addr_only", NewMessage("/foo/bar"), "/foo/bar ,"},
		{"one_addr", NewMessage("/foo/bar", "123"), "/foo/bar ,s 123"},
		{"two_args", NewMessage("/foo/bar", "123", int32(456)), "/foo/bar ,si 123 456"},
	}
	for _, tc := range cases {
		if got := tc.msg.String(); got != tc.str {
			t.Errorf("%s: String() = '%s', want = '%s'", tc.desc, got, tc.str)
		}
	}
}
// TestAddMsgHandler verifies that a handler can be registered for a
// valid OSC address.
func TestAddMsgHandler(t *testing.T) {
	d := NewStandardDispatcher()
	if err := d.AddMsgHandler("/address/test", func(msg *Message) {}); err != nil {
		t.Error("Expected that OSC address '/address/test' is valid")
	}
}
// TestAddMsgHandlerWithInvalidAddress verifies that registering a handler
// for an address containing wildcard characters fails.
func TestAddMsgHandlerWithInvalidAddress(t *testing.T) {
	d := NewStandardDispatcher()
	if err := d.AddMsgHandler("/address*/test", func(msg *Message) {}); err == nil {
		t.Error("Expected error with '/address*/test'")
	}
}
// TestServerMessageDispatching starts an OSC server with a handler for
// "/address/test", sends it a single int32 message from a client, and
// verifies the handler receives the expected argument.
func TestServerMessageDispatching(t *testing.T) {
	finish := make(chan bool)
	start := make(chan bool)
	done := sync.WaitGroup{}
	done.Add(2)
	// Start the OSC server in a new go-routine
	go func() {
		conn, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			// t.Fatal must not be called from a non-test goroutine;
			// report the error and bail out instead.
			t.Error(err)
			return
		}
		defer conn.Close()
		d := NewStandardDispatcher()
		err = d.AddMsgHandler("/address/test", func(msg *Message) {
			// string(int) yields a rune, not decimal digits; use %d.
			if len(msg.Arguments) != 1 {
				t.Errorf("Argument length should be 1 and is: %d", len(msg.Arguments))
			}
			if msg.Arguments[0].(int32) != 1122 {
				t.Errorf("Argument should be 1122 and is: %d", msg.Arguments[0].(int32))
			}
			// Stop OSC server
			conn.Close()
			finish <- true
		})
		if err != nil {
			t.Error("Error adding message handler")
		}
		server := &Server{Addr: "localhost:6677", Dispatcher: d}
		start <- true
		server.Serve(conn)
	}()
	go func() {
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-start:
			time.Sleep(500 * time.Millisecond)
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test")
			msg.Append(int32(1122))
			// Surface send failures instead of silently dropping them.
			if err := client.Send(msg); err != nil {
				t.Errorf("client send error: %v", err)
			}
		}
		done.Done()
		select {
		case <-timeout:
		case <-finish:
		}
		done.Done()
	}()
	done.Wait()
}
// TestServerMessageReceiving starts a server, sends it a two-argument
// message, and verifies both int32 arguments arrive intact.
func TestServerMessageReceiving(t *testing.T) {
	finish := make(chan bool)
	start := make(chan bool)
	done := sync.WaitGroup{}
	done.Add(2)
	// Start the server in a go-routine
	go func() {
		server := &Server{}
		c, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			// t.Fatal must not be called from a non-test goroutine.
			t.Error(err)
			return
		}
		defer c.Close()
		// Start the client
		start <- true
		packet, err := server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if packet == nil {
			t.Error("nil packet")
			return
		}
		msg := packet.(*Message)
		if msg.CountArguments() != 2 {
			t.Errorf("Argument length should be 2 and is: %d\n", msg.CountArguments())
		}
		// string(int32) yields a rune, not decimal digits; format with %d.
		if msg.Arguments[0].(int32) != 1122 {
			t.Errorf("Argument should be 1122 and is: %d", msg.Arguments[0].(int32))
		}
		if msg.Arguments[1].(int32) != 3344 {
			t.Errorf("Argument should be 3344 and is: %d", msg.Arguments[1].(int32))
		}
		c.Close()
		finish <- true
	}()
	go func() {
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-start:
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test")
			msg.Append(int32(1122))
			msg.Append(int32(3344))
			time.Sleep(500 * time.Millisecond)
			// Surface send failures instead of silently dropping them.
			if err := client.Send(msg); err != nil {
				t.Errorf("client send error: %v", err)
			}
		}
		done.Done()
		select {
		case <-timeout:
		case <-finish:
		}
		done.Done()
	}()
	done.Wait()
}
// TestReadTimeout verifies that Server.ReceivePacket honors ReadTimeout:
// the first packet arrives in time, the delayed second packet first causes
// a timeout error, then is delivered by a subsequent call.
func TestReadTimeout(t *testing.T) {
	start := make(chan bool)
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			// t.Fatal must not be called from a non-test goroutine;
			// t.Error marks the test failed without calling Goexit.
			t.Error("timed out")
		case <-start:
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test1")
			if err := client.Send(msg); err != nil {
				t.Error(err)
				return
			}
			// Delay the second packet past the server's 100ms ReadTimeout
			time.Sleep(150 * time.Millisecond)
			msg = NewMessage("/address/test2")
			if err := client.Send(msg); err != nil {
				t.Error(err)
				return
			}
		}
	}()
	go func() {
		defer wg.Done()
		server := &Server{ReadTimeout: 100 * time.Millisecond}
		c, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			t.Error(err)
			return
		}
		defer c.Close()
		start <- true
		p, err := server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if got, want := p.(*Message).Address, "/address/test1"; got != want {
			t.Errorf("wrong address; got = %s, want = %s", got, want)
			return
		}
		// Second receive should time out since client is delayed 150 milliseconds
		if _, err = server.ReceivePacket(c); err == nil {
			t.Errorf("expected error")
			return
		}
		// Next receive should get it
		p, err = server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if got, want := p.(*Message).Address, "/address/test2"; got != want {
			t.Errorf("wrong address; got = %s, want = %s", got, want)
			return
		}
	}()
	wg.Wait()
}
// TestReadPaddedString verifies that readPaddedString consumes a
// null-terminated, 4-byte-padded OSC string and reports bytes consumed.
func TestReadPaddedString(t *testing.T) {
	cases := []struct {
		buf []byte // input buffer
		n   int    // bytes needed
		s   string // resulting string
		e   error  // expected error
	}{
		{[]byte{'t', 'e', 's', 't', 'S', 't', 'r', 'i', 'n', 'g', 0, 0}, 12, "testString", nil},
		{[]byte{'t', 'e', 's', 't', 'e', 'r', 's', 0}, 8, "testers", nil},
		{[]byte{'t', 'e', 's', 't', 's', 0, 0, 0}, 8, "tests", nil},
		{[]byte{'t', 'e', 's', 't', 0, 0, 0, 0}, 8, "test", nil},
		{[]byte{}, 0, "", io.EOF},
		{[]byte{'t', 'e', 's', 0}, 4, "tes", nil},             // OSC uses null terminated strings
		{[]byte{'t', 'e', 's', 0, 0, 0, 0, 0}, 4, "tes", nil}, // Additional nulls should be ignored
		{[]byte{'t', 'e', 's', 0, 0, 0}, 4, "tes", nil},       // Whether or not the nulls fall on a 4 byte padding boundary
		{[]byte{'t', 'e', 's', 't'}, 0, "", io.EOF},           // no trailing null byte means failure
	}
	for _, tc := range cases {
		r := bufio.NewReader(bytes.NewBuffer(tc.buf))
		s, n, err := readPaddedString(r)
		if err != tc.e {
			t.Errorf("%s: Unexpected error reading padded string; got = %s, want = %s", tc.s, err, tc.e)
		}
		if n != tc.n {
			t.Errorf("%s: Bytes needed don't match; got = %d, want = %d", tc.s, n, tc.n)
		}
		if s != tc.s {
			t.Errorf("%s: Strings don't match; got = %s, want = %s", tc.s, s, tc.s)
		}
	}
}
// TestWritePaddedString verifies that writePaddedString emits a
// null-terminated string padded to a 4-byte boundary and returns the
// number of bytes written.
func TestWritePaddedString(t *testing.T) {
	for _, tt := range []struct {
		s   string // string
		buf []byte // resulting buffer
		n   int    // bytes expected
	}{
		{"testString", []byte{'t', 'e', 's', 't', 'S', 't', 'r', 'i', 'n', 'g', 0, 0}, 12},
		{"testers", []byte{'t', 'e', 's', 't', 'e', 'r', 's', 0}, 8},
		{"tests", []byte{'t', 'e', 's', 't', 's', 0, 0, 0}, 8},
		{"test", []byte{'t', 'e', 's', 't', 0, 0, 0, 0}, 8},
		{"tes", []byte{'t', 'e', 's', 0}, 4},
		{"tes\x00", []byte{'t', 'e', 's', 0}, 4},             // Don't add a second null terminator if one is already present
		{"tes\x00\x00\x00\x00\x00", []byte{'t', 'e', 's', 0}, 4}, // Skip extra nulls
		{"tes\x00\x00\x00", []byte{'t', 'e', 's', 0}, 4},     // Even if they don't fall on a 4 byte padding boundary
		{"", []byte{0, 0, 0, 0}, 4},                          // OSC uses null terminated strings, padded to the 4 byte boundary
	} {
		bytesBuffer := bytes.NewBuffer([]byte{})
		n, err := writePaddedString(tt.s, bytesBuffer)
		if err != nil {
			// Pass the error as a value; err.Error() as a format string trips go vet.
			t.Error(err)
		}
		if got, want := n, tt.n; got != want {
			t.Errorf("%s: Count of bytes written don't match; got = %d, want = %d", tt.s, got, want)
		}
		// BUG FIX: the comparison was inverted — it previously reported an
		// error when the buffers DID match. Fail only on a mismatch.
		if got, want := bytesBuffer.Bytes(), tt.buf; !bytes.Equal(got, want) {
			t.Errorf("%s: Buffers don't match; got = %s, want = %s", tt.s, got, want)
		}
	}
}
// TestPadBytesNeeded verifies the number of pad bytes required to reach
// the next 4-byte boundary for a range of input lengths.
func TestPadBytesNeeded(t *testing.T) {
	for _, tc := range []struct {
		in   int // element length
		want int // pad bytes expected
	}{
		{4, 0}, {3, 1}, {2, 2}, {1, 3}, {0, 0},
		{5, 3}, {7, 1}, {32, 0}, {63, 1}, {10, 2},
	} {
		if n := padBytesNeeded(tc.in); n != tc.want {
			t.Errorf("Number of pad bytes should be %d and is: %d", tc.want, n)
		}
	}
}
// TestTypeTagsString verifies the aggregate type tag string for a message
// holding an int32, true, and false.
func TestTypeTagsString(t *testing.T) {
	msg := NewMessage("/some/address")
	for _, arg := range []interface{}{int32(100), true, false} {
		msg.Append(arg)
	}
	typeTags, err := msg.TypeTags()
	if err != nil {
		t.Error(err.Error())
	}
	if typeTags != ",iTF" {
		t.Errorf("Type tag string should be ',iTF' and is: %s", typeTags)
	}
}
// TestClientSetLocalAddr verifies that SetLocalAddr resolves and stores
// the client's local UDP address.
func TestClientSetLocalAddr(t *testing.T) {
	client := NewClient("localhost", 8967)
	if err := client.SetLocalAddr("localhost", 41789); err != nil {
		t.Error(err.Error())
	}
	const expectedAddr = "127.0.0.1:41789"
	if got := client.laddr.String(); got != expectedAddr {
		t.Errorf("Expected laddr to be %s but was %s", expectedAddr, got)
	}
}
// TestParsePacket verifies that ParsePacket decodes raw OSC data into a
// Packet equivalent to the expected message, comparing marshaled bytes.
func TestParsePacket(t *testing.T) {
	for _, tc := range []struct {
		desc string
		msg  string
		pkt  Packet
		ok   bool
	}{
		{"no_args", "/a/b/c" + nulls(2) + "," + nulls(3), makePacket("/a/b/c", nil), true},
		{"string_arg", "/d/e/f" + nulls(2) + ",s" + nulls(2) + "foo" + nulls(1), makePacket("/d/e/f", []string{"foo"}), true},
		{"empty", "", nil, false},
	} {
		pkt, err := ParsePacket(tc.msg)
		if err != nil && tc.ok {
			t.Errorf("%s: ParsePacket() returned unexpected error; %s", tc.desc, err)
		}
		if err == nil && !tc.ok {
			t.Errorf("%s: ParsePacket() expected error", tc.desc)
		}
		if !tc.ok {
			continue
		}
		gotBytes, err := pkt.MarshalBinary()
		if err != nil {
			t.Errorf("%s: failure converting pkt to byte array; %s", tc.desc, err)
			continue
		}
		wantBytes, err := tc.pkt.MarshalBinary()
		if err != nil {
			t.Errorf("%s: failure converting tt.pkt to byte array; %s", tc.desc, err)
			continue
		}
		if !reflect.DeepEqual(gotBytes, wantBytes) {
			t.Errorf("%s: ParsePacket() as bytes = '%s', want = '%s'", tc.desc, gotBytes, wantBytes)
		}
	}
}
// TestOscMessageMatch verifies OSC address pattern matching, including
// wildcard and alternative ({a,b}) patterns.
func TestOscMessageMatch(t *testing.T) {
	for _, tc := range []struct {
		desc        string
		addr        string
		addrPattern string
		want        bool
	}{
		{"match everything", "*", "/a/b", true},
		{"don't match", "/a/b", "/a", false},
		{"match alternatives", "/a/{foo,bar}", "/a/foo", true},
		{"don't match if address is not part of the alternatives", "/a/{foo,bar}", "/a/bob", false},
	} {
		msg := NewMessage(tc.addr)
		if got := msg.Match(tc.addrPattern); got != tc.want {
			t.Errorf("%s: msg.Match('%s') = '%t', want = '%t'", tc.desc, tc.addrPattern, got, tc.want)
		}
	}
}
// zero is a single NUL byte as a string.
const zero = "\x00"

// nulls returns a string consisting of i NUL bytes (empty for i <= 0).
func nulls(i int) string {
	var b []byte
	for j := 0; j < i; j++ {
		b = append(b, 0)
	}
	return string(b)
}
// makePacket builds a Message Packet for addr carrying the given string
// arguments (test helper).
func makePacket(addr string, args []string) Packet {
	msg := NewMessage(addr)
	for _, a := range args {
		msg.Append(a)
	}
	return msg
}
Use full escaped strings for more helpful troubleshooting
package osc
import (
"bufio"
"bytes"
"io"
"net"
"reflect"
"sync"
"testing"
"time"
)
// TestMessage_Append verifies that NewMessage records the OSC address and
// that Append grows the argument list.
func TestMessage_Append(t *testing.T) {
	const oscAddress = "/address"
	message := NewMessage(oscAddress)
	if message.Address != oscAddress {
		t.Errorf("OSC address should be \"%s\" and is \"%s\"", oscAddress, message.Address)
	}
	// Append one argument of each basic kind
	for _, arg := range []interface{}{"string argument", 123456789, true} {
		message.Append(arg)
	}
	if message.CountArguments() != 3 {
		t.Errorf("Number of arguments should be %d and is %d", 3, message.CountArguments())
	}
}
// TestMessage_Equals verifies that two messages with identical addresses
// and arguments compare equal.
func TestMessage_Equals(t *testing.T) {
	msg1, msg2 := NewMessage("/address"), NewMessage("/address")
	for _, m := range []*Message{msg1, msg2} {
		m.Append(1234)
		m.Append("test string")
	}
	if !msg1.Equals(msg2) {
		t.Error("Messages should be equal")
	}
}
// TestMessage_TypeTags verifies the type tag string produced for each
// supported argument type, and that unsupported inputs return an error.
func TestMessage_TypeTags(t *testing.T) {
	cases := []struct {
		desc string
		msg  *Message
		tags string
		ok   bool
	}{
		{"addr_only", NewMessage("/"), ",", true},
		{"nil", NewMessage("/", nil), ",N", true},
		{"bool_true", NewMessage("/", true), ",T", true},
		{"bool_false", NewMessage("/", false), ",F", true},
		{"int32", NewMessage("/", int32(1)), ",i", true},
		{"int64", NewMessage("/", int64(2)), ",h", true},
		{"float32", NewMessage("/", float32(3.0)), ",f", true},
		{"float64", NewMessage("/", float64(4.0)), ",d", true},
		{"string", NewMessage("/", "5"), ",s", true},
		{"[]byte", NewMessage("/", []byte{'6'}), ",b", true},
		{"two_args", NewMessage("/", "123", int32(456)), ",si", true},
		{"invalid_msg", nil, "", false},
		{"invalid_arg", NewMessage("/foo/bar", 789), "", false},
	}
	for _, tc := range cases {
		tags, err := tc.msg.TypeTags()
		switch {
		case err != nil && tc.ok:
			t.Errorf("%s: TypeTags() unexpected error: %s", tc.desc, err)
			continue
		case err == nil && !tc.ok:
			t.Errorf("%s: TypeTags() expected an error", tc.desc)
			continue
		case !tc.ok:
			continue
		}
		if tags != tc.tags {
			t.Errorf("%s: TypeTags() = '%s', want = '%s'", tc.desc, tags, tc.tags)
		}
	}
}
// TestMessage_String verifies the human-readable rendering of messages,
// including the nil-message case.
func TestMessage_String(t *testing.T) {
	cases := []struct {
		desc string
		msg  *Message
		str  string
	}{
		{"nil", nil, ""},
		{"addr_only", NewMessage("/foo/bar"), "/foo/bar ,"},
		{"one_addr", NewMessage("/foo/bar", "123"), "/foo/bar ,s 123"},
		{"two_args", NewMessage("/foo/bar", "123", int32(456)), "/foo/bar ,si 123 456"},
	}
	for _, tc := range cases {
		if got := tc.msg.String(); got != tc.str {
			t.Errorf("%s: String() = '%s', want = '%s'", tc.desc, got, tc.str)
		}
	}
}
// TestAddMsgHandler verifies that a handler can be registered for a
// valid OSC address.
func TestAddMsgHandler(t *testing.T) {
	d := NewStandardDispatcher()
	if err := d.AddMsgHandler("/address/test", func(msg *Message) {}); err != nil {
		t.Error("Expected that OSC address '/address/test' is valid")
	}
}
// TestAddMsgHandlerWithInvalidAddress verifies that registering a handler
// for an address containing wildcard characters fails.
func TestAddMsgHandlerWithInvalidAddress(t *testing.T) {
	d := NewStandardDispatcher()
	if err := d.AddMsgHandler("/address*/test", func(msg *Message) {}); err == nil {
		t.Error("Expected error with '/address*/test'")
	}
}
// TestServerMessageDispatching starts an OSC server with a handler for
// "/address/test", sends it a single int32 message from a client, and
// verifies the handler receives the expected argument.
func TestServerMessageDispatching(t *testing.T) {
	finish := make(chan bool)
	start := make(chan bool)
	done := sync.WaitGroup{}
	done.Add(2)
	// Start the OSC server in a new go-routine
	go func() {
		conn, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			// t.Fatal must not be called from a non-test goroutine;
			// report the error and bail out instead.
			t.Error(err)
			return
		}
		defer conn.Close()
		d := NewStandardDispatcher()
		err = d.AddMsgHandler("/address/test", func(msg *Message) {
			// string(int) yields a rune, not decimal digits; use %d.
			if len(msg.Arguments) != 1 {
				t.Errorf("Argument length should be 1 and is: %d", len(msg.Arguments))
			}
			if msg.Arguments[0].(int32) != 1122 {
				t.Errorf("Argument should be 1122 and is: %d", msg.Arguments[0].(int32))
			}
			// Stop OSC server
			conn.Close()
			finish <- true
		})
		if err != nil {
			t.Error("Error adding message handler")
		}
		server := &Server{Addr: "localhost:6677", Dispatcher: d}
		start <- true
		server.Serve(conn)
	}()
	go func() {
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-start:
			time.Sleep(500 * time.Millisecond)
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test")
			msg.Append(int32(1122))
			// Surface send failures instead of silently dropping them.
			if err := client.Send(msg); err != nil {
				t.Errorf("client send error: %v", err)
			}
		}
		done.Done()
		select {
		case <-timeout:
		case <-finish:
		}
		done.Done()
	}()
	done.Wait()
}
// TestServerMessageReceiving starts a server, sends it a two-argument
// message, and verifies both int32 arguments arrive intact.
func TestServerMessageReceiving(t *testing.T) {
	finish := make(chan bool)
	start := make(chan bool)
	done := sync.WaitGroup{}
	done.Add(2)
	// Start the server in a go-routine
	go func() {
		server := &Server{}
		c, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			// t.Fatal must not be called from a non-test goroutine.
			t.Error(err)
			return
		}
		defer c.Close()
		// Start the client
		start <- true
		packet, err := server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if packet == nil {
			t.Error("nil packet")
			return
		}
		msg := packet.(*Message)
		if msg.CountArguments() != 2 {
			t.Errorf("Argument length should be 2 and is: %d\n", msg.CountArguments())
		}
		// string(int32) yields a rune, not decimal digits; format with %d.
		if msg.Arguments[0].(int32) != 1122 {
			t.Errorf("Argument should be 1122 and is: %d", msg.Arguments[0].(int32))
		}
		if msg.Arguments[1].(int32) != 3344 {
			t.Errorf("Argument should be 3344 and is: %d", msg.Arguments[1].(int32))
		}
		c.Close()
		finish <- true
	}()
	go func() {
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-start:
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test")
			msg.Append(int32(1122))
			msg.Append(int32(3344))
			time.Sleep(500 * time.Millisecond)
			// Surface send failures instead of silently dropping them.
			if err := client.Send(msg); err != nil {
				t.Errorf("client send error: %v", err)
			}
		}
		done.Done()
		select {
		case <-timeout:
		case <-finish:
		}
		done.Done()
	}()
	done.Wait()
}
// TestReadTimeout verifies that Server.ReceivePacket honors ReadTimeout:
// the first packet arrives in time, the delayed second packet first causes
// a timeout error, then is delivered by a subsequent call.
func TestReadTimeout(t *testing.T) {
	start := make(chan bool)
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			// t.Fatal must not be called from a non-test goroutine;
			// t.Error marks the test failed without calling Goexit.
			t.Error("timed out")
		case <-start:
			client := NewClient("localhost", 6677)
			msg := NewMessage("/address/test1")
			if err := client.Send(msg); err != nil {
				t.Error(err)
				return
			}
			// Delay the second packet past the server's 100ms ReadTimeout
			time.Sleep(150 * time.Millisecond)
			msg = NewMessage("/address/test2")
			if err := client.Send(msg); err != nil {
				t.Error(err)
				return
			}
		}
	}()
	go func() {
		defer wg.Done()
		server := &Server{ReadTimeout: 100 * time.Millisecond}
		c, err := net.ListenPacket("udp", "localhost:6677")
		if err != nil {
			t.Error(err)
			return
		}
		defer c.Close()
		start <- true
		p, err := server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if got, want := p.(*Message).Address, "/address/test1"; got != want {
			t.Errorf("wrong address; got = %s, want = %s", got, want)
			return
		}
		// Second receive should time out since client is delayed 150 milliseconds
		if _, err = server.ReceivePacket(c); err == nil {
			t.Errorf("expected error")
			return
		}
		// Next receive should get it
		p, err = server.ReceivePacket(c)
		if err != nil {
			t.Errorf("server error: %v", err)
			return
		}
		if got, want := p.(*Message).Address, "/address/test2"; got != want {
			t.Errorf("wrong address; got = %s, want = %s", got, want)
			return
		}
	}()
	wg.Wait()
}
// TestReadPaddedString verifies that readPaddedString consumes a
// null-terminated, 4-byte-padded OSC string and reports bytes consumed.
func TestReadPaddedString(t *testing.T) {
	cases := []struct {
		buf []byte // input buffer
		n   int    // bytes needed
		s   string // resulting string
		e   error  // expected error
	}{
		{[]byte{'t', 'e', 's', 't', 'S', 't', 'r', 'i', 'n', 'g', 0, 0}, 12, "testString", nil},
		{[]byte{'t', 'e', 's', 't', 'e', 'r', 's', 0}, 8, "testers", nil},
		{[]byte{'t', 'e', 's', 't', 's', 0, 0, 0}, 8, "tests", nil},
		{[]byte{'t', 'e', 's', 't', 0, 0, 0, 0}, 8, "test", nil},
		{[]byte{}, 0, "", io.EOF},
		{[]byte{'t', 'e', 's', 0}, 4, "tes", nil},             // OSC uses null terminated strings
		{[]byte{'t', 'e', 's', 0, 0, 0, 0, 0}, 4, "tes", nil}, // Additional nulls should be ignored
		{[]byte{'t', 'e', 's', 0, 0, 0}, 4, "tes", nil},       // Whether or not the nulls fall on a 4 byte padding boundary
		{[]byte{'t', 'e', 's', 't'}, 0, "", io.EOF},           // no trailing null byte means failure
	}
	for _, tc := range cases {
		r := bufio.NewReader(bytes.NewBuffer(tc.buf))
		s, n, err := readPaddedString(r)
		if err != tc.e {
			t.Errorf("%q: Unexpected error reading padded string; got = %q, want = %q", tc.s, err, tc.e)
		}
		if n != tc.n {
			t.Errorf("%q: Bytes needed don't match; got = %d, want = %d", tc.s, n, tc.n)
		}
		if s != tc.s {
			t.Errorf("%q: Strings don't match; got = %q, want = %q", tc.s, s, tc.s)
		}
	}
}
// TestWritePaddedString verifies that writePaddedString emits a
// null-terminated string padded to a 4-byte boundary and returns the
// number of bytes written.
func TestWritePaddedString(t *testing.T) {
	for _, tt := range []struct {
		s   string // string
		buf []byte // resulting buffer
		n   int    // bytes expected
	}{
		{"testString", []byte{'t', 'e', 's', 't', 'S', 't', 'r', 'i', 'n', 'g', 0, 0}, 12},
		{"testers", []byte{'t', 'e', 's', 't', 'e', 'r', 's', 0}, 8},
		{"tests", []byte{'t', 'e', 's', 't', 's', 0, 0, 0}, 8},
		{"test", []byte{'t', 'e', 's', 't', 0, 0, 0, 0}, 8},
		{"tes", []byte{'t', 'e', 's', 0}, 4},
		{"tes\x00", []byte{'t', 'e', 's', 0}, 4},             // Don't add a second null terminator if one is already present
		{"tes\x00\x00\x00\x00\x00", []byte{'t', 'e', 's', 0}, 4}, // Skip extra nulls
		{"tes\x00\x00\x00", []byte{'t', 'e', 's', 0}, 4},     // Even if they don't fall on a 4 byte padding boundary
		{"", []byte{0, 0, 0, 0}, 4},                          // OSC uses null terminated strings, padded to the 4 byte boundary
	} {
		bytesBuffer := bytes.NewBuffer([]byte{})
		n, err := writePaddedString(tt.s, bytesBuffer)
		if err != nil {
			// Pass the error as a value; err.Error() as a format string trips go vet.
			t.Error(err)
		}
		if got, want := n, tt.n; got != want {
			t.Errorf("%q: Count of bytes written don't match; got = %d, want = %d", tt.s, got, want)
		}
		// BUG FIX: the comparison was inverted — it previously reported an
		// error when the buffers DID match. Fail only on a mismatch.
		if got, want := bytesBuffer.Bytes(), tt.buf; !bytes.Equal(got, want) {
			t.Errorf("%q: Buffers don't match; got = %q, want = %q", tt.s, got, want)
		}
	}
}
// TestPadBytesNeeded verifies the number of pad bytes required to reach
// the next 4-byte boundary for a range of input lengths.
func TestPadBytesNeeded(t *testing.T) {
	for _, tc := range []struct {
		in   int // element length
		want int // pad bytes expected
	}{
		{4, 0}, {3, 1}, {2, 2}, {1, 3}, {0, 0},
		{5, 3}, {7, 1}, {32, 0}, {63, 1}, {10, 2},
	} {
		if n := padBytesNeeded(tc.in); n != tc.want {
			t.Errorf("Number of pad bytes should be %d and is: %d", tc.want, n)
		}
	}
}
// TestTypeTagsString checks that TypeTags renders an int32 argument plus
// a true and a false boolean as the OSC tag string ",iTF".
func TestTypeTagsString(t *testing.T) {
	msg := NewMessage("/some/address")
	for _, arg := range []interface{}{int32(100), true, false} {
		msg.Append(arg)
	}
	typeTags, err := msg.TypeTags()
	if err != nil {
		t.Error(err.Error())
	}
	if typeTags != ",iTF" {
		t.Errorf("Type tag string should be ',iTF' and is: %s", typeTags)
	}
}
// TestClientSetLocalAddr ensures SetLocalAddr resolves "localhost" and
// stores the resulting local UDP address on the client.
func TestClientSetLocalAddr(t *testing.T) {
	client := NewClient("localhost", 8967)
	if err := client.SetLocalAddr("localhost", 41789); err != nil {
		t.Error(err.Error())
	}
	const expectedAddr = "127.0.0.1:41789"
	if got := client.laddr.String(); got != expectedAddr {
		t.Errorf("Expected laddr to be %s but was %s", expectedAddr, got)
	}
}
// TestParsePacket round-trips raw OSC wire strings through ParsePacket
// and compares the re-marshaled bytes against an expected Packet built
// with makePacket. nulls(n) supplies the 4-byte alignment padding that
// the OSC wire format requires after addresses, tag strings and args.
func TestParsePacket(t *testing.T) {
	for _, tt := range []struct {
		desc string // test-case label used in failure messages
		msg  string // raw wire input handed to ParsePacket
		pkt  Packet // expected equivalent packet (ignored when !ok)
		ok   bool   // whether parsing is expected to succeed
	}{
		{"no_args",
			"/a/b/c" + nulls(2) + "," + nulls(3),
			makePacket("/a/b/c", nil),
			true},
		{"string_arg",
			"/d/e/f" + nulls(2) + ",s" + nulls(2) + "foo" + nulls(1),
			makePacket("/d/e/f", []string{"foo"}),
			true},
		// Empty input must be rejected with an error.
		{"empty", "", nil, false},
	} {
		pkt, err := ParsePacket(tt.msg)
		if err != nil && tt.ok {
			t.Errorf("%s: ParsePacket() returned unexpected error; %s", tt.desc, err)
		}
		if err == nil && !tt.ok {
			t.Errorf("%s: ParsePacket() expected error", tt.desc)
		}
		if !tt.ok {
			// Error cases have nothing further to compare.
			continue
		}
		// Compare serialized forms rather than Packet values directly so
		// equality does not depend on internal representation.
		pktBytes, err := pkt.MarshalBinary()
		if err != nil {
			t.Errorf("%s: failure converting pkt to byte array; %s", tt.desc, err)
			continue
		}
		ttpktBytes, err := tt.pkt.MarshalBinary()
		if err != nil {
			t.Errorf("%s: failure converting tt.pkt to byte array; %s", tt.desc, err)
			continue
		}
		if got, want := pktBytes, ttpktBytes; !reflect.DeepEqual(got, want) {
			t.Errorf("%s: ParsePacket() as bytes = '%s', want = '%s'", tt.desc, got, want)
			continue
		}
	}
}
// TestOscMessageMatch exercises Message.Match against several address
// patterns: a bare wildcard, a non-matching prefix, and {a,b}-style
// alternative lists.
func TestOscMessageMatch(t *testing.T) {
	for _, tt := range []struct {
		desc        string
		addr        string
		addrPattern string
		want        bool
	}{
		{"match everything", "*", "/a/b", true},
		{"don't match", "/a/b", "/a", false},
		{"match alternatives", "/a/{foo,bar}", "/a/foo", true},
		{"don't match if address is not part of the alternatives", "/a/{foo,bar}", "/a/bob", false},
	} {
		got := NewMessage(tt.addr).Match(tt.addrPattern)
		if got != tt.want {
			t.Errorf("%s: msg.Match('%s') = '%t', want = '%t'", tt.desc, tt.addrPattern, got, tt.want)
		}
	}
}
// zero is a single NUL byte as a string, used when building padded OSC
// payloads by hand.
const zero = string(byte(0))

// nulls returns a string of `i` nulls.
func nulls(i int) string {
	// make([]byte, i) is zero-filled, so the conversion yields i NUL bytes.
	return string(make([]byte, i))
}
// makePacket creates a fake Message Packet with the given address and
// string arguments appended in order.
func makePacket(addr string, args []string) Packet {
	pkt := NewMessage(addr)
	for i := range args {
		pkt.Append(args[i])
	}
	return pkt
}
|
/*
SPDX-License-Identifier: MIT
Copyright (c) 2017 Thanh Ha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// EPL-1.0 License
package license
// EPL_10_LICENSE is the Eclipse Public License v1.0 header text.
// The {DATE} and copyright-owner placeholders are presumably replaced
// by the consumer before insertion into a file — confirm against callers.
const EPL_10_LICENSE = `
Copyright (c) {DATE} {INITIAL COPYRIGHT OWNER} {OTHER COPYRIGHT OWNERS}.
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v10.html
`
Fix EPL file formatting
Signed-off-by: Thanh Ha <ddef40ffcf330e0c1e91b15bb1b0d4953f1680d8@linux.com>
/*
SPDX-License-Identifier: MIT
Copyright (c) 2017 Thanh Ha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// EPL-1.0 License
package license
// EPL_10_LICENSE is the Eclipse Public License v1.0 header text.
// The {DATE} and copyright-owner placeholders are presumably replaced
// by the consumer before insertion into a file — confirm against callers.
const EPL_10_LICENSE = `
Copyright (c) {DATE} {INITIAL COPYRIGHT OWNER} {OTHER COPYRIGHT OWNERS}.
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v10.html
`
|
package main
import (
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
)
// sourceURITests maps ToSourceURI inputs to the URI they must expand to:
// full URIs pass through unchanged, "user/repo" slugs become GitHub
// HTTPS URIs.
var sourceURITests = []struct {
	src string // input as a user would type it
	dst string // expected canonical URI
}{
	//Full URI
	{
		"https://github.com/sunaku/vim-unbundle",
		"https://github.com/sunaku/vim-unbundle",
	},
	//Short GitHub URI
	{
		"Shougo/neobundle.vim",
		"https://github.com/Shougo/neobundle.vim",
	},
	{
		"thinca/vim-quickrun",
		"https://github.com/thinca/vim-quickrun",
	},
}
// TestSourceURI checks that ToSourceURI expands short GitHub slugs into
// full HTTPS URIs and leaves complete URIs untouched (table in
// sourceURITests).
func TestSourceURI(t *testing.T) {
	for _, test := range sourceURITests {
		actual, err := ToSourceURI(test.src)
		if err != nil {
			t.Errorf("ToSourceURI(%q) returns %q, want nil",
				test.src, err)
		}
		if want := test.dst; actual != want {
			t.Errorf("%q: got %q, want %q",
				test.src, actual, want)
		}
	}
}
// destinationPathTests maps (filetype, source) pairs to the install
// directory ToDestinationPath must produce: plain bundles go under
// dotvim/bundle, filetype bundles under dotvim/ftbundle/<filetype>.
var destinationPathTests = []struct {
	filetype string // "" means a regular (non-filetype) bundle
	src      string // source URI or short slug
	dst      string // expected destination directory
}{
	//No filetype
	{
		"",
		"https://github.com/sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"),
	},
	{
		"",
		"sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"),
	},
	//Filetype specified
	{
		"go",
		"https://github.com/fatih/vim-go",
		filepath.Join(dotvim, "ftbundle", "go", "vim-go"),
	},
	{
		"perl",
		"https://github.com/hotchpotch/perldoc-vim",
		filepath.Join(dotvim, "ftbundle", "perl", "perldoc-vim"),
	},
}
// TestDestinationPath checks that ToDestinationPath places plain bundles
// under dotvim/bundle and filetype bundles under dotvim/ftbundle/<ft>
// (table in destinationPathTests).
func TestDestinationPath(t *testing.T) {
	for _, test := range destinationPathTests {
		expect := test.dst
		actual, err := ToDestinationPath(test.src, test.filetype)
		if err != nil {
			// Fixed: the message previously named ToSourceURI.
			t.Errorf("ToDestinationPath(%q) returns %q, want nil",
				test.src, err)
		}
		if actual != expect {
			// Fixed: the uri and filetype values were swapped in the message.
			t.Errorf("(uri=%q, filetype=%q): got %q, want %q",
				test.src, test.filetype, actual, expect)
		}
	}
}
// TestPackage checks that NewPackage expands a short slug and fills in
// the expected src/dst fields of the returned Package.
func TestPackage(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	expect := &Package{
		verbose: false,
		src:     "https://github.com/sunaku/vim-unbundle",
		dst:     filepath.Join(dotvim, "bundle", "vim-unbundle"),
	}
	actual, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	if !reflect.DeepEqual(actual, expect) {
		// Fixed: the uri and filetype values were swapped in the message.
		t.Errorf("(uri=%q, filetype=%q): got %q, want %q",
			src, filetype, actual, expect)
	}
}
// TestPackageToCommnad verifies toCommand builds the expected
// "git clone <src> <dst>" invocation. (The function name keeps the
// historical "Commnad" typo so the test's identity is unchanged.)
func TestPackageToCommnad(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	p, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	want := exec.Command("git", "clone",
		"https://github.com/sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"))
	got := p.toCommand()
	if !reflect.DeepEqual(got.Args, want.Args) {
		t.Errorf("(filetype=%q, uri=%q): got %q, want %q",
			filetype, src, got, want)
	}
}
// TestVerbose checks that Verbose(true) flips the package's verbose flag.
func TestVerbose(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	p, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	p.Verbose(true)
	if got, want := p.verbose, true; got != want {
		t.Errorf("got %v, want %v",
			got, want)
	}
}
// TestInstalled compares installed() against a direct os.Stat of the
// destination directory: the package counts as installed exactly when
// its destination exists.
func TestInstalled(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	p, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	_, statErr := os.Stat(p.dst)
	if got, want := p.installed(), statErr == nil; got != want {
		t.Errorf("got %v, want %v",
			got, want)
	}
}
Remove TestVerbose
package main
import (
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
)
// sourceURITests maps ToSourceURI inputs to the URI they must expand to:
// full URIs pass through unchanged, "user/repo" slugs become GitHub
// HTTPS URIs.
var sourceURITests = []struct {
	src string // input as a user would type it
	dst string // expected canonical URI
}{
	//Full URI
	{
		"https://github.com/sunaku/vim-unbundle",
		"https://github.com/sunaku/vim-unbundle",
	},
	//Short GitHub URI
	{
		"Shougo/neobundle.vim",
		"https://github.com/Shougo/neobundle.vim",
	},
	{
		"thinca/vim-quickrun",
		"https://github.com/thinca/vim-quickrun",
	},
}
// TestSourceURI checks that ToSourceURI expands short GitHub slugs into
// full HTTPS URIs and leaves complete URIs untouched (table in
// sourceURITests).
func TestSourceURI(t *testing.T) {
	for _, test := range sourceURITests {
		actual, err := ToSourceURI(test.src)
		if err != nil {
			t.Errorf("ToSourceURI(%q) returns %q, want nil",
				test.src, err)
		}
		if want := test.dst; actual != want {
			t.Errorf("%q: got %q, want %q",
				test.src, actual, want)
		}
	}
}
// destinationPathTests maps (filetype, source) pairs to the install
// directory ToDestinationPath must produce: plain bundles go under
// dotvim/bundle, filetype bundles under dotvim/ftbundle/<filetype>.
var destinationPathTests = []struct {
	filetype string // "" means a regular (non-filetype) bundle
	src      string // source URI or short slug
	dst      string // expected destination directory
}{
	//No filetype
	{
		"",
		"https://github.com/sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"),
	},
	{
		"",
		"sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"),
	},
	//Filetype specified
	{
		"go",
		"https://github.com/fatih/vim-go",
		filepath.Join(dotvim, "ftbundle", "go", "vim-go"),
	},
	{
		"perl",
		"https://github.com/hotchpotch/perldoc-vim",
		filepath.Join(dotvim, "ftbundle", "perl", "perldoc-vim"),
	},
}
// TestDestinationPath checks that ToDestinationPath places plain bundles
// under dotvim/bundle and filetype bundles under dotvim/ftbundle/<ft>
// (table in destinationPathTests).
func TestDestinationPath(t *testing.T) {
	for _, test := range destinationPathTests {
		expect := test.dst
		actual, err := ToDestinationPath(test.src, test.filetype)
		if err != nil {
			// Fixed: the message previously named ToSourceURI.
			t.Errorf("ToDestinationPath(%q) returns %q, want nil",
				test.src, err)
		}
		if actual != expect {
			// Fixed: the uri and filetype values were swapped in the message.
			t.Errorf("(uri=%q, filetype=%q): got %q, want %q",
				test.src, test.filetype, actual, expect)
		}
	}
}
// TestPackage checks that NewPackage expands a short slug and fills in
// the expected src/dst fields of the returned Package.
func TestPackage(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	expect := &Package{
		src: "https://github.com/sunaku/vim-unbundle",
		dst: filepath.Join(dotvim, "bundle", "vim-unbundle"),
	}
	actual, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	if !reflect.DeepEqual(actual, expect) {
		// Fixed: the uri and filetype values were swapped in the message.
		t.Errorf("(uri=%q, filetype=%q): got %q, want %q",
			src, filetype, actual, expect)
	}
}
// TestPackageToCommnad verifies toCommand builds the expected
// "git clone <src> <dst>" invocation. (The function name keeps the
// historical "Commnad" typo so the test's identity is unchanged.)
func TestPackageToCommnad(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	p, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	want := exec.Command("git", "clone",
		"https://github.com/sunaku/vim-unbundle",
		filepath.Join(dotvim, "bundle", "vim-unbundle"))
	got := p.toCommand()
	if !reflect.DeepEqual(got.Args, want.Args) {
		t.Errorf("(filetype=%q, uri=%q): got %q, want %q",
			filetype, src, got, want)
	}
}
// TestInstalled compares installed() against a direct os.Stat of the
// destination directory: the package counts as installed exactly when
// its destination exists.
func TestInstalled(t *testing.T) {
	src, filetype := "sunaku/vim-unbundle", ""
	p, err := NewPackage(src, filetype)
	if err != nil {
		t.Errorf("NewPackage(%q, %q) returns %q, want nil",
			src, filetype, err)
	}
	_, statErr := os.Stat(p.dst)
	if got, want := p.installed(), statErr == nil; got != want {
		t.Errorf("got %v, want %v",
			got, want)
	}
}
|
package packer
import (
"fmt"
"log"
"sync"
)
// Well-known keys that are injected into every component's configuration
// map (see coreBuild.Prepare).
const (
	// This is the key in configurations that is set to the name of the
	// build.
	BuildNameConfigKey = "packer_build_name"

	// This is the key in the configuration that is set to the type
	// of the builder that is run. This is useful for provisioners and
	// such who want to make use of this.
	BuilderTypeConfigKey = "packer_builder_type"

	// This is the key in configurations that is set to "true" when Packer
	// debugging is enabled.
	DebugConfigKey = "packer_debug"

	// This is the key in configurations that is set to "true" when Packer
	// force build is enabled.
	ForceConfigKey = "packer_force"

	// This key determines what to do when a normal multistep step fails
	// - "cleanup" - run cleanup steps
	// - "abort" - exit without cleanup
	// - "ask" - ask the user
	OnErrorConfigKey = "packer_on_error"

	// TemplatePathKey is the path to the template that configured this build
	TemplatePathKey = "packer_template_path"

	// This key contains a map[string]string of the user variables for
	// template processing.
	UserVariablesConfigKey = "packer_user_variables"
)
// A Build represents a single job within Packer that is responsible for
// building some machine image artifact. Builds are meant to be parallelized.
// The canonical implementation in this file is coreBuild.
type Build interface {
	// Name is the name of the build. This is unique across a single template,
	// but not absolutely unique. This is meant more to describe to the user
	// what is being built rather than being a unique identifier.
	Name() string

	// Prepare configures the various components of this build and reports
	// any errors in doing so (such as syntax errors, validation errors, etc.).
	// It also reports any warnings.
	Prepare() ([]string, error)

	// Run runs the actual builder, returning an artifact implementation
	// of what is built. If anything goes wrong, an error is returned.
	Run(Ui) ([]Artifact, error)

	// Cancel will cancel a running build. This will block until the build
	// is actually completely canceled.
	Cancel()

	// SetDebug will enable/disable debug mode. Debug mode is always
	// enabled by adding the additional key "packer_debug" to boolean
	// true in the configuration of the various components. This must
	// be called prior to Prepare.
	//
	// When SetDebug is set to true, parallelism between builds is
	// strictly prohibited.
	SetDebug(bool)

	// SetForce will enable/disable forcing a build when artifacts exist.
	//
	// When SetForce is set to true, existing artifacts from the build are
	// deleted prior to the build.
	SetForce(bool)

	// SetOnError will determine what to do when a normal multistep step fails
	// - "cleanup" - run cleanup steps
	// - "abort" - exit without cleanup
	// - "ask" - ask the user
	SetOnError(string)
}
// A build struct represents a single build job, the result of which should
// be a single machine image artifact. This artifact may be comprised of
// multiple files, of course, but it should be for only a single provider
// (such as VirtualBox, EC2, etc.).
type coreBuild struct {
	name           string
	builder        Builder
	builderConfig  interface{}
	builderType    string
	hooks          map[string][]Hook
	postProcessors [][]coreBuildPostProcessor
	provisioners   []coreBuildProvisioner
	templatePath   string
	variables      map[string]string

	debug   bool   // injected as packer_debug during Prepare
	force   bool   // injected as packer_force during Prepare
	onError string // injected as packer_on_error during Prepare
	l       sync.Mutex // guards prepareCalled
	prepareCalled bool  // set once by Prepare; Run panics if unset
}
// Keeps track of the post-processor and the configuration of the
// post-processor used within a build.
type coreBuildPostProcessor struct {
	processor     PostProcessor
	processorType string
	config        map[string]interface{}
	// keepInputArtifact is nil when the user did not set
	// keep_input_artifact; Run then falls back to the post-processor's
	// own default.
	keepInputArtifact *bool
}
// Keeps track of the provisioner and the configuration of the provisioner
// within the build.
type coreBuildProvisioner struct {
	pType       string
	provisioner Provisioner
	config      []interface{}
}
// Name returns the user-facing name of the build.
func (b *coreBuild) Name() string {
	return b.name
}
// Prepare prepares the build by doing some initialization for the builder
// and any hooks. This _must_ be called prior to Run. The parameter is the
// overrides for the variables within the template (if any).
func (b *coreBuild) Prepare() (warn []string, err error) {
	// Serialize calls; a second Prepare is a programmer error.
	b.l.Lock()
	defer b.l.Unlock()

	if b.prepareCalled {
		panic("prepare already called")
	}
	b.prepareCalled = true

	// Common configuration passed to every component (builder,
	// provisioners, post-processors) alongside its own config.
	packerConfig := map[string]interface{}{
		BuildNameConfigKey:     b.name,
		BuilderTypeConfigKey:   b.builderType,
		DebugConfigKey:         b.debug,
		ForceConfigKey:         b.force,
		OnErrorConfigKey:       b.onError,
		TemplatePathKey:        b.templatePath,
		UserVariablesConfigKey: b.variables,
	}

	// Prepare the builder
	warn, err = b.builder.Prepare(b.builderConfig, packerConfig)
	if err != nil {
		log.Printf("Build '%s' prepare failure: %s\n", b.name, err)
		return
	}

	// Prepare the provisioners; copy each config slice before appending
	// packerConfig so the stored configs are not mutated.
	for _, coreProv := range b.provisioners {
		configs := make([]interface{}, len(coreProv.config), len(coreProv.config)+1)
		copy(configs, coreProv.config)
		configs = append(configs, packerConfig)

		if err = coreProv.provisioner.Prepare(configs...); err != nil {
			return
		}
	}

	// Prepare the post-processors
	for _, ppSeq := range b.postProcessors {
		for _, corePP := range ppSeq {
			err = corePP.processor.Configure(corePP.config, packerConfig)
			if err != nil {
				return
			}
		}
	}

	return
}
// Run runs the actual build: the builder first, then each configured
// post-processor sequence over the builder's artifact. Prepare must be
// called prior to running this; Run panics otherwise.
func (b *coreBuild) Run(originalUi Ui) ([]Artifact, error) {
	if !b.prepareCalled {
		panic("Prepare must be called first")
	}

	// Copy the hooks so this run cannot mutate the stored map/slices.
	hooks := make(map[string][]Hook)
	for hookName, hookList := range b.hooks {
		hooks[hookName] = make([]Hook, len(hookList))
		copy(hooks[hookName], hookList)
	}

	// Add a hook for the provisioners if we have provisioners
	if len(b.provisioners) > 0 {
		hookedProvisioners := make([]*HookedProvisioner, len(b.provisioners))
		for i, p := range b.provisioners {
			var pConfig interface{}
			if len(p.config) > 0 {
				pConfig = p.config[0]
			}
			if b.debug {
				hookedProvisioners[i] = &HookedProvisioner{
					&DebuggedProvisioner{Provisioner: p.provisioner},
					pConfig,
					p.pType,
				}
			} else {
				hookedProvisioners[i] = &HookedProvisioner{
					p.provisioner,
					pConfig,
					p.pType,
				}
			}
		}

		if _, ok := hooks[HookProvision]; !ok {
			hooks[HookProvision] = make([]Hook, 0, 1)
		}

		hooks[HookProvision] = append(hooks[HookProvision], &ProvisionHook{
			Provisioners: hookedProvisioners,
		})
	}

	hook := &DispatchHook{Mapping: hooks}
	artifacts := make([]Artifact, 0, 1)

	// The builder just has a normal Ui, but targeted
	builderUi := &TargetedUI{
		Target: b.Name(),
		Ui:     originalUi,
	}

	log.Printf("Running builder: %s", b.builderType)
	ts := CheckpointReporter.AddSpan(b.builderType, "builder", b.builderConfig)
	builderArtifact, err := b.builder.Run(builderUi, hook)
	ts.End(err)
	if err != nil {
		return nil, err
	}

	// If there was no result, don't worry about running post-processors
	// because there is nothing they can do, just return.
	if builderArtifact == nil {
		return nil, nil
	}

	errors := make([]error, 0)
	keepOriginalArtifact := len(b.postProcessors) == 0

	// Run the post-processors
PostProcessorRunSeqLoop:
	for _, ppSeq := range b.postProcessors {
		priorArtifact := builderArtifact
		for i, corePP := range ppSeq {
			ppUi := &TargetedUI{
				Target: fmt.Sprintf("%s (%s)", b.Name(), corePP.processorType),
				Ui:     originalUi,
			}

			builderUi.Say(fmt.Sprintf("Running post-processor: %s", corePP.processorType))
			ts := CheckpointReporter.AddSpan(corePP.processorType, "post-processor", corePP.config)
			artifact, defaultKeep, forceOverride, err := corePP.processor.PostProcess(ppUi, priorArtifact)
			ts.End(err)
			if err != nil {
				errors = append(errors, fmt.Errorf("Post-processor failed: %s", err))
				continue PostProcessorRunSeqLoop
			}

			if artifact == nil {
				log.Println("Nil artifact, halting post-processor chain.")
				continue PostProcessorRunSeqLoop
			}

			keep := defaultKeep
			// When the user has not set keep_input_artifact,
			// corePP.keepInputArtifact is nil and the post-processor's
			// default wins. When the user has set it, use that instead.
			// Exception: post-processors that become useless without
			// their input set forceOverride, which keeps the input
			// regardless of the user preference.
			if corePP.keepInputArtifact != nil {
				if defaultKeep && *corePP.keepInputArtifact == false && forceOverride {
					// BUGFIX: the concatenated message pieces were missing
					// separating spaces ("thebuild", "beignored").
					log.Printf("The %s post-processor forces "+
						"keep_input_artifact=true to preserve integrity of the "+
						"build chain. User-set keep_input_artifact=false will be "+
						"ignored.", corePP.processorType)
				} else {
					// User overrides default.
					keep = *corePP.keepInputArtifact
				}
			}

			if i == 0 {
				// This is the first post-processor. We handle deleting
				// previous artifacts a bit different because multiple
				// post-processors may be using the original and need it.
				if !keepOriginalArtifact && keep {
					log.Printf(
						"Flagging to keep original artifact from post-processor '%s'",
						corePP.processorType)
					keepOriginalArtifact = true
				}
			} else {
				// We have a prior artifact. If we want to keep it, we append
				// it to the results list. Otherwise, we destroy it.
				if keep {
					artifacts = append(artifacts, priorArtifact)
				} else {
					log.Printf("Deleting prior artifact from post-processor '%s'", corePP.processorType)
					if err := priorArtifact.Destroy(); err != nil {
						log.Printf("Error is %#v", err)
						errors = append(errors, fmt.Errorf("Failed cleaning up prior artifact: %s; pp is %s", err, corePP.processorType))
					}
				}
			}

			priorArtifact = artifact
		}

		// Add on the last artifact to the results
		if priorArtifact != nil {
			artifacts = append(artifacts, priorArtifact)
		}
	}

	if keepOriginalArtifact {
		// Prepend the builder's artifact to the results.
		artifacts = append(artifacts, nil)
		copy(artifacts[1:], artifacts)
		artifacts[0] = builderArtifact
	} else {
		log.Printf("Deleting original artifact for build '%s'", b.name)
		if err := builderArtifact.Destroy(); err != nil {
			errors = append(errors, fmt.Errorf("Error destroying builder artifact: %s; bad artifact: %#v", err, builderArtifact.Files()))
		}
	}

	if len(errors) > 0 {
		err = &MultiError{errors}
	}

	return artifacts, err
}
// SetDebug enables or disables debug mode. It must be called before
// Prepare and panics otherwise.
func (b *coreBuild) SetDebug(debug bool) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.debug = debug
}
// SetForce enables or disables force mode. It must be called before
// Prepare and panics otherwise.
func (b *coreBuild) SetForce(force bool) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.force = force
}
// SetOnError records the failure strategy ("cleanup", "abort" or "ask").
// It must be called before Prepare and panics otherwise.
func (b *coreBuild) SetOnError(onError string) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.onError = onError
}
// Cancel cancels the build if it is running by delegating to the
// underlying builder's Cancel.
func (b *coreBuild) Cancel() {
	b.builder.Cancel()
}
clean up code comments
package packer
import (
"fmt"
"log"
"sync"
)
// Well-known keys that are injected into every component's configuration
// map (see coreBuild.Prepare).
const (
	// This is the key in configurations that is set to the name of the
	// build.
	BuildNameConfigKey = "packer_build_name"

	// This is the key in the configuration that is set to the type
	// of the builder that is run. This is useful for provisioners and
	// such who want to make use of this.
	BuilderTypeConfigKey = "packer_builder_type"

	// This is the key in configurations that is set to "true" when Packer
	// debugging is enabled.
	DebugConfigKey = "packer_debug"

	// This is the key in configurations that is set to "true" when Packer
	// force build is enabled.
	ForceConfigKey = "packer_force"

	// This key determines what to do when a normal multistep step fails
	// - "cleanup" - run cleanup steps
	// - "abort" - exit without cleanup
	// - "ask" - ask the user
	OnErrorConfigKey = "packer_on_error"

	// TemplatePathKey is the path to the template that configured this build
	TemplatePathKey = "packer_template_path"

	// This key contains a map[string]string of the user variables for
	// template processing.
	UserVariablesConfigKey = "packer_user_variables"
)
// A Build represents a single job within Packer that is responsible for
// building some machine image artifact. Builds are meant to be parallelized.
// The canonical implementation in this file is coreBuild.
type Build interface {
	// Name is the name of the build. This is unique across a single template,
	// but not absolutely unique. This is meant more to describe to the user
	// what is being built rather than being a unique identifier.
	Name() string

	// Prepare configures the various components of this build and reports
	// any errors in doing so (such as syntax errors, validation errors, etc.).
	// It also reports any warnings.
	Prepare() ([]string, error)

	// Run runs the actual builder, returning an artifact implementation
	// of what is built. If anything goes wrong, an error is returned.
	Run(Ui) ([]Artifact, error)

	// Cancel will cancel a running build. This will block until the build
	// is actually completely canceled.
	Cancel()

	// SetDebug will enable/disable debug mode. Debug mode is always
	// enabled by adding the additional key "packer_debug" to boolean
	// true in the configuration of the various components. This must
	// be called prior to Prepare.
	//
	// When SetDebug is set to true, parallelism between builds is
	// strictly prohibited.
	SetDebug(bool)

	// SetForce will enable/disable forcing a build when artifacts exist.
	//
	// When SetForce is set to true, existing artifacts from the build are
	// deleted prior to the build.
	SetForce(bool)

	// SetOnError will determine what to do when a normal multistep step fails
	// - "cleanup" - run cleanup steps
	// - "abort" - exit without cleanup
	// - "ask" - ask the user
	SetOnError(string)
}
// A build struct represents a single build job, the result of which should
// be a single machine image artifact. This artifact may be comprised of
// multiple files, of course, but it should be for only a single provider
// (such as VirtualBox, EC2, etc.).
type coreBuild struct {
	name           string
	builder        Builder
	builderConfig  interface{}
	builderType    string
	hooks          map[string][]Hook
	postProcessors [][]coreBuildPostProcessor
	provisioners   []coreBuildProvisioner
	templatePath   string
	variables      map[string]string

	debug   bool   // injected as packer_debug during Prepare
	force   bool   // injected as packer_force during Prepare
	onError string // injected as packer_on_error during Prepare
	l       sync.Mutex // guards prepareCalled
	prepareCalled bool  // set once by Prepare; Run panics if unset
}
// Keeps track of the post-processor and the configuration of the
// post-processor used within a build.
type coreBuildPostProcessor struct {
	processor     PostProcessor
	processorType string
	config        map[string]interface{}
	// keepInputArtifact is nil when the user did not set
	// keep_input_artifact; Run then falls back to the post-processor's
	// own default.
	keepInputArtifact *bool
}
// Keeps track of the provisioner and the configuration of the provisioner
// within the build.
type coreBuildProvisioner struct {
	pType       string
	provisioner Provisioner
	config      []interface{}
}
// Name returns the user-facing name of the build.
func (b *coreBuild) Name() string {
	return b.name
}
// Prepare prepares the build by doing some initialization for the builder
// and any hooks. This _must_ be called prior to Run. The parameter is the
// overrides for the variables within the template (if any).
func (b *coreBuild) Prepare() (warn []string, err error) {
	// Serialize calls; a second Prepare is a programmer error.
	b.l.Lock()
	defer b.l.Unlock()

	if b.prepareCalled {
		panic("prepare already called")
	}
	b.prepareCalled = true

	// Common configuration passed to every component (builder,
	// provisioners, post-processors) alongside its own config.
	packerConfig := map[string]interface{}{
		BuildNameConfigKey:     b.name,
		BuilderTypeConfigKey:   b.builderType,
		DebugConfigKey:         b.debug,
		ForceConfigKey:         b.force,
		OnErrorConfigKey:       b.onError,
		TemplatePathKey:        b.templatePath,
		UserVariablesConfigKey: b.variables,
	}

	// Prepare the builder
	warn, err = b.builder.Prepare(b.builderConfig, packerConfig)
	if err != nil {
		log.Printf("Build '%s' prepare failure: %s\n", b.name, err)
		return
	}

	// Prepare the provisioners; copy each config slice before appending
	// packerConfig so the stored configs are not mutated.
	for _, coreProv := range b.provisioners {
		configs := make([]interface{}, len(coreProv.config), len(coreProv.config)+1)
		copy(configs, coreProv.config)
		configs = append(configs, packerConfig)

		if err = coreProv.provisioner.Prepare(configs...); err != nil {
			return
		}
	}

	// Prepare the post-processors
	for _, ppSeq := range b.postProcessors {
		for _, corePP := range ppSeq {
			err = corePP.processor.Configure(corePP.config, packerConfig)
			if err != nil {
				return
			}
		}
	}

	return
}
// Run runs the actual build: the builder first, then each configured
// post-processor sequence over the builder's artifact. Prepare must be
// called prior to running this; Run panics otherwise.
func (b *coreBuild) Run(originalUi Ui) ([]Artifact, error) {
	if !b.prepareCalled {
		panic("Prepare must be called first")
	}

	// Copy the hooks so this run cannot mutate the stored map/slices.
	hooks := make(map[string][]Hook)
	for hookName, hookList := range b.hooks {
		hooks[hookName] = make([]Hook, len(hookList))
		copy(hooks[hookName], hookList)
	}

	// Add a hook for the provisioners if we have provisioners
	if len(b.provisioners) > 0 {
		hookedProvisioners := make([]*HookedProvisioner, len(b.provisioners))
		for i, p := range b.provisioners {
			var pConfig interface{}
			if len(p.config) > 0 {
				pConfig = p.config[0]
			}
			if b.debug {
				hookedProvisioners[i] = &HookedProvisioner{
					&DebuggedProvisioner{Provisioner: p.provisioner},
					pConfig,
					p.pType,
				}
			} else {
				hookedProvisioners[i] = &HookedProvisioner{
					p.provisioner,
					pConfig,
					p.pType,
				}
			}
		}

		if _, ok := hooks[HookProvision]; !ok {
			hooks[HookProvision] = make([]Hook, 0, 1)
		}

		hooks[HookProvision] = append(hooks[HookProvision], &ProvisionHook{
			Provisioners: hookedProvisioners,
		})
	}

	hook := &DispatchHook{Mapping: hooks}
	artifacts := make([]Artifact, 0, 1)

	// The builder just has a normal Ui, but targeted
	builderUi := &TargetedUI{
		Target: b.Name(),
		Ui:     originalUi,
	}

	log.Printf("Running builder: %s", b.builderType)
	ts := CheckpointReporter.AddSpan(b.builderType, "builder", b.builderConfig)
	builderArtifact, err := b.builder.Run(builderUi, hook)
	ts.End(err)
	if err != nil {
		return nil, err
	}

	// If there was no result, don't worry about running post-processors
	// because there is nothing they can do, just return.
	if builderArtifact == nil {
		return nil, nil
	}

	errors := make([]error, 0)
	keepOriginalArtifact := len(b.postProcessors) == 0

	// Run the post-processors
PostProcessorRunSeqLoop:
	for _, ppSeq := range b.postProcessors {
		priorArtifact := builderArtifact
		for i, corePP := range ppSeq {
			ppUi := &TargetedUI{
				Target: fmt.Sprintf("%s (%s)", b.Name(), corePP.processorType),
				Ui:     originalUi,
			}

			builderUi.Say(fmt.Sprintf("Running post-processor: %s", corePP.processorType))
			ts := CheckpointReporter.AddSpan(corePP.processorType, "post-processor", corePP.config)
			artifact, defaultKeep, forceOverride, err := corePP.processor.PostProcess(ppUi, priorArtifact)
			ts.End(err)
			if err != nil {
				errors = append(errors, fmt.Errorf("Post-processor failed: %s", err))
				continue PostProcessorRunSeqLoop
			}

			if artifact == nil {
				log.Println("Nil artifact, halting post-processor chain.")
				continue PostProcessorRunSeqLoop
			}

			keep := defaultKeep
			// When user has not set keep_input_artifuact
			// corePP.keepInputArtifact is nil.
			// In this case, use the keepDefault provided by the postprocessor.
			// When user _has_ set keep_input_atifact, go with that instead.
			// Exception: for postprocessors that will fail/become
			// useless if keep isn't true, heed forceOverride and keep the
			// input artifact regardless of user preference.
			if corePP.keepInputArtifact != nil {
				if defaultKeep && *corePP.keepInputArtifact == false && forceOverride {
					// BUGFIX: the concatenated message pieces were missing
					// separating spaces ("thebuild", "beignored").
					log.Printf("The %s post-processor forces "+
						"keep_input_artifact=true to preserve integrity of the "+
						"build chain. User-set keep_input_artifact=false will be "+
						"ignored.", corePP.processorType)
				} else {
					// User overrides default.
					keep = *corePP.keepInputArtifact
				}
			}

			if i == 0 {
				// This is the first post-processor. We handle deleting
				// previous artifacts a bit different because multiple
				// post-processors may be using the original and need it.
				if !keepOriginalArtifact && keep {
					log.Printf(
						"Flagging to keep original artifact from post-processor '%s'",
						corePP.processorType)
					keepOriginalArtifact = true
				}
			} else {
				// We have a prior artifact. If we want to keep it, we append
				// it to the results list. Otherwise, we destroy it.
				if keep {
					artifacts = append(artifacts, priorArtifact)
				} else {
					log.Printf("Deleting prior artifact from post-processor '%s'", corePP.processorType)
					if err := priorArtifact.Destroy(); err != nil {
						log.Printf("Error is %#v", err)
						errors = append(errors, fmt.Errorf("Failed cleaning up prior artifact: %s; pp is %s", err, corePP.processorType))
					}
				}
			}

			priorArtifact = artifact
		}

		// Add on the last artifact to the results
		if priorArtifact != nil {
			artifacts = append(artifacts, priorArtifact)
		}
	}

	if keepOriginalArtifact {
		// Prepend the builder's artifact to the results.
		artifacts = append(artifacts, nil)
		copy(artifacts[1:], artifacts)
		artifacts[0] = builderArtifact
	} else {
		log.Printf("Deleting original artifact for build '%s'", b.name)
		if err := builderArtifact.Destroy(); err != nil {
			errors = append(errors, fmt.Errorf("Error destroying builder artifact: %s; bad artifact: %#v", err, builderArtifact.Files()))
		}
	}

	if len(errors) > 0 {
		err = &MultiError{errors}
	}

	return artifacts, err
}
// SetDebug enables or disables debug mode. It must be called before
// Prepare and panics otherwise.
func (b *coreBuild) SetDebug(debug bool) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.debug = debug
}
// SetForce enables or disables force mode. It must be called before
// Prepare and panics otherwise.
func (b *coreBuild) SetForce(force bool) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.force = force
}
// SetOnError records the failure strategy ("cleanup", "abort" or "ask").
// It must be called before Prepare and panics otherwise.
func (b *coreBuild) SetOnError(onError string) {
	if b.prepareCalled {
		panic("prepare has already been called")
	}
	b.onError = onError
}
// Cancel cancels the build if it is running by delegating to the
// underlying builder's Cancel.
func (b *coreBuild) Cancel() {
	b.builder.Cancel()
}
|
package main
import (
"fmt"
"github.com/bradfitz/slice"
ct "github.com/daviddengcn/go-colortext"
. "github.com/timob/ls/lib"
"github.com/timob/sindex"
"log"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/dustin/go-humanize"
)
// DisplayEntry is a single listing line: the path text to print plus
// the embedded FileInfo it was stat'ed with.
type DisplayEntry struct {
	path string // display name (may be relative, ".", or a cleaned path in -R mode)
	os.FileInfo
}
// DisplayEntryList is a growable list of DisplayEntry values managed by
// the sindex list machinery.
type DisplayEntryList struct {
	Data []DisplayEntry
	sindex.List
}
// now is sampled once so "same year" timestamp formatting is stable for
// the whole run.
var now = time.Now()

// Flag state, set while parsing options in main.
var showDirEntries bool // -d: list the named entries, not their contents
var showAll bool        // -a
var showAlmostAll bool  // -A (implies -a, but suppresses . and ..)
var longList bool       // -l

// Sort modes selected with -t / -S; name is the default.
const (
	name    int = iota
	modTime int = iota
	size    int = iota
)

var sortType int = name
var reverseSort bool   // -r
var humanReadable bool // -h
var recursiveList bool // -R
var onlyHidden bool    // -O
var width int          // terminal width in columns (80 when unknown)
var oneColumn bool     // -1, or stdout is not a terminal
var listBylines bool   // -x: fill rows first instead of columns

// colorDef is a decoded LS_COLORS entry: foreground and background
// color indices (0 = terminal default) plus the bright attribute.
type colorDef struct {
	fg, bg byte
	bright bool
}

var fileColors map[string]colorDef // file-type / "*.ext" key -> color
var useColor bool
var colorSet bool // true while a color is active and needs resetting
// setColor switches the terminal to the given color definition and
// records that a reset is pending for resetColor.
func setColor(c colorDef) {
	colorSet = true
	ct.ChangeColor(ct.Color(c.fg), c.bright, ct.Color(c.bg), false)
}
// resetColor restores the terminal's default colors, but only if a
// prior setColor left a color active.
func resetColor() {
	if !colorSet {
		return
	}
	ct.ResetColor()
	colorSet = false
}
// setColorForFile selects a terminal color for info based on its file
// type (directory, symlink, pipe, socket, device, setuid/setgid,
// executable) or, failing that, its filename extension, using the
// fileColors table built from LS_COLORS in main. Regular files with no
// matching entry are left in the current color.
func setColorForFile(info os.FileInfo) {
	mode := info.Mode()
	var fileType string
	if mode&os.ModeDir != 0 {
		// Directories: distinguish sticky and/or other-writable ones.
		if mode&os.ModeSticky != 0 {
			if mode&(1<<1) != 0 { // other-writable permission bit
				fileType = "tw"
			} else {
				fileType = "st"
			}
		} else if mode&(1<<1) != 0 {
			fileType = "ow"
		} else {
			fileType = "di"
		}
	} else if mode&os.ModeSymlink != 0 {
		fileType = "ln"
	} else if mode&os.ModeNamedPipe != 0 {
		fileType = "pi"
	} else if mode&os.ModeSocket != 0 {
		fileType = "so"
	} else if mode&os.ModeDevice != 0 {
		// NOTE(review): character devices also carry ModeDevice, so they
		// match "bd" here and the "cd" branch below looks unreachable —
		// confirm whether that is intended.
		fileType = "bd"
	} else if mode&os.ModeCharDevice != 0 {
		fileType = "cd"
	} else if mode&os.ModeSetuid != 0 {
		fileType = "su"
	} else if mode&os.ModeSetgid != 0 {
		fileType = "sg"
	} else if mode&(1<<6|1<<3|1) != 0 { // any execute bit set
		fileType = "ex"
	} else {
		// Fall back to an extension key such as "*.tar" from LS_COLORS.
		name := info.Name()
		if n := strings.LastIndex(name, "."); n != -1 && n != len(name)-1 {
			key := "*" + name[n:]
			if _, ok := fileColors[key]; ok {
				fileType = key
			}
		}
	}
	if fileType != "" {
		setColor(fileColors[fileType])
	}
}
// human formats n (a byte count) the way GNU ls -h does: the value in
// the largest power-of-1024 unit, with one rounded-up fractional digit
// while the whole part is below 10, e.g. "512", "1.5K", "10K", "2.0M".
func human(n int64) string {
	whole := n
	unitSize := int64(1)
	var exp int64
	for exp = 0; exp < 12; exp++ {
		if whole/1024 == 0 {
			break
		}
		whole /= 1024
		unitSize *= 1024
	}
	frac := n - whole*unitSize
	if frac != 0 && exp > 0 {
		if whole >= 10 {
			// No fractional digit is shown; round the whole part up.
			whole++
		} else {
			// Scale the remainder down to a single tenths digit; the
			// magic plus one mirrors what GNU ls does.
			tenth := int64(1024/10) + 1
			frac = frac / (unitSize / 1024) / tenth
			// round up
			frac++
			if frac == 10 {
				whole++
				frac = 0
			}
		}
	}
	units := []string{"", "K", "M", "G", "T"}
	var unit string
	if exp < int64(len(units)) {
		unit = units[exp]
	}
	if whole > 0 && whole < 10 {
		return fmt.Sprintf("%d.%d%s", whole, frac, unit)
	}
	return fmt.Sprintf("%d%s", whole, unit)
}
// decimalLen reports how many decimal digits n occupies when printed
// (at least 1; the scan is bounded at 24 iterations like the original).
func decimalLen(n int64) int {
	digits := 1
	for n/10 != 0 && digits < 24 {
		n /= 10
		digits++
	}
	return digits
}
// strcmpi compares a and b, folding ASCII letters to upper case, and
// returns -1, 0 or 1 in strcmp style. b is walked byte-wise in step
// with a's rune positions, matching the original's semantics.
func strcmpi(a, b string) int {
	fold := func(r rune) rune {
		if r > 96 && r < 123 {
			return r - 32
		}
		return r
	}
	for i, ca := range a {
		if i >= len(b) {
			return 1
		}
		ca = fold(ca)
		cb := fold(rune(b[i]))
		switch {
		case ca > cb:
			return 1
		case ca < cb:
			return -1
		}
	}
	if len(a) < len(b) {
		return -1
	}
	return 0
}
func modeString(mode os.FileMode) string {
output := []byte(strings.Repeat("-", 10))
if mode&os.ModeDir != 0 {
output[0] = 'd'
} else if mode&os.ModeSymlink != 0 {
output[0] = 'l'
} else if mode&os.ModeNamedPipe != 0 {
output[0] = 'p'
} else if mode&os.ModeSocket != 0 {
output[0] = 's'
} else if mode&os.ModeCharDevice != 0 && mode&os.ModeDevice != 0 {
output[0] = 'c'
}
const rwx = "rwxrwxrwx"
for i, c := range rwx {
bitSet := mode&(1<<uint(9-1-i)) != 0
if bitSet {
if (i == 2 && mode&os.ModeSetuid != 0) || (i == 5 && mode&os.ModeSetgid != 0) {
output[i+1] = 's'
} else if i == 8 && mode&os.ModeSticky != 0 {
output[i+1] = 't'
} else {
output[i+1] = byte(c)
}
} else if (i == 2 && mode&os.ModeSetuid != 0) || (i == 5 && mode&os.ModeSetgid != 0) {
output[i+1] = 'S'
} else if i == 8 && mode&os.ModeSticky != 0 {
output[i+1] = 'T'
}
}
return string(output)
}
// display sorts selected in place according to the global sort/order
// flags, then prints it. With -l each entry gets its own line with
// mode, link count, owner, group, size and time columns; otherwise
// entries are laid out in columns (or rows with -x) sized to fit the
// terminal width. root is prepended to entry paths when resolving
// symlink targets.
func display(selected []DisplayEntry, root string) {
	slice.Sort(selected, func(i, j int) (v bool) {
		var same bool
		if sortType == modTime {
			v = selected[i].ModTime().Before(selected[j].ModTime())
			if !v {
				same = selected[i].ModTime().Equal(selected[j].ModTime())
			}
			v = !v
		} else if sortType == size {
			d := selected[j].Size() - selected[i].Size()
			if d > 0 {
				v = true
			} else if d == 0 {
				same = true
			}
			v = !v
		} else {
			// strcoll?
			v = strcmpi(selected[i].path, selected[j].path) == -1
		}
		// Ties fall back to a case-insensitive name comparison.
		if same {
			v = strcmpi(selected[i].path, selected[j].path) == -1
		}
		if reverseSort {
			v = !v
		}
		return
	})
	padding := 2
	smallestWord := 1
	var cols int
	var colWidths []int
	if longList {
		// Long listing: pre-measure the five variable-width fields
		// (links, user, group, size, human time) across all entries.
		cols = 5
		colWidths = make([]int, cols)
		for _, v := range selected {
			li := GetLongInfo(v)
			if decimalLen(int64(li.HardLinks)) > colWidths[0] {
				colWidths[0] = decimalLen(int64(li.HardLinks))
			}
			if len(li.UserName) > colWidths[1] {
				colWidths[1] = len(li.UserName)
			}
			if len(li.GroupName) > colWidths[2] {
				colWidths[2] = len(li.GroupName)
			}
			if humanReadable {
				if len(human(v.Size())) > colWidths[3] {
					colWidths[3] = len(human(v.Size()))
				}
				if len(humanize.Time(v.ModTime())) > colWidths[4] {
					colWidths[4] = len(humanize.Time(v.ModTime()))
				}
			} else {
				if decimalLen(v.Size()) > colWidths[3] {
					colWidths[3] = decimalLen(v.Size())
				}
			}
		}
	} else {
		if oneColumn {
			cols = 1
		} else {
			cols = width / (padding + smallestWord)
		}
		colWidths = make([]int, cols)
		// Shrink cols until every row of column widths fits in width.
	A:
		for {
			colWidths = colWidths[:cols]
			for i := range colWidths {
				colWidths[i] = 0
			}
			pos := (cols - 1) * padding
			for i := range selected {
				p := i % cols
				var j int
				if listBylines {
					j = i
				} else {
					var per int
					if len(selected)%cols == 0 {
						per = len(selected) / cols
					} else {
						per = len(selected)/cols + 1
					}
					square := per * cols
					if len(selected) <= square-per {
						cols--
						if cols == 0 {
							cols = 1
							break A
						}
						continue A
					}
					// if needed skip empty rows in last column
					// lastFullRow is index of last row with all cols present
					lastFullRow := (len(selected) - 1) % per
					curRow := i / cols
					if curRow > lastFullRow {
						diff := (i - (lastFullRow+1)*cols)
						p = diff % (cols - 1)
						curRow = lastFullRow + 1 + diff/(cols-1)
					}
					j = (per * p) + curRow
				}
				v := selected[j]
				if len(v.path) > colWidths[p] {
					pos += len(v.path) - colWidths[p]
					if pos > width {
						cols--
						if cols == 0 {
							cols = 1
							break A
						}
						continue A
					}
					colWidths[p] = len(v.path)
				}
			}
			break
		}
	}
	for i := range selected {
		var j int
		adjCols := cols
		// BUG FIX: p (the on-screen column) must be computed for every
		// layout; previously it was only assigned on the by-column
		// path, so -x (by-line) output always used colWidths[0] and the
		// wrong end-of-row test below.
		p := i % cols
		if listBylines || longList {
			j = i
		} else {
			var per int
			if len(selected)%cols == 0 {
				per = len(selected) / cols
			} else {
				per = len(selected)/cols + 1
			}
			lastFullRow := (len(selected) - 1) % per
			curRow := i / cols
			if curRow > lastFullRow {
				adjCols = cols - 1
				diff := (i - (lastFullRow+1)*cols)
				p = diff % (cols - 1)
				curRow = lastFullRow + 1 + diff/(cols-1)
			}
			j = (per * p) + curRow
		}
		v := selected[j]
		var linkTarget string
		var brokenLink bool
		var linkInfo os.FileInfo
		if v.Mode()&os.ModeSymlink != 0 {
			if l, err := os.Readlink(root + v.path); err == nil {
				linkTarget = l
				if i, err := os.Stat(root + v.path); err != nil {
					brokenLink = true
				} else {
					linkInfo = i
				}
			} else {
				log.Print(err)
			}
		}
		if longList {
			li := GetLongInfo(v)
			var timeStr string
			timePad := ""
			if humanReadable {
				timeStr = humanize.Time(v.ModTime())
				timePad = strings.Repeat(" ", colWidths[4]-len(timeStr))
			} else if now.Year() == v.ModTime().Year() {
				timeStr = v.ModTime().Format("Jan _2 15:04")
			} else {
				timeStr = v.ModTime().Format("Jan _2 2006")
			}
			linkPad := strings.Repeat(" ", colWidths[0]-decimalLen(int64(li.HardLinks)))
			userPad := strings.Repeat(" ", colWidths[1]-len(li.UserName))
			groupPad := strings.Repeat(" ", colWidths[2]-len(li.GroupName))
			var sizeStr string
			if humanReadable {
				sizeStr = human(v.Size())
			} else {
				sizeStr = fmt.Sprintf("%d", v.Size())
			}
			sizePad := strings.Repeat(" ", colWidths[3]-len(sizeStr))
			if useColor {
				fmt.Printf("%s %s%d %s%s %s%s %s%s %s%s ", modeString(v.Mode()), linkPad,
					li.HardLinks, li.UserName, userPad, li.GroupName, groupPad, sizePad, sizeStr, timePad, timeStr)
				if brokenLink {
					setColor(fileColors["or"])
				} else {
					setColorForFile(v.FileInfo)
				}
				fmt.Printf("%s", v.path)
				resetColor()
				if linkTarget != "" {
					fmt.Printf(" -> ")
					if brokenLink {
						setColor(fileColors["or"])
					} else {
						setColorForFile(linkInfo)
					}
					fmt.Printf("%s", linkTarget)
					resetColor()
				}
				fmt.Println()
			} else {
				name := v.path
				if v.Mode()&os.ModeSymlink != 0 {
					name = name + " -> " + linkTarget
				}
				fmt.Printf("%s %s%d %s%s %s%s %s%s %s%s %s\n", modeString(v.Mode()), linkPad,
					li.HardLinks, li.UserName, userPad, li.GroupName, groupPad, sizePad, sizeStr, timePad, timeStr, name)
			}
		} else {
			w := colWidths[p]
			if p == 0 {
				if i != 0 {
					fmt.Println()
				}
			}
			if useColor {
				if brokenLink {
					setColor(fileColors["or"])
				} else {
					setColorForFile(v.FileInfo)
				}
			}
			fmt.Printf("%s", v.path)
			if useColor {
				resetColor()
			}
			if p != adjCols-1 {
				fmt.Print(strings.Repeat(" ", (w-len(v.path))+padding))
			}
		}
	}
	if !longList {
		fmt.Println()
	}
}
// main implements an ls-like listing tool: it parses flags, resolves
// colors from LS_COLORS, lists the non-directory command-line operands
// first, then the contents of each directory operand (recursively with
// -R), and exits with a status reflecting any stat/read errors.
func main() {
	exit := 0
	files := sindex.InitListType(&sindex.StringList{Data: os.Args}).(*sindex.StringList)
	options := sindex.InitListType(&sindex.StringList{}).(*sindex.StringList)
	files.Remove(0)
	// Split argv into option tokens (leading '-') and file operands.
	for iter := files.Iterator(0); iter.Next(); {
		if v := files.Data[iter.Pos()]; strings.HasPrefix(v, "-") {
			options.Data[options.Append()] = v
			iter.Remove()
			if v == "--" {
				break
			}
		}
	}
	if files.Len() == 0 {
		files.Data[files.Append()] = "."
	}
	if !IsTerminal(1) {
		oneColumn = true
	}
	for iter := options.Iterator(0); iter.Next(); {
		// Expand bundled short options ("-la" -> "-l" "-a") in place.
		if option := options.Data[iter.Pos()]; !strings.HasPrefix(option, "--") && len(option) > 2 {
			letters := sindex.InitListType(&sindex.ByteList{Data: []byte(option[1:])}).(*sindex.ByteList)
			var removed bool
			for iter2 := letters.Iterator(letters.Len() - 1); iter2.Prev(); {
				options.Data[iter.Insert()] = "-" + string(letters.Data[iter2.Pos()])
				if !removed {
					iter.Remove()
					removed = true
				}
				iter.Prev()
			}
		}
		var helpStr = `Usage: ls [OPTION]... [FILE]...
List information about the FILEs (the current directory by default).
Sort entries alphabetically unless a sort option is given.
-a do not ignore entries starting with .
-A do not list implied . and ..
-d list directory entries instead of contents
-t sort by modification time, newest first
-S sort by file size
-r reverse order while sorting
-l use a long listing format
-h with -l, print sizes, time stamps in human readable format
-R list subdirectories recursively, sorting all files
-O only list entries starting with .
-C list entries by columns
-x list entries by lines instead of by columns
-1 list one file per line
--color[=WHEN] colorize the output WHEN defaults to 'always'
or can be "never" or "auto".
--help display this help and exit
`
		option := options.Data[iter.Pos()]
		switch option {
		case "-d":
			showDirEntries = true
		case "-a":
			showAll = true
		case "-A":
			showAlmostAll = true
			showAll = true
		case "-t":
			sortType = modTime
		case "-S":
			sortType = size
		case "-r":
			reverseSort = true
		case "-l":
			longList = true
		case "-h":
			humanReadable = true
		case "-R":
			recursiveList = true
		case "-O":
			onlyHidden = true
		case "-x":
			listBylines = true
		case "-C":
			listBylines = false
		case "-1":
			oneColumn = true
		case "--color":
			fallthrough
		case "--color=always":
			useColor = true
		case "--color=never":
			useColor = false
		case "--color=auto":
			if IsTerminal(1) {
				useColor = true
			} else {
				useColor = false
			}
		case "--help":
			fmt.Print(helpStr)
			os.Exit(0)
		default:
			// Typo fix: message previously read "unkown".
			log.Fatalf("unknown option %s", option)
		}
	}
	if w, _, err := GetTermSize(); err == nil {
		width = w
	} else {
		width = 80
	}
	if useColor {
		// Built-in defaults, overridden below by LS_COLORS entries.
		colorBytesMap := map[string][]byte{
			"di": {1, 34},
			"ln": {1, 36},
			"pi": {40, 33},
			"so": {1, 35},
			"bd": {40, 33, 1},
			"cd": {40, 33, 1},
			"or": {40, 31},
			"su": {37, 41},
			"sg": {30, 43},
			"tw": {30, 42},
			"ow": {34, 42},
			"st": {37, 44},
			"ex": {01, 32},
		}
		lsColorsEnv := os.Getenv("LS_COLORS")
		colorDefs := strings.Split(lsColorsEnv, ":")
		for _, def := range colorDefs {
			tokens := strings.Split(def, "=")
			if len(tokens) != 2 {
				continue
			}
			colors := strings.Split(tokens[1], ";")
			var colorBytes []byte
			for _, color := range colors {
				if n, err := strconv.ParseInt(color, 10, 8); err == nil {
					colorBytes = append(colorBytes, byte(n))
				}
			}
			colorBytesMap[tokens[0]] = colorBytes
		}
		fileColors = make(map[string]colorDef)
		// Decode the ANSI attribute bytes into colorDef values.
		for k, v := range colorBytesMap {
			var bright bool
			var fg, bg int = 0, 0
			for _, b := range v {
				if b == 0 {
					bright = false
				} else if b == 1 {
					bright = true
				} else if b >= 30 && b < 38 {
					fg = int(b-30) + 1
				} else if b >= 40 && b < 48 {
					bg = int(b-40) + 1
				}
			}
			fileColors[k] = colorDef{byte(fg), byte(bg), bright}
		}
	}
	selected := sindex.InitListType(&DisplayEntryList{}).(*DisplayEntryList)
	// First pass: collect plain-file operands; directory operands stay
	// in files for the pass below (unless -d).
	for iter := files.Iterator(0); iter.Next(); {
		fileName := files.Data[iter.Pos()]
		if showDirEntries {
			if stat, err := os.Lstat(fileName); err == nil {
				selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
			} else {
				log.Print(err)
				exit = 2
			}
			iter.Remove()
		} else {
			if stat, err := os.Lstat(fileName); err == nil {
				if stat.IsDir() {
					continue
				} else {
					selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
					iter.Remove()
				}
			} else {
				log.Print(err)
				exit = 2
				iter.Remove()
			}
		}
	}
	if selected.Len() > 0 && !recursiveList {
		display(selected.Data, "")
	}
	// directories
	for iter := files.Iterator(0); iter.Next(); {
		fileName := files.Data[iter.Pos()]
		if !recursiveList {
			if selected.Len() > 0 {
				selected.Clear()
				fmt.Println()
				fmt.Printf("%s:\n", fileName)
			} else if files.Len() > 1 {
				fmt.Printf("%s:\n", fileName)
			}
		}
		var total int64 = 0
		if file, err := os.Open(fileName); err == nil {
			if showAll && !showAlmostAll && !recursiveList && !onlyHidden {
				if stat, err := os.Stat(fileName); err == nil {
					selected.Data[selected.Append()] = DisplayEntry{".", stat}
				} else {
					log.Print(err)
				}
				if parent, err := os.Stat(path.Clean(fileName + "/..")); err == nil {
					selected.Data[selected.Append()] = DisplayEntry{"..", parent}
				} else {
					log.Print(err)
				}
			}
			if names, err := file.Readdirnames(0); err == nil {
				for _, name := range names {
					isHidden := strings.HasPrefix(name, ".")
					if !onlyHidden && (showAll || !isHidden) || onlyHidden && isHidden {
						if v, err := os.Lstat(fileName + "/" + name); err == nil {
							total += v.Size()
							if recursiveList {
								// -R: queue subdirectories for listing too.
								path := path.Clean(fileName + "/" + v.Name())
								selected.Data[selected.Append()] = DisplayEntry{path, v}
								if v.IsDir() {
									files.Data[files.Append()] = path
								}
							} else {
								selected.Data[selected.Append()] = DisplayEntry{v.Name(), v}
							}
						} else {
							log.Print(err)
							exit = 1
						}
					}
				}
			} else {
				log.Print(err)
				exit = 1
			}
			file.Close()
		} else {
			log.Print(err)
			exit = 1
		}
		if longList && !recursiveList {
			if humanReadable {
				fmt.Printf("total %s\n", human(total))
			} else {
				fmt.Printf("total %d\n", total/1024)
			}
		}
		if !recursiveList && selected.Len() > 0 {
			display(selected.Data, fileName+"/")
		}
	}
	// -R defers display until the whole tree has been walked so the
	// global sort covers every entry.
	if recursiveList && selected.Len() > 0 {
		log.Printf("sorting/displaying")
		display(selected.Data, "")
	}
	os.Exit(exit)
}
Fix bug in by-line (-x) layout
package main
import (
"fmt"
"github.com/bradfitz/slice"
ct "github.com/daviddengcn/go-colortext"
. "github.com/timob/ls/lib"
"github.com/timob/sindex"
"log"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/dustin/go-humanize"
)
type DisplayEntry struct {
path string
os.FileInfo
}
type DisplayEntryList struct {
Data []DisplayEntry
sindex.List
}
var now = time.Now()
var showDirEntries bool
var showAll bool
var showAlmostAll bool
var longList bool
const (
name int = iota
modTime int = iota
size int = iota
)
var sortType int = name
var reverseSort bool
var humanReadable bool
var recursiveList bool
var onlyHidden bool
var width int
var oneColumn bool
var listBylines bool
type colorDef struct {
fg, bg byte
bright bool
}
var fileColors map[string]colorDef
var useColor bool
var colorSet bool
func setColor(def colorDef) {
colorSet = true
ct.ChangeColor(ct.Color(def.fg), def.bright, ct.Color(def.bg), false)
}
func resetColor() {
if colorSet {
ct.ResetColor()
colorSet = false
}
}
func setColorForFile(info os.FileInfo) {
mode := info.Mode()
var fileType string
if mode&os.ModeDir != 0 {
if mode&os.ModeSticky != 0 {
if mode&(1<<1) != 0 {
fileType = "tw"
} else {
fileType = "st"
}
} else if mode&(1<<1) != 0 {
fileType = "ow"
} else {
fileType = "di"
}
} else if mode&os.ModeSymlink != 0 {
fileType = "ln"
} else if mode&os.ModeNamedPipe != 0 {
fileType = "pi"
} else if mode&os.ModeSocket != 0 {
fileType = "so"
} else if mode&os.ModeDevice != 0 {
fileType = "bd"
} else if mode&os.ModeCharDevice != 0 {
fileType = "cd"
} else if mode&os.ModeSetuid != 0 {
fileType = "su"
} else if mode&os.ModeSetgid != 0 {
fileType = "sg"
} else if mode&(1<<6|1<<3|1) != 0 {
fileType = "ex"
} else {
name := info.Name()
if n := strings.LastIndex(name, "."); n != -1 && n != len(name)-1 {
key := "*" + name[n:]
if _, ok := fileColors[key]; ok {
fileType = key
}
}
}
if fileType != "" {
setColor(fileColors[fileType])
}
}
func human(n int64) string {
var i int64
var w = n
var uSize int64 = 1
for i = 0; i < 12; i++ {
if w/1024 == 0 {
break
}
w = w / 1024
uSize *= 1024
}
var f int64
var unit string
f = (n - w*uSize)
if f != 0 && i > 0 {
if w < 10 {
if f != 0 {
lowerSize := uSize / 1024
// magic plus one here (seems to be what GNU ls does)
tenth := int64(1024/10) + 1
f = f / lowerSize / tenth
// round up
f++
if f == 10 {
w++
f = 0
}
}
} else {
// round up
w++
}
}
switch i {
case 1:
unit = "K"
case 2:
unit = "M"
case 3:
unit = "G"
case 4:
unit = "T"
}
if w > 0 && w < 10 {
return fmt.Sprintf("%d.%d%s", w, f, unit)
} else {
return fmt.Sprintf("%d%s", w, unit)
}
}
// decimalLen reports how many decimal digits n occupies when printed
// (at least 1; the scan is bounded at 24 iterations like the original).
func decimalLen(n int64) int {
	digits := 1
	for n/10 != 0 && digits < 24 {
		n /= 10
		digits++
	}
	return digits
}
// strcmpi compares a and b, folding ASCII letters to upper case, and
// returns -1, 0 or 1 in strcmp style. b is walked byte-wise in step
// with a's rune positions, matching the original's semantics.
func strcmpi(a, b string) int {
	fold := func(r rune) rune {
		if r > 96 && r < 123 {
			return r - 32
		}
		return r
	}
	for i, ca := range a {
		if i >= len(b) {
			return 1
		}
		ca = fold(ca)
		cb := fold(rune(b[i]))
		switch {
		case ca > cb:
			return 1
		case ca < cb:
			return -1
		}
	}
	if len(a) < len(b) {
		return -1
	}
	return 0
}
func modeString(mode os.FileMode) string {
output := []byte(strings.Repeat("-", 10))
if mode&os.ModeDir != 0 {
output[0] = 'd'
} else if mode&os.ModeSymlink != 0 {
output[0] = 'l'
} else if mode&os.ModeNamedPipe != 0 {
output[0] = 'p'
} else if mode&os.ModeSocket != 0 {
output[0] = 's'
} else if mode&os.ModeCharDevice != 0 && mode&os.ModeDevice != 0 {
output[0] = 'c'
}
const rwx = "rwxrwxrwx"
for i, c := range rwx {
bitSet := mode&(1<<uint(9-1-i)) != 0
if bitSet {
if (i == 2 && mode&os.ModeSetuid != 0) || (i == 5 && mode&os.ModeSetgid != 0) {
output[i+1] = 's'
} else if i == 8 && mode&os.ModeSticky != 0 {
output[i+1] = 't'
} else {
output[i+1] = byte(c)
}
} else if (i == 2 && mode&os.ModeSetuid != 0) || (i == 5 && mode&os.ModeSetgid != 0) {
output[i+1] = 'S'
} else if i == 8 && mode&os.ModeSticky != 0 {
output[i+1] = 'T'
}
}
return string(output)
}
func display(selected []DisplayEntry, root string) {
slice.Sort(selected, func(i, j int) (v bool) {
var same bool
if sortType == modTime {
v = selected[i].ModTime().Before(selected[j].ModTime())
if !v {
same = selected[i].ModTime().Equal(selected[j].ModTime())
}
v = !v
} else if sortType == size {
d := selected[j].Size() - selected[i].Size()
if d > 0 {
v = true
} else if d == 0 {
same = true
}
v = !v
} else {
// strcoll?
v = strcmpi(selected[i].path, selected[j].path) == -1
}
if same {
v = strcmpi(selected[i].path, selected[j].path) == -1
}
if reverseSort {
v = !v
}
return
})
padding := 2
smallestWord := 1
var cols int
var colWidths []int
if longList {
cols = 5
colWidths = make([]int, cols)
for _, v := range selected {
li := GetLongInfo(v)
if decimalLen(int64(li.HardLinks)) > colWidths[0] {
colWidths[0] = decimalLen(int64(li.HardLinks))
}
if len(li.UserName) > colWidths[1] {
colWidths[1] = len(li.UserName)
}
if len(li.GroupName) > colWidths[2] {
colWidths[2] = len(li.GroupName)
}
if humanReadable {
if len(human(v.Size())) > colWidths[3] {
colWidths[3] = len(human(v.Size()))
}
if len(humanize.Time(v.ModTime())) > colWidths[4] {
colWidths[4] = len(humanize.Time(v.ModTime()))
}
} else {
if decimalLen(v.Size()) > colWidths[3] {
colWidths[3] = decimalLen(v.Size())
}
}
}
} else {
if oneColumn {
cols = 1
} else {
cols = width / (padding + smallestWord)
}
colWidths = make([]int, cols)
A:
for {
colWidths = colWidths[:cols]
for i := range colWidths {
colWidths[i] = 0
}
pos := (cols - 1) * padding
for i := range selected {
p := i % cols
var j int
if listBylines {
j = i
} else {
var per int
if len(selected) % cols == 0 {
per = len(selected) / cols
} else {
per = len(selected) / cols + 1
}
square := per * cols
if len(selected) <= square - per {
cols--
if cols == 0 {
cols = 1
break A
}
continue A
}
// if needed skip empty rows in last column
// lastFullRow is index of last row with all cols present
lastFullRow := (len(selected) - 1) % per
curRow := i / cols
if curRow > lastFullRow {
diff := (i - (lastFullRow + 1) * cols)
p = diff % (cols - 1)
curRow = lastFullRow + 1 + diff / (cols - 1)
}
j = (per * p) + curRow
}
v := selected[j]
if len(v.path) > colWidths[p] {
pos += len(v.path) - colWidths[p]
if pos > width {
cols--
if cols == 0 {
cols = 1
break A
}
continue A
}
colWidths[p] = len(v.path)
}
}
break
}
}
for i := range selected {
var j int
adjCols := cols
p := i % cols
if listBylines || longList {
j = i
} else {
var per int
if len(selected) % cols == 0 {
per = len(selected) / cols
} else {
per = len(selected) / cols + 1
}
lastFullRow := (len(selected) - 1) % per
curRow := i / cols
if curRow > lastFullRow {
adjCols = cols - 1
diff := (i - (lastFullRow + 1) * cols)
p = diff % (cols - 1)
curRow = lastFullRow + 1 + diff / (cols - 1)
}
j = (per * p) + curRow
}
v := selected[j]
var linkTarget string
var brokenLink bool
var linkInfo os.FileInfo
if v.Mode()&os.ModeSymlink != 0 {
if l, err := os.Readlink(root + v.path); err == nil {
linkTarget = l
if i, err := os.Stat(root + v.path); err != nil {
brokenLink = true
} else {
linkInfo = i
}
} else {
log.Print(err)
}
}
if longList {
li := GetLongInfo(v)
var timeStr string
timePad := ""
if humanReadable {
timeStr = humanize.Time(v.ModTime())
timePad = strings.Repeat(" ", colWidths[4]-len(timeStr))
} else if now.Year() == v.ModTime().Year() {
timeStr = v.ModTime().Format("Jan _2 15:04")
} else {
timeStr = v.ModTime().Format("Jan _2 2006")
}
linkPad := strings.Repeat(" ", colWidths[0]-decimalLen(int64(li.HardLinks)))
userPad := strings.Repeat(" ", colWidths[1]-len(li.UserName))
groupPad := strings.Repeat(" ", colWidths[2]-len(li.GroupName))
var sizeStr string
if humanReadable {
sizeStr = human(v.Size())
} else {
sizeStr = fmt.Sprintf("%d", v.Size())
}
sizePad := strings.Repeat(" ", colWidths[3]-len(sizeStr))
if useColor {
fmt.Printf("%s %s%d %s%s %s%s %s%s %s%s ", modeString(v.Mode()), linkPad,
li.HardLinks, li.UserName, userPad, li.GroupName, groupPad, sizePad, sizeStr, timePad, timeStr)
if brokenLink {
setColor(fileColors["or"])
} else {
setColorForFile(v.FileInfo)
}
fmt.Printf("%s", v.path)
resetColor()
if linkTarget != "" {
fmt.Printf(" -> ")
if brokenLink {
setColor(fileColors["or"])
} else {
setColorForFile(linkInfo)
}
fmt.Printf("%s", linkTarget)
resetColor()
}
fmt.Println()
} else {
name := v.path
if v.Mode()&os.ModeSymlink != 0 {
name = name + " -> " + linkTarget
}
fmt.Printf("%s %s%d %s%s %s%s %s%s %s%s %s\n", modeString(v.Mode()), linkPad,
li.HardLinks, li.UserName, userPad, li.GroupName, groupPad, sizePad, sizeStr, timePad, timeStr, name)
}
} else {
w := colWidths[p]
if p == 0 {
if i != 0 {
fmt.Println()
}
}
if useColor {
if brokenLink {
setColor(fileColors["or"])
} else {
setColorForFile(v.FileInfo)
}
}
fmt.Printf("%s", v.path)
if useColor {
resetColor()
}
if p != adjCols-1 {
fmt.Print(strings.Repeat(" ", (w-len(v.path))+padding))
}
}
}
if !longList {
fmt.Println()
}
}
func main() {
exit := 0
files := sindex.InitListType(&sindex.StringList{Data: os.Args}).(*sindex.StringList)
options := sindex.InitListType(&sindex.StringList{}).(*sindex.StringList)
files.Remove(0)
for iter := files.Iterator(0); iter.Next(); {
if v := files.Data[iter.Pos()]; strings.HasPrefix(v, "-") {
options.Data[options.Append()] = v
iter.Remove()
if v == "--" {
break
}
}
}
if files.Len() == 0 {
files.Data[files.Append()] = "."
}
if !IsTerminal(1) {
oneColumn = true
}
for iter := options.Iterator(0); iter.Next(); {
if option := options.Data[iter.Pos()]; !strings.HasPrefix(option, "--") && len(option) > 2 {
letters := sindex.InitListType(&sindex.ByteList{Data: []byte(option[1:])}).(*sindex.ByteList)
var removed bool
for iter2 := letters.Iterator(letters.Len() - 1); iter2.Prev(); {
options.Data[iter.Insert()] = "-" + string(letters.Data[iter2.Pos()])
if !removed {
iter.Remove()
removed = true
}
iter.Prev()
}
}
var helpStr = `Usage: ls [OPTION]... [FILE]...
List information about the FILEs (the current directory by default).
Sort entries alphabetically unless a sort option is given.
-a do not ignore entries starting with .
-A do not list implied . and ..
-d list directory entries instead of contents
-t sort by modification time, newest first
-S sort by file size
-r reverse order while sorting
-l use a long listing format
-h with -l, print sizes, time stamps in human readable format
-R list subdirectories recursively, sorting all files
-O only list entries starting with .
-C list entries by columns
-x list entries by lines instead of by columns
-1 list one file per line
--color[=WHEN] colorize the output WHEN defaults to 'always'
or can be "never" or "auto".
--help display this help and exit
`
option := options.Data[iter.Pos()]
switch option {
case "-d":
showDirEntries = true
case "-a":
showAll = true
case "-A":
showAlmostAll = true
showAll = true
case "-t":
sortType = modTime
case "-S":
sortType = size
case "-r":
reverseSort = true
case "-l":
longList = true
case "-h":
humanReadable = true
case "-R":
recursiveList = true
case "-O":
onlyHidden = true
case "-x":
listBylines = true
case "-C":
listBylines = false
case "-1":
oneColumn = true
case "--color":
fallthrough
case "--color=always":
useColor = true
case "--color=never":
useColor = false
case "--color=auto":
if IsTerminal(1) {
useColor = true
} else {
useColor = false
}
case "--help":
fmt.Print(helpStr)
os.Exit(0)
default:
log.Fatalf("unkown option %s", option)
}
}
if w, _, err := GetTermSize(); err == nil {
width = w
} else {
width = 80
}
if useColor {
colorBytesMap := map[string][]byte{
"di": {1, 34},
"ln": {1, 36},
"pi": {40, 33},
"so": {1, 35},
"bd": {40, 33, 1},
"cd": {40, 33, 1},
"or": {40, 31},
"su": {37, 41},
"sg": {30, 43},
"tw": {30, 42},
"ow": {34, 42},
"st": {37, 44},
"ex": {01, 32},
}
lsColorsEnv := os.Getenv("LS_COLORS")
colorDefs := strings.Split(lsColorsEnv, ":")
for _, def := range colorDefs {
tokens := strings.Split(def, "=")
if len(tokens) != 2 {
continue
}
colors := strings.Split(tokens[1], ";")
var colorBytes []byte
for _, color := range colors {
if n, err := strconv.ParseInt(color, 10, 8); err == nil {
colorBytes = append(colorBytes, byte(n))
}
}
colorBytesMap[tokens[0]] = colorBytes
}
fileColors = make(map[string]colorDef)
for k, v := range colorBytesMap {
var bright bool
var fg, bg int = 0, 0
for _, b := range v {
if b == 0 {
bright = false
} else if b == 1 {
bright = true
} else if b >= 30 && b < 38 {
fg = int(b-30) + 1
} else if b >= 40 && b < 48 {
bg = int(b-40) + 1
}
}
fileColors[k] = colorDef{byte(fg), byte(bg), bright}
}
}
selected := sindex.InitListType(&DisplayEntryList{}).(*DisplayEntryList)
for iter := files.Iterator(0); iter.Next(); {
fileName := files.Data[iter.Pos()]
if showDirEntries {
if stat, err := os.Lstat(fileName); err == nil {
selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
} else {
log.Print(err)
exit = 2
}
iter.Remove()
} else {
if stat, err := os.Lstat(fileName); err == nil {
if stat.IsDir() {
continue
} else {
selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
iter.Remove()
}
} else {
log.Print(err)
exit = 2
iter.Remove()
}
}
}
if selected.Len() > 0 && !recursiveList {
display(selected.Data, "")
}
// directories
for iter := files.Iterator(0); iter.Next(); {
fileName := files.Data[iter.Pos()]
if !recursiveList {
if selected.Len() > 0 {
selected.Clear()
fmt.Println()
fmt.Printf("%s:\n", fileName)
} else if files.Len() > 1 {
fmt.Printf("%s:\n", fileName)
}
}
var total int64 = 0
if file, err := os.Open(fileName); err == nil {
if showAll && !showAlmostAll && !recursiveList && !onlyHidden {
if stat, err := os.Stat(fileName); err == nil {
selected.Data[selected.Append()] = DisplayEntry{".", stat}
} else {
log.Print(err)
}
if parent, err := os.Stat(path.Clean(fileName + "/..")); err == nil {
selected.Data[selected.Append()] = DisplayEntry{"..", parent}
} else {
log.Print(err)
}
}
if names, err := file.Readdirnames(0); err == nil {
for _, name := range names {
isHidden := strings.HasPrefix(name, ".")
if !onlyHidden && (showAll || !isHidden) || onlyHidden && isHidden {
if v, err := os.Lstat(fileName + "/" + name); err == nil {
total += v.Size()
if recursiveList {
path := path.Clean(fileName + "/" + v.Name())
selected.Data[selected.Append()] = DisplayEntry{path, v}
if v.IsDir() {
files.Data[files.Append()] = path
}
} else {
selected.Data[selected.Append()] = DisplayEntry{v.Name(), v}
}
} else {
log.Print(err)
exit = 1
}
}
}
} else {
log.Print(err)
exit = 1
}
file.Close()
} else {
log.Print(err)
exit = 1
}
if longList && !recursiveList {
if humanReadable {
fmt.Printf("total %s\n", human(total))
} else {
fmt.Printf("total %d\n", total/1024)
}
}
if !recursiveList && selected.Len() > 0 {
display(selected.Data, fileName+"/")
}
}
if recursiveList && selected.Len() > 0 {
log.Printf("sorting/displaying")
display(selected.Data, "")
}
os.Exit(exit)
}
|
package main
import (
"os"
"github.com/timob/list"
"fmt"
"log"
"strings"
"syscall"
"unsafe"
"path"
"github.com/bradfitz/slice"
)
// DisplayEntry is a single listing line: the path text to print plus
// the embedded FileInfo it was stat'ed with.
type DisplayEntry struct {
	path string
	os.FileInfo
}
// DisplayEntryList is a growable list of DisplayEntry values managed by
// the list.Slice machinery.
type DisplayEntryList struct {
	Data []DisplayEntry
	list.Slice
}
// getTermSize queries the terminal attached to stdout for its size via
// the TIOCGWINSZ ioctl. It returns (columns, rows, nil) on success, or
// (-1, -1, err) when the ioctl fails (e.g. stdout is not a terminal).
func getTermSize() (int, int, error) {
	// winsize layout: rows, cols, xpixel, ypixel — four uint16 fields.
	var dimensions [4]uint16
	fd := os.Stdout.Fd()
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
		return -1, -1, err
	}
	// dimensions[1] is columns, dimensions[0] is rows.
	return int(dimensions[1]), int(dimensions[0]), nil
}
// strcmpi compares a and b, folding ASCII letters to upper case, and
// returns -1, 0 or 1 in strcmp style. b is walked byte-wise in step
// with a's rune positions, matching the original's semantics.
func strcmpi(a, b string) int {
	fold := func(r rune) rune {
		if r > 96 && r < 123 {
			return r - 32
		}
		return r
	}
	for i, ca := range a {
		if i >= len(b) {
			return 1
		}
		ca = fold(ca)
		cb := fold(rune(b[i]))
		switch {
		case ca > cb:
			return 1
		case ca < cb:
			return -1
		}
	}
	if len(a) < len(b) {
		return -1
	}
	return 0
}
// main implements a minimal ls clone: it separates options from file
// operands, expands grouped short options, and for each operand gathers the
// entries to display, sorts them by the selected key, and prints them either
// in terminal-width columns or (with -l) one line per entry.
//
// Supported options: -d, -a, -A, -t, -S, -r, -l; "--" ends option parsing.
//
// Fix: the fatal message for an unrecognized option read "unkown option".
func main() {
	files := list.NewSliceList(&list.StringSlice{Data: os.Args}).(*list.StringSlice)
	options := list.NewSliceList(&list.StringSlice{}).(*list.StringSlice)
	files.Remove(0) // drop the program name (argv[0])
	// Move "-..." arguments out of the operand list into options.
	for iter := files.Iterator(0); iter.Next(); {
		if v := files.Data[iter.Pos()]; strings.HasPrefix(v, "-") {
			options.Data[options.Append()] = v
			iter.Remove()
			if v == "--" {
				break
			}
		}
	}
	// With no operands, list the current directory.
	if files.Len() == 0 {
		files.Data[files.Append()] = "."
	}
	var showDirEntries bool
	var showAll bool
	var showAlmostAll bool
	var longList bool
	// Sort keys, selected with -t / -S; default is by name.
	const (
		name    int = iota
		modTime int = iota
		size    int = iota
	)
	var sortType int = name
	var reverseSort bool
	for iter := options.Iterator(0); iter.Next(); {
		// Expand grouped short options ("-la") into individual entries
		// ("-l", "-a") so the switch below sees one flag at a time.
		if option := options.Data[iter.Pos()]; !strings.HasPrefix(option, "--") && len(option) > 2 {
			letters := list.NewSliceList(&list.ByteSlice{Data: []byte(option[1:])}).(*list.ByteSlice)
			var removed bool
			for iter2 := letters.Iterator(letters.Len() - 1); iter2.Prev(); {
				options.Data[iter.Insert()] = "-" + string(letters.Data[iter2.Pos()])
				if !removed {
					iter.Remove()
					removed = true
				}
				iter.Prev()
			}
		}
		switch options.Data[iter.Pos()] {
		case "-d":
			showDirEntries = true
		case "-a":
			showAll = true
		case "-A":
			showAlmostAll = true
			showAll = true
		case "-t":
			sortType = modTime
		case "-S":
			sortType = size
		case "-r":
			reverseSort = true
		case "-l":
			longList = true
		default:
			log.Fatalf("unknown option %s", options.Data[iter.Pos()])
		}
	}
	// Column layout uses the terminal width; fall back to 80 columns when
	// stdout is not a terminal.
	var width int
	if w, _, err := getTermSize(); err == nil {
		width = w
	} else {
		width = 80
	}
	selected := list.NewSliceList(&DisplayEntryList{}).(*DisplayEntryList)
	// Each operand is collected, sorted and printed independently.
	for iter := files.Iterator(0); iter.Next(); {
		if fileName := files.Data[iter.Pos()]; showDirEntries {
			// -d: show the entry itself (Lstat: do not follow symlinks).
			if stat, err := os.Lstat(fileName); err == nil {
				selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
			} else {
				log.Print(err)
			}
		} else {
			if stat, err := os.Stat(fileName); err == nil {
				if stat.IsDir() {
					if file, err := os.Open(fileName); err == nil {
						if fileInfos, err := file.Readdir(0); err == nil {
							// -a (without -A) also lists "." and "..".
							if showAll && !showAlmostAll {
								selected.Data[selected.Append()] = DisplayEntry{".", stat}
								if parent, err := os.Stat(path.Dir(fileName)); err == nil {
									selected.Data[selected.Append()] = DisplayEntry{"..", parent}
								} else {
									log.Print(err)
								}
							}
							for _, v := range fileInfos {
								// Dotfiles are hidden unless -a/-A.
								if !strings.HasPrefix(v.Name(), ".") || showAll {
									selected.Data[selected.Append()] = DisplayEntry{v.Name(), v}
								}
							}
						} else {
							log.Print(err)
						}
					} else {
						log.Print(err)
					}
				} else {
					selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
				}
			} else {
				log.Print(err)
			}
		}
		// Sort this operand's entries. The comparator returns true when
		// entry i sorts before entry j; ties fall back to name order.
		slice.Sort(selected.Data, func(i, j int) (v bool) {
			var same bool
			if sortType == modTime {
				v = selected.Data[i].ModTime().Before(selected.Data[j].ModTime())
				if !v {
					same = selected.Data[i].ModTime().Equal(selected.Data[j].ModTime())
				}
				v = !v // -t: newest first
			} else if sortType == size {
				d := selected.Data[j].Size() - selected.Data[i].Size()
				if d > 0 {
					v = true
				} else if d == 0 {
					same = true
				}
				v = !v // -S: largest first
			} else {
				// strcoll?
				v = strcmpi(selected.Data[i].path, selected.Data[j].path) == -1
			}
			if same {
				// Equal keys: order by name; note -r does not reverse ties.
				v = strcmpi(selected.Data[i].path, selected.Data[j].path) == -1
			} else if reverseSort {
				v = !v
			}
			return
		})
		padding := 2
		smallestWord := 1
		cols := width / (padding + smallestWord)
		colWidths := make([]int, cols)
		if longList {
			cols = 1
		}
		// Shrink the column count until every row fits within the terminal
		// width; colWidths tracks the widest entry seen per column.
	A:
		for cols > 1 {
			colWidths = colWidths[:cols]
			for i := range colWidths {
				colWidths[i] = 0
			}
			pos := (cols - 1) * padding
			for i, v := range selected.Data {
				p := i % cols
				if len(v.path) > colWidths[p] {
					pos += len(v.path) - colWidths[p]
					if pos >= width {
						cols--
						continue A
					}
					colWidths[p] = len(v.path)
				}
			}
			break
		}
		for i, v := range selected.Data {
			if longList {
				// NOTE(review): owner/group ("fred") and the hard-link
				// count (1) are placeholders in this revision.
				fmt.Printf("%s %d %s %s %d %s %s\n", v.Mode(), 1, "fred", "fred", v.Size(), v.ModTime(), v.Name())
			} else {
				w := colWidths[i%cols]
				if i%cols == 0 {
					if i != 0 {
						fmt.Println()
					}
				}
				fmt.Printf("%s", v.path)
				fmt.Print(strings.Repeat(" ", (w-len(v.path))+padding))
			}
		}
		fmt.Println()
		selected.Clear()
	}
}
Implement the long (-l) listing format: resolve owner and group names, show hard-link counts, and pad columns to fit.
package main
import (
"os"
"os/user"
"github.com/timob/list"
"fmt"
"log"
"strings"
"syscall"
"unsafe"
"path"
"github.com/bradfitz/slice"
)
// DisplayEntry pairs the name to print (which may differ from
// FileInfo.Name(), e.g. "." / ".." or an explicit command-line path) with
// the entry's file metadata.
type DisplayEntry struct {
	path string
	os.FileInfo
}

// DisplayEntryList is a growable slice of DisplayEntry backed by
// list.Slice so it can be managed through the list package.
type DisplayEntryList struct {
	Data []DisplayEntry
	list.Slice
}
// getTermSize queries the TIOCGWINSZ ioctl on stdout and returns the
// terminal's width and height in character cells, or (-1, -1, err) when the
// ioctl fails (e.g. stdout is not a terminal).
func getTermSize() (int, int, error) {
	// struct winsize layout: rows, cols, xpixel, ypixel.
	var winsize [4]uint16
	_, _, errno := syscall.Syscall6(
		syscall.SYS_IOCTL,
		os.Stdout.Fd(),
		uintptr(syscall.TIOCGWINSZ),
		uintptr(unsafe.Pointer(&winsize)),
		0, 0, 0,
	)
	if errno != 0 {
		return -1, -1, errno
	}
	return int(winsize[1]), int(winsize[0]), nil
}
// decimalLen returns the number of decimal digits needed to print n,
// ignoring any minus sign. decimalLen(0) == 1.
//
// Fix: the previous version capped the count at 12, silently under-counting
// for values >= 10^12 — an int64 can require up to 19 digits, so column
// widths for very large sizes came out wrong.
func decimalLen(n int64) (i int) {
	for i = 1; n/10 != 0; i++ {
		n /= 10
	}
	return
}
// userLookupCache memoizes successful id-to-name lookups.
var userLookupCache = make(map[string]string)

// userLookUp resolves a numeric user id (as a decimal string) to the user's
// name, caching successful results. Failed lookups are not cached and
// return ("", err).
func userLookUp(id string) (string, error) {
	if name, ok := userLookupCache[id]; ok {
		return name, nil
	}
	u, err := user.LookupId(id)
	if err != nil {
		return "", err
	}
	userLookupCache[id] = u.Name
	return u.Name, nil
}
// longInfo holds the extra per-entry fields shown in long (-l) listings.
type longInfo struct {
	userName, groupName string
	hardLinks           int
}

// getLongInfo extracts the owner name, group name and hard-link count from
// the entry's underlying syscall.Stat_t. When a name cannot be resolved the
// numeric id is kept.
//
// Fix: the group name was previously resolved through the *user* database
// (via userLookUp -> user.LookupId), so a gid that happened to match a uid
// produced the wrong name; it now queries the group database with
// user.LookupGroupId.
//
// NOTE(review): the Stat_t type assertion is Unix-specific and panics on
// platforms where Sys() returns a different type — confirm target platforms.
func getLongInfo(info os.FileInfo) *longInfo {
	stat := info.Sys().(*syscall.Stat_t)
	userName := fmt.Sprintf("%d", stat.Uid)
	if u, err := userLookUp(userName); err == nil {
		userName = u
	}
	group := fmt.Sprintf("%d", stat.Gid)
	if g, err := user.LookupGroupId(group); err == nil {
		group = g.Name
	}
	return &longInfo{userName, group, int(stat.Nlink)}
}
// strcmpi compares a and b ASCII-case-insensitively, returning -1, 0 or 1
// in the manner of strcmp. A string that is a proper prefix of the other
// sorts first.
//
// Fix: the previous version ranged over a by rune while indexing b by byte,
// so the two strings fell out of step whenever a contained a multibyte
// UTF-8 sequence. Comparing byte-by-byte keeps the indices aligned and
// preserves the original behavior for ASCII input. The magic numbers
// 96/123/32 are replaced with character literals.
func strcmpi(a, b string) int {
	for i := 0; i < len(a); i++ {
		if i >= len(b) {
			// a is longer and equal so far: a sorts after b.
			return 1
		}
		av, bv := a[i], b[i]
		// Fold ASCII lowercase to uppercase; other bytes compare as-is.
		if av >= 'a' && av <= 'z' {
			av -= 'a' - 'A'
		}
		if bv >= 'a' && bv <= 'z' {
			bv -= 'a' - 'A'
		}
		if av != bv {
			if av > bv {
				return 1
			}
			return -1
		}
	}
	if len(b) > len(a) {
		return -1
	}
	return 0
}
// main implements a minimal ls clone: it separates options from file
// operands, expands grouped short options, and for each operand gathers the
// entries to display, sorts them by the selected key, and prints them either
// in terminal-width columns or (with -l) one fully-resolved long-format line
// per entry (mode, link count, owner, group, size, mtime, name).
//
// Supported options: -d, -a, -A, -t, -S, -r, -l; "--" ends option parsing.
//
// Fix: the fatal message for an unrecognized option read "unkown option".
func main() {
	files := list.NewSliceList(&list.StringSlice{Data: os.Args}).(*list.StringSlice)
	options := list.NewSliceList(&list.StringSlice{}).(*list.StringSlice)
	files.Remove(0) // drop the program name (argv[0])
	// Move "-..." arguments out of the operand list into options.
	for iter := files.Iterator(0); iter.Next(); {
		if v := files.Data[iter.Pos()]; strings.HasPrefix(v, "-") {
			options.Data[options.Append()] = v
			iter.Remove()
			if v == "--" {
				break
			}
		}
	}
	// With no operands, list the current directory.
	if files.Len() == 0 {
		files.Data[files.Append()] = "."
	}
	var showDirEntries bool
	var showAll bool
	var showAlmostAll bool
	var longList bool
	// Sort keys, selected with -t / -S; default is by name.
	const (
		name    int = iota
		modTime int = iota
		size    int = iota
	)
	var sortType int = name
	var reverseSort bool
	for iter := options.Iterator(0); iter.Next(); {
		// Expand grouped short options ("-la") into individual entries
		// ("-l", "-a") so the switch below sees one flag at a time.
		if option := options.Data[iter.Pos()]; !strings.HasPrefix(option, "--") && len(option) > 2 {
			letters := list.NewSliceList(&list.ByteSlice{Data: []byte(option[1:])}).(*list.ByteSlice)
			var removed bool
			for iter2 := letters.Iterator(letters.Len() - 1); iter2.Prev(); {
				options.Data[iter.Insert()] = "-" + string(letters.Data[iter2.Pos()])
				if !removed {
					iter.Remove()
					removed = true
				}
				iter.Prev()
			}
		}
		switch options.Data[iter.Pos()] {
		case "-d":
			showDirEntries = true
		case "-a":
			showAll = true
		case "-A":
			showAlmostAll = true
			showAll = true
		case "-t":
			sortType = modTime
		case "-S":
			sortType = size
		case "-r":
			reverseSort = true
		case "-l":
			longList = true
		default:
			log.Fatalf("unknown option %s", options.Data[iter.Pos()])
		}
	}
	// Column layout uses the terminal width; fall back to 80 columns when
	// stdout is not a terminal.
	var width int
	if w, _, err := getTermSize(); err == nil {
		width = w
	} else {
		width = 80
	}
	selected := list.NewSliceList(&DisplayEntryList{}).(*DisplayEntryList)
	// Each operand is collected, sorted and printed independently.
	for iter := files.Iterator(0); iter.Next(); {
		if fileName := files.Data[iter.Pos()]; showDirEntries {
			// -d: show the entry itself (Lstat: do not follow symlinks).
			if stat, err := os.Lstat(fileName); err == nil {
				selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
			} else {
				log.Print(err)
			}
		} else {
			if stat, err := os.Stat(fileName); err == nil {
				if stat.IsDir() {
					if file, err := os.Open(fileName); err == nil {
						if fileInfos, err := file.Readdir(0); err == nil {
							// -a (without -A) also lists "." and "..".
							if showAll && !showAlmostAll {
								selected.Data[selected.Append()] = DisplayEntry{".", stat}
								if parent, err := os.Stat(path.Dir(fileName)); err == nil {
									selected.Data[selected.Append()] = DisplayEntry{"..", parent}
								} else {
									log.Print(err)
								}
							}
							for _, v := range fileInfos {
								// Dotfiles are hidden unless -a/-A.
								if !strings.HasPrefix(v.Name(), ".") || showAll {
									selected.Data[selected.Append()] = DisplayEntry{v.Name(), v}
								}
							}
						} else {
							log.Print(err)
						}
					} else {
						log.Print(err)
					}
				} else {
					selected.Data[selected.Append()] = DisplayEntry{fileName, stat}
				}
			} else {
				log.Print(err)
			}
		}
		// Sort this operand's entries. The comparator returns true when
		// entry i sorts before entry j; ties fall back to name order.
		slice.Sort(selected.Data, func(i, j int) (v bool) {
			var same bool
			if sortType == modTime {
				v = selected.Data[i].ModTime().Before(selected.Data[j].ModTime())
				if !v {
					same = selected.Data[i].ModTime().Equal(selected.Data[j].ModTime())
				}
				v = !v // -t: newest first
			} else if sortType == size {
				d := selected.Data[j].Size() - selected.Data[i].Size()
				if d > 0 {
					v = true
				} else if d == 0 {
					same = true
				}
				v = !v // -S: largest first
			} else {
				// strcoll?
				v = strcmpi(selected.Data[i].path, selected.Data[j].path) == -1
			}
			if same {
				// Equal keys: order by name; note -r does not reverse ties.
				v = strcmpi(selected.Data[i].path, selected.Data[j].path) == -1
			} else if reverseSort {
				v = !v
			}
			return
		})
		padding := 2
		smallestWord := 1
		var cols int
		var colWidths []int
		if longList {
			// Long listing: four padded numeric/text columns (links,
			// owner, group, size) sized to the widest value of each.
			// NOTE(review): getLongInfo is called again per entry in the
			// print loop below — consider caching if listings are large.
			cols = 4
			colWidths = make([]int, cols)
			for _, v := range selected.Data {
				li := getLongInfo(v)
				if decimalLen(int64(li.hardLinks)) > colWidths[0] {
					colWidths[0] = decimalLen(int64(li.hardLinks))
				}
				if len(li.userName) > colWidths[1] {
					colWidths[1] = len(li.userName)
				}
				if len(li.groupName) > colWidths[2] {
					colWidths[2] = len(li.groupName)
				}
				if decimalLen(v.Size()) > colWidths[3] {
					colWidths[3] = decimalLen(v.Size())
				}
			}
		} else {
			// Shrink the column count until every row fits within the
			// terminal width; colWidths tracks the widest entry per column.
			cols = width / (padding + smallestWord)
			colWidths = make([]int, cols)
		A:
			for cols > 1 {
				colWidths = colWidths[:cols]
				for i := range colWidths {
					colWidths[i] = 0
				}
				pos := (cols - 1) * padding
				for i, v := range selected.Data {
					p := i % cols
					if len(v.path) > colWidths[p] {
						pos += len(v.path) - colWidths[p]
						if pos >= width {
							cols--
							continue A
						}
						colWidths[p] = len(v.path)
					}
				}
				break
			}
		}
		for i, v := range selected.Data {
			if longList {
				li := getLongInfo(v)
				timeStr := v.ModTime().Format("Jan _2 15:04")
				linkPad := strings.Repeat(" ", colWidths[0]-decimalLen(int64(li.hardLinks)))
				userPad := strings.Repeat(" ", colWidths[1]-len(li.userName))
				groupPad := strings.Repeat(" ", colWidths[2]-len(li.groupName))
				sizePad := strings.Repeat(" ", colWidths[3]-decimalLen(v.Size()))
				fmt.Printf("%s %s %d %s %s %s %s %s %d %s %s\n", v.Mode(), linkPad, li.hardLinks, li.userName, userPad, li.groupName, groupPad, sizePad, v.Size(), timeStr, v.path)
			} else {
				w := colWidths[i%cols]
				if i%cols == 0 {
					if i != 0 {
						fmt.Println()
					}
				}
				fmt.Printf("%s", v.path)
				fmt.Print(strings.Repeat(" ", (w-len(v.path))+padding))
			}
		}
		fmt.Println()
		selected.Clear()
	}
}
|
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package model
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sync"
)
// init pushes the mtime of the pre-made temp file in testdata an hour into
// the future so it is not considered stale and cleaned up mid-test.
func init() {
	// We do this to make sure that the temp file required for the tests does
	// not get removed during the tests.
	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", defTempNamer.TempName("file")), future, future)
	if err != nil {
		// Without the temp file fixture the tests below cannot run.
		panic(err)
	}
}
// blocks is the shared block fixture used by all tests in this file:
// index 0 is the all-zero block, indices 1-8 are consecutive 128 KiB
// (0x20000) regions with precomputed hashes.
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

// folders is the folder list used when querying the block finder.
var folders = []string{"default"}
// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
// TestHandleFile verifies that handleFile schedules all eight required
// blocks for the copier when no temp file exists on disk.
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	// Buffered so handleFile can complete without a copier running.
	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for i, block := range toCopy.blocks {
		if string(block.Hash) != string(blocks[i+1].Hash) {
			t.Errorf("Block mismatch: %s != %s", block.String(), blocks[i+1].String())
		}
	}
}
// TestHandleFileWithTemp verifies that blocks already present in the
// on-disk temp file fixture are excluded from the copy/pull schedule.
func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "file",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	// Buffered so handleFile can complete without a copier running.
	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for i, eq := range []int{1, 5, 6, 8} {
		if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String())
		}
	}
}
// TestCopierFinder runs a real copier routine and checks that blocks found
// in other local files are copied into the temp file while the remainder
// are queued for pulling.
//
// Fix: the failure message for data left on pullChan wrongly named the
// finisher channel.
func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file
	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8
	tempFile := filepath.Join("testdata", defTempNamer.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     defTempNamer.TempName("file"),
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[3], blocks[4],
			blocks[0], blocks[0], blocks[7], blocks[0],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Verify that the blocks we say exist on file, really exist in the db.
	for _, idx := range []int{2, 3, 4, 7} {
		if m.finder.Iterate(folders, blocks[idx].Hash, iterFn) == false {
			t.Error("Didn't find block")
		}
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	// Both channels must now be fully drained.
	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list
	for i, eq := range []int{1, 5, 6, 8} {
		if string(pulls[i].block.Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, pulls[i].block.String(), blocks[eq].String())
		}
		if string(finish.file.Blocks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, finish.file.Blocks[eq-1].String(), blocks[eq].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(tempFile, protocol.BlockSize, 0, nil)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
	finish.fd.Close()

	os.Remove(tempFile)
}
// Test that updating a file removes its old blocks from the blockmap.
//
// Fix: the final two t.Error messages were swapped relative to their
// conditions ("Unexpected block found" fired when a block was missing and
// vice versa), making failures misleading.
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	// Create a file
	file := protocol.FileInfo{
		Name:     "test",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}

	// Add file to index
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
// TestLastResortPulling indexes an intentionally wrong block hash for an
// existing file, then confirms the copier detects the mismatch while
// re-hashing, corrects the block map, and falls back to pulling.
func TestLastResortPulling(t *testing.T) {
	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	// Add a file to index (with the incorrect block representation, as content
	// doesn't actually match the block list)
	file := protocol.FileInfo{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}
	m.updateLocals("default", []protocol.FileInfo{file})

	// Pretend that we are handling a new file of the same content but
	// with a different name (causing to copy that particular block)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Check that that particular block is there
	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Copier should hash empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}
	// Close the temp file handle and remove the leftover temp file.
	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}
// TestDeregisterOnFailInCopy forces a failure during the copy phase and
// verifies the file ends up deregistered from both the job queue and the
// progress emitter, and that finishing the same state twice is harmless.
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db := db.OpenMemory()

	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	// All channels unbuffered so the copier blocks at each hand-off.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan
	// Wait until copier is trying to pass something down to the puller again
	// NOTE(review): sleep-based synchronization; potentially flaky under load.
	time.Sleep(100 * time.Millisecond)
	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)
	// Unblock copier
	<-pullChan

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
// TestDeregisterOnFailInPull is the pull-phase counterpart of
// TestDeregisterOnFailInCopy: with no peer to pull from, the puller must
// fail the file and the finisher must deregister it everywhere.
func TestDeregisterOnFailInPull(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db := db.OpenMemory()

	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	// All channels unbuffered so each stage blocks at its hand-off.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
Refactor rwfolder tests
This creates a few utility functions to avoid repetition and removes
some redundant checks.
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package model
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sync"
)
// init pushes the mtime of the pre-made temp file in testdata an hour into
// the future so it is not considered stale and cleaned up mid-test.
func init() {
	// We do this to make sure that the temp file required for the tests does
	// not get removed during the tests.
	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", defTempNamer.TempName("file")), future, future)
	if err != nil {
		// Without the temp file fixture the tests below cannot run.
		panic(err)
	}
}
// blocks is the shared block fixture used by all tests in this file:
// index 0 is the all-zero block, indices 1-8 are consecutive 128 KiB
// (0x20000) regions with precomputed hashes.
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

// folders is the folder list used when querying the block finder.
var folders = []string{"default"}
// setUpFile builds a protocol.FileInfo named filename whose block list is
// blocks[n] for each n in blockNumbers, in order.
func setUpFile(filename string, blockNumbers []int) protocol.FileInfo {
	fileBlocks := make([]protocol.BlockInfo, 0, len(blockNumbers))
	for _, n := range blockNumbers {
		fileBlocks = append(fileBlocks, blocks[n])
	}
	return protocol.FileInfo{
		Name:     filename,
		Flags:    0,
		Modified: 0,
		Blocks:   fileBlocks,
	}
}
// setUpModel creates a model backed by an in-memory database, adds the
// default folder and indexes the given file under "default".
func setUpModel(file protocol.FileInfo) *Model {
	// Name the database variable so it does not shadow the db package.
	memDB := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", memDB, nil)
	m.AddFolder(defaultFolderConfig)
	m.updateLocals("default", []protocol.FileInfo{file})
	return m
}
// setUpRwFolder returns a minimal rwFolder for the "default" folder, rooted
// at the testdata directory and wired to the given model.
func setUpRwFolder(model *Model) rwFolder {
	return rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     model,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
}
// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
// TestHandleFile verifies that handleFile schedules all eight required
// blocks for the copier when no temp file exists on disk.
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	p := setUpRwFolder(m)

	// Buffered so handleFile can complete without a copier running.
	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for i, block := range toCopy.blocks {
		if string(block.Hash) != string(blocks[i+1].Hash) {
			t.Errorf("Block mismatch: %s != %s", block.String(), blocks[i+1].String())
		}
	}
}
// TestHandleFileWithTemp verifies that blocks already present in the
// on-disk temp file fixture are excluded from the copy/pull schedule.
func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6
	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setUpFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m := setUpModel(existingFile)
	p := setUpRwFolder(m)

	// Buffered so handleFile can complete without a copier running.
	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for i, eq := range []int{1, 5, 6, 8} {
		if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String())
		}
	}
}
// TestCopierFinder runs a real copier routine and checks that blocks found
// in other local files are copied into the temp file while the remainder
// are queued for pulling.
//
// Fix: the failure message for data left on pullChan wrongly named the
// finisher channel.
func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file
	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8
	tempFile := filepath.Join("testdata", defTempNamer.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setUpFile(defTempNamer.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m := setUpModel(existingFile)
	p := setUpRwFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	// Both channels must now be fully drained.
	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list
	for i, eq := range []int{1, 5, 6, 8} {
		if string(pulls[i].block.Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, pulls[i].block.String(), blocks[eq].String())
		}
		if string(finish.file.Blocks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, finish.file.Blocks[eq-1].String(), blocks[eq].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(tempFile, protocol.BlockSize, 0, nil)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
	finish.fd.Close()

	os.Remove(tempFile)
}
// Test that updating a file removes its old blocks from the blockmap
// TestCopierCleanup verifies that updating a file's block list in the index
// removes the old blocks from the block map and registers the new ones.
func TestCopierCleanup(t *testing.T) {
	// iterFn just confirms a block with the given hash is registered.
	iterFn := func(folder, file string, index int32) bool {
		return true
	}
	// Create a file whose only block is blocks[0].
	file := setUpFile("test", []int{0})
	m := setUpModel(file)
	// Replace the content with blocks[1] and bump the version.
	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}
	// Swap back to blocks[0] and update the index again.
	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})
	// BUG FIX: these two error messages were previously swapped with each
	// other, reporting the opposite of what each condition detects.
	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}
	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	// Add a file to index (with the incorrect block representation, as content
	// doesn't actually match the block list)
	file := setUpFile("empty", []int{0})
	m := setUpModel(file)
	// Pretend that we are handling a new file of the same content but
	// with a different name (causing to copy that particular block)
	file.Name = "newfile"
	iterFn := func(folder, file string, index int32) bool {
		return true
	}
	p := setUpRwFolder(m)
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)
	// Run a single copier routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)
	p.handleFile(file, copyChan, finisherChan)
	// Copier should hash empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan
	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}
	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}
	// Drain the finisher and close the temp file before removing it.
	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}
// TestDeregisterOnFailInCopy checks that a failure injected while a file is
// being copied makes the finisher close the temp file and deregister the
// file from both the job queue and the progress emitter, and that running
// the same state through the finisher twice has no further effect.
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()
	p := rwFolder{
		folder: "default",
		dir: "testdata",
		model: m,
		queue: newJobQueue(),
		progressEmitter: emitter,
		errors: make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()
	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}
	// All channels unbuffered so we control exactly when each stage advances.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)
	p.handleFile(file, copyChan, finisherChan)
	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan
	// Wait until copier is trying to pass something down to the puller again
	time.Sleep(100 * time.Millisecond)
	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)
	// Unblock copier
	<-pullChan
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}
		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)
		// A nil fd means the finisher closed the temp file.
		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}
		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)
		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
// TestDeregisterOnFailInPull checks the same deregistration behavior as
// TestDeregisterOnFailInCopy, but with the failure arising naturally in the
// puller (which has no peer to pull missing blocks from).
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()
	p := rwFolder{
		folder: "default",
		dir: "testdata",
		model: m,
		queue: newJobQueue(),
		progressEmitter: emitter,
		errors: make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()
	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)
	p.handleFile(file, copyChan, finisherChan)
	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}
		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)
		// A nil fd means the finisher closed the temp file.
		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}
		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)
		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
|
package levelmu
import (
"sync"
)
// LevelMutex is a mutex that can be taken from a chosen level upwards:
// LevelLock(n) locks mutexes n..len-1, and Unlock releases whatever the most
// recent lock call took.
type LevelMutex struct {
	mus []sync.Mutex
	// Protected by the very last mutex.
	lastLevel int
}
// Init allocates the given number of lock levels. It must be called exactly
// once, before any locking; calling it twice panics.
func (lm *LevelMutex) Init(levels int) {
	if lm.mus == nil {
		lm.mus = make([]sync.Mutex, levels)
		return
	}
	panic("level mutex already initialized")
}
// Lock takes every level, equivalent to LevelLock(0). Together with Unlock
// it gives LevelMutex the standard sync.Locker shape.
func (lm *LevelMutex) Lock() {
	lm.LevelLock(0)
}
// Unlock releases, from the deepest level downwards to the level of the
// preceding Lock/LevelLock call, every mutex that call took.
func (lm *LevelMutex) Unlock() {
	// Snapshot lastLevel while we still hold the final mutex that guards it;
	// once that mutex is released the field may be rewritten by another
	// locker.
	stop := lm.lastLevel
	for i := len(lm.mus) - 1; i >= stop; i-- {
		lm.mus[i].Unlock()
	}
}
// LevelLock locks the mutex from the given level (inclusive) up through the
// highest configured level. Panics if level is at or beyond the configured
// level count.
func (lm *LevelMutex) LevelLock(level int) {
	if level >= len(lm.mus) {
		panic("lock level exceeds configured level count")
	}
	// Always acquire in ascending order so concurrent LevelLock calls at
	// different levels cannot deadlock against each other.
	for l := level; l < len(lm.mus); l++ {
		lm.mus[l].Lock()
	}
	// Safe to record here: we now hold the last mutex, which guards lastLevel.
	lm.lastLevel = level
}
util/levelmu is no longer in use
|
package rocserv
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"gitlab.pri.ibanyu.com/middleware/seaweed/xcontext"
"gitlab.pri.ibanyu.com/middleware/seaweed/xlog"
"gitlab.pri.ibanyu.com/middleware/seaweed/xtrace"
"github.com/uber/jaeger-client-go"
)
const (
TrafficLogID = "TRAFFIC"
TrafficLogKeyUID = "uid"
TrafficLogKeyGroup = "group"
TrafficLogKeyTraceID = "tid"
TrafficLogKeySpanID = "sid"
TrafficLogKeyParentSpanID = "pid"
TrafficLogKeyOperation = "op"
TrafficLogKeyCaller = "caller"
TrafficLogKeyServerType = "stype"
TrafficLogKeyServerID = "srvid"
TrafficLogKeyServerName = "sname"
)
// httpTrafficLogMiddleware wraps next so every request is traffic-logged
// before the business handler runs.
func httpTrafficLogMiddleware(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		// Emit the traffic log entry first, then delegate to the handler.
		logTrafficForHttpServer(r.Context())
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// trafficKVFromContext collects the traffic-log fields available from ctx:
// uid, route group, caller server name (when present) and — when a jaeger
// span is attached — the operation name plus trace/span/parent-span IDs.
func trafficKVFromContext(ctx context.Context) (kv map[string]interface{}) {
	kv = map[string]interface{}{}
	kv[TrafficLogKeyUID], _ = xcontext.GetUID(ctx)
	kv[TrafficLogKeyGroup] = xcontext.GetControlRouteGroupWithDefault(ctx, xcontext.DefaultGroup)
	if callerName, ok := xcontext.GetControlCallerServerName(ctx); ok {
		kv[TrafficLogKeyCaller] = callerName
	}
	span := xtrace.SpanFromContext(ctx)
	if span == nil {
		return
	}
	// Only jaeger spans expose the IDs we want to log; other tracer
	// implementations are silently skipped.
	if jaegerSpan, ok := span.(*jaeger.Span); ok {
		jaegerSpanCtx, ok := jaegerSpan.Context().(jaeger.SpanContext)
		if !ok {
			return
		}
		kv[TrafficLogKeyOperation] = jaegerSpan.OperationName()
		kv[TrafficLogKeyTraceID] = fmt.Sprint(jaegerSpanCtx.TraceID())
		kv[TrafficLogKeySpanID] = fmt.Sprint(jaegerSpanCtx.SpanID())
		kv[TrafficLogKeyParentSpanID] = fmt.Sprint(jaegerSpanCtx.ParentID())
	}
	return
}
// logTrafficForHttpServer writes one traffic log line for an incoming HTTP
// request, tagging the entry with the "http" server type.
func logTrafficForHttpServer(ctx context.Context) {
	kv := map[string]interface{}{TrafficLogKeyServerType: "http"}
	for k, v := range trafficKVFromContext(ctx) {
		kv[k] = v
	}
	logTrafficByKV(ctx, kv)
}
// serviceFromServPath returns the service name, i.e. the last segment of a
// slash-separated service path.
func serviceFromServPath(spath string) string {
	// The name is everything after the final '/'; with no slash the whole
	// path is the name (this also covers the empty string).
	if i := strings.LastIndex(spath, "/"); i >= 0 {
		return spath[i+1:]
	}
	return spath
}
// logTrafficByKV emits one traffic log line: the TRAFFIC marker, a tab, then
// the JSON-encoded key/value pairs.
func logTrafficByKV(ctx context.Context, kv map[string]interface{}) {
	// Best effort: a marshal error is deliberately ignored and simply
	// produces an empty payload.
	bs, _ := json.Marshal(kv)
	xlog.Infof(ctx, "%s\t%s", TrafficLogID, string(bs))
}
remove logs in http middleware
package rocserv
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"gitlab.pri.ibanyu.com/middleware/seaweed/xcontext"
"gitlab.pri.ibanyu.com/middleware/seaweed/xlog"
"gitlab.pri.ibanyu.com/middleware/seaweed/xtrace"
"github.com/uber/jaeger-client-go"
)
const (
TrafficLogID = "TRAFFIC"
TrafficLogKeyUID = "uid"
TrafficLogKeyGroup = "group"
TrafficLogKeyTraceID = "tid"
TrafficLogKeySpanID = "sid"
TrafficLogKeyParentSpanID = "pid"
TrafficLogKeyOperation = "op"
TrafficLogKeyCaller = "caller"
TrafficLogKeyServerType = "stype"
TrafficLogKeyServerID = "srvid"
TrafficLogKeyServerName = "sname"
)
func httpTrafficLogMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// NOTE: log before handling business logic, too many useless logs, remove it
// logTrafficForHttpServer(r.Context())
next.ServeHTTP(w, r)
})
}
// trafficKVFromContext collects the traffic-log fields available from ctx:
// uid, route group, caller server name (when present) and — when a jaeger
// span is attached — the operation name plus trace/span/parent-span IDs.
// NOTE(review): with the middleware's logging call disabled, this appears to
// be currently unused by the HTTP path — confirm other callers before removal.
func trafficKVFromContext(ctx context.Context) (kv map[string]interface{}) {
	kv = map[string]interface{}{}
	kv[TrafficLogKeyUID], _ = xcontext.GetUID(ctx)
	kv[TrafficLogKeyGroup] = xcontext.GetControlRouteGroupWithDefault(ctx, xcontext.DefaultGroup)
	if callerName, ok := xcontext.GetControlCallerServerName(ctx); ok {
		kv[TrafficLogKeyCaller] = callerName
	}
	span := xtrace.SpanFromContext(ctx)
	if span == nil {
		return
	}
	// Only jaeger spans expose the IDs we want to log.
	if jaegerSpan, ok := span.(*jaeger.Span); ok {
		jaegerSpanCtx, ok := jaegerSpan.Context().(jaeger.SpanContext)
		if !ok {
			return
		}
		kv[TrafficLogKeyOperation] = jaegerSpan.OperationName()
		kv[TrafficLogKeyTraceID] = fmt.Sprint(jaegerSpanCtx.TraceID())
		kv[TrafficLogKeySpanID] = fmt.Sprint(jaegerSpanCtx.SpanID())
		kv[TrafficLogKeyParentSpanID] = fmt.Sprint(jaegerSpanCtx.ParentID())
	}
	return
}
// logTrafficForHttpServer writes one traffic log line for an incoming HTTP
// request, tagging it with the "http" server type.
// NOTE(review): its only visible call site (in the middleware) is commented
// out, so this looks like dead code — confirm before deleting.
func logTrafficForHttpServer(ctx context.Context) {
	kv := make(map[string]interface{})
	kv[TrafficLogKeyServerType] = "http"
	for k, v := range trafficKVFromContext(ctx) {
		kv[k] = v
	}
	logTrafficByKV(ctx, kv)
}
// serviceFromServPath returns the service name, i.e. the last segment of a
// slash-separated service path.
func serviceFromServPath(spath string) string {
	// Splitting on a non-empty separator always yields at least one element,
	// so indexing the final part is safe even for an empty input.
	segments := strings.Split(spath, "/")
	last := len(segments) - 1
	return segments[last]
}
// logTrafficByKV emits one traffic log line: the TRAFFIC marker, a tab, then
// the JSON-encoded key/value pairs.
func logTrafficByKV(ctx context.Context, kv map[string]interface{}) {
	// Best effort: a marshal error is deliberately ignored.
	bs, _ := json.Marshal(kv)
	xlog.Infof(ctx, "%s\t%s", TrafficLogID, string(bs))
}
|
// Package mailtoweb returns the webmail URL related to an email address.
//
// It can be used in web apps for a better user experience, by providing a direct link to the user's webmail from their email address, after a subscription for example.
package mailtoweb
import "strings"
// relations maps a distinctive fragment of an email address to the URL of
// the corresponding webmail service.
var relations = map[string]string{
	"@gmail.": "https://mail.google.com/",
}

// For returns the webmail URL related to the email address.
// If unknown, result is empty.
func For(email string) string {
	for fragment, webmail := range relations {
		if strings.Contains(email, fragment) {
			return webmail
		}
	}
	return ""
}
Update GoDoc
/*
Package mailtoweb returns the webmail URL related to an email address.
It can be used in web apps for a better user experience, by providing a direct link to the user's webmail from their email address, after a subscription for example.
Installation
In the terminal:
$ go get github.com/whitedevops/mailtoweb
Usage
Example:
package main
import (
"github.com/whitedevops/mailtoweb"
)
func main() {
mailtoweb.For("user@gmail.com")
}
*/
package mailtoweb
import "strings"
// relations links address fragments to their provider's webmail URL.
var relations = map[string]string{
	"@gmail.": "https://mail.google.com/",
}

// For returns the webmail URL related to the email address.
// If unknown, result is empty.
func For(email string) string {
	result := ""
	for marker, webmail := range relations {
		if strings.Contains(email, marker) {
			result = webmail
			break
		}
	}
	return result
}
|
package main
import (
"fmt"
"github.com/nevon/gogel"
"strings"
)
// main parses a small HTML document with gogel and pretty-prints the
// resulting node tree to stdout.
// Improvement: removed the commented-out dead code that manually built a
// node tree — it duplicated what Parse now demonstrates.
func main() {
	doc := "<html><body><h1>Hello friends</h1><div id='myDiv'><p>This is a paragraph</p><p>Followed by another paragraph.</p></div></body></html>"
	tree := gogel.Parse(doc)
	Walk(tree[0], 0)
}
// Walk prints n indented by its nesting level, then recurses into each of
// its children one level deeper.
func Walk(n gogel.Node, level int) {
	fmt.Println(strings.Repeat(" ", level) + n.ToString())
	for _, child := range n.GetChildren() {
		Walk(child, level+1)
	}
}
Removed dead code.
package main
import (
"fmt"
"github.com/nevon/gogel"
"strings"
)
// main parses a fixed HTML snippet with gogel and pretty-prints the node
// tree starting from the first root node.
func main() {
	doc := "<html><body><h1>Hello friends</h1><div id='myDiv'><p>This is a paragraph</p><p>Followed by another paragraph.</p></div></body></html>"
	tree := gogel.Parse(doc)
	Walk(tree[0], 0)
}
// Walk prints n indented by its nesting level, then recurses into each of
// its children one level deeper.
func Walk(n gogel.Node, level int) {
	out := []string{
		strings.Repeat(" ", level),
		n.ToString(),
	}
	fmt.Println(strings.Join(out, ""))
	for _, child := range n.GetChildren() {
		Walk(child, level+1)
	}
}
|
package pgxpool
import (
"context"
"fmt"
"math/rand"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/puddle/v2"
)
// Pool-level defaults applied by ParseConfig when the corresponding pool_*
// runtime parameter is absent from the connection string.
var defaultMaxConns = int32(4)
var defaultMinConns = int32(0)
var defaultMaxConnLifetime = time.Hour
var defaultMaxConnIdleTime = time.Minute * 30
var defaultHealthCheckPeriod = time.Minute
// connResource couples a single pgx.Conn with pre-allocated scratch slices
// for the wrapper values handed out to callers, so acquiring a connection or
// starting a query does not normally allocate.
type connResource struct {
	conn *pgx.Conn
	conns []Conn
	poolRows []poolRow
	poolRowss []poolRows
}
// getConn hands out a *Conn backed by cr's scratch slice, regrowing the
// slice in batches of 128 when it runs empty.
func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
	if len(cr.conns) == 0 {
		cr.conns = make([]Conn, 128)
	}
	c := &cr.conns[len(cr.conns)-1]
	// Shrink the slice so the element c points at is not handed out again.
	cr.conns = cr.conns[0 : len(cr.conns)-1]
	c.res = res
	c.p = p
	return c
}
// getPoolRow hands out a *poolRow wrapping r, using the same batch-allocated
// scratch-slice scheme as getConn.
func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
	if len(cr.poolRows) == 0 {
		cr.poolRows = make([]poolRow, 128)
	}
	pr := &cr.poolRows[len(cr.poolRows)-1]
	cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
	pr.c = c
	pr.r = r
	return pr
}
// getPoolRows hands out a *poolRows wrapping r, using the same
// batch-allocated scratch-slice scheme as getConn.
func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
	if len(cr.poolRowss) == 0 {
		cr.poolRowss = make([]poolRows, 128)
	}
	pr := &cr.poolRowss[len(cr.poolRowss)-1]
	cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
	pr.c = c
	pr.r = r
	return pr
}
// Pool allows for connection reuse.
type Pool struct {
	p *puddle.Pool[*connResource]
	config *Config
	// Lifecycle hooks copied from Config at construction time.
	beforeConnect func(context.Context, *pgx.ConnConfig) error
	afterConnect func(context.Context, *pgx.Conn) error
	beforeAcquire func(context.Context, *pgx.Conn) bool
	afterRelease func(*pgx.Conn) bool
	minConns int32
	maxConns int32
	maxConnLifetime time.Duration
	maxConnLifetimeJitter time.Duration
	maxConnIdleTime time.Duration
	healthCheckPeriod time.Duration
	// healthCheckChan wakes the background health check early; it is
	// buffered (size 1) so pending triggers coalesce instead of blocking.
	healthCheckChan chan struct{}
	// Counters surfaced via Stat; read and written atomically.
	newConnsCount int64
	lifetimeDestroyCount int64
	idleDestroyCount int64
	closeOnce sync.Once
	closeChan chan struct{}
}
// Config is the configuration struct for creating a pool. It must be created by ParseConfig and then it can be
// modified. A manually initialized ConnConfig will cause ConnectConfig to panic.
type Config struct {
	// ConnConfig is the per-connection configuration; the pool copies it
	// for every new connection it dials.
	ConnConfig *pgx.ConnConfig
	// BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
	// will not impact any existing open connections.
	BeforeConnect func(context.Context, *pgx.ConnConfig) error
	// AfterConnect is called after a connection is established, but before it is added to the pool.
	AfterConnect func(context.Context, *pgx.Conn) error
	// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
	// acquisition or false to indicate that the connection should be destroyed and a different connection should be
	// acquired.
	BeforeAcquire func(context.Context, *pgx.Conn) bool
	// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
	// return the connection to the pool or false to destroy the connection.
	AfterRelease func(*pgx.Conn) bool
	// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
	MaxConnLifetime time.Duration
	// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
	// This helps prevent all connections from being closed at the exact same time, starving the pool.
	MaxConnLifetimeJitter time.Duration
	// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
	MaxConnIdleTime time.Duration
	// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
	MaxConns int32
	// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
	// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
	// to create new connections.
	MinConns int32
	// HealthCheckPeriod is the duration between checks of the health of idle connections.
	HealthCheckPeriod time.Duration
	createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
// Copy returns a deep copy of the config that is safe to use and modify.
// The only exception is the tls.Config:
// according to the tls.Config docs it must not be modified after creation.
func (c *Config) Copy() *Config {
	cp := *c
	cp.ConnConfig = c.ConnConfig.Copy()
	return &cp
}
// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
// (It is stored on, and delegated to, the embedded pgx.ConnConfig.)
func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
// New creates a new Pool. See ParseConfig for information on connString format.
func New(ctx context.Context, connString string) (*Pool, error) {
	cfg, err := ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	return NewWithConfig(ctx, cfg)
}
// NewWithConfig creates a new Pool. config must have been created by ParseConfig.
func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
	// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
	// zero values.
	if !config.createdByParseConfig {
		panic("config must be created by ParseConfig")
	}
	p := &Pool{
		config: config,
		beforeConnect: config.BeforeConnect,
		afterConnect: config.AfterConnect,
		beforeAcquire: config.BeforeAcquire,
		afterRelease: config.AfterRelease,
		minConns: config.MinConns,
		maxConns: config.MaxConns,
		maxConnLifetime: config.MaxConnLifetime,
		maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
		maxConnIdleTime: config.MaxConnIdleTime,
		healthCheckPeriod: config.HealthCheckPeriod,
		healthCheckChan: make(chan struct{}, 1),
		closeChan: make(chan struct{}),
	}
	var err error
	p.p, err = puddle.NewPool(
		&puddle.Config[*connResource]{
			// Constructor dials one new connection, running the
			// BeforeConnect/AfterConnect hooks around it.
			Constructor: func(ctx context.Context) (*connResource, error) {
				connConfig := p.config.ConnConfig.Copy()
				// Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
				if connConfig.ConnectTimeout <= 0 {
					connConfig.ConnectTimeout = 2 * time.Minute
				}
				if p.beforeConnect != nil {
					if err := p.beforeConnect(ctx, connConfig); err != nil {
						return nil, err
					}
				}
				conn, err := pgx.ConnectConfig(ctx, connConfig)
				if err != nil {
					return nil, err
				}
				if p.afterConnect != nil {
					err = p.afterConnect(ctx, conn)
					if err != nil {
						// The hook rejected the connection; close it rather than leak it.
						conn.Close(ctx)
						return nil, err
					}
				}
				cr := &connResource{
					conn: conn,
					conns: make([]Conn, 64),
					poolRows: make([]poolRow, 64),
					poolRowss: make([]poolRows, 64),
				}
				return cr, nil
			},
			// Destructor closes the connection, waiting (bounded by a 15s
			// timeout) for its cleanup to finish.
			Destructor: func(value *connResource) {
				ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
				conn := value.conn
				conn.Close(ctx)
				select {
				case <-conn.PgConn().CleanupDone():
				case <-ctx.Done():
				}
				cancel()
			},
			MaxSize: config.MaxConns,
		},
	)
	if err != nil {
		return nil, err
	}
	// Warm the pool up to MinConns, then run the health checker until Close.
	go func() {
		p.createIdleResources(ctx, int(p.minConns))
		p.backgroundHealthCheck()
	}()
	return p, nil
}
// ParseConfig builds a Config from connString. It parses connString with the same behavior as pgx.ParseConfig with the
// addition of the following variables:
//
// pool_max_conns: integer greater than 0
// pool_min_conns: integer 0 or greater
// pool_max_conn_lifetime: duration string
// pool_max_conn_idle_time: duration string
// pool_health_check_period: duration string
// pool_max_conn_lifetime_jitter: duration string
//
// See Config for definitions of these arguments.
//
// # Example DSN
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10
// ParseConfig parses connString (see the preceding comment for the supported
// pool_* parameters) into a *Config, applying package defaults for any
// parameter not present.
// Improvement: the four duration-valued parameters previously repeated the
// same lookup/delete/parse sequence; it is folded into one helper closure.
// Error messages and defaults are unchanged.
func ParseConfig(connString string) (*Config, error) {
	connConfig, err := pgx.ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	config := &Config{
		ConnConfig: connConfig,
		createdByParseConfig: true,
	}

	// takeParam pops a pool_* runtime parameter so it is not sent to the
	// server as a regular connection parameter.
	takeParam := func(name string) (string, bool) {
		s, ok := config.ConnConfig.Config.RuntimeParams[name]
		if ok {
			delete(connConfig.Config.RuntimeParams, name)
		}
		return s, ok
	}

	// durationParam parses an optional duration-valued pool parameter into
	// dst, falling back to def when the parameter is absent.
	durationParam := func(name string, def time.Duration, dst *time.Duration) error {
		s, ok := takeParam(name)
		if !ok {
			*dst = def
			return nil
		}
		d, err := time.ParseDuration(s)
		if err != nil {
			return fmt.Errorf("invalid %s: %w", name, err)
		}
		*dst = d
		return nil
	}

	if s, ok := takeParam("pool_max_conns"); ok {
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
		}
		if n < 1 {
			return nil, fmt.Errorf("pool_max_conns too small: %d", n)
		}
		config.MaxConns = int32(n)
	} else {
		// Default: the greater of defaultMaxConns and the CPU count.
		config.MaxConns = defaultMaxConns
		if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
			config.MaxConns = numCPU
		}
	}

	if s, ok := takeParam("pool_min_conns"); ok {
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
		}
		config.MinConns = int32(n)
	} else {
		config.MinConns = defaultMinConns
	}

	if err := durationParam("pool_max_conn_lifetime", defaultMaxConnLifetime, &config.MaxConnLifetime); err != nil {
		return nil, err
	}
	if err := durationParam("pool_max_conn_idle_time", defaultMaxConnIdleTime, &config.MaxConnIdleTime); err != nil {
		return nil, err
	}
	if err := durationParam("pool_health_check_period", defaultHealthCheckPeriod, &config.HealthCheckPeriod); err != nil {
		return nil, err
	}
	// Jitter has no default: absent means zero, i.e. no jitter.
	if err := durationParam("pool_max_conn_lifetime_jitter", 0, &config.MaxConnLifetimeJitter); err != nil {
		return nil, err
	}

	return config, nil
}
// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
// to pool and closed.
func (p *Pool) Close() {
	// closeOnce makes Close idempotent; closing closeChan stops the
	// background health-check goroutine before the pool is torn down.
	p.closeOnce.Do(func() {
		close(p.closeChan)
		p.p.Close()
	})
}
// isExpired reports whether res has outlived maxConnLifetime, extended by an
// optional random jitter so connections created together do not all expire
// at the same instant.
func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
	now := time.Now()
	// Small optimization to avoid rand. If it's over lifetime AND jitter, immediately
	// return true.
	if now.Sub(res.CreationTime()) > p.maxConnLifetime+p.maxConnLifetimeJitter {
		return true
	}
	if p.maxConnLifetimeJitter == 0 {
		return false
	}
	// NOTE(review): the jitter is re-rolled on every call, so expiry inside
	// the jitter window is probabilistic rather than fixed per connection.
	jitterSecs := rand.Float64() * p.maxConnLifetimeJitter.Seconds()
	return now.Sub(res.CreationTime()) > p.maxConnLifetime+(time.Duration(jitterSecs)*time.Second)
}
// triggerHealthCheck asynchronously nudges the background health check. The
// send is non-blocking: if a check is already pending the signal is dropped.
func (p *Pool) triggerHealthCheck() {
	go func() {
		// Destroy is asynchronous so we give it time to actually remove itself from
		// the pool otherwise we might try to check the pool size too soon
		time.Sleep(500 * time.Millisecond)
		select {
		case p.healthCheckChan <- struct{}{}:
		default:
		}
	}()
}
// backgroundHealthCheck runs until the pool is closed, performing a health
// check every healthCheckPeriod and whenever one is explicitly triggered.
func (p *Pool) backgroundHealthCheck() {
	ticker := time.NewTicker(p.healthCheckPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-p.closeChan:
			return
		case <-p.healthCheckChan:
			p.checkHealth()
		case <-ticker.C:
			p.checkHealth()
		}
	}
}
// checkHealth tops the pool up to minConns and then destroys expired or idle
// connections, repeating until a pass destroys nothing (or the pool closes).
func (p *Pool) checkHealth() {
	for {
		// If checkMinConns failed we don't destroy any connections since we couldn't
		// even get to minConns
		if err := p.checkMinConns(); err != nil {
			// Should we log this error somewhere?
			break
		}
		if !p.checkConnsHealth() {
			// Since we didn't destroy any connections we can stop looping
			break
		}
		// Technically Destroy is asynchronous but 500ms should be enough for it to
		// remove it from the underlying pool
		select {
		case <-p.closeChan:
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}
// checkConnsHealth will check all idle connections, destroy a connection if
// it's idle or too old, and returns true if any were destroyed
func (p *Pool) checkConnsHealth() bool {
	var destroyed bool
	totalConns := p.Stat().TotalConns()
	resources := p.p.AcquireAllIdle()
	for _, res := range resources {
		// We're okay going under minConns if the lifetime is up
		if p.isExpired(res) && totalConns >= p.minConns {
			atomic.AddInt64(&p.lifetimeDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
			atomic.AddInt64(&p.idleDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else {
			// Healthy: hand it back without counting as a regular use.
			res.ReleaseUnused()
		}
	}
	return destroyed
}
// checkMinConns creates enough new connections to bring the pool back up to
// minConns, if it has dipped below.
func (p *Pool) checkMinConns() error {
	// TotalConns may still count connections being destroyed, but the 500ms
	// sleeps around destroys keep this estimate close enough.
	if missing := p.minConns - p.Stat().TotalConns(); missing > 0 {
		return p.createIdleResources(context.Background(), int(missing))
	}
	return nil
}
// createIdleResources concurrently opens targetResources new connections and
// waits for every attempt to finish. The first error seen is returned and
// cancels the remaining in-flight attempts via ctx.
func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
	ctx, cancel := context.WithCancel(parentCtx)
	defer cancel()
	// Buffered so every worker can report without blocking.
	errs := make(chan error, targetResources)
	for i := 0; i < targetResources; i++ {
		go func() {
			atomic.AddInt64(&p.newConnsCount, 1)
			err := p.p.CreateResource(ctx)
			errs <- err
		}()
	}
	var firstError error
	for i := 0; i < targetResources; i++ {
		err := <-errs
		if err != nil && firstError == nil {
			// Cancel the remaining attempts but keep draining errs.
			cancel()
			firstError = err
		}
	}
	return firstError
}
// Acquire returns a connection (*Conn) from the Pool
func (p *Pool) Acquire(ctx context.Context) (*Conn, error) {
	// Loop until a usable connection is found: broken or vetoed connections
	// are destroyed and another is tried.
	for {
		res, err := p.p.Acquire(ctx)
		if err != nil {
			return nil, err
		}
		cr := res.Value()
		// Liveness-check connections idle for over a second; destroy and
		// retry if the server side has gone away.
		if res.IdleDuration() > time.Second {
			err := cr.conn.PgConn().CheckConn()
			if err != nil {
				res.Destroy()
				continue
			}
		}
		if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
			return cr.getConn(p, res), nil
		}
		// The beforeAcquire hook vetoed this connection.
		res.Destroy()
	}
}
// AcquireFunc acquires a *Conn and calls f with it, releasing the connection
// automatically once f returns. ctx only affects the Acquire step — it has
// no effect on the call of f. The result is the acquire error, if any, and
// otherwise f's return value.
func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
	c, err := p.Acquire(ctx)
	if err != nil {
		return err
	}
	defer c.Release()
	return f(c)
}
// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
// keep-alive functionality. It does not update pool statistics.
func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
	resources := p.p.AcquireAllIdle()
	conns := make([]*Conn, 0, len(resources))
	for _, res := range resources {
		cr := res.Value()
		// The beforeAcquire hook applies here too; vetoed connections are
		// destroyed instead of returned.
		if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
			conns = append(conns, cr.getConn(p, res))
		} else {
			res.Destroy()
		}
	}
	return conns
}
// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
// disrupt all connections (such as a network interruption or a server state change).
//
// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
// to the pool.
func (p *Pool) Reset() {
	// Delegates entirely to the underlying puddle pool.
	p.p.Reset()
}
// Config returns a copy of config that was used to initialize this pool.
// The caller may modify the returned copy without affecting the pool.
func (p *Pool) Config() *Config { return p.config.Copy() }
// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
func (p *Pool) Stat() *Stat {
	// The counters are loaded atomically; they are written by the health
	// check and by createIdleResources.
	return &Stat{
		s: p.p.Stat(),
		newConnsCount: atomic.LoadInt64(&p.newConnsCount),
		lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
		idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount),
	}
}
// Exec acquires a connection from the Pool, executes the given SQL (either a
// prepared statement name or an SQL string, with positional $1, $2, ...
// arguments) and releases the connection before returning.
func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return pgconn.CommandTag{}, err
	}
	defer conn.Release()
	return conn.Exec(ctx, sql, arguments...)
}
// Query acquires a connection and executes a query that returns pgx.Rows.
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
//
// If there is an error, the returned pgx.Rows will be returned in an error state.
// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
//
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return errRows{err: err}, err
	}
	rows, err := c.Query(ctx, sql, args...)
	if err != nil {
		// Query failed, so the rows will never be read; release immediately.
		c.Release()
		return errRows{err: err}, err
	}
	// On success the connection is NOT released here: ownership transfers to
	// the returned rows wrapper (see getPoolRows), which releases it on close.
	return c.getPoolRows(rows), nil
}
// QueryRow acquires a connection and executes a query that is expected
// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
// Scan method is called. If the query selects no rows, pgx.Row's Scan will
// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
// and discards the rest. The acquired connection is returned to the Pool when
// pgx.Row's Scan method is called.
//
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
//
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
	c, err := p.Acquire(ctx)
	if err != nil {
		// Defer the acquire error to Scan via an error-state row.
		return errRow{err: err}
	}
	row := c.QueryRow(ctx, sql, args...)
	// Ownership of the connection transfers to the returned row wrapper
	// (see getPoolRow); it is released when Scan is called.
	return c.getPoolRow(row)
}
// SendBatch acquires a connection and sends the batch on it. The connection is
// released when the returned BatchResults is closed (see poolBatchResults).
func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
	c, err := p.Acquire(ctx)
	if err != nil {
		return errBatchResults{err: err}
	}
	br := c.SendBatch(ctx, b)
	return &poolBatchResults{br: br, c: c}
}
// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if transaction mode is required).
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
	// Delegates to BeginTx with zero-valued TxOptions (server defaults).
	return p.BeginTx(ctx, pgx.TxOptions{})
}
// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
// Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation.
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return nil, err
	}
	t, err := c.BeginTx(ctx, txOptions)
	if err != nil {
		// BEGIN failed; the connection is no longer needed.
		c.Release()
		return nil, err
	}
	// The Tx wrapper holds the connection until Commit/Rollback.
	return &Tx{t: t, c: c}, nil
}
// CopyFrom acquires a connection, performs a PostgreSQL COPY FROM using the
// underlying pgx.Conn, and returns the number of rows copied. The connection
// is returned to the pool when CopyFrom returns.
func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return 0, err
	}
	defer conn.Release()
	return conn.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
}
// Ping acquires a connection from the Pool and executes an empty sql statement against it.
// If the sql returns without error, the database Ping is considered successful, otherwise, the error is returned.
func (p *Pool) Ping(ctx context.Context) error {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()
	return conn.Ping(ctx)
}
Fix atomic alignment on 32-bit platforms
refs #1288
package pgxpool
import (
"context"
"fmt"
"math/rand"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/puddle/v2"
)
// Pool configuration defaults, applied by ParseConfig when the corresponding
// pool_* runtime parameter is absent from the connection string.
var defaultMaxConns = int32(4)
var defaultMinConns = int32(0)
var defaultMaxConnLifetime = time.Hour
var defaultMaxConnIdleTime = time.Minute * 30
var defaultHealthCheckPeriod = time.Minute
// connResource is the value managed by the underlying puddle pool. Besides the
// connection itself it carries pre-allocated slabs of wrapper values
// (Conn, poolRow, poolRows) that are handed out one at a time to amortize
// allocations (see getConn, getPoolRow, getPoolRows).
type connResource struct {
	conn      *pgx.Conn
	conns     []Conn     // slab of Conn wrappers
	poolRows  []poolRow  // slab of poolRow wrappers
	poolRowss []poolRows // slab of poolRows wrappers
}
// getConn returns a *Conn bound to p and res, taken from cr's pre-allocated
// slab of Conn values. When the slab is exhausted a fresh batch of 128 is
// allocated; individual wrappers are then carved off the end one at a time.
func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
	if len(cr.conns) == 0 {
		cr.conns = make([]Conn, 128)
	}
	c := &cr.conns[len(cr.conns)-1]       // take the last element of the slab
	cr.conns = cr.conns[0 : len(cr.conns)-1] // shrink the slab by one
	c.res = res
	c.p = p
	return c
}
// getPoolRow wraps r in a *poolRow bound to c, taken from cr's pre-allocated
// slab of poolRow values (same allocation-amortizing scheme as getConn).
func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
	if len(cr.poolRows) == 0 {
		cr.poolRows = make([]poolRow, 128)
	}
	pr := &cr.poolRows[len(cr.poolRows)-1]
	cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
	pr.c = c
	pr.r = r
	return pr
}
// getPoolRows wraps r in a *poolRows bound to c, taken from cr's pre-allocated
// slab of poolRows values (same allocation-amortizing scheme as getConn).
func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
	if len(cr.poolRowss) == 0 {
		cr.poolRowss = make([]poolRows, 128)
	}
	pr := &cr.poolRowss[len(cr.poolRowss)-1]
	cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
	pr.c = c
	pr.r = r
	return pr
}
// Pool allows for connection reuse.
type Pool struct {
	// 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
	// architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288.
	newConnsCount        int64 // connections created (attempted) via createIdleResources
	lifetimeDestroyCount int64 // connections destroyed because their lifetime expired
	idleDestroyCount     int64 // connections destroyed because they were idle too long

	p      *puddle.Pool[*connResource] // underlying resource pool
	config *Config

	// Lifecycle hooks copied from Config at construction.
	beforeConnect func(context.Context, *pgx.ConnConfig) error
	afterConnect  func(context.Context, *pgx.Conn) error
	beforeAcquire func(context.Context, *pgx.Conn) bool
	afterRelease  func(*pgx.Conn) bool

	// Sizing and lifetime settings copied from Config at construction.
	minConns              int32
	maxConns              int32
	maxConnLifetime       time.Duration
	maxConnLifetimeJitter time.Duration
	maxConnIdleTime       time.Duration
	healthCheckPeriod     time.Duration

	healthCheckChan chan struct{} // signals an on-demand health check

	closeOnce sync.Once
	closeChan chan struct{} // closed exactly once when the pool is closed
}
// Config is the configuration struct for creating a pool. It must be created by ParseConfig and then it can be
// modified. A manually initialized ConnConfig will cause ConnectConfig to panic.
type Config struct {
	ConnConfig *pgx.ConnConfig
	// BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
	// will not impact any existing open connections.
	BeforeConnect func(context.Context, *pgx.ConnConfig) error
	// AfterConnect is called after a connection is established, but before it is added to the pool.
	AfterConnect func(context.Context, *pgx.Conn) error
	// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
	// acquisition or false to indicate that the connection should be destroyed and a different connection should be
	// acquired.
	BeforeAcquire func(context.Context, *pgx.Conn) bool
	// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
	// return the connection to the pool or false to destroy the connection.
	AfterRelease func(*pgx.Conn) bool
	// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
	MaxConnLifetime time.Duration
	// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
	// This helps prevent all connections from being closed at the exact same time, starving the pool.
	MaxConnLifetimeJitter time.Duration
	// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
	MaxConnIdleTime time.Duration
	// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
	MaxConns int32
	// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
	// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
	// to create new connections.
	MinConns int32
	// HealthCheckPeriod is the duration between checks of the health of idle connections.
	HealthCheckPeriod time.Duration

	createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
// Copy returns a deep copy of the config that is safe to use and modify.
// The only exception is the tls.Config:
// according to the tls.Config docs it must not be modified after creation.
func (c *Config) Copy() *Config {
	// Shallow-copy all fields, then deep-copy the nested ConnConfig.
	cp := *c
	cp.ConnConfig = c.ConnConfig.Copy()
	return &cp
}
// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
// It delegates to the underlying pgx.ConnConfig.
func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
// New creates a new Pool. See ParseConfig for information on connString format.
func New(ctx context.Context, connString string) (*Pool, error) {
	cfg, err := ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	return NewWithConfig(ctx, cfg)
}
// NewWithConfig creates a new Pool. config must have been created by ParseConfig.
func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
	// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
	// zero values.
	if !config.createdByParseConfig {
		panic("config must be created by ParseConfig")
	}
	p := &Pool{
		config:                config,
		beforeConnect:         config.BeforeConnect,
		afterConnect:          config.AfterConnect,
		beforeAcquire:         config.BeforeAcquire,
		afterRelease:          config.AfterRelease,
		minConns:              config.MinConns,
		maxConns:              config.MaxConns,
		maxConnLifetime:       config.MaxConnLifetime,
		maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
		maxConnIdleTime:       config.MaxConnIdleTime,
		healthCheckPeriod:     config.HealthCheckPeriod,
		healthCheckChan:       make(chan struct{}, 1),
		closeChan:             make(chan struct{}),
	}
	var err error
	p.p, err = puddle.NewPool(
		&puddle.Config[*connResource]{
			// Constructor establishes a new database connection and wraps it
			// in a connResource with pre-allocated wrapper slabs.
			Constructor: func(ctx context.Context) (*connResource, error) {
				// Copy so hooks may mutate the config per-connection without
				// affecting the pool's stored config.
				connConfig := p.config.ConnConfig.Copy()
				// Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
				if connConfig.ConnectTimeout <= 0 {
					connConfig.ConnectTimeout = 2 * time.Minute
				}
				if p.beforeConnect != nil {
					if err := p.beforeConnect(ctx, connConfig); err != nil {
						return nil, err
					}
				}
				conn, err := pgx.ConnectConfig(ctx, connConfig)
				if err != nil {
					return nil, err
				}
				if p.afterConnect != nil {
					err = p.afterConnect(ctx, conn)
					if err != nil {
						// Hook rejected the connection; close it to avoid a leak.
						conn.Close(ctx)
						return nil, err
					}
				}
				cr := &connResource{
					conn:      conn,
					conns:     make([]Conn, 64),
					poolRows:  make([]poolRow, 64),
					poolRowss: make([]poolRows, 64),
				}
				return cr, nil
			},
			// Destructor closes the connection with a bounded timeout so a
			// hung close cannot block pool shutdown indefinitely.
			Destructor: func(value *connResource) {
				ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
				conn := value.conn
				conn.Close(ctx)
				select {
				case <-conn.PgConn().CleanupDone():
				case <-ctx.Done():
				}
				cancel()
			},
			MaxSize: config.MaxConns,
		},
	)
	if err != nil {
		return nil, err
	}
	// Warm the pool up to MinConns, then run periodic health checks until the
	// pool is closed.
	go func() {
		p.createIdleResources(ctx, int(p.minConns))
		p.backgroundHealthCheck()
	}()
	return p, nil
}
// ParseConfig builds a Config from connString. It parses connString with the same behavior as pgx.ParseConfig with the
// addition of the following variables:
//
// pool_max_conns: integer greater than 0
// pool_min_conns: integer 0 or greater
// pool_max_conn_lifetime: duration string
// pool_max_conn_idle_time: duration string
// pool_health_check_period: duration string
// pool_max_conn_lifetime_jitter: duration string
//
// See Config for definitions of these arguments.
//
// # Example DSN
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10
func ParseConfig(connString string) (*Config, error) {
	connConfig, err := pgx.ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	config := &Config{
		ConnConfig:           connConfig,
		createdByParseConfig: true,
	}

	// takeParam removes a pool_* setting from the runtime parameters (so it is
	// not sent to the server) and returns its raw value, if present.
	takeParam := func(key string) (string, bool) {
		s, ok := config.ConnConfig.Config.RuntimeParams[key]
		if ok {
			delete(connConfig.Config.RuntimeParams, key)
		}
		return s, ok
	}

	// parseDuration stores the parsed duration for key into *dst, falling back
	// to def when the parameter is absent. This replaces four copy-pasted
	// parse-or-default stanzas in the original implementation.
	parseDuration := func(key string, dst *time.Duration, def time.Duration) error {
		s, ok := takeParam(key)
		if !ok {
			*dst = def
			return nil
		}
		d, err := time.ParseDuration(s)
		if err != nil {
			return fmt.Errorf("invalid %s: %w", key, err)
		}
		*dst = d
		return nil
	}

	if s, ok := takeParam("pool_max_conns"); ok {
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
		}
		if n < 1 {
			return nil, fmt.Errorf("pool_max_conns too small: %d", n)
		}
		config.MaxConns = int32(n)
	} else {
		// Default is the greater of defaultMaxConns and the CPU count.
		config.MaxConns = defaultMaxConns
		if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
			config.MaxConns = numCPU
		}
	}

	if s, ok := takeParam("pool_min_conns"); ok {
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
		}
		config.MinConns = int32(n)
	} else {
		config.MinConns = defaultMinConns
	}

	if err := parseDuration("pool_max_conn_lifetime", &config.MaxConnLifetime, defaultMaxConnLifetime); err != nil {
		return nil, err
	}
	if err := parseDuration("pool_max_conn_idle_time", &config.MaxConnIdleTime, defaultMaxConnIdleTime); err != nil {
		return nil, err
	}
	if err := parseDuration("pool_health_check_period", &config.HealthCheckPeriod, defaultHealthCheckPeriod); err != nil {
		return nil, err
	}
	// pool_max_conn_lifetime_jitter has no default; zero means no jitter.
	if err := parseDuration("pool_max_conn_lifetime_jitter", &config.MaxConnLifetimeJitter, 0); err != nil {
		return nil, err
	}

	return config, nil
}
// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
// to pool and closed.
func (p *Pool) Close() {
	// sync.Once makes Close idempotent: closeChan is closed exactly once,
	// which also stops the background health-check goroutine.
	p.closeOnce.Do(func() {
		close(p.closeChan)
		p.p.Close()
	})
}
// isExpired reports whether res has outlived maxConnLifetime plus a random
// jitter of up to maxConnLifetimeJitter. Jitter spreads expirations out so all
// connections are not destroyed at the same moment.
func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
	age := time.Since(res.CreationTime())
	// Small optimization to avoid rand. If it's over lifetime AND jitter, immediately
	// return true.
	if age > p.maxConnLifetime+p.maxConnLifetimeJitter {
		return true
	}
	if p.maxConnLifetimeJitter == 0 {
		return false
	}
	// Fix: the previous code computed time.Duration(jitterSecs)*time.Second,
	// truncating the jitter to whole seconds, so any sub-second component of
	// MaxConnLifetimeJitter was always rounded down to zero. Scaling the
	// Duration directly keeps full nanosecond precision.
	// NOTE(review): the jitter is re-drawn on every call, so a connection can
	// flip between expired/not-expired across checks — pre-existing behavior.
	jitter := time.Duration(rand.Float64() * float64(p.maxConnLifetimeJitter))
	return age > p.maxConnLifetime+jitter
}
// triggerHealthCheck requests an on-demand health check. The send is
// non-blocking: if a check is already pending the signal is dropped.
func (p *Pool) triggerHealthCheck() {
	go func() {
		// Destroy is asynchronous so we give it time to actually remove itself from
		// the pool otherwise we might try to check the pool size too soon
		time.Sleep(500 * time.Millisecond)
		select {
		case p.healthCheckChan <- struct{}{}:
		default:
		}
	}()
}
// backgroundHealthCheck runs checkHealth every healthCheckPeriod and whenever
// triggerHealthCheck signals, until the pool is closed (closeChan closes).
func (p *Pool) backgroundHealthCheck() {
	ticker := time.NewTicker(p.healthCheckPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-p.closeChan:
			return
		case <-p.healthCheckChan:
			p.checkHealth()
		case <-ticker.C:
			p.checkHealth()
		}
	}
}
// checkHealth tops the pool up to minConns and then repeatedly destroys
// expired/idle connections until a pass destroys nothing, pausing between
// passes to let asynchronous destroys settle.
func (p *Pool) checkHealth() {
	for {
		// If checkMinConns failed we don't destroy any connections since we couldn't
		// even get to minConns
		if err := p.checkMinConns(); err != nil {
			// Should we log this error somewhere?
			break
		}
		if !p.checkConnsHealth() {
			// Since we didn't destroy any connections we can stop looping
			break
		}
		// Technically Destroy is asynchronous but 500ms should be enough for it to
		// remove it from the underlying pool
		select {
		case <-p.closeChan:
			// Pool was closed mid-check; stop immediately.
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}
// checkConnsHealth will check all idle connections, destroy a connection if
// it's idle or too old, and returns true if any were destroyed
func (p *Pool) checkConnsHealth() bool {
	var destroyed bool
	totalConns := p.Stat().TotalConns()
	resources := p.p.AcquireAllIdle()
	for _, res := range resources {
		// We're okay going under minConns if the lifetime is up
		if p.isExpired(res) && totalConns >= p.minConns {
			atomic.AddInt64(&p.lifetimeDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
			// Idle-destroy keeps the pool strictly above minConns (note > vs >=).
			atomic.AddInt64(&p.idleDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else {
			// Healthy: hand the resource back without counting as an acquire.
			res.ReleaseUnused()
		}
	}
	return destroyed
}
// checkMinConns creates enough new idle connections to bring the pool back up
// to minConns, returning the first creation error encountered.
func (p *Pool) checkMinConns() error {
	// TotalConns can include ones that are being destroyed but we should have
	// sleep(500ms) around all of the destroys to help prevent that from throwing
	// off this check
	need := p.minConns - p.Stat().TotalConns()
	if need <= 0 {
		return nil
	}
	return p.createIdleResources(context.Background(), int(need))
}
// createIdleResources concurrently creates targetResources new idle
// connections. On the first failure the shared context is canceled so the
// remaining creations abort early; the first error is returned.
func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
	ctx, cancel := context.WithCancel(parentCtx)
	defer cancel()
	// Buffered to targetResources so no goroutine ever blocks on send.
	errs := make(chan error, targetResources)
	for i := 0; i < targetResources; i++ {
		go func() {
			// Counts creation attempts (incremented even if creation fails).
			atomic.AddInt64(&p.newConnsCount, 1)
			err := p.p.CreateResource(ctx)
			errs <- err
		}()
	}
	var firstError error
	// Drain every result so all goroutines terminate before returning.
	for i := 0; i < targetResources; i++ {
		err := <-errs
		if err != nil && firstError == nil {
			cancel()
			firstError = err
		}
	}
	return firstError
}
// Acquire returns a connection (*Conn) from the Pool
func (p *Pool) Acquire(ctx context.Context) (*Conn, error) {
	// Loop until a usable connection is found; broken or vetoed connections
	// are destroyed and another is tried.
	for {
		res, err := p.p.Acquire(ctx)
		if err != nil {
			return nil, err
		}
		cr := res.Value()
		// Connections idle for over a second get a liveness probe; dead ones
		// are destroyed and the acquire is retried.
		if res.IdleDuration() > time.Second {
			err := cr.conn.PgConn().CheckConn()
			if err != nil {
				res.Destroy()
				continue
			}
		}
		if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
			return cr.getConn(p, res), nil
		}
		// BeforeAcquire vetoed this connection.
		res.Destroy()
	}
}
// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
// automatically released after the call of f.
func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return err
	}
	// The connection is always released, regardless of f's outcome.
	defer conn.Release()
	return f(conn)
}
// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
// keep-alive functionality. It does not update pool statistics.
func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
	idle := p.p.AcquireAllIdle()
	acquired := make([]*Conn, 0, len(idle))
	for _, res := range idle {
		cr := res.Value()
		if p.beforeAcquire != nil && !p.beforeAcquire(ctx, cr.conn) {
			// Vetoed by the BeforeAcquire hook: destroy instead of handing out.
			res.Destroy()
			continue
		}
		acquired = append(acquired, cr.getConn(p, res))
	}
	return acquired
}
// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
// disrupt all connections (such as a network interruption or a server state change).
//
// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
// to the pool.
func (p *Pool) Reset() {
	// Delegates entirely to the underlying puddle pool's Reset.
	p.p.Reset()
}
// Config returns a copy of config that was used to initialize this pool. The copy is deep enough (see Config.Copy)
// that the caller may modify it without affecting the pool.
func (p *Pool) Config() *Config { return p.config.Copy() }
// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
func (p *Pool) Stat() *Stat {
	// Counters are mutated concurrently elsewhere, hence the atomic loads.
	return &Stat{
		s:                    p.p.Stat(),
		newConnsCount:        atomic.LoadInt64(&p.newConnsCount),
		lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
		idleDestroyCount:     atomic.LoadInt64(&p.idleDestroyCount),
	}
}
// Exec acquires a connection from the Pool and executes the given SQL.
// SQL can be either a prepared statement name or an SQL string.
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
// The acquired connection is returned to the pool when the Exec function returns.
func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return pgconn.CommandTag{}, err
	}
	defer c.Release()
	return c.Exec(ctx, sql, arguments...)
}
// Query acquires a connection and executes a query that returns pgx.Rows.
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
//
// If there is an error, the returned pgx.Rows will be returned in an error state.
// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
//
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return errRows{err: err}, err
	}
	rows, err := c.Query(ctx, sql, args...)
	if err != nil {
		// Query failed; the connection can go straight back to the pool.
		c.Release()
		return errRows{err: err}, err
	}
	// On success, ownership of the connection transfers to the rows wrapper
	// (see getPoolRows), which releases it when the rows are closed.
	return c.getPoolRows(rows), nil
}
// QueryRow acquires a connection and executes a query that is expected
// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
// Scan method is called. If the query selects no rows, pgx.Row's Scan will
// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
// and discards the rest. The acquired connection is returned to the Pool when
// pgx.Row's Scan method is called.
//
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
//
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
	c, err := p.Acquire(ctx)
	if err != nil {
		// Surface the acquire error lazily via an error-state row.
		return errRow{err: err}
	}
	row := c.QueryRow(ctx, sql, args...)
	// Ownership transfers to the row wrapper; released on Scan (see getPoolRow).
	return c.getPoolRow(row)
}
// SendBatch acquires a connection and sends the batch on it. The connection is
// released when the returned BatchResults is closed (see poolBatchResults).
func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
	c, err := p.Acquire(ctx)
	if err != nil {
		return errBatchResults{err: err}
	}
	br := c.SendBatch(ctx, b)
	return &poolBatchResults{br: br, c: c}
}
// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if transaction mode is required).
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
	// Delegates to BeginTx with zero-valued TxOptions (server defaults).
	return p.BeginTx(ctx, pgx.TxOptions{})
}
// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
// Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation.
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return nil, err
	}
	innerTx, err := conn.BeginTx(ctx, txOptions)
	if err != nil {
		// BEGIN failed; return the connection to the pool immediately.
		conn.Release()
		return nil, err
	}
	// The Tx wrapper keeps the connection until Commit/Rollback.
	return &Tx{t: innerTx, c: conn}, nil
}
// CopyFrom acquires a connection, performs a PostgreSQL COPY FROM via the
// underlying pgx.Conn, and returns the number of rows copied. The connection
// is returned to the pool when CopyFrom returns.
func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return 0, err
	}
	defer c.Release()
	return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
}
// Ping acquires a connection from the Pool and executes an empty sql statement against it.
// If the sql returns without error, the database Ping is considered successful, otherwise, the error is returned.
func (p *Pool) Ping(ctx context.Context) error {
	c, err := p.Acquire(ctx)
	if err != nil {
		return err
	}
	defer c.Release()
	return c.Ping(ctx)
}
|
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This stand-alone package is utilized for dynamically generating/maintaining a list of
metrics; this list is determined by the stability class of the metric. We statically analyze
all files in the Kubernetes code base to:
- Determine whether the metric falls into a stability class with stability guarantees.
- Determine the metric's metadata, i.e. the name, labels, type of metric.
- Output (based on the above) the metrics which meet our criteria into a yaml file.
Due to the dynamic nature of how metrics can be written, we only support the subset of metrics
which can actually be parsed. If a metric cannot be parsed, it must be delegated to the stability
class `Internal`, which will exempt the metric from static analysis.
The entrypoint to this package is defined in a shell script (i.e. stability-utils.sh) which has
the logic for feeding file names as arguments into the program. The logic of this program is as
follows:
- parse all files fed in, keeping track of:
- the function and struct pointers which correspond to prometheus metric definitions.
- consts/variables we encounter, so that we can use them to resolve values in metric definitions
- then, iterate over the function and struct pointers, resolving attributes to concrete metric values
- then, using our collected and resolved metric definitions, output (depending on the mode):
- a yaml file corresponding to all stable metrics
- a documentation file corresponding to all parseable metrics in the Kubernetes codebase
*/
package main
go fmt
Change-Id: I631ef35f10e8434b5a6deccedab6158f7199faa3
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This stand-alone package is utilized for dynamically generating/maintaining a list of
metrics; this list is determined by the stability class of the metric. We statically analyze
all files in the Kubernetes code base to:
- Determine whether the metric falls into a stability class with stability guarantees.
- Determine the metric's metadata, i.e. the name, labels, type of metric.
- Output (based on the above) the metrics which meet our criteria into a yaml file.
Due to the dynamic nature of how metrics can be written, we only support the subset of metrics
which can actually be parsed. If a metric cannot be parsed, it must be delegated to the stability
class `Internal`, which will exempt the metric from static analysis.
The entrypoint to this package is defined in a shell script (i.e. stability-utils.sh) which has
the logic for feeding file names as arguments into the program. The logic of this program is as
follows:
- parse all files fed in, keeping track of:
- the function and struct pointers which correspond to prometheus metric definitions.
- consts/variables we encounter, so that we can use them to resolve values in metric definitions
- then, iterate over the function and struct pointers, resolving attributes to concrete metric values
- then, using our collected and resolved metric definitions, output (depending on the mode):
- a yaml file corresponding to all stable metrics
- a documentation file corresponding to all parseable metrics in the Kubernetes codebase
*/
package main
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istiocontrolplane
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/apis/istio"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/cache"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/helmreconciler"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/util/clog"
"istio.io/istio/operator/pkg/util/progress"
"istio.io/istio/pkg/errdict"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/pkg/log"
"istio.io/pkg/version"
)
const (
	// finalizer is the finalizer name attached to IstioOperator resources.
	// NOTE(review): attachment/removal logic is not visible in this excerpt.
	finalizer = "istio-finalizer.install.istio.io"
	// finalizerMaxRetries defines the maximum number of attempts to remove the finalizer.
	finalizerMaxRetries = 1
	// IgnoreReconcileAnnotation is annotation of IstioOperator CR so it would be ignored during Reconcile loop.
	IgnoreReconcileAnnotation = "install.istio.io/ignoreReconcile"
)
var (
	// scope is the logging scope used by this installer controller.
	scope = log.RegisterScope("installer", "installer", 0)
	// restConfig is package-level mutable state holding the Kubernetes REST
	// config. NOTE(review): initialization is not visible here — confirm it is
	// set before use.
	restConfig *rest.Config
)
// Options holds controller options.
type Options struct {
	// Force enables forced behavior. NOTE(review): the precise semantics are
	// determined at the call sites, which are outside this excerpt.
	Force bool
}
var (
	// watchedResources contains all resources we will watch and reconcile when changed
	// Ideally this would also contain Istio CRDs, but there is a race condition here - we cannot watch
	// a type that does not yet exist.
	watchedResources = []schema.GroupVersionKind{
		{Group: "autoscaling", Version: "v2beta2", Kind: name.HPAStr},
		{Group: "policy", Version: "v1beta1", Kind: name.PDBStr},
		{Group: "apps", Version: "v1", Kind: name.DeploymentStr},
		{Group: "apps", Version: "v1", Kind: name.DaemonSetStr},
		{Group: "", Version: "v1", Kind: name.ServiceStr},
		// Endpoints should not be pruned because these are generated and not in the manifest.
		// {Group: "", Version: "v1", Kind: name.EndpointStr},
		{Group: "", Version: "v1", Kind: name.CMStr},
		{Group: "", Version: "v1", Kind: name.PVCStr},
		{Group: "", Version: "v1", Kind: name.PodStr},
		{Group: "", Version: "v1", Kind: name.SecretStr},
		{Group: "", Version: "v1", Kind: name.SAStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.RoleBindingStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.RoleStr},
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.MutatingWebhookConfigurationStr},
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.ValidatingWebhookConfigurationStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleBindingStr},
		{Group: "apiextensions.k8s.io", Version: "v1", Kind: name.CRDStr},
	}

	// ownedResourcePredicates triggers reconciliation only when an operator-created resource is
	// deleted; create/update/generic events are ignored.
	ownedResourcePredicates = predicate.Funcs{
		CreateFunc: func(_ event.CreateEvent) bool {
			// no action
			return false
		},
		GenericFunc: func(_ event.GenericEvent) bool {
			// no action
			return false
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			obj, err := meta.Accessor(e.Object)
			// BUGFIX: check the Accessor error before using obj; previously the Debugf below
			// dereferenced obj first, which can be nil when Accessor fails.
			if err != nil {
				return false
			}
			scope.Debugf("got delete event for %s.%s", obj.GetName(), obj.GetNamespace())
			unsObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.Object)
			if err != nil {
				return false
			}
			if isOperatorCreatedResource(obj) {
				crName := obj.GetLabels()[helmreconciler.OwningResourceName]
				crNamespace := obj.GetLabels()[helmreconciler.OwningResourceNamespace]
				componentName := obj.GetLabels()[helmreconciler.IstioComponentLabelStr]
				var host string
				if restConfig != nil {
					host = restConfig.Host
				}
				// Evict the deleted object from the operator cache so the next reconcile re-applies it.
				crHash := strings.Join([]string{crName, crNamespace, componentName, host}, "-")
				oh := object.NewK8sObject(&unstructured.Unstructured{Object: unsObj}, nil, nil).Hash()
				cache.RemoveObject(crHash, oh)
				return true
			}
			return false
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			// no action
			return false
		},
	}

	// operatorPredicates reconciles on IstioOperator create/delete, and on update only when the
	// spec, deletion timestamp, or generation changed.
	operatorPredicates = predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return true
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return true
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldIOP, ok := e.ObjectOld.(*iopv1alpha1.IstioOperator)
			if !ok {
				scope.Error(errdict.OperatorFailedToGetObjectInCallback, "failed to get old IstioOperator")
				return false
			}
			// BUGFIX: use the comma-ok assertion. The original bare assertion would panic on a
			// mismatched type, and the following `if !ok` re-tested the stale ok from the old object.
			newIOP, ok := e.ObjectNew.(*iopv1alpha1.IstioOperator)
			if !ok {
				scope.Error(errdict.OperatorFailedToGetObjectInCallback, "failed to get new IstioOperator")
				return false
			}
			if !reflect.DeepEqual(oldIOP.Spec, newIOP.Spec) ||
				oldIOP.GetDeletionTimestamp() != newIOP.GetDeletionTimestamp() ||
				oldIOP.GetGeneration() != newIOP.GetGeneration() {
				return true
			}
			return false
		},
	}
)
// NewReconcileIstioOperator creates a new ReconcileIstioOperator and returns a ptr to it.
func NewReconcileIstioOperator(client client.Client, kubeClient kube.Client, scheme *runtime.Scheme) *ReconcileIstioOperator {
	r := &ReconcileIstioOperator{}
	r.client = client
	r.kubeClient = kubeClient
	r.scheme = scheme
	return r
}
// ReconcileIstioOperator reconciles a IstioOperator object
type ReconcileIstioOperator struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// kubeClient is used for direct cluster queries (JWT policy detection, CA config validation).
	kubeClient kube.Client
	// scheme maps Go types to Kubernetes group/version/kinds.
	scheme *runtime.Scheme
	// options carries optional reconciler settings; may be nil.
	options *Options
}
// Reconcile reads that state of the cluster for a IstioOperator object and makes changes based on the state read
// and what is in the IstioOperator.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileIstioOperator) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
	scope.Info("Reconciling IstioOperator")
	ns, iopName := request.Namespace, request.Name
	reqNamespacedName := types.NamespacedName{
		Name:      request.Name,
		Namespace: ns,
	}
	// declare read-only iop instance to create the reconciler
	iop := &iopv1alpha1.IstioOperator{}
	if err := r.client.Get(context.TODO(), reqNamespacedName, iop); err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			metrics.CRDeletionTotal.Increment()
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		scope.Warnf(errdict.OperatorFailedToGetObjectFromAPIServer, "error getting IstioOperator %s: %s", iopName, err)
		metrics.CountCRFetchFail(errors.ReasonForError(err))
		return reconcile.Result{}, err
	}
	// An absent spec is treated as a request for the default profile.
	if iop.Spec == nil {
		iop.Spec = &v1alpha1.IstioOperatorSpec{Profile: name.DefaultProfileName}
	}
	// Skip CRs targeting a different operator revision (multi-revision installs).
	operatorRevision, _ := os.LookupEnv("REVISION")
	if operatorRevision != "" && operatorRevision != iop.Spec.Revision {
		scope.Infof("Ignoring IstioOperator CR %s with revision %s, since operator revision is %s.", iopName, iop.Spec.Revision, operatorRevision)
		return reconcile.Result{}, nil
	}
	// Honor the explicit opt-out annotation.
	if iop.Annotations != nil {
		if ir := iop.Annotations[IgnoreReconcileAnnotation]; ir == "true" {
			scope.Infof("Ignoring the IstioOperator CR %s because it is annotated to be ignored for reconcile ", iopName)
			return reconcile.Result{}, nil
		}
	}
	// for backward compatibility, the previous applied installed-state CR does not have the ignore reconcile annotation
	// TODO(richardwxn): remove this check and rely on annotation check only
	if strings.HasPrefix(iop.Name, name.InstalledSpecCRPrefix) {
		scope.Infof("Ignoring the installed-state IstioOperator CR %s ", iopName)
		return reconcile.Result{}, nil
	}
	var err error
	// Shallow-copy the CR so profile merging does not mutate the cached object's Spec pointer.
	iopMerged := &iopv1alpha1.IstioOperator{}
	*iopMerged = *iop
	// get the merged values in iop on top of the defaults for the profile given by iop.profile
	iopMerged.Spec, err = mergeIOPSWithProfile(iopMerged)
	if err != nil {
		scope.Errorf(errdict.OperatorFailedToMergeUserIOP, "failed to merge base profile with user IstioOperator CR %s, %s", iopName, err)
		return reconcile.Result{}, err
	}
	deleted := iop.GetDeletionTimestamp() != nil
	finalizers := sets.NewString(iop.GetFinalizers()...)
	if deleted {
		// CR is being deleted: tear down installed resources, then remove our finalizer.
		if !finalizers.Has(finalizer) {
			scope.Infof("IstioOperator %s deleted", iopName)
			metrics.CRDeletionTotal.Increment()
			return reconcile.Result{}, nil
		}
		scope.Infof("Deleting IstioOperator %s", iopName)
		reconciler, err := helmreconciler.NewHelmReconciler(r.client, r.kubeClient, iopMerged, nil)
		if err != nil {
			return reconcile.Result{}, err
		}
		if err := reconciler.Delete(); err != nil {
			scope.Errorf("Failed to delete resources with helm reconciler: %s.", err)
			return reconcile.Result{}, err
		}
		finalizers.Delete(finalizer)
		iop.SetFinalizers(finalizers.List())
		finalizerError := r.client.Update(context.TODO(), iop)
		// On conflict, re-read the object and retry finalizer removal up to finalizerMaxRetries times.
		for retryCount := 0; errors.IsConflict(finalizerError) && retryCount < finalizerMaxRetries; retryCount++ {
			scope.Info("API server conflict during finalizer removal, retrying.")
			_ = r.client.Get(context.TODO(), request.NamespacedName, iop)
			finalizers = sets.NewString(iop.GetFinalizers()...)
			finalizers.Delete(finalizer)
			iop.SetFinalizers(finalizers.List())
			finalizerError = r.client.Update(context.TODO(), iop)
		}
		if finalizerError != nil {
			if errors.IsNotFound(finalizerError) {
				scope.Infof("Did not remove finalizer from %s: the object was previously deleted.", iopName)
				metrics.CRDeletionTotal.Increment()
				return reconcile.Result{}, nil
			} else if errors.IsConflict(finalizerError) {
				scope.Infof("Could not remove finalizer from %s due to conflict. Operation will be retried in next reconcile attempt.", iopName)
				return reconcile.Result{}, nil
			}
			scope.Errorf(errdict.OperatorFailedToRemoveFinalizer, "error removing finalizer: %s", finalizerError)
			return reconcile.Result{}, finalizerError
		}
		return reconcile.Result{}, nil
	} else if !finalizers.Has(finalizer) {
		// First reconcile of a live CR: install our finalizer so deletion is intercepted above.
		log.Infof("Adding finalizer %v to %v", finalizer, request)
		finalizers.Insert(finalizer)
		iop.SetFinalizers(finalizers.List())
		err := r.client.Update(context.TODO(), iop)
		if err != nil {
			if errors.IsNotFound(err) {
				scope.Infof("Could not add finalizer to %s: the object was deleted.", iopName)
				metrics.CRDeletionTotal.Increment()
				return reconcile.Result{}, nil
			} else if errors.IsConflict(err) {
				scope.Infof("Could not add finalizer to %s due to conflict. Operation will be retried in next reconcile attempt.", iopName)
			}
			scope.Errorf(errdict.OperatorFailedToAddFinalizer, "Failed to add finalizer to IstioOperator CR %s: %s", iopName, err)
			return reconcile.Result{}, err
		}
	}
	scope.Info("Updating IstioOperator")
	val := iopMerged.Spec.Values.AsMap()
	if _, ok := val["global"]; !ok {
		val["global"] = make(map[string]interface{})
	}
	globalValues := val["global"].(map[string]interface{})
	scope.Info("Detecting third-party JWT support")
	var jwtPolicy util.JWTPolicy
	if jwtPolicy, err = util.DetectSupportedJWTPolicy(r.kubeClient.Kube()); err != nil {
		// TODO(howardjohn): add to dictionary. When resolved, replace this sentence with Done or WontFix - if WontFix, add reason.
		scope.Warnf("Failed to detect third-party JWT support: %v", err)
	} else {
		if jwtPolicy == util.FirstPartyJWT {
			scope.Info("Detected that your cluster does not support third party JWT authentication. " +
				"Falling back to less secure first party JWT. " +
				"See " + url.ConfigureSAToken + " for details.")
		}
		globalValues["jwtPolicy"] = string(jwtPolicy)
	}
	err = util.ValidateIOPCAConfig(r.kubeClient, iopMerged)
	if err != nil {
		scope.Errorf(errdict.OperatorFailedToConfigure, "failed to apply IstioOperator resources. Error %s", err)
		return reconcile.Result{}, err
	}
	helmReconcilerOptions := &helmreconciler.Options{
		Log:         clog.NewDefaultLogger(),
		ProgressLog: progress.NewLog(),
	}
	if r.options != nil {
		helmReconcilerOptions.Force = r.options.Force
	}
	reconciler, err := helmreconciler.NewHelmReconciler(r.client, r.kubeClient, iopMerged, helmReconcilerOptions)
	if err != nil {
		return reconcile.Result{}, err
	}
	if err := reconciler.SetStatusBegin(); err != nil {
		return reconcile.Result{}, err
	}
	// Apply the merged CR. Status is completed even when Reconcile errored so the CR reflects
	// the failure, and the reconcile error is returned so the request is requeued.
	status, err := reconciler.Reconcile()
	if err != nil {
		scope.Errorf("Error during reconcile: %s", err)
	}
	if err := reconciler.SetStatusComplete(status); err != nil {
		return reconcile.Result{}, err
	}
	return reconcile.Result{}, err
}
// mergeIOPSWithProfile overlays the values in iop on top of the defaults for the profile given by iop.profile and
// returns the merged result.
func mergeIOPSWithProfile(iop *iopv1alpha1.IstioOperator) (*v1alpha1.IstioOperatorSpec, error) {
	// Load the base profile YAML for the profile named in the CR.
	profileYAML, err := helm.GetProfileYAML(iop.Spec.InstallPackagePath, iop.Spec.Profile)
	if err != nil {
		metrics.CountCRMergeFail(metrics.CannotFetchProfileError)
		return nil, err
	}
	// Due to the fact that base profile is compiled in before a tag can be created, we must allow an additional
	// override from variables that are set during release build time.
	hub := version.DockerInfo.Hub
	tag := version.DockerInfo.Tag
	if hub != "" && hub != "unknown" && tag != "" && tag != "unknown" {
		buildHubTagOverlayYAML, err := helm.GenerateHubTagOverlay(hub, tag)
		if err != nil {
			metrics.CountCRMergeFail(metrics.OverlayError)
			return nil, err
		}
		profileYAML, err = util.OverlayYAML(profileYAML, buildHubTagOverlayYAML)
		if err != nil {
			metrics.CountCRMergeFail(metrics.OverlayError)
			return nil, err
		}
	}
	// Serialize the user CR so it can be overlaid on the profile YAML.
	overlayYAMLB, err := yaml.Marshal(iop)
	if err != nil {
		metrics.CountCRMergeFail(metrics.IOPFormatError)
		return nil, err
	}
	overlayYAML := string(overlayYAMLB)
	t := translate.NewReverseTranslator()
	// Translate legacy values.* k8s settings in the user overlay into first-class IOP fields.
	overlayYAML, err = t.TranslateK8SfromValueToIOP(overlayYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.TranslateValuesError)
		return nil, fmt.Errorf("could not overlay k8s settings from values to IOP: %s", err)
	}
	// User overlay wins over profile defaults.
	mergedYAML, err := util.OverlayIOP(profileYAML, overlayYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.OverlayError)
		return nil, err
	}
	// Propagate component enablement expressed via values into the merged document.
	mergedYAML, err = translate.OverlayValuesEnablement(mergedYAML, overlayYAML, "")
	if err != nil {
		metrics.CountCRMergeFail(metrics.TranslateValuesError)
		return nil, err
	}
	mergedYAMLSpec, err := tpath.GetSpecSubtree(mergedYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.InternalYAMLParseError)
		return nil, err
	}
	// The final unmarshal also validates the merged spec.
	return istio.UnmarshalAndValidateIOPS(mergedYAMLSpec)
}
// Add creates a new IstioOperator Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started. It also provides additional options to modify internal reconciler behavior.
func Add(mgr manager.Manager, options *Options) error {
	restConfig = mgr.GetConfig()
	kubeClient, err := kube.NewExtendedClient(kube.NewClientConfigForRestConfig(restConfig), "")
	if err != nil {
		return fmt.Errorf("create Kubernetes client: %v", err)
	}
	reconciler := &ReconcileIstioOperator{
		client:     mgr.GetClient(),
		scheme:     mgr.GetScheme(),
		kubeClient: kubeClient,
		options:    options,
	}
	return add(mgr, reconciler)
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	scope.Info("Adding controller for IstioOperator.")
	// Create a new controller bound to the manager.
	c, err := controller.New("istiocontrolplane-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to the primary resource IstioOperator.
	if err := c.Watch(&source.Kind{Type: &iopv1alpha1.IstioOperator{}}, &handler.EnqueueRequestForObject{}, operatorPredicates); err != nil {
		return err
	}
	// Watch for changes to Istio resources owned by the operator.
	if err := watchIstioResources(c); err != nil {
		return err
	}
	scope.Info("Controller added")
	return nil
}
// watchIstioResources registers a watch for every GVK in watchedResources, mapping each event
// back to the owning IstioOperator CR via the operator ownership labels. Watch registration
// failures are logged but do not abort the remaining watches.
func watchIstioResources(c controller.Controller) error {
	mapToOwner := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request {
		scope.Infof("Watching a change for istio resource: %s/%s", a.GetNamespace(), a.GetName())
		labels := a.GetLabels()
		return []reconcile.Request{{
			NamespacedName: types.NamespacedName{
				Name:      labels[helmreconciler.OwningResourceName],
				Namespace: labels[helmreconciler.OwningResourceNamespace],
			},
		}}
	})
	for _, gvk := range watchedResources {
		target := &unstructured.Unstructured{}
		target.SetGroupVersionKind(gvk)
		if err := c.Watch(&source.Kind{Type: target}, mapToOwner, ownedResourcePredicates); err != nil {
			scope.Errorf("Could not create watch for %s/%s/%s: %s.", gvk.Kind, gvk.Group, gvk.Version, err)
		}
	}
	return nil
}
// isOperatorCreatedResource reports whether obj carries all three operator ownership labels.
func isOperatorCreatedResource(obj metav1.Object) bool {
	labels := obj.GetLabels()
	for _, key := range []string{
		helmreconciler.OwningResourceName,
		helmreconciler.OwningResourceNamespace,
		helmreconciler.IstioComponentLabelStr,
	} {
		if labels[key] == "" {
			return false
		}
	}
	return true
}
// operator: fix remaining issue for 1.25 in cluster controller (#39587)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istiocontrolplane
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kubeversion "k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/apis/istio"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/cache"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/helmreconciler"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/util/clog"
"istio.io/istio/operator/pkg/util/progress"
"istio.io/istio/pkg/errdict"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/pkg/log"
"istio.io/pkg/version"
)
const (
	// finalizer is attached to IstioOperator CRs so resource teardown runs before the CR is removed.
	finalizer = "istio-finalizer.install.istio.io"
	// finalizerMaxRetries defines the maximum number of attempts to remove the finalizer.
	finalizerMaxRetries = 1
	// IgnoreReconcileAnnotation is annotation of IstioOperator CR so it would be ignored during Reconcile loop.
	IgnoreReconcileAnnotation = "install.istio.io/ignoreReconcile"
)
var (
	// scope is the logging scope used for all installer/controller messages in this package.
	scope = log.RegisterScope("installer", "installer", 0)
	// restConfig is the cluster rest config; populated in Add and read when hashing owned objects.
	restConfig *rest.Config
)
// Options holds additional settings that modify reconciler behavior.
type Options struct {
	// Force is propagated to the helm reconciler options (see Reconcile).
	Force bool
}
const (
	// autoscalingV2MinK8SVersion is the minimum Kubernetes minor version serving autoscaling/v2.
	autoscalingV2MinK8SVersion = 23
	// pdbV1MinK8SVersion is the minimum Kubernetes minor version serving policy/v1 PodDisruptionBudgets.
	pdbV1MinK8SVersion = 21
)
// watchedResources contains all resources we will watch and reconcile when changed.
// Ideally this would also contain Istio CRDs, but there is a race condition here - we cannot watch
// a type that does not yet exist. The HPA and PDB API versions are chosen based on the
// server's Kubernetes version.
func watchedResources(version *kubeversion.Info) []schema.GroupVersionKind {
	gvk := func(group, ver, kind string) schema.GroupVersionKind {
		return schema.GroupVersionKind{Group: group, Version: ver, Kind: kind}
	}
	res := []schema.GroupVersionKind{
		gvk("apps", "v1", name.DeploymentStr),
		gvk("apps", "v1", name.DaemonSetStr),
		gvk("", "v1", name.ServiceStr),
		// Endpoints should not be pruned because these are generated and not in the manifest.
		// gvk("", "v1", name.EndpointStr),
		gvk("", "v1", name.CMStr),
		gvk("", "v1", name.PVCStr),
		gvk("", "v1", name.PodStr),
		gvk("", "v1", name.SecretStr),
		gvk("", "v1", name.SAStr),
		gvk("rbac.authorization.k8s.io", "v1", name.RoleBindingStr),
		gvk("rbac.authorization.k8s.io", "v1", name.RoleStr),
		gvk("admissionregistration.k8s.io", "v1", name.MutatingWebhookConfigurationStr),
		gvk("admissionregistration.k8s.io", "v1", name.ValidatingWebhookConfigurationStr),
		gvk("rbac.authorization.k8s.io", "v1", name.ClusterRoleStr),
		gvk("rbac.authorization.k8s.io", "v1", name.ClusterRoleBindingStr),
		gvk("apiextensions.k8s.io", "v1", name.CRDStr),
	}
	// autoscaling v2 API is available on >=1.23
	hpa := gvk("autoscaling", "v2beta2", name.HPAStr)
	if kube.IsKubeAtLeastOrLessThanVersion(version, autoscalingV2MinK8SVersion, true) {
		hpa = gvk("autoscaling", "v2", name.HPAStr)
	}
	res = append(res, hpa)
	// policy/v1 is available on >=1.21
	pdb := gvk("policy", "v1beta1", name.PDBStr)
	if kube.IsKubeAtLeastOrLessThanVersion(version, pdbV1MinK8SVersion, true) {
		pdb = gvk("policy", "v1", name.PDBStr)
	}
	res = append(res, pdb)
	return res
}
var (
	// ownedResourcePredicates triggers reconciliation only when an operator-created resource is
	// deleted; create/update/generic events are ignored.
	ownedResourcePredicates = predicate.Funcs{
		CreateFunc: func(_ event.CreateEvent) bool {
			// no action
			return false
		},
		GenericFunc: func(_ event.GenericEvent) bool {
			// no action
			return false
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			obj, err := meta.Accessor(e.Object)
			// BUGFIX: check the Accessor error before using obj; previously the Debugf below
			// dereferenced obj first, which can be nil when Accessor fails.
			if err != nil {
				return false
			}
			scope.Debugf("got delete event for %s.%s", obj.GetName(), obj.GetNamespace())
			unsObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.Object)
			if err != nil {
				return false
			}
			if isOperatorCreatedResource(obj) {
				crName := obj.GetLabels()[helmreconciler.OwningResourceName]
				crNamespace := obj.GetLabels()[helmreconciler.OwningResourceNamespace]
				componentName := obj.GetLabels()[helmreconciler.IstioComponentLabelStr]
				var host string
				if restConfig != nil {
					host = restConfig.Host
				}
				// Evict the deleted object from the operator cache so the next reconcile re-applies it.
				crHash := strings.Join([]string{crName, crNamespace, componentName, host}, "-")
				oh := object.NewK8sObject(&unstructured.Unstructured{Object: unsObj}, nil, nil).Hash()
				cache.RemoveObject(crHash, oh)
				return true
			}
			return false
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			// no action
			return false
		},
	}

	// operatorPredicates reconciles on IstioOperator create/delete, and on update only when the
	// spec, deletion timestamp, or generation changed.
	operatorPredicates = predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return true
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return true
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldIOP, ok := e.ObjectOld.(*iopv1alpha1.IstioOperator)
			if !ok {
				scope.Error(errdict.OperatorFailedToGetObjectInCallback, "failed to get old IstioOperator")
				return false
			}
			// BUGFIX: use the comma-ok assertion. The original bare assertion would panic on a
			// mismatched type, and the following `if !ok` re-tested the stale ok from the old object.
			newIOP, ok := e.ObjectNew.(*iopv1alpha1.IstioOperator)
			if !ok {
				scope.Error(errdict.OperatorFailedToGetObjectInCallback, "failed to get new IstioOperator")
				return false
			}
			if !reflect.DeepEqual(oldIOP.Spec, newIOP.Spec) ||
				oldIOP.GetDeletionTimestamp() != newIOP.GetDeletionTimestamp() ||
				oldIOP.GetGeneration() != newIOP.GetGeneration() {
				return true
			}
			return false
		},
	}
)
// NewReconcileIstioOperator creates a new ReconcileIstioOperator and returns a ptr to it.
func NewReconcileIstioOperator(client client.Client, kubeClient kube.Client, scheme *runtime.Scheme) *ReconcileIstioOperator {
	r := &ReconcileIstioOperator{}
	r.client = client
	r.kubeClient = kubeClient
	r.scheme = scheme
	return r
}
// ReconcileIstioOperator reconciles a IstioOperator object
type ReconcileIstioOperator struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// kubeClient is used for direct cluster queries (server version, JWT policy, CA config validation).
	kubeClient kube.Client
	// scheme maps Go types to Kubernetes group/version/kinds.
	scheme *runtime.Scheme
	// options carries optional reconciler settings; may be nil.
	options *Options
}
// Reconcile reads that state of the cluster for a IstioOperator object and makes changes based on the state read
// and what is in the IstioOperator.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileIstioOperator) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
	scope.Info("Reconciling IstioOperator")
	ns, iopName := request.Namespace, request.Name
	reqNamespacedName := types.NamespacedName{
		Name:      request.Name,
		Namespace: ns,
	}
	// declare read-only iop instance to create the reconciler
	iop := &iopv1alpha1.IstioOperator{}
	if err := r.client.Get(context.TODO(), reqNamespacedName, iop); err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			metrics.CRDeletionTotal.Increment()
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		scope.Warnf(errdict.OperatorFailedToGetObjectFromAPIServer, "error getting IstioOperator %s: %s", iopName, err)
		metrics.CountCRFetchFail(errors.ReasonForError(err))
		return reconcile.Result{}, err
	}
	// An absent spec is treated as a request for the default profile.
	if iop.Spec == nil {
		iop.Spec = &v1alpha1.IstioOperatorSpec{Profile: name.DefaultProfileName}
	}
	// Skip CRs targeting a different operator revision (multi-revision installs).
	operatorRevision, _ := os.LookupEnv("REVISION")
	if operatorRevision != "" && operatorRevision != iop.Spec.Revision {
		scope.Infof("Ignoring IstioOperator CR %s with revision %s, since operator revision is %s.", iopName, iop.Spec.Revision, operatorRevision)
		return reconcile.Result{}, nil
	}
	// Honor the explicit opt-out annotation.
	if iop.Annotations != nil {
		if ir := iop.Annotations[IgnoreReconcileAnnotation]; ir == "true" {
			scope.Infof("Ignoring the IstioOperator CR %s because it is annotated to be ignored for reconcile ", iopName)
			return reconcile.Result{}, nil
		}
	}
	// for backward compatibility, the previous applied installed-state CR does not have the ignore reconcile annotation
	// TODO(richardwxn): remove this check and rely on annotation check only
	if strings.HasPrefix(iop.Name, name.InstalledSpecCRPrefix) {
		scope.Infof("Ignoring the installed-state IstioOperator CR %s ", iopName)
		return reconcile.Result{}, nil
	}
	var err error
	// Shallow-copy the CR so profile merging does not mutate the cached object's Spec pointer.
	iopMerged := &iopv1alpha1.IstioOperator{}
	*iopMerged = *iop
	// get the merged values in iop on top of the defaults for the profile given by iop.profile
	iopMerged.Spec, err = mergeIOPSWithProfile(iopMerged)
	if err != nil {
		scope.Errorf(errdict.OperatorFailedToMergeUserIOP, "failed to merge base profile with user IstioOperator CR %s, %s", iopName, err)
		return reconcile.Result{}, err
	}
	deleted := iop.GetDeletionTimestamp() != nil
	finalizers := sets.NewString(iop.GetFinalizers()...)
	if deleted {
		// CR is being deleted: tear down installed resources, then remove our finalizer.
		if !finalizers.Has(finalizer) {
			scope.Infof("IstioOperator %s deleted", iopName)
			metrics.CRDeletionTotal.Increment()
			return reconcile.Result{}, nil
		}
		scope.Infof("Deleting IstioOperator %s", iopName)
		reconciler, err := helmreconciler.NewHelmReconciler(r.client, r.kubeClient, iopMerged, nil)
		if err != nil {
			return reconcile.Result{}, err
		}
		if err := reconciler.Delete(); err != nil {
			scope.Errorf("Failed to delete resources with helm reconciler: %s.", err)
			return reconcile.Result{}, err
		}
		finalizers.Delete(finalizer)
		iop.SetFinalizers(finalizers.List())
		finalizerError := r.client.Update(context.TODO(), iop)
		// On conflict, re-read the object and retry finalizer removal up to finalizerMaxRetries times.
		for retryCount := 0; errors.IsConflict(finalizerError) && retryCount < finalizerMaxRetries; retryCount++ {
			scope.Info("API server conflict during finalizer removal, retrying.")
			_ = r.client.Get(context.TODO(), request.NamespacedName, iop)
			finalizers = sets.NewString(iop.GetFinalizers()...)
			finalizers.Delete(finalizer)
			iop.SetFinalizers(finalizers.List())
			finalizerError = r.client.Update(context.TODO(), iop)
		}
		if finalizerError != nil {
			if errors.IsNotFound(finalizerError) {
				scope.Infof("Did not remove finalizer from %s: the object was previously deleted.", iopName)
				metrics.CRDeletionTotal.Increment()
				return reconcile.Result{}, nil
			} else if errors.IsConflict(finalizerError) {
				scope.Infof("Could not remove finalizer from %s due to conflict. Operation will be retried in next reconcile attempt.", iopName)
				return reconcile.Result{}, nil
			}
			scope.Errorf(errdict.OperatorFailedToRemoveFinalizer, "error removing finalizer: %s", finalizerError)
			return reconcile.Result{}, finalizerError
		}
		return reconcile.Result{}, nil
	} else if !finalizers.Has(finalizer) {
		// First reconcile of a live CR: install our finalizer so deletion is intercepted above.
		log.Infof("Adding finalizer %v to %v", finalizer, request)
		finalizers.Insert(finalizer)
		iop.SetFinalizers(finalizers.List())
		err := r.client.Update(context.TODO(), iop)
		if err != nil {
			if errors.IsNotFound(err) {
				scope.Infof("Could not add finalizer to %s: the object was deleted.", iopName)
				metrics.CRDeletionTotal.Increment()
				return reconcile.Result{}, nil
			} else if errors.IsConflict(err) {
				scope.Infof("Could not add finalizer to %s due to conflict. Operation will be retried in next reconcile attempt.", iopName)
			}
			scope.Errorf(errdict.OperatorFailedToAddFinalizer, "Failed to add finalizer to IstioOperator CR %s: %s", iopName, err)
			return reconcile.Result{}, err
		}
	}
	scope.Info("Updating IstioOperator")
	val := iopMerged.Spec.Values.AsMap()
	if _, ok := val["global"]; !ok {
		val["global"] = make(map[string]interface{})
	}
	globalValues := val["global"].(map[string]interface{})
	scope.Info("Detecting third-party JWT support")
	var jwtPolicy util.JWTPolicy
	if jwtPolicy, err = util.DetectSupportedJWTPolicy(r.kubeClient.Kube()); err != nil {
		// TODO(howardjohn): add to dictionary. When resolved, replace this sentence with Done or WontFix - if WontFix, add reason.
		scope.Warnf("Failed to detect third-party JWT support: %v", err)
	} else {
		if jwtPolicy == util.FirstPartyJWT {
			scope.Info("Detected that your cluster does not support third party JWT authentication. " +
				"Falling back to less secure first party JWT. " +
				"See " + url.ConfigureSAToken + " for details.")
		}
		globalValues["jwtPolicy"] = string(jwtPolicy)
	}
	err = util.ValidateIOPCAConfig(r.kubeClient, iopMerged)
	if err != nil {
		scope.Errorf(errdict.OperatorFailedToConfigure, "failed to apply IstioOperator resources. Error %s", err)
		return reconcile.Result{}, err
	}
	helmReconcilerOptions := &helmreconciler.Options{
		Log:         clog.NewDefaultLogger(),
		ProgressLog: progress.NewLog(),
	}
	if r.options != nil {
		helmReconcilerOptions.Force = r.options.Force
	}
	reconciler, err := helmreconciler.NewHelmReconciler(r.client, r.kubeClient, iopMerged, helmReconcilerOptions)
	if err != nil {
		return reconcile.Result{}, err
	}
	if err := reconciler.SetStatusBegin(); err != nil {
		return reconcile.Result{}, err
	}
	// Apply the merged CR. Status is completed even when Reconcile errored so the CR reflects
	// the failure, and the reconcile error is returned so the request is requeued.
	status, err := reconciler.Reconcile()
	if err != nil {
		scope.Errorf("Error during reconcile: %s", err)
	}
	if err := reconciler.SetStatusComplete(status); err != nil {
		return reconcile.Result{}, err
	}
	return reconcile.Result{}, err
}
// mergeIOPSWithProfile overlays the values in iop on top of the defaults for the profile given by iop.profile and
// returns the merged result.
func mergeIOPSWithProfile(iop *iopv1alpha1.IstioOperator) (*v1alpha1.IstioOperatorSpec, error) {
	// Load the base profile YAML for the profile named in the CR.
	profileYAML, err := helm.GetProfileYAML(iop.Spec.InstallPackagePath, iop.Spec.Profile)
	if err != nil {
		metrics.CountCRMergeFail(metrics.CannotFetchProfileError)
		return nil, err
	}
	// Due to the fact that base profile is compiled in before a tag can be created, we must allow an additional
	// override from variables that are set during release build time.
	hub := version.DockerInfo.Hub
	tag := version.DockerInfo.Tag
	if hub != "" && hub != "unknown" && tag != "" && tag != "unknown" {
		buildHubTagOverlayYAML, err := helm.GenerateHubTagOverlay(hub, tag)
		if err != nil {
			metrics.CountCRMergeFail(metrics.OverlayError)
			return nil, err
		}
		profileYAML, err = util.OverlayYAML(profileYAML, buildHubTagOverlayYAML)
		if err != nil {
			metrics.CountCRMergeFail(metrics.OverlayError)
			return nil, err
		}
	}
	// Serialize the user CR so it can be overlaid on the profile YAML.
	overlayYAMLB, err := yaml.Marshal(iop)
	if err != nil {
		metrics.CountCRMergeFail(metrics.IOPFormatError)
		return nil, err
	}
	overlayYAML := string(overlayYAMLB)
	t := translate.NewReverseTranslator()
	// Translate legacy values.* k8s settings in the user overlay into first-class IOP fields.
	overlayYAML, err = t.TranslateK8SfromValueToIOP(overlayYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.TranslateValuesError)
		return nil, fmt.Errorf("could not overlay k8s settings from values to IOP: %s", err)
	}
	// User overlay wins over profile defaults.
	mergedYAML, err := util.OverlayIOP(profileYAML, overlayYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.OverlayError)
		return nil, err
	}
	// Propagate component enablement expressed via values into the merged document.
	mergedYAML, err = translate.OverlayValuesEnablement(mergedYAML, overlayYAML, "")
	if err != nil {
		metrics.CountCRMergeFail(metrics.TranslateValuesError)
		return nil, err
	}
	mergedYAMLSpec, err := tpath.GetSpecSubtree(mergedYAML)
	if err != nil {
		metrics.CountCRMergeFail(metrics.InternalYAMLParseError)
		return nil, err
	}
	// The final unmarshal also validates the merged spec.
	return istio.UnmarshalAndValidateIOPS(mergedYAMLSpec)
}
// Add creates a new IstioOperator Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started. It also provides additional options to modify internal reconciler behavior.
func Add(mgr manager.Manager, options *Options) error {
	restConfig = mgr.GetConfig()
	kubeClient, err := kube.NewExtendedClient(kube.NewClientConfigForRestConfig(restConfig), "")
	if err != nil {
		return fmt.Errorf("create Kubernetes client: %v", err)
	}
	reconciler := &ReconcileIstioOperator{
		client:     mgr.GetClient(),
		scheme:     mgr.GetScheme(),
		kubeClient: kubeClient,
		options:    options,
	}
	return add(mgr, reconciler)
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r *ReconcileIstioOperator) error {
	scope.Info("Adding controller for IstioOperator.")
	// Create a new controller bound to the manager.
	c, err := controller.New("istiocontrolplane-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to the primary resource IstioOperator.
	if err := c.Watch(&source.Kind{Type: &iopv1alpha1.IstioOperator{}}, &handler.EnqueueRequestForObject{}, operatorPredicates); err != nil {
		return err
	}
	// The server version decides which HPA/PDB API versions to watch.
	ver, err := r.kubeClient.GetKubernetesVersion()
	if err != nil {
		return err
	}
	// Watch for changes to Istio resources owned by the operator.
	if err := watchIstioResources(c, ver); err != nil {
		return err
	}
	scope.Info("Controller added")
	return nil
}
// Watch changes for Istio resources managed by the operator
func watchIstioResources(c controller.Controller, ver *kubeversion.Info) error {
	// Map any watched object back to its owning IstioOperator using the
	// ownership labels stamped on it by the reconciler.
	mapFn := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request {
		scope.Infof("Watching a change for istio resource: %s/%s", a.GetNamespace(), a.GetName())
		labels := a.GetLabels()
		return []reconcile.Request{
			{NamespacedName: types.NamespacedName{
				Name:      labels[helmreconciler.OwningResourceName],
				Namespace: labels[helmreconciler.OwningResourceNamespace],
			}},
		}
	})
	for _, t := range watchedResources(ver) {
		u := &unstructured.Unstructured{}
		u.SetGroupVersionKind(schema.GroupVersionKind{
			Kind:    t.Kind,
			Group:   t.Group,
			Version: t.Version,
		})
		// A failed watch is logged but does not abort the remaining watches.
		if err := c.Watch(&source.Kind{Type: u}, mapFn, ownedResourcePredicates); err != nil {
			scope.Errorf("Could not create watch for %s/%s/%s: %s.", t.Kind, t.Group, t.Version, err)
		}
	}
	return nil
}
// Check if the specified object is created by operator
func isOperatorCreatedResource(obj metav1.Object) bool {
	labels := obj.GetLabels()
	// All three ownership labels must be present (non-empty) for the object
	// to count as operator-created.
	for _, key := range []string{
		helmreconciler.OwningResourceName,
		helmreconciler.OwningResourceNamespace,
		helmreconciler.IstioComponentLabelStr,
	} {
		if labels[key] == "" {
			return false
		}
	}
	return true
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"os"
"path/filepath"
"runtime"
"testing"
)
// TestFindRoot checks that findProjectRoot locates the project root from the
// root itself, from a subdirectory, from a nonexistent subdirectory, and that
// it fails when asked to walk below a file.
func TestFindRoot(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	want := filepath.Join(wd, "_testdata", "rootfind")

	if got, err := findProjectRoot(want); err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if got != want {
		t.Errorf("findProjectRoot directly on root dir should have found %s, got %s", want, got)
	}

	if got, err := findProjectRoot(filepath.Join(want, "subdir")); err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if got != want {
		t.Errorf("findProjectRoot on subdir should have found %s, got %s", want, got)
	}

	if got, err := findProjectRoot(filepath.Join(want, "nonexistent")); err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if got != want {
		t.Errorf("findProjectRoot on nonexistent subdir should still work and give %s, got %s", want, got)
	}

	// the following test does not work on windows because syscall.Stat does not
	// return a "not a directory" error
	if runtime.GOOS != "windows" {
		if got, err := findProjectRoot(filepath.Join(want, manifestName)); err == nil {
			t.Errorf("Should have err'd when trying subdir of file, but returned %s", got)
		}
	}
}
Added more tests to main_test.go
Increased main.go coverage from 12.8% to 21.8%
- Added test for `project.makeParams()`
- Added an extra test for `findProjectRoot()`
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/sdboyer/gps"
)
// TestFindRoot checks findProjectRoot from the project root, a subdirectory,
// a nonexistent subdirectory, the filesystem root (expected to fail with the
// not-found error), and a path below a regular file (expected to fail on
// non-Windows platforms).
func TestFindRoot(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	expect := filepath.Join(wd, "_testdata", "rootfind")
	got1, err := findProjectRoot(expect)
	if err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if expect != got1 {
		t.Errorf("findProjectRoot directly on root dir should have found %s, got %s", expect, got1)
	}
	got2, err := findProjectRoot(filepath.Join(expect, "subdir"))
	if err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if expect != got2 {
		t.Errorf("findProjectRoot on subdir should have found %s, got %s", expect, got2)
	}
	got3, err := findProjectRoot(filepath.Join(expect, "nonexistent"))
	if err != nil {
		t.Errorf("Unexpected error while finding root: %s", err)
	} else if expect != got3 {
		t.Errorf("findProjectRoot on nonexistent subdir should still work and give %s, got %s", expect, got3)
	}
	root := "/"
	returnedPath, projectRootErr := findProjectRoot(root)
	if returnedPath != "" {
		t.Errorf("findProjectRoot with path %s returned non empty string: %s", root, returnedPath)
	}
	if projectRootErr == nil {
		t.Errorf("findProjectRoot with path %s should return error", root)
	}
	// fmt.Sprintf("%v", projectRootErr.Error()) was redundant (Error()
	// already yields a string) and would panic if projectRootErr were nil;
	// fmt.Sprint formats the error directly and is nil-safe.
	errStr := fmt.Sprint(projectRootErr)
	expectedStr := "could not find project manifest.json, use dep init to initiate a manifest"
	if errStr != expectedStr {
		t.Errorf("Incorrect errProjectNotFound error. Found: %s. Expected: %s", errStr, expectedStr)
	}
	// the following test does not work on windows because syscall.Stat does not
	// return a "not a directory" error
	if runtime.GOOS != "windows" {
		got4, err := findProjectRoot(filepath.Join(expect, manifestName))
		if err == nil {
			t.Errorf("Should have err'd when trying subdir of file, but returned %s", got4)
		}
	}
}
// TestProjectMakeParams verifies that makeParams carries the project's
// manifest and lock through to the returned gps.SolveParameters.
func TestProjectMakeParams(t *testing.T) {
	prj := project{
		absroot:    "someroot",
		importroot: gps.ProjectRoot("Some project root"),
		m:          &manifest{Ignores: []string{"ignoring this"}},
		l:          &lock{},
	}

	params := prj.makeParams()
	if params.Manifest != prj.m {
		t.Error("makeParams() returned gps.SolveParameters with incorrect Manifest")
	}
	if params.Lock != prj.l {
		t.Error("makeParams() returned gps.SolveParameters with incorrect Lock")
	}
}
|
package main
import (
"io/ioutil"
"os"
"strings"
"sync"
"testing"
)
// TestPrintMatchingLines runs printMatchingLines over table-driven cases and
// checks its stdout against expected (non-)matches; stderr must stay empty.
func TestPrintMatchingLines(t *testing.T) {
	tests := []struct {
		pattern  string
		file     string
		matchYes []string // matching lines expected to contain these strings
		matchNo  []string // matching lines are not expected to contain these
	}{
		{
			pattern:  "fish",
			file:     "testdata/fish.txt",
			matchYes: []string{"One", "two", "red", "blue"},
			matchNo:  []string{"ONE", "TWO"},
		},
		{
			pattern:  "not-in-text",
			file:     "testdata/fish.txt",
			matchYes: []string{},
			matchNo:  []string{"One", "two", "red", "blue", "ONE", "TWO", "RED", "BLUE"},
		},
		{
			pattern:  " ",
			file:     "testdata/empty.txt",
			matchYes: []string{},
			matchNo:  []string{},
		},
	}
	for _, test := range tests {
		capOut, capErr := captureStdoutStderr(func() {
			// BUG FIX: the loop previously hardcoded "testdata/fish.txt",
			// ignoring test.file, so the empty.txt case never ran against
			// its own fixture.
			printMatchingLines(test.pattern, test.file)
		}, t)
		for _, exp := range test.matchYes {
			if !strings.Contains(capOut, exp) {
				t.Errorf("%q in %s: expected match of line with %q\n\n%s",
					test.pattern, test.file, exp, capOut)
			}
		}
		for _, notExp := range test.matchNo {
			if strings.Contains(capOut, notExp) {
				t.Errorf("%q in %s: expected no match of line with %q\n\n%s",
					test.pattern, test.file, notExp, capOut)
			}
		}
		if capErr != "" {
			t.Errorf("expected nothing in STDERR:\n%q", capErr)
		}
	}
}
// takes a zero-argument function and invokes the function while
// capturing all data written to stdout/error.
func captureStdoutStderr(f func(), t *testing.T) (string, string) {
	savedOut, savedErr := os.Stdout, os.Stderr
	defer func() {
		os.Stdout, os.Stderr = savedOut, savedErr
	}()

	var (
		outBytes []byte
		errBytes []byte
		wg       sync.WaitGroup
	)
	wOut := pipeForBuffer(&outBytes, &wg, t)
	wErr := pipeForBuffer(&errBytes, &wg, t)
	os.Stdout, os.Stderr = wOut, wErr

	f()

	// Closing the write ends lets the reader goroutines hit EOF.
	wOut.Close()
	wErr.Close()
	wg.Wait() // allow the capture routines to finish
	return string(outBytes), string(errBytes)
}
func pipeForBuffer(buf *[]byte, wg *sync.WaitGroup, t *testing.T) *os.File {
// Go doesn't use io.Writer for Stdout/Stderr
// so we have to jump through some hoops using an
// in-memory pipe to accomplish the capture
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("error with os.Pipe %q", err)
}
wg.Add(1)
go func() {
*buf, err = ioutil.ReadAll(r)
if err != nil {
t.Fatalf("error with os.Pipe %q", err)
}
wg.Done()
}()
return w
}
Fixed the test loop to use the table data (the file path was hard-coded) instead of ignoring each case's file field.
package main
import (
"io/ioutil"
"os"
"strings"
"sync"
"testing"
)
// TestPrintMatchingLines drives printMatchingLines over a table of
// pattern/file cases, asserting which substrings appear in its stdout and
// that nothing is written to stderr.
func TestPrintMatchingLines(t *testing.T) {
	cases := []struct {
		pattern  string
		file     string
		matchYes []string // matching lines expected to contain these strings
		matchNo  []string // matching lines are not expected to contain these
	}{
		{
			pattern:  "fish",
			file:     "testdata/fish.txt",
			matchYes: []string{"One", "two", "red", "blue"},
			matchNo:  []string{"ONE", "TWO"},
		},
		{
			pattern:  "not-in-text",
			file:     "testdata/fish.txt",
			matchYes: []string{},
			matchNo:  []string{"One", "two", "red", "blue", "ONE", "TWO", "RED", "BLUE"},
		},
		{
			pattern:  " ", // space expected in all lines with text
			file:     "testdata/fish.txt",
			matchYes: []string{"One", "two", "red", "blue", "ONE", "TWO", "RED", "BLUE"},
			matchNo:  []string{},
		},
		{
			pattern:  " ",
			file:     "testdata/empty.txt",
			matchYes: []string{},
			matchNo:  []string{},
		},
	}
	for _, tc := range cases {
		stdout, stderr := captureStdoutStderr(func() {
			printMatchingLines(tc.pattern, tc.file)
		}, t)
		for _, want := range tc.matchYes {
			if !strings.Contains(stdout, want) {
				t.Errorf("%q in %s: expected match of line with %q\n\n%s",
					tc.pattern, tc.file, want, stdout)
			}
		}
		for _, unwanted := range tc.matchNo {
			if strings.Contains(stdout, unwanted) {
				t.Errorf("%q in %s: expected no match of line with %q\n\n%s",
					tc.pattern, tc.file, unwanted, stdout)
			}
		}
		if stderr != "" {
			t.Errorf("expected nothing in STDERR:\n%q", stderr)
		}
	}
}
// takes a zero-argument function and invokes the function while
// capturing all data written to stdout/error.
func captureStdoutStderr(f func(), t *testing.T) (string, string) {
	savedOut, savedErr := os.Stdout, os.Stderr
	defer func() {
		os.Stdout, os.Stderr = savedOut, savedErr
	}()

	var (
		outBytes []byte
		errBytes []byte
		wg       sync.WaitGroup
	)
	wOut := pipeForBuffer(&outBytes, &wg, t)
	wErr := pipeForBuffer(&errBytes, &wg, t)
	os.Stdout, os.Stderr = wOut, wErr

	f()

	// Closing the write ends lets the reader goroutines hit EOF.
	wOut.Close()
	wErr.Close()
	wg.Wait() // allow the capture routines to finish
	return string(outBytes), string(errBytes)
}
func pipeForBuffer(buf *[]byte, wg *sync.WaitGroup, t *testing.T) *os.File {
// Go doesn't use io.Writer for Stdout/Stderr
// so we have to jump through some hoops using an
// in-memory pipe to accomplish the capture
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("error with os.Pipe %q", err)
}
wg.Add(1)
go func() {
*buf, err = ioutil.ReadAll(r)
if err != nil {
t.Fatalf("error with os.Pipe %q", err)
}
wg.Done()
}()
return w
}
|
package main_test
import (
. "github.com/reaandrew/code-named-something"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net/http"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
)
// End-to-end test: the compiled binary is handed a file of URLs and is
// expected to issue an HTTP GET against each one, which a recording server
// verifies.
var _ = Describe("Main", func() {
	It("Makes a http get request to each url in a file", func() {
		port := 8000
		list := `http://127.0.0.1:8000/A
http://127.0.0.1:8000/B
http://127.0.0.1:8000/C
`
		server := CreateRequestRecordingServer(port)
		defer server.Stop()
		server.Start()
		file, err := ioutil.TempFile(os.TempDir(), "prefix")
		if err != nil {
			panic(err)
		}
		defer os.Remove(file.Name())
		// Check the write and close the file so the URL list is fully on
		// disk (and the descriptor released) before the child process reads
		// it; both were previously ignored.
		if _, err = file.WriteString(list); err != nil {
			panic(err)
		}
		if err = file.Close(); err != nil {
			panic(err)
		}
		exePath, err := filepath.Abs("./code-named-something")
		if err != nil {
			panic(err)
		}
		cmd := exec.Command(exePath, "-f", file.Name())
		_, _ = cmd.CombinedOutput()
		// NOTE(review): this direct GET to /A is itself recorded by the
		// server, so the "/A" expectation below would pass even if the
		// binary never requested it — confirm whether this probe is needed.
		client := &http.Client{}
		req, _ := http.NewRequest("GET", "http://127.0.0.1:8000/A", nil)
		_, err = client.Do(req)
		if err != nil {
			panic(err)
		}
		Expect(err).To(BeNil())
		Expect(server.Contains(RequestWithPath("/A"))).To(Equal(true))
		Expect(server.Contains(RequestWithPath("/B"))).To(Equal(true))
		Expect(server.Contains(RequestWithPath("/C"))).To(Equal(true))
	})
})
Changed the package name because Travis is having trouble building it (not sure why yet); this should fix the build.
package main
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net/http"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
)
// End-to-end test: the compiled binary is handed a file of URLs and is
// expected to issue an HTTP GET against each one, which a recording server
// verifies.
var _ = Describe("Main", func() {
	It("Makes a http get request to each url in a file", func() {
		port := 8000
		list := `http://127.0.0.1:8000/A
http://127.0.0.1:8000/B
http://127.0.0.1:8000/C
`
		server := CreateRequestRecordingServer(port)
		defer server.Stop()
		server.Start()
		file, err := ioutil.TempFile(os.TempDir(), "prefix")
		if err != nil {
			panic(err)
		}
		defer os.Remove(file.Name())
		// Check the write and close the file so the URL list is fully on
		// disk (and the descriptor released) before the child process reads
		// it; both were previously ignored.
		if _, err = file.WriteString(list); err != nil {
			panic(err)
		}
		if err = file.Close(); err != nil {
			panic(err)
		}
		exePath, err := filepath.Abs("./code-named-something")
		if err != nil {
			panic(err)
		}
		cmd := exec.Command(exePath, "-f", file.Name())
		_, _ = cmd.CombinedOutput()
		// NOTE(review): this direct GET to /A is itself recorded by the
		// server, so the "/A" expectation below would pass even if the
		// binary never requested it — confirm whether this probe is needed.
		client := &http.Client{}
		req, _ := http.NewRequest("GET", "http://127.0.0.1:8000/A", nil)
		_, err = client.Do(req)
		if err != nil {
			panic(err)
		}
		Expect(err).To(BeNil())
		Expect(server.Contains(RequestWithPath("/A"))).To(Equal(true))
		Expect(server.Contains(RequestWithPath("/B"))).To(Equal(true))
		Expect(server.Contains(RequestWithPath("/C"))).To(Equal(true))
	})
})
|
package main
import (
"os"
"reflect"
"regexp"
"strings"
"testing"
"gopkg.in/ini.v1"
)
// init compiles every extractor URL pattern and resolves the Flickr API key.
// regexp.MustCompile replaces the previous regexp.Compile calls whose errors
// were silently discarded: a broken pattern now fails loudly at start-up
// instead of leaving a nil *Regexp to dereference mid-test.
func init() {
	RegexpUrlTwitter = regexp.MustCompile(REGEXP_URL_TWITTER)
	RegexpUrlTistory = regexp.MustCompile(REGEXP_URL_TISTORY)
	RegexpUrlTistoryWithCDN = regexp.MustCompile(REGEXP_URL_TISTORY_WITH_CDN)
	RegexpUrlGfycat = regexp.MustCompile(REGEXP_URL_GFYCAT)
	RegexpUrlInstagram = regexp.MustCompile(REGEXP_URL_INSTAGRAM)
	RegexpUrlImgurSingle = regexp.MustCompile(REGEXP_URL_IMGUR_SINGLE)
	RegexpUrlImgurAlbum = regexp.MustCompile(REGEXP_URL_IMGUR_ALBUM)
	RegexpUrlGoogleDrive = regexp.MustCompile(REGEXP_URL_GOOGLEDRIVE)
	RegexpUrlPossibleTistorySite = regexp.MustCompile(REGEXP_URL_POSSIBLE_TISTORY_SITE)
	RegexpUrlFlickrPhoto = regexp.MustCompile(REGEXP_URL_FLICKR_PHOTO)
	RegexpUrlFlickrAlbum = regexp.MustCompile(REGEXP_URL_FLICKR_ALBUM)
	RegexpUrlStreamable = regexp.MustCompile(REGEXP_URL_STREAMABLE)

	flickrApiKey = os.Getenv("FLICKR_API_KEY")
	// NOTE(review): a readable config.ini overrides FLICKR_API_KEY even when
	// the environment variable is set — confirm that precedence is intended.
	if cfg, err := ini.Load("config.ini"); err == nil {
		flickrApiKey = cfg.Section("flickr").Key("api key").MustString("yourflickrapikey")
	}
}
// urlsTestpair couples an input URL with the download map an extractor is
// expected to return (download URL -> suggested file name; an empty name
// means the extractor does not choose one).
type urlsTestpair struct {
	value string
	result map[string]string
}
// getTwitterUrlsTests: each input variant (with/without :orig, http/https)
// should normalize to the canonical https ":orig" download URL.
var getTwitterUrlsTests = []urlsTestpair{
	{
		"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
	{
		"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
	{
		"http://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
}
// TestGetTwitterUrls checks Twitter URL normalization against its table.
func TestGetTwitterUrls(t *testing.T) {
	for _, tc := range getTwitterUrlsTests {
		got, err := getTwitterUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getTistoryUrlsTests: both the /original/ and /image/ path forms should
// resolve to the /original/ download URL; no file name is suggested.
var getTistoryUrlsTests = []urlsTestpair{
	{
		"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E",
		map[string]string{"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E": ""},
	},
	{
		"http://cfile25.uf.tistory.com/image/235CA739582E86992EFC4E",
		map[string]string{"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E": ""},
	},
}
// TestGetTistoryUrls checks Tistory URL normalization against its table.
func TestGetTistoryUrls(t *testing.T) {
	for _, tc := range getTistoryUrlsTests {
		got, err := getTistoryUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getGfycatUrlsTests: a gfycat page URL should resolve to its mp4 on the
// "fat" CDN host.
var getGfycatUrlsTests = []urlsTestpair{
	{
		"https://gfycat.com/SandyChiefBoubou",
		map[string]string{"https://fat.gfycat.com/SandyChiefBoubou.mp4": ""},
	},
}
// TestGetGfycatUrls checks gfycat URL resolution against its table.
func TestGetGfycatUrls(t *testing.T) {
	for _, tc := range getGfycatUrlsTests {
		got, err := getGfycatUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getInstagramUrlsPictureTests: picture posts map to the /media/?size=l
// endpoint; an existing taken-by query parameter is carried over.
var getInstagramUrlsPictureTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BHhDAmhAz33/?taken-by=s_sohye",
		map[string]string{"https://www.instagram.com/p/BHhDAmhAz33/media/?size=l&taken-by=s_sohye": "instagram s_sohye - BHhDAmhAz33.jpg"},
	},
	{
		"https://www.instagram.com/p/BHhDAmhAz33/",
		map[string]string{"https://www.instagram.com/p/BHhDAmhAz33/media/?size=l": "instagram s_sohye - BHhDAmhAz33.jpg"},
	},
}
// getInstagramUrlsVideoTests: keys are bare file names rather than full URLs
// because the CDN host varies; the test only does containment checks.
var getInstagramUrlsVideoTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BL2_ZIHgYTp/?taken-by=s_sohye",
		map[string]string{"14811404_233311497085396_338650092456116224_n.mp4": "instagram s_sohye - BL2_ZIHgYTp.mp4"},
	},
	{
		"https://www.instagram.com/p/BL2_ZIHgYTp/",
		map[string]string{"14811404_233311497085396_338650092456116224_n.mp4": "instagram s_sohye - BL2_ZIHgYTp.mp4"},
	},
}
// getInstagramUrlsAlbumTests: multi-image posts; keys are bare file names for
// the same CDN-variance reason.
var getInstagramUrlsAlbumTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BRiCc0VjULk/?taken-by=gfriendofficial",
		map[string]string{
			"17265460_395888184109957_3500310922180689920_n.jpg": "instagram gfriendofficial - BRiCc0VjULk",
			"17265456_267171360360765_8110946520456495104_n.jpg": "instagram gfriendofficial - BRiCc0VjULk",
			"17265327_1394797493912862_2677004307588448256_n.jpg": "instagram gfriendofficial - BRiCc0VjULk"},
	},
	{
		"https://www.instagram.com/p/BRhheSPjaQ3/",
		map[string]string{
			"17125875_306909746390523_8184965703367917568_n.jpg": "instagram gfriendofficial - BRhheSPjaQ3",
			"17266053_188727064951899_2485556569865977856_n.jpg": "instagram gfriendofficial - BRhheSPjaQ3"},
	},
}
// TestGetInstagramUrls covers the three post flavours: single picture,
// video, and album.
func TestGetInstagramUrls(t *testing.T) {
	// Picture posts have stable URLs, so an exact map comparison works.
	for _, pair := range getInstagramUrlsPictureTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		if !reflect.DeepEqual(v, pair.result) {
			t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
		}
	}
	for _, pair := range getInstagramUrlsVideoTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		for keyResult, valueResult := range pair.result {
			for keyExpected, valueExpected := range v {
				// NOTE(review): this errors when Contains is TRUE (i.e. when
				// the strings appear to match), and the nested range forms a
				// cross-product that compares every expected entry with every
				// returned entry. The condition looks inverted and the
				// keyResult/keyExpected names look swapped — confirm intent.
				if strings.Contains(keyResult, keyExpected) || valueResult != valueExpected { // CDN location can vary
					t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
				}
			}
		}
	}
	for _, pair := range getInstagramUrlsAlbumTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		for keyResult, valueResult := range pair.result {
			for keyExpected, valueExpected := range v {
				// NOTE(review): same suspected inversion as above — erroring
				// when both key and value DO contain the expected strings.
				if strings.Contains(keyResult, keyExpected) || strings.Contains(valueResult, valueExpected) { // CDN location can vary
					t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
				}
			}
		}
	}
}
// getImgurSingleUrlsTests: single-image links gain a /download/ path segment;
// the scheme is preserved and a .gifv extension is dropped.
var getImgurSingleUrlsTests = []urlsTestpair{
	{
		"http://imgur.com/viZictl",
		map[string]string{"http://imgur.com/download/viZictl": ""},
	},
	{
		"https://imgur.com/viZictl",
		map[string]string{"https://imgur.com/download/viZictl": ""},
	},
	{
		"https://i.imgur.com/viZictl.jpg",
		map[string]string{"https://i.imgur.com/download/viZictl.jpg": ""},
	},
	{
		"http://imgur.com/uYwt2VV",
		map[string]string{"http://imgur.com/download/uYwt2VV": ""},
	},
	{
		"http://i.imgur.com/uYwt2VV.gifv",
		map[string]string{"http://i.imgur.com/download/uYwt2VV": ""},
	},
}
// TestGetImgurSingleUrls checks single-image imgur URL rewriting.
func TestGetImgurSingleUrls(t *testing.T) {
	for _, tc := range getImgurSingleUrlsTests {
		got, err := getImgurSingleUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getImgurAlbumUrlsTests: an album link should expand to every image in the
// album; the test checks each expected link is present (order-independent).
var getImgurAlbumUrlsTests = []urlsTestpair{
	{
		"http://imgur.com/a/ALTpi",
		map[string]string{
			"https://i.imgur.com/FKoguPh.jpg": "",
			"https://i.imgur.com/5FNL6Pe.jpg": "",
			"https://i.imgur.com/YA0V0g9.jpg": "",
			"https://i.imgur.com/Uc2iDhD.jpg": "",
			"https://i.imgur.com/J9JRSSJ.jpg": "",
			"https://i.imgur.com/Xrx0uyE.jpg": "",
			"https://i.imgur.com/3xDSq1O.jpg": "",
		},
	},
}
// TestGetImgurAlbumUrls checks album expansion; each expected link/name pair
// must appear somewhere in the returned map.
func TestGetImgurAlbumUrls(t *testing.T) {
	for _, tc := range getImgurAlbumUrlsTests {
		got, err := getImgurAlbumUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		for wantLink, wantName := range tc.result {
			found := false
			for gotLink, gotName := range got {
				if gotLink == wantLink && gotName == wantName {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("For expected %s %s, got %s", wantLink, wantName, got)
			}
		}
	}
}
// getGoogleDriveUrlsTests: a Drive view link should become a direct
// uc?export=download link carrying the same file id.
var getGoogleDriveUrlsTests = []urlsTestpair{
	{
		"https://drive.google.com/file/d/0B8TnwsJqlFllSUtvUEhoSU40WkE/view",
		map[string]string{"https://drive.google.com/uc?export=download&id=0B8TnwsJqlFllSUtvUEhoSU40WkE": ""},
	},
}
// TestGetGoogleDriveUrls checks Google Drive link rewriting.
func TestGetGoogleDriveUrls(t *testing.T) {
	for _, tc := range getGoogleDriveUrlsTests {
		got, err := getGoogleDriveUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getTistoryWithCDNUrlsTests: a daumcdn thumbnail URL should be unwrapped to
// the original Tistory file URL encoded in its fname query parameter.
var getTistoryWithCDNUrlsTests = []urlsTestpair{
	{
		"http://img1.daumcdn.net/thumb/R720x0.q80/?scode=mtistory&fname=http%3A%2F%2Fcfile24.uf.tistory.com%2Fimage%2F2658554B580BDC4C0924CA",
		map[string]string{"http://cfile24.uf.tistory.com/original/2658554B580BDC4C0924CA": ""},
	},
}
// TestGetTistoryWithCDNUrls checks daumcdn-wrapped Tistory link unwrapping.
func TestGetTistoryWithCDNUrls(t *testing.T) {
	for _, tc := range getTistoryWithCDNUrlsTests {
		got, err := getTistoryWithCDNUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getPossibleTistorySiteUrlsTests: only the NUMBER of extracted URLs is
// asserted (the test compares lengths), so the map entries here are
// placeholders ("a".."e" = five expected images); a non-Tistory domain must
// yield an empty map.
var getPossibleTistorySiteUrlsTests = []urlsTestpair{
	{
		"http://soonduck.tistory.com/482",
		map[string]string{
			"a": "",
			"b": "",
			"c": "",
			"d": "",
			"e": "",
		},
	},
	{
		"http://soonduck.tistory.com/m/482",
		map[string]string{
			"a": "",
			"b": "",
			"c": "",
			"d": "",
			"e": "",
		},
	},
	{
		"http://slmn.de/123",
		map[string]string{},
	},
}
// TestGetPossibleTistorySiteUrls checks how many URLs are scraped from a
// (possible) Tistory site; only counts are compared because the actual URLs
// may vary.
func TestGetPossibleTistorySiteUrls(t *testing.T) {
	for _, tc := range getPossibleTistorySiteUrlsTests {
		got, err := getPossibleTistorySiteUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if len(got) != len(tc.result) { // only check filenames, urls may vary
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getFlickrUrlFromPhotoIdTests: a photo id should resolve to its large ("_b")
// staticflickr URL.
var getFlickrUrlFromPhotoIdTests = []map[string]string{
	{
		"value": "31065043320",
		"result": "https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg",
	},
}
// TestGetFlickrUrlFromPhotoId checks photo-id-to-URL resolution.
func TestGetFlickrUrlFromPhotoId(t *testing.T) {
	for _, tc := range getFlickrUrlFromPhotoIdTests {
		if got := getFlickrUrlFromPhotoId(tc["value"]); got != tc["result"] {
			t.Errorf("For %s, expected %s, got %s", tc["value"], tc["result"], got)
		}
	}
}
// getFlickrPhotoUrlsTests: every spelling of a photo page URL (with/without
// trailing slash, with/without the album suffix) maps to the same large
// staticflickr image.
var getFlickrPhotoUrlsTests = []urlsTestpair{
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/in/album-72157677350305446/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/in/album-72157677350305446",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
}
// TestGetFlickrPhotoUrls checks single Flickr photo URL resolution.
func TestGetFlickrPhotoUrls(t *testing.T) {
	for _, tc := range getFlickrPhotoUrlsTests {
		got, err := getFlickrPhotoUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// flickrAlbumWant is the download map shared by every entry in
// getFlickrAlbumUrlsTests: all four spellings of the same album URL must
// yield exactly this photo set. It was previously duplicated verbatim four
// times; the test only reads the map, so sharing one instance is safe.
var flickrAlbumWant = map[string]string{
	"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
	"https://farm6.staticflickr.com/5651/31434767515_49f88ee12e_b.jpg": "",
	"https://farm6.staticflickr.com/5750/31434766825_529fd08071_b.jpg": "",
	"https://farm6.staticflickr.com/5811/31319456971_37c8c4708a_b.jpg": "",
	"https://farm6.staticflickr.com/5494/30627074913_b7f810fc26_b.jpg": "",
	"https://farm6.staticflickr.com/5539/31065042720_d76f643b28_b.jpg": "",
	"https://farm6.staticflickr.com/5813/31434765285_94b85d5e8c_b.jpg": "",
	"https://farm6.staticflickr.com/5600/31065044090_eca63bd5a5_b.jpg": "",
	"https://farm6.staticflickr.com/5733/31434764435_350825477e_b.jpg": "",
	"https://farm6.staticflickr.com/5715/30627073573_b86e4b2c22_b.jpg": "",
	"https://farm6.staticflickr.com/5758/31289864222_5e3cca7e72_b.jpg": "",
	"https://farm6.staticflickr.com/5801/30627076673_5a32f3e562_b.jpg": "",
	"https://farm6.staticflickr.com/5538/31319458901_088858d7f1_b.jpg": "",
}

// getFlickrAlbumUrlsTests exercises the album URL variants: with/without a
// trailing slash and with/without the "with/" path segment.
var getFlickrAlbumUrlsTests = []urlsTestpair{
	{"https://www.flickr.com/photos/137385017@N08/albums/72157677350305446/", flickrAlbumWant},
	{"https://www.flickr.com/photos/137385017@N08/albums/72157677350305446", flickrAlbumWant},
	{"https://www.flickr.com/photos/137385017@N08/albums/with/72157677350305446/", flickrAlbumWant},
	{"https://www.flickr.com/photos/137385017@N08/albums/with/72157677350305446", flickrAlbumWant},
}
// TestGetFlickrAlbumUrls checks Flickr album expansion.
func TestGetFlickrAlbumUrls(t *testing.T) {
	for _, tc := range getFlickrAlbumUrlsTests {
		got, err := getFlickrAlbumUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getStreamableUrlsTests: the expected CDN URL is compared after the test
// strips any query string, since the real link carries volatile parameters.
var getStreamableUrlsTests = []urlsTestpair{
	{
		"http://streamable.com/41ajc",
		map[string]string{
			"https://cdn-e2.streamable.com/video/mp4/41ajc.mp4": "",
		},
	},
}
// TestGetStreamableUrls checks streamable extraction; the CDN URL carries a
// volatile query string, so keys are normalized (query stripped) before the
// map comparison.
func TestGetStreamableUrls(t *testing.T) {
	for _, pair := range getStreamableUrlsTests {
		v, err := getStreamableUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		// Build the normalized map separately: the old code deleted and
		// inserted keys while ranging over v, and per the Go spec a key
		// added during iteration may or may not be visited.
		normalized := make(map[string]string, len(v))
		for key, value := range v {
			parts := strings.SplitN(key, "?", 2)
			normalized[parts[0]] = value
		}
		if !reflect.DeepEqual(normalized, pair.result) {
			t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, normalized)
		}
	}
}
Updated the URL-extractor tests.
package main
import (
"os"
"reflect"
"regexp"
"strings"
"testing"
"gopkg.in/ini.v1"
)
// init compiles every extractor URL pattern and resolves the Flickr API key.
// regexp.MustCompile replaces the previous regexp.Compile calls whose errors
// were silently discarded: a broken pattern now fails loudly at start-up
// instead of leaving a nil *Regexp to dereference mid-test.
func init() {
	RegexpUrlTwitter = regexp.MustCompile(REGEXP_URL_TWITTER)
	RegexpUrlTistory = regexp.MustCompile(REGEXP_URL_TISTORY)
	RegexpUrlTistoryWithCDN = regexp.MustCompile(REGEXP_URL_TISTORY_WITH_CDN)
	RegexpUrlGfycat = regexp.MustCompile(REGEXP_URL_GFYCAT)
	RegexpUrlInstagram = regexp.MustCompile(REGEXP_URL_INSTAGRAM)
	RegexpUrlImgurSingle = regexp.MustCompile(REGEXP_URL_IMGUR_SINGLE)
	RegexpUrlImgurAlbum = regexp.MustCompile(REGEXP_URL_IMGUR_ALBUM)
	RegexpUrlGoogleDrive = regexp.MustCompile(REGEXP_URL_GOOGLEDRIVE)
	RegexpUrlPossibleTistorySite = regexp.MustCompile(REGEXP_URL_POSSIBLE_TISTORY_SITE)
	RegexpUrlFlickrPhoto = regexp.MustCompile(REGEXP_URL_FLICKR_PHOTO)
	RegexpUrlFlickrAlbum = regexp.MustCompile(REGEXP_URL_FLICKR_ALBUM)
	RegexpUrlStreamable = regexp.MustCompile(REGEXP_URL_STREAMABLE)

	flickrApiKey = os.Getenv("FLICKR_API_KEY")
	// NOTE(review): a readable config.ini overrides FLICKR_API_KEY even when
	// the environment variable is set — confirm that precedence is intended.
	if cfg, err := ini.Load("config.ini"); err == nil {
		flickrApiKey = cfg.Section("flickr").Key("api key").MustString("yourflickrapikey")
	}
}
// urlsTestpair couples an input URL with the download map an extractor is
// expected to return (download URL -> suggested file name; an empty name
// means the extractor does not choose one).
type urlsTestpair struct {
	value string
	result map[string]string
}
// getTwitterUrlsTests: each input variant (with/without :orig, http/https)
// should normalize to the canonical https ":orig" download URL.
var getTwitterUrlsTests = []urlsTestpair{
	{
		"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
	{
		"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
	{
		"http://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg",
		map[string]string{"https://pbs.twimg.com/media/CulDBM6VYAA-YhY.jpg:orig": "CulDBM6VYAA-YhY.jpg"},
	},
}
// TestGetTwitterUrls checks Twitter URL normalization against its table.
func TestGetTwitterUrls(t *testing.T) {
	for _, tc := range getTwitterUrlsTests {
		got, err := getTwitterUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getTistoryUrlsTests: both the /original/ and /image/ path forms should
// resolve to the /original/ download URL; no file name is suggested.
var getTistoryUrlsTests = []urlsTestpair{
	{
		"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E",
		map[string]string{"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E": ""},
	},
	{
		"http://cfile25.uf.tistory.com/image/235CA739582E86992EFC4E",
		map[string]string{"http://cfile25.uf.tistory.com/original/235CA739582E86992EFC4E": ""},
	},
}
// TestGetTistoryUrls checks Tistory URL normalization against its table.
func TestGetTistoryUrls(t *testing.T) {
	for _, tc := range getTistoryUrlsTests {
		got, err := getTistoryUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getGfycatUrlsTests: a gfycat page URL should resolve to its mp4 on the
// "fat" CDN host.
var getGfycatUrlsTests = []urlsTestpair{
	{
		"https://gfycat.com/SandyChiefBoubou",
		map[string]string{"https://fat.gfycat.com/SandyChiefBoubou.mp4": ""},
	},
}
// TestGetGfycatUrls checks gfycat URL resolution against its table.
func TestGetGfycatUrls(t *testing.T) {
	for _, tc := range getGfycatUrlsTests {
		got, err := getGfycatUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if reflect.DeepEqual(got, tc.result) {
			continue
		}
		t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
	}
}
// getInstagramUrlsPictureTests: picture posts resolve to the "/media/?size=l"
// URL and a deterministic "instagram <user> - <shortcode>.jpg" filename.
var getInstagramUrlsPictureTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BHhDAmhAz33/?taken-by=s_sohye",
		map[string]string{"https://www.instagram.com/p/BHhDAmhAz33/media/?size=l&taken-by=s_sohye": "instagram s_sohye - BHhDAmhAz33.jpg"},
	},
	{
		"https://www.instagram.com/p/BHhDAmhAz33/",
		map[string]string{"https://www.instagram.com/p/BHhDAmhAz33/media/?size=l": "instagram s_sohye - BHhDAmhAz33.jpg"},
	},
}
// getInstagramUrlsVideoTests: video posts — the map key holds only the stable
// CDN filename (the host portion varies), so the test matches by substring.
var getInstagramUrlsVideoTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BL2_ZIHgYTp/?taken-by=s_sohye",
		map[string]string{"14811404_233311497085396_338650092456116224_n.mp4": "instagram s_sohye - BL2_ZIHgYTp.mp4"},
	},
	{
		"https://www.instagram.com/p/BL2_ZIHgYTp/",
		map[string]string{"14811404_233311497085396_338650092456116224_n.mp4": "instagram s_sohye - BL2_ZIHgYTp.mp4"},
	},
}
// getInstagramUrlsAlbumTests: album posts — keys are stable CDN filenames and
// values are the common filename prefix; both are matched by substring
// because the CDN host and any index suffix vary.
var getInstagramUrlsAlbumTests = []urlsTestpair{
	{
		"https://www.instagram.com/p/BRiCc0VjULk/?taken-by=gfriendofficial",
		map[string]string{
			"17265460_395888184109957_3500310922180689920_n.jpg": "instagram gfriendofficial - BRiCc0VjULk",
			"17265456_267171360360765_8110946520456495104_n.jpg": "instagram gfriendofficial - BRiCc0VjULk",
			"17265327_1394797493912862_2677004307588448256_n.jpg": "instagram gfriendofficial - BRiCc0VjULk"},
	},
	{
		"https://www.instagram.com/p/BRhheSPjaQ3/",
		map[string]string{
			"17125875_306909746390523_8184965703367917568_n.jpg": "instagram gfriendofficial - BRhheSPjaQ3",
			"17266053_188727064951899_2485556569865977856_n.jpg": "instagram gfriendofficial - BRhheSPjaQ3"},
	},
}
// TestGetInstagramUrls covers picture, video, and album posts.
//
// Bug fix: the original video/album loops raised t.Errorf when a got URL DID
// contain the expected filename (condition inverted), and they compared every
// expected entry against every got entry, so any correct multi-entry result
// failed. Rewritten to the "found" pattern used by TestGetImgurAlbumUrls:
// an expected entry passes when some got entry matches it by substring
// (the CDN host portion of the URL varies between runs).
func TestGetInstagramUrls(t *testing.T) {
	for _, pair := range getInstagramUrlsPictureTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		if !reflect.DeepEqual(v, pair.result) {
			t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
		}
	}
	for _, pair := range getInstagramUrlsVideoTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		for keyExpected, valueExpected := range pair.result {
			found := false
			for keyGot, valueGot := range v {
				// CDN location can vary, so match on the stable filename.
				if strings.Contains(keyGot, keyExpected) && valueGot == valueExpected {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
			}
		}
	}
	for _, pair := range getInstagramUrlsAlbumTests {
		v, err := getInstagramUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		for keyExpected, valueExpected := range pair.result {
			found := false
			for keyGot, valueGot := range v {
				// CDN location and per-item suffixes can vary; substring match.
				if strings.Contains(keyGot, keyExpected) && strings.Contains(valueGot, valueExpected) {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("For %s, expected %s, got %s", pair.value, pair.result, v)
			}
		}
	}
}
// getImgurSingleUrlsTests: single imgur links gain a "/download/" path
// segment; the ".gifv" extension is stripped for direct-media links.
var getImgurSingleUrlsTests = []urlsTestpair{
	{
		"http://imgur.com/viZictl",
		map[string]string{"http://imgur.com/download/viZictl": ""},
	},
	{
		"https://imgur.com/viZictl",
		map[string]string{"https://imgur.com/download/viZictl": ""},
	},
	{
		"https://i.imgur.com/viZictl.jpg",
		map[string]string{"https://i.imgur.com/download/viZictl.jpg": ""},
	},
	{
		"http://imgur.com/uYwt2VV",
		map[string]string{"http://imgur.com/download/uYwt2VV": ""},
	},
	{
		"http://i.imgur.com/uYwt2VV.gifv",
		map[string]string{"http://i.imgur.com/download/uYwt2VV": ""},
	},
}
// TestGetImgurSingleUrls verifies the single-image imgur URL rewrite.
func TestGetImgurSingleUrls(t *testing.T) {
	for _, tc := range getImgurSingleUrlsTests {
		got, err := getImgurSingleUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if !reflect.DeepEqual(got, tc.result) {
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getImgurAlbumUrlsTests: an album link expands to every direct image URL it
// contains; order is irrelevant, so the test searches for each entry.
var getImgurAlbumUrlsTests = []urlsTestpair{
	{
		"http://imgur.com/a/ALTpi",
		map[string]string{
			"https://i.imgur.com/FKoguPh.jpg": "",
			"https://i.imgur.com/5FNL6Pe.jpg": "",
			"https://i.imgur.com/YA0V0g9.jpg": "",
			"https://i.imgur.com/Uc2iDhD.jpg": "",
			"https://i.imgur.com/J9JRSSJ.jpg": "",
			"https://i.imgur.com/Xrx0uyE.jpg": "",
			"https://i.imgur.com/3xDSq1O.jpg": "",
		},
	},
}
// TestGetImgurAlbumUrls checks that every expected album entry appears
// somewhere in the result of getImgurAlbumUrls.
func TestGetImgurAlbumUrls(t *testing.T) {
	for _, tc := range getImgurAlbumUrlsTests {
		got, err := getImgurAlbumUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		for wantLink, wantName := range tc.result {
			found := false
			for gotLink, gotName := range got {
				if wantLink == gotLink && wantName == gotName {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("For expected %s %s, got %s", wantLink, wantName, got)
			}
		}
	}
}
// getGoogleDriveUrlsTests: a Drive "view" link becomes a direct-download
// "uc?export=download" link keyed by file id.
var getGoogleDriveUrlsTests = []urlsTestpair{
	{
		"https://drive.google.com/file/d/0B8TnwsJqlFllSUtvUEhoSU40WkE/view",
		map[string]string{"https://drive.google.com/uc?export=download&id=0B8TnwsJqlFllSUtvUEhoSU40WkE": ""},
	},
}
// TestGetGoogleDriveUrls verifies the Drive view-to-download rewrite.
func TestGetGoogleDriveUrls(t *testing.T) {
	for _, tc := range getGoogleDriveUrlsTests {
		got, err := getGoogleDriveUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if !reflect.DeepEqual(got, tc.result) {
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getTistoryWithCDNUrlsTests: a daumcdn thumbnail wrapper URL must be
// unwrapped (fname query parameter) back to the original Tistory URL.
var getTistoryWithCDNUrlsTests = []urlsTestpair{
	{
		"http://img1.daumcdn.net/thumb/R720x0.q80/?scode=mtistory&fname=http%3A%2F%2Fcfile24.uf.tistory.com%2Fimage%2F2658554B580BDC4C0924CA",
		map[string]string{"http://cfile24.uf.tistory.com/original/2658554B580BDC4C0924CA": ""},
	},
}
// TestGetTistoryWithCDNUrls verifies unwrapping of daumcdn-wrapped links.
func TestGetTistoryWithCDNUrls(t *testing.T) {
	for _, tc := range getTistoryWithCDNUrlsTests {
		got, err := getTistoryWithCDNUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if !reflect.DeepEqual(got, tc.result) {
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getPossibleTistorySiteUrlsTests: only the COUNT of discovered links is
// asserted (placeholder keys "a".."e"), because the scraped URLs vary; a
// non-Tistory domain must yield an empty map.
var getPossibleTistorySiteUrlsTests = []urlsTestpair{
	{
		"http://soonduck.tistory.com/482",
		map[string]string{
			"a": "",
			"b": "",
			"c": "",
			"d": "",
			"e": "",
		},
	},
	{
		"http://soonduck.tistory.com/m/482",
		map[string]string{
			"a": "",
			"b": "",
			"c": "",
			"d": "",
			"e": "",
		},
	},
	{
		"http://slmn.de/123",
		map[string]string{},
	},
}
// TestGetPossibleTistorySiteUrls only compares result counts, because the
// scraped URLs themselves are not stable across runs.
func TestGetPossibleTistorySiteUrls(t *testing.T) {
	for _, tc := range getPossibleTistorySiteUrlsTests {
		got, err := getPossibleTistorySiteUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if len(tc.result) != len(got) { // only check filenames, urls may vary
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getFlickrUrlFromPhotoIdTests: photo-id to staticflickr URL; uses plain
// "value"/"result" maps since the lookup returns a single string.
var getFlickrUrlFromPhotoIdTests = []map[string]string{
	{
		"value":  "31065043320",
		"result": "https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg",
	},
}
// TestGetFlickrUrlFromPhotoId checks the photo-id to static URL mapping.
func TestGetFlickrUrlFromPhotoId(t *testing.T) {
	for _, tc := range getFlickrUrlFromPhotoIdTests {
		if got := getFlickrUrlFromPhotoId(tc["value"]); got != tc["result"] {
			t.Errorf("For %s, expected %s, got %s", tc["value"], tc["result"], got)
		}
	}
}
// getFlickrPhotoUrlsTests: all URL variants of the same photo page (with or
// without album context or trailing slash) resolve to one static image URL.
var getFlickrPhotoUrlsTests = []urlsTestpair{
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/in/album-72157677350305446/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/in/album-72157677350305446",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/31065043320",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
		},
	},
}
// TestGetFlickrPhotoUrls verifies single-photo page resolution.
func TestGetFlickrPhotoUrls(t *testing.T) {
	for _, tc := range getFlickrPhotoUrlsTests {
		got, err := getFlickrPhotoUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if !reflect.DeepEqual(got, tc.result) {
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getFlickrAlbumUrlsTests: every album URL variant (trailing slash, "with/"
// path segment) must expand to the same full set of static image URLs.
var getFlickrAlbumUrlsTests = []urlsTestpair{
	{
		"https://www.flickr.com/photos/137385017@N08/albums/72157677350305446/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
			"https://farm6.staticflickr.com/5651/31434767515_49f88ee12e_b.jpg": "",
			"https://farm6.staticflickr.com/5750/31434766825_529fd08071_b.jpg": "",
			"https://farm6.staticflickr.com/5811/31319456971_37c8c4708a_b.jpg": "",
			"https://farm6.staticflickr.com/5494/30627074913_b7f810fc26_b.jpg": "",
			"https://farm6.staticflickr.com/5539/31065042720_d76f643b28_b.jpg": "",
			"https://farm6.staticflickr.com/5813/31434765285_94b85d5e8c_b.jpg": "",
			"https://farm6.staticflickr.com/5600/31065044090_eca63bd5a5_b.jpg": "",
			"https://farm6.staticflickr.com/5733/31434764435_350825477e_b.jpg": "",
			"https://farm6.staticflickr.com/5715/30627073573_b86e4b2c22_b.jpg": "",
			"https://farm6.staticflickr.com/5758/31289864222_5e3cca7e72_b.jpg": "",
			"https://farm6.staticflickr.com/5801/30627076673_5a32f3e562_b.jpg": "",
			"https://farm6.staticflickr.com/5538/31319458901_088858d7f1_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/albums/72157677350305446",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
			"https://farm6.staticflickr.com/5651/31434767515_49f88ee12e_b.jpg": "",
			"https://farm6.staticflickr.com/5750/31434766825_529fd08071_b.jpg": "",
			"https://farm6.staticflickr.com/5811/31319456971_37c8c4708a_b.jpg": "",
			"https://farm6.staticflickr.com/5494/30627074913_b7f810fc26_b.jpg": "",
			"https://farm6.staticflickr.com/5539/31065042720_d76f643b28_b.jpg": "",
			"https://farm6.staticflickr.com/5813/31434765285_94b85d5e8c_b.jpg": "",
			"https://farm6.staticflickr.com/5600/31065044090_eca63bd5a5_b.jpg": "",
			"https://farm6.staticflickr.com/5733/31434764435_350825477e_b.jpg": "",
			"https://farm6.staticflickr.com/5715/30627073573_b86e4b2c22_b.jpg": "",
			"https://farm6.staticflickr.com/5758/31289864222_5e3cca7e72_b.jpg": "",
			"https://farm6.staticflickr.com/5801/30627076673_5a32f3e562_b.jpg": "",
			"https://farm6.staticflickr.com/5538/31319458901_088858d7f1_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/albums/with/72157677350305446/",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
			"https://farm6.staticflickr.com/5651/31434767515_49f88ee12e_b.jpg": "",
			"https://farm6.staticflickr.com/5750/31434766825_529fd08071_b.jpg": "",
			"https://farm6.staticflickr.com/5811/31319456971_37c8c4708a_b.jpg": "",
			"https://farm6.staticflickr.com/5494/30627074913_b7f810fc26_b.jpg": "",
			"https://farm6.staticflickr.com/5539/31065042720_d76f643b28_b.jpg": "",
			"https://farm6.staticflickr.com/5813/31434765285_94b85d5e8c_b.jpg": "",
			"https://farm6.staticflickr.com/5600/31065044090_eca63bd5a5_b.jpg": "",
			"https://farm6.staticflickr.com/5733/31434764435_350825477e_b.jpg": "",
			"https://farm6.staticflickr.com/5715/30627073573_b86e4b2c22_b.jpg": "",
			"https://farm6.staticflickr.com/5758/31289864222_5e3cca7e72_b.jpg": "",
			"https://farm6.staticflickr.com/5801/30627076673_5a32f3e562_b.jpg": "",
			"https://farm6.staticflickr.com/5538/31319458901_088858d7f1_b.jpg": "",
		},
	},
	{
		"https://www.flickr.com/photos/137385017@N08/albums/with/72157677350305446",
		map[string]string{
			"https://farm6.staticflickr.com/5521/31065043320_cd03a9a448_b.jpg": "",
			"https://farm6.staticflickr.com/5651/31434767515_49f88ee12e_b.jpg": "",
			"https://farm6.staticflickr.com/5750/31434766825_529fd08071_b.jpg": "",
			"https://farm6.staticflickr.com/5811/31319456971_37c8c4708a_b.jpg": "",
			"https://farm6.staticflickr.com/5494/30627074913_b7f810fc26_b.jpg": "",
			"https://farm6.staticflickr.com/5539/31065042720_d76f643b28_b.jpg": "",
			"https://farm6.staticflickr.com/5813/31434765285_94b85d5e8c_b.jpg": "",
			"https://farm6.staticflickr.com/5600/31065044090_eca63bd5a5_b.jpg": "",
			"https://farm6.staticflickr.com/5733/31434764435_350825477e_b.jpg": "",
			"https://farm6.staticflickr.com/5715/30627073573_b86e4b2c22_b.jpg": "",
			"https://farm6.staticflickr.com/5758/31289864222_5e3cca7e72_b.jpg": "",
			"https://farm6.staticflickr.com/5801/30627076673_5a32f3e562_b.jpg": "",
			"https://farm6.staticflickr.com/5538/31319458901_088858d7f1_b.jpg": "",
		},
	},
}
// TestGetFlickrAlbumUrls verifies full album expansion for each URL variant.
func TestGetFlickrAlbumUrls(t *testing.T) {
	for _, tc := range getFlickrAlbumUrlsTests {
		got, err := getFlickrAlbumUrls(tc.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", tc.value, nil, err)
		}
		if !reflect.DeepEqual(got, tc.result) {
			t.Errorf("For %s, expected %s, got %s", tc.value, tc.result, got)
		}
	}
}
// getStreamableUrlsTests: only the stable path suffix is stored; the test
// matches it as a substring of the returned URL.
var getStreamableUrlsTests = []urlsTestpair{
	{
		"http://streamable.com/41ajc",
		map[string]string{
			"streamablevideo.com/video/mp4/41ajc.mp4": "",
		},
	},
}
// TestGetStreamableUrls verifies that a streamable page link resolves to a
// video URL containing the expected stable suffix (the host may vary).
//
// Idiom fix: `for gotLink, _ := range v` used a redundant blank identifier
// (flagged by gofmt -s / golint); a key-only range is the idiomatic form.
// Also break out of the inner loop as soon as a match is found.
func TestGetStreamableUrls(t *testing.T) {
	for _, pair := range getStreamableUrlsTests {
		v, err := getStreamableUrls(pair.value)
		if err != nil {
			t.Errorf("For %v, expected %v, got %v", pair.value, nil, err)
		}
		for expectedLink, expectedName := range pair.result {
			linkFound := false
			for gotLink := range v {
				if strings.Contains(gotLink, expectedLink) {
					linkFound = true
					break
				}
			}
			if !linkFound {
				t.Errorf("For expected %s %s, got %s", expectedLink, expectedName, v)
			}
		}
	}
}
|
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"time"
"github.com/igneous-systems/logit"
"github.com/igneous-systems/pickett"
"github.com/igneous-systems/pickett/io"
)
// contains reports whether target occurs in s.
func contains(s []string, target string) bool {
	for i := range s {
		if s[i] == target {
			return true
		}
	}
	return false
}
// makeIOObjects constructs the four external-service handles pickett needs:
// the config helper for the file at path, a docker client, an etcd client,
// and a virtualbox wrapper. On any failure it returns all-nil handles and a
// descriptive error; the construction order matters only in that each error
// message names the service that failed.
func makeIOObjects(path string) (io.Helper, io.DockerCli, io.EtcdClient, io.VirtualBox, error) {
	helper, err := io.NewHelper(path)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("can't read %s: %v", path, err)
	}
	cli, err := io.NewDockerCli()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to connect to docker server, maybe its not running? %v", err)
	}
	etcd, err := io.NewEtcdClient()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to connect to etcd, maybe its not running? %v", err)
	}
	vbox, err := io.NewVirtualBox()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to run vboxmanage: %v", err)
	}
	return helper, cli, etcd, vbox, nil
}
// trueMain is the entry point of the program with the targets filled in
// and a working helper. It parses the config, classifies each requested
// target as buildable or runnable (at most ONE runnable target is allowed),
// builds everything except the runnable target, then executes the runnable
// target last. With no explicit targets it builds all buildable entry points.
func trueMain(targets []string, helper io.Helper, cli io.DockerCli, etcd io.EtcdClient, vbox io.VirtualBox) error {
	reader := helper.ConfigReader()
	config, err := pickett.NewConfig(reader, helper, cli, etcd, vbox)
	if err != nil {
		return fmt.Errorf("can't understand config file %s: %v", helper.ConfigFile(), err)
	}
	buildables, runnables := config.EntryPoints()
	run := false
	runTarget := ""
	// if you don't tell us what to build, we build everything with no outgoing
	// edges, the "root" of a backchain
	if len(targets) == 0 {
		targets = buildables
	} else {
		//if you do tell us, we need know if it's runnable
		for _, t := range targets {
			if contains(buildables, t) {
				continue
			}
			if contains(runnables, t) {
				if run {
					return fmt.Errorf("can only run one target (%s and %s both runnable)", runTarget, t)
				}
				run = true
				runTarget = t
				continue
			}
			return fmt.Errorf("don't know anything about target %s", t)
		}
	}
	// Build phase: the runnable target is skipped here and executed below.
	for _, target := range targets {
		if target == runTarget {
			continue
		}
		err := config.Build(target)
		if err != nil {
			return fmt.Errorf("an error occurred while building target '%v': %v", target, err)
		}
	}
	if runTarget != "" {
		err = config.Execute(runTarget)
		if err != nil {
			return fmt.Errorf("an error occurred while running target '%v': %v", runTarget, err)
		}
	}
	return nil
}
// flog is this package's logger, nested under the global logit logger.
var flog = logit.NewNestedLoggerFromCaller(logit.Global)
// main parses the command-line flags, verifies the pickett configuration
// file exists, builds the IO handles, parses the configuration, and then
// dispatches to the sub-command named by the first positional argument.
//
// Fixes over the original:
//   - exit when makeIOObjects fails; the original only logged and then
//     dereferenced the nil helper (panic),
//   - os.Stat instead of os.Open for the existence check (the opened file
//     was never closed),
//   - the "Can't understand config file" Fprintf had its arguments swapped
//     (error where the file name belongs),
//   - on a failed sub-command, log BEFORE flushing so the message actually
//     reaches the sink prior to os.Exit.
func main() {
	var debug bool
	var configFile string
	flag.BoolVar(&debug, "debug", false, "turns on verbose logging for pickett developers")
	flag.StringVar(&configFile, "config", "Pickett.json", "use a custom pickett configuration file")
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		usage()
	}
	var logFilterLvl logit.Level
	if debug {
		logFilterLvl = logit.DEBUG
	} else {
		logFilterLvl = logit.INFO
	}
	logit.Global.ModifyFilterLvl("stdout", logFilterLvl, nil, nil)
	defer logit.Flush(time.Millisecond * 300)
	wd, err := os.Getwd()
	if err != nil {
		panic("can't get working directory!")
	}
	// Existence check only; Stat does not leave a descriptor open.
	if _, err = os.Stat(filepath.Join(wd, configFile)); err != nil {
		fmt.Fprintf(os.Stderr, "can't find configuration file: %s\n", filepath.Join(wd, configFile))
		os.Exit(1)
	}
	helper, docker, etcd, vbox, err := makeIOObjects(filepath.Join(wd, configFile))
	if err != nil {
		// Must exit: every handle is nil on error and is used just below.
		flog.Errorf("failed to make IO objects: %v", err)
		logit.Flush(time.Millisecond * 300)
		os.Exit(1)
	}
	reader := helper.ConfigReader()
	config, err := pickett.NewConfig(reader, helper, docker, etcd, vbox)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Can't understand config file %s: %v", helper.ConfigFile(), err)
		os.Exit(1)
	}
	switch args[0] {
	case "run":
		err = pickett.CmdRun(args[1:], config)
	case "build":
		err = pickett.CmdBuild(args[1:], config)
	case "status":
		err = pickett.CmdStatus(args[1:], config)
	case "stop":
		err = pickett.CmdStop(args[1:], config)
	case "drop":
		err = pickett.CmdDrop(args[1:], config)
	case "wipe":
		err = pickett.CmdWipe(args[1:], config)
	default:
		usage()
		os.Exit(1)
	}
	if err != nil {
		// Log first, then flush, so the error reaches the sink before exit.
		flog.Errorf("%s: %v", args[0], err)
		logit.Flush(time.Millisecond * 300)
		os.Exit(1)
	} else {
		os.Exit(0)
	}
}
// usage prints the list of supported pickett actions to stdout and exits
// with a non-zero status.
//
// Fixes: the local was named `error`, shadowing the predeclared error type;
// and "depedencies" was a typo in the user-facing text.
func usage() {
	// There doesn't seem to be a better way to mix flags usage with arguments usage ?
	msg := fmt.Errorf(`Usage of pickett, expected an action as the first argument, one of:
	- run [topology.node] Runs a specific node in a topology, including all dependencies.
	- status [tags or topology.node] Shows the status of all the known buildable tags and/or runnable nodes.
	- build [tags] Build all tags or specified tags.
	- stop [topology.node] Stop all or a specific node.
	- drop [topology.node] Stop and delete all or a specific node.
	- wipe [tags] Delete all or specified tags (forces rebuild next time)`)
	fmt.Print(msg)
	os.Exit(1)
}
Log before flush
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"time"
"github.com/igneous-systems/logit"
"github.com/igneous-systems/pickett"
"github.com/igneous-systems/pickett/io"
)
// contains reports whether target occurs in s.
func contains(s []string, target string) bool {
	for i := range s {
		if s[i] == target {
			return true
		}
	}
	return false
}
// makeIOObjects constructs the four external-service handles pickett needs:
// the config helper for the file at path, a docker client, an etcd client,
// and a virtualbox wrapper. On any failure it returns all-nil handles and a
// descriptive error naming the service that failed.
func makeIOObjects(path string) (io.Helper, io.DockerCli, io.EtcdClient, io.VirtualBox, error) {
	helper, err := io.NewHelper(path)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("can't read %s: %v", path, err)
	}
	cli, err := io.NewDockerCli()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to connect to docker server, maybe its not running? %v", err)
	}
	etcd, err := io.NewEtcdClient()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to connect to etcd, maybe its not running? %v", err)
	}
	vbox, err := io.NewVirtualBox()
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to run vboxmanage: %v", err)
	}
	return helper, cli, etcd, vbox, nil
}
// trueMain is the entry point of the program with the targets filled in
// and a working helper. It parses the config, classifies each requested
// target as buildable or runnable (at most ONE runnable target is allowed),
// builds everything except the runnable target, then executes the runnable
// target last. With no explicit targets it builds all buildable entry points.
func trueMain(targets []string, helper io.Helper, cli io.DockerCli, etcd io.EtcdClient, vbox io.VirtualBox) error {
	reader := helper.ConfigReader()
	config, err := pickett.NewConfig(reader, helper, cli, etcd, vbox)
	if err != nil {
		return fmt.Errorf("can't understand config file %s: %v", helper.ConfigFile(), err)
	}
	buildables, runnables := config.EntryPoints()
	run := false
	runTarget := ""
	// if you don't tell us what to build, we build everything with no outgoing
	// edges, the "root" of a backchain
	if len(targets) == 0 {
		targets = buildables
	} else {
		//if you do tell us, we need know if it's runnable
		for _, t := range targets {
			if contains(buildables, t) {
				continue
			}
			if contains(runnables, t) {
				if run {
					return fmt.Errorf("can only run one target (%s and %s both runnable)", runTarget, t)
				}
				run = true
				runTarget = t
				continue
			}
			return fmt.Errorf("don't know anything about target %s", t)
		}
	}
	// Build phase: the runnable target is skipped here and executed below.
	for _, target := range targets {
		if target == runTarget {
			continue
		}
		err := config.Build(target)
		if err != nil {
			return fmt.Errorf("an error occurred while building target '%v': %v", target, err)
		}
	}
	if runTarget != "" {
		err = config.Execute(runTarget)
		if err != nil {
			return fmt.Errorf("an error occurred while running target '%v': %v", runTarget, err)
		}
	}
	return nil
}
// flog is this package's logger, nested under the global logit logger.
var flog = logit.NewNestedLoggerFromCaller(logit.Global)
// main parses the command-line flags, verifies the pickett configuration
// file exists, builds the IO handles, parses the configuration, and then
// dispatches to the sub-command named by the first positional argument.
//
// Fixes over the original:
//   - exit when makeIOObjects fails; the original only logged and then
//     dereferenced the nil helper (panic),
//   - os.Stat instead of os.Open for the existence check (the opened file
//     was never closed),
//   - the "Can't understand config file" Fprintf had its arguments swapped
//     (error where the file name belongs).
func main() {
	var debug bool
	var configFile string
	flag.BoolVar(&debug, "debug", false, "turns on verbose logging for pickett developers")
	flag.StringVar(&configFile, "config", "Pickett.json", "use a custom pickett configuration file")
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		usage()
	}
	var logFilterLvl logit.Level
	if debug {
		logFilterLvl = logit.DEBUG
	} else {
		logFilterLvl = logit.INFO
	}
	logit.Global.ModifyFilterLvl("stdout", logFilterLvl, nil, nil)
	defer logit.Flush(time.Millisecond * 300)
	wd, err := os.Getwd()
	if err != nil {
		panic("can't get working directory!")
	}
	// Existence check only; Stat does not leave a descriptor open.
	if _, err = os.Stat(filepath.Join(wd, configFile)); err != nil {
		fmt.Fprintf(os.Stderr, "can't find configuration file: %s\n", filepath.Join(wd, configFile))
		os.Exit(1)
	}
	helper, docker, etcd, vbox, err := makeIOObjects(filepath.Join(wd, configFile))
	if err != nil {
		// Must exit: every handle is nil on error and is used just below.
		flog.Errorf("failed to make IO objects: %v", err)
		logit.Flush(time.Millisecond * 300)
		os.Exit(1)
	}
	reader := helper.ConfigReader()
	config, err := pickett.NewConfig(reader, helper, docker, etcd, vbox)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Can't understand config file %s: %v", helper.ConfigFile(), err)
		os.Exit(1)
	}
	switch args[0] {
	case "run":
		err = pickett.CmdRun(args[1:], config)
	case "build":
		err = pickett.CmdBuild(args[1:], config)
	case "status":
		err = pickett.CmdStatus(args[1:], config)
	case "stop":
		err = pickett.CmdStop(args[1:], config)
	case "drop":
		err = pickett.CmdDrop(args[1:], config)
	case "wipe":
		err = pickett.CmdWipe(args[1:], config)
	default:
		usage()
		os.Exit(1)
	}
	if err != nil {
		// Make sure we get flog a chance to flush before exit
		flog.Errorf("%s: %v", args[0], err)
		logit.Flush(time.Millisecond * 300)
		os.Exit(1)
	} else {
		os.Exit(0)
	}
}
// usage prints the list of supported pickett actions to stdout and exits
// with a non-zero status.
//
// Fixes: the local was named `error`, shadowing the predeclared error type;
// and "depedencies" was a typo in the user-facing text.
func usage() {
	// There doesn't seem to be a better way to mix flags usage with arguments usage ?
	msg := fmt.Errorf(`Usage of pickett, expected an action as the first argument, one of:
	- run [topology.node] Runs a specific node in a topology, including all dependencies.
	- status [tags or topology.node] Shows the status of all the known buildable tags and/or runnable nodes.
	- build [tags] Build all tags or specified tags.
	- stop [topology.node] Stop all or a specific node.
	- drop [topology.node] Stop and delete all or a specific node.
	- wipe [tags] Delete all or specified tags (forces rebuild next time)`)
	fmt.Print(msg)
	os.Exit(1)
}
|
package lock
import (
"fmt"
"github.com/jfrog/jfrog-client-go/utils/io/fileutils"
"math"
"os"
"testing"
"time"
)
/*
The lock mechanism prefers earlier lock requests. If two lock requests have the same time stamp, it takes the one with the smaller PID first.
Here we test the functionality of a real process with a real PID and a dummy process with MaxInt pid.
*/
// TestLockSmallerPid: a stale lock with a non-running, very large PID
// (math.MaxInt32) must be removed by Lock() of the real process, and the
// directory must end up empty.
func TestLockSmallerPid(t *testing.T) {
	// First creating the first lock object with special pid number that doesn't exists.
	firstLock, _ := getLock(math.MaxInt32, t)
	// Creating a second lock object with the running PID
	secondLock, folderName := getLock(os.Getpid(), t)
	// Confirming that only two locks are located in the lock directory
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 2 {
		t.Error("Expected 2 files but got ", len(files))
	}
	// Performing lock. This should work since the first lock PID is not running. The Lock() will remove it.
	err = secondLock.Lock()
	if err != nil {
		t.Error(err)
	}
	// Unlocking to remove the lock file.
	err = secondLock.Unlock()
	if err != nil {
		t.Error(err)
	}
	// If timestamp equals, secondLock.Lock() is not expected to delete first lock's file, since os.Getpid() < math.MaxInt32.
	if firstLock.currentTime == secondLock.currentTime {
		err = firstLock.Unlock()
		if err != nil {
			t.Error(err)
		}
	}
	// Confirming that no locks are located in the lock directory
	files, err = fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 0 {
		t.Error("Expected 0 files but got", len(files), files)
	}
}
/*
The lock mechanism prefers earlier lock requests. If two lock requests have the same time stamp, it takes the one with the smaller PID first.
Here we test the functionality of a real process with a real PID and a dummy process with -1 pid.
*/
// TestLockBiggerPid: a stale lock with a non-running PID of -1 must be
// removed by Lock() of the real process, leaving the directory empty.
func TestLockBiggerPid(t *testing.T) {
	// First creating the first lock object with special pid number that doesn't exists.
	getLock(-1, t)
	// Creating a second lock object with the running PID
	secondLock, folderName := getLock(os.Getpid(), t)
	// Confirming that only two locks are located in the lock directory
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 2 {
		t.Error("Expected 2 files but got ", len(files), files)
	}
	// Performing lock. This should work since the first lock PID is not running. The Lock() will remove it.
	err = secondLock.Lock()
	if err != nil {
		t.Error(err)
	}
	// Unlocking to remove the lock file.
	err = secondLock.Unlock()
	if err != nil {
		t.Error(err)
	}
	// Confirming that no locks are located in the lock directory
	files, err = fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 0 {
		t.Error("Expected 0 files but got", len(files), files)
	}
}
// TestUnlock verifies that Unlock removes the file created by
// CreateNewLockFile.
//
// Fix: the original discarded Unlock's error, so a failed removal could
// still pass if the file had vanished for another reason.
func TestUnlock(t *testing.T) {
	lock := new(Lock)
	err := lock.CreateNewLockFile()
	if err != nil {
		t.Error(err)
	}
	exists, err := fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
	}
	if !exists {
		t.Errorf("File %s is missing", lock.fileName)
	}
	if err = lock.Unlock(); err != nil {
		t.Error(err)
	}
	exists, err = fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
	}
	if exists {
		t.Errorf("File %s exists, but it should have been removed by Unlock", lock.fileName)
	}
}
// TestCreateFile verifies that getLock creates exactly one lock file with
// the expected name, then removes it via Unlock.
//
// Fixes: typo "wan't" -> "wasn't" in the failure message, and
// t.Error(fmt.Errorf(...)) replaced by the idiomatic t.Errorf(...).
func TestCreateFile(t *testing.T) {
	pid := os.Getpid()
	lock, folderName := getLock(pid, t)
	exists, err := fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if !exists {
		t.Error("Lock wasn't created.")
		t.FailNow()
	}
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if len(files) != 1 {
		t.Errorf("Expected one file, got %d.", len(files))
		t.FailNow()
	}
	if files[0] != lock.fileName {
		t.Errorf("Expected filename %s, got %s", lock.fileName, files[0])
	}
	// Removing the created lock file
	err = lock.Unlock()
	if err != nil {
		t.Error(err)
	}
}
// getLock builds a Lock for the given pid stamped with the current time,
// ensures the lock directory exists, and writes the lock file into it.
// Any failure aborts the test immediately. Returns the lock and the lock
// directory path.
func getLock(pid int, t *testing.T) (Lock, string) {
	currentTime := time.Now().UnixNano()
	lock := Lock{
		pid:         pid,
		currentTime: currentTime,
	}
	folderName, err := CreateLockDir()
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	err = lock.CreateFile(folderName, pid)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	return lock, folderName
}
Log API changes.
package lock
import (
"fmt"
"github.com/jfrog/jfrog-cli-go/utils/log"
"github.com/jfrog/jfrog-client-go/utils/io/fileutils"
"math"
"os"
"testing"
"time"
)
/*
The lock mechanism prefers earlier lock requests. If two lock requests have the same time stamp, it takes the one with the smaller PID first.
Here we test the functionality of a real process with a real PID and a dummy process with MaxInt pid.
*/
// TestLockSmallerPid: a stale lock with a non-running, very large PID
// (math.MaxInt32) must be removed by Lock() of the real process, and the
// directory must end up empty.
func TestLockSmallerPid(t *testing.T) {
	log.SetDefaultLogger()
	// First creating the first lock object with special pid number that doesn't exists.
	firstLock, _ := getLock(math.MaxInt32, t)
	// Creating a second lock object with the running PID
	secondLock, folderName := getLock(os.Getpid(), t)
	// Confirming that only two locks are located in the lock directory
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 2 {
		t.Error("Expected 2 files but got ", len(files))
	}
	// Performing lock. This should work since the first lock PID is not running. The Lock() will remove it.
	err = secondLock.Lock()
	if err != nil {
		t.Error(err)
	}
	// Unlocking to remove the lock file.
	err = secondLock.Unlock()
	if err != nil {
		t.Error(err)
	}
	// If timestamp equals, secondLock.Lock() is not expected to delete first lock's file, since os.Getpid() < math.MaxInt32.
	if firstLock.currentTime == secondLock.currentTime {
		err = firstLock.Unlock()
		if err != nil {
			t.Error(err)
		}
	}
	// Confirming that no locks are located in the lock directory
	files, err = fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 0 {
		t.Error("Expected 0 files but got", len(files), files)
	}
}
/*
The lock mechanism prefers earlier lock requests. If two lock requests have the same time stamp, it takes the one with the smaller PID first.
Here we test the functionality of a real process with a real PID and a dummy process with -1 pid.
*/
// TestLockBiggerPid: a stale lock with a non-running PID of -1 must be
// removed by Lock() of the real process, leaving the directory empty.
func TestLockBiggerPid(t *testing.T) {
	log.SetDefaultLogger()
	// First creating the first lock object with special pid number that doesn't exists.
	getLock(-1, t)
	// Creating a second lock object with the running PID
	secondLock, folderName := getLock(os.Getpid(), t)
	// Confirming that only two locks are located in the lock directory
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 2 {
		t.Error("Expected 2 files but got ", len(files), files)
	}
	// Performing lock. This should work since the first lock PID is not running. The Lock() will remove it.
	err = secondLock.Lock()
	if err != nil {
		t.Error(err)
	}
	// Unlocking to remove the lock file.
	err = secondLock.Unlock()
	if err != nil {
		t.Error(err)
	}
	// Confirming that no locks are located in the lock directory
	files, err = fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
	}
	if len(files) != 0 {
		t.Error("Expected 0 files but got", len(files), files)
	}
}
// TestUnlock verifies that Unlock removes the file created by
// CreateNewLockFile.
//
// Fix: the original discarded Unlock's error, so a failed removal could
// still pass if the file had vanished for another reason.
func TestUnlock(t *testing.T) {
	log.SetDefaultLogger()
	lock := new(Lock)
	err := lock.CreateNewLockFile()
	if err != nil {
		t.Error(err)
	}
	exists, err := fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
	}
	if !exists {
		t.Errorf("File %s is missing", lock.fileName)
	}
	if err = lock.Unlock(); err != nil {
		t.Error(err)
	}
	exists, err = fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
	}
	if exists {
		t.Errorf("File %s exists, but it should have been removed by Unlock", lock.fileName)
	}
}
// TestCreateFile verifies that getLock creates exactly one lock file with
// the expected name, then removes it via Unlock.
//
// Fixes: typo "wan't" -> "wasn't" in the failure message, and
// t.Error(fmt.Errorf(...)) replaced by the idiomatic t.Errorf(...).
func TestCreateFile(t *testing.T) {
	log.SetDefaultLogger()
	pid := os.Getpid()
	lock, folderName := getLock(pid, t)
	exists, err := fileutils.IsFileExists(lock.fileName, false)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if !exists {
		t.Error("Lock wasn't created.")
		t.FailNow()
	}
	files, err := fileutils.ListFiles(folderName, false)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if len(files) != 1 {
		t.Errorf("Expected one file, got %d.", len(files))
		t.FailNow()
	}
	if files[0] != lock.fileName {
		t.Errorf("Expected filename %s, got %s", lock.fileName, files[0])
	}
	// Removing the created lock file
	err = lock.Unlock()
	if err != nil {
		t.Error(err)
	}
}
// getLock builds a Lock for the given pid stamped with the current time,
// ensures the lock directory exists, and writes the lock file into it.
// Any failure aborts the test immediately. Returns the lock and the lock
// directory path.
func getLock(pid int, t *testing.T) (Lock, string) {
	currentTime := time.Now().UnixNano()
	lock := Lock{
		pid:         pid,
		currentTime: currentTime,
	}
	folderName, err := CreateLockDir()
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	err = lock.CreateFile(folderName, pid)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	return lock, folderName
}
|
package make
import (
"bufio"
"database/sql"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/fubarhouse/golang-drush/command"
_ "github.com/go-sql-driver/mysql" // mysql is assumed under this system (for now).
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// ReplaceTextInFile is a utility function to replace all instances of a string in a file.
// ReplaceTextInFile is a utility function to replace all instances of a string in a file.
// It panics (via log.Panicln) on any read/write failure.
//
// Fix: the original passed a permission of 0 to WriteFile. That is harmless
// while the file still exists (the mode is only applied on creation), but if
// the file were removed between the read and the write it would be recreated
// with no permissions at all. Preserve the file's actual mode instead.
func ReplaceTextInFile(fullPath string, oldString string, newString string) {
	read, err := ioutil.ReadFile(fullPath)
	if err != nil {
		log.Panicln(err)
	}
	info, err := os.Stat(fullPath)
	if err != nil {
		log.Panicln(err)
	}
	newContents := strings.Replace(string(read), oldString, newString, -1)
	err = ioutil.WriteFile(fullPath, []byte(newContents), info.Mode())
	if err != nil {
		log.Panicln(err)
	}
}
// RestartWebServer is a function to run a command to restart the given web service.
// It shells out to `sudo service <webserver> restart` and logs the outcome.
func (Site *Site) RestartWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "restart").Output(); err != nil {
		log.Errorf("Could not restart webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Restarted webserver %v.\n", Site.Webserver)
}
// StartWebServer is a function to run a command to start the given web service.
func (Site *Site) StartWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "start").Output(); err != nil {
		log.Errorf("Could not start webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Started webserver %v.\n", Site.Webserver)
}
// StopWebServer is a function to run a command to stop the given web service.
func (Site *Site) StopWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "stop").Output(); err != nil {
		log.Errorf("Could not stop webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Stopped webserver %v.\n", Site.Webserver)
}
// DrupalProject struct which represents a Drupal project on drupal.org
type DrupalProject struct {
	Type   string // "modules" or "libraries", as assigned by VerifyProcessedMake
	Name   string // project/library machine name parsed from the make file
	Subdir string // subdir/directory_name value parsed from the make file
	Status bool   // set true by VerifyProcessedMake when the project is found on disk
}
// Site struct which represents a build website being used.
type Site struct {
	Timestamp string // dot-prefixed build stamp, e.g. ".20060102150405"; see TimeStampReset/TimeStampSet
	Path      string // base filesystem path for builds
	Make      string // make file reference supplied at construction
	Name      string // site machine name; used in build directory and database names
	Alias     string // drush alias (leading "@" is stripped where used)
	Domain    string // site domain; used for the <domain>.latest symlink and sites.php
	database  *makeDB // local database credentials; set via DatabaseSet
	Webserver string // service name used by Start/Stop/RestartWebServer
	Vhostpath string
	Template  string
	MakeFileRewriteSource      string // optional rewrite applied to make files by ProcessMake
	MakeFileRewriteDestination string // replacement value for MakeFileRewriteSource
	FilePathPrivate string // defaults to "files/private"
	FilePathPublic  string // reserved for later implementation
	FilePathTemp    string // defaults to "files/private/temp"
	WorkingCopy     bool // when true, drush make runs with --working-copy
}
// NewSite instantiates an instance of the struct Site with the supplied
// identifiers and the default file-path settings, stamping it with a fresh
// build timestamp.
func NewSite(make, name, path, alias, webserver, domain, vhostpath, template string) *Site {
	s := &Site{
		Make:            make,
		Name:            name,
		Path:            path,
		Webserver:       webserver,
		Alias:           alias,
		Domain:          domain,
		Vhostpath:       vhostpath,
		Template:        template,
		FilePathPrivate: "files/private",
		FilePathPublic:  "", // For later implementation
		FilePathTemp:    "files/private/temp",
		WorkingCopy:     false,
	}
	s.TimeStampReset()
	return s
}
// ActionBuild is a superseded build action, requires action, documentation or removal.
func (Site *Site) ActionBuild() {
	// TODO: Define purpose with the existence of ProcessMake()
	if !Site.AliasExists(Site.Name) {
		return
	}
	Site.Path = Site.Path + Site.TimeStampGet()
	//Site.ProcessMakes([]string{"core.make", "libraries.make", "contrib.make", "custom.make"})
	Site.ActionInstall()
}
// ActionInstall runs drush site-install on a Site struct.
// It creates a per-build MySQL database named <name><timestamp> (dots
// replaced with underscores) and then invokes drush site-install inside the
// build directory.
func (Site *Site) ActionInstall() {
	// Obtain a string value from the Port value in db config.
	stringPort := fmt.Sprintf("%v", Site.database.getPort())
	// Open a mysql connection
	db, dbErr := sql.Open("mysql", Site.database.getUser()+":"+Site.database.getPass()+"@tcp("+Site.database.dbHost+":"+stringPort+")/")
	if dbErr != nil {
		// sql.Open only fails for a bad driver/DSN, and returns a nil handle;
		// the original deferred db.Close() before this check (nil deref) and
		// used a Warnf format string with no verb (vet error).
		log.Warnf("WARN: %v", dbErr)
		return
	}
	// Defer the connection
	defer db.Close()
	// Create database. Dots are illegal in MySQL database names.
	dbName := strings.Replace(Site.Name+Site.Timestamp, ".", "_", -1)
	// NOTE(review): dbName is concatenated into the statement; assumes
	// Site.Name is trusted, operator-supplied input — confirm upstream.
	_, dbErr = db.Exec("CREATE DATABASE IF NOT EXISTS " + dbName)
	if dbErr != nil {
		panic(dbErr)
	}
	// Drush site-install
	thisCmd := fmt.Sprintf("-y site-install standard --sites-subdir=%v --db-url=mysql://%v:%v@%v:%v/%v install_configure_form.update_status_module='array(FALSE,FALSE)'", Site.Name, Site.database.getUser(), Site.database.getPass(), Site.database.getHost(), Site.database.getPort(), dbName)
	_, installErr := exec.Command("sh", "-c", "cd "+Site.Path+"/"+Site.Name+Site.Timestamp+" && drush "+thisCmd).Output()
	if installErr != nil {
		log.Warnln("Unable to install Drupal.")
		log.Debugln("drush", thisCmd)
	} else {
		log.Infof("Installed Drupal.")
	}
}
// ActionKill will delete a single site instance.
func (Site *Site) ActionKill() {
	// What to do with the default...
	if Site.AliasExists(Site.Name) == true {
		if _, err := os.Stat(Site.Path); err == nil {
			// os.Remove cannot delete a non-empty directory, so a populated
			// site directory was never actually removed; use RemoveAll and
			// surface the error instead of silently dropping it. (The no-op
			// Site.Path = fmt.Sprintf("%v", Site.Path) has been removed.)
			if rmErr := os.RemoveAll(Site.Path); rmErr != nil {
				log.Errorln("Could not remove", Site.Path, rmErr)
			}
		}
	}
}
// ActionRebuild rebuild site structs, needs action, documentation or purging.
func (Site *Site) ActionRebuild() {
	// TODO: Define purpose with the existence of ProcessMake()
	if !Site.AliasExists(Site.Name) {
		return
	}
	Site.TimeStampReset()
	Site.Path = Site.Path + Site.TimeStampGet()
	//Site.ProcessMake()
	//Site.ActionInstall()
}
// ActionRebuildProject purges a specific project from a specified path, and re-download it
// Re-downloading will use drush dl, or git clone depending on availability.
func (Site *Site) ActionRebuildProject(Makefiles []string, Project string, GitPath, Branch string, RemoveGit bool) {
	log.Infoln("Searching for module/theme...")
	moduleFound := false
	var moduleType string // "contrib" or "custom"
	var moduleCat string  // "modules" or "themes"
	err := new(error)
	// Walk the codebase to locate the project and infer its type/category
	// from the directory it lives in.
	_ = filepath.Walk(Site.Path, func(path string, _ os.FileInfo, _ error) error {
		realpath := strings.Split(string(path), "\n")
		for _, name := range realpath {
			if strings.Contains(name, "/contrib/"+Project+"/") || strings.Contains(name, "/custom/"+Project+"/") {
				if strings.Contains(name, "/contrib/"+Project+"/") {
					moduleType = "contrib"
				} else {
					moduleType = "custom"
				}
				if strings.Contains(name, "/modules/"+moduleType+"/"+Project+"/") {
					moduleCat = "modules"
				} else if strings.Contains(name, "/themes/"+moduleType+"/"+Project+"/") {
					moduleCat = "themes"
				}
				moduleFound = true
			}
		}
		return nil
	})
	if moduleFound {
		log.Infoln("Found module at", Site.Path+"/sites/all/"+moduleCat+"/"+moduleType+"/"+Project+"/")
	}
	if moduleType != "" && moduleCat != "" {
		ProjectDir := Site.Path + "/sites/all/" + moduleCat + "/" + moduleType + "/" + Project + "/"
		_, errMod := os.Stat(ProjectDir)
		if errMod == nil {
			*err = os.RemoveAll(ProjectDir)
			if *err == nil {
				log.Infoln("Removed", ProjectDir)
			} else {
				log.Warn("Could not remove ", ProjectDir)
			}
		}
	}
	if moduleFound == false {
		log.Infof("Could not find project %v in %v", Project, Site.Path)
	}
	if moduleCat == "" || moduleType == "" {
		// By this point, we should fall back to the input make file.
		for _, val := range Makefiles {
			unprocessedMakes, unprocessedMakeErr := ioutil.ReadFile(val)
			if unprocessedMakeErr != nil {
				log.Warnf("Could not read from %v: %v", val, unprocessedMakeErr)
			}
			Projects := strings.Split(string(unprocessedMakes), "\n")
			for _, ThisProject := range Projects {
				if strings.Contains(ThisProject, "projects["+Project+"][subdir] = ") {
					moduleType = strings.Replace(ThisProject, "projects["+Project+"][subdir] = ", "", -1)
					moduleType = strings.Replace(moduleType, "\"", "", -1)
					moduleType = strings.Replace(moduleType, " ", "", -1)
				}
				if strings.Contains(ThisProject, "projects["+Project+"][type] = ") {
					moduleCat = strings.Replace(ThisProject, "projects["+Project+"][type] = ", "", -1)
					moduleCat = strings.Replace(moduleCat, "\"", "", -1)
					moduleCat = strings.Replace(moduleCat, " ", "", -1)
				}
			}
		}
		if moduleCat == "" {
			log.Warnln("Project category could not be detected.")
		} else {
			log.Infoln("Project category was found to be", moduleCat)
			// Map make-file singular type values onto the on-disk folder
			// names to prevent folder mismatching ("module" vs "modules").
			if moduleCat == "module" {
				moduleCat = "modules"
			}
			if moduleCat == "theme" {
				moduleCat = "themes"
			}
		}
		if moduleType == "" {
			log.Warnln("Project type could not be detected.")
		} else {
			// Bug fix: this log line previously printed moduleCat.
			log.Infoln("Project type was found to be", moduleType)
		}
	}
	path := Site.Path + "/" + "/sites/all/" + moduleCat + "/" + moduleType + "/"
	if moduleType == "contrib" {
		command.DrushDownloadToPath(path, Project)
	} else {
		gitCmd := exec.Command("git", "clone", "-b", Branch, GitPath, path+"/"+Project)
		_, *err = gitCmd.Output()
		if *err == nil {
			log.Infof("Downloaded package %v from %v to %v", Project, GitPath, path+"/"+Project)
			if RemoveGit {
				*err = os.RemoveAll(path + "/" + Project + "/.git")
				if *err == nil {
					log.Infoln("Removed .git folder from file system.")
				} else {
					log.Warnln("Unable to remove .git folder from file system.")
				}
			}
		} else {
			log.Errorf("Could not clone %v from %v: %v\n", Project, GitPath, *err)
		}
	}
}
// ActionRebuildCodebase re-runs drush make on a specified path.
// It concatenates the projects[]/libraries[] lines of all input make files
// into a single temporary make file under /tmp, wipes the existing codebase
// (keeping sites/* except sites/all), rebuilds via ProcessMake, then deletes
// the temporary make file.
func (Site *Site) ActionRebuildCodebase(Makefiles []string) {
	// This function exists for the sole purpose of
	// rebuilding a specific Drupal codebase in a specific
	// directory for Release management type work.
	var newMakeFilePath string
	if Site.Timestamp == "." {
		// A bare "." timestamp marks a non-timestamped build; clear it and
		// generate a throwaway stamp purely for the temp file name.
		Site.Timestamp = ""
		newMakeFilePath = "/tmp/drupal-" + Site.Name + Site.TimeStampGenerate() + ".make"
	} else {
		newMakeFilePath = "/tmp/drupal-" + Site.Name + Site.TimeStampGet() + ".make"
	}
	file, crErr := os.Create(newMakeFilePath)
	if crErr == nil {
		log.Infoln("Generated temporary make file...")
	} else {
		log.Errorln("Error creating "+newMakeFilePath+":", crErr)
	}
	writer := bufio.NewWriter(file)
	defer file.Close()
	// Fixed header; core/api lines from the input files are skipped below.
	fmt.Fprintln(writer, "core = 7.x")
	fmt.Fprintln(writer, "api = 2")
	for _, Makefile := range Makefiles {
		cmdOut, _ := exec.Command("cat", Makefile).Output()
		output := strings.Split(string(cmdOut), "\n")
		for _, line := range output {
			if strings.HasPrefix(line, "core") == false && strings.HasPrefix(line, "api") == false {
				if strings.HasPrefix(line, "projects") == true || strings.HasPrefix(line, "libraries") == true {
					fmt.Fprintln(writer, line)
				}
			}
		}
	}
	writer.Flush()
	chmodErr := os.Chmod(Site.Path, 0777)
	if chmodErr != nil {
		log.Warnln("Could not change permissions on codebase directory")
	} else {
		log.Infoln("Changed docroot permissions to 0777 for file removal.")
	}
	// Delete the existing codebase, sparing paths under /sites except /sites/all.
	_ = filepath.Walk(Site.Path, func(path string, Info os.FileInfo, _ error) error {
		realpath := strings.Split(Site.Path, "\n")
		err := new(error)
		for _, name := range realpath {
			fmt.Sprintln(name) // NOTE(review): no-op; result is discarded
			if !strings.Contains(path, "/sites") || strings.Contains(path, "/sites/all") {
				if Info.IsDir() && !strings.HasSuffix(path, Site.Path) {
					os.Chmod(path, 0777)
					delErr := os.RemoveAll(path)
					if delErr != nil {
						log.Warnln("Could not remove", path)
					}
				} else if !Info.IsDir() {
					delErr := os.Remove(path)
					if delErr != nil {
						log.Warnln("Could not remove", path)
					}
				}
			}
		}
		return *err // always nil; *err is never assigned in this closure
	})
	Site.ProcessMake(newMakeFilePath)
	err := os.Remove(newMakeFilePath)
	if err != nil {
		log.Warnln("Could not remove temporary make file", newMakeFilePath)
	} else {
		log.Infoln("Removed temporary make file", newMakeFilePath)
	}
}
// ActionDatabaseDumpLocal run drush sql-dump to a specified path on a site struct.
func (Site *Site) ActionDatabaseDumpLocal(path string) {
	srcAlias := strings.Replace(Site.Alias, "@", "", -1)
	x := command.NewDrushCommand()
	x.Set(srcAlias, fmt.Sprintf("sql-dump %v", path), true)
	_, err := x.Output()
	if err == nil {
		// Leveled logging for consistency with ActionDatabaseDumpRemote
		// (both branches previously used unleveled log.Println).
		log.Infoln("Dump complete. Dump can be found at", path)
	} else {
		log.Errorln("Could not dump database.", err)
	}
}
// ActionDatabaseDumpRemote run drush sql-dump to a specified path on a site alias.
func (Site *Site) ActionDatabaseDumpRemote(alias, path string) {
	drush := command.NewDrushCommand()
	drush.Set(strings.Replace(alias, "@", "", -1), fmt.Sprintf("sql-dump %v", path), true)
	if _, err := drush.Output(); err != nil {
		log.Errorln("Could not dump database.", err)
		return
	}
	log.Infoln("Dump complete. Dump can be found at", path)
}
// DatabaseSet sets the database field to an inputted *makeDB struct.
// The struct is stored as-is and later read by ActionInstall and DatabasesGet.
func (Site *Site) DatabaseSet(database *makeDB) {
	Site.database = database
}
// DatabasesGet returns a list of databases associated to local builds from
// the site struct, i.e. all databases whose names start with "<name>_2".
// NOTE(review): the password is passed on the mysql command line and is
// visible in the process table; behaviour kept as-is.
func (Site *Site) DatabasesGet() []string {
	out, _ := exec.Command("mysql", "--user="+Site.database.dbUser, "--password="+Site.database.dbPass, "-e", "show databases").Output()
	siteDbs := []string{}
	prefix := Site.Name + "_2"
	for _, dbName := range strings.Split(string(out), "\n") {
		if strings.HasPrefix(dbName, prefix) {
			siteDbs = append(siteDbs, dbName)
		}
	}
	return siteDbs
}
// SymInstall installs a symlink to the site directory of the site struct,
// pointing <path>/<domain>.latest at the current timestamped build.
func (Site *Site) SymInstall() {
	target := filepath.Join(Site.Name + Site.TimeStampGet())
	link := filepath.Join(Site.Path, Site.Domain+".latest")
	if err := os.Symlink(target, link); err != nil {
		log.Warnln("Could not create symlink:", err)
	} else {
		log.Infoln("Created symlink")
	}
}
// SymUninstall removes a symlink to the site directory of the site struct.
// Missing links are ignored silently.
func (Site *Site) SymUninstall() {
	linkPath := Site.Path + "/" + Site.Domain + ".latest"
	if _, statErr := os.Stat(linkPath); statErr != nil {
		return
	}
	if err := os.Remove(linkPath); err != nil {
		log.Errorln("Could not remove symlink.", err)
	} else {
		log.Infoln("Removed symlink.")
	}
}
// SymReinstall re-installs a symlink to the site directory of the site struct
// by removing any existing <domain>.latest link and creating a fresh one.
func (Site *Site) SymReinstall() {
	Site.SymUninstall()
	Site.SymInstall()
}
// TimeStampGet returns the timestamp variable for the site struct
// (dot-prefixed, e.g. ".20060102150405", or "" when cleared).
func (Site *Site) TimeStampGet() string {
	return Site.Timestamp
}
// TimeStampSet sets the timestamp field for the site struct to the given
// value, prefixed with a dot.
func (Site *Site) TimeStampSet(value string) {
	Site.Timestamp = "." + value
}
// TimeStampReset sets the timestamp field for the site struct to a new value
// derived from the current time, formatted as ".YYYYMMDDhhmmss".
func (Site *Site) TimeStampReset() {
	Site.Timestamp = "." + time.Now().Format("20060102150405")
}
// TimeStampGenerate generates a new timestamp and returns it, does not latch
// to the site struct. Format is ".YYYYMMDDhhmmss".
func (Site *Site) TimeStampGenerate() string {
	return "." + time.Now().Format("20060102150405")
}
// VerifyProcessedMake requires documentation, @TODO for revisitation.
// Parses makeFile for projects[...] and libraries[...] entries carrying a
// subdir/directory_name value, then walks Site.Path and marks each parsed
// entry whose directory exists on disk (Status = true).
func (Site *Site) VerifyProcessedMake(makeFile string) []DrupalProject {
	unprocessedMakes, unprocessedMakeErr := ioutil.ReadFile(makeFile)
	// NOTE(review): make([]DrupalProject, 50) pre-fills 50 zero-value entries;
	// parsed projects are appended after them, so callers must skip Name == "".
	Projects := make([]DrupalProject, 50)
	if unprocessedMakeErr != nil {
		log.Infoln("Could not read from", unprocessedMakeErr)
	}
	for _, Line := range strings.Split(string(unprocessedMakes), "\n") {
		var Type string
		// A line only yields an entry when it carries a subdir or
		// directory_name assignment; Type holds that assignment's value.
		if strings.Contains(Line, "subdir") || strings.Contains(Line, "directory_name") {
			currentType := strings.SplitAfter(Line, "=")
			Type = strings.Replace(currentType[1], "\"", "", -1)
			Type = strings.Replace(Type, " ", "", -1)
		}
		if Type != "" {
			if strings.HasPrefix(Line, "projects") {
				// Extract NAME from projects[NAME][...] by stripping brackets.
				Project := strings.SplitAfter(Line, "[")
				Project[1] = strings.Replace(Project[1], "[", "", -1)
				Project[1] = strings.Replace(Project[1], "]", "", -1)
				thisProject := DrupalProject{"modules", Project[1], Type, false}
				Projects = append(Projects, thisProject)
			}
			if strings.HasPrefix(Line, "libraries") {
				Library := strings.SplitAfter(Line, "[")
				Library[1] = strings.Replace(Library[1], "[", "", -1)
				Library[1] = strings.Replace(Library[1], "]", "", -1)
				thisProject := DrupalProject{"libraries", Library[1], Type, false}
				Projects = append(Projects, thisProject)
			}
		}
	}
	var foundModules int
	for index, Project := range Projects {
		if Project.Name != "" {
			//log.Printf("Package %v is of type %v, belonging to subdir %v", Project.Name, Project.Type, Project.Subdir)
			err := new(error)
			_ = filepath.Walk(Site.Path, func(path string, _ os.FileInfo, _ error) error {
				realpath := strings.Split(Site.Path, "\n")
				for _, name := range realpath {
					if strings.Contains(path, "custom/"+Project.Name+"/") || strings.Contains(path, "contrib/"+Project.Name+"/") || strings.Contains(path, "libraries/"+Project.Subdir+"/") {
						fmt.Sprintln(name) // NOTE(review): no-op; result discarded
						foundModules++
						Projects[index].Status = true
						break
					}
				}
				return *err // always nil; *err is never assigned
			})
		}
	}
	return Projects
}
// ProcessMake processes a make file at a particular path by running
// "drush make" inside the build's working directory. Returns true once the
// drush command has been dispatched. Exits the process (via logrus Fatalln)
// when the make file is missing.
func (Site *Site) ProcessMake(makeFile string) bool {
	// Test the make file exists
	fullPath := makeFile
	_, err := os.Stat(fullPath)
	if err != nil {
		// logrus Fatalln already exits with status 1; the original's trailing
		// os.Exit(1) was unreachable and has been removed.
		log.Fatalln("File not found:", err)
	}
	// Optionally apply the configured source -> destination rewrite.
	if Site.MakeFileRewriteSource != "" && Site.MakeFileRewriteDestination != "" {
		log.Printf("Applying specified rewrite string on temporary makefile: %v -> %v", Site.MakeFileRewriteSource, Site.MakeFileRewriteDestination)
		ReplaceTextInFile(makeFile, Site.MakeFileRewriteSource, Site.MakeFileRewriteDestination)
	} else {
		log.Println("No rewrite string was configured, continuing without additional parsing.")
	}
	log.Infof("Building from %v...", makeFile)
	drushMake := command.NewDrushCommand()
	drushCommand := ""
	if Site.WorkingCopy {
		drushCommand = fmt.Sprintf("make --yes --working-copy %v", makeFile)
	} else {
		drushCommand = fmt.Sprintf("make --yes %v", makeFile)
	}
	drushMake.Set("", drushCommand, false)
	// Timestamped builds land in <path>/<name><timestamp>; otherwise build
	// directly into <path>/.
	if Site.Timestamp == "" {
		drushMake.SetWorkingDir(Site.Path + "/")
	} else {
		drushMake.SetWorkingDir(Site.Path + "/" + Site.Name + Site.Timestamp)
	}
	mkdirErr := os.MkdirAll(drushMake.GetWorkingDir(), 0755)
	if mkdirErr != nil {
		log.Warnln("Could not create directory", drushMake.GetWorkingDir())
	} else {
		log.Infoln("Created directory", drushMake.GetWorkingDir())
	}
	_ = drushMake.LiveOutput()
	return true
}
// InstallSiteRef installs the Drupal multisite sites.php file for the site struct.
// The sites.php template is embedded below as a raw byte slice; the tokens
// "Name" and "Domain" within it are substituted with the site's values.
func (Site *Site) InstallSiteRef() {
	data := map[string]string{
		"Name":   Site.Name,
		"Domain": Site.Domain,
	}
	dirPath := Site.Path + "/" + Site.Name + Site.Timestamp + "/sites/"
	dirErr := os.MkdirAll(dirPath+Site.Name, 0755)
	if dirErr != nil {
		log.Errorln("Unable to create directory", dirPath+Site.Name, dirErr)
	} else {
		log.Infoln("Created directory", dirPath+Site.Name)
	}
	dirErr = os.Chmod(dirPath+Site.Name, 0775)
	if dirErr != nil {
		// Message fixed: the mode actually applied is 0775, not 0755.
		log.Errorln("Could not set permissions 0775 on", dirPath+Site.Name, dirErr)
	} else {
		log.Infoln("Permissions set to 0775 on", dirPath+Site.Name)
	}
	filename := dirPath + "/sites.php"
	// Raw bytes of a small PHP template: <?php ... $sites['Domain'] = 'Name'; ... ?>
	buffer := []byte{60, 63, 112, 104, 112, 10, 10, 47, 42, 42, 10, 32, 42, 32, 64, 102, 105, 108, 101, 10, 32, 42, 32, 67, 111, 110, 102, 105, 103, 117, 114, 97, 116, 105, 111, 110, 32, 102, 105, 108, 101, 32, 102, 111, 114, 32, 68, 114, 117, 112, 97, 108, 39, 115, 32, 109, 117, 108, 116, 105, 45, 115, 105, 116, 101, 32, 100, 105, 114, 101, 99, 116, 111, 114, 121, 32, 97, 108, 105, 97, 115, 105, 110, 103, 32, 102, 101, 97, 116, 117, 114, 101, 46, 10, 32, 42, 47, 10, 10, 32, 32, 32, 36, 115, 105, 116, 101, 115, 91, 39, 68, 111, 109, 97, 105, 110, 39, 93, 32, 61, 32, 39, 78, 97, 109, 101, 39, 59, 10, 10, 63, 62, 10}
	tpl := fmt.Sprintf("%v", string(buffer[:]))
	tpl = strings.Replace(tpl, "Name", data["Name"], -1)
	tpl = strings.Replace(tpl, "Domain", data["Domain"], -1)
	nf, err := os.Create(filename)
	if err != nil {
		// Check the error before touching nf: the original called nf.Chmod
		// first, which would panic on a nil *os.File when Create failed.
		log.Fatalln("Could not create", err)
	}
	defer nf.Close()
	nf.Chmod(0755)
	_, err = nf.WriteString(tpl)
	if err != nil {
		log.Errorln("Could not add", filename)
	} else {
		log.Infoln("Added", filename)
	}
}
// ReplaceTextInFile reinstalls and verifies the ctools cache folder for the site struct.
// NOTE(review): despite the name, this method only recreates the ctools cache
// directory; it shares a name with the package-level text-replacement helper —
// consider renaming.
func (Site *Site) ReplaceTextInFile() {
	// We need to remove and re-add the ctools cache directory as 0777.
	cToolsDir := fmt.Sprintf("%v/%v%v/sites/%v/files/ctools", Site.Path, Site.Name, Site.Timestamp, Site.Name)
	// Remove the directory!
	cToolsErr := os.RemoveAll(cToolsDir)
	if cToolsErr != nil {
		log.Errorln("Couldn't remove", cToolsDir)
	} else {
		// Message fixed: successful removal previously logged "Created".
		log.Infoln("Removed", cToolsDir)
	}
	// Add the directory!
	cToolsErr = os.Mkdir(cToolsDir, 0777)
	if cToolsErr != nil {
		// Message fixed: a failed create previously logged "Couldn't remove".
		log.Errorln("Couldn't create", cToolsDir)
	} else {
		log.Infoln("Created", cToolsDir)
	}
}
#27: Improve message logs, prevent folder mismatching.
package make
import (
"bufio"
"database/sql"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/fubarhouse/golang-drush/command"
_ "github.com/go-sql-driver/mysql" // mysql is assumed under this system (for now).
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// ReplaceTextInFile is a utility function to replace all instances of a string in a file.
// The file must already exist; read or write failures panic via logrus.
func ReplaceTextInFile(fullPath string, oldString string, newString string) {
	read, err := ioutil.ReadFile(fullPath)
	if err != nil {
		log.Panicln(err)
	}
	// Preserve the file's current permissions. The original passed mode 0 to
	// WriteFile, which would create an unreadable file should the target
	// vanish between the read and the write. Permissions are ignored when the
	// file already exists, so behaviour is unchanged in the normal case.
	perm := os.FileMode(0644)
	if info, statErr := os.Stat(fullPath); statErr == nil {
		perm = info.Mode()
	}
	newContents := strings.Replace(string(read), oldString, newString, -1)
	err = ioutil.WriteFile(fullPath, []byte(newContents), perm)
	if err != nil {
		log.Panicln(err)
	}
}
// RestartWebServer is a function to run a command to restart the given web service.
func (Site *Site) RestartWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "restart").Output(); err != nil {
		log.Errorf("Could not restart webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Restarted webserver %v.\n", Site.Webserver)
}
// StartWebServer is a function to run a command to start the given web service.
func (Site *Site) StartWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "start").Output(); err != nil {
		log.Errorf("Could not start webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Started webserver %v.\n", Site.Webserver)
}
// StopWebServer is a function to run a command to stop the given web service.
func (Site *Site) StopWebServer() {
	if _, err := exec.Command("sudo", "service", Site.Webserver, "stop").Output(); err != nil {
		log.Errorf("Could not stop webserver %v. %v\n", Site.Webserver, err)
		return
	}
	log.Infof("Stopped webserver %v.\n", Site.Webserver)
}
// DrupalProject struct which represents a Drupal project on drupal.org
type DrupalProject struct {
	Type   string // "modules" or "libraries", as assigned by VerifyProcessedMake
	Name   string // project/library machine name parsed from the make file
	Subdir string // subdir/directory_name value parsed from the make file
	Status bool   // set true by VerifyProcessedMake when the project is found on disk
}
// Site struct which represents a build website being used.
type Site struct {
	Timestamp string // dot-prefixed build stamp, e.g. ".20060102150405"; see TimeStampReset/TimeStampSet
	Path      string // base filesystem path for builds
	Make      string // make file reference supplied at construction
	Name      string // site machine name; used in build directory and database names
	Alias     string // drush alias (leading "@" is stripped where used)
	Domain    string // site domain; used for the <domain>.latest symlink and sites.php
	database  *makeDB // local database credentials; set via DatabaseSet
	Webserver string // service name used by Start/Stop/RestartWebServer
	Vhostpath string
	Template  string
	MakeFileRewriteSource      string // optional rewrite applied to make files by ProcessMake
	MakeFileRewriteDestination string // replacement value for MakeFileRewriteSource
	FilePathPrivate string // defaults to "files/private"
	FilePathPublic  string // reserved for later implementation
	FilePathTemp    string // defaults to "files/private/temp"
	WorkingCopy     bool // when true, drush make runs with --working-copy
}
// NewSite instantiates an instance of the struct Site with the supplied
// identifiers and the default file-path settings, stamping it with a fresh
// build timestamp.
func NewSite(make, name, path, alias, webserver, domain, vhostpath, template string) *Site {
	s := &Site{
		Make:            make,
		Name:            name,
		Path:            path,
		Webserver:       webserver,
		Alias:           alias,
		Domain:          domain,
		Vhostpath:       vhostpath,
		Template:        template,
		FilePathPrivate: "files/private",
		FilePathPublic:  "", // For later implementation
		FilePathTemp:    "files/private/temp",
		WorkingCopy:     false,
	}
	s.TimeStampReset()
	return s
}
// ActionBuild is a superseded build action, requires action, documentation or removal.
func (Site *Site) ActionBuild() {
	// TODO: Define purpose with the existence of ProcessMake()
	if !Site.AliasExists(Site.Name) {
		return
	}
	Site.Path = Site.Path + Site.TimeStampGet()
	//Site.ProcessMakes([]string{"core.make", "libraries.make", "contrib.make", "custom.make"})
	Site.ActionInstall()
}
// ActionInstall runs drush site-install on a Site struct.
// It creates a per-build MySQL database named <name><timestamp> (dots
// replaced with underscores) and then invokes drush site-install inside the
// build directory.
func (Site *Site) ActionInstall() {
	// Obtain a string value from the Port value in db config.
	stringPort := fmt.Sprintf("%v", Site.database.getPort())
	// Open a mysql connection
	db, dbErr := sql.Open("mysql", Site.database.getUser()+":"+Site.database.getPass()+"@tcp("+Site.database.dbHost+":"+stringPort+")/")
	if dbErr != nil {
		// sql.Open only fails for a bad driver/DSN, and returns a nil handle;
		// the original deferred db.Close() before this check (nil deref) and
		// used a Warnf format string with no verb (vet error).
		log.Warnf("WARN: %v", dbErr)
		return
	}
	// Defer the connection
	defer db.Close()
	// Create database. Dots are illegal in MySQL database names.
	dbName := strings.Replace(Site.Name+Site.Timestamp, ".", "_", -1)
	// NOTE(review): dbName is concatenated into the statement; assumes
	// Site.Name is trusted, operator-supplied input — confirm upstream.
	_, dbErr = db.Exec("CREATE DATABASE IF NOT EXISTS " + dbName)
	if dbErr != nil {
		panic(dbErr)
	}
	// Drush site-install
	thisCmd := fmt.Sprintf("-y site-install standard --sites-subdir=%v --db-url=mysql://%v:%v@%v:%v/%v install_configure_form.update_status_module='array(FALSE,FALSE)'", Site.Name, Site.database.getUser(), Site.database.getPass(), Site.database.getHost(), Site.database.getPort(), dbName)
	_, installErr := exec.Command("sh", "-c", "cd "+Site.Path+"/"+Site.Name+Site.Timestamp+" && drush "+thisCmd).Output()
	if installErr != nil {
		log.Warnln("Unable to install Drupal.")
		log.Debugln("drush", thisCmd)
	} else {
		log.Infof("Installed Drupal.")
	}
}
// ActionKill will delete a single site instance.
func (Site *Site) ActionKill() {
	// What to do with the default...
	if Site.AliasExists(Site.Name) == true {
		if _, err := os.Stat(Site.Path); err == nil {
			// os.Remove cannot delete a non-empty directory, so a populated
			// site directory was never actually removed; use RemoveAll and
			// surface the error instead of silently dropping it. (The no-op
			// Site.Path = fmt.Sprintf("%v", Site.Path) has been removed.)
			if rmErr := os.RemoveAll(Site.Path); rmErr != nil {
				log.Errorln("Could not remove", Site.Path, rmErr)
			}
		}
	}
}
// ActionRebuild rebuild site structs, needs action, documentation or purging.
func (Site *Site) ActionRebuild() {
	// TODO: Define purpose with the existence of ProcessMake()
	if !Site.AliasExists(Site.Name) {
		return
	}
	Site.TimeStampReset()
	Site.Path = Site.Path + Site.TimeStampGet()
	//Site.ProcessMake()
	//Site.ActionInstall()
}
// ActionRebuildProject purges a specific project from a specified path, and re-download it
// Re-downloading will use drush dl, or git clone depending on availability.
// The project's type ("contrib"/"custom") and category ("modules"/"themes")
// are inferred from its on-disk location, falling back to parsing the input
// make files when not found.
func (Site *Site) ActionRebuildProject(Makefiles []string, Project string, GitPath, Branch string, RemoveGit bool) {
	log.Infoln("Searching for module/theme...")
	moduleFound := false
	var moduleType string // "contrib" or "custom"
	var moduleCat string  // "modules" or "themes"
	err := new(error)
	// Walk the codebase to locate the project and infer type/category from
	// the directory it lives in.
	_ = filepath.Walk(Site.Path, func(path string, _ os.FileInfo, _ error) error {
		realpath := strings.Split(string(path), "\n")
		for _, name := range realpath {
			if strings.Contains(name, "/contrib/"+Project+"/") || strings.Contains(name, "/custom/"+Project+"/") {
				if strings.Contains(name, "/contrib/"+Project+"/") {
					moduleType = "contrib"
				} else {
					moduleType = "custom"
				}
				if strings.Contains(name, "/modules/"+moduleType+"/"+Project+"/") {
					moduleCat = "modules"
				} else if strings.Contains(name, "/themes/"+moduleType+"/"+Project+"/") {
					moduleCat = "themes"
				}
				moduleFound = true
			}
		}
		return nil
	})
	if moduleFound {
		log.Infoln("Found module at", Site.Path+"/sites/all/"+moduleCat+"/"+moduleType+"/"+Project+"/")
	}
	// Purge the existing copy, if any.
	if moduleType != "" && moduleCat != "" {
		ProjectDir := Site.Path + "/sites/all/" + moduleCat + "/" + moduleType + "/" + Project + "/"
		_, errMod := os.Stat(ProjectDir)
		if errMod == nil {
			*err = os.RemoveAll(ProjectDir)
			if *err == nil {
				log.Infoln("Removed", ProjectDir)
			} else {
				log.Warn("Could not remove ", ProjectDir)
			}
		}
	}
	if moduleFound == false {
		log.Infof("Could not find project %v in %v", Project, Site.Path)
	}
	if moduleCat == "" || moduleType == "" {
		// By this point, we should fall back to the input make file.
		for _, val := range Makefiles {
			unprocessedMakes, unprocessedMakeErr := ioutil.ReadFile(val)
			if unprocessedMakeErr != nil {
				log.Warnf("Could not read from %v: %v", val, unprocessedMakeErr)
			}
			Projects := strings.Split(string(unprocessedMakes), "\n")
			for _, ThisProject := range Projects {
				if strings.Contains(ThisProject, "projects["+Project+"][subdir] = ") {
					moduleType = strings.Replace(ThisProject, "projects["+Project+"][subdir] = ", "", -1)
					moduleType = strings.Replace(moduleType, "\"", "", -1)
					moduleType = strings.Replace(moduleType, " ", "", -1)
				}
				if strings.Contains(ThisProject, "projects["+Project+"][type] = ") {
					moduleCat = strings.Replace(ThisProject, "projects["+Project+"][type] = ", "", -1)
					moduleCat = strings.Replace(moduleCat, "\"", "", -1)
					moduleCat = strings.Replace(moduleCat, " ", "", -1)
				}
			}
		}
		if moduleCat == "" {
			log.Warnln("Project category could not be detected.")
		} else {
			log.Infoln("Project category was found to be", moduleCat)
			// Map make-file singular type values onto the on-disk folder
			// names to prevent folder mismatching.
			if moduleCat == "module" {
				moduleCat = "modules"
			}
			if moduleCat == "theme" {
				moduleCat = "themes"
			}
		}
		if moduleType == "" {
			log.Warnln("Project type could not be detected.")
		} else {
			log.Infoln("Project type was found to be", moduleType)
		}
	}
	// Re-download: drush for contrib, git clone otherwise.
	path := Site.Path + "/" + "/sites/all/" + moduleCat + "/" + moduleType + "/"
	if moduleType == "contrib" {
		command.DrushDownloadToPath(path, Project)
	} else {
		gitCmd := exec.Command("git", "clone", "-b", Branch, GitPath, path+"/"+Project)
		_, *err = gitCmd.Output()
		if *err == nil {
			log.Infof("Downloaded package %v from %v to %v", Project, GitPath, path+"/"+Project)
			if RemoveGit {
				*err = os.RemoveAll(path + "/" + Project + "/.git")
				if *err == nil {
					log.Infoln("Removed .git folder from file system.")
				} else {
					log.Warnln("Unable to remove .git folder from file system.")
				}
			}
		} else {
			log.Errorf("Could not clone %v from %v: %v\n", Project, GitPath, *err)
		}
	}
}
// ActionRebuildCodebase re-runs drush make on a specified path.
// It concatenates the projects[]/libraries[] lines of all input make files
// into a single temporary make file under /tmp, wipes the existing codebase
// (keeping sites/* except sites/all), rebuilds via ProcessMake, then deletes
// the temporary make file.
func (Site *Site) ActionRebuildCodebase(Makefiles []string) {
	// This function exists for the sole purpose of
	// rebuilding a specific Drupal codebase in a specific
	// directory for Release management type work.
	var newMakeFilePath string
	if Site.Timestamp == "." {
		// A bare "." timestamp marks a non-timestamped build; clear it and
		// generate a throwaway stamp purely for the temp file name.
		Site.Timestamp = ""
		newMakeFilePath = "/tmp/drupal-" + Site.Name + Site.TimeStampGenerate() + ".make"
	} else {
		newMakeFilePath = "/tmp/drupal-" + Site.Name + Site.TimeStampGet() + ".make"
	}
	file, crErr := os.Create(newMakeFilePath)
	if crErr == nil {
		log.Infoln("Generated temporary make file...")
	} else {
		log.Errorln("Error creating "+newMakeFilePath+":", crErr)
	}
	writer := bufio.NewWriter(file)
	defer file.Close()
	// Fixed header; core/api lines from the input files are skipped below.
	fmt.Fprintln(writer, "core = 7.x")
	fmt.Fprintln(writer, "api = 2")
	for _, Makefile := range Makefiles {
		cmdOut, _ := exec.Command("cat", Makefile).Output()
		output := strings.Split(string(cmdOut), "\n")
		for _, line := range output {
			if strings.HasPrefix(line, "core") == false && strings.HasPrefix(line, "api") == false {
				if strings.HasPrefix(line, "projects") == true || strings.HasPrefix(line, "libraries") == true {
					fmt.Fprintln(writer, line)
				}
			}
		}
	}
	writer.Flush()
	chmodErr := os.Chmod(Site.Path, 0777)
	if chmodErr != nil {
		log.Warnln("Could not change permissions on codebase directory")
	} else {
		log.Infoln("Changed docroot permissions to 0777 for file removal.")
	}
	// Delete the existing codebase, sparing paths under /sites except /sites/all.
	_ = filepath.Walk(Site.Path, func(path string, Info os.FileInfo, _ error) error {
		realpath := strings.Split(Site.Path, "\n")
		err := new(error)
		for _, name := range realpath {
			fmt.Sprintln(name) // NOTE(review): no-op; result is discarded
			if !strings.Contains(path, "/sites") || strings.Contains(path, "/sites/all") {
				if Info.IsDir() && !strings.HasSuffix(path, Site.Path) {
					os.Chmod(path, 0777)
					delErr := os.RemoveAll(path)
					if delErr != nil {
						log.Warnln("Could not remove", path)
					}
				} else if !Info.IsDir() {
					delErr := os.Remove(path)
					if delErr != nil {
						log.Warnln("Could not remove", path)
					}
				}
			}
		}
		return *err // always nil; *err is never assigned in this closure
	})
	Site.ProcessMake(newMakeFilePath)
	err := os.Remove(newMakeFilePath)
	if err != nil {
		log.Warnln("Could not remove temporary make file", newMakeFilePath)
	} else {
		log.Infoln("Removed temporary make file", newMakeFilePath)
	}
}
// ActionDatabaseDumpLocal run drush sql-dump to a specified path on a site struct.
func (Site *Site) ActionDatabaseDumpLocal(path string) {
	srcAlias := strings.Replace(Site.Alias, "@", "", -1)
	x := command.NewDrushCommand()
	x.Set(srcAlias, fmt.Sprintf("sql-dump %v", path), true)
	_, err := x.Output()
	if err == nil {
		// Leveled logging for consistency with ActionDatabaseDumpRemote
		// (both branches previously used unleveled log.Println).
		log.Infoln("Dump complete. Dump can be found at", path)
	} else {
		log.Errorln("Could not dump database.", err)
	}
}
// ActionDatabaseDumpRemote run drush sql-dump to a specified path on a site alias.
func (Site *Site) ActionDatabaseDumpRemote(alias, path string) {
	drush := command.NewDrushCommand()
	drush.Set(strings.Replace(alias, "@", "", -1), fmt.Sprintf("sql-dump %v", path), true)
	if _, err := drush.Output(); err != nil {
		log.Errorln("Could not dump database.", err)
		return
	}
	log.Infoln("Dump complete. Dump can be found at", path)
}
// DatabaseSet sets the database field to an inputted *makeDB struct.
// The struct is stored as-is and later read by ActionInstall and DatabasesGet.
func (Site *Site) DatabaseSet(database *makeDB) {
	Site.database = database
}
// DatabasesGet returns a list of databases associated to local builds from
// the site struct, i.e. all databases whose names start with "<name>_2".
// NOTE(review): the password is passed on the mysql command line and is
// visible in the process table; behaviour kept as-is.
func (Site *Site) DatabasesGet() []string {
	out, _ := exec.Command("mysql", "--user="+Site.database.dbUser, "--password="+Site.database.dbPass, "-e", "show databases").Output()
	siteDbs := []string{}
	prefix := Site.Name + "_2"
	for _, dbName := range strings.Split(string(out), "\n") {
		if strings.HasPrefix(dbName, prefix) {
			siteDbs = append(siteDbs, dbName)
		}
	}
	return siteDbs
}
// SymInstall installs a symlink to the site directory of the site struct,
// pointing <path>/<domain>.latest at the current timestamped build.
func (Site *Site) SymInstall() {
	target := filepath.Join(Site.Name + Site.TimeStampGet())
	link := filepath.Join(Site.Path, Site.Domain+".latest")
	if err := os.Symlink(target, link); err != nil {
		log.Warnln("Could not create symlink:", err)
	} else {
		log.Infoln("Created symlink")
	}
}
// SymUninstall removes a symlink to the site directory of the site struct.
// Missing links are ignored silently.
func (Site *Site) SymUninstall() {
	linkPath := Site.Path + "/" + Site.Domain + ".latest"
	if _, statErr := os.Stat(linkPath); statErr != nil {
		return
	}
	if err := os.Remove(linkPath); err != nil {
		log.Errorln("Could not remove symlink.", err)
	} else {
		log.Infoln("Removed symlink.")
	}
}
// SymReinstall re-installs a symlink to the site directory of the site struct
// by removing any existing <domain>.latest link and creating a fresh one.
func (Site *Site) SymReinstall() {
	Site.SymUninstall()
	Site.SymInstall()
}
// TimeStampGet returns the timestamp variable for the site struct.
// The stored value includes the leading "." added by TimeStampSet/TimeStampReset.
func (Site *Site) TimeStampGet() string {
	return Site.Timestamp
}
// TimeStampSet sets the timestamp field for the site struct to a given value.
// Timestamps are stored with a leading dot so they can be appended directly
// to the site name (e.g. "mysite.20060102150405").
func (Site *Site) TimeStampSet(value string) {
	// Plain concatenation; fmt.Sprintf(".%v", value) was an unnecessary
	// boxing/formatting round-trip for a string argument.
	Site.Timestamp = "." + value
}
// TimeStampReset sets the timestamp field for the site struct to a new value
// in the ".YYYYMMDDhhmmss" layout also produced by TimeStampGenerate.
func (Site *Site) TimeStampReset() {
	// Direct concatenation replaces the needless fmt.Sprintf(".%v", ...).
	Site.Timestamp = "." + time.Now().Format("20060102150405")
}
// TimeStampGenerate generates a new timestamp and returns it, does not latch
// to the site struct. Layout matches TimeStampReset (".YYYYMMDDhhmmss").
func (Site *Site) TimeStampGenerate() string {
	// Direct concatenation replaces the needless fmt.Sprintf(".%v", ...).
	return "." + time.Now().Format("20060102150405")
}
// VerifyProcessedMake parses a make file and returns the projects/libraries
// it declares, marking Status=true for entries found on disk under Site.Path.
func (Site *Site) VerifyProcessedMake(makeFile string) []DrupalProject {
	unprocessedMakes, unprocessedMakeErr := ioutil.ReadFile(makeFile)
	// Bug fix: make([]DrupalProject, 50) pre-filled the result with 50
	// zero-value entries that were then returned alongside the real ones;
	// reserve capacity only.
	Projects := make([]DrupalProject, 0, 50)
	if unprocessedMakeErr != nil {
		log.Infoln("Could not read from", unprocessedMakeErr)
	}
	for _, Line := range strings.Split(string(unprocessedMakes), "\n") {
		var Type string
		// "subdir"/"directory_name" lines carry the subdirectory used for
		// the projects[...]/libraries[...] entries that follow.
		if strings.Contains(Line, "subdir") || strings.Contains(Line, "directory_name") {
			currentType := strings.SplitAfter(Line, "=")
			Type = strings.Replace(currentType[1], "\"", "", -1)
			Type = strings.Replace(Type, " ", "", -1)
		}
		if Type != "" {
			if strings.HasPrefix(Line, "projects") {
				Project := strings.SplitAfter(Line, "[")
				Project[1] = strings.Replace(Project[1], "[", "", -1)
				Project[1] = strings.Replace(Project[1], "]", "", -1)
				Projects = append(Projects, DrupalProject{"modules", Project[1], Type, false})
			}
			if strings.HasPrefix(Line, "libraries") {
				Library := strings.SplitAfter(Line, "[")
				Library[1] = strings.Replace(Library[1], "[", "", -1)
				Library[1] = strings.Replace(Library[1], "]", "", -1)
				Projects = append(Projects, DrupalProject{"libraries", Library[1], Type, false})
			}
		}
	}
	var foundModules int
	for index, Project := range Projects {
		if Project.Name != "" {
			_ = filepath.Walk(Site.Path, func(path string, _ os.FileInfo, _ error) error {
				if strings.Contains(path, "custom/"+Project.Name+"/") || strings.Contains(path, "contrib/"+Project.Name+"/") || strings.Contains(path, "libraries/"+Project.Subdir+"/") {
					foundModules++
					Projects[index].Status = true
				}
				// Bug fix: the previous `err := new(error); return *err`
				// always returned nil through pointless indirection — walking
				// always continues.
				return nil
			})
		}
	}
	return Projects
}
// ProcessMake processes a make file at a particular path, running
// "drush make" inside the site's (optionally timestamped) build directory.
// It terminates the process when the make file does not exist.
func (Site *Site) ProcessMake(makeFile string) bool {
	// Test the make file exists
	fullPath := makeFile
	_, err := os.Stat(fullPath)
	if err != nil {
		// log.Fatalln already calls os.Exit(1); the extra os.Exit(1) that
		// used to follow was unreachable and has been removed.
		log.Fatalln("File not found:", err)
	}
	if Site.MakeFileRewriteSource != "" && Site.MakeFileRewriteDestination != "" {
		log.Printf("Applying specified rewrite string on temporary makefile: %v -> %v", Site.MakeFileRewriteSource, Site.MakeFileRewriteDestination)
		ReplaceTextInFile(makeFile, Site.MakeFileRewriteSource, Site.MakeFileRewriteDestination)
	} else {
		log.Println("No rewrite string was configured, continuing without additional parsing.")
	}
	log.Infof("Building from %v...", makeFile)
	drushMake := command.NewDrushCommand()
	drushCommand := ""
	if Site.WorkingCopy {
		drushCommand = fmt.Sprintf("make --yes --working-copy %v", makeFile)
	} else {
		drushCommand = fmt.Sprintf("make --yes %v", makeFile)
	}
	drushMake.Set("", drushCommand, false)
	// Build in-place when no timestamp is set, otherwise inside the
	// timestamped build directory "<path>/<name><timestamp>".
	if Site.Timestamp == "" {
		drushMake.SetWorkingDir(Site.Path + "/")
	} else {
		drushMake.SetWorkingDir(Site.Path + "/" + Site.Name + Site.Timestamp)
	}
	mkdirErr := os.MkdirAll(drushMake.GetWorkingDir(), 0755)
	if mkdirErr != nil {
		log.Warnln("Could not create directory", drushMake.GetWorkingDir())
	} else {
		log.Infoln("Created directory", drushMake.GetWorkingDir())
	}
	_ = drushMake.LiveOutput()
	return true
}
// InstallSiteRef installs the Drupal multisite sites.php file for the site
// struct, mapping the site's Domain to its Name, and creates the per-site
// directory under sites/.
func (Site *Site) InstallSiteRef() {
	data := map[string]string{
		"Name":   Site.Name,
		"Domain": Site.Domain,
	}
	dirPath := Site.Path + "/" + Site.Name + Site.Timestamp + "/sites/"
	dirErr := os.MkdirAll(dirPath+Site.Name, 0755)
	if dirErr != nil {
		log.Errorln("Unable to create directory", dirPath+Site.Name, dirErr)
	} else {
		log.Infoln("Created directory", dirPath+Site.Name)
	}
	dirErr = os.Chmod(dirPath+Site.Name, 0775)
	// Bug fix: the log messages previously claimed 0755 although the mode
	// actually applied here is 0775.
	if dirErr != nil {
		log.Errorln("Could not set permissions 0775 on", dirPath+Site.Name, dirErr)
	} else {
		log.Infoln("Permissions set to 0775 on", dirPath+Site.Name)
	}
	filename := dirPath + "/sites.php"
	// Byte-encoded sites.php template; the literal placeholders "Name" and
	// "Domain" inside it are substituted below.
	buffer := []byte{60, 63, 112, 104, 112, 10, 10, 47, 42, 42, 10, 32, 42, 32, 64, 102, 105, 108, 101, 10, 32, 42, 32, 67, 111, 110, 102, 105, 103, 117, 114, 97, 116, 105, 111, 110, 32, 102, 105, 108, 101, 32, 102, 111, 114, 32, 68, 114, 117, 112, 97, 108, 39, 115, 32, 109, 117, 108, 116, 105, 45, 115, 105, 116, 101, 32, 100, 105, 114, 101, 99, 116, 111, 114, 121, 32, 97, 108, 105, 97, 115, 105, 110, 103, 32, 102, 101, 97, 116, 117, 114, 101, 46, 10, 32, 42, 47, 10, 10, 32, 32, 32, 36, 115, 105, 116, 101, 115, 91, 39, 68, 111, 109, 97, 105, 110, 39, 93, 32, 61, 32, 39, 78, 97, 109, 101, 39, 59, 10, 10, 63, 62, 10}
	tpl := string(buffer)
	tpl = strings.Replace(tpl, "Name", data["Name"], -1)
	tpl = strings.Replace(tpl, "Domain", data["Domain"], -1)
	nf, err := os.Create(filename)
	if err != nil {
		// Bug fix: check the Create error before touching nf — the previous
		// code called nf.Chmod first, dereferencing a nil *os.File on failure.
		log.Fatalln("Could not create", err)
	}
	defer nf.Close()
	nf.Chmod(0755)
	_, err = nf.WriteString(tpl)
	if err != nil {
		log.Errorln("Could not add", filename)
	} else {
		log.Infoln("Added", filename)
	}
}
// ReplaceTextInFile recreates the ctools cache folder for the site struct
// with 0777 permissions.
// NOTE(review): the method name does not describe what it does and collides
// conceptually with the package-level ReplaceTextInFile(file, src, dst)
// helper used by ProcessMake; consider renaming in a follow-up (kept here to
// preserve the API).
func (Site *Site) ReplaceTextInFile() {
	// We need to remove and re-add the ctools cache directory as 0777.
	cToolsDir := fmt.Sprintf("%v/%v%v/sites/%v/files/ctools", Site.Path, Site.Name, Site.Timestamp, Site.Name)
	// Remove the directory!
	cToolsErr := os.RemoveAll(cToolsDir)
	if cToolsErr != nil {
		log.Errorln("Couldn't remove", cToolsDir)
	} else {
		// Bug fix: this branch previously logged "Created" after a removal.
		log.Infoln("Removed", cToolsDir)
	}
	// Add the directory!
	cToolsErr = os.Mkdir(cToolsDir, 0777)
	if cToolsErr != nil {
		// Bug fix: this branch previously logged "Couldn't remove".
		log.Errorln("Couldn't create", cToolsDir)
	} else {
		log.Infoln("Created", cToolsDir)
	}
}
|
package main
import (
"crypto/tls"
"errors"
"flag"
"io"
"log"
"net"
"time"
bsc "github.com/muyuballs/bsc/v2"
"github.com/muyuballs/bsc/v2/ben"
)
var serverAddr = flag.String("server", "", "bsc server addr")
var domain = flag.String("domain", "", "service public domain")
var rhost = flag.String("rhost", "", "host rewrite to")
var target = flag.String("target", "", "target service addr")
var retry = flag.Int("retry count", -1, "retry count default -1 for ever")
var isTls = flag.Bool("tls", false, "is https")
// dialTarget opens a connection to the backend service. Port 443 (or the
// -tls flag) selects a TLS dial; certificate verification is deliberately
// skipped (InsecureSkipVerify), so any certificate is accepted.
func dialTarget(taddr *net.TCPAddr) (conn io.ReadWriteCloser, err error) {
	log.Println("dial taddr", taddr)
	useTLS := taddr.Port == 443 || *isTls
	if !useTLS {
		tcpConn, dialErr := net.DialTCP("tcp", nil, taddr)
		if dialErr != nil {
			log.Println("dial taddr:", dialErr)
			return nil, dialErr
		}
		log.Println("dial taddr done.")
		return tcpConn, nil
	}
	tlsConn, dialErr := tls.Dial("tcp", taddr.String(), &tls.Config{InsecureSkipVerify: true})
	if dialErr != nil {
		log.Println("dial taddr:", dialErr)
		return nil, dialErr
	}
	log.Println("dial taddr with tls done.")
	return tlsConn, nil
}
// closeTag tells the server that the channel identified by tag is closed by
// sending a TYPE_CLOSE block on the control connection. The err argument is
// only logged for diagnostics.
// NOTE(review): the result of WriteTo is ignored — if the server connection
// is broken the close frame is silently lost.
func closeTag(conn *net.TCPConn, tag int32, err error) {
	log.Println("close channel", tag, err)
	b := bsc.Block{Tag: tag, Type: bsc.TYPE_CLOSE}
	b.WriteTo(conn)
}
// pang answers a server TYPE_PING with a TYPE_PANG keep-alive block.
func pang(conn *net.TCPConn) {
	b := bsc.Block{Type: bsc.TYPE_PANG}
	b.WriteTo(conn)
}
// handConn multiplexes the single server connection: it reads framed blocks
// and demultiplexes them onto per-tag backend connections, dialing taddr on
// TYPE_OPEN and tearing a channel down on TYPE_CLOSE or on a failed write.
// It returns (closing the server connection) when the block reader fails.
func handConn(serverConn *net.TCPConn, taddr *net.TCPAddr) {
	defer serverConn.Close()
	// tag -> backend connection for every currently open channel
	targets := make(map[int32]io.ReadWriteCloser)
	blockReader := bsc.BlockReader{Reader: serverConn}
	for {
		block, err := blockReader.Read()
		if err != nil {
			log.Println("read data channel ", err)
			break
		}
		if block.Type == bsc.TYPE_DATA {
			// Forward payload to the matching backend; a short or failed
			// write closes the channel.
			if target, ok := targets[block.Tag]; ok {
				n, err := target.Write(block.Data)
				if err != nil || n < len(block.Data) {
					if err == nil {
						err = io.ErrShortWrite
					}
					closeTag(serverConn, block.Tag, err)
					delete(targets, block.Tag)
				}
			} else {
				closeTag(serverConn, block.Tag, errors.New("channel target not found"))
			}
			continue
		}
		if block.Type == bsc.TYPE_OPEN {
			log.Println("open channel", block.Tag)
			targetConn, err := dialTarget(taddr)
			if err != nil {
				closeTag(serverConn, block.Tag, err)
				continue
			}
			targets[block.Tag] = targetConn
			// Pump backend->server in the background.
			// NOTE(review): this goroutine writes to serverConn concurrently
			// with closeTag calls from this loop — confirm bsc.Block.WriteTo
			// tolerates interleaved writers.
			go func() {
				io.Copy(bsc.NewBlockWriter(serverConn, block.Tag), targetConn)
				closeTag(serverConn, block.Tag, errors.New("copy done."))
			}()
			continue
		}
		if block.Type == bsc.TYPE_CLOSE {
			log.Println("close channel by server", block.Tag)
			if target, ok := targets[block.Tag]; ok {
				target.Close()
				delete(targets, block.Tag)
			}
			continue
		}
		if block.Type == bsc.TYPE_PING {
			// Keep-alive: reply immediately.
			pang(serverConn)
			continue
		}
		log.Println("not support block type", block.Type)
	}
}
// main connects to the bsc server, registers the public domain and host
// rewrite, then serves the tunnel via handConn; it reconnects after a 10s
// pause until the retry budget is exhausted (-1 means retry forever).
func main() {
	log.Println("hello bsc-client")
	flag.Parse()
	// -server, -domain and -target are all mandatory.
	if *serverAddr == "" {
		flag.PrintDefaults()
		return
	}
	if *domain == "" {
		flag.PrintDefaults()
		return
	}
	if *target == "" {
		flag.PrintDefaults()
		return
	}
	taddr, err := net.ResolveTCPAddr("tcp", *target)
	if err != nil {
		log.Println(err)
		return
	}
	daddr, err := net.ResolveTCPAddr("tcp", *serverAddr)
	if err != nil {
		log.Println(err)
		return
	}
	var retryCount = 0
	for *retry == -1 || retryCount <= *retry {
		conn, err := net.DialTCP("tcp", nil, daddr)
		if err == nil {
			// Handshake: init marker, then length-delimited domain and rhost.
			conn.Write([]byte{bsc.TYPE_INIT})
			ben.WriteLDString(conn, *domain)
			ben.WriteLDString(conn, *rhost)
			handConn(conn, taddr)
		}
		// Note: a successful session also counts against the retry budget.
		retryCount++
		atTime := time.Now().Add(10 * time.Second)
		log.Println("retry connect server @", atTime)
		time.Sleep(10 * time.Second)
	}
}
update
package main
import (
"crypto/tls"
"errors"
"flag"
"io"
"log"
"net"
"time"
bsc "github.com/muyuballs/bsc/v2"
"github.com/muyuballs/bsc/v2/ben"
)
var serverAddr = flag.String("server", "", "bsc server addr")
var domain = flag.String("domain", "", "service public domain")
var rhost = flag.String("rhost", "", "host rewrite to")
var target = flag.String("target", "", "target service addr")
var retry = flag.Int("retry count", -1, "retry count default -1 for ever")
var isTls = flag.Bool("tls", false, "is https")
// dialTarget opens a connection to the backend service. Port 443 (or the
// -tls flag) selects a TLS dial; certificate verification is deliberately
// skipped (InsecureSkipVerify), so any certificate is accepted.
func dialTarget(taddr *net.TCPAddr) (conn io.ReadWriteCloser, err error) {
	log.Println("dial taddr", taddr)
	useTLS := taddr.Port == 443 || *isTls
	if !useTLS {
		tcpConn, dialErr := net.DialTCP("tcp", nil, taddr)
		if dialErr != nil {
			log.Println("dial taddr:", dialErr)
			return nil, dialErr
		}
		log.Println("dial taddr done.")
		return tcpConn, nil
	}
	tlsConn, dialErr := tls.Dial("tcp", taddr.String(), &tls.Config{InsecureSkipVerify: true})
	if dialErr != nil {
		log.Println("dial taddr:", dialErr)
		return nil, dialErr
	}
	log.Println("dial taddr with tls done.")
	return tlsConn, nil
}
// closeTag tells the server that the channel identified by tag is closed by
// sending a TYPE_CLOSE block on the control connection. The err argument is
// only logged for diagnostics.
// NOTE(review): the result of WriteTo is ignored — if the server connection
// is broken the close frame is silently lost.
func closeTag(conn *net.TCPConn, tag int32, err error) {
	log.Println("close channel", tag, err)
	b := bsc.Block{Tag: tag, Type: bsc.TYPE_CLOSE}
	b.WriteTo(conn)
}
// pang answers a server TYPE_PING with a TYPE_PANG keep-alive block.
func pang(conn *net.TCPConn) {
	b := bsc.Block{Type: bsc.TYPE_PANG}
	b.WriteTo(conn)
}
// handConn multiplexes the single server connection: it reads framed blocks
// and demultiplexes them onto per-tag backend connections, dialing taddr on
// TYPE_OPEN and tearing a channel down on TYPE_CLOSE or on a failed write.
// It returns (closing the server connection) when the block reader fails.
func handConn(serverConn *net.TCPConn, taddr *net.TCPAddr) {
	defer serverConn.Close()
	// tag -> backend connection for every currently open channel
	targets := make(map[int32]io.ReadWriteCloser)
	blockReader := bsc.BlockReader{Reader: serverConn}
	for {
		block, err := blockReader.Read()
		if err != nil {
			log.Println("read data channel ", err)
			break
		}
		if block.Type == bsc.TYPE_DATA {
			// Forward payload to the matching backend; a short or failed
			// write closes the channel.
			if target, ok := targets[block.Tag]; ok {
				n, err := target.Write(block.Data)
				if err != nil || n < len(block.Data) {
					if err == nil {
						err = io.ErrShortWrite
					}
					closeTag(serverConn, block.Tag, err)
					delete(targets, block.Tag)
				}
			} else {
				closeTag(serverConn, block.Tag, errors.New("channel target not found"))
			}
			continue
		}
		if block.Type == bsc.TYPE_OPEN {
			log.Println("open channel", block.Tag)
			targetConn, err := dialTarget(taddr)
			if err != nil {
				closeTag(serverConn, block.Tag, err)
				continue
			}
			targets[block.Tag] = targetConn
			// Pump backend->server in the background.
			// NOTE(review): this goroutine writes to serverConn concurrently
			// with closeTag calls from this loop — confirm bsc.Block.WriteTo
			// tolerates interleaved writers.
			go func() {
				io.Copy(bsc.NewBlockWriter(serverConn, block.Tag), targetConn)
				closeTag(serverConn, block.Tag, errors.New("copy done."))
			}()
			continue
		}
		if block.Type == bsc.TYPE_CLOSE {
			log.Println("close channel by server", block.Tag)
			if target, ok := targets[block.Tag]; ok {
				target.Close()
				delete(targets, block.Tag)
			}
			continue
		}
		if block.Type == bsc.TYPE_PING {
			// NOTE(review): this logs the connection's *local* address; if the
			// intent is to identify the pinging server, RemoteAddr would be
			// the usual choice — confirm.
			log.Println("Ping from ", serverConn.LocalAddr().String())
			pang(serverConn)
			continue
		}
		log.Println("not support block type", block.Type)
	}
}
// main connects to the bsc server, registers the public domain and host
// rewrite, then serves the tunnel via handConn; it reconnects after a 10s
// pause until the retry budget is exhausted (-1 means retry forever).
func main() {
	log.Println("hello bsc-client")
	flag.Parse()
	// -server, -domain and -target are all mandatory.
	if *serverAddr == "" {
		flag.PrintDefaults()
		return
	}
	if *domain == "" {
		flag.PrintDefaults()
		return
	}
	if *target == "" {
		flag.PrintDefaults()
		return
	}
	taddr, err := net.ResolveTCPAddr("tcp", *target)
	if err != nil {
		log.Println(err)
		return
	}
	daddr, err := net.ResolveTCPAddr("tcp", *serverAddr)
	if err != nil {
		log.Println(err)
		return
	}
	var retryCount = 0
	for *retry == -1 || retryCount <= *retry {
		conn, err := net.DialTCP("tcp", nil, daddr)
		if err == nil {
			// Handshake: init marker, then length-delimited domain and rhost.
			conn.Write([]byte{bsc.TYPE_INIT})
			ben.WriteLDString(conn, *domain)
			ben.WriteLDString(conn, *rhost)
			handConn(conn, taddr)
		}
		// Note: a successful session also counts against the retry budget.
		retryCount++
		atTime := time.Now().Add(10 * time.Second)
		log.Println("retry connect server @", atTime)
		time.Sleep(10 * time.Second)
	}
}
|
package api
import (
"context"
"fmt"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/runtime"
"github.com/dgrijalva/jwt-go"
jwtreq "github.com/dgrijalva/jwt-go/request"
"github.com/julienschmidt/httprouter"
"net/http"
"time"
)
// AuthSuccessObject contains Info for the AuthSuccess type
var AuthSuccessObject = &runtime.Info{
	Kind:        "auth-success",
	Constructor: func() runtime.Object { return &AuthSuccess{} },
}

// AuthSuccess represents successful authentication; Token carries the signed
// JWT the client should present on subsequent requests.
type AuthSuccess struct {
	runtime.TypeKind `yaml:",inline"`
	Token            string
}
// handleLogin authenticates the posted username/password pair and, on
// success, responds with a freshly issued JWT wrapped in AuthSuccess.
func (api *coreAPI) handleLogin(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
	username := request.PostFormValue("username")
	password := request.PostFormValue("password")
	user, err := api.externalData.UserLoader.Authenticate(username, password)
	if err != nil {
		api.contentType.WriteOne(writer, request, NewServerError(fmt.Sprintf("Authentication error: %s", err)))
		return
	}
	api.contentType.WriteOne(writer, request, &AuthSuccess{
		AuthSuccessObject.GetTypeKind(),
		api.newToken(user),
	})
}
// Claims is the JWT payload: the authenticated user name and an admin flag,
// embedded alongside the standard JWT claims (issued-at, expiry).
type Claims struct {
	Name        string `json:"name"`
	DomainAdmin bool   `json:"admin,omitempty"`
	jwt.StandardClaims
}
// Valid implements jwt.Claims validation, additionally requiring a non-empty
// user name on top of the standard time-based checks.
func (claims Claims) Valid() error {
	if claims.Name == "" {
		return fmt.Errorf("token should contain non-empty username")
	}
	return claims.StandardClaims.Valid()
}
// newToken issues an HS256-signed JWT for the given user, valid for 30 days
// from now. It panics if signing fails (i.e. api.secret is unusable).
func (api *coreAPI) newToken(user *lang.User) string {
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{
		Name:        user.Name,
		DomainAdmin: user.DomainAdmin,
		StandardClaims: jwt.StandardClaims{
			IssuedAt:  time.Now().Unix(),
			ExpiresAt: time.Now().Add(30 * 24 * time.Hour).Unix(),
		},
	})
	// Sign and get the complete encoded token as a string using the secret
	tokenString, err := token.SignedString([]byte(api.secret))
	if err != nil {
		panic(fmt.Errorf("error while signing token: %s", err))
	}
	return tokenString
}
// auth wraps a handler so that it requires a valid token for any user.
func (api *coreAPI) auth(handle httprouter.Handle) httprouter.Handle {
	return api.handleAuth(handle, false)
}

// admin wraps a handler so that it requires a valid domain-admin token.
func (api *coreAPI) admin(handle httprouter.Handle) httprouter.Handle {
	return api.handleAuth(handle, true)
}
// handleAuth decorates a handler with token validation; when admin is true a
// domain-admin token is required. Any failure is reported as HTTP 401 with
// the error text, and the wrapped handler is not invoked.
func (api *coreAPI) handleAuth(handle httprouter.Handle, admin bool) httprouter.Handle {
	return func(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
		err := api.checkToken(request, admin)
		if err != nil {
			authErr := NewServerError(fmt.Sprintf("Authentication error: %s", err))
			api.contentType.WriteOneWithStatus(writer, request, authErr, http.StatusUnauthorized)
			return
		}
		handle(writer, request, params)
	}
}
// ctxKey is a private type for context keys, so that values stored by this
// package cannot collide with string keys set by other packages (flagged by
// go vet's "should not use basic type as context key" check).
type ctxKey string

const (
	// ctxUserProperty is the context key under which checkToken stores the
	// authenticated *lang.User for later retrieval by getUserOptional.
	ctxUserProperty ctxKey = "user"
)
// checkToken validates the JWT from the Authorization header, verifies the
// signing method and that the claims match an existing user (including admin
// status), and on success stores the user in the request context.
func (api *coreAPI) checkToken(request *http.Request, admin bool) error {
	token, err := jwtreq.ParseFromRequestWithClaims(request, jwtreq.AuthorizationHeaderExtractor, &Claims{},
		func(token *jwt.Token) (interface{}, error) {
			return []byte(api.secret), nil
		})
	if err != nil {
		return err
	}
	// Reject tokens signed with anything but HMAC (e.g. RSA or "none"),
	// which would otherwise bypass secret verification.
	if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
		return fmt.Errorf("unexpected token signing method: %s", token.Header["alg"])
	}
	claims := token.Claims.(*Claims)
	user := api.externalData.UserLoader.LoadUserByName(claims.Name)
	if user == nil {
		return fmt.Errorf("token refers to non-existing user: %s", claims.Name)
	}
	if user.DomainAdmin != claims.DomainAdmin {
		// Bug fix: DomainAdmin is a bool, so use %t; %s printed the garbled
		// "%!s(bool=true)".
		return fmt.Errorf("token contains incorrect admin status: %t", claims.DomainAdmin)
	}
	if admin && !user.DomainAdmin {
		return fmt.Errorf("admin privileges required")
	}
	// store user into the request
	newRequest := request.WithContext(context.WithValue(request.Context(), ctxUserProperty, user))
	*request = *newRequest
	return nil
}
// getUserOptional returns the authenticated user stored in the request
// context by checkToken, or nil if no (valid) user is attached.
func (api *coreAPI) getUserOptional(request *http.Request) *lang.User {
	// A comma-ok type assertion on a nil or foreign value yields nil, which
	// is exactly the "not authenticated" result.
	user, _ := request.Context().Value(ctxUserProperty).(*lang.User)
	return user
}
// getUserRequired returns the authenticated user stored in the request
// context by checkToken. It panics if there is none, so it must only be
// called from handlers wrapped by auth()/admin().
func (api *coreAPI) getUserRequired(request *http.Request) *lang.User {
	user := api.getUserOptional(request)
	if user == nil {
		panic("unauthorized or user couldn't be loaded")
	}
	return user
}
Fix format for bool var
package api
import (
"context"
"fmt"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/runtime"
"github.com/dgrijalva/jwt-go"
jwtreq "github.com/dgrijalva/jwt-go/request"
"github.com/julienschmidt/httprouter"
"net/http"
"time"
)
// AuthSuccessObject contains Info for the AuthSuccess type
var AuthSuccessObject = &runtime.Info{
	Kind:        "auth-success",
	Constructor: func() runtime.Object { return &AuthSuccess{} },
}

// AuthSuccess represents successful authentication; Token carries the signed
// JWT the client should present on subsequent requests.
type AuthSuccess struct {
	runtime.TypeKind `yaml:",inline"`
	Token            string
}
// handleLogin authenticates the posted username/password pair and, on
// success, responds with a freshly issued JWT wrapped in AuthSuccess.
func (api *coreAPI) handleLogin(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
	username := request.PostFormValue("username")
	password := request.PostFormValue("password")
	user, err := api.externalData.UserLoader.Authenticate(username, password)
	if err != nil {
		api.contentType.WriteOne(writer, request, NewServerError(fmt.Sprintf("Authentication error: %s", err)))
		return
	}
	api.contentType.WriteOne(writer, request, &AuthSuccess{
		AuthSuccessObject.GetTypeKind(),
		api.newToken(user),
	})
}
// Claims is the JWT payload: the authenticated user name and an admin flag,
// embedded alongside the standard JWT claims (issued-at, expiry).
type Claims struct {
	Name        string `json:"name"`
	DomainAdmin bool   `json:"admin,omitempty"`
	jwt.StandardClaims
}
// Valid implements jwt.Claims validation, additionally requiring a non-empty
// user name on top of the standard time-based checks.
func (claims Claims) Valid() error {
	if claims.Name == "" {
		return fmt.Errorf("token should contain non-empty username")
	}
	return claims.StandardClaims.Valid()
}
// newToken issues an HS256-signed JWT for the given user, valid for 30 days
// from now. It panics if signing fails (i.e. api.secret is unusable).
func (api *coreAPI) newToken(user *lang.User) string {
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{
		Name:        user.Name,
		DomainAdmin: user.DomainAdmin,
		StandardClaims: jwt.StandardClaims{
			IssuedAt:  time.Now().Unix(),
			ExpiresAt: time.Now().Add(30 * 24 * time.Hour).Unix(),
		},
	})
	// Sign and get the complete encoded token as a string using the secret
	tokenString, err := token.SignedString([]byte(api.secret))
	if err != nil {
		panic(fmt.Errorf("error while signing token: %s", err))
	}
	return tokenString
}
// auth wraps a handler so that it requires a valid token for any user.
func (api *coreAPI) auth(handle httprouter.Handle) httprouter.Handle {
	return api.handleAuth(handle, false)
}

// admin wraps a handler so that it requires a valid domain-admin token.
func (api *coreAPI) admin(handle httprouter.Handle) httprouter.Handle {
	return api.handleAuth(handle, true)
}
// handleAuth decorates a handler with token validation; when admin is true a
// domain-admin token is required. Any failure is reported as HTTP 401 with
// the error text, and the wrapped handler is not invoked.
func (api *coreAPI) handleAuth(handle httprouter.Handle, admin bool) httprouter.Handle {
	return func(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
		err := api.checkToken(request, admin)
		if err != nil {
			authErr := NewServerError(fmt.Sprintf("Authentication error: %s", err))
			api.contentType.WriteOneWithStatus(writer, request, authErr, http.StatusUnauthorized)
			return
		}
		handle(writer, request, params)
	}
}
// ctxKey is a private type for context keys, so that values stored by this
// package cannot collide with string keys set by other packages (flagged by
// go vet's "should not use basic type as context key" check).
type ctxKey string

const (
	// ctxUserProperty is the context key under which checkToken stores the
	// authenticated *lang.User for later retrieval by getUserOptional.
	ctxUserProperty ctxKey = "user"
)
// checkToken validates the JWT from the Authorization header, verifies the
// signing method and that the claims match an existing user (including admin
// status), and on success stores the user in the request context.
func (api *coreAPI) checkToken(request *http.Request, admin bool) error {
	token, err := jwtreq.ParseFromRequestWithClaims(request, jwtreq.AuthorizationHeaderExtractor, &Claims{},
		func(token *jwt.Token) (interface{}, error) {
			return []byte(api.secret), nil
		})
	if err != nil {
		return err
	}
	// Reject tokens signed with anything but HMAC (e.g. RSA or "none"),
	// which would otherwise bypass secret verification.
	if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
		return fmt.Errorf("unexpected token signing method: %s", token.Header["alg"])
	}
	claims := token.Claims.(*Claims)
	user := api.externalData.UserLoader.LoadUserByName(claims.Name)
	if user == nil {
		return fmt.Errorf("token refers to non-existing user: %s", claims.Name)
	}
	if user.DomainAdmin != claims.DomainAdmin {
		return fmt.Errorf("token contains incorrect admin status: %t", claims.DomainAdmin)
	}
	if admin && !user.DomainAdmin {
		return fmt.Errorf("admin privileges required")
	}
	// store user into the request
	newRequest := request.WithContext(context.WithValue(request.Context(), ctxUserProperty, user))
	*request = *newRequest
	return nil
}
// getUserOptional returns the authenticated user stored in the request
// context by checkToken, or nil if no (valid) user is attached.
func (api *coreAPI) getUserOptional(request *http.Request) *lang.User {
	// A comma-ok type assertion on a nil or foreign value yields nil, which
	// is exactly the "not authenticated" result.
	user, _ := request.Context().Value(ctxUserProperty).(*lang.User)
	return user
}
// getUserRequired returns the authenticated user stored in the request
// context by checkToken. It panics if there is none, so it must only be
// called from handlers wrapped by auth()/admin().
func (api *coreAPI) getUserRequired(request *http.Request) *lang.User {
	user := api.getUserOptional(request)
	if user == nil {
		panic("unauthorized or user couldn't be loaded")
	}
	return user
}
|
/*******************************************************************************
*
* Copyright 2017 SAP SE
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of the License along with this
* program. If not, you may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
package api
import (
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"html"
	"net/http"
	"strings"
	"sync"

	"github.com/gorilla/mux"
	"github.com/sapcc/go-bits/gopherpolicy"
	"github.com/sapcc/go-bits/respondwith"
	"github.com/sapcc/go-bits/sre"
	"github.com/sapcc/limes"
	"github.com/sapcc/limes/pkg/core"
	"github.com/sapcc/limes/pkg/db"
	"github.com/sapcc/limes/pkg/reports"
)
//VersionData is used by version advertisement handlers.
//Status reports the lifecycle state of the API version (e.g. "CURRENT").
type VersionData struct {
	Status string            `json:"status"`
	ID     string            `json:"id"`
	Links  []VersionLinkData `json:"links"`
}
//VersionLinkData is used by version advertisement handlers, as part of the
//VersionData struct. Type is an optional MIME-type hint for the linked
//resource.
type VersionLinkData struct {
	URL      string `json:"href"`
	Relation string `json:"rel"`
	Type     string `json:"type,omitempty"`
}
//v1Provider bundles the dependencies shared by all v1 API handlers.
type v1Provider struct {
	Cluster        *core.Cluster
	PolicyEnforcer gopherpolicy.Enforcer
	VersionData    VersionData
	//see comment in ListProjects() for details
	listProjectsMutex sync.Mutex
}
//NewV1Router creates a http.Handler that serves the Limes v1 API.
//It also returns the VersionData for this API version which is needed for the
//version advertisement on "GET /".
func NewV1Router(cluster *core.Cluster, policyEnforcer gopherpolicy.Enforcer) (http.Handler, VersionData) {
	r := mux.NewRouter()
	p := &v1Provider{
		Cluster:        cluster,
		PolicyEnforcer: policyEnforcer,
	}
	p.VersionData = VersionData{
		Status: "CURRENT",
		ID:     "v1",
		Links: []VersionLinkData{
			{
				Relation: "self",
				URL:      p.Path(),
			},
			{
				Relation: "describedby",
				URL:      "https://github.com/sapcc/limes/tree/master/docs",
				Type:     "text/html",
			},
		},
	}
	//version advertisement
	r.Methods("GET").Path("/v1/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		respondwith.JSON(w, 200, map[string]interface{}{"version": p.VersionData})
	})
	//cluster-level endpoints
	r.Methods("GET").Path("/v1/clusters/current").HandlerFunc(p.GetCluster)
	r.Methods("GET").Path("/v1/inconsistencies").HandlerFunc(p.ListInconsistencies)
	//domain-level endpoints
	r.Methods("GET").Path("/v1/domains").HandlerFunc(p.ListDomains)
	r.Methods("GET").Path("/v1/domains/{domain_id}").HandlerFunc(p.GetDomain)
	r.Methods("POST").Path("/v1/domains/discover").HandlerFunc(p.DiscoverDomains)
	r.Methods("POST").Path("/v1/domains/{domain_id}/simulate-put").HandlerFunc(p.SimulatePutDomain)
	r.Methods("PUT").Path("/v1/domains/{domain_id}").HandlerFunc(p.PutDomain)
	//project-level endpoints
	r.Methods("GET").Path("/v1/domains/{domain_id}/projects").HandlerFunc(p.ListProjects)
	r.Methods("GET").Path("/v1/domains/{domain_id}/projects/{project_id}").HandlerFunc(p.GetProject)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/discover").HandlerFunc(p.DiscoverProjects)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/sync").HandlerFunc(p.SyncProject)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/simulate-put").HandlerFunc(p.SimulatePutProject)
	r.Methods("PUT").Path("/v1/domains/{domain_id}/projects/{project_id}").HandlerFunc(p.PutProject)
	//the router is wrapped with metrics instrumentation and the legacy
	//multi-cluster header guard
	return sre.Instrument(forbidClusterIDHeader(r)), p.VersionData
}
func forbidClusterIDHeader(inner http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
clusterID := r.Header.Get("X-Limes-Cluster-Id")
if clusterID != "" && clusterID != "current" {
http.Error(w, "multi-cluster support is removed: the X-Limes-Cluster-Id header is not allowed anymore", http.StatusBadRequest)
} else {
inner.ServeHTTP(w, r)
}
})
}
//RequireJSON will parse the request body into the given data structure, or
//write an error response if that fails.
func RequireJSON(w http.ResponseWriter, r *http.Request, data interface{}) bool {
err := json.NewDecoder(r.Body).Decode(data)
if err != nil {
http.Error(w, "request body is not valid JSON: "+err.Error(), 400)
return false
}
return true
}
//Path constructs a full URL for a given URL path below the /v1/ endpoint,
//rooted at the cluster's configured catalog URL.
func (p *v1Provider) Path(elements ...string) string {
	parts := make([]string, 0, len(elements)+2)
	parts = append(parts, strings.TrimSuffix(p.Cluster.Config.CatalogURL, "/"), "v1")
	parts = append(parts, elements...)
	return strings.Join(parts, "/")
}
//FindDomainFromRequest loads the db.Domain referenced by the :domain_id path
//parameter. Any errors will be written into the response immediately and cause
//a nil return value.
func (p *v1Provider) FindDomainFromRequest(w http.ResponseWriter, r *http.Request) *db.Domain {
	domainUUID := mux.Vars(r)["domain_id"]
	if domainUUID == "" {
		http.Error(w, "domain ID missing", 400)
		return nil
	}
	var domain db.Domain
	err := db.DB.SelectOne(&domain, `SELECT * FROM domains WHERE uuid = $1 AND cluster_id = $2`,
		domainUUID, p.Cluster.ID,
	)
	switch {
	case err == sql.ErrNoRows:
		//the domain exists in Keystone but has not been synced into our DB yet
		http.Error(w, "no such domain (if it was just created, try to POST /domains/discover)", 404)
		return nil
	case respondwith.ErrorText(w, err):
		//unexpected DB error; respondwith.ErrorText has already written the
		//500 response
		return nil
	default:
		return &domain
	}
}
//FindProjectFromRequest loads the db.Project referenced by the :project_id
//path parameter, and verifies that it is located within the given domain.
//Errors are written into the response and produce a nil return value.
func (p *v1Provider) FindProjectFromRequest(w http.ResponseWriter, r *http.Request, domain *db.Domain) *db.Project {
	project, ok := p.FindProjectFromRequestIfExists(w, r, domain)
	if ok && project == nil {
		//security fix: escape the user-controlled domain_id before echoing it
		//back in the error body, to prevent reflected XSS
		msg := fmt.Sprintf(
			"no such project (if it was just created, try to POST /domains/%s/projects/discover)",
			html.EscapeString(mux.Vars(r)["domain_id"]),
		)
		http.Error(w, msg, 404)
		return nil
	}
	return project
}
//FindProjectFromRequestIfExists works like FindProjectFromRequest, but returns
//a nil project instead of producing an error if the project does not exist in
//the local DB yet. The ok result is false when an error response was written.
func (p *v1Provider) FindProjectFromRequestIfExists(w http.ResponseWriter, r *http.Request, domain *db.Domain) (project *db.Project, ok bool) {
	projectUUID := mux.Vars(r)["project_id"]
	if projectUUID == "" {
		http.Error(w, "project ID missing", 400)
		return nil, false
	}
	project = &db.Project{}
	err := db.DB.SelectOne(project, `SELECT * FROM projects WHERE uuid = $1`, projectUUID)
	switch {
	case err == sql.ErrNoRows:
		//not an error: the project may simply not have been discovered yet
		return nil, true
	case err == nil && domain.ID != project.DomainID:
		//the project exists, but under a different domain than requested
		http.Error(w, "no such project", 404)
		return nil, false
	case respondwith.ErrorText(w, err):
		//unexpected DB error; the 500 response has already been written
		return nil, false
	default:
		return project, true
	}
}
//GetDomainReport is a convenience wrapper around reports.GetDomains() for getting a single domain report.
//It returns an error when the domain has no resource data at all.
func GetDomainReport(cluster *core.Cluster, dbDomain db.Domain, dbi db.Interface, filter reports.Filter) (*limes.DomainReport, error) {
	domainReports, err := reports.GetDomains(cluster, &dbDomain.ID, dbi, filter)
	if err != nil {
		return nil, err
	}
	if len(domainReports) == 0 {
		return nil, errors.New("no resource data found for domain")
	}
	return domainReports[0], nil
}
//GetProjectReport is a convenience wrapper around reports.GetProjects() for getting a single project report.
//It returns an error when the project has no resource data at all.
func GetProjectReport(cluster *core.Cluster, dbDomain db.Domain, dbProject db.Project, dbi db.Interface, filter reports.Filter) (*limes.ProjectReport, error) {
	var result *limes.ProjectReport
	//reports.GetProjects streams reports through the callback; capture the
	//single report for the given project
	err := reports.GetProjects(cluster, dbDomain, &dbProject, dbi, filter, func(r *limes.ProjectReport) error {
		result = r
		return nil
	})
	if err != nil {
		return nil, err
	}
	if result == nil {
		return nil, errors.New("no resource data found for project")
	}
	return result, nil
}
api: `html.EscapeString()` in error msg
/*******************************************************************************
*
* Copyright 2017 SAP SE
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of the License along with this
* program. If not, you may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
package api
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"html"
"net/http"
"strings"
"sync"
"github.com/gorilla/mux"
"github.com/sapcc/go-bits/gopherpolicy"
"github.com/sapcc/go-bits/respondwith"
"github.com/sapcc/go-bits/sre"
"github.com/sapcc/limes"
"github.com/sapcc/limes/pkg/core"
"github.com/sapcc/limes/pkg/db"
"github.com/sapcc/limes/pkg/reports"
)
//VersionData is used by version advertisement handlers.
//Status reports the lifecycle state of the API version (e.g. "CURRENT").
type VersionData struct {
	Status string            `json:"status"`
	ID     string            `json:"id"`
	Links  []VersionLinkData `json:"links"`
}
//VersionLinkData is used by version advertisement handlers, as part of the
//VersionData struct. Type is an optional MIME-type hint for the linked
//resource.
type VersionLinkData struct {
	URL      string `json:"href"`
	Relation string `json:"rel"`
	Type     string `json:"type,omitempty"`
}
//v1Provider bundles the dependencies shared by all v1 API handlers.
type v1Provider struct {
	Cluster        *core.Cluster
	PolicyEnforcer gopherpolicy.Enforcer
	VersionData    VersionData
	//see comment in ListProjects() for details
	listProjectsMutex sync.Mutex
}
//NewV1Router creates a http.Handler that serves the Limes v1 API.
//It also returns the VersionData for this API version which is needed for the
//version advertisement on "GET /".
func NewV1Router(cluster *core.Cluster, policyEnforcer gopherpolicy.Enforcer) (http.Handler, VersionData) {
	r := mux.NewRouter()
	p := &v1Provider{
		Cluster:        cluster,
		PolicyEnforcer: policyEnforcer,
	}
	p.VersionData = VersionData{
		Status: "CURRENT",
		ID:     "v1",
		Links: []VersionLinkData{
			{
				Relation: "self",
				URL:      p.Path(),
			},
			{
				Relation: "describedby",
				URL:      "https://github.com/sapcc/limes/tree/master/docs",
				Type:     "text/html",
			},
		},
	}
	//version advertisement
	r.Methods("GET").Path("/v1/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		respondwith.JSON(w, 200, map[string]interface{}{"version": p.VersionData})
	})
	//cluster-level endpoints
	r.Methods("GET").Path("/v1/clusters/current").HandlerFunc(p.GetCluster)
	r.Methods("GET").Path("/v1/inconsistencies").HandlerFunc(p.ListInconsistencies)
	//domain-level endpoints
	r.Methods("GET").Path("/v1/domains").HandlerFunc(p.ListDomains)
	r.Methods("GET").Path("/v1/domains/{domain_id}").HandlerFunc(p.GetDomain)
	r.Methods("POST").Path("/v1/domains/discover").HandlerFunc(p.DiscoverDomains)
	r.Methods("POST").Path("/v1/domains/{domain_id}/simulate-put").HandlerFunc(p.SimulatePutDomain)
	r.Methods("PUT").Path("/v1/domains/{domain_id}").HandlerFunc(p.PutDomain)
	//project-level endpoints
	r.Methods("GET").Path("/v1/domains/{domain_id}/projects").HandlerFunc(p.ListProjects)
	r.Methods("GET").Path("/v1/domains/{domain_id}/projects/{project_id}").HandlerFunc(p.GetProject)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/discover").HandlerFunc(p.DiscoverProjects)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/sync").HandlerFunc(p.SyncProject)
	r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/simulate-put").HandlerFunc(p.SimulatePutProject)
	r.Methods("PUT").Path("/v1/domains/{domain_id}/projects/{project_id}").HandlerFunc(p.PutProject)
	//the router is wrapped with metrics instrumentation and the legacy
	//multi-cluster header guard
	return sre.Instrument(forbidClusterIDHeader(r)), p.VersionData
}
//forbidClusterIDHeader wraps the given handler and rejects any request that
//still carries the removed multi-cluster selection header with a value other
//than "current". Requests without the header (or with "current") pass through.
func forbidClusterIDHeader(inner http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Header.Get("X-Limes-Cluster-Id") {
		case "", "current":
			inner.ServeHTTP(w, r)
		default:
			http.Error(w, "multi-cluster support is removed: the X-Limes-Cluster-Id header is not allowed anymore", http.StatusBadRequest)
		}
	})
}
//RequireJSON will parse the request body into the given data structure, or
//write an error response if that fails. The return value reports whether
//parsing succeeded, so callers can bail out early on false.
func RequireJSON(w http.ResponseWriter, r *http.Request, data interface{}) bool {
	if err := json.NewDecoder(r.Body).Decode(data); err != nil {
		http.Error(w, "request body is not valid JSON: "+err.Error(), http.StatusBadRequest)
		return false
	}
	return true
}
//Path constructs a full URL for a given URL path below the /v1/ endpoint.
//With no arguments it yields the base URL of the v1 API itself.
func (p *v1Provider) Path(elements ...string) string {
	//strip a trailing slash from the catalog URL so that joining never
	//produces a double slash
	base := strings.TrimSuffix(p.Cluster.Config.CatalogURL, "/")
	parts := append([]string{base, "v1"}, elements...)
	return strings.Join(parts, "/")
}
//FindDomainFromRequest loads the db.Domain referenced by the :domain_id path
//parameter. Any errors will be written into the response immediately and cause
//a nil return value.
func (p *v1Provider) FindDomainFromRequest(w http.ResponseWriter, r *http.Request) *db.Domain {
	domainUUID := mux.Vars(r)["domain_id"]
	if domainUUID == "" {
		http.Error(w, "domain ID missing", 400)
		return nil
	}

	var domain db.Domain
	err := db.DB.SelectOne(&domain, `SELECT * FROM domains WHERE uuid = $1 AND cluster_id = $2`,
		domainUUID, p.Cluster.ID,
	)
	switch {
	case err == sql.ErrNoRows:
		//the domain may exist upstream but has not been synced into the local
		//DB yet, hence the hint about /domains/discover
		http.Error(w, "no such domain (if it was just created, try to POST /domains/discover)", 404)
		return nil
	case respondwith.ErrorText(w, err):
		//unexpected DB error; ErrorText has already written the 500 response
		return nil
	default:
		return &domain
	}
}
//FindProjectFromRequest loads the db.Project referenced by the :project_id
//path parameter, and verifies that it is located within the given domain.
//Any error (including "not found") is written into the response and yields a
//nil return value.
func (p *v1Provider) FindProjectFromRequest(w http.ResponseWriter, r *http.Request, domain *db.Domain) *db.Project {
	project, ok := p.FindProjectFromRequestIfExists(w, r, domain)
	if !ok || project != nil {
		//either an error response was already written (!ok), or we found it
		return project
	}
	//lookup succeeded but the project is not in the local DB yet
	msg := fmt.Sprintf(
		"no such project (if it was just created, try to POST /domains/%s/projects/discover)",
		html.EscapeString(mux.Vars(r)["domain_id"]),
	)
	http.Error(w, msg, 404)
	return nil
}
//FindProjectFromRequestIfExists works like FindProjectFromRequest, but returns
//a nil project instead of producing an error if the project does not exist in
//the local DB yet. The ok return is false iff an error response was already
//written; (nil, true) therefore means "no error, but project not synced yet".
func (p *v1Provider) FindProjectFromRequestIfExists(w http.ResponseWriter, r *http.Request, domain *db.Domain) (project *db.Project, ok bool) {
	projectUUID := mux.Vars(r)["project_id"]
	if projectUUID == "" {
		http.Error(w, "project ID missing", 400)
		return nil, false
	}

	project = &db.Project{}
	err := db.DB.SelectOne(project, `SELECT * FROM projects WHERE uuid = $1`, projectUUID)
	switch {
	case err == sql.ErrNoRows:
		//not an error: the project may simply not be discovered yet
		return nil, true
	case err == nil && domain.ID != project.DomainID:
		//project exists, but under a different domain: report it as missing
		//rather than leaking its existence
		http.Error(w, "no such project", 404)
		return nil, false
	case respondwith.ErrorText(w, err):
		//unexpected DB error; ErrorText has already written the 500 response
		return nil, false
	default:
		return project, true
	}
}
//GetDomainReport is a convenience wrapper around reports.GetDomains() for getting a single domain report.
//It fails with an error when the query yields no rows for the given domain.
func GetDomainReport(cluster *core.Cluster, dbDomain db.Domain, dbi db.Interface, filter reports.Filter) (*limes.DomainReport, error) {
	domainReports, err := reports.GetDomains(cluster, &dbDomain.ID, dbi, filter)
	switch {
	case err != nil:
		return nil, err
	case len(domainReports) == 0:
		return nil, errors.New("no resource data found for domain")
	default:
		return domainReports[0], nil
	}
}
//GetProjectReport is a convenience wrapper around reports.GetProjects() for getting a single project report.
//It fails with an error when the query yields no data for the given project.
func GetProjectReport(cluster *core.Cluster, dbDomain db.Domain, dbProject db.Project, dbi db.Interface, filter reports.Filter) (*limes.ProjectReport, error) {
	var report *limes.ProjectReport
	//GetProjects streams reports through a callback; capture the single one
	//we expect for dbProject
	capture := func(r *limes.ProjectReport) error {
		report = r
		return nil
	}
	if err := reports.GetProjects(cluster, dbDomain, &dbProject, dbi, filter, capture); err != nil {
		return nil, err
	}
	if report == nil {
		return nil, errors.New("no resource data found for project")
	}
	return report, nil
}
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tests contains test cases. To run the tests go to tests directory and run:
// RUN_TESTBED=1 go test -v
package tests
// This file contains Test functions which initiate the tests. The tests can be either
// coded in this file or use scenarios from perf_scenarios.go.
import (
"context"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/model/pdata"
conventions "go.opentelemetry.io/collector/model/semconv/v1.5.0"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datareceivers"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datasenders"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed"
)
// TestMain is used to initiate setup, execution and tear down of testbed.
// DoTestMain runs the whole test suite and writes aggregated benchmark
// numbers into performanceResultsSummary on teardown.
func TestMain(m *testing.M) {
	testbed.DoTestMain(m, performanceResultsSummary)
}
// TestTrace10kSPS runs the 10k spans/second load scenario once per supported
// trace protocol and checks that the collector stays within the per-protocol
// CPU and RAM expectations.
func TestTrace10kSPS(t *testing.T) {
	tests := []struct {
		name         string
		sender       testbed.DataSender
		receiver     testbed.DataReceiver
		resourceSpec testbed.ResourceSpec
	}{
		{
			"JaegerGRPC",
			datasenders.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewJaegerDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 40,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OpenCensus",
			datasenders.NewOCTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewOCDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 39,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-gRPC",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 20,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-gRPC-gzip",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)).WithCompression("gzip"),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 30,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-HTTP",
			testbed.NewOTLPHTTPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 20,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-HTTP-gzip",
			testbed.NewOTLPHTTPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)).WithCompression("gzip"),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 25,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"SAPM",
			datasenders.NewSapmDataSender(testbed.GetAvailablePort(t)),
			datareceivers.NewSapmDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 32,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"Zipkin",
			datasenders.NewZipkinDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewZipkinDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 80,
				// Raised from 100 to 120 MiB: the Zipkin pipeline legitimately
				// needs more memory at 10k SPS and the old limit made this
				// case flaky (upstream fix #7530).
				ExpectedMaxRAM: 120,
			},
		},
	}

	// All protocol variants run through the same batch processor.
	processors := map[string]string{
		"batch": `
  batch:
`,
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			Scenario10kItemsPerSecond(
				t,
				test.sender,
				test.receiver,
				test.resourceSpec,
				performanceResultsSummary,
				processors,
				nil,
			)
		})
	}
}
// TestTraceNoBackend10kSPS sends 10k spans/second into a collector that has
// no backend attached, once without a memory_limiter and once with a tight
// one, and checks the expected maximum and final RAM for each configuration.
func TestTraceNoBackend10kSPS(t *testing.T) {

	limitProcessors := map[string]string{
		"memory_limiter": `
  memory_limiter:
    check_interval: 100ms
    limit_mib: 20
`,
	}

	noLimitProcessors := map[string]string{}

	var processorsConfig = []processorConfig{
		{
			Name:                "NoMemoryLimit",
			Processor:           noLimitProcessors,
			ExpectedMaxRAM:      190,
			ExpectedMinFinalRAM: 100,
		},
		{
			Name:                "MemoryLimit",
			Processor:           limitProcessors,
			ExpectedMaxRAM:      95,
			ExpectedMinFinalRAM: 50,
		},
	}

	for _, testConf := range processorsConfig {
		t.Run(testConf.Name, func(t *testing.T) {
			ScenarioTestTraceNoBackend10kSPS(
				t,
				testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
				testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
				testbed.ResourceSpec{ExpectedMaxCPU: 60, ExpectedMaxRAM: testConf.ExpectedMaxRAM},
				performanceResultsSummary,
				testConf,
			)
		})
	}
}
// TestTrace1kSPSWithAttrs runs the 1k spans/second scenario with span
// attribute payloads of increasing size and checks CPU/RAM expectations
// per case.
func TestTrace1kSPSWithAttrs(t *testing.T) {
	Scenario1kSPSWithAttrs(t, []string{}, []TestCase{
		// No attributes.
		{
			attrCount:      0,
			attrSizeByte:   0,
			expectedMaxCPU: 30,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// We generate 100 attributes, each with a value length of 50 bytes,
		// so the total size of attribute values is 5000 bytes.
		{
			attrCount:      100,
			attrSizeByte:   50,
			expectedMaxCPU: 120,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// Approx 10 KiB attributes.
		{
			attrCount:      10,
			attrSizeByte:   1000,
			expectedMaxCPU: 100,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// Approx 100 KiB attributes.
		{
			attrCount:      20,
			attrSizeByte:   5000,
			expectedMaxCPU: 250,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
	}, nil, nil)
}
// TestTraceBallast1kSPSWithAttrs repeats the 1k SPS attribute scenarios with
// a 1000 MiB memory_ballast extension enabled; the RAM expectations are
// raised accordingly to cover the ballast.
func TestTraceBallast1kSPSWithAttrs(t *testing.T) {
	ballastExtCfg := `
  memory_ballast:
    size_mib: 1000`
	Scenario1kSPSWithAttrs(t, []string{}, []TestCase{
		// No attributes.
		{
			attrCount:      0,
			attrSizeByte:   0,
			expectedMaxCPU: 53,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      100,
			attrSizeByte:   50,
			expectedMaxCPU: 100,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      10,
			attrSizeByte:   1000,
			expectedMaxCPU: 100,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      20,
			attrSizeByte:   5000,
			expectedMaxCPU: 120,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
	}, nil, map[string]string{"memory_ballast": ballastExtCfg})
}
// TestTraceBallast1kSPSAddAttrs is like TestTraceBallast1kSPSWithAttrs, but
// additionally runs an attributes processor that inserts five fixed
// attributes into every span.
func TestTraceBallast1kSPSAddAttrs(t *testing.T) {
	ballastExtCfg := `
  memory_ballast:
    size_mib: 1000`
	attrProcCfg := `
  attributes:
    actions:
      - key: attrib.key00
        value: 123
        action: insert
      - key: attrib.key01
        value: "a small string for this attribute"
        action: insert
      - key: attrib.key02
        value: true
        action: insert
      - key: region
        value: test-region
        action: insert
      - key: data-center
        value: test-datacenter
        action: insert`
	Scenario1kSPSWithAttrs(
		t,
		[]string{},
		[]TestCase{
			{
				attrCount:      0,
				attrSizeByte:   0,
				expectedMaxCPU: 30,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      100,
				attrSizeByte:   50,
				expectedMaxCPU: 80,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      10,
				attrSizeByte:   1000,
				expectedMaxCPU: 80,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      20,
				attrSizeByte:   5000,
				expectedMaxCPU: 120,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
		},
		map[string]string{"attributes": attrProcCfg},
		map[string]string{"memory_ballast": ballastExtCfg},
	)
}
// verifySingleSpan sends a single span to Collector, waits until the span is forwarded
// and received by MockBackend and calls user-supplied verification functions on
// received span.
// Temporarily, we need two verification functions in order to verify spans in
// new and old format received by MockBackend.
func verifySingleSpan(
	t *testing.T,
	tc *testbed.TestCase,
	serviceName string,
	spanName string,
	verifyReceived func(span pdata.Span),
) {

	// Clear previously received traces.
	tc.MockBackend.ClearReceivedItems()
	startCounter := tc.MockBackend.DataItemsReceived()

	// Send one span with fixed trace/span IDs so the backend receives a
	// single, deterministic item.
	td := pdata.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, serviceName)
	span := rs.InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty()

	span.SetTraceID(idutils.UInt64ToTraceID(0, 1))
	span.SetSpanID(idutils.UInt64ToSpanID(1))
	span.SetName(spanName)

	sender := tc.Sender.(testbed.TraceDataSender)
	require.NoError(t, sender.ConsumeTraces(context.Background(), td))

	// We bypass the load generator in this test, but make sure to increment the
	// counter since it is used in final reports.
	tc.LoadGenerator.IncDataItemsSent()

	// Wait until span is received.
	tc.WaitFor(func() bool { return tc.MockBackend.DataItemsReceived() == startCounter+1 },
		"span received")

	// Verify received span: walk every resource/instrumentation-library/span
	// level, run the user verification on each span, and count them.
	count := 0
	for _, td := range tc.MockBackend.ReceivedTraces {
		rs := td.ResourceSpans()
		for i := 0; i < rs.Len(); i++ {
			ils := rs.At(i).InstrumentationLibrarySpans()
			for j := 0; j < ils.Len(); j++ {
				spans := ils.At(j).Spans()
				for k := 0; k < spans.Len(); k++ {
					verifyReceived(spans.At(k))
					count++
				}
			}
		}
	}
	assert.EqualValues(t, 1, count, "must receive one span")
}
// TestTraceAttributesProcessor starts a collector with an attributes
// processor whose include filter matches specific service and span names,
// then verifies that matching spans get the new attribute and non-matching
// spans do not.
func TestTraceAttributesProcessor(t *testing.T) {
	tests := []struct {
		name     string
		sender   testbed.DataSender
		receiver testbed.DataReceiver
	}{
		{
			"JaegerGRPC",
			datasenders.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewJaegerDataReceiver(testbed.GetAvailablePort(t)),
		},
		{
			"OTLP",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			resultDir, err := filepath.Abs(path.Join("results", t.Name()))
			require.NoError(t, err)

			// Use processor to add attributes to certain spans.
			processors := map[string]string{
				"batch": `
  batch:
`,
				"attributes": `
  attributes:
    include:
      match_type: regexp
      services: ["service-to-add.*"]
      span_names: ["span-to-add-.*"]
    actions:
      - action: insert
        key: "new_attr"
        value: "string value"
`,
			}

			agentProc := testbed.NewChildProcessCollector()
			configStr := createConfigYaml(t, test.sender, test.receiver, resultDir, processors, nil)
			configCleanup, err := agentProc.PrepareConfig(configStr)
			require.NoError(t, err)
			defer configCleanup()

			options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
			dataProvider := testbed.NewPerfTestDataProvider(options)
			tc := testbed.NewTestCase(
				t,
				dataProvider,
				test.sender,
				test.receiver,
				agentProc,
				&testbed.PerfTestValidator{},
				performanceResultsSummary,
			)
			defer tc.Stop()

			tc.StartBackend()
			tc.StartAgent()
			defer tc.StopAgent()

			tc.EnableRecording()
			require.NoError(t, test.sender.Start())

			// Create a span that matches "include" filter.
			spanToInclude := "span-to-add-attr"
			// Create a service name that matches "include" filter.
			nodeToInclude := "service-to-add-attr"

			// verifySpan verifies that attributes was added to the internal data span.
			verifySpan := func(span pdata.Span) {
				require.NotNil(t, span)
				require.Equal(t, span.Attributes().Len(), 1)
				attrVal, ok := span.Attributes().Get("new_attr")
				assert.True(t, ok)
				assert.EqualValues(t, "string value", attrVal.StringVal())
			}

			verifySingleSpan(t, tc, nodeToInclude, spanToInclude, verifySpan)

			// Create a service name that does not match "include" filter.
			nodeToExclude := "service-not-to-add-attr"

			verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span pdata.Span) {
				// Verify attributes was not added to the new internal data span.
				assert.Equal(t, span.Attributes().Len(), 0)
			})

			// Create another span that does not match "include" filter.
			spanToExclude := "span-not-to-add-attr"
			verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span pdata.Span) {
				// Verify attributes was not added to the new internal data span.
				assert.Equal(t, span.Attributes().Len(), 0)
			})
		})
	}
}
Increase memory limit for zipkin testbed test (#7530)
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tests contains test cases. To run the tests go to tests directory and run:
// RUN_TESTBED=1 go test -v
package tests
// This file contains Test functions which initiate the tests. The tests can be either
// coded in this file or use scenarios from perf_scenarios.go.
import (
"context"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/model/pdata"
conventions "go.opentelemetry.io/collector/model/semconv/v1.5.0"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datareceivers"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datasenders"
"github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed"
)
// TestMain is used to initiate setup, execution and tear down of testbed.
// DoTestMain runs the whole test suite and writes aggregated benchmark
// numbers into performanceResultsSummary on teardown.
func TestMain(m *testing.M) {
	testbed.DoTestMain(m, performanceResultsSummary)
}
// TestTrace10kSPS runs the 10k spans/second load scenario once per supported
// trace protocol and checks that the collector stays within the per-protocol
// CPU and RAM expectations.
func TestTrace10kSPS(t *testing.T) {
	tests := []struct {
		name         string
		sender       testbed.DataSender
		receiver     testbed.DataReceiver
		resourceSpec testbed.ResourceSpec
	}{
		{
			"JaegerGRPC",
			datasenders.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewJaegerDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 40,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OpenCensus",
			datasenders.NewOCTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewOCDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 39,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-gRPC",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 20,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-gRPC-gzip",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)).WithCompression("gzip"),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 30,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-HTTP",
			testbed.NewOTLPHTTPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 20,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"OTLP-HTTP-gzip",
			testbed.NewOTLPHTTPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)).WithCompression("gzip"),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 25,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"SAPM",
			datasenders.NewSapmDataSender(testbed.GetAvailablePort(t)),
			datareceivers.NewSapmDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 32,
				ExpectedMaxRAM: 100,
			},
		},
		{
			"Zipkin",
			datasenders.NewZipkinDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewZipkinDataReceiver(testbed.GetAvailablePort(t)),
			testbed.ResourceSpec{
				ExpectedMaxCPU: 80,
				// Zipkin needs a higher memory allowance than the other
				// protocols at 10k SPS.
				ExpectedMaxRAM: 120,
			},
		},
	}

	// All protocol variants run through the same batch processor.
	processors := map[string]string{
		"batch": `
  batch:
`,
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			Scenario10kItemsPerSecond(
				t,
				test.sender,
				test.receiver,
				test.resourceSpec,
				performanceResultsSummary,
				processors,
				nil,
			)
		})
	}
}
// TestTraceNoBackend10kSPS sends 10k spans/second into a collector that has
// no backend attached, once without a memory_limiter and once with a tight
// one, and checks the expected maximum and final RAM for each configuration.
func TestTraceNoBackend10kSPS(t *testing.T) {

	limitProcessors := map[string]string{
		"memory_limiter": `
  memory_limiter:
    check_interval: 100ms
    limit_mib: 20
`,
	}

	noLimitProcessors := map[string]string{}

	var processorsConfig = []processorConfig{
		{
			Name:                "NoMemoryLimit",
			Processor:           noLimitProcessors,
			ExpectedMaxRAM:      190,
			ExpectedMinFinalRAM: 100,
		},
		{
			Name:                "MemoryLimit",
			Processor:           limitProcessors,
			ExpectedMaxRAM:      95,
			ExpectedMinFinalRAM: 50,
		},
	}

	for _, testConf := range processorsConfig {
		t.Run(testConf.Name, func(t *testing.T) {
			ScenarioTestTraceNoBackend10kSPS(
				t,
				testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
				testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
				testbed.ResourceSpec{ExpectedMaxCPU: 60, ExpectedMaxRAM: testConf.ExpectedMaxRAM},
				performanceResultsSummary,
				testConf,
			)
		})
	}
}
// TestTrace1kSPSWithAttrs runs the 1k spans/second scenario with span
// attribute payloads of increasing size and checks CPU/RAM expectations
// per case.
func TestTrace1kSPSWithAttrs(t *testing.T) {
	Scenario1kSPSWithAttrs(t, []string{}, []TestCase{
		// No attributes.
		{
			attrCount:      0,
			attrSizeByte:   0,
			expectedMaxCPU: 30,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// We generate 100 attributes, each with a value length of 50 bytes,
		// so the total size of attribute values is 5000 bytes.
		{
			attrCount:      100,
			attrSizeByte:   50,
			expectedMaxCPU: 120,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// Approx 10 KiB attributes.
		{
			attrCount:      10,
			attrSizeByte:   1000,
			expectedMaxCPU: 100,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
		// Approx 100 KiB attributes.
		{
			attrCount:      20,
			attrSizeByte:   5000,
			expectedMaxCPU: 250,
			expectedMaxRAM: 150,
			resultsSummary: performanceResultsSummary,
		},
	}, nil, nil)
}
// TestTraceBallast1kSPSWithAttrs repeats the 1k SPS attribute scenarios with
// a 1000 MiB memory_ballast extension enabled; the RAM expectations are
// raised accordingly to cover the ballast.
func TestTraceBallast1kSPSWithAttrs(t *testing.T) {
	ballastExtCfg := `
  memory_ballast:
    size_mib: 1000`
	Scenario1kSPSWithAttrs(t, []string{}, []TestCase{
		// No attributes.
		{
			attrCount:      0,
			attrSizeByte:   0,
			expectedMaxCPU: 53,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      100,
			attrSizeByte:   50,
			expectedMaxCPU: 100,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      10,
			attrSizeByte:   1000,
			expectedMaxCPU: 100,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
		{
			attrCount:      20,
			attrSizeByte:   5000,
			expectedMaxCPU: 120,
			expectedMaxRAM: 2200,
			resultsSummary: performanceResultsSummary,
		},
	}, nil, map[string]string{"memory_ballast": ballastExtCfg})
}
// TestTraceBallast1kSPSAddAttrs is like TestTraceBallast1kSPSWithAttrs, but
// additionally runs an attributes processor that inserts five fixed
// attributes into every span.
func TestTraceBallast1kSPSAddAttrs(t *testing.T) {
	ballastExtCfg := `
  memory_ballast:
    size_mib: 1000`
	attrProcCfg := `
  attributes:
    actions:
      - key: attrib.key00
        value: 123
        action: insert
      - key: attrib.key01
        value: "a small string for this attribute"
        action: insert
      - key: attrib.key02
        value: true
        action: insert
      - key: region
        value: test-region
        action: insert
      - key: data-center
        value: test-datacenter
        action: insert`
	Scenario1kSPSWithAttrs(
		t,
		[]string{},
		[]TestCase{
			{
				attrCount:      0,
				attrSizeByte:   0,
				expectedMaxCPU: 30,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      100,
				attrSizeByte:   50,
				expectedMaxCPU: 80,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      10,
				attrSizeByte:   1000,
				expectedMaxCPU: 80,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
			{
				attrCount:      20,
				attrSizeByte:   5000,
				expectedMaxCPU: 120,
				expectedMaxRAM: 2200,
				resultsSummary: performanceResultsSummary,
			},
		},
		map[string]string{"attributes": attrProcCfg},
		map[string]string{"memory_ballast": ballastExtCfg},
	)
}
// verifySingleSpan sends a single span to Collector, waits until the span is forwarded
// and received by MockBackend and calls user-supplied verification functions on
// received span.
// Temporarily, we need two verification functions in order to verify spans in
// new and old format received by MockBackend.
func verifySingleSpan(
	t *testing.T,
	tc *testbed.TestCase,
	serviceName string,
	spanName string,
	verifyReceived func(span pdata.Span),
) {

	// Clear previously received traces.
	tc.MockBackend.ClearReceivedItems()
	startCounter := tc.MockBackend.DataItemsReceived()

	// Send one span with fixed trace/span IDs so the backend receives a
	// single, deterministic item.
	td := pdata.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, serviceName)
	span := rs.InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty()

	span.SetTraceID(idutils.UInt64ToTraceID(0, 1))
	span.SetSpanID(idutils.UInt64ToSpanID(1))
	span.SetName(spanName)

	sender := tc.Sender.(testbed.TraceDataSender)
	require.NoError(t, sender.ConsumeTraces(context.Background(), td))

	// We bypass the load generator in this test, but make sure to increment the
	// counter since it is used in final reports.
	tc.LoadGenerator.IncDataItemsSent()

	// Wait until span is received.
	tc.WaitFor(func() bool { return tc.MockBackend.DataItemsReceived() == startCounter+1 },
		"span received")

	// Verify received span: walk every resource/instrumentation-library/span
	// level, run the user verification on each span, and count them.
	count := 0
	for _, td := range tc.MockBackend.ReceivedTraces {
		rs := td.ResourceSpans()
		for i := 0; i < rs.Len(); i++ {
			ils := rs.At(i).InstrumentationLibrarySpans()
			for j := 0; j < ils.Len(); j++ {
				spans := ils.At(j).Spans()
				for k := 0; k < spans.Len(); k++ {
					verifyReceived(spans.At(k))
					count++
				}
			}
		}
	}
	assert.EqualValues(t, 1, count, "must receive one span")
}
// TestTraceAttributesProcessor starts a collector with an attributes
// processor whose include filter matches specific service and span names,
// then verifies that matching spans get the new attribute and non-matching
// spans do not.
func TestTraceAttributesProcessor(t *testing.T) {
	tests := []struct {
		name     string
		sender   testbed.DataSender
		receiver testbed.DataReceiver
	}{
		{
			"JaegerGRPC",
			datasenders.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			datareceivers.NewJaegerDataReceiver(testbed.GetAvailablePort(t)),
		},
		{
			"OTLP",
			testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
			testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			resultDir, err := filepath.Abs(path.Join("results", t.Name()))
			require.NoError(t, err)

			// Use processor to add attributes to certain spans.
			processors := map[string]string{
				"batch": `
  batch:
`,
				"attributes": `
  attributes:
    include:
      match_type: regexp
      services: ["service-to-add.*"]
      span_names: ["span-to-add-.*"]
    actions:
      - action: insert
        key: "new_attr"
        value: "string value"
`,
			}

			agentProc := testbed.NewChildProcessCollector()
			configStr := createConfigYaml(t, test.sender, test.receiver, resultDir, processors, nil)
			configCleanup, err := agentProc.PrepareConfig(configStr)
			require.NoError(t, err)
			defer configCleanup()

			options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
			dataProvider := testbed.NewPerfTestDataProvider(options)
			tc := testbed.NewTestCase(
				t,
				dataProvider,
				test.sender,
				test.receiver,
				agentProc,
				&testbed.PerfTestValidator{},
				performanceResultsSummary,
			)
			defer tc.Stop()

			tc.StartBackend()
			tc.StartAgent()
			defer tc.StopAgent()

			tc.EnableRecording()
			require.NoError(t, test.sender.Start())

			// Create a span that matches "include" filter.
			spanToInclude := "span-to-add-attr"
			// Create a service name that matches "include" filter.
			nodeToInclude := "service-to-add-attr"

			// verifySpan verifies that attributes was added to the internal data span.
			verifySpan := func(span pdata.Span) {
				require.NotNil(t, span)
				require.Equal(t, span.Attributes().Len(), 1)
				attrVal, ok := span.Attributes().Get("new_attr")
				assert.True(t, ok)
				assert.EqualValues(t, "string value", attrVal.StringVal())
			}

			verifySingleSpan(t, tc, nodeToInclude, spanToInclude, verifySpan)

			// Create a service name that does not match "include" filter.
			nodeToExclude := "service-not-to-add-attr"

			verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span pdata.Span) {
				// Verify attributes was not added to the new internal data span.
				assert.Equal(t, span.Attributes().Len(), 0)
			})

			// Create another span that does not match "include" filter.
			spanToExclude := "span-not-to-add-attr"
			verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span pdata.Span) {
				// Verify attributes was not added to the new internal data span.
				assert.Equal(t, span.Attributes().Len(), 0)
			})
		})
	}
}
|
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin linux freebsd netbsd openbsd android
package vpython
import (
"context"
"os"
"syscall"
"golang.org/x/sys/unix"
"go.chromium.org/luci/vpython/python"
"go.chromium.org/luci/vpython/venv"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/system/environ"
)
// systemSpecificLaunch launches the process described by "cmd" while ensuring
// that the VirtualEnv lock is held throughout its duration (best effort).
//
// On Linux/Mac, we use "execve" to *become* the target process. We need to
// continue to hold the lock for that process, though. We do this by passing it
// as an open file handle to the subprocess.
//
// This can be error-prone, as it places the burden on the subprocess to
// manage the file descriptor.
func systemSpecificLaunch(c context.Context, ve *venv.Env, cl *python.CommandLine, env environ.Env, dir string) error {
	return Exec(c, ve.Interpreter(), cl, env, dir, func() error {
		// Store our lock file descriptor as FD #3 (after #2, STDERR).
		lockFD := ve.LockHandle.LockFile().Fd()
		if lockFD == 3 {
			// "dup2" doesn't change flags if the source and destination file
			// descriptors are the same. Explicitly remove the close-on-exec flag, which
			// Go enables by default.
			//
			// Use the FcntlInt wrapper rather than a raw unix.Syscall: it
			// returns a proper error value (instead of an Errno compared
			// against 0) and keeps compiling after golang.org/x/sys removed
			// unix.Syscall support on some platforms.
			if _, err := unix.FcntlInt(lockFD, unix.F_SETFD, 0); err != nil {
				return errors.Annotate(err, "could not remove close-on-exec for lock file").Err()
			}
		} else {
			// Use "dup2" to copy the file descriptor to #3 slot. This will also clear
			// its flags, including close-on-exec.
			if err := unix.Dup2(int(lockFD), 3); err != nil {
				return errors.Annotate(err, "could not dup2 lock file").Err()
			}
		}
		return nil
	})
}
// execImpl replaces the current process image with argv via execve(2), after
// optionally changing directory and running setupFn. On success it never
// returns. Only the chdir step returns a normal error; once setupFn starts,
// any failure panics, because the process environment may already be
// partially mutated and cannot safely be rolled back.
func execImpl(c context.Context, argv []string, env environ.Env, dir string, setupFn func() error) error {
	// Change directory.
	if dir != "" {
		if err := os.Chdir(dir); err != nil {
			return errors.Annotate(err, "failed to chdir to %q", dir).Err()
		}
	}

	// At this point, ANY ERROR will be fatal (panic). We assume that each
	// operation may permanently alter our runtime environment.
	if setupFn != nil {
		if err := setupFn(); err != nil {
			panic(err)
		}
	}

	if err := syscall.Exec(argv[0], argv, env.Sorted()); err != nil {
		panic(errors.Annotate(err, "failed to execve %q", argv[0]).Err())
	}
	// Exec only returns on error; reaching this line is a logic bug.
	panic("must not return")
}
[vpython] replace unix.Syscall with unix.FcntlInt
This is for
https://chromium-review.googlesource.com/c/infra/infra/+/2387883
that includes change of
https://go.googlesource.com/sys.git/+/6fcdbc0bbc04dcd9e7dc145879ceaf9bf1c6ff03
Change-Id: Icbf8c9196b567a584d35aff06dde1644da93cfb5
Reviewed-on: https://chromium-review.googlesource.com/c/infra/luci/luci-go/+/2389282
Reviewed-by: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
Commit-Queue: Takuto Ikuta <71927f930fca81015e0f496c65cd126ab1a692e3@chromium.org>
Auto-Submit: Takuto Ikuta <71927f930fca81015e0f496c65cd126ab1a692e3@chromium.org>
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin linux freebsd netbsd openbsd android
package vpython
import (
"context"
"os"
"syscall"
"golang.org/x/sys/unix"
"go.chromium.org/luci/vpython/python"
"go.chromium.org/luci/vpython/venv"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/system/environ"
)
// systemSpecificLaunch launches the process described by "cmd" while ensuring
// that the VirtualEnv lock is held throughout its duration (best effort).
//
// On Linux/Mac, we use "execve" to *become* the target process. We need to
// continue to hold the lock for that process, though. We do this by passing it
// as an open file handle to the subprocess.
//
// This can be error-prone, as it places the burden on the subprocess to
// manage the file descriptor.
func systemSpecificLaunch(c context.Context, ve *venv.Env, cl *python.CommandLine, env environ.Env, dir string) error {
	// The setup callback below runs in this process immediately before the
	// execve (see execImpl), so the descriptor changes it makes survive into
	// the new program image.
	return Exec(c, ve.Interpreter(), cl, env, dir, func() error {
		// Store our lock file descriptor as FD #3 (after #2, STDERR).
		lockFD := ve.LockHandle.LockFile().Fd()
		if lockFD == 3 {
			// "dup2" doesn't change flags if the source and destination file
			// descriptors are the same. Explicitly remove the close-on-exec flag, which
			// Go enables by default.
			if _, err := unix.FcntlInt(lockFD, unix.F_SETFD, 0); err != nil {
				return errors.Annotate(err, "could not remove close-on-exec for lock file").Err()
			}
		} else {
			// Use "dup2" to copy the file descriptor to #3 slot. This will also clear
			// its flags, including close-on-exec.
			if err := unix.Dup2(int(lockFD), 3); err != nil {
				return errors.Annotate(err, "could not dup2 lock file").Err()
			}
		}
		return nil
	})
}
// execImpl becomes the target process via execve(2).
//
// The working directory is switched first (a failure there is returned as an
// ordinary error); after that point every failure panics, since setupFn and
// the exec attempt may have irreversibly mutated process state. On success
// this function never returns.
func execImpl(c context.Context, argv []string, env environ.Env, dir string, setupFn func() error) error {
	if dir != "" {
		if err := os.Chdir(dir); err != nil {
			return errors.Annotate(err, "failed to chdir to %q", dir).Err()
		}
	}

	// Past this point, ANY ERROR is fatal: each remaining operation may have
	// permanently altered our runtime environment.
	if setupFn != nil {
		if setupErr := setupFn(); setupErr != nil {
			panic(setupErr)
		}
	}

	// A successful execve replaces this process image; syscall.Exec can only
	// return with a non-nil error.
	err := syscall.Exec(argv[0], argv, env.Sorted())
	if err == nil {
		panic("must not return")
	}
	panic(errors.Annotate(err, "failed to execve %q", argv[0]).Err())
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A package for arbitrary precision arithmetic.
// It implements the following numeric types:
//
// - Natural unsigned integers
// - Integer signed integers
// - Rational rational numbers
//
package bignum
import "fmt"
// ----------------------------------------------------------------------------
// Internal representation
//
// A natural number of the form
//
// x = x[n-1]*B^(n-1) + x[n-2]*B^(n-2) + ... + x[1]*B + x[0]
//
// with 0 <= x[i] < B and 0 <= i < n is stored in a slice of length n,
// with the digits x[i] as the slice elements.
//
// A natural number is normalized if the slice contains no leading 0 digits.
// During arithmetic operations, denormalized values may occur but are
// always normalized before returning the final result. The normalized
// representation of 0 is the empty slice (length = 0).
//
// The operations for all other numeric types are implemented on top of
// the operations for natural numbers.
//
// The base B is chosen as large as possible on a given platform but there
// are a few constraints besides the size of the largest unsigned integer
// type available:
//
// 1) To improve conversion speed between strings and numbers, the base B
// is chosen such that division and multiplication by 10 (for decimal
// string representation) can be done without using extended-precision
// arithmetic. This makes addition, subtraction, and conversion routines
// twice as fast. It requires a ``buffer'' of 4 bits per operand digit.
// That is, the size of B must be 4 bits smaller than the size of the
// type (digit) in which these operations are performed. Having this
// buffer also allows for trivial (single-bit) carry computation in
// addition and subtraction (optimization suggested by Ken Thompson).
//
// 2) Long division requires extended-precision (2-digit) division per digit.
// Instead of sacrificing the largest base type for all other operations,
// for division the operands are unpacked into ``half-digits'', and the
// results are packed again. For faster unpacking/packing, the base size
// in bits must be even.
type (
digit uint64;
digit2 uint32; // half-digits for division
)
const (
logW = 64;
logH = 4; // bits for a hex digit (= small number)
logB = logW - logH; // largest bit-width available
// half-digits
_W2 = logB / 2; // width
_B2 = 1 << _W2; // base
_M2 = _B2 - 1; // mask
// full digits
_W = _W2 * 2; // width
_B = 1 << _W; // base
_M = _B - 1; // mask
)
// ----------------------------------------------------------------------------
// Support functions
func assert(p bool) {
if !p {
panic("assert failed");
}
}
func isSmall(x digit) bool {
return x < 1<<logH;
}
// For debugging. Keep around.
/*
func dump(x []digit) {
print("[", len(x), "]");
for i := len(x) - 1; i >= 0; i-- {
print(" ", x[i]);
}
println();
}
*/
// ----------------------------------------------------------------------------
// Natural numbers
// Natural represents an unsigned integer value of arbitrary precision.
//
type Natural []digit;
// Common small values - allocate once.
var nat [16]Natural;
func init() {
nat[0] = Natural{}; // zero has no digits
for i := 1; i < len(nat); i++ {
nat[i] = Natural{digit(i)};
}
}
// Nat creates a small natural number with value x.
//
func Nat(x uint64) Natural {
// avoid allocation for common small values
if x < uint64(len(nat)) {
return nat[x];
}
// single-digit values
if x < _B {
return Natural{digit(x)};
}
// compute number of digits required to represent x
// (this is usually 1 or 2, but the algorithm works
// for any base)
n := 0;
for t := x; t > 0; t >>= _W {
n++;
}
// split x into digits
z := make(Natural, n);
for i := 0; i < n; i++ {
z[i] = digit(x & _M);
x >>= _W;
}
return z;
}
// Value returns the lowest 64bits of x.
//
func (x Natural) Value() uint64 {
// single-digit values
n := len(x);
switch n {
case 0: return 0;
case 1: return uint64(x[0]);
}
// multi-digit values
// (this is usually 1 or 2, but the algorithm works
// for any base)
z := uint64(0);
s := uint(0);
for i := 0; i < n && s < 64; i++ {
z += uint64(x[i]) << s;
s += _W;
}
return z;
}
// Predicates
// IsEven returns true iff x is divisible by 2.
//
func (x Natural) IsEven() bool {
return len(x) == 0 || x[0]&1 == 0;
}
// IsOdd returns true iff x is not divisible by 2.
//
func (x Natural) IsOdd() bool {
return len(x) > 0 && x[0]&1 != 0;
}
// IsZero returns true iff x == 0.
//
func (x Natural) IsZero() bool {
return len(x) == 0;
}
// Operations
//
// Naming conventions
//
// c carry
// x, y operands
// z result
// n, m len(x), len(y)
// normalize trims leading zero digits so the result satisfies the
// normalization invariant (zero is the empty slice). The slice header is
// re-sliced in place; the backing array is shared with the argument.
func normalize(x Natural) Natural {
	n := len(x);
	for n > 0 && x[n - 1] == 0 { n-- }
	if n < len(x) {
		x = x[0 : n]; // trim leading 0's
	}
	return x;
}
// Add returns the sum x + y.
//
func (x Natural) Add(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
return y.Add(x);
}
c := digit(0);
z := make(Natural, n + 1);
i := 0;
for i < m {
t := c + x[i] + y[i];
c, z[i] = t>>_W, t&_M;
i++;
}
for i < n {
t := c + x[i];
c, z[i] = t>>_W, t&_M;
i++;
}
if c != 0 {
z[i] = c;
i++;
}
return z[0 : i];
}
// Sub returns the difference x - y for x >= y.
// If x < y, an underflow run-time error occurs (use Cmp to test if x >= y).
//
func (x Natural) Sub(y Natural) Natural {
	n := len(x);
	m := len(y);
	if n < m {
		// A shorter (normalized) x is necessarily smaller than y.
		// NOTE(review): equal-length operands with x < y are NOT caught
		// here — the final borrow is silently dropped. Verify callers
		// guarantee x >= y as the doc comment requires.
		panic("underflow")
	}
	// c is the running borrow: 0 or all-ones (== -1 mod 2^64), produced by
	// sign-extending the wrapped subtraction with an arithmetic shift.
	c := digit(0);
	z := make(Natural, n);
	i := 0;
	for i < m {
		t := c + x[i] - y[i];
		c, z[i] = digit(int64(t)>>_W), t&_M; // requires arithmetic shift!
		i++;
	}
	// Propagate the borrow through the remaining high digits of x.
	for i < n {
		t := c + x[i];
		c, z[i] = digit(int64(t)>>_W), t&_M; // requires arithmetic shift!
		i++;
	}
	for i > 0 && z[i - 1] == 0 { // normalize
		i--;
	}
	return z[0 : i];
}
// Returns c = x*y div B, z = x*y mod B.
//
func mul11(x, y digit) (digit, digit) {
	// Split x and y into 2 sub-digits each,
	// multiply the digits separately while avoiding overflow,
	// and return the product as two separate digits.
	// This code also works for non-even bit widths W
	// which is why there are separate constants below
	// for half-digits.
	const W2 = (_W + 1)/2; // sub-digit width, rounded up
	const DW = W2*2 - _W; // 0 or 1
	const B2 = 1<<W2;
	// NOTE(review): M2 is defined from the package-level _B2; it equals
	// B2 - 1 only when _W is even (true for the current constants) — confirm
	// if the constants ever change.
	const M2 = _B2 - 1;
	// split x and y into sub-digits
	// x = (x1*B2 + x0)
	// y = (y1*B2 + y0)
	x1, x0 := x>>W2, x&M2;
	y1, y0 := y>>W2, y&M2;
	// x*y = t2*B2^2 + t1*B2 + t0
	t0 := x0*y0;
	t1 := x1*y0 + x0*y1;
	t2 := x1*y1;
	// compute the result digits but avoid overflow
	// z = z1*B + z0 = x*y
	z0 := (t1<<W2 + t0)&_M;
	z1 := t2<<DW + (t1 + t0>>W2)>>(_W-W2);
	return z1, z0;
}
// Mul returns the product x * y.
//
func (x Natural) Mul(y Natural) Natural {
n := len(x);
m := len(y);
z := make(Natural, n + m);
for j := 0; j < m; j++ {
d := y[j];
if d != 0 {
c := digit(0);
for i := 0; i < n; i++ {
// z[i+j] += c + x[i]*d;
z1, z0 := mul11(x[i], d);
t := c + z[i+j] + z0;
c, z[i+j] = t>>_W, t&_M;
c += z1;
}
z[n+j] = c;
}
}
return normalize(z);
}
// DivMod needs multi-precision division, which is not available if digit
// is already using the largest uint size. Instead, unpack each operand
// into operands with twice as many digits of half the size (digit2), do
// DivMod, and then pack the results again.
func unpack(x Natural) []digit2 {
n := len(x);
z := make([]digit2, n*2 + 1); // add space for extra digit (used by DivMod)
for i := 0; i < n; i++ {
t := x[i];
z[i*2] = digit2(t & _M2);
z[i*2 + 1] = digit2(t >> _W2 & _M2);
}
// normalize result
k := 2*n;
for k > 0 && z[k - 1] == 0 { k-- }
return z[0 : k]; // trim leading 0's
}
func pack(x []digit2) Natural {
n := (len(x) + 1) / 2;
z := make(Natural, n);
if len(x) & 1 == 1 {
// handle odd len(x)
n--;
z[n] = digit(x[n*2]);
}
for i := 0; i < n; i++ {
z[i] = digit(x[i*2 + 1]) << _W2 | digit(x[i*2]);
}
return normalize(z);
}
func mul1(z, x []digit2, y digit2) digit2 {
n := len(x);
c := digit(0);
f := digit(y);
for i := 0; i < n; i++ {
t := c + digit(x[i])*f;
c, z[i] = t>>_W2, digit2(t&_M2);
}
return digit2(c);
}
func div1(z, x []digit2, y digit2) digit2 {
n := len(x);
c := digit(0);
d := digit(y);
for i := n-1; i >= 0; i-- {
t := c*_B2 + digit(x[i]);
c, z[i] = t%d, digit2(t/d);
}
return digit2(c);
}
// divmod returns q and r with x = y*q + r and 0 <= r < y.
// x and y are destroyed in the process.
//
// The algorithm used here is based on 1). 2) describes the same algorithm
// in C. A discussion and summary of the relevant theorems can be found in
// 3). 3) also describes an easier way to obtain the trial digit - however
// it relies on triple-precision arithmetic which is why Knuth's method is
// used here.
//
// 1) D. Knuth, The Art of Computer Programming. Volume 2. Seminumerical
// Algorithms. Addison-Wesley, Reading, 1969.
// (Algorithm D, Sec. 4.3.1)
//
// 2) Henry S. Warren, Jr., Hacker's Delight. Addison-Wesley, 2003.
// (9-2 Multiword Division, p.140ff)
//
// 3) P. Brinch Hansen, ``Multiple-length division revisited: A tour of the
// minefield''. Software - Practice and Experience 24, (June 1994),
// 579-601. John Wiley & Sons, Ltd.
func divmod(x, y []digit2) ([]digit2, []digit2) {
	n := len(x);
	m := len(y);
	if m == 0 {
		panic("division by zero");
	}
	// x needs room for one extra (zero) digit: the quotient is built in
	// place above the remainder.
	assert(n+1 <= cap(x)); // space for one extra digit
	x = x[0 : n + 1];
	assert(x[n] == 0);
	if m == 1 {
		// division by single digit
		// result is shifted left by 1 in place!
		x[0] = div1(x[1 : n+1], x[0 : n], y[0]);
	} else if m > n {
		// y > x => quotient = 0, remainder = x
		// TODO in this case we shouldn't even unpack x and y
		m = n;
	} else {
		// general case
		assert(2 <= m && m <= n);
		// Scale x and y so that the leading digit of y is >= _B2/2;
		// per Knuth this keeps the trial digit within 2 of the true digit.
		// TODO Instead of multiplying, it would be sufficient to
		// shift y such that the normalization condition is
		// satisfied (as done in Hacker's Delight).
		f := _B2 / (digit(y[m-1]) + 1);
		if f != 1 {
			mul1(x, x, digit2(f));
			mul1(y, y, digit2(f));
		}
		assert(_B2/2 <= y[m-1] && y[m-1] < _B2); // incorrect scaling
		y1, y2 := digit(y[m-1]), digit(y[m-2]);
		// NOTE(review): the original also computed d2 := y1<<_W2 + y2 here,
		// but never used it — removed ("d2 declared and not used" is a
		// compile error in Go).
		for i := n-m; i >= 0; i-- {
			k := i+m;
			// compute trial digit (Knuth)
			var q digit;
			{
				x0, x1, x2 := digit(x[k]), digit(x[k-1]), digit(x[k-2]);
				if x0 != y1 {
					q = (x0<<_W2 + x1)/y1;
				} else {
					q = _B2 - 1;
				}
				// Decrement q while the 2-digit check shows it is still
				// too large (only a few iterations after normalization).
				for y2*q > (x0<<_W2 + x1 - y1*q)<<_W2 + x2 {
					q--
				}
			}
			// subtract y*q
			c := digit(0);
			for j := 0; j < m; j++ {
				t := c + digit(x[i+j]) - digit(y[j])*q;
				c, x[i+j] = digit(int64(t) >> _W2), digit2(t & _M2); // requires arithmetic shift!
			}
			// correct if trial digit was too large
			if c + digit(x[k]) != 0 {
				// add y
				c := digit(0);
				for j := 0; j < m; j++ {
					t := c + digit(x[i+j]) + digit(y[j]);
					c, x[i+j] = t >> _W2, digit2(t & _M2)
				}
				assert(c + digit(x[k]) == 0);
				// correct trial digit
				q--;
			}
			x[k] = digit2(q);
		}
		// undo normalization for remainder
		if f != 1 {
			c := div1(x[0 : m], x[0 : m], digit2(f));
			assert(c == 0);
		}
	}
	// Quotient occupies x[m : n+1]; remainder occupies x[0 : m].
	return x[m : n+1], x[0 : m];
}
// Div returns the quotient q = x / y for y > 0,
// with x = y*q + r and 0 <= r < y.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) Div(y Natural) Natural {
	// Only the quotient is needed; discard the remainder with the blank
	// identifier. (The original bound it to a variable r that was never
	// used — "declared and not used" is a compile error in Go.)
	q, _ := divmod(unpack(x), unpack(y));
	return pack(q);
}
// Mod returns the modulus r of the division x / y for y > 0,
// with x = y*q + r and 0 <= r < y.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) Mod(y Natural) Natural {
	// Only the remainder is needed; discard the quotient with the blank
	// identifier. (The original bound it to a variable q that was never
	// used — "declared and not used" is a compile error in Go.)
	_, r := divmod(unpack(x), unpack(y));
	return pack(r);
}
// DivMod returns the pair (x.Div(y), x.Mod(y)) for y > 0.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) DivMod(y Natural) (Natural, Natural) {
q, r := divmod(unpack(x), unpack(y));
return pack(q), pack(r);
}
func shl(z, x []digit, s uint) digit {
assert(s <= _W);
n := len(x);
c := digit(0);
for i := 0; i < n; i++ {
c, z[i] = x[i] >> (_W-s), x[i] << s & _M | c;
}
return c;
}
// Shl implements ``shift left'' x << s. It returns x * 2^s.
//
func (x Natural) Shl(s uint) Natural {
n := uint(len(x));
m := n + s/_W;
z := make(Natural, m+1);
z[m] = shl(z[m-n : m], x, s%_W);
return normalize(z);
}
func shr(z, x []digit, s uint) digit {
assert(s <= _W);
n := len(x);
c := digit(0);
for i := n - 1; i >= 0; i-- {
c, z[i] = x[i] << (_W-s) & _M, x[i] >> s | c;
}
return c;
}
// Shr implements ``shift right'' x >> s. It returns x / 2^s.
//
func (x Natural) Shr(s uint) Natural {
n := uint(len(x));
m := n - s/_W;
if m > n { // check for underflow
m = 0;
}
z := make(Natural, m);
shr(z, x[n-m : n], s%_W);
return normalize(z);
}
// And returns the ``bitwise and'' x & y for the 2's-complement representation of x and y.
//
func (x Natural) And(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
return y.And(x);
}
z := make(Natural, m);
for i := 0; i < m; i++ {
z[i] = x[i] & y[i];
}
// upper bits are 0
return normalize(z);
}
func copy(z, x []digit) {
for i, e := range x {
z[i] = e
}
}
// AndNot returns the ``bitwise clear'' x &^ y for the 2's-complement representation of x and y.
//
func (x Natural) AndNot(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
m = n;
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] &^ y[i];
}
copy(z[m : n], x[m : n]);
return normalize(z);
}
// Or returns the ``bitwise or'' x | y for the 2's-complement representation of x and y.
//
func (x Natural) Or(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
return y.Or(x);
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] | y[i];
}
copy(z[m : n], x[m : n]);
return z;
}
// Xor returns the ``bitwise exclusive or'' x ^ y for the 2's-complement representation of x and y.
//
func (x Natural) Xor(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
return y.Xor(x);
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] ^ y[i];
}
copy(z[m : n], x[m : n]);
return normalize(z);
}
// Cmp compares x and y. The result is an int value
//
// < 0 if x < y
// == 0 if x == y
// > 0 if x > y
//
func (x Natural) Cmp(y Natural) int {
	n := len(x);
	m := len(y);
	// Operands are normalized (no leading zero digits), so a length
	// difference decides the comparison and n - m already has the
	// correct sign. The n == 0 case also returns here (both empty => 0).
	if n != m || n == 0 {
		return n - m;
	}
	// Equal lengths: scan from the most significant digit down to the
	// first position where the operands differ (or position 0).
	i := n - 1;
	for i > 0 && x[i] == y[i] { i--; }
	d := 0;
	switch {
	case x[i] < y[i]: d = -1;
	case x[i] > y[i]: d = 1;
	}
	return d;
}
// log2 computes the binary logarithm of x for x > 0.
// The result is the integer n for which 2^n <= x < 2^(n+1).
// If x == 0 a run-time error occurs.
//
func log2(x uint64) uint {
assert(x > 0);
n := uint(0);
for x > 0 {
x >>= 1;
n++;
}
return n - 1;
}
// Log2 computes the binary logarithm of x for x > 0.
// The result is the integer n for which 2^n <= x < 2^(n+1).
// If x == 0 a run-time error occurs.
//
func (x Natural) Log2() uint {
n := len(x);
if n > 0 {
return (uint(n) - 1)*_W + log2(uint64(x[n - 1]));
}
panic("Log2(0)");
}
// Computes x = x div d in place (modifies x) for small d's.
// Returns updated x and x mod d.
//
func divmod1(x Natural, d digit) (Natural, digit) {
assert(0 < d && isSmall(d - 1));
c := digit(0);
for i := len(x) - 1; i >= 0; i-- {
t := c<<_W + x[i];
c, x[i] = t%d, t/d;
}
return normalize(x), c;
}
// ToString converts x to a string for a given base, with 2 <= base <= 16.
//
func (x Natural) ToString(base uint) string {
if len(x) == 0 {
return "0";
}
// allocate buffer for conversion
assert(2 <= base && base <= 16);
n := (x.Log2() + 1) / log2(uint64(base)) + 1; // +1: round up
s := make([]byte, n);
// don't destroy x
t := make(Natural, len(x));
copy(t, x);
// convert
i := n;
for !t.IsZero() {
i--;
var d digit;
t, d = divmod1(t, digit(base));
s[i] = "0123456789abcdef"[d];
};
return string(s[i : n]);
}
// String converts x to its decimal string representation.
// x.String() is the same as x.ToString(10).
//
func (x Natural) String() string {
return x.ToString(10);
}
// fmtbase maps a fmt verb to its numeric base: 'b' -> 2, 'o' -> 8,
// 'x' -> 16; every other verb defaults to decimal (10).
func fmtbase(c int) uint {
	if c == 'b' {
		return 2
	}
	if c == 'o' {
		return 8
	}
	if c == 'x' {
		return 16
	}
	return 10
}
// Format is a support routine for fmt.Formatter. It accepts
// the formats 'b' (binary), 'o' (octal), and 'x' (hexadecimal).
//
func (x Natural) Format(h fmt.State, c int) {
fmt.Fprintf(h, "%s", x.ToString(fmtbase(c)));
}
func hexvalue(ch byte) uint {
d := uint(1 << logH);
switch {
case '0' <= ch && ch <= '9': d = uint(ch - '0');
case 'a' <= ch && ch <= 'f': d = uint(ch - 'a') + 10;
case 'A' <= ch && ch <= 'F': d = uint(ch - 'A') + 10;
}
return d;
}
// Computes x = x*d + c for small d's.
//
func muladd1(x Natural, d, c digit) Natural {
assert(isSmall(d-1) && isSmall(c));
n := len(x);
z := make(Natural, n + 1);
for i := 0; i < n; i++ {
t := c + x[i]*d;
c, z[i] = t>>_W, t&_M;
}
z[n] = c;
return normalize(z);
}
// NatFromString returns the natural number corresponding to the
// longest possible prefix of s representing a natural number in a
// given conversion base, the actual conversion base used, and the
// prefix length. The syntax of natural numbers follows the syntax
// of unsigned integer literals in Go.
//
// If the base argument is 0, the string prefix determines the actual
// conversion base. A prefix of ``0x'' or ``0X'' selects base 16; the
// ``0'' prefix selects base 8. Otherwise the selected base is 10.
//
func NatFromString(s string, base uint) (Natural, uint, int) {
// determine base if necessary
i, n := 0, len(s);
if base == 0 {
base = 10;
if n > 0 && s[0] == '0' {
if n > 1 && (s[1] == 'x' || s[1] == 'X') {
base, i = 16, 2;
} else {
base, i = 8, 1;
}
}
}
// convert string
assert(2 <= base && base <= 16);
x := nat[0];
for ; i < n; i++ {
d := hexvalue(s[i]);
if d < base {
x = muladd1(x, digit(base), digit(d));
} else {
break;
}
}
return x, base, i;
}
// Natural number functions
func pop1(x digit) uint {
n := uint(0);
for x != 0 {
x &= x-1;
n++;
}
return n;
}
// Pop computes the ``population count'' of (the number of 1 bits in) x.
//
func (x Natural) Pop() uint {
n := uint(0);
for i := len(x) - 1; i >= 0; i-- {
n += pop1(x[i]);
}
return n;
}
// Pow computes x to the power of n.
//
func (xp Natural) Pow(n uint) Natural {
z := nat[1];
x := xp;
for n > 0 {
// z * x^n == x^n0
if n&1 == 1 {
z = z.Mul(x);
}
x, n = x.Mul(x), n/2;
}
return z;
}
// MulRange computes the product of all the unsigned integers
// in the range [a, b] inclusively.
//
func MulRange(a, b uint) Natural {
switch {
case a > b: return nat[1];
case a == b: return Nat(uint64(a));
case a + 1 == b: return Nat(uint64(a)).Mul(Nat(uint64(b)));
}
m := (a + b)>>1;
assert(a <= m && m < b);
return MulRange(a, m).Mul(MulRange(m + 1, b));
}
// Fact computes the factorial of n (Fact(n) == MulRange(2, n)).
//
func Fact(n uint) Natural {
	// Using MulRange() instead of the basic for-loop
	// leads to faster factorial computation: its balanced recursive
	// splitting multiplies operands of similar size.
	return MulRange(2, n);
}
// Binomial computes the binomial coefficient of (n, k).
//
func Binomial(n, k uint) Natural {
return MulRange(n-k+1, n).Div(MulRange(1, k));
}
// Gcd computes the gcd of x and y.
//
func (x Natural) Gcd(y Natural) Natural {
// Euclidean algorithm.
a, b := x, y;
for !b.IsZero() {
a, b = b, a.Mod(b);
}
return a;
}
adjust comment for better godoc output
R=rsc
DELTA=3 (0 added, 0 deleted, 3 changed)
OCL=31944
CL=31950
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A package for arbitrary precision arithmetic.
// It implements the following numeric types:
//
// - Natural unsigned integers
// - Integer signed integers
// - Rational rational numbers
//
package bignum
import "fmt"
// ----------------------------------------------------------------------------
// Internal representation
//
// A natural number of the form
//
// x = x[n-1]*B^(n-1) + x[n-2]*B^(n-2) + ... + x[1]*B + x[0]
//
// with 0 <= x[i] < B and 0 <= i < n is stored in a slice of length n,
// with the digits x[i] as the slice elements.
//
// A natural number is normalized if the slice contains no leading 0 digits.
// During arithmetic operations, denormalized values may occur but are
// always normalized before returning the final result. The normalized
// representation of 0 is the empty slice (length = 0).
//
// The operations for all other numeric types are implemented on top of
// the operations for natural numbers.
//
// The base B is chosen as large as possible on a given platform but there
// are a few constraints besides the size of the largest unsigned integer
// type available:
//
// 1) To improve conversion speed between strings and numbers, the base B
// is chosen such that division and multiplication by 10 (for decimal
// string representation) can be done without using extended-precision
// arithmetic. This makes addition, subtraction, and conversion routines
// twice as fast. It requires a ``buffer'' of 4 bits per operand digit.
// That is, the size of B must be 4 bits smaller than the size of the
// type (digit) in which these operations are performed. Having this
// buffer also allows for trivial (single-bit) carry computation in
// addition and subtraction (optimization suggested by Ken Thompson).
//
// 2) Long division requires extended-precision (2-digit) division per digit.
// Instead of sacrificing the largest base type for all other operations,
// for division the operands are unpacked into ``half-digits'', and the
// results are packed again. For faster unpacking/packing, the base size
// in bits must be even.
type (
digit uint64;
digit2 uint32; // half-digits for division
)
const (
logW = 64;
logH = 4; // bits for a hex digit (= small number)
logB = logW - logH; // largest bit-width available
// half-digits
_W2 = logB / 2; // width
_B2 = 1 << _W2; // base
_M2 = _B2 - 1; // mask
// full digits
_W = _W2 * 2; // width
_B = 1 << _W; // base
_M = _B - 1; // mask
)
// ----------------------------------------------------------------------------
// Support functions
func assert(p bool) {
if !p {
panic("assert failed");
}
}
func isSmall(x digit) bool {
return x < 1<<logH;
}
// For debugging. Keep around.
/*
func dump(x []digit) {
print("[", len(x), "]");
for i := len(x) - 1; i >= 0; i-- {
print(" ", x[i]);
}
println();
}
*/
// ----------------------------------------------------------------------------
// Natural numbers
// Natural represents an unsigned integer value of arbitrary precision.
//
type Natural []digit;
// Common small values - allocate once.
var nat [16]Natural;
func init() {
nat[0] = Natural{}; // zero has no digits
for i := 1; i < len(nat); i++ {
nat[i] = Natural{digit(i)};
}
}
// Nat creates a small natural number with value x.
//
func Nat(x uint64) Natural {
// avoid allocation for common small values
if x < uint64(len(nat)) {
return nat[x];
}
// single-digit values
if x < _B {
return Natural{digit(x)};
}
// compute number of digits required to represent x
// (this is usually 1 or 2, but the algorithm works
// for any base)
n := 0;
for t := x; t > 0; t >>= _W {
n++;
}
// split x into digits
z := make(Natural, n);
for i := 0; i < n; i++ {
z[i] = digit(x & _M);
x >>= _W;
}
return z;
}
// Value returns the lowest 64bits of x.
//
func (x Natural) Value() uint64 {
// single-digit values
n := len(x);
switch n {
case 0: return 0;
case 1: return uint64(x[0]);
}
// multi-digit values
// (this is usually 1 or 2, but the algorithm works
// for any base)
z := uint64(0);
s := uint(0);
for i := 0; i < n && s < 64; i++ {
z += uint64(x[i]) << s;
s += _W;
}
return z;
}
// Predicates
// IsEven returns true iff x is divisible by 2.
//
func (x Natural) IsEven() bool {
return len(x) == 0 || x[0]&1 == 0;
}
// IsOdd returns true iff x is not divisible by 2.
//
func (x Natural) IsOdd() bool {
return len(x) > 0 && x[0]&1 != 0;
}
// IsZero returns true iff x == 0.
//
func (x Natural) IsZero() bool {
return len(x) == 0;
}
// Operations
//
// Naming conventions
//
// c carry
// x, y operands
// z result
// n, m len(x), len(y)
func normalize(x Natural) Natural {
n := len(x);
for n > 0 && x[n - 1] == 0 { n-- }
if n < len(x) {
x = x[0 : n]; // trim leading 0's
}
return x;
}
// Add returns the sum x + y.
//
func (x Natural) Add(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
return y.Add(x);
}
c := digit(0);
z := make(Natural, n + 1);
i := 0;
for i < m {
t := c + x[i] + y[i];
c, z[i] = t>>_W, t&_M;
i++;
}
for i < n {
t := c + x[i];
c, z[i] = t>>_W, t&_M;
i++;
}
if c != 0 {
z[i] = c;
i++;
}
return z[0 : i];
}
// Sub returns the difference x - y for x >= y.
// If x < y, an underflow run-time error occurs (use Cmp to test if x >= y).
//
func (x Natural) Sub(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
panic("underflow")
}
c := digit(0);
z := make(Natural, n);
i := 0;
for i < m {
t := c + x[i] - y[i];
c, z[i] = digit(int64(t)>>_W), t&_M; // requires arithmetic shift!
i++;
}
for i < n {
t := c + x[i];
c, z[i] = digit(int64(t)>>_W), t&_M; // requires arithmetic shift!
i++;
}
for i > 0 && z[i - 1] == 0 { // normalize
i--;
}
return z[0 : i];
}
// Returns c = x*y div B, z = x*y mod B.
//
func mul11(x, y digit) (digit, digit) {
// Split x and y into 2 sub-digits each,
// multiply the digits separately while avoiding overflow,
// and return the product as two separate digits.
// This code also works for non-even bit widths W
// which is why there are separate constants below
// for half-digits.
const W2 = (_W + 1)/2;
const DW = W2*2 - _W; // 0 or 1
const B2 = 1<<W2;
const M2 = _B2 - 1;
// split x and y into sub-digits
// x = (x1*B2 + x0)
// y = (y1*B2 + y0)
x1, x0 := x>>W2, x&M2;
y1, y0 := y>>W2, y&M2;
// x*y = t2*B2^2 + t1*B2 + t0
t0 := x0*y0;
t1 := x1*y0 + x0*y1;
t2 := x1*y1;
// compute the result digits but avoid overflow
// z = z1*B + z0 = x*y
z0 := (t1<<W2 + t0)&_M;
z1 := t2<<DW + (t1 + t0>>W2)>>(_W-W2);
return z1, z0;
}
// Mul returns the product x * y.
//
func (x Natural) Mul(y Natural) Natural {
n := len(x);
m := len(y);
z := make(Natural, n + m);
for j := 0; j < m; j++ {
d := y[j];
if d != 0 {
c := digit(0);
for i := 0; i < n; i++ {
// z[i+j] += c + x[i]*d;
z1, z0 := mul11(x[i], d);
t := c + z[i+j] + z0;
c, z[i+j] = t>>_W, t&_M;
c += z1;
}
z[n+j] = c;
}
}
return normalize(z);
}
// DivMod needs multi-precision division, which is not available if digit
// is already using the largest uint size. Instead, unpack each operand
// into operands with twice as many digits of half the size (digit2), do
// DivMod, and then pack the results again.
func unpack(x Natural) []digit2 {
n := len(x);
z := make([]digit2, n*2 + 1); // add space for extra digit (used by DivMod)
for i := 0; i < n; i++ {
t := x[i];
z[i*2] = digit2(t & _M2);
z[i*2 + 1] = digit2(t >> _W2 & _M2);
}
// normalize result
k := 2*n;
for k > 0 && z[k - 1] == 0 { k-- }
return z[0 : k]; // trim leading 0's
}
func pack(x []digit2) Natural {
n := (len(x) + 1) / 2;
z := make(Natural, n);
if len(x) & 1 == 1 {
// handle odd len(x)
n--;
z[n] = digit(x[n*2]);
}
for i := 0; i < n; i++ {
z[i] = digit(x[i*2 + 1]) << _W2 | digit(x[i*2]);
}
return normalize(z);
}
func mul1(z, x []digit2, y digit2) digit2 {
n := len(x);
c := digit(0);
f := digit(y);
for i := 0; i < n; i++ {
t := c + digit(x[i])*f;
c, z[i] = t>>_W2, digit2(t&_M2);
}
return digit2(c);
}
func div1(z, x []digit2, y digit2) digit2 {
n := len(x);
c := digit(0);
d := digit(y);
for i := n-1; i >= 0; i-- {
t := c*_B2 + digit(x[i]);
c, z[i] = t%d, digit2(t/d);
}
return digit2(c);
}
// divmod returns q and r with x = y*q + r and 0 <= r < y.
// x and y are destroyed in the process.
//
// The algorithm used here is based on 1). 2) describes the same algorithm
// in C. A discussion and summary of the relevant theorems can be found in
// 3). 3) also describes an easier way to obtain the trial digit - however
// it relies on triple-precision arithmetic which is why Knuth's method is
// used here.
//
// 1) D. Knuth, The Art of Computer Programming. Volume 2. Seminumerical
// Algorithms. Addison-Wesley, Reading, 1969.
// (Algorithm D, Sec. 4.3.1)
//
// 2) Henry S. Warren, Jr., Hacker's Delight. Addison-Wesley, 2003.
// (9-2 Multiword Division, p.140ff)
//
// 3) P. Brinch Hansen, ``Multiple-length division revisited: A tour of the
// minefield''. Software - Practice and Experience 24, (June 1994),
// 579-601. John Wiley & Sons, Ltd.
func divmod(x, y []digit2) ([]digit2, []digit2) {
n := len(x);
m := len(y);
if m == 0 {
panic("division by zero");
}
assert(n+1 <= cap(x)); // space for one extra digit
x = x[0 : n + 1];
assert(x[n] == 0);
if m == 1 {
// division by single digit
// result is shifted left by 1 in place!
x[0] = div1(x[1 : n+1], x[0 : n], y[0]);
} else if m > n {
// y > x => quotient = 0, remainder = x
// TODO in this case we shouldn't even unpack x and y
m = n;
} else {
// general case
assert(2 <= m && m <= n);
// normalize x and y
// TODO Instead of multiplying, it would be sufficient to
// shift y such that the normalization condition is
// satisfied (as done in Hacker's Delight).
f := _B2 / (digit(y[m-1]) + 1);
if f != 1 {
mul1(x, x, digit2(f));
mul1(y, y, digit2(f));
}
assert(_B2/2 <= y[m-1] && y[m-1] < _B2); // incorrect scaling
y1, y2 := digit(y[m-1]), digit(y[m-2]);
d2 := digit(y1)<<_W2 + digit(y2);
for i := n-m; i >= 0; i-- {
k := i+m;
// compute trial digit (Knuth)
var q digit;
{ x0, x1, x2 := digit(x[k]), digit(x[k-1]), digit(x[k-2]);
if x0 != y1 {
q = (x0<<_W2 + x1)/y1;
} else {
q = _B2 - 1;
}
for y2*q > (x0<<_W2 + x1 - y1*q)<<_W2 + x2 {
q--
}
}
// subtract y*q
c := digit(0);
for j := 0; j < m; j++ {
t := c + digit(x[i+j]) - digit(y[j])*q;
c, x[i+j] = digit(int64(t) >> _W2), digit2(t & _M2); // requires arithmetic shift!
}
// correct if trial digit was too large
if c + digit(x[k]) != 0 {
// add y
c := digit(0);
for j := 0; j < m; j++ {
t := c + digit(x[i+j]) + digit(y[j]);
c, x[i+j] = t >> _W2, digit2(t & _M2)
}
assert(c + digit(x[k]) == 0);
// correct trial digit
q--;
}
x[k] = digit2(q);
}
// undo normalization for remainder
if f != 1 {
c := div1(x[0 : m], x[0 : m], digit2(f));
assert(c == 0);
}
}
return x[m : n+1], x[0 : m];
}
// Div returns the quotient q = x / y for y > 0,
// with x = y*q + r and 0 <= r < y.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) Div(y Natural) Natural {
	// Only the quotient is needed; discard the remainder with the blank
	// identifier. (The original bound it to a variable r that was never
	// used — "declared and not used" is a compile error in Go.)
	q, _ := divmod(unpack(x), unpack(y));
	return pack(q);
}
// Mod returns the modulus r of the division x / y for y > 0,
// with x = y*q + r and 0 <= r < y.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) Mod(y Natural) Natural {
q, r := divmod(unpack(x), unpack(y));
return pack(r);
}
// DivMod returns the pair (x.Div(y), x.Mod(y)) for y > 0.
// If y == 0, a division-by-zero run-time error occurs.
//
func (x Natural) DivMod(y Natural) (Natural, Natural) {
q, r := divmod(unpack(x), unpack(y));
return pack(q), pack(r);
}
// shl shifts x left by s bits (s <= _W) into z and returns the carry
// (the bits shifted out of the most significant digit).
func shl(z, x []digit, s uint) digit {
assert(s <= _W);
n := len(x);
c := digit(0);
for i := 0; i < n; i++ {
// the top s bits of x[i] become the next carry; the digit keeps
// the remaining bits OR-ed with the carry from the digit below
c, z[i] = x[i] >> (_W-s), x[i] << s & _M | c;
}
return c;
}
// Shl implements ``shift left'' x << s. It returns x * 2^s.
//
func (x Natural) Shl(s uint) Natural {
n := uint(len(x));
// whole digits of shift are absorbed by placing x higher in z;
// only the remaining s%_W bits need actual bit shifting
m := n + s/_W;
z := make(Natural, m+1);
z[m] = shl(z[m-n : m], x, s%_W);
return normalize(z);
}
// shr shifts x right by s bits (s <= _W) into z and returns the bits
// shifted out at the bottom (left-aligned within a digit).
func shr(z, x []digit, s uint) digit {
assert(s <= _W);
n := len(x);
c := digit(0);
// walk from the most significant digit so the carry propagates down
for i := n - 1; i >= 0; i-- {
c, z[i] = x[i] << (_W-s) & _M, x[i] >> s | c;
}
return c;
}
// Shr implements ``shift right'' x >> s. It returns x / 2^s.
//
func (x Natural) Shr(s uint) Natural {
n := uint(len(x));
// m is unsigned: shifting by more digits than x has wraps past n
m := n - s/_W;
if m > n { // check for underflow
m = 0;
}
z := make(Natural, m);
shr(z, x[n-m : n], s%_W);
return normalize(z);
}
// And returns the ``bitwise and'' x & y for the 2's-complement representation of x and y.
//
func (x Natural) And(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
// swap so that x is the longer operand
return y.And(x);
}
z := make(Natural, m);
for i := 0; i < m; i++ {
z[i] = x[i] & y[i];
}
// upper bits are 0
// AND can clear the top digit, so renormalize
return normalize(z);
}
// copy transfers the digits of x into the leading elements of z.
// z must be at least as long as x. (This package-level helper predates
// a usable built-in copy in this dialect.)
func copy(z, x []digit) {
	n := len(x)
	for i := 0; i < n; i++ {
		z[i] = x[i]
	}
}
// AndNot returns the ``bitwise clear'' x &^ y for the 2's-complement representation of x and y.
//
func (x Natural) AndNot(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
// only the first n digits of y can affect x
m = n;
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] &^ y[i];
}
// digits of x beyond y's length are unaffected
copy(z[m : n], x[m : n]);
// clearing bits can zero the top digit, so renormalize
return normalize(z);
}
// Or returns the ``bitwise or'' x | y for the 2's-complement representation of x and y.
//
func (x Natural) Or(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
// swap so that x is the longer operand
return y.Or(x);
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] | y[i];
}
copy(z[m : n], x[m : n]);
// no normalize needed: x is normalized and OR cannot clear its top digit
return z;
}
// Xor returns the ``bitwise exclusive or'' x ^ y for the 2's-complement representation of x and y.
//
func (x Natural) Xor(y Natural) Natural {
n := len(x);
m := len(y);
if n < m {
// swap so that x is the longer operand
return y.Xor(x);
}
z := make(Natural, n);
for i := 0; i < m; i++ {
z[i] = x[i] ^ y[i];
}
copy(z[m : n], x[m : n]);
// equal-length operands can cancel leading digits, so renormalize
return normalize(z);
}
// Cmp compares x and y. The result is an int value
//
// < 0 if x < y
// == 0 if x == y
// > 0 if x > y
//
func (x Natural) Cmp(y Natural) int {
n := len(x);
m := len(y);
if n != m || n == 0 {
// operands are normalized, so more digits means strictly larger
return n - m;
}
// equal lengths: find the most significant differing digit
i := n - 1;
for i > 0 && x[i] == y[i] { i--; }
d := 0;
switch {
case x[i] < y[i]: d = -1;
case x[i] > y[i]: d = 1;
}
return d;
}
// log2 computes the binary logarithm of x for x > 0.
// The result is the integer n for which 2^n <= x < 2^(n+1).
// If x == 0 a run-time error occurs.
//
func log2(x uint64) uint {
assert(x > 0);
n := uint(0);
// count the shifts needed to empty x; the answer is one less
// (the bit index of the highest set bit)
for x > 0 {
x >>= 1;
n++;
}
return n - 1;
}
// Log2 computes the binary logarithm of x for x > 0.
// The result is the integer n for which 2^n <= x < 2^(n+1).
// If x == 0 a run-time error occurs.
//
func (x Natural) Log2() uint {
n := len(x);
if n > 0 {
// digits below the top contribute _W bits each; the top digit is
// nonzero (x is normalized) and contributes its own bit index
return (uint(n) - 1)*_W + log2(uint64(x[n - 1]));
}
panic("Log2(0)");
}
// Computes x = x div d in place (modifies x) for small d's.
// Returns updated x and x mod d.
//
func divmod1(x Natural, d digit) (Natural, digit) {
assert(0 < d && isSmall(d - 1));
c := digit(0);
// schoolbook long division, most significant digit first;
// d is small, so c<<_W + x[i] fits in a digit without overflow
for i := len(x) - 1; i >= 0; i-- {
t := c<<_W + x[i];
c, x[i] = t%d, t/d;
}
return normalize(x), c;
}
// ToString converts x to a string for a given base, with 2 <= base <= 16.
//
func (x Natural) ToString(base uint) string {
if len(x) == 0 {
return "0";
}
// allocate buffer for conversion
assert(2 <= base && base <= 16);
// log2(base) is floored, so n over-estimates the digit count — safe
n := (x.Log2() + 1) / log2(uint64(base)) + 1; // +1: round up
s := make([]byte, n);
// don't destroy x
t := make(Natural, len(x));
copy(t, x);
// convert
// digits come out least significant first, so fill s backwards
i := n;
for !t.IsZero() {
i--;
var d digit;
t, d = divmod1(t, digit(base));
s[i] = "0123456789abcdef"[d];
};
return string(s[i : n]);
}
// String converts x to its decimal string representation.
// x.String() is the same as x.ToString(10).
//
func (x Natural) String() string {
return x.ToString(10);
}
// fmtbase maps a fmt verb to its numeric base: 'b' is binary, 'o' is
// octal, 'x' is hexadecimal; every other verb means decimal.
func fmtbase(c int) uint {
	if c == 'b' {
		return 2
	}
	if c == 'o' {
		return 8
	}
	if c == 'x' {
		return 16
	}
	return 10
}
// Format is a support routine for fmt.Formatter. It accepts
// the formats 'b' (binary), 'o' (octal), and 'x' (hexadecimal).
//
// Any other verb falls back to decimal (see fmtbase).
func (x Natural) Format(h fmt.State, c int) {
fmt.Fprintf(h, "%s", x.ToString(fmtbase(c)));
}
// hexvalue returns the numeric value of the hex digit ch; for any other
// character it returns 1<<logH, which presumably exceeds every supported
// base so callers treat it as "not a digit" (see NatFromString).
func hexvalue(ch byte) uint {
d := uint(1 << logH);
switch {
case '0' <= ch && ch <= '9': d = uint(ch - '0');
case 'a' <= ch && ch <= 'f': d = uint(ch - 'a') + 10;
case 'A' <= ch && ch <= 'F': d = uint(ch - 'A') + 10;
}
return d;
}
// Computes x = x*d + c for small d's.
//
func muladd1(x Natural, d, c digit) Natural {
assert(isSmall(d-1) && isSmall(c));
n := len(x);
z := make(Natural, n + 1);
// least significant digit first, carrying into the next digit;
// d and c are small, so t cannot overflow
for i := 0; i < n; i++ {
t := c + x[i]*d;
c, z[i] = t>>_W, t&_M;
}
z[n] = c;
return normalize(z);
}
// NatFromString returns the natural number corresponding to the
// longest possible prefix of s representing a natural number in a
// given conversion base, the actual conversion base used, and the
// prefix length. The syntax of natural numbers follows the syntax
// of unsigned integer literals in Go.
//
// If the base argument is 0, the string prefix determines the actual
// conversion base. A prefix of ``0x'' or ``0X'' selects base 16; the
// ``0'' prefix selects base 8. Otherwise the selected base is 10.
//
func NatFromString(s string, base uint) (Natural, uint, int) {
// determine base if necessary
i, n := 0, len(s);
if base == 0 {
base = 10;
if n > 0 && s[0] == '0' {
if n > 1 && (s[1] == 'x' || s[1] == 'X') {
base, i = 16, 2;
} else {
base, i = 8, 1;
}
}
}
// convert string
assert(2 <= base && base <= 16);
x := nat[0];
// accumulate digits until the first character that is not a valid
// digit in the chosen base (hexvalue returns an out-of-range value)
for ; i < n; i++ {
d := hexvalue(s[i]);
if d < base {
x = muladd1(x, digit(base), digit(d));
} else {
break;
}
}
return x, base, i;
}
// Natural number functions
// pop1 counts the 1 bits in x by repeatedly clearing the lowest set bit
// (Kernighan's method): x &= x-1 removes exactly one bit per step.
func pop1(x digit) uint {
	var count uint
	for ; x != 0; x &= x - 1 {
		count++
	}
	return count
}
// Pop computes the ``population count'' of (the number of 1 bits in) x.
//
func (x Natural) Pop() uint {
// sum the per-digit popcounts
n := uint(0);
for i := len(x) - 1; i >= 0; i-- {
n += pop1(x[i]);
}
return n;
}
// Pow computes x to the power of n.
//
// Binary exponentiation: square x and halve n each step, multiplying
// the accumulator z in whenever the low bit of n is set.
func (xp Natural) Pow(n uint) Natural {
z := nat[1];
x := xp;
for n > 0 {
// z * x^n == x^n0
if n&1 == 1 {
z = z.Mul(x);
}
x, n = x.Mul(x), n/2;
}
return z;
}
// MulRange computes the product of all the unsigned integers
// in the range [a, b] inclusively.
//
// Divide and conquer keeps the intermediate operands of similar size,
// which is faster than a left-to-right running product.
func MulRange(a, b uint) Natural {
switch {
case a > b: return nat[1];
case a == b: return Nat(uint64(a));
case a + 1 == b: return Nat(uint64(a)).Mul(Nat(uint64(b)));
}
m := (a + b)>>1;
assert(a <= m && m < b);
return MulRange(a, m).Mul(MulRange(m + 1, b));
}
// Fact computes the factorial of n (Fact(n) == MulRange(2, n)).
//
func Fact(n uint) Natural {
// Using MulRange() instead of the basic for-loop
// leads to faster factorial computation.
return MulRange(2, n);
}
// Binomial computes the binomial coefficient of (n, k).
//
// NOTE(review): assumes k <= n; for k > n the uint expression n-k+1
// wraps around — confirm callers never pass k > n.
func Binomial(n, k uint) Natural {
return MulRange(n-k+1, n).Div(MulRange(1, k));
}
// Gcd computes the gcd of x and y.
//
func (x Natural) Gcd(y Natural) Natural {
// Euclidean algorithm.
// Invariant: gcd(a, b) == gcd(x, y); terminates because b strictly
// decreases (a.Mod(b) < b).
a, b := x, y;
for !b.IsZero() {
a, b = b, a.Mod(b);
}
return a;
}
|
// Copyright 2015 Dorival de Moraes Pedroso. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package goga
import (
"math"
"github.com/cpmech/gosl/rnd"
)
// de_operator performs the differential-evolution operation
func de_operator(u, x, x0, x1, x2 []float64, prms *Parameters) {
C, F := prms.DiffEvolC, prms.DiffEvolF
if prms.DiffEvolUseCmult {
C *= rnd.Float64(0, 1)
}
if prms.DiffEvolUseFmult {
F *= rnd.Float64(0, 1)
}
K := 0.5 * (F + 1.0)
n := len(x)
I := rnd.Int(0, n-1)
for i := 0; i < n; i++ {
if rnd.FlipCoin(C) || i == I {
if rnd.FlipCoin(prms.DiffEvolPm) {
u[i] = x0[i] + F*(x1[i]-x2[i])
} else {
u[i] = x0[i] + K*(x1[i]+x2[i]-2.0*x0[i])
}
} else {
u[i] = x[i]
}
u[i] = prms.EnforceRange(i, u[i])
}
}
// CxFltDE implements the differential-evolution crossover
// producing child a from target A (donors A0, A1, A2) and child b from
// target B (donors B0, B1, B2); a and b are written in place.
func CxFltDE(a, b, A, B, A0, A1, A2, B0, B1, B2 []float64, prms *Parameters) {
de_operator(a, A, A0, A1, A2, prms)
de_operator(b, B, B0, B1, B2, prms)
}
// MtFltDeb implements Deb's parameter-based mutation operator
// [1] Deb K and Tiwari S (2008) Omni-optimizer: A generic evolutionary algorithm for single
// and multi-objective optimization. European Journal of Operational Research, 185:1062-1087.
func MtFltDeb(A []float64, prms *Parameters) {
// skip mutation
if !rnd.FlipCoin(prms.PmFlt) {
return
}
// for each gene
size := len(A)
// per-gene probability: one gene mutated on average
pm := 1.0 / float64(size)
ηm := prms.DebEtam
cm := 1.0 / (ηm + 1.0)
var u, Δx, φ1, φ2, δ1, δ2, δb, xl, xu float64
for i := 0; i < size; i++ {
// leave basis unmodified
if !rnd.FlipCoin(pm) {
continue
}
// range
// NOTE(review): assumes FltMax[i] > FltMin[i]; Δx == 0 would
// divide by zero below — confirm parameters are validated upstream
xl, xu = prms.FltMin[i], prms.FltMax[i]
Δx = xu - xl
// mutation
u = rnd.Float64(0, 1)
// normalized distances to the lower/upper bound
δ1 = (A[i] - xl) / Δx
δ2 = (xu - A[i]) / Δx
if u <= 0.5 {
// perturb towards the lower bound
φ1 = math.Pow(1.0-δ1, ηm+1.0)
δb = math.Pow(2.0*u+(1.0-2.0*u)*φ1, cm) - 1.0
} else {
// perturb towards the upper bound
φ2 = math.Pow(1.0-δ2, ηm+1.0)
δb = 1.0 - math.Pow(2.0-2.0*u+(2.0*u-1.0)*φ2, cm)
}
A[i] = prms.EnforceRange(i, A[i]+δb*Δx)
}
}
Add donor-selection scheme options to CxFltDE
// Copyright 2015 Dorival de Moraes Pedroso. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package goga
import (
"math"
"github.com/cpmech/gosl/rnd"
)
// de_operator performs the differential-evolution operation
func de_operator(u, x, x0, x1, x2 []float64, prms *Parameters) {
C, F := prms.DiffEvolC, prms.DiffEvolF
if prms.DiffEvolUseCmult {
C *= rnd.Float64(0, 1)
}
if prms.DiffEvolUseFmult {
F *= rnd.Float64(0, 1)
}
K := 0.5 * (F + 1.0)
n := len(x)
I := rnd.Int(0, n-1)
for i := 0; i < n; i++ {
if rnd.FlipCoin(C) || i == I {
if rnd.FlipCoin(prms.DiffEvolPm) {
u[i] = x0[i] + F*(x1[i]-x2[i])
} else {
u[i] = x0[i] + K*(x1[i]+x2[i]-2.0*x0[i])
}
} else {
u[i] = x[i]
}
u[i] = prms.EnforceRange(i, u[i])
}
}
// CxFltDE implements the differential-evolution crossover, producing
// child a from target A (donors A0, A1, A2) and child b from target B
// (donors B0, B1, B2); a and b are written in place.
//
// The previous version selected among three donor schemes via a
// hard-coded `scheme := 3` switch, which made cases 1 and 2 unreachable
// dead code; only the effective scheme is kept. The discarded
// prototypes were:
//   scheme 1: de_operator(a, A, B, A1, A2, prms); de_operator(b, B, A, B1, B2, prms)
//   scheme 2: both children built from A's donors (A0, A1, A2)
func CxFltDE(a, b, A, B, A0, A1, A2, B0, B1, B2 []float64, prms *Parameters) {
	de_operator(a, A, A0, A1, A2, prms)
	de_operator(b, B, B0, B1, B2, prms)
}
// MtFltDeb implements Deb's parameter-based mutation operator
// [1] Deb K and Tiwari S (2008) Omni-optimizer: A generic evolutionary algorithm for single
// and multi-objective optimization. European Journal of Operational Research, 185:1062-1087.
func MtFltDeb(A []float64, prms *Parameters) {
// skip mutation
if !rnd.FlipCoin(prms.PmFlt) {
return
}
// for each gene
size := len(A)
// per-gene probability: one gene mutated on average
pm := 1.0 / float64(size)
ηm := prms.DebEtam
cm := 1.0 / (ηm + 1.0)
var u, Δx, φ1, φ2, δ1, δ2, δb, xl, xu float64
for i := 0; i < size; i++ {
// leave basis unmodified
if !rnd.FlipCoin(pm) {
continue
}
// range
// NOTE(review): assumes FltMax[i] > FltMin[i]; Δx == 0 would
// divide by zero below — confirm parameters are validated upstream
xl, xu = prms.FltMin[i], prms.FltMax[i]
Δx = xu - xl
// mutation
u = rnd.Float64(0, 1)
// normalized distances to the lower/upper bound
δ1 = (A[i] - xl) / Δx
δ2 = (xu - A[i]) / Δx
if u <= 0.5 {
// perturb towards the lower bound
φ1 = math.Pow(1.0-δ1, ηm+1.0)
δb = math.Pow(2.0*u+(1.0-2.0*u)*φ1, cm) - 1.0
} else {
// perturb towards the upper bound
φ2 = math.Pow(1.0-δ2, ηm+1.0)
δb = 1.0 - math.Pow(2.0-2.0*u+(2.0*u-1.0)*φ2, cm)
}
A[i] = prms.EnforceRange(i, A[i]+δb*Δx)
}
}
|
package client
import (
"doozer/proto"
"doozer/util"
"encoding/binary"
"fmt"
"log"
"net"
"io"
"os"
pb "goprotobuf.googlecode.com/hg/proto"
"sync"
)
const (
Valid = 1 << iota
Done
)
var lg = util.NewLogger("client")
var (
ErrNoAddrs = os.NewError("no known address")
)
// ResponseError is a server-reported protocol error.
type ResponseError struct {
Code int32
Detail string
}
// String renders the error as "response: <code name>: <detail>".
func (r *ResponseError) String() string {
return "response: " + proto.Response_Err_name[r.Code] + ": " + r.Detail
}
// Response errors
var (
ErrNotDir = &ResponseError{proto.Response_NOTDIR, "not a directory"}
ErrIsDir = &ResponseError{proto.Response_ISDIR, "is a directory"}
ErrCasMismatch = &ResponseError{proto.Response_CAS_MISMATCH, "cas mismatch"}
ErrInvalidSnap = &ResponseError{proto.Response_INVALID_SNAP, "invalid snapshot id"}
// respErrors maps wire error codes to the canonical values above
respErrors = map[int32]*ResponseError{
proto.Response_NOTDIR: ErrNotDir,
proto.Response_ISDIR: ErrIsDir,
proto.Response_CAS_MISMATCH: ErrCasMismatch,
proto.Response_INVALID_SNAP: ErrInvalidSnap,
}
)
// Event is one notification delivered on a Watch/Walk channel.
type Event struct {
Cas int64
Path string
Body []byte
Err os.Error
}
// T and R alias the wire request/response types for brevity.
type T proto.Request
type R proto.Response
// err maps a response's error code to an os.Error; nil if there is none.
// REDIRECT is surfaced as os.EAGAIN so callers retry on a new connection.
func (r *R) err() os.Error {
if r.ErrCode != nil {
c := int32(*r.ErrCode)
if c == proto.Response_REDIRECT {
return os.EAGAIN
}
if r.ErrDetail != nil {
return &ResponseError{c, *r.ErrDetail}
}
// no detail: prefer the canonical error values
if r, ok := respErrors[c]; ok {
return r
}
return &ResponseError{c, proto.Response_Err_name[c]}
}
return nil
}
// String renders the full response in Go syntax, for debugging.
func (r *R) String() string {
return fmt.Sprintf("%#v", r)
}
// conn is one TCP connection to a doozer server with tag-multiplexed
// request/response streams.
type conn struct {
c net.Conn
clk sync.Mutex // write lock
err os.Error // unrecoverable error; once set, all writes fail
// callback management
n int32 // next tag
cb map[int32]chan *R // callback channels, keyed by request tag
cblk sync.Mutex
lg *log.Logger
// redirect handling
redirectAddr string
redirected bool
}
// writeT marshals t and writes it as a big-endian int32 length prefix
// followed by the payload. The first write error is remembered in c.err
// and makes the connection permanently unusable. send serializes calls
// to this method via c.clk.
func (c *conn) writeT(t *T) os.Error {
if c.err != nil {
return c.err
}
buf, err := pb.Marshal(t)
if err != nil {
return err
}
c.err = binary.Write(c.c, binary.BigEndian, int32(len(buf)))
if c.err != nil {
return c.err
}
// net.Conn.Write may write fewer bytes than requested; loop until done
for len(buf) > 0 {
n, err := c.c.Write(buf)
if err != nil {
c.err = err
return err
}
buf = buf[n:]
}
return nil
}
// send registers a fresh reply channel under an unused tag, stamps the
// tag into t, and writes the request. The returned channel yields every
// Valid response for the tag and is closed once the Done flag arrives.
func (c *conn) send(t *T) (chan *R, os.Error) {
if c.err != nil {
return nil, c.err
}
ch := make(chan *R)
// Find an unused tag and
// put the reply chan in the table.
c.cblk.Lock()
for _, ok := c.cb[c.n]; ok; _, ok = c.cb[c.n] {
c.n++
}
tag := c.n
c.cb[tag] = ch
c.cblk.Unlock()
t.Tag = &tag
c.clk.Lock()
err := c.writeT(t)
c.clk.Unlock()
if err != nil {
// undo the registration (old-dialect map entry deletion)
c.cblk.Lock()
c.cb[tag] = nil, false
c.cblk.Unlock()
return nil, err
}
return ch, nil
}
// call sends t and waits for its single reply.
// NOTE(review): if the stream ends with Done but no Valid reply, the
// receive yields nil and r.err() would dereference nil — presumably the
// server always sends a Valid response for unary verbs; confirm.
func (c *conn) call(t *T) (*R, os.Error) {
ch, err := c.send(t)
if err != nil {
return nil, err
}
r := <-ch
if err := r.err(); err != nil {
return nil, err
}
return r, nil
}
// readR reads one length-prefixed protobuf response from the connection.
// NOTE(review): size comes straight off the wire and is not validated;
// a negative value would make the make() below panic — consider
// rejecting size < 0 explicitly.
func (c *conn) readR() (*R, os.Error) {
var size int32
err := binary.Read(c.c, binary.BigEndian, &size)
if err != nil {
return nil, err
}
buf := make([]byte, size)
_, err = io.ReadFull(c.c, buf)
if err != nil {
return nil, err
}
var r R
err = pb.Unmarshal(buf, &r)
if err != nil {
return nil, err
}
return &r, nil
}
// readResponses is the per-connection reader goroutine: it demultiplexes
// incoming responses to the channel registered for their tag, honoring
// the Valid/Done flags, and records redirect notices. It exits on the
// first read error.
func (c *conn) readResponses() {
for {
r, err := c.readR()
if err != nil {
c.lg.Println(err)
return
}
// remember redirects; the Client checks this flag before reuse
if r.ErrCode != nil && *r.ErrCode == proto.Response_REDIRECT {
c.redirectAddr = pb.GetString(r.ErrDetail)
c.redirected = true
}
tag := pb.GetInt32(r.Tag)
flags := pb.GetInt32(r.Flags)
c.cblk.Lock()
ch, ok := c.cb[tag]
if ok && flags&Done != 0 {
// final response for this tag: drop the registration
c.cb[tag] = nil, false
}
c.cblk.Unlock()
if ok {
if flags&Valid != 0 {
ch <- r
}
if flags&Done != 0 {
close(ch)
}
} else {
c.lg.Println("unexpected:", r.String())
}
}
}
// cancel asks the server to cancel the request identified by tag, then
// deregisters and closes the local reply channel.
func (c *conn) cancel(tag int32) os.Error {
verb := proto.NewRequest_Verb(proto.Request_CANCEL)
_, err := c.call(&T{Verb: verb, Id: &tag})
if err != nil {
return err
}
c.cblk.Lock()
ch, ok := c.cb[tag]
if ok {
// old-dialect map entry deletion
c.cb[tag] = nil, false
}
c.cblk.Unlock()
if ok {
close(ch)
}
return nil
}
// Client is a doozer client that tracks readable and writable server
// addresses and lazily maintains a single active connection.
type Client struct {
Name string
c *conn // current connection
ra []string // known readable addresses
wa []string // known writable address
lg *log.Logger
lk sync.Mutex // guards c, ra, wa
}
// Name is the name of this cluster.
// Addr is an initial readable address to connect to.
func New(name, raddr string) *Client {
return &Client{Name: name, ra: []string{raddr}}
}
// AddRaddr records a as a known readable address, ignoring duplicates.
func (c *Client) AddRaddr(a string) {
	c.lk.Lock()
	defer c.lk.Unlock()
	known := false
	for _, addr := range c.ra {
		if addr == a {
			known = true
			break
		}
	}
	if !known {
		c.ra = append(c.ra, a)
	}
}
// AddWaddr records a as a known writable address, ignoring duplicates.
func (c *Client) AddWaddr(a string) {
c.lk.Lock()
defer c.lk.Unlock()
for _, s := range c.wa {
if s == a {
return
}
}
c.wa = append(c.wa, a)
}
// dial opens a TCP connection to addr and starts its reader goroutine.
// (This dialect's net.Dial takes (network, local addr, remote addr).)
func (cl *Client) dial(addr string) (*conn, os.Error) {
var c conn
var err os.Error
c.c, err = net.Dial("tcp", "", addr)
if err != nil {
return nil, err
}
c.cb = make(map[int32]chan *R)
c.lg = util.NewLogger(addr)
go c.readResponses()
return &c, nil
}
// This is a little subtle. We want to follow redirects while still pipelining
// requests, and we want to allow as many requests as possible to succeed
// without retrying unnecessarily.
//
// In particular, reads never need to redirect, and writes must always go to
// a CAL node. So we want that read requests never retry, and write requests
// retry if and only if necessary. Here's how it works:
//
// In the conn, when we get a redirect response, we raise a flag noting
// the new address. This flag only goes up, never down. This flag effectively
// means the connection is deprecated. Any pending requests can go ahead, but
// new requests should use the new address.
//
// In the Client, when we notice that a redirect has occurred (i.e. the flag is
// set), we establish a new connection to the new address. Calls in the future
// will use the new connection. But we also allow the old connection to
// continue functioning as it was. Any writes on the old connection will retry,
// and then they are guaranteed to pick up the new connection. Any reads on the
// old connection will just succeed directly.
// conn returns the connection to use for the next request, dialing the
// first known readable address on first use, and re-dialing a writable
// address once the current connection has seen a redirect (see the
// comment above for the strategy). A failed address is dropped from its
// list so the next call tries the following one.
//
// Fix: the previous version called cl.AddWaddr while holding cl.lk;
// AddWaddr acquires cl.lk itself, and Go mutexes are not reentrant, so
// every redirect self-deadlocked. The duplicate check is now inlined.
func (cl *Client) conn() (c *conn, err os.Error) {
	cl.lk.Lock()
	defer cl.lk.Unlock()
	if cl.c == nil {
		if len(cl.ra) < 1 {
			return nil, ErrNoAddrs
		}
		cl.c, err = cl.dial(cl.ra[0])
		if err != nil {
			cl.ra = cl.ra[1:]
		}
		return cl.c, err
	}
	if cl.c.redirected {
		// record the redirect target without calling AddWaddr
		// (which would re-acquire cl.lk and deadlock)
		addr := cl.c.redirectAddr
		known := false
		for _, s := range cl.wa {
			if s == addr {
				known = true
				break
			}
		}
		if !known {
			cl.wa = append(cl.wa, addr)
		}
		if len(cl.wa) < 1 {
			return nil, ErrNoAddrs
		}
		// NOTE(review): dials wa[0], which is not necessarily the
		// address just learned from the redirect — confirm policy.
		cl.c, err = cl.dial(cl.wa[0])
		if err != nil {
			cl.wa = cl.wa[1:]
		}
		return cl.c, err
	}
	return cl.c, nil
}
// call assigns the verb, then retries the request as long as the result
// is os.EAGAIN (the redirect sentinel produced by R.err), picking up a
// fresh connection on each attempt.
func (cl *Client) call(verb int32, t *T) (r *R, err os.Error) {
t.Verb = proto.NewRequest_Verb(verb)
for err = os.EAGAIN; err == os.EAGAIN; {
var c *conn
c, err = cl.conn()
if err != nil {
continue
}
r, err = c.call(t)
}
if err != nil {
lg.Println(err)
}
return
}
// Join sends a JOIN request for the server identified by id at addr;
// it returns the seqn and snapshot name reported by the cluster.
func (cl *Client) Join(id, addr string) (seqn int64, snapshot string, err os.Error) {
r, err := cl.call(proto.Request_JOIN, &T{Path: &id, Value: []byte(addr)})
if err != nil {
return 0, "", err
}
return pb.GetInt64(r.Seqn), string(r.Value), nil
}
// Set writes body to the file at path iff its CAS token is oldCas;
// it returns the file's new CAS token.
func (cl *Client) Set(path string, oldCas int64, body []byte) (newCas int64, err os.Error) {
r, err := cl.call(proto.Request_SET, &T{Path: &path, Value: body, Cas: &oldCas})
if err != nil {
return 0, err
}
return pb.GetInt64(r.Cas), nil
}
// Returns the body and CAS token of the file at path.
// If snapId is 0, uses the current state, otherwise,
// snapId must be a value previously returned from Snap.
// If path does not denote a file, returns an error.
func (cl *Client) Get(path string, snapId int32) (body []byte, cas int64, err os.Error) {
r, err := cl.call(proto.Request_GET, &T{Path: &path, Id: &snapId})
if err != nil {
return nil, 0, err
}
return r.Value, pb.GetInt64(r.Cas), nil
}
// Del deletes the file at path iff its CAS token matches cas.
func (cl *Client) Del(path string, cas int64) os.Error {
_, err := cl.call(proto.Request_DEL, &T{Path: &path, Cas: &cas})
return err
}
// Noop sends a NOOP request.
func (cl *Client) Noop() os.Error {
_, err := cl.call(proto.Request_NOOP, &T{})
return err
}
// Checkin sends a CHECKIN request for id with CAS token cas and
// returns the new CAS token.
func (cl *Client) Checkin(id string, cas int64) (int64, os.Error) {
r, err := cl.call(proto.Request_CHECKIN, &T{Path: &id, Cas: &cas})
if err != nil {
return 0, err
}
return pb.GetInt64(r.Cas), nil
}
// Snap requests a snapshot of the current tree; it returns the
// snapshot id (usable with Get) and its seqn.
func (cl *Client) Snap() (id int32, ver int64, err os.Error) {
r, err := cl.call(proto.Request_SNAP, &T{})
if err != nil {
return 0, 0, err
}
return pb.GetInt32(r.Id), pb.GetInt64(r.Seqn), nil
}
// DelSnap discards the snapshot identified by id.
func (cl *Client) DelSnap(id int32) os.Error {
_, err := cl.call(proto.Request_DELSNAP, &T{Id: &id})
return err
}
// events issues a streaming request (WATCH or WALK) for glob and adapts
// the raw response channel into a Watch whose channel carries Events.
func (cl *Client) events(verb int32, glob string) (*Watch, os.Error) {
c, err := cl.conn()
if err != nil {
return nil, err
}
var t T
t.Verb = proto.NewRequest_Verb(verb)
t.Path = &glob
ch, err := c.send(&t)
if err != nil {
return nil, err
}
evs := make(chan *Event)
w := &Watch{evs, c, *t.Tag}
// translator goroutine: ends (and closes evs) when readResponses
// closes ch after the server sends Done
go func() {
for r := range ch {
var ev Event
if err := r.err(); err != nil {
ev.Err = err
} else {
ev.Cas = pb.GetInt64(r.Cas)
ev.Path = pb.GetString(r.Path)
ev.Body = r.Value
}
evs <- &ev
}
close(evs)
}()
return w, nil
}
// Watch subscribes to changes of files matching glob.
func (cl *Client) Watch(glob string) (*Watch, os.Error) {
return cl.events(proto.Request_WATCH, glob)
}
// Walk starts a WALK request over files matching glob.
func (cl *Client) Walk(glob string) (*Watch, os.Error) {
return cl.events(proto.Request_WALK, glob)
}
// Watch is a handle on a server-side event stream.
type Watch struct {
C <-chan *Event // to caller
c *conn
tag int32
}
// Cancel stops the stream; the event channel is closed by the conn.
func (w *Watch) Cancel() os.Error {
return w.c.cancel(w.tag)
}
Refactor: move event-stream handling into conn and collapse the address lists
package client
import (
"doozer/proto"
"doozer/util"
"encoding/binary"
"fmt"
"log"
"net"
"io"
"os"
pb "goprotobuf.googlecode.com/hg/proto"
"sync"
)
const (
Valid = 1 << iota
Done
)
var lg = util.NewLogger("client")
var (
ErrNoAddrs = os.NewError("no known address")
)
// ResponseError is a server-reported protocol error.
type ResponseError struct {
Code int32
Detail string
}
// String renders the error as "response: <code name>: <detail>".
func (r *ResponseError) String() string {
return "response: " + proto.Response_Err_name[r.Code] + ": " + r.Detail
}
// Response errors
var (
ErrNotDir = &ResponseError{proto.Response_NOTDIR, "not a directory"}
ErrIsDir = &ResponseError{proto.Response_ISDIR, "is a directory"}
ErrCasMismatch = &ResponseError{proto.Response_CAS_MISMATCH, "cas mismatch"}
ErrInvalidSnap = &ResponseError{proto.Response_INVALID_SNAP, "invalid snapshot id"}
// respErrors maps wire error codes to the canonical values above
respErrors = map[int32]*ResponseError{
proto.Response_NOTDIR: ErrNotDir,
proto.Response_ISDIR: ErrIsDir,
proto.Response_CAS_MISMATCH: ErrCasMismatch,
proto.Response_INVALID_SNAP: ErrInvalidSnap,
}
)
// Event is one notification delivered on a Watch/Walk channel.
type Event struct {
Cas int64
Path string
Body []byte
Err os.Error
}
// T and R alias the wire request/response types for brevity.
type T proto.Request
type R proto.Response
// err maps a response's error code to an os.Error; nil if there is none.
// REDIRECT is surfaced as os.EAGAIN so callers retry on a new connection.
func (r *R) err() os.Error {
if r.ErrCode != nil {
c := int32(*r.ErrCode)
if c == proto.Response_REDIRECT {
return os.EAGAIN
}
if r.ErrDetail != nil {
return &ResponseError{c, *r.ErrDetail}
}
// no detail: prefer the canonical error values
if r, ok := respErrors[c]; ok {
return r
}
return &ResponseError{c, proto.Response_Err_name[c]}
}
return nil
}
// String renders the full response in Go syntax, for debugging.
func (r *R) String() string {
return fmt.Sprintf("%#v", r)
}
// conn is one TCP connection to a doozer server with tag-multiplexed
// request/response streams.
type conn struct {
c net.Conn
clk sync.Mutex // write lock
err os.Error // unrecoverable error; once set, all writes fail
// callback management
n int32 // next tag
cb map[int32]chan *R // callback channels, keyed by request tag
cblk sync.Mutex
lg *log.Logger
// redirect handling
redirectAddr string
redirected bool
}
// writeT marshals t and writes it as a big-endian int32 length prefix
// followed by the payload. The first write error is remembered in c.err
// and makes the connection permanently unusable. send serializes calls
// to this method via c.clk.
func (c *conn) writeT(t *T) os.Error {
if c.err != nil {
return c.err
}
buf, err := pb.Marshal(t)
if err != nil {
return err
}
c.err = binary.Write(c.c, binary.BigEndian, int32(len(buf)))
if c.err != nil {
return c.err
}
// net.Conn.Write may write fewer bytes than requested; loop until done
for len(buf) > 0 {
n, err := c.c.Write(buf)
if err != nil {
c.err = err
return err
}
buf = buf[n:]
}
return nil
}
// send registers a fresh reply channel under an unused tag, stamps the
// tag into t, and writes the request. The returned channel yields every
// Valid response for the tag and is closed once the Done flag arrives.
func (c *conn) send(t *T) (chan *R, os.Error) {
if c.err != nil {
return nil, c.err
}
ch := make(chan *R)
// Find an unused tag and
// put the reply chan in the table.
c.cblk.Lock()
for _, ok := c.cb[c.n]; ok; _, ok = c.cb[c.n] {
c.n++
}
tag := c.n
c.cb[tag] = ch
c.cblk.Unlock()
t.Tag = &tag
c.clk.Lock()
err := c.writeT(t)
c.clk.Unlock()
if err != nil {
// undo the registration (old-dialect map entry deletion)
c.cblk.Lock()
c.cb[tag] = nil, false
c.cblk.Unlock()
return nil, err
}
return ch, nil
}
// call sends t and waits for its single reply.
// NOTE(review): if the stream ends with Done but no Valid reply, the
// receive yields nil and r.err() would dereference nil — presumably the
// server always sends a Valid response for unary verbs; confirm.
func (c *conn) call(t *T) (*R, os.Error) {
ch, err := c.send(t)
if err != nil {
return nil, err
}
r := <-ch
if err := r.err(); err != nil {
return nil, err
}
return r, nil
}
// events turns the streaming request t into a Watch: it sends t and
// starts a goroutine that translates raw responses into Events until
// the reply channel is closed.
func (c *conn) events(t *T) (*Watch, os.Error) {
ch, err := c.send(t)
if err != nil {
return nil, err
}
evs := make(chan *Event)
w := &Watch{evs, c, *t.Tag}
go func() {
for r := range ch {
var ev Event
if err := r.err(); err != nil {
ev.Err = err
} else {
ev.Cas = pb.GetInt64(r.Cas)
ev.Path = pb.GetString(r.Path)
ev.Body = r.Value
}
evs <- &ev
}
// reply channel closed (Done): end the event stream too
close(evs)
}()
return w, nil
}
// readR reads one length-prefixed protobuf response from the
// connection: a big-endian int32 length followed by that many bytes of
// marshaled proto.Response.
func (c *conn) readR() (*R, os.Error) {
	var size int32
	err := binary.Read(c.c, binary.BigEndian, &size)
	if err != nil {
		return nil, err
	}
	// The length comes straight off the wire; a corrupt or malicious
	// peer could send a negative value, which would make the allocation
	// below panic. Reject it explicitly. (A huge positive value can
	// still exhaust memory — consider an upper bound as well.)
	if size < 0 {
		return nil, os.NewError("client: negative response size")
	}
	buf := make([]byte, size)
	_, err = io.ReadFull(c.c, buf)
	if err != nil {
		return nil, err
	}
	var r R
	err = pb.Unmarshal(buf, &r)
	if err != nil {
		return nil, err
	}
	return &r, nil
}
// readResponses is the per-connection reader goroutine: it demultiplexes
// incoming responses to the channel registered for their tag, honoring
// the Valid/Done flags, and records redirect notices. It exits on the
// first read error.
func (c *conn) readResponses() {
for {
r, err := c.readR()
if err != nil {
c.lg.Println(err)
return
}
// remember redirects
if r.ErrCode != nil && *r.ErrCode == proto.Response_REDIRECT {
c.redirectAddr = pb.GetString(r.ErrDetail)
c.redirected = true
}
tag := pb.GetInt32(r.Tag)
flags := pb.GetInt32(r.Flags)
c.cblk.Lock()
ch, ok := c.cb[tag]
if ok && flags&Done != 0 {
// final response for this tag: drop the registration
c.cb[tag] = nil, false
}
c.cblk.Unlock()
if ok {
if flags&Valid != 0 {
ch <- r
}
if flags&Done != 0 {
close(ch)
}
} else {
c.lg.Println("unexpected:", r.String())
}
}
}
// cancel asks the server to cancel the request identified by tag, then
// deregisters and closes the local reply channel.
func (c *conn) cancel(tag int32) os.Error {
verb := proto.NewRequest_Verb(proto.Request_CANCEL)
_, err := c.call(&T{Verb: verb, Id: &tag})
if err != nil {
return err
}
c.cblk.Lock()
ch, ok := c.cb[tag]
if ok {
// old-dialect map entry deletion
c.cb[tag] = nil, false
}
c.cblk.Unlock()
if ok {
close(ch)
}
return nil
}
// Client is a doozer client that tracks known (writable) server
// addresses and lazily maintains a single active connection.
type Client struct {
Name string
c *conn // current connection
a []string // known (writable) server addresses
lg *log.Logger
lk sync.Mutex // guards c and a
}
// Name is the name of this cluster.
// Addr is an initial (writable) address to connect to.
func New(name, addr string) *Client {
return &Client{Name: name, a: []string{addr}}
}
// AddWaddr records a as a known server address, ignoring duplicates.
func (c *Client) AddWaddr(a string) {
c.lk.Lock()
defer c.lk.Unlock()
for _, s := range c.a {
if s == a {
return
}
}
c.a = append(c.a, a)
}
// dial opens a TCP connection to addr and starts its reader goroutine.
// (This dialect's net.Dial takes (network, local addr, remote addr).)
func (cl *Client) dial(addr string) (*conn, os.Error) {
var c conn
var err os.Error
c.c, err = net.Dial("tcp", "", addr)
if err != nil {
return nil, err
}
c.cb = make(map[int32]chan *R)
c.lg = util.NewLogger(addr)
go c.readResponses()
return &c, nil
}
// This is a little subtle. We want to follow redirects while still pipelining
// requests, and we want to allow as many requests as possible to succeed
// without retrying unnecessarily.
//
// In particular, reads never need to redirect, and writes must always go to
// a CAL node. So we want that read requests never retry, and write requests
// retry if and only if necessary. Here's how it works:
//
// In the conn, when we get a redirect response, we raise a flag noting
// the new address. This flag only goes up, never down. This flag effectively
// means the connection is deprecated. Any pending requests can go ahead, but
// new requests should use the new address.
//
// In the Client, when we notice that a redirect has occurred (i.e. the flag is
// set), we establish a new connection to the new address. Calls in the future
// will use the new connection. But we also allow the old connection to
// continue functioning as it was. Any writes on the old connection will retry,
// and then they are guaranteed to pick up the new connection. Any reads on the
// old connection will just succeed directly.
// conn returns the current connection, dialing the first known address
// on first use. A failed address is dropped so the next call tries the
// following one.
// NOTE(review): despite the strategy described in the comment above,
// this version never inspects cl.c.redirected and so never re-dials
// after a redirect — confirm whether that is intended.
func (cl *Client) conn() (c *conn, err os.Error) {
cl.lk.Lock()
defer cl.lk.Unlock()
if cl.c == nil {
if len(cl.a) < 1 {
return nil, ErrNoAddrs
}
cl.c, err = cl.dial(cl.a[0])
if err != nil {
cl.a = cl.a[1:]
}
return cl.c, err
}
return cl.c, nil
}
// call assigns the verb, then retries the request as long as the result
// is os.EAGAIN (the redirect sentinel produced by R.err), picking up a
// fresh connection on each attempt.
func (cl *Client) call(verb int32, t *T) (r *R, err os.Error) {
t.Verb = proto.NewRequest_Verb(verb)
for err = os.EAGAIN; err == os.EAGAIN; {
var c *conn
c, err = cl.conn()
if err != nil {
continue
}
r, err = c.call(t)
}
if err != nil {
lg.Println(err)
}
return
}
// Join sends a JOIN request for the server identified by id at addr;
// it returns the seqn and snapshot name reported by the cluster.
func (cl *Client) Join(id, addr string) (seqn int64, snapshot string, err os.Error) {
r, err := cl.call(proto.Request_JOIN, &T{Path: &id, Value: []byte(addr)})
if err != nil {
return 0, "", err
}
return pb.GetInt64(r.Seqn), string(r.Value), nil
}
// Set writes body to the file at path iff its CAS token is oldCas;
// it returns the file's new CAS token.
func (cl *Client) Set(path string, oldCas int64, body []byte) (newCas int64, err os.Error) {
r, err := cl.call(proto.Request_SET, &T{Path: &path, Value: body, Cas: &oldCas})
if err != nil {
return 0, err
}
return pb.GetInt64(r.Cas), nil
}
// Returns the body and CAS token of the file at path.
// If snapId is 0, uses the current state, otherwise,
// snapId must be a value previously returned from Snap.
// If path does not denote a file, returns an error.
func (cl *Client) Get(path string, snapId int32) (body []byte, cas int64, err os.Error) {
r, err := cl.call(proto.Request_GET, &T{Path: &path, Id: &snapId})
if err != nil {
return nil, 0, err
}
return r.Value, pb.GetInt64(r.Cas), nil
}
// Del deletes the file at path iff its CAS token matches cas.
func (cl *Client) Del(path string, cas int64) os.Error {
_, err := cl.call(proto.Request_DEL, &T{Path: &path, Cas: &cas})
return err
}
// Noop sends a NOOP request.
func (cl *Client) Noop() os.Error {
_, err := cl.call(proto.Request_NOOP, &T{})
return err
}
// Checkin sends a CHECKIN request for id with CAS token cas and
// returns the new CAS token.
func (cl *Client) Checkin(id string, cas int64) (int64, os.Error) {
r, err := cl.call(proto.Request_CHECKIN, &T{Path: &id, Cas: &cas})
if err != nil {
return 0, err
}
return pb.GetInt64(r.Cas), nil
}
// Snap requests a snapshot of the current tree; it returns the
// snapshot id (usable with Get) and its seqn.
func (cl *Client) Snap() (id int32, ver int64, err os.Error) {
r, err := cl.call(proto.Request_SNAP, &T{})
if err != nil {
return 0, 0, err
}
return pb.GetInt32(r.Id), pb.GetInt64(r.Seqn), nil
}
// DelSnap discards the snapshot identified by id.
func (cl *Client) DelSnap(id int32) os.Error {
_, err := cl.call(proto.Request_DELSNAP, &T{Id: &id})
return err
}
// Watch subscribes to changes of files matching glob.
func (cl *Client) Watch(glob string) (*Watch, os.Error) {
c, err := cl.conn()
if err != nil {
return nil, err
}
var t T
t.Verb = proto.NewRequest_Verb(proto.Request_WATCH)
t.Path = &glob
return c.events(&t)
}
// Walk starts a WALK request over files matching glob.
func (cl *Client) Walk(glob string) (*Watch, os.Error) {
c, err := cl.conn()
if err != nil {
return nil, err
}
var t T
t.Verb = proto.NewRequest_Verb(proto.Request_WALK)
t.Path = &glob
return c.events(&t)
}
// Watch is a handle on a server-side event stream.
type Watch struct {
C <-chan *Event // to caller
c *conn
tag int32
}
// Cancel stops the stream; the event channel is closed by the conn.
func (w *Watch) Cancel() os.Error {
return w.c.cancel(w.tag)
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"runtime"
)
// Path is a validated list of Trees derived from $GOROOT and $GOPATH at init.
var Path []*Tree
// Tree describes a Go source tree, either $GOROOT or one from $GOPATH.
type Tree struct {
Path string
Goroot bool
}
// newTree validates p (which must be absolute), resolves symlinks, and
// wraps it in a Tree. The caller sets Goroot as appropriate.
func newTree(p string) (*Tree, error) {
if !filepath.IsAbs(p) {
return nil, errors.New("must be absolute")
}
ep, err := filepath.EvalSymlinks(p)
if err != nil {
return nil, err
}
return &Tree{Path: ep}, nil
}
// SrcDir returns the tree's package source directory.
func (t *Tree) SrcDir() string {
if t.Goroot {
// GOROOT keeps library sources under src/pkg
return filepath.Join(t.Path, "src", "pkg")
}
return filepath.Join(t.Path, "src")
}
// PkgDir returns the tree's package object directory.
func (t *Tree) PkgDir() string {
// environment variables override the values compiled into runtime
goos, goarch := runtime.GOOS, runtime.GOARCH
if e := os.Getenv("GOOS"); e != "" {
goos = e
}
if e := os.Getenv("GOARCH"); e != "" {
goarch = e
}
return filepath.Join(t.Path, "pkg", goos+"_"+goarch)
}
// BinDir returns the tree's binary executable directory.
func (t *Tree) BinDir() string {
if t.Goroot {
// $GOBIN overrides the default, but only for the GOROOT tree
if gobin := os.Getenv("GOBIN"); gobin != "" {
return gobin
}
}
return filepath.Join(t.Path, "bin")
}
// HasSrc returns whether the given package's
// source can be found inside this Tree.
func (t *Tree) HasSrc(pkg string) bool {
fi, err := os.Stat(filepath.Join(t.SrcDir(), pkg))
if err != nil {
return false
}
return fi.IsDir()
}
// HasPkg returns whether the given package's
// object file can be found inside this Tree.
func (t *Tree) HasPkg(pkg string) bool {
fi, err := os.Stat(filepath.Join(t.PkgDir(), pkg+".a"))
if err != nil {
return false
}
// a directory named pkg.a is not an object file
return !fi.IsDir()
// TODO(adg): check object version is consistent
}
var (
ErrNotFound = errors.New("go/build: package could not be found locally")
ErrTreeNotFound = errors.New("go/build: no valid GOROOT or GOPATH could be found")
)
// FindTree takes an import or filesystem path and returns the
// tree where the package source should be and the package import path.
func FindTree(path string) (tree *Tree, pkg string, err error) {
if isLocalPath(path) {
// filesystem path: resolve it, then locate the tree whose src
// directory is a prefix of it
if path, err = filepath.Abs(path); err != nil {
return
}
if path, err = filepath.EvalSymlinks(path); err != nil {
return
}
for _, t := range Path {
tpath := t.SrcDir() + string(filepath.Separator)
if !filepath.HasPrefix(path, tpath) {
continue
}
tree = t
// the import path is the part after the src directory
pkg = path[len(tpath):]
return
}
err = fmt.Errorf("path %q not inside a GOPATH", path)
return
}
// import path: prefer a tree that already has the source; fall back
// to the default tree for remote packages
tree = defaultTree
pkg = path
for _, t := range Path {
if t.HasSrc(pkg) {
tree = t
return
}
}
if tree == nil {
err = ErrTreeNotFound
} else {
err = ErrNotFound
}
return
}
// isLocalPath returns whether the given path is local (/foo ./foo ../foo . ..)
// Windows paths that starts with drive letter (c:\foo c:foo) are considered local.
func isLocalPath(s string) bool {
	const sep = string(filepath.Separator)
	switch {
	case s == ".", s == "..":
		return true
	case filepath.HasPrefix(s, sep),
		filepath.HasPrefix(s, "."+sep),
		filepath.HasPrefix(s, ".."+sep):
		return true
	}
	// Drive-letter prefixed paths (Windows) also count as local.
	return filepath.VolumeName(s) != ""
}
var (
	// argument lists used by the build's gc and ld methods: one -I/-L
	// flag pair is appended per configured tree's object directory.
	gcImportArgs []string
	ldImportArgs []string
	// default tree for remote packages: the first valid GOPATH entry,
	// falling back to the GOROOT tree.
	defaultTree *Tree
)
// set up Path: parse and validate GOROOT and GOPATH variables
func init() {
	// GOROOT first: a valid GOROOT tree always heads the Path list.
	root := runtime.GOROOT()
	t, err := newTree(root)
	if err != nil {
		log.Printf("go/build: invalid GOROOT %q: %v", root, err)
	} else {
		t.Goroot = true
		Path = []*Tree{t}
	}
	// Then each GOPATH entry, skipping empties and invalid paths.
	for _, p := range filepath.SplitList(os.Getenv("GOPATH")) {
		if p == "" {
			continue
		}
		t, err := newTree(p)
		if err != nil {
			log.Printf("go/build: invalid GOPATH %q: %v", p, err)
			continue
		}
		Path = append(Path, t)
		gcImportArgs = append(gcImportArgs, "-I", t.PkgDir())
		ldImportArgs = append(ldImportArgs, "-L", t.PkgDir())
		// select first GOPATH entry as default
		if defaultTree == nil {
			defaultTree = t
		}
	}
	// use GOROOT if no valid GOPATH specified
	if defaultTree == nil && len(Path) > 0 {
		defaultTree = Path[0]
	}
}
go/build: remove 'go/build' from error messages
This leads to really confusing messages in goinstall.
R=golang-dev, r
CC=golang-dev
http://codereview.appspot.com/5495074
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"runtime"
)
// Path is a validated list of Trees derived from $GOROOT and $GOPATH at init.
// When $GOROOT is valid its tree is the first element.
var Path []*Tree
// Tree describes a Go source tree, either $GOROOT or one from $GOPATH.
type Tree struct {
	// Path is the absolute, symlink-resolved root of the tree.
	Path string
	// Goroot marks a tree derived from $GOROOT rather than $GOPATH.
	Goroot bool
}
// newTree validates p as an absolute path and returns a Tree rooted at its
// symlink-resolved location.
func newTree(p string) (*Tree, error) {
	if !filepath.IsAbs(p) {
		return nil, errors.New("must be absolute")
	}
	resolved, err := filepath.EvalSymlinks(p)
	if err != nil {
		return nil, err
	}
	t := &Tree{Path: resolved}
	return t, nil
}
// SrcDir returns the tree's package source directory.
func (t *Tree) SrcDir() string {
	elems := []string{t.Path, "src"}
	if t.Goroot {
		// The GOROOT tree keeps its library sources under src/pkg.
		elems = append(elems, "pkg")
	}
	return filepath.Join(elems...)
}
// PkgDir returns the tree's package object directory.
func (t *Tree) PkgDir() string {
	// Environment variables override the build-time platform so that
	// cross-compilation targets resolve to the right object directory.
	goos := os.Getenv("GOOS")
	if goos == "" {
		goos = runtime.GOOS
	}
	goarch := os.Getenv("GOARCH")
	if goarch == "" {
		goarch = runtime.GOARCH
	}
	return filepath.Join(t.Path, "pkg", goos+"_"+goarch)
}
// BinDir returns the tree's binary executable directory.
func (t *Tree) BinDir() string {
	// $GOBIN overrides the location, but only for the GOROOT tree.
	if gobin := os.Getenv("GOBIN"); t.Goroot && gobin != "" {
		return gobin
	}
	return filepath.Join(t.Path, "bin")
}
// HasSrc returns whether the given package's
// source can be found inside this Tree.
func (t *Tree) HasSrc(pkg string) bool {
	fi, err := os.Stat(filepath.Join(t.SrcDir(), pkg))
	if err != nil {
		return false
	}
	// Package source lives in a directory, not a plain file.
	return fi.IsDir()
}
// HasPkg returns whether the given package's
// object file can be found inside this Tree.
func (t *Tree) HasPkg(pkg string) bool {
	fi, err := os.Stat(filepath.Join(t.PkgDir(), pkg+".a"))
	if err != nil {
		return false
	}
	// The compiled archive must be a regular file.
	return !fi.IsDir()
	// TODO(adg): check object version is consistent
}
var (
	// ErrNotFound is returned by FindTree when the package is in no known tree.
	ErrNotFound = errors.New("package could not be found locally")
	// ErrTreeNotFound is returned by FindTree when no valid tree is configured.
	ErrTreeNotFound = errors.New("no valid GOROOT or GOPATH could be found")
)
// FindTree takes an import or filesystem path and returns the
// tree where the package source should be and the package import path.
func FindTree(path string) (tree *Tree, pkg string, err error) {
	if isLocalPath(path) {
		// Resolve to an absolute, symlink-free form so it can be
		// compared against each tree's source-directory prefix.
		if path, err = filepath.Abs(path); err != nil {
			return
		}
		if path, err = filepath.EvalSymlinks(path); err != nil {
			return
		}
		for _, t := range Path {
			tpath := t.SrcDir() + string(filepath.Separator)
			if !filepath.HasPrefix(path, tpath) {
				continue
			}
			// The import path is what remains after the tree's
			// src directory prefix.
			tree = t
			pkg = path[len(tpath):]
			return
		}
		err = fmt.Errorf("path %q not inside a GOPATH", path)
		return
	}
	// Otherwise treat path as an import path: search each tree for its
	// source, falling back to the default tree when absent everywhere.
	tree = defaultTree
	pkg = path
	for _, t := range Path {
		if t.HasSrc(pkg) {
			tree = t
			return
		}
	}
	if tree == nil {
		err = ErrTreeNotFound
	} else {
		err = ErrNotFound
	}
	return
}
// isLocalPath returns whether the given path is local (/foo ./foo ../foo . ..)
// Windows paths that starts with drive letter (c:\foo c:foo) are considered local.
func isLocalPath(s string) bool {
	const sep = string(filepath.Separator)
	switch {
	case s == ".", s == "..":
		return true
	case filepath.HasPrefix(s, sep),
		filepath.HasPrefix(s, "."+sep),
		filepath.HasPrefix(s, ".."+sep):
		return true
	}
	// Drive-letter prefixed paths (Windows) also count as local.
	return filepath.VolumeName(s) != ""
}
var (
	// argument lists used by the build's gc and ld methods: one -I/-L
	// flag pair is appended per configured tree's object directory.
	gcImportArgs []string
	ldImportArgs []string
	// default tree for remote packages: the first valid GOPATH entry,
	// falling back to the GOROOT tree.
	defaultTree *Tree
)
// set up Path: parse and validate GOROOT and GOPATH variables
func init() {
	// GOROOT first: a valid GOROOT tree always heads the Path list.
	root := runtime.GOROOT()
	t, err := newTree(root)
	if err != nil {
		log.Printf("invalid GOROOT %q: %v", root, err)
	} else {
		t.Goroot = true
		Path = []*Tree{t}
	}
	// Then each GOPATH entry, skipping empties and invalid paths.
	for _, p := range filepath.SplitList(os.Getenv("GOPATH")) {
		if p == "" {
			continue
		}
		t, err := newTree(p)
		if err != nil {
			log.Printf("invalid GOPATH %q: %v", p, err)
			continue
		}
		Path = append(Path, t)
		gcImportArgs = append(gcImportArgs, "-I", t.PkgDir())
		ldImportArgs = append(ldImportArgs, "-L", t.PkgDir())
		// select first GOPATH entry as default
		if defaultTree == nil {
			defaultTree = t
		}
	}
	// use GOROOT if no valid GOPATH specified
	if defaultTree == nil && len(Path) > 0 {
		defaultTree = Path[0]
	}
}
|
package youtubedl
import (
"context"
"encoding/json"
"os"
"testing"
"github.com/wader/ydls/leaktest"
)
// testNetwork gates tests that reach out to the real network.
var testNetwork = os.Getenv("TEST_NETWORK") != ""

// testYoutubeldl gates tests that shell out to the youtube-dl binary.
var testYoutubeldl = os.Getenv("TEST_YOUTUBEDL") != ""
// TestParseInfo fetches metadata for several known URLs and checks that the
// title, thumbnail, raw JSON and per-format fields come back populated.
// It requires both TEST_NETWORK and TEST_YOUTUBEDL to be set, since it hits
// the real network via youtube-dl.
func TestParseInfo(t *testing.T) {
	if !testNetwork || !testYoutubeldl {
		t.Skip("TEST_NETWORK, TEST_YOUTUBEDL env not set")
	}
	for _, c := range []struct {
		url           string
		expectedTitle string
	}{
		{"https://soundcloud.com/timsweeney/thedrifter", "BIS Radio Show #793 with The Drifter"},
		{"https://vimeo.com/129701495", "Ben Nagy Fuzzing OSX At Scale"},
		{"https://www.infoq.com/presentations/Simple-Made-Easy", "Simple Made Easy"},
		{"https://www.youtube.com/watch?v=uVYWQJ5BB_w", "A Radiolab Producer on the Making of a Podcast"},
	} {
		// Each case runs inside its own closure so the deferred
		// goroutine-leak check fires per URL rather than once at the end.
		func() {
			defer leaktest.Check(t)()
			ctx, cancelFn := context.WithCancel(context.Background())
			yi, err := NewFromURL(ctx, c.url, nil)
			if err != nil {
				cancelFn()
				t.Errorf("failed to parse %s: %v", c.url, err)
				return
			}
			cancelFn()
			if yi.Title != c.expectedTitle {
				t.Errorf("%s: expected title '%s' got '%s'", c.url, c.expectedTitle, yi.Title)
			}
			if yi.Thumbnail != "" && len(yi.ThumbnailBytes) == 0 {
				t.Errorf("%s: expected thumbnail bytes", c.url)
			}
			// The raw youtube-dl output must at least be valid JSON.
			var dummy map[string]interface{}
			if err := json.Unmarshal(yi.rawJSON, &dummy); err != nil {
				t.Errorf("%s: failed to parse rawJSON", c.url)
			}
			if len(yi.Formats) == 0 {
				t.Errorf("%s: expected formats", c.url)
			}
			// Sanity-check the normalized codec/bitrate fields of
			// every reported format.
			for _, f := range yi.Formats {
				if f.FormatID == "" {
					t.Errorf("%s: %s expected FormatID not empty", c.url, f.FormatID)
				}
				if f.ACodec != "" && f.ACodec != "none" && f.Ext != "" && f.NormACodec == "" {
					t.Errorf("%s: %s expected NormACodec not empty for %s", c.url, f.FormatID, f.ACodec)
				}
				if f.VCodec != "" && f.VCodec != "none" && f.Ext != "" && f.NormVCodec == "" {
					t.Errorf("%s: %s expected NormVCodec not empty for %s", c.url, f.FormatID, f.VCodec)
				}
				if f.ABR+f.VBR+f.TBR != 0 && f.NormBR == 0 {
					t.Errorf("%s: %s expected NormBR not zero", c.url, f.FormatID)
				}
			}
			t.Logf("%s: OK\n", c.url)
		}()
	}
}
// TestFail checks that NewFromURL surfaces youtube-dl's own error message for
// a URL whose video does not exist. It requires TEST_NETWORK and
// TEST_YOUTUBEDL, since it hits the real network.
func TestFail(t *testing.T) {
	if !testNetwork || !testYoutubeldl {
		t.Skip("TEST_NETWORK, TEST_YOUTUBEDL env not set")
	}
	defer leaktest.Check(t)()
	// NOTE(review): despite the name, this is a nonexistent video ID rather
	// than a geo-blocked one.
	geoBlockedURL := "https://www.youtube.com/watch?v=aaaaaaaaaaa"
	_, err := NewFromURL(context.Background(), geoBlockedURL, nil)
	if err == nil {
		// Fatalf, not Errorf: err is dereferenced below, so continuing
		// with a nil err would panic the test binary.
		t.Fatalf("%s: should fail", geoBlockedURL)
	}
	expectedError := "aaaaaaaaaaa: YouTube said: This video does not exist."
	if err.Error() != expectedError {
		t.Errorf("%s: expected '%s' got '%s'", geoBlockedURL, expectedError, err.Error())
	}
}
Update youtube error message in test
package youtubedl
import (
"context"
"encoding/json"
"os"
"testing"
"github.com/wader/ydls/leaktest"
)
// testNetwork gates tests that reach out to the real network.
var testNetwork = os.Getenv("TEST_NETWORK") != ""

// testYoutubeldl gates tests that shell out to the youtube-dl binary.
var testYoutubeldl = os.Getenv("TEST_YOUTUBEDL") != ""
// TestParseInfo fetches metadata for several known URLs and checks that the
// title, thumbnail, raw JSON and per-format fields come back populated.
// It requires both TEST_NETWORK and TEST_YOUTUBEDL to be set, since it hits
// the real network via youtube-dl.
func TestParseInfo(t *testing.T) {
	if !testNetwork || !testYoutubeldl {
		t.Skip("TEST_NETWORK, TEST_YOUTUBEDL env not set")
	}
	for _, c := range []struct {
		url           string
		expectedTitle string
	}{
		{"https://soundcloud.com/timsweeney/thedrifter", "BIS Radio Show #793 with The Drifter"},
		{"https://vimeo.com/129701495", "Ben Nagy Fuzzing OSX At Scale"},
		{"https://www.infoq.com/presentations/Simple-Made-Easy", "Simple Made Easy"},
		{"https://www.youtube.com/watch?v=uVYWQJ5BB_w", "A Radiolab Producer on the Making of a Podcast"},
	} {
		// Each case runs inside its own closure so the deferred
		// goroutine-leak check fires per URL rather than once at the end.
		func() {
			defer leaktest.Check(t)()
			ctx, cancelFn := context.WithCancel(context.Background())
			yi, err := NewFromURL(ctx, c.url, nil)
			if err != nil {
				cancelFn()
				t.Errorf("failed to parse %s: %v", c.url, err)
				return
			}
			cancelFn()
			if yi.Title != c.expectedTitle {
				t.Errorf("%s: expected title '%s' got '%s'", c.url, c.expectedTitle, yi.Title)
			}
			if yi.Thumbnail != "" && len(yi.ThumbnailBytes) == 0 {
				t.Errorf("%s: expected thumbnail bytes", c.url)
			}
			// The raw youtube-dl output must at least be valid JSON.
			var dummy map[string]interface{}
			if err := json.Unmarshal(yi.rawJSON, &dummy); err != nil {
				t.Errorf("%s: failed to parse rawJSON", c.url)
			}
			if len(yi.Formats) == 0 {
				t.Errorf("%s: expected formats", c.url)
			}
			// Sanity-check the normalized codec/bitrate fields of
			// every reported format.
			for _, f := range yi.Formats {
				if f.FormatID == "" {
					t.Errorf("%s: %s expected FormatID not empty", c.url, f.FormatID)
				}
				if f.ACodec != "" && f.ACodec != "none" && f.Ext != "" && f.NormACodec == "" {
					t.Errorf("%s: %s expected NormACodec not empty for %s", c.url, f.FormatID, f.ACodec)
				}
				if f.VCodec != "" && f.VCodec != "none" && f.Ext != "" && f.NormVCodec == "" {
					t.Errorf("%s: %s expected NormVCodec not empty for %s", c.url, f.FormatID, f.VCodec)
				}
				if f.ABR+f.VBR+f.TBR != 0 && f.NormBR == 0 {
					t.Errorf("%s: %s expected NormBR not zero", c.url, f.FormatID)
				}
			}
			t.Logf("%s: OK\n", c.url)
		}()
	}
}
// TestFail checks that NewFromURL surfaces youtube-dl's own error message for
// a URL whose video does not exist. It requires TEST_NETWORK and
// TEST_YOUTUBEDL, since it hits the real network.
func TestFail(t *testing.T) {
	if !testNetwork || !testYoutubeldl {
		t.Skip("TEST_NETWORK, TEST_YOUTUBEDL env not set")
	}
	defer leaktest.Check(t)()
	// NOTE(review): despite the name, this is a nonexistent video ID rather
	// than a geo-blocked one.
	geoBlockedURL := "https://www.youtube.com/watch?v=aaaaaaaaaaa"
	_, err := NewFromURL(context.Background(), geoBlockedURL, nil)
	if err == nil {
		// Fatalf, not Errorf: err is dereferenced below, so continuing
		// with a nil err would panic the test binary.
		t.Fatalf("%s: should fail", geoBlockedURL)
	}
	expectedError := "aaaaaaaaaaa: YouTube said: Invalid parameters."
	if err.Error() != expectedError {
		t.Errorf("%s: expected '%s' got '%s'", geoBlockedURL, expectedError, err.Error())
	}
}
|
package jose
import (
"bytes"
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"errors"
"fmt"
)
// VerifierHMAC verifies HMAC signatures produced with Secret using the
// configured Hash (SHA-256 when built via NewVerifierHMAC).
type VerifierHMAC struct {
	KeyID  string
	Hash   crypto.Hash
	Secret []byte
}
// SignerHMAC signs payloads with the same key parameters its embedded
// VerifierHMAC verifies against.
type SignerHMAC struct {
	VerifierHMAC
}
// NewVerifierHMAC builds a SHA-256 VerifierHMAC from a JWK. Only the
// "HS256" algorithm (or an empty Alg field) is accepted.
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
	if jwk.Alg != "" && jwk.Alg != "HS256" {
		return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
	}
	v := VerifierHMAC{
		KeyID:  jwk.ID,
		Secret: jwk.Secret,
		Hash:   crypto.SHA256,
	}
	return &v, nil
}
// ID returns the identifier of the key used by this verifier.
func (v *VerifierHMAC) ID() string {
	return v.KeyID
}
// Alg returns the JOSE algorithm name implemented by this verifier.
func (v *VerifierHMAC) Alg() string {
	return "HS256"
}
// Verify recomputes the HMAC of data with the verifier's secret and compares
// it against sig, returning a non-nil error on mismatch.
//
// hmac.Equal compares the two MACs in constant time: bytes.Equal short-circuits
// on the first differing byte, which leaks, via timing, how much of a forged
// signature is correct.
func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
	h := hmac.New(v.Hash.New, v.Secret)
	h.Write(data)
	if !hmac.Equal(sig, h.Sum(nil)) {
		return errors.New("invalid hmac signature")
	}
	return nil
}
// NewSignerHMAC returns a SHA-256 SignerHMAC that signs with secret under
// key identifier kid.
func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
	return &SignerHMAC{
		VerifierHMAC: VerifierHMAC{
			KeyID:  kid,
			Secret: secret,
			Hash:   crypto.SHA256,
		},
	}
}
// Sign returns the HMAC of data computed with the signer's secret.
// The error is always nil; it exists to satisfy the signer interface.
func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
	h := hmac.New(s.Hash.New, s.Secret)
	h.Write(data)
	return h.Sum(nil), nil
}
jose: use constant time comparison for HMAC verification
This mirrors similar logic in square/go-jose[0] and dgrijalva/jwt-go[1].
[0] https://github.com/square/go-jose/blob/5848f914/symmetric.go#L317-L324
[1] https://github.com/dgrijalva/jwt-go/blob/63734eae/hmac.go#L71
Closes #98
package jose
import (
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"errors"
"fmt"
)
// VerifierHMAC verifies HMAC signatures produced with Secret using the
// configured Hash (SHA-256 when built via NewVerifierHMAC).
type VerifierHMAC struct {
	KeyID  string
	Hash   crypto.Hash
	Secret []byte
}
// SignerHMAC signs payloads with the same key parameters its embedded
// VerifierHMAC verifies against.
type SignerHMAC struct {
	VerifierHMAC
}
// NewVerifierHMAC builds a SHA-256 VerifierHMAC from a JWK. Only the
// "HS256" algorithm (or an empty Alg field) is accepted.
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
	if jwk.Alg != "" && jwk.Alg != "HS256" {
		return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
	}
	v := VerifierHMAC{
		KeyID:  jwk.ID,
		Secret: jwk.Secret,
		Hash:   crypto.SHA256,
	}
	return &v, nil
}
// ID returns the identifier of the key used by this verifier.
func (v *VerifierHMAC) ID() string {
	return v.KeyID
}
// Alg returns the JOSE algorithm name implemented by this verifier.
func (v *VerifierHMAC) Alg() string {
	return "HS256"
}
// Verify recomputes the HMAC of data with the verifier's secret and compares
// it against sig, returning a non-nil error on mismatch.
func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
	h := hmac.New(v.Hash.New, v.Secret)
	h.Write(data)
	// hmac.Equal compares two hmacs but does it in constant time to mitigate
	// timing-based attacks. See #98
	if !hmac.Equal(sig, h.Sum(nil)) {
		return errors.New("invalid hmac signature")
	}
	return nil
}
// NewSignerHMAC returns a SHA-256 SignerHMAC that signs with secret under
// key identifier kid.
func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
	return &SignerHMAC{
		VerifierHMAC: VerifierHMAC{
			KeyID:  kid,
			Secret: secret,
			Hash:   crypto.SHA256,
		},
	}
}
// Sign returns the HMAC of data computed with the signer's secret.
// The error is always nil; it exists to satisfy the signer interface.
func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
	h := hmac.New(s.Hash.New, s.Secret)
	h.Write(data)
	return h.Sum(nil), nil
}
|
package object
import (
"testing"
. "github.com/Zac-Garby/pluto/object"
)
// TestStringer checks the String() representation of each core object type
// against its expected literal form.
func TestStringer(t *testing.T) {
	var (
		n1 = &Number{Value: 0}
		n2 = &Number{Value: 1}
		s1 = &String{Value: "foo"}
		s2 = &String{Value: "bar"}
		c1 = &Char{Value: 'x'}
		c2 = &Char{Value: 'y'}
		a0 = &Array{Value: []Object{}}
		a1 = &Array{Value: []Object{n1}}
		a2 = &Array{Value: []Object{n1, n2}}
		t0 = &Tuple{Value: []Object{}}
		t1 = &Tuple{Value: []Object{n1}}
		t2 = &Tuple{Value: []Object{n1, n2}}
		m0 = &Map{Keys: make(map[string]Object), Values: make(map[string]Object)}
		m1 = &Map{Keys: make(map[string]Object), Values: make(map[string]Object)}
		m2 = &Map{Keys: make(map[string]Object), Values: make(map[string]Object)}
	)
	m1.Set(s1, n1)
	m2.Set(s1, n1)
	m2.Set(s2, n2)
	// Each case pairs an object with its expected String() output.
	cases := []struct {
		obj Object
		str string
	}{
		{n1, "0"},
		{n2, "1"},
		{s1, "foo"},
		{s2, "bar"},
		{c1, "x"},
		{c2, "y"},
		{a0, "[]"},
		{a1, "[0]"},
		{a2, "[0, 1]"},
		{t0, "()"},
		{t1, "(0)"},
		{t2, "(0, 1)"},
		{m0, "[:]"},
		{m1, "[foo: 0]"},
		// NOTE(review): this case assumes a deterministic key order for a
		// 2-entry map; Go map iteration order is randomized, so unless
		// Map.String sorts its keys this expectation is flaky.
		{m2, "[foo: 0, bar: 1]"},
	}
	for _, pair := range cases {
		str := pair.obj.String()
		if str != pair.str {
			t.Errorf("wrong string representation for %s, should be %s", str, pair.str)
		}
	}
}
Remove 2-map stringer test
package object
import (
"testing"
. "github.com/Zac-Garby/pluto/object"
)
// TestStringer checks the String() representation of each core object type
// against its expected literal form. Maps with more than one entry are not
// covered here since their key order is not deterministic.
func TestStringer(t *testing.T) {
	var (
		n1 = &Number{Value: 0}
		n2 = &Number{Value: 1}
		s1 = &String{Value: "foo"}
		s2 = &String{Value: "bar"}
		c1 = &Char{Value: 'x'}
		c2 = &Char{Value: 'y'}
		a0 = &Array{Value: []Object{}}
		a1 = &Array{Value: []Object{n1}}
		a2 = &Array{Value: []Object{n1, n2}}
		t0 = &Tuple{Value: []Object{}}
		t1 = &Tuple{Value: []Object{n1}}
		t2 = &Tuple{Value: []Object{n1, n2}}
		m0 = &Map{Keys: make(map[string]Object), Values: make(map[string]Object)}
		m1 = &Map{Keys: make(map[string]Object), Values: make(map[string]Object)}
	)
	m1.Set(s1, n1)
	// Each case pairs an object with its expected String() output.
	cases := []struct {
		obj Object
		str string
	}{
		{n1, "0"},
		{n2, "1"},
		{s1, "foo"},
		{s2, "bar"},
		{c1, "x"},
		{c2, "y"},
		{a0, "[]"},
		{a1, "[0]"},
		{a2, "[0, 1]"},
		{t0, "()"},
		{t1, "(0)"},
		{t2, "(0, 1)"},
		{m0, "[:]"},
		{m1, "[foo: 0]"},
	}
	for _, pair := range cases {
		str := pair.obj.String()
		if str != pair.str {
			t.Errorf("wrong string representation for %s, should be %s", str, pair.str)
		}
	}
}
|
package utils
//Sieve uses a sieve of Eratosthenes to fill a slice with all primes up to
//amount (inclusive). It returns nil when amount < 2, since there are no
//primes below 2.
func Sieve(amount int) []int {
	// Guard: without this, make([]int, amount-1) panics for amount < 1.
	if amount < 2 {
		return nil
	}
	//fill a slice with the candidates 2..amount
	n := make([]int, amount-1)
	for i := range n {
		n[i] = i + 2
	}
	//Starting number for the tests
	number := 2
	//stop at sqrt(highest prime we will get)
	for number*number <= len(n)+2 {
		// cross out every multiple of number
		for i := number + number; i < len(n)+2; i += number {
			n[i-2] = 0
		}
		// advance to the next candidate that was not crossed out
		number++
		for n[number-2] == 0 {
			number++
		}
	}
	//filter out the primes: everything not crossed out survives
	var ret []int
	for _, v := range n {
		if v != 0 {
			ret = append(ret, v)
		}
	}
	return ret
}
Beginning work on a general utils package
package utils
//Sieve uses a sieve of Eratosthenes to build a slice of all primes up to amount.
func Sieve(amount int) []int {
	// Candidate values 2..amount; index i holds the value i+2.
	candidates := make([]int, amount-1)
	for i := range candidates {
		candidates[i] = i + 2
	}
	// Cross off multiples of each surviving candidate, stopping once the
	// candidate passes sqrt of the largest value.
	for p := 2; p*p <= len(candidates)+2; {
		for m := p + p; m < len(candidates)+2; m += p {
			candidates[m-2] = 0
		}
		// Advance p to the next value that was not crossed off.
		p++
		for candidates[p-2] == 0 {
			p++
		}
	}
	// Everything still standing is prime.
	var primes []int
	for _, v := range candidates {
		if v != 0 {
			primes = append(primes, v)
		}
	}
	return primes
}
//IsPrime reports whether n appears in primes; the caller is responsible for
//providing a large enough, ascending prime set (e.g. from Sieve) covering n.
func IsPrime(n int, primes []int) bool {
	for _, p := range primes {
		switch {
		case p == n:
			return true
		case p > n:
			// Past n without a match: n is composite (or missing).
			return false
		}
	}
	return false
}
// Factorial returns n! computed iteratively; any n < 2 yields 1.
func Factorial(n int) int {
	result := 1
	for v := 2; v <= n; v++ {
		result *= v
	}
	return result
}
|
package lnwallet
import (
"crypto/sha256"
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/roasbeef/btcd/chaincfg"
"github.com/roasbeef/btcutil/hdkeychain"
"github.com/lightningnetwork/lnd/shachain"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/txscript"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
"github.com/roasbeef/btcutil/txsort"
)
const (
	// msgBufferSize is the size of the buffered queue of requests to the
	// wallet from the outside world.
	msgBufferSize = 100
	// revocationRootIndex is the top level HD key index from which secrets
	// used to generate producer roots should be derived from.
	revocationRootIndex = hdkeychain.HardenedKeyStart + 1
	// identityKeyIndex is the top level HD key index which is used to
	// generate/rotate identity keys.
	//
	// TODO(roasbeef): should instead be child to make room for future
	// rotations, etc.
	identityKeyIndex = hdkeychain.HardenedKeyStart + 2
	// commitFee is the fee applied to commitment transactions.
	// NOTE(review): unit is presumably satoshis — confirm at usage sites.
	commitFee = 5000
)
var (
	// Namespace bucket keys used to partition the wallet's database store.
	lightningNamespaceKey = []byte("ln-wallet")
	waddrmgrNamespaceKey  = []byte("waddrmgr")
	wtxmgrNamespaceKey    = []byte("wtxmgr")
)
// ErrInsufficientFunds is a type matching the error interface which is
// returned when coin selection for a new funding transaction fails due to
// having an insufficient amount of confirmed funds.
//
// NOTE(review): field roles are inferred from the names only — confirm
// against the construction sites before relying on them.
type ErrInsufficientFunds struct {
	amountAvailable btcutil.Amount
	amountSelected  btcutil.Amount
}
// Error returns a human-readable description of the failed coin selection.
//
// NOTE(review): the format string reads "need X only have Y available" but
// is passed amountAvailable as X and amountSelected as Y — the operands look
// swapped relative to the field names. Confirm against the call sites before
// changing either the fields or the message.
func (e *ErrInsufficientFunds) Error() string {
	return fmt.Sprintf("not enough outputs to create funding transaction,"+
		" need %v only have %v available", e.amountAvailable,
		e.amountSelected)
}
// initFundingReserveMsg is the first message sent to initiate the workflow
// required to open a payment channel with a remote peer. The initial required
// parameters are configurable across channels. These parameters are to be
// chosen depending on the fee climate within the network, and time value of
// funds to be locked up within the channel. Upon success a ChannelReservation
// will be created in order to track the lifetime of this pending channel.
// Outputs selected will be 'locked', making them unavailable, for any other
// pending reservations. Therefore, all channels in reservation limbo will be
// periodically reclaimed after a timeout period in order to avoid
// "exhaustion" attacks.
//
// TODO(roasbeef): zombie reservation sweeper goroutine.
type initFundingReserveMsg struct {
	// nodeID is the ID of the remote node we would like to open a
	// channel with.
	nodeID *btcec.PublicKey
	// nodeAddr is the IP address plus port that we used to either
	// establish or accept the connection which led to the negotiation of
	// this funding workflow.
	nodeAddr *net.TCPAddr
	// numConfs is the number of confirmations required before the channel
	// is considered open.
	numConfs uint16
	// fundingAmount is the amount of funds requested for this channel.
	fundingAmount btcutil.Amount
	// capacity is the total capacity of the channel which includes the
	// amount of funds the remote party contributes (if any).
	capacity btcutil.Amount
	// minFeeRate is the minimum accepted satoshis/KB fee for the funding
	// transaction. In order to ensure timely confirmation, it is
	// recommended that this fee should be generous, paying some multiple
	// of the accepted base fee rate of the network.
	// TODO(roasbeef): integrate fee estimation project...
	minFeeRate btcutil.Amount
	// ourDustLimit is the threshold below which no HTLC output should be
	// generated for our commitment transaction; ie. HTLCs below
	// this amount are not enforceable onchain from our point of view.
	ourDustLimit btcutil.Amount
	// pushSat is the number of satoshis that should be pushed over to the
	// responder as part of the initial channel creation.
	pushSat btcutil.Amount
	// csvDelay is the delay on the "pay-to-self" output(s) of the
	// commitment transaction.
	csvDelay uint32
	// err is a channel in which all errors will be sent across. Will be
	// nil if this initial set is successful.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
	// resp carries a ChannelReservation with our contributions filled in
	// in the case of a successful reservation initiation. In the case of
	// an error, this will read a nil pointer.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	resp chan *ChannelReservation
}
// fundingReserveCancelMsg is a message reserved for cancelling an existing
// channel reservation identified by its reservation ID. Cancelling a
// reservation frees its locked outputs up, for inclusion within further
// reservations.
type fundingReserveCancelMsg struct {
	// pendingFundingID identifies the reservation to cancel.
	pendingFundingID uint64
	// err reports the outcome of the cancellation to the caller.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error // Buffered
}
// addContributionMsg represents a message executing the second phase of the
// channel reservation workflow. This message carries the counterparty's
// "contribution" to the payment channel. In the case that this message is
// processed without generating any errors, then channel reservation will then
// be able to construct the funding tx, both commitment transactions, and
// finally generate signatures for all our inputs to the funding transaction,
// and for the remote node's version of the commitment transaction.
type addContributionMsg struct {
	// pendingFundingID identifies the reservation being contributed to.
	pendingFundingID uint64
	// TODO(roasbeef): Should also carry SPV proofs in we're in SPV mode
	contribution *ChannelContribution
	// err reports the outcome of processing the contribution.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addSingleContributionMsg represents a message executing the second phase of
// a single funder channel reservation workflow. This message carries the
// counterparty's "contribution" to the payment channel. As this message is
// sent when on the responding side to a single funder workflow, no further
// action apart from storing the provided contribution is carried out.
type addSingleContributionMsg struct {
	// pendingFundingID identifies the reservation being contributed to.
	pendingFundingID uint64
	contribution     *ChannelContribution
	// err reports the outcome of storing the contribution.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addCounterPartySigsMsg represents the final message required to complete,
// and 'open' a payment channel. This message carries the counterparty's
// signatures for each of their inputs to the funding transaction, and also a
// signature allowing us to spend our version of the commitment transaction.
// If we're able to verify all the signatures are valid, the funding
// transaction will be broadcast to the network. After the funding transaction
// gains a configurable number of confirmations, the channel is officially
// considered 'open'.
type addCounterPartySigsMsg struct {
	// pendingFundingID identifies the reservation being completed.
	pendingFundingID uint64
	// theirFundingInputScripts should be in order of sorted inputs that
	// are theirs. Sorting is done in accordance to BIP-69:
	// https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki.
	theirFundingInputScripts []*InputScript
	// theirCommitmentSig should be 1/2 of the signatures needed to
	// successfully spend our version of the commitment transaction.
	theirCommitmentSig []byte
	// completeChan is used to return the completed channel after the
	// wallet has completed all of its stages in the funding process.
	completeChan chan *channeldb.OpenChannel
	// err reports any failure during the final funding stage.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addSingleFunderSigsMsg represents the next-to-last message required to
// complete a single-funder channel workflow. Once the initiator is able to
// construct the funding transaction, they send both the outpoint and a
// signature for our version of the commitment transaction. Once this message
// is processed we (the responder) are able to construct both commitment
// transactions, signing the remote party's version.
type addSingleFunderSigsMsg struct {
	// pendingFundingID identifies the reservation being completed.
	pendingFundingID uint64
	// fundingOutpoint is the outpoint of the completed funding
	// transaction as assembled by the workflow initiator.
	fundingOutpoint *wire.OutPoint
	// revokeKey is the revocation public key derived by the remote node to
	// be used within the initial version of the commitment transaction we
	// construct for them.
	revokeKey *btcec.PublicKey
	// theirCommitmentSig are the 1/2 of the signatures needed to
	// successfully spend our version of the commitment transaction.
	theirCommitmentSig []byte
	// obsfucator is the bytes to be used to obfuscate the state hints on
	// the commitment transaction.
	obsfucator [StateHintSize]byte
	// completeChan is used to return the completed channel after the
	// wallet has completed all of its stages in the funding process.
	completeChan chan *channeldb.OpenChannel
	// err reports any failure during this funding stage.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// LightningWallet is a domain specific, yet general Bitcoin wallet capable of
// executing workflow required to interact with the Lightning Network. It is
// domain specific in the sense that it understands all the fancy scripts used
// within the Lightning Network, channel lifetimes, etc. However, it embeds a
// general purpose Bitcoin wallet within it. Therefore, it is also able to
// serve as a regular Bitcoin wallet which uses HD keys. The wallet is highly
// concurrent internally. All communication, and requests towards the wallet
// are dispatched as messages over channels, ensuring thread safety across all
// operations. Interaction has been designed independent of any peer-to-peer
// communication protocol, allowing the wallet to be self-contained and
// embeddable within future projects interacting with the Lightning Network.
// NOTE: At the moment the wallet requires a btcd full node, as it's dependent
// on btcd's websockets notifications as event triggers during the lifetime of
// a channel. However, once the chainntnfs package is complete, the wallet
// will be compatible with multiple RPC/notification services such as Electrum,
// Bitcoin Core + ZeroMQ, etc. Eventually, the wallet won't require a full-node
// at all, as SPV support is integrated into btcwallet.
type LightningWallet struct {
	// This mutex is to be held when generating external keys to be used
	// as multi-sig, and commitment keys within the channel.
	keyGenMtx sync.RWMutex
	// This mutex MUST be held when performing coin selection in order to
	// avoid inadvertently creating multiple funding transaction which
	// double spend inputs across each other.
	coinSelectMtx sync.RWMutex
	// ChannelDB is a wrapper around a namespace within boltdb reserved
	// for ln-based wallet metadata. See the 'channeldb' package for
	// further information.
	ChannelDB *channeldb.DB
	// chainNotifier is used in order to obtain notifications about
	// funding transaction reaching a specified confirmation depth, and
	// to catch counterparty's broadcasting revoked commitment states.
	chainNotifier chainntnfs.ChainNotifier
	// WalletController is the core wallet, all non Lightning Network
	// specific interaction is proxied to the internal wallet.
	WalletController
	// Signer is the wallet's current Signer implementation. This Signer is
	// used to generate signature for all inputs to potential funding
	// transactions, as well as for spends from the funding transaction to
	// update the commitment state.
	Signer Signer
	// ChainIO is an instance of the BlockChainIO interface. ChainIO is
	// used to lookup the existence of outputs within the UTXO set.
	ChainIO BlockChainIO
	// rootKey is the root HD key derived from a WalletController private
	// key. This rootKey is used to derive all LN specific secrets.
	rootKey *hdkeychain.ExtendedKey
	// msgChan is the channel all messages to the wallet are sent across.
	msgChan chan interface{}
	// fundingLimbo holds incomplete payment channels. An intent to create
	// a payment channel is tracked as a "reservation" within limbo. Once
	// the final signatures have been exchanged, a reservation is removed
	// from limbo. Each reservation is tracked by a unique monotonically
	// increasing integer. All requests concerning the channel MUST carry
	// a valid, active funding ID.
	fundingLimbo  map[uint64]*ChannelReservation
	nextFundingID uint64
	limboMtx      sync.RWMutex
	// TODO(roasbeef): zombie garbage collection routine to solve
	// lost-object/starvation problem/attack.
	// lockedOutPoints is a set of the currently locked outpoint. This
	// information is kept in order to provide an easy way to unlock all
	// the currently locked outpoints.
	lockedOutPoints map[wire.OutPoint]struct{}
	// netParams identifies the Bitcoin network the wallet operates on.
	netParams *chaincfg.Params
	// started/shutdown guard Startup and Shutdown against repeated calls.
	started  int32
	shutdown int32
	quit     chan struct{}
	wg       sync.WaitGroup
	// TODO(roasbeef): handle wallet lock/unlock
}
// NewLightningWallet creates/opens and initializes a LightningWallet instance.
// If the wallet has never been created (according to the passed dataDir),
// first-time setup is executed.
//
// NOTE(review): the mention of dataDir above has no matching parameter; the
// doc likely predates a signature change — confirm and update.
//
// NOTE: The passed channeldb, and ChainNotifier should already be fully
// initialized/started before being passed as a function argument.
func NewLightningWallet(cdb *channeldb.DB, notifier chainntnfs.ChainNotifier,
	wallet WalletController, signer Signer, bio BlockChainIO,
	netParams *chaincfg.Params) (*LightningWallet, error) {
	// TODO(roasbeef): need another wallet level config
	// Fetch the root derivation key from the wallet's HD chain. We'll use
	// this to generate specific Lightning related secrets on the fly.
	rootKey, err := wallet.FetchRootKey()
	if err != nil {
		return nil, err
	}
	// TODO(roasbeef): always re-derive on the fly?
	rootKeyRaw := rootKey.Serialize()
	rootMasterKey, err := hdkeychain.NewMaster(rootKeyRaw, netParams)
	if err != nil {
		return nil, err
	}
	return &LightningWallet{
		rootKey:          rootMasterKey,
		chainNotifier:    notifier,
		Signer:           signer,
		WalletController: wallet,
		ChainIO:          bio,
		ChannelDB:        cdb,
		msgChan:          make(chan interface{}, msgBufferSize),
		nextFundingID:    0,
		fundingLimbo:     make(map[uint64]*ChannelReservation),
		lockedOutPoints:  make(map[wire.OutPoint]struct{}),
		netParams:        netParams,
		quit:             make(chan struct{}),
	}, nil
}
// Startup establishes a connection to the RPC source, and spins up all
// goroutines required to handle incoming messages.
// It is idempotent: only the first call performs any work.
func (l *LightningWallet) Startup() error {
	// Already started? The atomic add lets only the first caller proceed.
	if atomic.AddInt32(&l.started, 1) != 1 {
		return nil
	}
	// Start the underlying wallet controller.
	if err := l.Start(); err != nil {
		return err
	}
	l.wg.Add(1)
	// TODO(roasbeef): multiple request handlers?
	go l.requestHandler()
	return nil
}
// Shutdown gracefully stops the wallet, and all active goroutines.
// It is idempotent: only the first call performs any work.
func (l *LightningWallet) Shutdown() error {
	if atomic.AddInt32(&l.shutdown, 1) != 1 {
		return nil
	}
	// Signal the underlying wallet controller to shutdown, waiting until
	// all active goroutines have been shutdown.
	if err := l.Stop(); err != nil {
		return err
	}
	// Closing quit stops requestHandler; Wait blocks until it exits.
	close(l.quit)
	l.wg.Wait()
	return nil
}
// LockedOutpoints returns a list of all currently locked outpoint.
func (l *LightningWallet) LockedOutpoints() []*wire.OutPoint {
	outPoints := make([]*wire.OutPoint, 0, len(l.lockedOutPoints))
	for outPoint := range l.lockedOutPoints {
		// Copy the loop variable before taking its address: &outPoint
		// would alias the single iteration variable, making every
		// returned pointer refer to the same (last-seen) outpoint.
		outPoint := outPoint
		outPoints = append(outPoints, &outPoint)
	}
	return outPoints
}
// ResetReservations resets the volatile wallet state which tracks all
// currently active reservations.
//
// NOTE(review): this mutates fundingLimbo and lockedOutPoints without taking
// limboMtx/coinSelectMtx — confirm callers serialize access.
func (l *LightningWallet) ResetReservations() {
	l.nextFundingID = 0
	l.fundingLimbo = make(map[uint64]*ChannelReservation)
	// Unlock every previously locked outpoint before dropping the set.
	for outpoint := range l.lockedOutPoints {
		l.UnlockOutpoint(outpoint)
	}
	l.lockedOutPoints = make(map[wire.OutPoint]struct{})
}
// ActiveReservations returns a slice of all the currently active
// (non-cancelled) reservations.
//
// NOTE(review): fundingLimbo is read without holding limboMtx — confirm
// callers serialize access.
func (l *LightningWallet) ActiveReservations() []*ChannelReservation {
	reservations := make([]*ChannelReservation, 0, len(l.fundingLimbo))
	for _, reservation := range l.fundingLimbo {
		reservations = append(reservations, reservation)
	}
	return reservations
}
// GetIdentitykey returns the identity private key of the wallet.
//
// The key is derived on demand as the child of the wallet's root key at
// identityKeyIndex.
// TODO(roasbeef): should be moved elsewhere
func (l *LightningWallet) GetIdentitykey() (*btcec.PrivateKey, error) {
	child, err := l.rootKey.Child(identityKeyIndex)
	if err != nil {
		return nil, err
	}

	return child.ECPrivKey()
}
// requestHandler is the primary goroutine(s) responsible for handling, and
// dispatching replies to all messages. It serializes all funding workflow
// mutations: each message type corresponds to one step of the reservation
// workflow, so handlers never run concurrently with each other.
func (l *LightningWallet) requestHandler() {
out:
	for {
		select {
		case m := <-l.msgChan:
			// Dispatch on the concrete message type; unknown types
			// are silently dropped.
			switch msg := m.(type) {
			case *initFundingReserveMsg:
				l.handleFundingReserveRequest(msg)
			case *fundingReserveCancelMsg:
				l.handleFundingCancelRequest(msg)
			case *addSingleContributionMsg:
				l.handleSingleContribution(msg)
			case *addContributionMsg:
				l.handleContributionMsg(msg)
			case *addSingleFunderSigsMsg:
				l.handleSingleFunderSigs(msg)
			case *addCounterPartySigsMsg:
				l.handleFundingCounterPartySigs(msg)
			}
		case <-l.quit:
			// TODO: do some clean up
			break out
		}
	}

	l.wg.Done()
}
// InitChannelReservation kicks off the 3-step workflow required to successfully
// open a payment channel with a remote node. As part of the funding
// reservation, the inputs selected for the funding transaction are 'locked'.
// This ensures that multiple channel reservations aren't double spending the
// same inputs in the funding transaction. If reservation initialization is
// successful, a ChannelReservation containing our completed contribution is
// returned. Our contribution contains all the items necessary to allow the
// counterparty to build the funding transaction, and both versions of the
// commitment transaction. Otherwise, an error occurred a nil pointer along with
// an error are returned.
//
// Once a ChannelReservation has been obtained, two additional steps must be
// processed before a payment channel can be considered 'open'. The second step
// validates, and processes the counterparty's channel contribution. The third,
// and final step verifies all signatures for the inputs of the funding
// transaction, and that the signature we records for our version of the
// commitment transaction is valid.
func (l *LightningWallet) InitChannelReservation(capacity,
	ourFundAmt btcutil.Amount, theirID *btcec.PublicKey,
	theirAddr *net.TCPAddr, numConfs uint16,
	csvDelay uint32, ourDustLimit btcutil.Amount,
	pushSat btcutil.Amount) (*ChannelReservation, error) {

	// TODO(roasbeef): make the above into an initial config as part of the
	// refactor to implement spec compliant funding flow

	// Both channels are buffered (capacity 1) so the request handler can
	// deliver the response and error without blocking on this caller.
	errChan := make(chan error, 1)
	respChan := make(chan *ChannelReservation, 1)

	// Hand the request off to the serialized requestHandler goroutine,
	// then block until both the reservation and the error are delivered.
	l.msgChan <- &initFundingReserveMsg{
		capacity:      capacity,
		numConfs:      numConfs,
		fundingAmount: ourFundAmt,
		csvDelay:      csvDelay,
		ourDustLimit:  ourDustLimit,
		pushSat:       pushSat,
		nodeID:        theirID,
		nodeAddr:      theirAddr,
		err:           errChan,
		resp:          respChan,
	}

	return <-respChan, <-errChan
}
// handleFundingReserveRequest processes a message intending to create, and
// validate a funding reservation request.
//
// On success the new reservation is placed into the funding limbo map and
// sent back on req.resp; on any failure a nil reservation and the error are
// sent instead. Every exit path writes to both req.err and req.resp (resp is
// skipped only on the limbo-insert path where it is sent last).
func (l *LightningWallet) handleFundingReserveRequest(req *initFundingReserveMsg) {
	// It isn't possible to create a channel with zero funds committed.
	if req.fundingAmount+req.capacity == 0 {
		req.err <- fmt.Errorf("cannot have channel with zero " +
			"satoshis funded")
		req.resp <- nil
		return
	}

	// Reserve a unique ID for this reservation; the atomic increment keeps
	// IDs unique even though this handler is the only mutator.
	id := atomic.AddUint64(&l.nextFundingID, 1)
	totalCapacity := req.capacity + commitFee
	reservation := NewChannelReservation(totalCapacity, req.fundingAmount,
		req.minFeeRate, l, id, req.numConfs, req.pushSat)

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	reservation.Lock()
	defer reservation.Unlock()

	reservation.nodeAddr = req.nodeAddr
	reservation.ourContribution.CsvDelay = req.csvDelay
	reservation.partialState.NumConfsRequired = req.numConfs
	reservation.partialState.IdentityPub = req.nodeID
	reservation.partialState.LocalCsvDelay = req.csvDelay
	reservation.partialState.OurDustLimit = req.ourDustLimit
	ourContribution := reservation.ourContribution

	// If we're on the receiving end of a single funder channel then we
	// don't need to perform any coin selection. Otherwise, attempt to
	// obtain enough coins to meet the required funding amount.
	if req.fundingAmount != 0 {
		// TODO(roasbeef): consult model for proper fee rate on funding
		// tx
		feeRate := uint64(10)
		amt := req.fundingAmount + commitFee
		err := l.selectCoinsAndChange(feeRate, amt, ourContribution)
		if err != nil {
			req.err <- err
			req.resp <- nil
			return
		}
	}

	// Grab two fresh keys from our HD chain, one will be used for the
	// multi-sig funding transaction, and the other for the commitment
	// transaction.
	multiSigKey, err := l.NewRawKey()
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	commitKey, err := l.NewRawKey()
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	reservation.partialState.OurMultiSigKey = multiSigKey
	ourContribution.MultiSigKey = multiSigKey
	reservation.partialState.OurCommitKey = commitKey
	ourContribution.CommitKey = commitKey

	// Generate a fresh address to be used in the case of a cooperative
	// channel close.
	deliveryAddress, err := l.NewAddress(WitnessPubKey, false)
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	deliveryScript, err := txscript.PayToAddrScript(deliveryAddress)
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	reservation.partialState.OurDeliveryScript = deliveryScript
	ourContribution.DeliveryAddress = deliveryAddress

	// Create a limbo and record entry for this newly pending funding
	// request.
	l.limboMtx.Lock()
	l.fundingLimbo[id] = reservation
	l.limboMtx.Unlock()

	// Funding reservation request successfully handled. The funding inputs
	// will be marked as unavailable until the reservation is either
	// completed, or cancelled.
	req.resp <- reservation
	req.err <- nil
}
// handleFundingReserveCancel cancels an existing channel reservation. As part
// of the cancellation, outputs previously selected as inputs for the funding
// transaction via coin selection are freed allowing future reservations to
// include them.
func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMsg) {
	// TODO(roasbeef): holding lock too long
	l.limboMtx.Lock()
	defer l.limboMtx.Unlock()

	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	if !ok {
		// TODO(roasbeef): make new error, "unknown funding state" or something
		req.err <- fmt.Errorf("attempted to cancel non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Mark all previously locked outpoints as usable for future funding
	// requests. Both the wallet-level tracking map and the underlying
	// wallet controller's lock are released.
	for _, unusedInput := range pendingReservation.ourContribution.Inputs {
		delete(l.lockedOutPoints, unusedInput.PreviousOutPoint)
		l.UnlockOutpoint(unusedInput.PreviousOutPoint)
	}

	// TODO(roasbeef): is it even worth it to keep track of unused keys?

	// TODO(roasbeef): Is it possible to mark the unused change also as
	// available?

	delete(l.fundingLimbo, req.pendingFundingID)

	req.err <- nil
}
// handleContributionMsg processes the second workflow step for the lifetime of
// a channel reservation. Upon completion, the reservation will carry a
// completed funding transaction (minus the counterparty's input signatures),
// both versions of the commitment transaction, and our signature for their
// version of the commitment transaction.
func (l *LightningWallet) handleContributionMsg(req *addContributionMsg) {
	l.limboMtx.Lock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.Unlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Create a blank, fresh transaction. Soon to be a complete funding
	// transaction which will allow opening a lightning channel.
	pendingReservation.fundingTx = wire.NewMsgTx(1)
	fundingTx := pendingReservation.fundingTx

	// Some temporary variables to cut down on the resolution verbosity.
	pendingReservation.theirContribution = req.contribution
	theirContribution := req.contribution
	ourContribution := pendingReservation.ourContribution

	// Add all multi-party inputs and outputs to the transaction.
	for _, ourInput := range ourContribution.Inputs {
		fundingTx.AddTxIn(ourInput)
	}
	for _, theirInput := range theirContribution.Inputs {
		fundingTx.AddTxIn(theirInput)
	}
	for _, ourChangeOutput := range ourContribution.ChangeOutputs {
		fundingTx.AddTxOut(ourChangeOutput)
	}
	for _, theirChangeOutput := range theirContribution.ChangeOutputs {
		fundingTx.AddTxOut(theirChangeOutput)
	}

	ourKey := pendingReservation.partialState.OurMultiSigKey
	theirKey := theirContribution.MultiSigKey

	// Finally, add the 2-of-2 multi-sig output which will set up the lightning
	// channel.
	channelCapacity := int64(pendingReservation.partialState.Capacity)
	witnessScript, multiSigOut, err := GenFundingPkScript(ourKey.SerializeCompressed(),
		theirKey.SerializeCompressed(), channelCapacity)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.FundingWitnessScript = witnessScript

	// Sort the transaction. Since both sides agree to a canonical
	// ordering, by sorting we no longer need to send the entire
	// transaction. Only signatures will be exchanged.
	fundingTx.AddTxOut(multiSigOut)
	txsort.InPlaceSort(pendingReservation.fundingTx)

	// Next, sign all inputs that are ours, collecting the signatures in
	// order of the inputs. Inputs not owned by this wallet (ErrNotMine)
	// are the counterparty's and are skipped.
	pendingReservation.ourFundingInputScripts = make([]*InputScript, 0, len(ourContribution.Inputs))
	signDesc := SignDescriptor{
		HashType:  txscript.SigHashAll,
		SigHashes: txscript.NewTxSigHashes(fundingTx),
	}
	for i, txIn := range fundingTx.TxIn {
		info, err := l.FetchInputInfo(&txIn.PreviousOutPoint)
		if err == ErrNotMine {
			continue
		} else if err != nil {
			req.err <- err
			return
		}

		signDesc.Output = info
		signDesc.InputIndex = i

		inputScript, err := l.Signer.ComputeInputScript(fundingTx, &signDesc)
		if err != nil {
			req.err <- err
			return
		}

		txIn.SignatureScript = inputScript.ScriptSig
		txIn.Witness = inputScript.Witness
		pendingReservation.ourFundingInputScripts = append(
			pendingReservation.ourFundingInputScripts,
			inputScript,
		)
	}

	// Locate the index of the multi-sig outpoint in order to record it
	// since the outputs are canonically sorted. If this is a single funder
	// workflow, then we'll also need to send this to the remote node.
	fundingTxID := fundingTx.TxHash()
	_, multiSigIndex := FindScriptOutputIndex(fundingTx, multiSigOut.PkScript)
	fundingOutpoint := wire.NewOutPoint(&fundingTxID, multiSigIndex)
	pendingReservation.partialState.FundingOutpoint = fundingOutpoint

	// Initialize an empty sha-chain for them, tracking the current pending
	// revocation hash (we don't yet know the preimage so we can't add it
	// to the chain).
	s := shachain.NewRevocationStore()
	pendingReservation.partialState.RevocationStore = s
	pendingReservation.partialState.TheirCurrentRevocation = theirContribution.RevocationKey

	masterElkremRoot, err := l.deriveMasterRevocationRoot()
	if err != nil {
		req.err <- err
		return
	}

	// Now that we have their commitment key, we can create the revocation
	// key for the first version of our commitment transaction. To do so,
	// we'll first create our root, then produce the first pre-image.
	root := deriveRevocationRoot(masterElkremRoot, ourKey, theirKey)
	producer := shachain.NewRevocationProducer(*root)
	pendingReservation.partialState.RevocationProducer = producer
	firstPreimage, err := producer.AtIndex(0)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitKey := theirContribution.CommitKey
	ourRevokeKey := DeriveRevocationPubkey(theirCommitKey, firstPreimage[:])

	// Create the txIn to our commitment transaction; required to construct
	// the commitment transactions.
	fundingTxIn := wire.NewTxIn(wire.NewOutPoint(&fundingTxID, multiSigIndex), nil, nil)

	// With the funding tx complete, create both commitment transactions.
	// TODO(roasbeef): much cleanup + de-duplication
	ourBalance := pendingReservation.partialState.OurBalance
	theirBalance := pendingReservation.partialState.TheirBalance
	ourCommitKey := ourContribution.CommitKey
	ourCommitTx, err := CreateCommitTx(fundingTxIn, ourCommitKey, theirCommitKey,
		ourRevokeKey, ourContribution.CsvDelay,
		ourBalance, theirBalance, pendingReservation.partialState.OurDustLimit)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitTx, err := CreateCommitTx(fundingTxIn, theirCommitKey, ourCommitKey,
		theirContribution.RevocationKey, theirContribution.CsvDelay,
		theirBalance, ourBalance, pendingReservation.partialState.TheirDustLimit)
	if err != nil {
		req.err <- err
		return
	}

	// With both commitment transactions constructed, generate the state
	// obfuscator then use it to encode the current state number within both
	// commitment transactions. Non-initiators use the zero obfuscator.
	// TODO(roasbeef): define obsfucator scheme for dual funder
	var stateObsfucator [StateHintSize]byte
	if pendingReservation.partialState.IsInitiator {
		stateObsfucator, err = deriveStateHintObfuscator(producer)
		if err != nil {
			req.err <- err
			return
		}
	}
	err = initStateHints(ourCommitTx, theirCommitTx, stateObsfucator)
	if err != nil {
		req.err <- err
		return
	}

	// Sort both transactions according to the agreed upon canonical
	// ordering. This lets us skip sending the entire transaction over,
	// instead we'll just send signatures.
	txsort.InPlaceSort(ourCommitTx)
	txsort.InPlaceSort(theirCommitTx)

	deliveryScript, err := txscript.PayToAddrScript(theirContribution.DeliveryAddress)
	if err != nil {
		req.err <- err
		return
	}

	// Record newly available information within the open channel state.
	pendingReservation.partialState.RemoteCsvDelay = theirContribution.CsvDelay
	pendingReservation.partialState.TheirDeliveryScript = deliveryScript
	pendingReservation.partialState.ChanID = fundingOutpoint
	pendingReservation.partialState.TheirCommitKey = theirCommitKey
	pendingReservation.partialState.TheirMultiSigKey = theirContribution.MultiSigKey
	pendingReservation.partialState.OurCommitTx = ourCommitTx
	pendingReservation.partialState.StateHintObsfucator = stateObsfucator
	pendingReservation.ourContribution.RevocationKey = ourRevokeKey

	// Generate a signature for their version of the initial commitment
	// transaction.
	signDesc = SignDescriptor{
		WitnessScript: witnessScript,
		PubKey:        ourKey,
		Output:        multiSigOut,
		HashType:      txscript.SigHashAll,
		SigHashes:     txscript.NewTxSigHashes(theirCommitTx),
		InputIndex:    0,
	}
	sigTheirCommit, err := l.Signer.SignOutputRaw(theirCommitTx, &signDesc)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.ourCommitmentSig = sigTheirCommit

	req.err <- nil
}
// handleSingleContribution is called as the second step to a single funder
// workflow to which we are the responder. It simply saves the remote peer's
// contribution to the channel, as solely the remote peer will contribute any
// funds to the channel.
func (l *LightningWallet) handleSingleContribution(req *addSingleContributionMsg) {
	l.limboMtx.Lock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.Unlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Simply record the counterparty's contribution into the pending
	// reservation data as they'll be solely funding the channel entirely.
	pendingReservation.theirContribution = req.contribution
	theirContribution := pendingReservation.theirContribution

	// Additionally, we can now also record the redeem script of the
	// funding transaction.
	// TODO(roasbeef): switch to proper pubkey derivation
	ourKey := pendingReservation.partialState.OurMultiSigKey
	theirKey := theirContribution.MultiSigKey
	channelCapacity := int64(pendingReservation.partialState.Capacity)
	witnessScript, _, err := GenFundingPkScript(ourKey.SerializeCompressed(),
		theirKey.SerializeCompressed(), channelCapacity)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.FundingWitnessScript = witnessScript

	masterElkremRoot, err := l.deriveMasterRevocationRoot()
	if err != nil {
		req.err <- err
		return
	}

	// Now that we know their commitment key, we can create the revocation
	// key for our version of the initial commitment transaction.
	root := deriveRevocationRoot(masterElkremRoot, ourKey, theirKey)
	producer := shachain.NewRevocationProducer(*root)
	firstPreimage, err := producer.AtIndex(0)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.RevocationProducer = producer
	theirCommitKey := theirContribution.CommitKey
	ourRevokeKey := DeriveRevocationPubkey(theirCommitKey, firstPreimage[:])

	// Initialize an empty sha-chain for them, tracking the current pending
	// revocation hash (we don't yet know the preimage so we can't add it
	// to the chain).
	remotePreimageStore := shachain.NewRevocationStore()
	pendingReservation.partialState.RevocationStore = remotePreimageStore

	// Record the counterparty's remaining contributions to the channel,
	// converting their delivery address into a public key script.
	deliveryScript, err := txscript.PayToAddrScript(theirContribution.DeliveryAddress)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.RemoteCsvDelay = theirContribution.CsvDelay
	pendingReservation.partialState.TheirDeliveryScript = deliveryScript
	pendingReservation.partialState.TheirCommitKey = theirContribution.CommitKey
	pendingReservation.partialState.TheirMultiSigKey = theirContribution.MultiSigKey
	pendingReservation.ourContribution.RevocationKey = ourRevokeKey

	req.err <- nil
	// NOTE(review): redundant bare return at the end of the function body.
	return
}
// openChanDetails contains a "finalized" channel which can be considered
// "open" according to the requested confirmation depth at reservation
// initialization. Additionally, the struct contains additional details
// pertaining to the exact location in the main chain in-which the transaction
// was confirmed.
type openChanDetails struct {
	// channel is the fully-opened payment channel.
	channel *LightningChannel

	// blockHeight is the height of the block confirming the funding tx.
	blockHeight uint32

	// txIndex is the funding transaction's index within that block.
	txIndex uint32
}
// handleFundingCounterPartySigs is the final step in the channel reservation
// workflow. During this step, we validate *all* the received signatures for
// inputs to the funding transaction. If any of these are invalid, we bail,
// and forcibly cancel this funding request. Additionally, we ensure that the
// signature we received from the counterparty for our version of the commitment
// transaction allows us to spend from the funding output with the addition of
// our signature.
func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigsMsg) {
	l.limboMtx.RLock()
	res, ok := l.fundingLimbo[msg.pendingFundingID]
	l.limboMtx.RUnlock()
	if !ok {
		msg.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	res.Lock()
	defer res.Unlock()

	// Now we can complete the funding transaction by adding their
	// signatures to their inputs. Inputs we already signed (non-empty
	// witness) are skipped; sigIndex walks the counterparty's scripts in
	// order.
	res.theirFundingInputScripts = msg.theirFundingInputScripts
	inputScripts := msg.theirFundingInputScripts
	fundingTx := res.fundingTx
	sigIndex := 0
	fundingHashCache := txscript.NewTxSigHashes(fundingTx)
	for i, txin := range fundingTx.TxIn {
		if len(inputScripts) != 0 && len(txin.Witness) == 0 {
			// Attach the input scripts so we can verify it below.
			txin.Witness = inputScripts[sigIndex].Witness
			txin.SignatureScript = inputScripts[sigIndex].ScriptSig

			// Fetch the alleged previous output along with the
			// pkscript referenced by this input.
			// NOTE(review): only the output is nil-checked here,
			// not err directly — confirm GetUtxo never returns a
			// non-nil output alongside a non-nil error.
			prevOut := txin.PreviousOutPoint
			output, err := l.ChainIO.GetUtxo(&prevOut.Hash, prevOut.Index)
			if output == nil {
				msg.err <- fmt.Errorf("input to funding tx does not exist: %v", err)
				return
			}

			// Ensure that the witness+sigScript combo is valid.
			vm, err := txscript.NewEngine(output.PkScript,
				fundingTx, i, txscript.StandardVerifyFlags, nil,
				fundingHashCache, output.Value)
			if err != nil {
				// TODO(roasbeef): cancel at this stage if invalid sigs?
				msg.err <- fmt.Errorf("cannot create script engine: %s", err)
				return
			}
			if err = vm.Execute(); err != nil {
				msg.err <- fmt.Errorf("cannot validate transaction: %s", err)
				return
			}

			sigIndex++
		}
	}

	// At this point, we can also record and verify their signature for our
	// commitment transaction.
	res.theirCommitmentSig = msg.theirCommitmentSig
	commitTx := res.partialState.OurCommitTx
	theirKey := res.theirContribution.MultiSigKey

	// Re-generate both the witnessScript and p2sh output. We sign the
	// witnessScript script, but include the p2sh output as the subscript
	// for verification.
	witnessScript := res.partialState.FundingWitnessScript

	// Next, create the spending scriptSig, and then verify that the script
	// is complete, allowing us to spend from the funding transaction.
	theirCommitSig := msg.theirCommitmentSig
	channelValue := int64(res.partialState.Capacity)
	hashCache := txscript.NewTxSigHashes(commitTx)
	sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache,
		txscript.SigHashAll, commitTx, 0, channelValue)
	if err != nil {
		msg.err <- fmt.Errorf("counterparty's commitment signature is invalid: %v", err)
		return
	}

	// Verify that we've received a valid signature from the remote party
	// for our version of the commitment transaction.
	sig, err := btcec.ParseSignature(theirCommitSig, btcec.S256())
	if err != nil {
		msg.err <- err
		return
	} else if !sig.Verify(sigHash, theirKey) {
		msg.err <- fmt.Errorf("counterparty's commitment signature is invalid")
		return
	}
	res.partialState.OurCommitSig = theirCommitSig

	// Funding complete, this entry can be removed from limbo.
	l.limboMtx.Lock()
	delete(l.fundingLimbo, res.reservationID)
	l.limboMtx.Unlock()

	walletLog.Infof("Broadcasting funding tx for ChannelPoint(%v): %v",
		res.partialState.FundingOutpoint, spew.Sdump(fundingTx))

	// Broadcast the finalized funding transaction to the network.
	if err := l.PublishTransaction(fundingTx); err != nil {
		msg.err <- err
		return
	}

	// Add the complete funding transaction to the DB, in it's open bucket
	// which will be used for the lifetime of this channel.
	// TODO(roasbeef): revisit fault-tolerance of this flow
	nodeAddr := res.nodeAddr
	if err := res.partialState.SyncPending(nodeAddr); err != nil {
		msg.err <- err
		return
	}

	msg.completeChan <- res.partialState
	msg.err <- nil
}
// handleSingleFunderSigs is called once the remote peer who initiated the
// single funder workflow has assembled the funding transaction, and generated
// a signature for our version of the commitment transaction. This method
// progresses the workflow by generating a signature for the remote peer's
// version of the commitment transaction.
func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) {
	l.limboMtx.RLock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.RUnlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Record the funding outpoint supplied by the initiating peer; as the
	// responder we never saw the full funding transaction.
	pendingReservation.partialState.FundingOutpoint = req.fundingOutpoint
	pendingReservation.partialState.TheirCurrentRevocation = req.revokeKey
	pendingReservation.partialState.ChanID = req.fundingOutpoint
	pendingReservation.partialState.StateHintObsfucator = req.obsfucator
	fundingTxIn := wire.NewTxIn(req.fundingOutpoint, nil, nil)

	// Now that we have the funding outpoint, we can generate both versions
	// of the commitment transaction, and generate a signature for the
	// remote node's commitment transactions.
	ourCommitKey := pendingReservation.ourContribution.CommitKey
	theirCommitKey := pendingReservation.theirContribution.CommitKey
	ourBalance := pendingReservation.partialState.OurBalance
	theirBalance := pendingReservation.partialState.TheirBalance
	ourCommitTx, err := CreateCommitTx(fundingTxIn, ourCommitKey, theirCommitKey,
		pendingReservation.ourContribution.RevocationKey,
		pendingReservation.ourContribution.CsvDelay, ourBalance, theirBalance,
		pendingReservation.partialState.OurDustLimit)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitTx, err := CreateCommitTx(fundingTxIn, theirCommitKey, ourCommitKey,
		req.revokeKey, pendingReservation.theirContribution.CsvDelay,
		theirBalance, ourBalance, pendingReservation.partialState.TheirDustLimit)
	if err != nil {
		req.err <- err
		return
	}

	// With both commitment transactions constructed, generate the state
	// obfuscator then use it to encode the current state number within
	// both commitment transactions.
	err = initStateHints(ourCommitTx, theirCommitTx, req.obsfucator)
	if err != nil {
		req.err <- err
		return
	}

	// Sort both transactions according to the agreed upon canonical
	// ordering. This ensures that both parties sign the same sighash
	// without further synchronization.
	txsort.InPlaceSort(ourCommitTx)
	pendingReservation.partialState.OurCommitTx = ourCommitTx
	txsort.InPlaceSort(theirCommitTx)

	// Compute the sighash over our (sorted) commitment transaction so the
	// counterparty's signature can be verified against it.
	witnessScript := pendingReservation.partialState.FundingWitnessScript
	channelValue := int64(pendingReservation.partialState.Capacity)
	hashCache := txscript.NewTxSigHashes(ourCommitTx)
	theirKey := pendingReservation.theirContribution.MultiSigKey
	ourKey := pendingReservation.partialState.OurMultiSigKey
	sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache,
		txscript.SigHashAll, ourCommitTx, 0, channelValue)
	if err != nil {
		req.err <- err
		return
	}

	// Verify that we've received a valid signature from the remote party
	// for our version of the commitment transaction.
	sig, err := btcec.ParseSignature(req.theirCommitmentSig, btcec.S256())
	if err != nil {
		req.err <- err
		return
	} else if !sig.Verify(sigHash, theirKey) {
		req.err <- fmt.Errorf("counterparty's commitment signature is invalid")
		return
	}
	pendingReservation.partialState.OurCommitSig = req.theirCommitmentSig

	// With their signature for our version of the commitment transactions
	// verified, we can now generate a signature for their version,
	// allowing the funding transaction to be safely broadcast.
	p2wsh, err := witnessScriptHash(witnessScript)
	if err != nil {
		req.err <- err
		return
	}
	signDesc := SignDescriptor{
		WitnessScript: witnessScript,
		PubKey:        ourKey,
		Output: &wire.TxOut{
			PkScript: p2wsh,
			Value:    channelValue,
		},
		HashType:   txscript.SigHashAll,
		SigHashes:  txscript.NewTxSigHashes(theirCommitTx),
		InputIndex: 0,
	}
	sigTheirCommit, err := l.Signer.SignOutputRaw(theirCommitTx, &signDesc)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.ourCommitmentSig = sigTheirCommit

	// Add the complete funding transaction to the DB, in it's open bucket
	// which will be used for the lifetime of this channel.
	if err := pendingReservation.partialState.SyncPending(pendingReservation.nodeAddr); err != nil {
		req.err <- err
		return
	}

	req.completeChan <- pendingReservation.partialState
	req.err <- nil

	l.limboMtx.Lock()
	delete(l.fundingLimbo, req.pendingFundingID)
	l.limboMtx.Unlock()
}
// selectCoinsAndChange performs coin selection in order to obtain witness
// outputs which sum to at least 'numCoins' amount of satoshis. If coin
// selection is successful/possible, then the selected coins are available
// within the passed contribution's inputs. If necessary, a change address will
// also be generated.
// TODO(roasbeef): remove hardcoded fees and req'd confs for outputs.
func (l *LightningWallet) selectCoinsAndChange(feeRate uint64, amt btcutil.Amount,
	contribution *ChannelContribution) error {

	// We hold the coin select mutex while querying for outputs, and
	// performing coin selection in order to avoid inadvertent double
	// spends across funding transactions.
	l.coinSelectMtx.Lock()
	defer l.coinSelectMtx.Unlock()

	// Find all unlocked unspent witness outputs with greater than 1
	// confirmation.
	// TODO(roasbeef): make num confs a configuration parameter
	coins, err := l.ListUnspentWitness(1)
	if err != nil {
		return err
	}

	// Perform coin selection over our available, unlocked unspent outputs
	// in order to find enough coins to meet the funding amount
	// requirements.
	selectedCoins, changeAmt, err := coinSelect(feeRate, amt, coins)
	if err != nil {
		return err
	}

	// Lock the selected coins. These coins are now "reserved", this
	// prevents concurrent funding requests from referring to and this
	// double-spending the same set of coins.
	contribution.Inputs = make([]*wire.TxIn, len(selectedCoins))
	for i, coin := range selectedCoins {
		// Track the lock both in our local map and in the underlying
		// wallet controller.
		l.lockedOutPoints[*coin] = struct{}{}
		l.LockOutpoint(*coin)

		// Empty sig script, we'll actually sign if this reservation is
		// queued up to be completed (the other side accepts).
		contribution.Inputs[i] = wire.NewTxIn(coin, nil, nil)
	}

	// Record any change output(s) generated as a result of the coin
	// selection.
	if changeAmt != 0 {
		changeAddr, err := l.NewAddress(WitnessPubKey, true)
		if err != nil {
			return err
		}
		changeScript, err := txscript.PayToAddrScript(changeAddr)
		if err != nil {
			return err
		}

		contribution.ChangeOutputs = make([]*wire.TxOut, 1)
		contribution.ChangeOutputs[0] = &wire.TxOut{
			Value:    int64(changeAmt),
			PkScript: changeScript,
		}
	}

	return nil
}
// deriveMasterRevocationRoot derives the private key which serves as the master
// producer root. This master secret is used as the secret input to a HKDF to
// generate revocation secrets based on random, but public data.
func (l *LightningWallet) deriveMasterRevocationRoot() (*btcec.PrivateKey, error) {
	// The master root lives at a fixed child index of the wallet's root
	// HD key.
	child, err := l.rootKey.Child(revocationRootIndex)
	if err != nil {
		return nil, err
	}

	return child.ECPrivKey()
}
// deriveStateHintObfuscator derives the bytes to be used for obfuscating the
// state hints from the root to be used for a new channel. The
// obfuscator is generated by performing an additional sha256 hash of the first
// child derived from the revocation root. The leading 4 bytes are used for the
// obfuscator.
func deriveStateHintObfuscator(producer shachain.Producer) ([StateHintSize]byte, error) {
	var hint [StateHintSize]byte

	// Pull out the first pre-image from the producer; on failure return
	// the zero-valued hint alongside the error.
	preimage, err := producer.AtIndex(0)
	if err != nil {
		return hint, err
	}

	// Hash the pre-image once more, then keep only the leading
	// StateHintSize bytes of the digest.
	digest := sha256.Sum256(preimage[:])
	copy(hint[:], digest[:])

	return hint, nil
}
// initStateHints properly sets the obfuscated state hints on both commitment
// transactions using the passed obfuscator. Both transactions are stamped
// with state number zero; the first failure aborts and is returned.
func initStateHints(commit1, commit2 *wire.MsgTx,
	obfuscator [StateHintSize]byte) error {

	for _, commitTx := range []*wire.MsgTx{commit1, commit2} {
		if err := SetStateNumHint(commitTx, 0, obfuscator); err != nil {
			return err
		}
	}

	return nil
}
// selectInputs selects a slice of inputs necessary to meet the specified
// selection amount. If input selection is unable to succeed due to insufficient
// funds, a non-nil error is returned. Additionally, the total amount of the
// selected coins are returned in order for the caller to properly handle
// change+fees.
//
// Coins are consumed greedily in the order given until the running total
// reaches amt.
func selectInputs(amt btcutil.Amount, coins []*Utxo) (btcutil.Amount, []*wire.OutPoint, error) {
	var (
		chosen []*wire.OutPoint
		total  btcutil.Amount
	)

	for next := 0; total < amt; next++ {
		// Exhausted the candidate set without reaching the target
		// amount: report how much we managed to gather.
		if next >= len(coins) {
			return 0, nil, &ErrInsufficientFunds{amt, total}
		}

		// Accept this coin into the selection and accumulate its
		// value toward the target.
		coin := coins[next]
		chosen = append(chosen, &wire.OutPoint{
			Hash:  coin.Hash,
			Index: coin.Index,
		})
		total += coin.Value
	}

	return total, chosen, nil
}
// coinSelect attempts to select a sufficient amount of coins, including a
// change output to fund amt satoshis, adhering to the specified fee rate. The
// specified fee rate should be expressed in sat/byte for coin selection to
// function properly.
func coinSelect(feeRate uint64, amt btcutil.Amount,
	coins []*Utxo) ([]*wire.OutPoint, btcutil.Amount, error) {

	const (
		// txOverhead is the overhead of a transaction residing within
		// the version number and lock time.
		txOverhead = 8

		// p2wkhSpendSize an estimate of the number of bytes it takes
		// to spend a p2wkh output.
		//
		// (p2wkh witness) + txid + index + varint script size + sequence
		// TODO(roasbeef): div by 3 due to witness size?
		p2wkhSpendSize = (1 + 73 + 1 + 33) + 32 + 4 + 1 + 4

		// p2wkhOutputSize is an estimate of the size of a regular
		// p2wkh output.
		//
		// 8 (output) + 1 (var int script) + 22 (p2wkh output)
		p2wkhOutputSize = 8 + 1 + 22

		// p2wshOutputSize is an estimate of the p2wsh funding output.
		p2wshOutputSize = 8 + 1 + 34
	)

	var estimatedSize int
	amtNeeded := amt
	for {
		// First perform an initial round of coin selection to estimate
		// the required fee.
		totalSat, selectedUtxos, err := selectInputs(amtNeeded, coins)
		if err != nil {
			return nil, 0, err
		}

		// Based on the selected coins, estimate the size of the final
		// fully signed transaction.
		// NOTE(review): the estimate omits any change output's size —
		// confirm this is intentional.
		estimatedSize = ((len(selectedUtxos) * p2wkhSpendSize) +
			p2wshOutputSize + txOverhead)

		// The difference between the selected amount and the amount
		// requested will be used to pay fees, and generate a change
		// output with the remaining.
		overShootAmt := totalSat - amtNeeded

		// Based on the estimated size and fee rate, if the excess
		// amount isn't enough to pay fees, then increase the requested
		// coin amount by the estimate required fee, performing another
		// round of coin selection.
		// NOTE(review): the fee is added on top of the previously
		// inflated amtNeeded rather than recomputed from amt, so
		// successive iterations may over-reserve — confirm intended.
		requiredFee := btcutil.Amount(uint64(estimatedSize) * feeRate)
		if overShootAmt < requiredFee {
			amtNeeded += requiredFee
			continue
		}

		// If the fee is sufficient, then calculate the size of the change output.
		changeAmt := overShootAmt - requiredFee

		return selectedUtxos, changeAmt, nil
	}
}
lnwallet: fetch the root key during startup rather than on creation
This commit modifies the initialization logic of the LightningWallet to
fetch the root key during startup rather than during creation. We make
this change in order to give enough time for the underlying
WalletController to properly boot up before we ask it to do any work.
package lnwallet
import (
"crypto/sha256"
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/roasbeef/btcd/chaincfg"
"github.com/roasbeef/btcutil/hdkeychain"
"github.com/lightningnetwork/lnd/shachain"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/txscript"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
"github.com/roasbeef/btcutil/txsort"
)
const (
	// msgBufferSize is the size of the buffered queue of requests to the
	// wallet from the outside world.
	msgBufferSize = 100

	// revocationRootIndex is the top level HD key index from which secrets
	// used to generate producer roots should be derived from.
	revocationRootIndex = hdkeychain.HardenedKeyStart + 1

	// identityKeyIndex is the top level HD key index which is used to
	// generate/rotate identity keys.
	//
	// TODO(roasbeef): should instead be child to make room for future
	// rotations, etc.
	identityKeyIndex = hdkeychain.HardenedKeyStart + 2

	// commitFee is a fixed amount (in satoshis) added on top of the
	// requested capacity/funding amount when a reservation is created.
	commitFee = 5000
)
var (
	// Namespace bucket keys used to partition the wallet's boltdb
	// database.
	lightningNamespaceKey = []byte("ln-wallet")
	waddrmgrNamespaceKey  = []byte("waddrmgr")
	wtxmgrNamespaceKey    = []byte("wtxmgr")
)
// ErrInsufficientFunds is a type matching the error interface which is
// returned when coin selection for a new funding transaction fails due to
// having an insufficient amount of confirmed funds.
type ErrInsufficientFunds struct {
	// amountAvailable is the amount the caller asked coin selection for.
	// NOTE(review): despite its name, selectInputs populates this field
	// with the amount *needed*, not the amount available — consider
	// renaming both fields together with their construction site.
	amountAvailable btcutil.Amount

	// amountSelected is the total value of the coins that could actually
	// be selected before funds ran out.
	amountSelected btcutil.Amount
}
// Error returns a human-readable description of the shortfall, reporting
// the amount needed followed by the amount actually available (see the
// field-naming note on the struct's construction in selectInputs).
func (e *ErrInsufficientFunds) Error() string {
	return fmt.Sprintf("not enough outputs to create funding transaction,"+
		" need %v only have %v available", e.amountAvailable,
		e.amountSelected)
}
// initFundingReserveMsg is the first message sent to initiate the workflow
// required to open a payment channel with a remote peer. The initial required
// parameters are configurable across channels. These parameters are to be
// chosen depending on the fee climate within the network, and time value of
// funds to be locked up within the channel. Upon success a
// ChannelReservation will be created in order to track the lifetime of this
// pending channel. Outputs selected will be 'locked', making them
// unavailable, for any other pending reservations. Therefore, all channels
// in reservation limbo will be periodically timed out after a timeout period
// in order to avoid "exhaustion" attacks.
//
// TODO(roasbeef): zombie reservation sweeper goroutine.
type initFundingReserveMsg struct {
	// The ID of the remote node we would like to open a channel with.
	nodeID *btcec.PublicKey

	// The IP address plus port that we used to either establish or accept
	// the connection which led to the negotiation of this funding
	// workflow.
	nodeAddr *net.TCPAddr

	// The number of confirmations required before the channel is
	// considered open.
	numConfs uint16

	// The amount of funds requested for this channel.
	fundingAmount btcutil.Amount

	// The total capacity of the channel which includes the amount of funds
	// the remote party contributes (if any).
	capacity btcutil.Amount

	// The minimum accepted satoshis/KB fee for the funding transaction. In
	// order to ensure timely confirmation, it is recommended that this fee
	// should be generous, paying some multiple of the accepted base fee
	// rate of the network.
	// TODO(roasbeef): integrate fee estimation project...
	minFeeRate btcutil.Amount

	// ourDustLimit is the threshold below which no HTLC output should be
	// generated for our commitment transaction; ie. HTLCs below
	// this amount are not enforceable onchain from our point of view.
	ourDustLimit btcutil.Amount

	// pushSat is the number of satoshis that should be pushed over to the
	// responder as part of the initial channel creation.
	pushSat btcutil.Amount

	// The delay on the "pay-to-self" output(s) of the commitment
	// transaction.
	csvDelay uint32

	// A channel in which all errors will be sent across. Will be nil if
	// this initial set is successful.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error

	// A ChannelReservation with our contributions filled in will be sent
	// across this channel in the case of a successful reservation
	// initiation. In the case of an error, this will read a nil pointer.
	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	resp chan *ChannelReservation
}
// fundingReserveCancelMsg is a message reserved for cancelling an existing
// channel reservation identified by its reservation ID. Cancelling a
// reservation frees its locked outputs up, for inclusion within further
// reservations.
type fundingReserveCancelMsg struct {
	// pendingFundingID identifies the reservation to cancel within the
	// wallet's funding limbo.
	pendingFundingID uint64

	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error // Buffered
}
// addContributionMsg represents a message executing the second phase of the
// channel reservation workflow. This message carries the counterparty's
// "contribution" to the payment channel. In the case that this message is
// processed without generating any errors, then channel reservation will then
// be able to construct the funding tx, both commitment transactions, and
// finally generate signatures for all our inputs to the funding transaction,
// and for the remote node's version of the commitment transaction.
type addContributionMsg struct {
	// pendingFundingID identifies the reservation being advanced.
	pendingFundingID uint64

	// TODO(roasbeef): Should also carry SPV proofs in we're in SPV mode
	contribution *ChannelContribution

	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addSingleContributionMsg represents a message executing the second phase of
// a single funder channel reservation workflow. This message carries the
// counterparty's "contribution" to the payment channel. As this message is
// sent when on the responding side to a single funder workflow, no further
// action apart from storing the provided contribution is carried out.
type addSingleContributionMsg struct {
	// pendingFundingID identifies the reservation being advanced.
	pendingFundingID uint64

	// contribution is the remote peer's channel contribution to record.
	contribution *ChannelContribution

	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addCounterPartySigsMsg represents the final message required to complete,
// and 'open' a payment channel. This message carries the counterparty's
// signatures for each of their inputs to the funding transaction, and also a
// signature allowing us to spend our version of the commitment transaction.
// If we're able to verify all the signatures are valid, the funding
// transaction will be broadcast to the network. After the funding transaction
// gains a configurable number of confirmations, the channel is officially
// considered 'open'.
type addCounterPartySigsMsg struct {
	// pendingFundingID identifies the reservation being completed.
	pendingFundingID uint64

	// Should be order of sorted inputs that are theirs. Sorting is done
	// in accordance to BIP-69:
	// https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki.
	theirFundingInputScripts []*InputScript

	// This should be 1/2 of the signatures needed to successfully spend
	// our version of the commitment transaction.
	theirCommitmentSig []byte

	// This channel is used to return the completed channel after the
	// wallet has completed all of its stages in the funding process.
	completeChan chan *channeldb.OpenChannel

	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// addSingleFunderSigsMsg represents the next-to-last message required to
// complete a single-funder channel workflow. Once the initiator is able to
// construct the funding transaction, they send both the outpoint and a
// signature for our version of the commitment transaction. Once this message
// is processed we (the responder) are able to construct both commitment
// transactions, signing the remote party's version.
type addSingleFunderSigsMsg struct {
	// pendingFundingID identifies the reservation being advanced.
	pendingFundingID uint64

	// fundingOutpoint is the outpoint of the completed funding
	// transaction as assembled by the workflow initiator.
	fundingOutpoint *wire.OutPoint

	// revokeKey is the revocation public key derived by the remote node to
	// be used within the initial version of the commitment transaction we
	// construct for them.
	revokeKey *btcec.PublicKey

	// theirCommitmentSig are the 1/2 of the signatures needed to
	// successfully spend our version of the commitment transaction.
	theirCommitmentSig []byte

	// obsfucator is the bytes to be used to obfuscate the state hints on
	// the commitment transaction.
	obsfucator [StateHintSize]byte

	// This channel is used to return the completed channel after the
	// wallet has completed all of its stages in the funding process.
	completeChan chan *channeldb.OpenChannel

	// NOTE: In order to avoid deadlocks, this channel MUST be buffered.
	err chan error
}
// LightningWallet is a domain specific, yet general Bitcoin wallet capable of
// executing workflow required to interact with the Lightning Network. It is
// domain specific in the sense that it understands all the fancy scripts used
// within the Lightning Network, channel lifetimes, etc. However, it embeds a
// general purpose Bitcoin wallet within it. Therefore, it is also able to
// serve as a regular Bitcoin wallet which uses HD keys. The wallet is highly
// concurrent internally. All communication, and requests towards the wallet
// are dispatched as messages over channels, ensuring thread safety across all
// operations. Interaction has been designed independent of any peer-to-peer
// communication protocol, allowing the wallet to be self-contained and
// embeddable within future projects interacting with the Lightning Network.
//
// NOTE: At the moment the wallet requires a btcd full node, as it's dependent
// on btcd's websockets notifications as event triggers during the lifetime of
// a channel. However, once the chainntnfs package is complete, the wallet
// will be compatible with multiple RPC/notification services such as
// Electrum, Bitcoin Core + ZeroMQ, etc. Eventually, the wallet won't require
// a full-node at all, as SPV support is integrated into btcwallet.
type LightningWallet struct {
	// This mutex is to be held when generating external keys to be used
	// as multi-sig, and commitment keys within the channel.
	keyGenMtx sync.RWMutex

	// This mutex MUST be held when performing coin selection in order to
	// avoid inadvertently creating multiple funding transaction which
	// double spend inputs across each other.
	coinSelectMtx sync.RWMutex

	// A wrapper around a namespace within boltdb reserved for ln-based
	// wallet metadata. See the 'channeldb' package for further
	// information.
	ChannelDB *channeldb.DB

	// Used by in order to obtain notifications about funding transaction
	// reaching a specified confirmation depth, and to catch
	// counterparty's broadcasting revoked commitment states.
	chainNotifier chainntnfs.ChainNotifier

	// wallet is the core wallet, all non Lightning Network specific
	// interaction is proxied to the internal wallet.
	WalletController

	// Signer is the wallet's current Signer implementation. This Signer is
	// used to generate signature for all inputs to potential funding
	// transactions, as well as for spends from the funding transaction to
	// update the commitment state.
	Signer Signer

	// ChainIO is an instance of the BlockChainIO interface. ChainIO is
	// used to lookup the existence of outputs within the UTXO set.
	ChainIO BlockChainIO

	// rootKey is the root HD key derived from a WalletController private
	// key. This rootKey is used to derive all LN specific secrets.
	rootKey *hdkeychain.ExtendedKey

	// All messages to the wallet are to be sent across this channel.
	msgChan chan interface{}

	// Incomplete payment channels are stored in the map below. An intent
	// to create a payment channel is tracked as a "reservation" within
	// limbo. Once the final signatures have been exchanged, a reservation
	// is removed from limbo. Each reservation is tracked by a unique
	// monotonically increasing integer. All requests concerning the
	// channel MUST carry a valid, active funding ID.
	fundingLimbo  map[uint64]*ChannelReservation
	nextFundingID uint64
	limboMtx      sync.RWMutex
	// TODO(roasbeef): zombie garbage collection routine to solve
	// lost-object/starvation problem/attack.

	// lockedOutPoints is a set of the currently locked outpoint. This
	// information is kept in order to provide an easy way to unlock all
	// the currently locked outpoints.
	lockedOutPoints map[wire.OutPoint]struct{}

	// netParams identifies which Bitcoin network the wallet operates on.
	netParams *chaincfg.Params

	// started/shutdown guard the one-shot Startup/Shutdown logic; quit and
	// wg manage the lifetime of the request-handler goroutine.
	started  int32
	shutdown int32
	quit     chan struct{}
	wg       sync.WaitGroup

	// TODO(roasbeef): handle wallet lock/unlock
}
// NewLightningWallet creates/opens and initializes a LightningWallet
// instance. If the wallet has never been created (according to the passed
// dataDir), first-time setup is executed.
//
// NOTE: The passed channeldb, and ChainNotifier should already be fully
// initialized/started before being passed as a function argument.
func NewLightningWallet(cdb *channeldb.DB, notifier chainntnfs.ChainNotifier,
	wallet WalletController, signer Signer, bio BlockChainIO,
	netParams *chaincfg.Params) (*LightningWallet, error) {

	lw := &LightningWallet{
		ChannelDB:        cdb,
		chainNotifier:    notifier,
		WalletController: wallet,
		Signer:           signer,
		ChainIO:          bio,
		netParams:        netParams,
		msgChan:          make(chan interface{}, msgBufferSize),
		fundingLimbo:     make(map[uint64]*ChannelReservation),
		lockedOutPoints:  make(map[wire.OutPoint]struct{}),
		quit:             make(chan struct{}),
	}

	return lw, nil
}
// Startup establishes a connection to the RPC source, and spins up all
// goroutines required to handle incoming messages.
func (l *LightningWallet) Startup() error {
	// Make start-up idempotent: only the very first caller proceeds.
	if atomic.AddInt32(&l.started, 1) != 1 {
		return nil
	}

	// Bring up the underlying wallet controller before asking it to do
	// any work on our behalf.
	if err := l.Start(); err != nil {
		return err
	}

	// Fetch the root derivation key from the wallet's HD chain. We'll use
	// this to generate specific Lightning related secrets on the fly.
	masterKey, err := l.FetchRootKey()
	if err != nil {
		return err
	}

	// TODO(roasbeef): always re-derive on the fly?
	extendedRoot, err := hdkeychain.NewMaster(masterKey.Serialize(), l.netParams)
	if err != nil {
		return err
	}
	l.rootKey = extendedRoot

	// TODO(roasbeef): multiple request handlers?
	l.wg.Add(1)
	go l.requestHandler()

	return nil
}
// Shutdown gracefully stops the wallet, and all active goroutines.
func (l *LightningWallet) Shutdown() error {
	// Guard against repeated shutdown attempts.
	if atomic.AddInt32(&l.shutdown, 1) != 1 {
		return nil
	}

	// First ask the underlying wallet controller to halt.
	err := l.Stop()
	if err != nil {
		return err
	}

	// Then signal our own goroutines to exit, waiting until every one of
	// them has finished.
	close(l.quit)
	l.wg.Wait()

	return nil
}
// LockedOutpoints returns a list of all currently locked outpoints.
func (l *LightningWallet) LockedOutpoints() []*wire.OutPoint {
	outPoints := make([]*wire.OutPoint, 0, len(l.lockedOutPoints))
	for outPoint := range l.lockedOutPoints {
		// Copy the loop variable before taking its address: prior to
		// Go 1.22 the range variable is reused across iterations, so
		// appending &outPoint directly would leave every element of
		// the returned slice pointing at the same (final) outpoint.
		outPoint := outPoint
		outPoints = append(outPoints, &outPoint)
	}

	return outPoints
}
// ResetReservations resets the volatile wallet state which tracks all
// currently active reservations.
//
// NOTE(review): this method mutates nextFundingID, fundingLimbo and
// lockedOutPoints without acquiring limboMtx — confirm that callers
// serialize access before invoking it.
func (l *LightningWallet) ResetReservations() {
	l.nextFundingID = 0
	l.fundingLimbo = make(map[uint64]*ChannelReservation)

	// Hand every outpoint locked by the discarded reservations back to
	// the underlying wallet controller.
	for outpoint := range l.lockedOutPoints {
		l.UnlockOutpoint(outpoint)
	}
	l.lockedOutPoints = make(map[wire.OutPoint]struct{})
}
// ActiveReservations returns a slice of all the currently active
// (non-cancelled) reservations.
func (l *LightningWallet) ActiveReservations() []*ChannelReservation {
	active := make([]*ChannelReservation, 0, len(l.fundingLimbo))
	for _, res := range l.fundingLimbo {
		active = append(active, res)
	}

	return active
}
// GetIdentitykey returns the identity private key of the wallet, derived
// from the root HD key at identityKeyIndex.
// TODO(roasbeef): should be moved elsewhere
func (l *LightningWallet) GetIdentitykey() (*btcec.PrivateKey, error) {
	child, err := l.rootKey.Child(identityKeyIndex)
	if err != nil {
		return nil, err
	}

	return child.ECPrivKey()
}
// requestHandler is the primary goroutine(s) responsible for handling, and
// dispatching replies to all messages.
func (l *LightningWallet) requestHandler() {
	defer l.wg.Done()

	for {
		select {
		case m := <-l.msgChan:
			// Dispatch on the concrete message type; unknown
			// messages are silently dropped.
			switch msg := m.(type) {
			case *initFundingReserveMsg:
				l.handleFundingReserveRequest(msg)
			case *fundingReserveCancelMsg:
				l.handleFundingCancelRequest(msg)
			case *addSingleContributionMsg:
				l.handleSingleContribution(msg)
			case *addContributionMsg:
				l.handleContributionMsg(msg)
			case *addSingleFunderSigsMsg:
				l.handleSingleFunderSigs(msg)
			case *addCounterPartySigsMsg:
				l.handleFundingCounterPartySigs(msg)
			}

		case <-l.quit:
			// TODO: do some clean up
			return
		}
	}
}
// InitChannelReservation kicks off the 3-step workflow required to
// successfully open a payment channel with a remote node. As part of the
// funding reservation, the inputs selected for the funding transaction are
// 'locked'. This ensures that multiple channel reservations aren't double
// spending the same inputs in the funding transaction. If reservation
// initialization is successful, a ChannelReservation containing our completed
// contribution is returned. Our contribution contains all the items necessary
// to allow the counterparty to build the funding transaction, and both
// versions of the commitment transaction. Otherwise, an error occurred and a
// nil pointer along with an error are returned.
//
// Once a ChannelReservation has been obtained, two additional steps must be
// processed before a payment channel can be considered 'open'. The second
// step validates, and processes the counterparty's channel contribution. The
// third, and final step verifies all signatures for the inputs of the funding
// transaction, and that the signature we record for our version of the
// commitment transaction is valid.
func (l *LightningWallet) InitChannelReservation(capacity,
	ourFundAmt btcutil.Amount, theirID *btcec.PublicKey,
	theirAddr *net.TCPAddr, numConfs uint16,
	csvDelay uint32, ourDustLimit btcutil.Amount,
	pushSat btcutil.Amount) (*ChannelReservation, error) {

	// TODO(roasbeef): make the above into an initial config as part of the
	// refactor to implement spec compliant funding flow

	// Both reply channels are buffered so the request handler can reply
	// without blocking on us.
	errReply := make(chan error, 1)
	reservationReply := make(chan *ChannelReservation, 1)

	l.msgChan <- &initFundingReserveMsg{
		nodeID:        theirID,
		nodeAddr:      theirAddr,
		numConfs:      numConfs,
		fundingAmount: ourFundAmt,
		capacity:      capacity,
		csvDelay:      csvDelay,
		ourDustLimit:  ourDustLimit,
		pushSat:       pushSat,
		err:           errReply,
		resp:          reservationReply,
	}

	return <-reservationReply, <-errReply
}
// handleFundingReserveRequest processes a message intending to create, and
// validate a funding reservation request.
//
// Every exit path sends exactly one value on both req.err and req.resp;
// both channels are buffered (see initFundingReserveMsg), so the send order
// cannot deadlock with the caller's `<-respChan, <-errChan` reads.
func (l *LightningWallet) handleFundingReserveRequest(req *initFundingReserveMsg) {
	// It isn't possible to create a channel with zero funds committed.
	if req.fundingAmount+req.capacity == 0 {
		req.err <- fmt.Errorf("cannot have channel with zero " +
			"satoshis funded")
		req.resp <- nil
		return
	}

	// Reservation IDs are allocated from a monotonically increasing
	// counter. The reservation's total capacity includes the fixed
	// commitment fee on top of what was requested.
	id := atomic.AddUint64(&l.nextFundingID, 1)
	totalCapacity := req.capacity + commitFee
	reservation := NewChannelReservation(totalCapacity, req.fundingAmount,
		req.minFeeRate, l, id, req.numConfs, req.pushSat)

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	reservation.Lock()
	defer reservation.Unlock()

	reservation.nodeAddr = req.nodeAddr
	reservation.ourContribution.CsvDelay = req.csvDelay
	reservation.partialState.NumConfsRequired = req.numConfs
	reservation.partialState.IdentityPub = req.nodeID
	reservation.partialState.LocalCsvDelay = req.csvDelay
	reservation.partialState.OurDustLimit = req.ourDustLimit
	ourContribution := reservation.ourContribution

	// If we're on the receiving end of a single funder channel then we
	// don't need to perform any coin selection. Otherwise, attempt to
	// obtain enough coins to meet the required funding amount.
	if req.fundingAmount != 0 {
		// TODO(roasbeef): consult model for proper fee rate on funding
		// tx
		feeRate := uint64(10)
		amt := req.fundingAmount + commitFee
		err := l.selectCoinsAndChange(feeRate, amt, ourContribution)
		if err != nil {
			req.err <- err
			req.resp <- nil
			return
		}
	}

	// Grab two fresh keys from our HD chain, one will be used for the
	// multi-sig funding transaction, and the other for the commitment
	// transaction.
	multiSigKey, err := l.NewRawKey()
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	commitKey, err := l.NewRawKey()
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	reservation.partialState.OurMultiSigKey = multiSigKey
	ourContribution.MultiSigKey = multiSigKey
	reservation.partialState.OurCommitKey = commitKey
	ourContribution.CommitKey = commitKey

	// Generate a fresh address to be used in the case of a cooperative
	// channel close.
	deliveryAddress, err := l.NewAddress(WitnessPubKey, false)
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	deliveryScript, err := txscript.PayToAddrScript(deliveryAddress)
	if err != nil {
		req.err <- err
		req.resp <- nil
		return
	}
	reservation.partialState.OurDeliveryScript = deliveryScript
	ourContribution.DeliveryAddress = deliveryAddress

	// Create a limbo and record entry for this newly pending funding
	// request.
	l.limboMtx.Lock()
	l.fundingLimbo[id] = reservation
	l.limboMtx.Unlock()

	// Funding reservation request successfully handled. The funding inputs
	// will be marked as unavailable until the reservation is either
	// completed, or cancelled.
	req.resp <- reservation
	req.err <- nil
}
// handleFundingCancelRequest cancels an existing channel reservation. As part
// of the cancellation, outputs previously selected as inputs for the funding
// transaction via coin selection are freed allowing future reservations to
// include them.
func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMsg) {
	// TODO(roasbeef): holding lock too long
	l.limboMtx.Lock()
	defer l.limboMtx.Unlock()

	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	if !ok {
		// TODO(roasbeef): make new error, "unknown funding state" or something
		req.err <- fmt.Errorf("attempted to cancel non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Mark all previously locked outpoints as usable for future funding
	// requests, both in our local bookkeeping set and within the
	// underlying wallet controller.
	for _, unusedInput := range pendingReservation.ourContribution.Inputs {
		delete(l.lockedOutPoints, unusedInput.PreviousOutPoint)
		l.UnlockOutpoint(unusedInput.PreviousOutPoint)
	}

	// TODO(roasbeef): is it even worth it to keep track of unused keys?

	// TODO(roasbeef): Is it possible to mark the unused change also as
	// available?

	delete(l.fundingLimbo, req.pendingFundingID)

	req.err <- nil
}
// handleContributionMsg processes the second workflow step for the lifetime
// of a channel reservation. Upon completion, the reservation will carry a
// completed funding transaction (minus the counterparty's input signatures),
// both versions of the commitment transaction, and our signature for their
// version of the commitment transaction.
func (l *LightningWallet) handleContributionMsg(req *addContributionMsg) {
	l.limboMtx.Lock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.Unlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Create a blank, fresh transaction. Soon to be a complete funding
	// transaction which will allow opening a lightning channel.
	pendingReservation.fundingTx = wire.NewMsgTx(1)
	fundingTx := pendingReservation.fundingTx

	// Some temporary variables to cut down on the resolution verbosity.
	pendingReservation.theirContribution = req.contribution
	theirContribution := req.contribution
	ourContribution := pendingReservation.ourContribution

	// Add all multi-party inputs and outputs to the transaction.
	for _, ourInput := range ourContribution.Inputs {
		fundingTx.AddTxIn(ourInput)
	}
	for _, theirInput := range theirContribution.Inputs {
		fundingTx.AddTxIn(theirInput)
	}
	for _, ourChangeOutput := range ourContribution.ChangeOutputs {
		fundingTx.AddTxOut(ourChangeOutput)
	}
	for _, theirChangeOutput := range theirContribution.ChangeOutputs {
		fundingTx.AddTxOut(theirChangeOutput)
	}

	ourKey := pendingReservation.partialState.OurMultiSigKey
	theirKey := theirContribution.MultiSigKey

	// Finally, add the 2-of-2 multi-sig output which will set up the
	// lightning channel.
	channelCapacity := int64(pendingReservation.partialState.Capacity)
	witnessScript, multiSigOut, err := GenFundingPkScript(ourKey.SerializeCompressed(),
		theirKey.SerializeCompressed(), channelCapacity)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.FundingWitnessScript = witnessScript

	// Sort the transaction. Since both side agree to a canonical
	// ordering, by sorting we no longer need to send the entire
	// transaction. Only signatures will be exchanged.
	fundingTx.AddTxOut(multiSigOut)
	txsort.InPlaceSort(pendingReservation.fundingTx)

	// Next, sign all inputs that are ours, collecting the signatures in
	// order of the inputs. Inputs the wallet doesn't control (ErrNotMine)
	// are skipped; they belong to the counterparty.
	pendingReservation.ourFundingInputScripts = make([]*InputScript, 0, len(ourContribution.Inputs))
	signDesc := SignDescriptor{
		HashType:  txscript.SigHashAll,
		SigHashes: txscript.NewTxSigHashes(fundingTx),
	}
	for i, txIn := range fundingTx.TxIn {
		info, err := l.FetchInputInfo(&txIn.PreviousOutPoint)
		if err == ErrNotMine {
			continue
		} else if err != nil {
			req.err <- err
			return
		}

		signDesc.Output = info
		signDesc.InputIndex = i

		inputScript, err := l.Signer.ComputeInputScript(fundingTx, &signDesc)
		if err != nil {
			req.err <- err
			return
		}

		txIn.SignatureScript = inputScript.ScriptSig
		txIn.Witness = inputScript.Witness
		pendingReservation.ourFundingInputScripts = append(
			pendingReservation.ourFundingInputScripts,
			inputScript,
		)
	}

	// Locate the index of the multi-sig outpoint in order to record it
	// since the outputs are canonically sorted. If this is a single funder
	// workflow, then we'll also need to send this to the remote node.
	fundingTxID := fundingTx.TxHash()
	_, multiSigIndex := FindScriptOutputIndex(fundingTx, multiSigOut.PkScript)
	fundingOutpoint := wire.NewOutPoint(&fundingTxID, multiSigIndex)
	pendingReservation.partialState.FundingOutpoint = fundingOutpoint

	// Initialize an empty sha-chain for them, tracking the current pending
	// revocation hash (we don't yet know the preimage so we can't add it
	// to the chain).
	s := shachain.NewRevocationStore()
	pendingReservation.partialState.RevocationStore = s
	pendingReservation.partialState.TheirCurrentRevocation = theirContribution.RevocationKey

	masterElkremRoot, err := l.deriveMasterRevocationRoot()
	if err != nil {
		req.err <- err
		return
	}

	// Now that we have their commitment key, we can create the revocation
	// key for the first version of our commitment transaction. To do so,
	// we'll first create our root, then produce the first pre-image.
	root := deriveRevocationRoot(masterElkremRoot, ourKey, theirKey)
	producer := shachain.NewRevocationProducer(*root)
	pendingReservation.partialState.RevocationProducer = producer
	firstPreimage, err := producer.AtIndex(0)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitKey := theirContribution.CommitKey
	ourRevokeKey := DeriveRevocationPubkey(theirCommitKey, firstPreimage[:])

	// Create the txIn to our commitment transaction; required to construct
	// the commitment transactions.
	fundingTxIn := wire.NewTxIn(wire.NewOutPoint(&fundingTxID, multiSigIndex), nil, nil)

	// With the funding tx complete, create both commitment transactions.
	// TODO(roasbeef): much cleanup + de-duplication
	ourBalance := pendingReservation.partialState.OurBalance
	theirBalance := pendingReservation.partialState.TheirBalance
	ourCommitKey := ourContribution.CommitKey
	ourCommitTx, err := CreateCommitTx(fundingTxIn, ourCommitKey, theirCommitKey,
		ourRevokeKey, ourContribution.CsvDelay,
		ourBalance, theirBalance, pendingReservation.partialState.OurDustLimit)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitTx, err := CreateCommitTx(fundingTxIn, theirCommitKey, ourCommitKey,
		theirContribution.RevocationKey, theirContribution.CsvDelay,
		theirBalance, ourBalance, pendingReservation.partialState.TheirDustLimit)
	if err != nil {
		req.err <- err
		return
	}

	// With both commitment transactions constructed, generate the state
	// obfuscator then use it to encode the current state number within
	// both commitment transactions. Non-initiators leave the obfuscator
	// as its zero value here.
	// TODO(roasbeef): define obfuscator scheme for dual funder
	var stateObsfucator [StateHintSize]byte
	if pendingReservation.partialState.IsInitiator {
		stateObsfucator, err = deriveStateHintObfuscator(producer)
		if err != nil {
			req.err <- err
			return
		}
	}
	err = initStateHints(ourCommitTx, theirCommitTx, stateObsfucator)
	if err != nil {
		req.err <- err
		return
	}

	// Sort both transactions according to the agreed upon canonical
	// ordering. This lets us skip sending the entire transaction over,
	// instead we'll just send signatures.
	txsort.InPlaceSort(ourCommitTx)
	txsort.InPlaceSort(theirCommitTx)
	deliveryScript, err := txscript.PayToAddrScript(theirContribution.DeliveryAddress)
	if err != nil {
		req.err <- err
		return
	}

	// Record newly available information within the open channel state.
	pendingReservation.partialState.RemoteCsvDelay = theirContribution.CsvDelay
	pendingReservation.partialState.TheirDeliveryScript = deliveryScript
	pendingReservation.partialState.ChanID = fundingOutpoint
	pendingReservation.partialState.TheirCommitKey = theirCommitKey
	pendingReservation.partialState.TheirMultiSigKey = theirContribution.MultiSigKey
	pendingReservation.partialState.OurCommitTx = ourCommitTx
	pendingReservation.partialState.StateHintObsfucator = stateObsfucator
	pendingReservation.ourContribution.RevocationKey = ourRevokeKey

	// Generate a signature for their version of the initial commitment
	// transaction.
	signDesc = SignDescriptor{
		WitnessScript: witnessScript,
		PubKey:        ourKey,
		Output:        multiSigOut,
		HashType:      txscript.SigHashAll,
		SigHashes:     txscript.NewTxSigHashes(theirCommitTx),
		InputIndex:    0,
	}
	sigTheirCommit, err := l.Signer.SignOutputRaw(theirCommitTx, &signDesc)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.ourCommitmentSig = sigTheirCommit

	req.err <- nil
}
// handleSingleContribution is called as the second step to a single funder
// workflow to which we are the responder. It simply saves the remote peer's
// contribution to the channel, as solely the remote peer will contribute any
// funds to the channel.
func (l *LightningWallet) handleSingleContribution(req *addSingleContributionMsg) {
	// Look up the pending reservation under the limbo mutex; all access to
	// the fundingLimbo map is serialized by limboMtx.
	l.limboMtx.Lock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.Unlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety.
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Simply record the counterparty's contribution into the pending
	// reservation data as they'll be solely funding the channel entirely.
	pendingReservation.theirContribution = req.contribution
	theirContribution := pendingReservation.theirContribution

	// Additionally, we can now also record the redeem script of the
	// funding transaction.
	// TODO(roasbeef): switch to proper pubkey derivation
	ourKey := pendingReservation.partialState.OurMultiSigKey
	theirKey := theirContribution.MultiSigKey
	channelCapacity := int64(pendingReservation.partialState.Capacity)
	witnessScript, _, err := GenFundingPkScript(ourKey.SerializeCompressed(),
		theirKey.SerializeCompressed(), channelCapacity)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.FundingWitnessScript = witnessScript

	masterElkremRoot, err := l.deriveMasterRevocationRoot()
	if err != nil {
		req.err <- err
		return
	}

	// Now that we know their commitment key, we can create the revocation
	// key for our version of the initial commitment transaction.
	root := deriveRevocationRoot(masterElkremRoot, ourKey, theirKey)
	producer := shachain.NewRevocationProducer(*root)
	firstPreimage, err := producer.AtIndex(0)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.RevocationProducer = producer
	theirCommitKey := theirContribution.CommitKey
	ourRevokeKey := DeriveRevocationPubkey(theirCommitKey, firstPreimage[:])

	// Initialize an empty sha-chain for them, tracking the current pending
	// revocation hash (we don't yet know the preimage so we can't add it
	// to the chain).
	remotePreimageStore := shachain.NewRevocationStore()
	pendingReservation.partialState.RevocationStore = remotePreimageStore

	// Record the counterparty's remaining contributions to the channel,
	// converting their delivery address into a public key script.
	deliveryScript, err := txscript.PayToAddrScript(theirContribution.DeliveryAddress)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.partialState.RemoteCsvDelay = theirContribution.CsvDelay
	pendingReservation.partialState.TheirDeliveryScript = deliveryScript
	pendingReservation.partialState.TheirCommitKey = theirContribution.CommitKey
	pendingReservation.partialState.TheirMultiSigKey = theirContribution.MultiSigKey
	pendingReservation.ourContribution.RevocationKey = ourRevokeKey

	req.err <- nil
	return
}
// openChanDetails contains a "finalized" channel which can be considered
// "open" according to the requested confirmation depth at reservation
// initialization. Additionally, the struct contains additional details
// pertaining to the exact location in the main chain in-which the transaction
// was confirmed.
type openChanDetails struct {
	// channel is the finalized, opened channel.
	channel *LightningChannel

	// blockHeight is the height of the block in which the funding
	// transaction was confirmed.
	blockHeight uint32

	// txIndex is the index of the confirming transaction within that
	// block.
	txIndex uint32
}
// handleFundingCounterPartySigs is the final step in the channel reservation
// workflow. During this step, we validate *all* the received signatures for
// inputs to the funding transaction. If any of these are invalid, we bail,
// and forcibly cancel this funding request. Additionally, we ensure that the
// signature we received from the counterparty for our version of the commitment
// transaction allows us to spend from the funding output with the addition of
// our signature.
func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigsMsg) {
	l.limboMtx.RLock()
	res, ok := l.fundingLimbo[msg.pendingFundingID]
	l.limboMtx.RUnlock()
	if !ok {
		msg.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety.
	res.Lock()
	defer res.Unlock()

	// Now we can complete the funding transaction by adding their
	// signatures to their inputs.
	res.theirFundingInputScripts = msg.theirFundingInputScripts
	inputScripts := msg.theirFundingInputScripts
	fundingTx := res.fundingTx
	sigIndex := 0
	fundingHashCache := txscript.NewTxSigHashes(fundingTx)
	for i, txin := range fundingTx.TxIn {
		// Only attach and verify scripts for inputs that don't yet
		// carry a witness.
		if len(inputScripts) != 0 && len(txin.Witness) == 0 {
			// Attach the input scripts so we can verify it below.
			txin.Witness = inputScripts[sigIndex].Witness
			txin.SignatureScript = inputScripts[sigIndex].ScriptSig

			// Fetch the alleged previous output along with the
			// pkscript referenced by this input.
			prevOut := txin.PreviousOutPoint
			output, err := l.ChainIO.GetUtxo(&prevOut.Hash, prevOut.Index)
			if output == nil {
				msg.err <- fmt.Errorf("input to funding tx does not exist: %v", err)
				return
			}

			// Ensure that the witness+sigScript combo is valid by
			// executing the script engine over this input.
			vm, err := txscript.NewEngine(output.PkScript,
				fundingTx, i, txscript.StandardVerifyFlags, nil,
				fundingHashCache, output.Value)
			if err != nil {
				// TODO(roasbeef): cancel at this stage if invalid sigs?
				msg.err <- fmt.Errorf("cannot create script engine: %s", err)
				return
			}
			if err = vm.Execute(); err != nil {
				msg.err <- fmt.Errorf("cannot validate transaction: %s", err)
				return
			}

			sigIndex++
		}
	}

	// At this point, we can also record and verify their signature for our
	// commitment transaction.
	res.theirCommitmentSig = msg.theirCommitmentSig
	commitTx := res.partialState.OurCommitTx
	theirKey := res.theirContribution.MultiSigKey

	// Re-generate both the witnessScript and p2sh output. We sign the
	// witnessScript script, but include the p2sh output as the subscript
	// for verification.
	witnessScript := res.partialState.FundingWitnessScript

	// Next, create the spending scriptSig, and then verify that the script
	// is complete, allowing us to spend from the funding transaction.
	theirCommitSig := msg.theirCommitmentSig
	channelValue := int64(res.partialState.Capacity)
	hashCache := txscript.NewTxSigHashes(commitTx)
	sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache,
		txscript.SigHashAll, commitTx, 0, channelValue)
	if err != nil {
		msg.err <- fmt.Errorf("counterparty's commitment signature is invalid: %v", err)
		return
	}

	// Verify that we've received a valid signature from the remote party
	// for our version of the commitment transaction.
	sig, err := btcec.ParseSignature(theirCommitSig, btcec.S256())
	if err != nil {
		msg.err <- err
		return
	} else if !sig.Verify(sigHash, theirKey) {
		msg.err <- fmt.Errorf("counterparty's commitment signature is invalid")
		return
	}
	res.partialState.OurCommitSig = theirCommitSig

	// Funding complete, this entry can be removed from limbo.
	l.limboMtx.Lock()
	delete(l.fundingLimbo, res.reservationID)
	l.limboMtx.Unlock()

	walletLog.Infof("Broadcasting funding tx for ChannelPoint(%v): %v",
		res.partialState.FundingOutpoint, spew.Sdump(fundingTx))

	// Broadcast the finalized funding transaction to the network.
	if err := l.PublishTransaction(fundingTx); err != nil {
		msg.err <- err
		return
	}

	// Add the complete funding transaction to the DB, in its open bucket
	// which will be used for the lifetime of this channel.
	// TODO(roasbeef): revisit fault-tolerance of this flow
	nodeAddr := res.nodeAddr
	if err := res.partialState.SyncPending(nodeAddr); err != nil {
		msg.err <- err
		return
	}

	msg.completeChan <- res.partialState
	msg.err <- nil
}
// handleSingleFunderSigs is called once the remote peer who initiated the
// single funder workflow has assembled the funding transaction, and generated
// a signature for our version of the commitment transaction. This method
// progresses the workflow by generating a signature for the remote peer's
// version of the commitment transaction.
func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) {
	l.limboMtx.RLock()
	pendingReservation, ok := l.fundingLimbo[req.pendingFundingID]
	l.limboMtx.RUnlock()
	if !ok {
		req.err <- fmt.Errorf("attempted to update non-existant funding state")
		return
	}

	// Grab the mutex on the ChannelReservation to ensure thread-safety.
	pendingReservation.Lock()
	defer pendingReservation.Unlock()

	// Record the remote-supplied funding outpoint, revocation key and
	// state-hint obfuscator within the reservation's channel state.
	pendingReservation.partialState.FundingOutpoint = req.fundingOutpoint
	pendingReservation.partialState.TheirCurrentRevocation = req.revokeKey
	pendingReservation.partialState.ChanID = req.fundingOutpoint
	pendingReservation.partialState.StateHintObsfucator = req.obsfucator
	fundingTxIn := wire.NewTxIn(req.fundingOutpoint, nil, nil)

	// Now that we have the funding outpoint, we can generate both versions
	// of the commitment transaction, and generate a signature for the
	// remote node's commitment transactions.
	ourCommitKey := pendingReservation.ourContribution.CommitKey
	theirCommitKey := pendingReservation.theirContribution.CommitKey
	ourBalance := pendingReservation.partialState.OurBalance
	theirBalance := pendingReservation.partialState.TheirBalance
	ourCommitTx, err := CreateCommitTx(fundingTxIn, ourCommitKey, theirCommitKey,
		pendingReservation.ourContribution.RevocationKey,
		pendingReservation.ourContribution.CsvDelay, ourBalance, theirBalance,
		pendingReservation.partialState.OurDustLimit)
	if err != nil {
		req.err <- err
		return
	}
	theirCommitTx, err := CreateCommitTx(fundingTxIn, theirCommitKey, ourCommitKey,
		req.revokeKey, pendingReservation.theirContribution.CsvDelay,
		theirBalance, ourBalance, pendingReservation.partialState.TheirDustLimit)
	if err != nil {
		req.err <- err
		return
	}

	// With both commitment transactions constructed, use the received
	// obfuscator to encode the current state number within both
	// commitment transactions.
	err = initStateHints(ourCommitTx, theirCommitTx, req.obsfucator)
	if err != nil {
		req.err <- err
		return
	}

	// Sort both transactions according to the agreed upon canonical
	// ordering. This ensures that both parties sign the same sighash
	// without further synchronization.
	txsort.InPlaceSort(ourCommitTx)
	pendingReservation.partialState.OurCommitTx = ourCommitTx
	txsort.InPlaceSort(theirCommitTx)

	// Compute the sighash that the counterparty's signature should cover
	// for our version of the commitment transaction.
	witnessScript := pendingReservation.partialState.FundingWitnessScript
	channelValue := int64(pendingReservation.partialState.Capacity)
	hashCache := txscript.NewTxSigHashes(ourCommitTx)
	theirKey := pendingReservation.theirContribution.MultiSigKey
	ourKey := pendingReservation.partialState.OurMultiSigKey
	sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache,
		txscript.SigHashAll, ourCommitTx, 0, channelValue)
	if err != nil {
		req.err <- err
		return
	}

	// Verify that we've received a valid signature from the remote party
	// for our version of the commitment transaction.
	sig, err := btcec.ParseSignature(req.theirCommitmentSig, btcec.S256())
	if err != nil {
		req.err <- err
		return
	} else if !sig.Verify(sigHash, theirKey) {
		req.err <- fmt.Errorf("counterparty's commitment signature is invalid")
		return
	}
	pendingReservation.partialState.OurCommitSig = req.theirCommitmentSig

	// With their signature for our version of the commitment transactions
	// verified, we can now generate a signature for their version,
	// allowing the funding transaction to be safely broadcast.
	p2wsh, err := witnessScriptHash(witnessScript)
	if err != nil {
		req.err <- err
		return
	}
	signDesc := SignDescriptor{
		WitnessScript: witnessScript,
		PubKey:        ourKey,
		Output: &wire.TxOut{
			PkScript: p2wsh,
			Value:    channelValue,
		},
		HashType:   txscript.SigHashAll,
		SigHashes:  txscript.NewTxSigHashes(theirCommitTx),
		InputIndex: 0,
	}
	sigTheirCommit, err := l.Signer.SignOutputRaw(theirCommitTx, &signDesc)
	if err != nil {
		req.err <- err
		return
	}
	pendingReservation.ourCommitmentSig = sigTheirCommit

	// Add the complete funding transaction to the DB, in its open bucket
	// which will be used for the lifetime of this channel.
	if err := pendingReservation.partialState.SyncPending(pendingReservation.nodeAddr); err != nil {
		req.err <- err
		return
	}

	req.completeChan <- pendingReservation.partialState
	req.err <- nil

	// The reservation is now complete, so it can be removed from limbo.
	l.limboMtx.Lock()
	delete(l.fundingLimbo, req.pendingFundingID)
	l.limboMtx.Unlock()
}
// selectCoinsAndChange performs coin selection in order to obtain witness
// outputs which sum to at least 'amt' satoshis. If coin selection is
// successful/possible, then the selected coins are available within the
// passed contribution's inputs. If necessary, a change address will also be
// generated.
// TODO(roasbeef): remove hardcoded fees and req'd confs for outputs.
func (l *LightningWallet) selectCoinsAndChange(feeRate uint64, amt btcutil.Amount,
	contribution *ChannelContribution) error {

	// We hold the coin select mutex while querying for outputs, and
	// performing coin selection in order to avoid inadvertent double
	// spends across funding transactions.
	l.coinSelectMtx.Lock()
	defer l.coinSelectMtx.Unlock()

	// Find all unlocked unspent witness outputs with greater than 1
	// confirmation.
	// TODO(roasbeef): make num confs a configuration parameter
	coins, err := l.ListUnspentWitness(1)
	if err != nil {
		return err
	}

	// Perform coin selection over our available, unlocked unspent outputs
	// in order to find enough coins to meet the funding amount
	// requirements.
	selectedCoins, changeAmt, err := coinSelect(feeRate, amt, coins)
	if err != nil {
		return err
	}

	// Lock the selected coins. These coins are now "reserved"; this
	// prevents concurrent funding requests from referring to, and thus
	// double-spending, the same set of coins.
	contribution.Inputs = make([]*wire.TxIn, len(selectedCoins))
	for i, coin := range selectedCoins {
		l.lockedOutPoints[*coin] = struct{}{}
		l.LockOutpoint(*coin)

		// Empty sig script, we'll actually sign if this reservation is
		// queued up to be completed (the other side accepts).
		contribution.Inputs[i] = wire.NewTxIn(coin, nil, nil)
	}

	// Record any change output(s) generated as a result of the coin
	// selection.
	if changeAmt != 0 {
		changeAddr, err := l.NewAddress(WitnessPubKey, true)
		if err != nil {
			return err
		}
		changeScript, err := txscript.PayToAddrScript(changeAddr)
		if err != nil {
			return err
		}

		contribution.ChangeOutputs = make([]*wire.TxOut, 1)
		contribution.ChangeOutputs[0] = &wire.TxOut{
			Value:    int64(changeAmt),
			PkScript: changeScript,
		}
	}

	return nil
}
// deriveMasterRevocationRoot derives the private key which serves as the master
// producer root. This master secret is used as the secret input to a HKDF to
// generate revocation secrets based on random, but public data.
func (l *LightningWallet) deriveMasterRevocationRoot() (*btcec.PrivateKey, error) {
	// The root is derived deterministically from the wallet's root key at
	// a fixed child index.
	masterElkremRoot, err := l.rootKey.Child(revocationRootIndex)
	if err != nil {
		return nil, err
	}

	return masterElkremRoot.ECPrivKey()
}
// deriveStateHintObfuscator derives the bytes to be used for obfuscating the
// state hints from the root to be used for a new channel. The obfuscator is
// generated by performing an additional sha256 hash of the first child
// derived from the revocation root. The leading StateHintSize bytes of the
// resulting digest form the obfuscator.
func deriveStateHintObfuscator(producer shachain.Producer) ([StateHintSize]byte, error) {
	var hint [StateHintSize]byte

	child, err := producer.AtIndex(0)
	if err != nil {
		return hint, err
	}

	digest := sha256.Sum256(child[:])
	copy(hint[:], digest[:])

	return hint, nil
}
// initStateHints encodes the initial (zero) state number into both commitment
// transactions using the passed obfuscator.
func initStateHints(commit1, commit2 *wire.MsgTx,
	obfuscator [StateHintSize]byte) error {

	for _, commitTx := range []*wire.MsgTx{commit1, commit2} {
		if err := SetStateNumHint(commitTx, 0, obfuscator); err != nil {
			return err
		}
	}

	return nil
}
// selectInputs selects a slice of inputs necessary to meet the specified
// selection amount. If input selection is unable to succeed due to
// insufficient funds, a non-nil error is returned. Additionally, the total
// amount of the selected coins is returned so the caller can properly handle
// change+fees.
func selectInputs(amt btcutil.Amount, coins []*Utxo) (btcutil.Amount, []*wire.OutPoint, error) {
	var (
		outPoints []*wire.OutPoint
		total     btcutil.Amount
	)

	// Greedily accumulate coins in order until the target amount is
	// reached, or we run out of candidates.
	for _, coin := range coins {
		if total >= amt {
			break
		}

		outPoints = append(outPoints, &wire.OutPoint{
			Hash:  coin.Hash,
			Index: coin.Index,
		})
		total += coin.Value
	}

	// If all candidates were exhausted without reaching the target, the
	// wallet simply doesn't have the funds.
	if total < amt {
		return 0, nil, &ErrInsufficientFunds{amt, total}
	}

	return total, outPoints, nil
}
// coinSelect attempts to select a sufficient amount of coins, including a
// change output, to fund amt satoshis, adhering to the specified fee rate.
// The specified fee rate should be expressed in sat/byte for coin selection
// to function properly. It returns the selected outpoints and the resulting
// change amount.
func coinSelect(feeRate uint64, amt btcutil.Amount,
	coins []*Utxo) ([]*wire.OutPoint, btcutil.Amount, error) {

	const (
		// txOverhead is the overhead of a transaction residing within
		// the version number and lock time.
		txOverhead = 8

		// p2wkhSpendSize is an estimate of the number of bytes it
		// takes to spend a p2wkh output.
		//
		// (p2wkh witness) + txid + index + varint script size + sequence
		// TODO(roasbeef): div by 3 due to witness size?
		p2wkhSpendSize = (1 + 73 + 1 + 33) + 32 + 4 + 1 + 4

		// p2wkhOutputSize is an estimate of the size of a regular
		// p2wkh output.
		//
		// 8 (output) + 1 (var int script) + 22 (p2wkh output)
		p2wkhOutputSize = 8 + 1 + 22

		// p2wshOutputSize is an estimate of the p2wsh funding output.
		p2wshOutputSize = 8 + 1 + 34
	)

	amtNeeded := amt
	for {
		// First perform an initial round of coin selection to estimate
		// the required fee.
		totalSat, selectedUtxos, err := selectInputs(amtNeeded, coins)
		if err != nil {
			return nil, 0, err
		}

		// Based on the selected coins, estimate the size of the final
		// fully signed transaction.
		estimatedSize := ((len(selectedUtxos) * p2wkhSpendSize) +
			p2wshOutputSize + txOverhead)

		// The difference between the selected amount and the amount
		// originally requested will be used to pay fees, and generate
		// a change output with the remaining. The overshoot is always
		// computed against the original 'amt' (not 'amtNeeded') so fees
		// from prior iterations are not counted twice.
		overShootAmt := totalSat - amt

		// Based on the estimated size and fee rate, if the excess
		// amount isn't enough to pay fees, then increase the requested
		// coin amount by the estimated required fee, performing another
		// round of coin selection.
		requiredFee := btcutil.Amount(uint64(estimatedSize) * feeRate)
		if overShootAmt < requiredFee {
			// Assign rather than accumulate: the previous
			// 'amtNeeded += requiredFee' compounded fees across
			// iterations, overpaying the fee and shrinking the
			// change output.
			amtNeeded = amt + requiredFee
			continue
		}

		// If the fee is sufficient, then calculate the size of the
		// change output.
		changeAmt := overShootAmt - requiredFee

		return selectedUtxos, changeAmt, nil
	}
}
|
package opts
import (
"fmt"
"net"
"os"
"path"
"regexp"
"strings"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/volume"
)
var (
	// alphaRegexp matches any single ASCII letter; used to reject domains
	// containing no alphabetic characters at all.
	alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
	// domainRegexp matches a syntactically valid DNS name: dot-separated
	// labels of alphanumerics with interior hyphens, optional trailing dot.
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
	// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
	DefaultHTTPHost = "127.0.0.1"
	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp://
	// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
	// is not supplied. A better longer term solution would be to use a named
	// pipe as the default on the Windows daemon.
	DefaultHTTPPort = 2375 // Default HTTP Port
	// DefaultUnixSocket Path for the unix socket.
	// Docker daemon by default always listens on the default unix socket
	DefaultUnixSocket = "/var/run/docker.sock"
)
// ListOpts type that hold a list of values and a validation function.
type ListOpts struct {
	// values points at the backing slice; it is shared with the caller
	// when constructed via NewListOptsRef.
	values *[]string
	// validator, when non-nil, vets each value passed to Set.
	validator ValidatorFctType
}
// NewListOpts Create a new ListOpts backed by a freshly allocated slice,
// guarded by the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
	var backing []string
	return *NewListOptsRef(&backing, validator)
}
// NewListOptsRef creates a ListOpts that appends into the caller-provided
// slice, validating each entry with validator when it is non-nil.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
	opts := &ListOpts{values: values, validator: validator}
	return opts
}
// String renders the backing slice using the default %v formatting.
func (opts *ListOpts) String() string {
	return fmt.Sprintf("%v", *opts.values)
}
// Set validates the input value when a validator is configured, then appends
// it to the internal slice.
func (opts *ListOpts) Set(value string) error {
	if opts.validator == nil {
		*opts.values = append(*opts.values, value)
		return nil
	}

	validated, err := opts.validator(value)
	if err != nil {
		return err
	}
	*opts.values = append(*opts.values, validated)
	return nil
}
// Delete removes the first occurrence of key from the slice, if present.
func (opts *ListOpts) Delete(key string) {
	vals := *opts.values
	for i, candidate := range vals {
		if candidate != key {
			continue
		}
		*opts.values = append(vals[:i], vals[i+1:]...)
		return
	}
}
// GetMap returns the content of values as a set, which also deduplicates
// repeated entries.
// FIXME: can we remove this?
func (opts *ListOpts) GetMap() map[string]struct{} {
	set := make(map[string]struct{}, len(*opts.values))
	for _, v := range *opts.values {
		set[v] = struct{}{}
	}
	return set
}
// GetAll returns the backing slice of values.
// FIXME: Can we remove this?
func (opts *ListOpts) GetAll() []string {
	return *opts.values
}
// Get reports whether key is present in the list.
func (opts *ListOpts) Get(key string) bool {
	for _, candidate := range *opts.values {
		if candidate == key {
			return true
		}
	}
	return false
}
// Len returns the number of elements currently held in the slice.
func (opts *ListOpts) Len() int {
	return len(*opts.values)
}
// MapOpts type that holds a map of values and a validation function.
type MapOpts struct {
	// values is the backing key/value store populated by Set.
	values map[string]string
	// validator, when non-nil, vets each value passed to Set.
	validator ValidatorFctType
}
// Set validates the input when a validator is present, then stores it in the
// internal map by splitting on the first '='. A value with no '=' maps the
// whole string to the empty string.
func (opts *MapOpts) Set(value string) error {
	if opts.validator != nil {
		validated, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = validated
	}

	parts := strings.SplitN(value, "=", 2)
	key := parts[0]
	if len(parts) == 2 {
		opts.values[key] = parts[1]
	} else {
		opts.values[key] = ""
	}
	return nil
}
// String renders the backing map using the default %v formatting.
func (opts *MapOpts) String() string {
	return fmt.Sprintf("%v", opts.values)
}
// NewMapOpts creates a MapOpts over the supplied map (allocating a fresh one
// when nil is passed), guarded by the specified validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
	if values == nil {
		values = map[string]string{}
	}
	return &MapOpts{
		values:    values,
		validator: validator,
	}
}
// ValidatorFctType is a validator function that returns a validated string
// and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType is a validator function that returns a validated list
// of strings and/or an error.
type ValidatorFctListType func(val string) ([]string, error)
// ValidateAttach Validates that the specified string names one of the three
// standard streams (case-insensitively), returning its lowercase form.
func ValidateAttach(val string) (string, error) {
	switch s := strings.ToLower(val); s {
	case "stdin", "stdout", "stderr":
		return s, nil
	}
	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
// ValidateLink Validates that the specified string has a valid link format
// (containerName:alias); the input is returned unchanged in both cases.
func ValidateLink(val string) (string, error) {
	_, _, err := parsers.ParseLink(val)
	return val, err
}
// ValidateDevice Validate a path for devices.
// It will make sure 'val' is in the form:
//    [host-dir:]container-path[:mode]
func ValidateDevice(val string) (string, error) {
	// Mount-mode validation is skipped for devices.
	return validatePath(val, false)
}
// ValidatePath Validate a path for volumes.
// It will make sure 'val' is in the form:
//    [host-dir:]container-path[:rw|ro]
// It will also validate the mount mode.
func ValidatePath(val string) (string, error) {
	return validatePath(val, true)
}
// validatePath validates a volume/device specification of the form
// [host-dir:]container-path[:mode], returning a cleaned representation.
// When validateMountMode is true, a trailing mode component must be a valid
// mount mode; when false the mode is passed through unchecked.
func validatePath(val string, validateMountMode bool) (string, error) {
	var containerPath string
	var mode string

	// More than two separators cannot match any accepted form.
	if strings.Count(val, ":") > 2 {
		return val, fmt.Errorf("bad format for volumes: %s", val)
	}

	splited := strings.SplitN(val, ":", 3)
	if splited[0] == "" {
		return val, fmt.Errorf("bad format for volumes: %s", val)
	}
	switch len(splited) {
	case 1:
		// Only a container path was supplied.
		containerPath = splited[0]
		val = path.Clean(containerPath)
	case 2:
		// Two parts are ambiguous: either container-path:mode or
		// host-dir:container-path. Disambiguate by checking whether
		// the second part is a valid mount mode.
		if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid {
			containerPath = splited[0]
			mode = splited[1]
			val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
		} else {
			containerPath = splited[1]
			val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath))
		}
	case 3:
		// host-dir:container-path:mode
		containerPath = splited[1]
		mode = splited[2]
		if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid {
			return val, fmt.Errorf("bad mount mode specified : %s", mode)
		}
		val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode)
	}

	// In every accepted form the container path must be absolute.
	if !path.IsAbs(containerPath) {
		return val, fmt.Errorf("%s is not an absolute path", containerPath)
	}
	return val, nil
}
// ValidateEnv Validate an environment variable and returns it.
// It will use EnvironmentVariableRegexp to ensure the name of the environment
// variable is valid. If no value is specified, it returns the current value
// using os.Getenv.
func ValidateEnv(val string) (string, error) {
	// A variable with an explicit value ("KEY=val") is accepted verbatim.
	if strings.Contains(val, "=") {
		return val, nil
	}
	if !EnvironmentVariableRegexp.MatchString(val) {
		return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
	}
	if !doesEnvExist(val) {
		return val, nil
	}
	// Resolve the bare name against the current process environment.
	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
// ValidateIPAddress Validates an IP address, returning its canonical string
// form on success.
func ValidateIPAddress(val string) (string, error) {
	if ip := net.ParseIP(strings.TrimSpace(val)); ip != nil {
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress Validates a MAC address, returning the original input
// unchanged on success.
func ValidateMACAddress(val string) (string, error) {
	if _, err := net.ParseMAC(strings.TrimSpace(val)); err != nil {
		return "", err
	}
	return val, nil
}
// ValidateDNSSearch Validates domain for resolvconf search configuration.
// A zero length domain is represented by "." and is accepted as-is.
func ValidateDNSSearch(val string) (string, error) {
	trimmed := strings.Trim(val, " ")
	if trimmed == "." {
		return trimmed, nil
	}
	return validateDomain(trimmed)
}
// validateDomain checks val against the package's domain regexps, returning
// the matched domain portion on success.
func validateDomain(val string) (string, error) {
	// Reject inputs containing no alphabetic character at all.
	if alphaRegexp.FindString(val) != "" {
		groups := domainRegexp.FindSubmatch([]byte(val))
		// groups[1] holds the domain proper; its length is capped
		// below 255 bytes.
		if len(groups) > 0 && len(groups[1]) < 255 {
			return string(groups[1]), nil
		}
	}
	return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateExtraHost Validate that the given string is a valid extrahost and
// returns it. ExtraHosts are in the form of name:ip where the ip has to be a
// valid ip (ipv4 or ipv6).
func ValidateExtraHost(val string) (string, error) {
	// Split on the first ":" only so IPv6 addresses keep their colons.
	parts := strings.SplitN(val, ":", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", fmt.Errorf("bad format for add-host: %q", val)
	}
	if _, err := ValidateIPAddress(parts[1]); err != nil {
		return "", fmt.Errorf("invalid IP address in add-host: %q", parts[1])
	}
	return val, nil
}
// ValidateLabel Validate that the given string is a valid label, and returns
// it. Labels are in the form of key=value.
func ValidateLabel(val string) (string, error) {
	if !strings.Contains(val, "=") {
		return "", fmt.Errorf("bad attribute format: %s", val)
	}
	return val, nil
}
// ValidateHost Validate that the given string is a valid host and returns it.
// Parsing is delegated to parsers.ParseHost with the package defaults; on
// error the original input is returned alongside the error.
func ValidateHost(val string) (string, error) {
	host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
	if err != nil {
		return val, err
	}
	return host, nil
}
// doesEnvExist reports whether an environment variable with the given name is
// set in the current process environment.
func doesEnvExist(name string) bool {
	for _, kv := range os.Environ() {
		pair := strings.SplitN(kv, "=", 2)
		if pair[0] == name {
			return true
		}
	}
	return false
}
Change all docker -d to docker daemon
Signed-off-by: Qiang Huang <488e06933c295c76472a67dbc6657ac40a756953@huawei.com>
package opts
import (
"fmt"
"net"
"os"
"path"
"regexp"
"strings"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/volume"
)
var (
	// alphaRegexp matches any single ASCII letter; used to reject domains
	// containing no alphabetic characters at all.
	alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
	// domainRegexp matches a syntactically valid DNS name: dot-separated
	// labels of alphanumerics with interior hyphens, optional trailing dot.
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
	// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
	DefaultHTTPHost = "127.0.0.1"
	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
	// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
	// is not supplied. A better longer term solution would be to use a named
	// pipe as the default on the Windows daemon.
	DefaultHTTPPort = 2375 // Default HTTP Port
	// DefaultUnixSocket Path for the unix socket.
	// Docker daemon by default always listens on the default unix socket
	DefaultUnixSocket = "/var/run/docker.sock"
)
// ListOpts type that hold a list of values and a validation function.
type ListOpts struct {
	// values points at the backing slice; it is shared with the caller
	// when constructed via NewListOptsRef.
	values *[]string
	// validator, when non-nil, vets each value passed to Set.
	validator ValidatorFctType
}
// NewListOpts Create a new ListOpts backed by a freshly allocated slice,
// guarded by the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
	var backing []string
	return *NewListOptsRef(&backing, validator)
}
// NewListOptsRef creates a ListOpts that appends into the caller-provided
// slice, validating each entry with validator when it is non-nil.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
	opts := &ListOpts{values: values, validator: validator}
	return opts
}
// String renders the backing slice using the default %v formatting.
func (opts *ListOpts) String() string {
	return fmt.Sprintf("%v", *opts.values)
}
// Set validates the input value when a validator is configured, then appends
// it to the internal slice.
func (opts *ListOpts) Set(value string) error {
	if opts.validator == nil {
		*opts.values = append(*opts.values, value)
		return nil
	}

	validated, err := opts.validator(value)
	if err != nil {
		return err
	}
	*opts.values = append(*opts.values, validated)
	return nil
}
// Delete removes the first occurrence of key from the slice, if present.
func (opts *ListOpts) Delete(key string) {
	vals := *opts.values
	for i, candidate := range vals {
		if candidate != key {
			continue
		}
		*opts.values = append(vals[:i], vals[i+1:]...)
		return
	}
}
// GetMap returns the content of values as a set, which also deduplicates
// repeated entries.
// FIXME: can we remove this?
func (opts *ListOpts) GetMap() map[string]struct{} {
	set := make(map[string]struct{}, len(*opts.values))
	for _, v := range *opts.values {
		set[v] = struct{}{}
	}
	return set
}
// GetAll returns the backing slice of values.
// FIXME: Can we remove this?
func (opts *ListOpts) GetAll() []string {
	return *opts.values
}
// Get reports whether key is present in the list.
func (opts *ListOpts) Get(key string) bool {
	for _, candidate := range *opts.values {
		if candidate == key {
			return true
		}
	}
	return false
}
// Len returns the number of elements currently held in the slice.
func (opts *ListOpts) Len() int {
	return len(*opts.values)
}
// MapOpts type that holds a map of values and a validation function.
type MapOpts struct {
	// values is the backing key/value store populated by Set.
	values map[string]string
	// validator, when non-nil, vets each value passed to Set.
	validator ValidatorFctType
}
// Set validates the input when a validator is present, then stores it in the
// internal map by splitting on the first '='. A value with no '=' maps the
// whole string to the empty string.
func (opts *MapOpts) Set(value string) error {
	if opts.validator != nil {
		validated, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = validated
	}

	parts := strings.SplitN(value, "=", 2)
	key := parts[0]
	if len(parts) == 2 {
		opts.values[key] = parts[1]
	} else {
		opts.values[key] = ""
	}
	return nil
}
// String renders the backing map using the default %v formatting.
func (opts *MapOpts) String() string {
	return fmt.Sprintf("%v", opts.values)
}
// NewMapOpts creates a MapOpts over the supplied map (allocating a fresh one
// when nil is passed), guarded by the specified validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
	if values == nil {
		values = map[string]string{}
	}
	return &MapOpts{
		values:    values,
		validator: validator,
	}
}
// ValidatorFctType is a validator function that returns a validated string
// and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType is a validator function that returns a validated list
// of strings and/or an error.
type ValidatorFctListType func(val string) ([]string, error)
// ValidateAttach Validates that the specified string names one of the three
// standard streams (case-insensitively), returning its lowercase form.
func ValidateAttach(val string) (string, error) {
	switch s := strings.ToLower(val); s {
	case "stdin", "stdout", "stderr":
		return s, nil
	}
	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
// ValidateLink Validates that the specified string has a valid link format
// (containerName:alias); the input is returned unchanged in both cases.
func ValidateLink(val string) (string, error) {
	_, _, err := parsers.ParseLink(val)
	return val, err
}
// ValidateDevice validates a path for devices.
// It will make sure 'val' is in the form:
//    [host-dir:]container-path[:mode]
// Unlike ValidatePath, the trailing mode element is not validated.
func ValidateDevice(val string) (string, error) {
	return validatePath(val, false)
}
// ValidatePath validates a path for volumes.
// It will make sure 'val' is in the form:
//    [host-dir:]container-path[:rw|ro]
// It will also validate the mount mode (see validatePath).
func ValidatePath(val string) (string, error) {
	return validatePath(val, true)
}
// validatePath normalizes and validates a volume/device spec of the
// form [host-dir:]container-path[:mode]. When validateMountMode is true
// a trailing mode element must satisfy volume.ValidateMountMode. The
// cleaned, reassembled spec is returned; containerPath must be absolute.
func validatePath(val string, validateMountMode bool) (string, error) {
	var containerPath string
	var mode string
	// More than two ':' separators cannot match any accepted form.
	if strings.Count(val, ":") > 2 {
		return val, fmt.Errorf("bad format for volumes: %s", val)
	}
	splited := strings.SplitN(val, ":", 3)
	if splited[0] == "" {
		return val, fmt.Errorf("bad format for volumes: %s", val)
	}
	switch len(splited) {
	case 1:
		// Bare container path.
		containerPath = splited[0]
		val = path.Clean(containerPath)
	case 2:
		// Either container-path:mode or host-dir:container-path —
		// disambiguated by whether the second element parses as a mode.
		if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid {
			containerPath = splited[0]
			mode = splited[1]
			val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
		} else {
			containerPath = splited[1]
			val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath))
		}
	case 3:
		// host-dir:container-path:mode
		containerPath = splited[1]
		mode = splited[2]
		if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid {
			return val, fmt.Errorf("bad mount mode specified : %s", mode)
		}
		// NOTE(review): containerPath is not path.Clean'd in this branch,
		// unlike cases 1 and 2 — confirm whether that is intentional.
		val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode)
	}
	if !path.IsAbs(containerPath) {
		return val, fmt.Errorf("%s is not an absolute path", containerPath)
	}
	return val, nil
}
// ValidateEnv validates an environment variable and returns it.
// Input containing '=' (NAME=VALUE form) is accepted as-is with no
// name validation. A bare name must match EnvironmentVariableRegexp;
// if the variable exists in the current process environment, its value
// is appended and "NAME=value" is returned, otherwise the bare name is
// returned unchanged.
func ValidateEnv(val string) (string, error) {
	arr := strings.Split(val, "=")
	if len(arr) > 1 {
		// Already NAME=VALUE; pass through untouched.
		return val, nil
	}
	if !EnvironmentVariableRegexp.MatchString(arr[0]) {
		return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
	}
	if !doesEnvExist(val) {
		return val, nil
	}
	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
// ValidateIPAddress validates an IP address (v4 or v6) and returns its
// canonical string form; surrounding whitespace is ignored.
func ValidateIPAddress(val string) (string, error) {
	if ip := net.ParseIP(strings.TrimSpace(val)); ip != nil {
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address and returns the original
// (untrimmed) input on success.
func ValidateMACAddress(val string) (string, error) {
	if _, err := net.ParseMAC(strings.TrimSpace(val)); err != nil {
		return "", err
	}
	return val, nil
}
// ValidateDNSSearch validates a domain for the resolv.conf search
// configuration. A zero-length domain is represented by ".".
func ValidateDNSSearch(val string) (string, error) {
	trimmed := strings.Trim(val, " ")
	if trimmed == "." {
		return trimmed, nil
	}
	return validateDomain(trimmed)
}
// validateDomain checks val against domainRegexp and returns the
// matched domain (submatch 1). The input must contain at least one
// ASCII letter and the match must be shorter than 255 bytes.
func validateDomain(val string) (string, error) {
	if alphaRegexp.FindString(val) == "" {
		return "", fmt.Errorf("%s is not a valid domain", val)
	}
	ns := domainRegexp.FindSubmatch([]byte(val))
	if len(ns) > 0 && len(ns[1]) < 255 {
		return string(ns[1]), nil
	}
	return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateExtraHost validates that the given string is a valid
// extrahost and returns it. ExtraHosts are in the form name:ip, where
// ip must be a valid IPv4 or IPv6 address. Only the first ':' is
// treated as the separator so IPv6 literals survive intact.
func ValidateExtraHost(val string) (string, error) {
	parts := strings.SplitN(val, ":", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", fmt.Errorf("bad format for add-host: %q", val)
	}
	if _, err := ValidateIPAddress(parts[1]); err != nil {
		return "", fmt.Errorf("invalid IP address in add-host: %q", parts[1])
	}
	return val, nil
}
// ValidateLabel validates that the given string is a valid label and
// returns it. Labels are in the form key=value.
func ValidateLabel(val string) (string, error) {
	if !strings.Contains(val, "=") {
		return "", fmt.Errorf("bad attribute format: %s", val)
	}
	return val, nil
}
// ValidateHost validates that the given string is a valid host address
// and returns the parsed form. DefaultHTTPHost and DefaultUnixSocket
// are passed as defaults — presumably to fill in missing parts of the
// address; confirm against parsers.ParseHost.
func ValidateHost(val string) (string, error) {
	host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
	if err != nil {
		return val, err
	}
	return host, nil
}
func doesEnvExist(name string) bool {
for _, entry := range os.Environ() {
parts := strings.SplitN(entry, "=", 2)
if parts[0] == name {
return true
}
}
return false
}
|
package opts
import (
"fmt"
"net"
"regexp"
"strings"
"github.com/docker/engine-api/types/filters"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
// ListOpts holds a list of values and a validation function.
type ListOpts struct {
values *[]string
validator ValidatorFctType
}
// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
var values []string
return *NewListOptsRef(&values, validator)
}
// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
return &ListOpts{
values: values,
validator: validator,
}
}
func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates if needed the input value and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
(*opts.values) = append((*opts.values), value)
return nil
}
// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
for i, k := range *opts.values {
if k == key {
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
return
}
}
}
// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
ret := make(map[string]struct{})
for _, k := range *opts.values {
ret[k] = struct{}{}
}
return ret
}
// GetAll returns the values of slice.
func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
if k == key {
return true
}
}
return false
}
// Len returns the amount of element in the slice.
func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
// MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
validator ValidatorFctType
}
// Set validates if needed the input value and add it to the
// internal map, by splitting on '='.
func (opts *MapOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.values)[vals[0]] = ""
} else {
(opts.values)[vals[0]] = vals[1]
}
return nil
}
// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
return opts.values
}
func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
values = make(map[string]string)
}
return &MapOpts{
values: values,
validator: validator,
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
if ip != nil {
return ip.String(), nil
}
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
if val = strings.Trim(val, " "); val == "." {
return val, nil
}
return validateDomain(val)
}
func validateDomain(val string) (string, error) {
if alphaRegexp.FindString(val) == "" {
return "", fmt.Errorf("%s is not a valid domain", val)
}
ns := domainRegexp.FindSubmatch([]byte(val))
if len(ns) > 0 && len(ns[1]) < 255 {
return string(ns[1]), nil
}
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") < 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
}
return val, nil
}
// ValidateSysctl validates a sysctl and returns it. Accepted input is
// a key=value pair whose key is either in the explicit whitelist or
// carries a whitelisted prefix (net., fs.mqueue.).
func ValidateSysctl(val string) (string, error) {
	whitelist := map[string]bool{
		"kernel.msgmax":          true,
		"kernel.msgmnb":          true,
		"kernel.msgmni":          true,
		"kernel.sem":             true,
		"kernel.shmall":          true,
		"kernel.shmmax":          true,
		"kernel.shmmni":          true,
		"kernel.shm_rmid_forced": true,
	}
	prefixes := []string{"net.", "fs.mqueue."}
	parts := strings.Split(val, "=")
	if len(parts) < 2 {
		return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
	}
	if whitelist[parts[0]] {
		return val, nil
	}
	for _, p := range prefixes {
		if strings.HasPrefix(parts[0], p) {
			return val, nil
		}
	}
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
Add engine-api types to docker
This moves the types from the `engine-api` repo to the existing types
package.
Signed-off-by: Michael Crosby <951f4c7b5d47e32d0112c78d14a98eaa23e6b5ef@gmail.com>
package opts
import (
"fmt"
"net"
"regexp"
"strings"
"github.com/docker/docker/api/types/filters"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
// ListOpts holds a list of values and a validation function.
type ListOpts struct {
values *[]string
validator ValidatorFctType
}
// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
var values []string
return *NewListOptsRef(&values, validator)
}
// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
return &ListOpts{
values: values,
validator: validator,
}
}
func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates if needed the input value and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
(*opts.values) = append((*opts.values), value)
return nil
}
// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
for i, k := range *opts.values {
if k == key {
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
return
}
}
}
// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
ret := make(map[string]struct{})
for _, k := range *opts.values {
ret[k] = struct{}{}
}
return ret
}
// GetAll returns the values of slice.
func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
if k == key {
return true
}
}
return false
}
// Len returns the amount of element in the slice.
func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
// MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
validator ValidatorFctType
}
// Set validates if needed the input value and add it to the
// internal map, by splitting on '='.
func (opts *MapOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.values)[vals[0]] = ""
} else {
(opts.values)[vals[0]] = vals[1]
}
return nil
}
// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
return opts.values
}
func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
values = make(map[string]string)
}
return &MapOpts{
values: values,
validator: validator,
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
if ip != nil {
return ip.String(), nil
}
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
if val = strings.Trim(val, " "); val == "." {
return val, nil
}
return validateDomain(val)
}
func validateDomain(val string) (string, error) {
if alphaRegexp.FindString(val) == "" {
return "", fmt.Errorf("%s is not a valid domain", val)
}
ns := domainRegexp.FindSubmatch([]byte(val))
if len(ns) > 0 && len(ns[1]) < 255 {
return string(ns[1]), nil
}
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") < 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
}
return val, nil
}
// ValidateSysctl validates a sysctl and returns it.
// ValidateSysctl validates a sysctl and returns it. Accepted input is
// a key=value pair whose key is either in the explicit whitelist or
// carries a whitelisted prefix (net., fs.mqueue.).
func ValidateSysctl(val string) (string, error) {
	whitelist := map[string]bool{
		"kernel.msgmax":          true,
		"kernel.msgmnb":          true,
		"kernel.msgmni":          true,
		"kernel.sem":             true,
		"kernel.shmall":          true,
		"kernel.shmmax":          true,
		"kernel.shmmni":          true,
		"kernel.shm_rmid_forced": true,
	}
	prefixes := []string{"net.", "fs.mqueue."}
	parts := strings.Split(val, "=")
	if len(parts) < 2 {
		return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
	}
	if whitelist[parts[0]] {
		return val, nil
	}
	for _, p := range prefixes {
		if strings.HasPrefix(parts[0], p) {
			return val, nil
		}
	}
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
|
package brimstore
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/gholt/brimutil"
"github.com/spaolacci/murmur3"
)
// ErrValueNotFound is returned by ReadValue when no current entry
// exists for the requested key pair.
var ErrValueNotFound error = errors.New("value not found")

// ValuesStoreOpts allows configuration of the ValuesStore, although normally
// the defaults are best. Every field is overridable via a
// BRIMSTORE_VALUESSTORE_* environment variable (see NewValuesStoreOpts);
// zero or negative values are replaced with defaults.
type ValuesStoreOpts struct {
	Cores                int // worker goroutine count; defaults to GOMAXPROCS
	MaxValueSize         int // largest value accepted by WriteValue, in bytes
	MemTOCPageSize       int // capacity of each in-memory TOC page
	MemValuesPageSize    int // capacity of each in-memory values page
	ValuesLocMapPageSize int
	ValuesFileSize       int // rollover size for on-disk values files
	ValuesFileReaders    int
	ChecksumInterval     int // interval passed to the checksummed writer
}
// NewValuesStoreOpts returns options read from the
// BRIMSTORE_VALUESSTORE_* environment variables, with built-in defaults
// substituted for any value that is unset, unparsable, or non-positive.
func NewValuesStoreOpts() *ValuesStoreOpts {
	// Read every override first; envInt yields 0 for unset/bad values,
	// which the defaulting below then replaces.
	opts := &ValuesStoreOpts{
		Cores:                envInt("BRIMSTORE_VALUESSTORE_CORES"),
		MaxValueSize:         envInt("BRIMSTORE_VALUESSTORE_MAX_VALUE_SIZE"),
		MemTOCPageSize:       envInt("BRIMSTORE_VALUESSTORE_MEMTOCPAGESIZE"),
		MemValuesPageSize:    envInt("BRIMSTORE_VALUESSTORE_MEMVALUESPAGESIZE"),
		ValuesLocMapPageSize: envInt("BRIMSTORE_VALUESSTORE_VALUESLOCMAP_PAGESIZE"),
		ValuesFileSize:       envInt("BRIMSTORE_VALUESSTORE_VALUESFILE_SIZE"),
		ValuesFileReaders:    envInt("BRIMSTORE_VALUESSTORE_VALUESFILE_READERS"),
		ChecksumInterval:     envInt("BRIMSTORE_VALUESSTORE_CHECKSUMINTERVAL"),
	}
	if opts.Cores <= 0 {
		opts.Cores = runtime.GOMAXPROCS(0)
	}
	if opts.MaxValueSize <= 0 {
		opts.MaxValueSize = 4 * 1024 * 1024
	}
	// Page defaults depend on MaxValueSize, which is already resolved.
	if opts.MemTOCPageSize <= 0 {
		opts.MemTOCPageSize = 1 << brimutil.PowerOfTwoNeeded(uint64(opts.MaxValueSize+4))
	}
	if opts.MemValuesPageSize <= 0 {
		opts.MemValuesPageSize = 1 << brimutil.PowerOfTwoNeeded(uint64(opts.MaxValueSize+4))
	}
	if opts.ValuesLocMapPageSize <= 0 {
		opts.ValuesLocMapPageSize = 4 * 1024 * 1024
	}
	if opts.ValuesFileSize <= 0 {
		opts.ValuesFileSize = math.MaxUint32
	}
	if opts.ValuesFileReaders <= 0 {
		// Default to one reader per core, capped at 8.
		opts.ValuesFileReaders = opts.Cores
		if opts.Cores > 8 {
			opts.ValuesFileReaders = 8
		}
	}
	if opts.ChecksumInterval <= 0 {
		opts.ChecksumInterval = 65532
	}
	return opts
}

// envInt parses the named environment variable as an int, returning 0
// when the variable is unset or malformed.
func envInt(name string) int {
	if env := os.Getenv(name); env != "" {
		if val, err := strconv.Atoi(env); err == nil {
			return val
		}
	}
	return 0
}
// ValuesStore: See NewValuesStore.
type ValuesStore struct {
	freeableVMChan        chan *valuesMem       // flushed mems, consumed by memClearer
	freeVMChan            chan *valuesMem       // reset mems, reused by memWriter
	freeVWRChans          []chan *valueWriteReq // per-shard pools of reusable write requests
	pendingVWRChans       []chan *valueWriteReq // per-shard queues feeding the memWriters
	vfVMChan              chan *valuesMem       // filled mems, consumed by vfWriter
	freeTOCBlockChan      chan []byte           // reusable TOC block buffers
	pendingTOCBlockChan   chan []byte           // filled TOC blocks, consumed by tocWriter
	tocWriterDoneChan     chan struct{}         // signaled when tocWriter finishes
	valuesLocBlocks       []valuesLocBlock      // block ID -> location block lookup
	atValuesLocBlocksIDer int32                 // atomic ID allocator for valuesLocBlocks
	vlm                   *valuesLocMap
	cores                 int
	maxValueSize          int
	memTOCPageSize        int
	memValuesPageSize     int
	valuesFileSize        int
	valuesFileReaders     int
	checksumInterval      uint32
}
// NewValuesStore creates a ValuesStore for use in storing []byte values
// referenced by 128 bit keys; opts may be nil to use the defaults.
//
// Note that a lot of buffering and multiple cores can be in use and Close
// should be called prior to the process exiting to ensure all processing is
// done and the buffers are flushed.
func NewValuesStore(opts *ValuesStoreOpts) *ValuesStore {
	if opts == nil {
		opts = NewValuesStoreOpts()
	}
	// Clamp every option to a usable floor/ceiling before construction.
	cores := opts.Cores
	if cores < 1 {
		cores = 1
	}
	maxValueSize := opts.MaxValueSize
	if maxValueSize < 0 {
		maxValueSize = 0
	}
	memTOCPageSize := opts.MemTOCPageSize
	if memTOCPageSize < 4096 {
		memTOCPageSize = 4096
	}
	memValuesPageSize := opts.MemValuesPageSize
	if memValuesPageSize < 4096 {
		memValuesPageSize = 4096
	}
	valuesFileSize := opts.ValuesFileSize
	if valuesFileSize <= 0 || valuesFileSize > math.MaxUint32 {
		valuesFileSize = math.MaxUint32
	}
	valuesFileReaders := opts.ValuesFileReaders
	if valuesFileReaders < 1 {
		valuesFileReaders = 1
	}
	checksumInterval := opts.ChecksumInterval
	if checksumInterval < 1024 {
		checksumInterval = 1024
	} else if checksumInterval >= 4294967296 {
		checksumInterval = 4294967295
	}
	// Pages must hold at least half a checksum interval plus one byte.
	if memTOCPageSize < checksumInterval/2+1 {
		memTOCPageSize = checksumInterval/2 + 1
	}
	if memValuesPageSize < checksumInterval/2+1 {
		memValuesPageSize = checksumInterval/2 + 1
	}
	vs := &ValuesStore{
		valuesLocBlocks:       make([]valuesLocBlock, 65536),
		atValuesLocBlocksIDer: _VALUESBLOCK_IDOFFSET - 1,
		vlm:                   newValuesLocMap(opts),
		cores:                 cores,
		maxValueSize:          maxValueSize,
		memTOCPageSize:        memTOCPageSize,
		memValuesPageSize:     memValuesPageSize,
		valuesFileSize:        valuesFileSize,
		checksumInterval:      uint32(checksumInterval),
		valuesFileReaders:     valuesFileReaders,
	}
	vs.freeableVMChan = make(chan *valuesMem, vs.cores)
	vs.freeVMChan = make(chan *valuesMem, vs.cores*2)
	vs.freeVWRChans = make([]chan *valueWriteReq, vs.cores)
	vs.pendingVWRChans = make([]chan *valueWriteReq, vs.cores)
	vs.vfVMChan = make(chan *valuesMem, vs.cores)
	vs.freeTOCBlockChan = make(chan []byte, vs.cores*2)
	vs.pendingTOCBlockChan = make(chan []byte, vs.cores)
	vs.tocWriterDoneChan = make(chan struct{}, 1)
	// Preallocate the reusable in-memory pages and write-request slots.
	for i := 0; i < cap(vs.freeVMChan); i++ {
		vm := &valuesMem{
			vs:     vs,
			toc:    make([]byte, 0, vs.memTOCPageSize),
			values: make([]byte, 0, vs.memValuesPageSize),
		}
		vm.id = vs.addValuesLocBock(vm)
		vs.freeVMChan <- vm
	}
	for i := 0; i < len(vs.freeVWRChans); i++ {
		vs.freeVWRChans[i] = make(chan *valueWriteReq, vs.cores*2)
		for j := 0; j < vs.cores*2; j++ {
			vs.freeVWRChans[i] <- &valueWriteReq{errChan: make(chan error, 1)}
		}
	}
	for i := 0; i < len(vs.pendingVWRChans); i++ {
		vs.pendingVWRChans[i] = make(chan *valueWriteReq)
	}
	for i := 0; i < cap(vs.freeTOCBlockChan); i++ {
		vs.freeTOCBlockChan <- make([]byte, 0, vs.memTOCPageSize)
	}
	// Start the pipeline goroutines, then replay any existing TOC files.
	go vs.tocWriter()
	go vs.vfWriter()
	for i := 0; i < vs.cores; i++ {
		go vs.memClearer()
	}
	for i := 0; i < len(vs.pendingVWRChans); i++ {
		go vs.memWriter(vs.pendingVWRChans[i])
	}
	vs.recovery()
	return vs
}
// MaxValueSize returns the largest value length, in bytes, that
// WriteValue will accept.
func (vs *ValuesStore) MaxValueSize() int {
	return vs.maxValueSize
}
// Close shuts the store down: it signals every memWriter (one nil per
// shard channel), waits for the TOC writer to drain, then spins until
// the values-loc-map finishes any in-flight resize.
func (vs *ValuesStore) Close() {
	for _, c := range vs.pendingVWRChans {
		c <- nil
	}
	<-vs.tocWriterDoneChan
	for vs.vlm.isResizing() {
		time.Sleep(10 * time.Millisecond)
	}
}
// ReadValue will return value, seq, err for keyA, keyB; if an incoming value
// is provided, the read value will be appended to it and the whole returned
// (useful to reuse an existing []byte).
func (vs *ValuesStore) ReadValue(keyA uint64, keyB uint64, value []byte) ([]byte, uint64, error) {
	// IDs below _VALUESBLOCK_IDOFFSET mark "no current entry".
	id, offset, seq := vs.vlm.get(keyA, keyB)
	if id < _VALUESBLOCK_IDOFFSET {
		return value, 0, ErrValueNotFound
	}
	return vs.valuesLocBlock(id).readValue(keyA, keyB, value, seq, offset)
}
// WriteValue stores value, seq for keyA, keyB or returns any error; a newer
// seq already in place is not reported as an error. The seq previously
// stored for the key is returned.
func (vs *ValuesStore) WriteValue(keyA uint64, keyB uint64, value []byte, seq uint64) (uint64, error) {
	// Shard by key so all writes for a given key serialize on one memWriter.
	i := int(keyA>>1) % len(vs.freeVWRChans)
	vwr := <-vs.freeVWRChans[i]
	vwr.keyA = keyA
	vwr.keyB = keyB
	vwr.value = value
	vwr.seq = seq
	vs.pendingVWRChans[i] <- vwr
	err := <-vwr.errChan
	// memWriter overwrites vwr.seq with the previously stored seq.
	oldSeq := vwr.seq
	vwr.value = nil
	vs.freeVWRChans[i] <- vwr
	return oldSeq, err
}
// valuesLocBlock maps a block ID back to its registered valuesLocBlock.
func (vs *ValuesStore) valuesLocBlock(valuesLocBlockID uint16) valuesLocBlock {
	return vs.valuesLocBlocks[valuesLocBlockID]
}
// addValuesLocBock registers a block and returns its newly assigned ID,
// panicking once all 65536 slots are exhausted.
// NOTE(review): "Bock" is a typo for "Block"; renaming would touch
// every caller so it is left as-is here.
func (vs *ValuesStore) addValuesLocBock(block valuesLocBlock) uint16 {
	id := atomic.AddInt32(&vs.atValuesLocBlocksIDer, 1)
	if id >= 65536 {
		panic("too many valuesLocBlocks")
	}
	vs.valuesLocBlocks[id] = block
	return uint16(id)
}
// memClearer drains freeableVMChan: for each valuesMem whose contents
// are on disk it re-points the location map at the file location,
// batches the still-current TOC entries into timestamped blocks for
// tocWriter, then resets the mem and returns it to freeVMChan. A nil
// mem is the shutdown signal and is forwarded to pendingTOCBlockChan.
func (vs *ValuesStore) memClearer() {
	var tb []byte
	var tbTS int64
	var tbOffset int
	for {
		vm := <-vs.freeableVMChan
		if vm == nil {
			if tb != nil {
				vs.pendingTOCBlockChan <- tb
			}
			vs.pendingTOCBlockChan <- nil
			break
		}
		vf := vs.valuesLocBlock(vm.vfID)
		// TOC blocks carry a single file timestamp; flush on change.
		if tb != nil && tbTS != vf.timestamp() {
			vs.pendingTOCBlockChan <- tb
			tb = nil
		}
		// Each TOC entry is 28 bytes: offset(4) keyA(8) keyB(8) seq(8).
		for vmTOCOffset := 0; vmTOCOffset < len(vm.toc); vmTOCOffset += 28 {
			vmMemOffset := binary.BigEndian.Uint32(vm.toc[vmTOCOffset:])
			a := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+4:])
			b := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+12:])
			q := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+20:])
			nq := vs.vlm.set(vm.vfID, vm.vfOffset+vmMemOffset, a, b, q, true)
			if nq != q {
				// A newer seq displaced this entry; drop it.
				continue
			}
			if tb != nil && tbOffset+28 > cap(tb) {
				vs.pendingTOCBlockChan <- tb
				tb = nil
			}
			if tb == nil {
				// New block: 8-byte timestamp header, then entries.
				tb = <-vs.freeTOCBlockChan
				tbTS = vf.timestamp()
				tb = tb[:8]
				binary.BigEndian.PutUint64(tb, uint64(tbTS))
				tbOffset = 8
			}
			tb = tb[:tbOffset+28]
			binary.BigEndian.PutUint32(tb[tbOffset:], vm.vfOffset+uint32(vmMemOffset))
			binary.BigEndian.PutUint64(tb[tbOffset+4:], a)
			binary.BigEndian.PutUint64(tb[tbOffset+12:], b)
			binary.BigEndian.PutUint64(tb[tbOffset+20:], q)
			tbOffset += 28
		}
		// Reset the mem for reuse under the discard lock.
		vm.discardLock.Lock()
		vm.vfID = 0
		vm.vfOffset = 0
		vm.toc = vm.toc[:0]
		vm.values = vm.values[:0]
		vm.discardLock.Unlock()
		vs.freeVMChan <- vm
	}
}
// memWriter consumes write requests from its shard channel, appending
// each value (4-byte length prefix + payload) and a 28-byte TOC entry
// to the current valuesMem and updating the location map. Full mems
// are handed to vfWriter. A nil request is the shutdown signal. The
// request's seq field is overwritten with the previously stored seq.
func (vs *ValuesStore) memWriter(VWRChan chan *valueWriteReq) {
	var vm *valuesMem
	var vmTOCOffset int
	var vmMemOffset int
	for {
		vwr := <-VWRChan
		if vwr == nil {
			if vm != nil && len(vm.toc) > 0 {
				vs.vfVMChan <- vm
			}
			vs.vfVMChan <- nil
			break
		}
		vz := len(vwr.value)
		if vz > vs.maxValueSize {
			vwr.errChan <- fmt.Errorf("value length of %d > %d", vz, vs.maxValueSize)
			continue
		}
		// Roll to a fresh mem when either the TOC or values page is full.
		if vm != nil && (vmTOCOffset+28 > cap(vm.toc) || vmMemOffset+4+vz > cap(vm.values)) {
			vs.vfVMChan <- vm
			vm = nil
		}
		if vm == nil {
			vm = <-vs.freeVMChan
			vmTOCOffset = 0
			vmMemOffset = 0
		}
		vm.discardLock.Lock()
		vm.values = vm.values[:vmMemOffset+4+vz]
		vm.discardLock.Unlock()
		binary.BigEndian.PutUint32(vm.values[vmMemOffset:], uint32(vz))
		copy(vm.values[vmMemOffset+4:], vwr.value)
		oldSeq := vs.vlm.set(vm.id, uint32(vmMemOffset), vwr.keyA, vwr.keyB, vwr.seq, false)
		if oldSeq < vwr.seq {
			// Entry accepted: record it in the TOC.
			vm.toc = vm.toc[:vmTOCOffset+28]
			binary.BigEndian.PutUint32(vm.toc[vmTOCOffset:], uint32(vmMemOffset))
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+4:], vwr.keyA)
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+12:], vwr.keyB)
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+20:], vwr.seq)
			vmTOCOffset += 28
			vmMemOffset += 4 + vz
		} else {
			// A newer seq already exists: discard the copied bytes.
			vm.discardLock.Lock()
			vm.values = vm.values[:vmMemOffset]
			vm.discardLock.Unlock()
		}
		vwr.seq = oldSeq
		vwr.errChan <- nil
	}
}
// vfWriter drains vfVMChan, writing each filled valuesMem to the
// current values file and rolling to a new file once the configured
// size would be exceeded. Each memWriter sends one nil at shutdown;
// after the last, the file is closed and the memClearers are signaled.
func (vs *ValuesStore) vfWriter() {
	var vf *valuesFile
	memWritersLeft := vs.cores
	for {
		vm := <-vs.vfVMChan
		if vm == nil {
			memWritersLeft--
			if memWritersLeft < 1 {
				if vf != nil {
					vf.close()
				}
				// NOTE(review): `i <= vs.cores` sends vs.cores+1 nils
				// while only vs.cores memClearer goroutines are started;
				// the extra nil is left buffered in freeableVMChan —
				// confirm whether this off-by-one is intentional.
				for i := 0; i <= vs.cores; i++ {
					vs.freeableVMChan <- nil
				}
				break
			}
			continue
		}
		if vf != nil && int(atomic.LoadUint32(&vf.atOffset))+len(vm.values) > vs.valuesFileSize {
			vf.close()
			vf = nil
		}
		if vf == nil {
			vf = createValuesFile(vs)
		}
		vf.write(vm)
	}
}
// tocWriter batches TOC blocks from pendingTOCBlockChan into per-file
// .valuestoc writers. At most two timestamped TOC files are open at
// once (A, the newer, and B); a block carrying a third, unseen
// timestamp closes the oldest. Each memClearer sends one nil at
// shutdown; after the last, both writers are terminated and closed and
// tocWriterDoneChan is signaled.
func (vs *ValuesStore) tocWriter() {
	var tsA uint64
	var writerA io.WriteCloser
	var offsetA uint64
	var tsB uint64
	var writerB io.WriteCloser
	var offsetB uint64
	// 32-byte file header: name/version padded with spaces to 28 bytes,
	// then the 4-byte big-endian checksum interval. The trailing spaces
	// are significant — PutUint32(head[28:]) requires len(head) == 32,
	// and offsetA below assumes a 32-byte header. (Fix: the literal had
	// lost its space padding, which made PutUint32 panic.)
	head := []byte("BRIMSTORE VALUESTOC v0          ")
	binary.BigEndian.PutUint32(head[28:], uint32(vs.checksumInterval))
	// 16-byte trailer: final offset at [4:12], "TERM" at [12:16].
	term := make([]byte, 16)
	copy(term[12:], "TERM")
	memClearersLeft := vs.cores
	for {
		t := <-vs.pendingTOCBlockChan
		if t == nil {
			memClearersLeft--
			if memClearersLeft < 1 {
				if writerB != nil {
					binary.BigEndian.PutUint64(term[4:], offsetB)
					if _, err := writerB.Write(term); err != nil {
						panic(err)
					}
					if err := writerB.Close(); err != nil {
						panic(err)
					}
				}
				if writerA != nil {
					binary.BigEndian.PutUint64(term[4:], offsetA)
					if _, err := writerA.Write(term); err != nil {
						panic(err)
					}
					if err := writerA.Close(); err != nil {
						panic(err)
					}
				}
				break
			}
			continue
		}
		if len(t) > 8 {
			ts := binary.BigEndian.Uint64(t)
			switch ts {
			case tsA:
				if _, err := writerA.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetA += uint64(len(t) - 8)
			case tsB:
				if _, err := writerB.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetB += uint64(len(t) - 8)
			default:
				// An assumption is made here: If the timestamp for this toc
				// block doesn't match the last two seen timestamps then we
				// expect no more toc blocks for the oldest timestamp and can
				// close that toc file.
				if writerB != nil {
					binary.BigEndian.PutUint64(term[4:], offsetB)
					if _, err := writerB.Write(term); err != nil {
						panic(err)
					}
					if err := writerB.Close(); err != nil {
						panic(err)
					}
				}
				tsB = tsA
				writerB = writerA
				offsetB = offsetA
				tsA = ts
				fp, err := os.Create(fmt.Sprintf("%d.valuestoc", ts))
				if err != nil {
					panic(err)
				}
				writerA = brimutil.NewMultiCoreChecksummedWriter(fp, int(vs.checksumInterval), murmur3.New32, vs.cores)
				if _, err := writerA.Write(head); err != nil {
					panic(err)
				}
				if _, err := writerA.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetA = 32 + uint64(len(t)-8)
			}
		}
		vs.freeTOCBlockChan <- t[:0]
	}
	vs.tocWriterDoneChan <- struct{}{}
}
// recovery rebuilds the in-memory values location map by replaying every
// "*.valuestoc" file found in the current working directory. Entries are
// applied with set(..., false) so the highest sequence number wins regardless
// of replay order.
//
// Fixes over the original: the per-buffer byte offset was named "i",
// shadowing the file-name index "i" of the outer loop, so the log.Printf
// calls for bad headers/terminators indexed names with a byte offset (wrong
// file reported, possible out-of-range panic); the directory handle was also
// never closed.
func (vs *ValuesStore) recovery() {
	start := time.Now()
	dfp, err := os.Open(".")
	if err != nil {
		panic(err)
	}
	names, err := dfp.Readdirnames(-1)
	dfp.Close()
	if err != nil {
		panic(err)
	}
	sort.Strings(names)
	count := 0
	for i := len(names) - 1; i >= 0; i-- {
		if !strings.HasSuffix(names[i], ".valuestoc") {
			continue
		}
		ts := int64(0)
		if ts, err = strconv.ParseInt(names[i][:len(names[i])-len(".valuestoc")], 10, 64); err != nil {
			log.Printf("bad timestamp name: %#v\n", names[i])
			continue
		}
		if ts == 0 {
			log.Printf("bad timestamp name: %#v\n", names[i])
			continue
		}
		vf := newValuesFile(vs, ts)
		fp, err := os.Open(names[i])
		if err != nil {
			log.Printf("error opening %s: %s\n", names[i], err)
			continue
		}
		// Each read covers one checksummed block: interval bytes + 4-byte sum.
		buf := make([]byte, vs.checksumInterval+4)
		checksumFailures := 0
		overflow := make([]byte, 0, 28) // partial 28-byte entry spanning blocks
		first := true
		terminated := false
		for {
			n, err := io.ReadFull(fp, buf)
			if n < 4 {
				if err != io.EOF && err != io.ErrUnexpectedEOF {
					log.Printf("error reading %s: %s\n", names[i], err)
				}
				break
			}
			n -= 4
			if murmur3.Sum32(buf[:n]) != binary.BigEndian.Uint32(buf[n:]) {
				checksumFailures++
			} else {
				// off is the byte offset within buf (was "i" — see above).
				off := 0
				if first {
					// Bytes 0-27: magic/version; bytes 28-31: checksum interval.
					if !bytes.Equal(buf[:28], []byte("BRIMSTORE VALUESTOC v0      ")) {
						log.Printf("bad header: %s\n", names[i])
						break
					}
					if binary.BigEndian.Uint32(buf[28:]) != vs.checksumInterval {
						log.Printf("bad header checksum interval: %s\n", names[i])
						break
					}
					off += 32
					first = false
				}
				if n < int(vs.checksumInterval) {
					// A short block must end with the 16-byte terminator.
					if binary.BigEndian.Uint32(buf[n-16:]) != 0 {
						log.Printf("bad terminator size marker: %s\n", names[i])
						break
					}
					if !bytes.Equal(buf[n-4:n], []byte("TERM")) {
						log.Printf("bad terminator: %s\n", names[i])
						break
					}
					n -= 16
					terminated = true
				}
				if len(overflow) > 0 {
					// Complete an entry that was split across checksum blocks.
					off += 28 - len(overflow)
					overflow = append(overflow, buf[off-28+len(overflow):off]...)
					offset := binary.BigEndian.Uint32(overflow)
					a := binary.BigEndian.Uint64(overflow[4:])
					b := binary.BigEndian.Uint64(overflow[12:])
					q := binary.BigEndian.Uint64(overflow[20:])
					vs.vlm.set(vf.id, offset, a, b, q, false)
					count++
					overflow = overflow[:0]
				}
				// Whole 28-byte entries: 4-byte offset, keyA, keyB, seq.
				for ; off+28 <= n; off += 28 {
					offset := binary.BigEndian.Uint32(buf[off:])
					a := binary.BigEndian.Uint64(buf[off+4:])
					b := binary.BigEndian.Uint64(buf[off+12:])
					q := binary.BigEndian.Uint64(buf[off+20:])
					vs.vlm.set(vf.id, offset, a, b, q, false)
					count++
				}
				if off != n {
					overflow = overflow[:n-off]
					copy(overflow, buf[off:])
				}
			}
			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
				log.Printf("error reading %s: %s\n", names[i], err)
				break
			}
		}
		fp.Close()
		if !terminated {
			log.Printf("early end of file: %s\n", names[i])
		}
		if checksumFailures > 0 {
			log.Printf("%d checksum failures for %s\n", checksumFailures, names[i])
		}
	}
	if count > 0 {
		dur := time.Since(start)
		log.Printf("%d key locations recovered in %s, %.0f/s\n", count, dur, float64(count)/(float64(dur)/float64(time.Second)))
	}
}
// valueWriteReq carries one write through the memWriter pipeline. On
// completion the memWriter overwrites seq with the previously stored sequence
// number and signals the result on errChan.
type valueWriteReq struct {
	keyA    uint64
	keyB    uint64
	value   []byte
	seq     uint64
	errChan chan error
}
// valuesLocBlock is somewhere a value can live — an in-memory valuesMem or an
// on-disk valuesFile — addressed in the location map by a uint16 block ID.
type valuesLocBlock interface {
	timestamp() int64
	readValue(keyA uint64, keyB uint64, value []byte, seq uint64, offset uint32) ([]byte, uint64, error)
}
Cap toc size (loosely)
package brimstore
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/gholt/brimutil"
"github.com/spaolacci/murmur3"
)
// ErrValueNotFound is returned by ReadValue when no location is recorded for
// the requested key pair.
var ErrValueNotFound error = errors.New("value not found")
// ValuesStoreOpts allows configuration of the ValuesStore, although normally
// the defaults are best. Zero or negative fields are replaced with defaults
// by NewValuesStoreOpts / NewValuesStore.
type ValuesStoreOpts struct {
	Cores                int // worker goroutines per pipeline stage; default GOMAXPROCS(0)
	MaxValueSize         int // largest accepted value in bytes; default 4 MiB
	MemTOCPageSize       int // capacity of each in-memory TOC buffer, bytes
	MemValuesPageSize    int // capacity of each in-memory values buffer, bytes
	ValuesLocMapPageSize int // page size for the values location map; default 4 MiB
	ValuesFileSize       int // rotation threshold for on-disk values files; default MaxUint32
	ValuesFileReaders    int // reader goroutines for values files; default min(Cores, 8)
	ChecksumInterval     int // bytes between embedded checksums; default 65532
}
// envInt returns the integer value of the environment variable key, or 0 when
// the variable is unset or unparsable. Callers treat <= 0 as "use default".
func envInt(key string) int {
	if env := os.Getenv(key); env != "" {
		if val, err := strconv.Atoi(env); err == nil {
			return val
		}
	}
	return 0
}

// NewValuesStoreOpts returns a ValuesStoreOpts seeded from the BRIMSTORE_*
// environment variables, with defaults substituted for anything unset,
// unparsable, or nonpositive. (Behavior is unchanged from the original; the
// eight copies of the env-parsing boilerplate are folded into envInt.)
func NewValuesStoreOpts() *ValuesStoreOpts {
	opts := &ValuesStoreOpts{
		Cores:                envInt("BRIMSTORE_VALUESSTORE_CORES"),
		MaxValueSize:         envInt("BRIMSTORE_VALUESSTORE_MAX_VALUE_SIZE"),
		MemTOCPageSize:       envInt("BRIMSTORE_VALUESSTORE_MEMTOCPAGESIZE"),
		MemValuesPageSize:    envInt("BRIMSTORE_VALUESSTORE_MEMVALUESPAGESIZE"),
		ValuesLocMapPageSize: envInt("BRIMSTORE_VALUESSTORE_VALUESLOCMAP_PAGESIZE"),
		ValuesFileSize:       envInt("BRIMSTORE_VALUESSTORE_VALUESFILE_SIZE"),
		ValuesFileReaders:    envInt("BRIMSTORE_VALUESSTORE_VALUESFILE_READERS"),
		ChecksumInterval:     envInt("BRIMSTORE_VALUESSTORE_CHECKSUMINTERVAL"),
	}
	if opts.Cores <= 0 {
		opts.Cores = runtime.GOMAXPROCS(0)
	}
	if opts.MaxValueSize <= 0 {
		opts.MaxValueSize = 4 * 1024 * 1024
	}
	// Page defaults depend on MaxValueSize, so they are applied after it.
	if opts.MemTOCPageSize <= 0 {
		opts.MemTOCPageSize = 1 << brimutil.PowerOfTwoNeeded(uint64(opts.MaxValueSize+4))
	}
	if opts.MemValuesPageSize <= 0 {
		opts.MemValuesPageSize = 1 << brimutil.PowerOfTwoNeeded(uint64(opts.MaxValueSize+4))
	}
	if opts.ValuesLocMapPageSize <= 0 {
		opts.ValuesLocMapPageSize = 4 * 1024 * 1024
	}
	if opts.ValuesFileSize <= 0 {
		opts.ValuesFileSize = math.MaxUint32
	}
	if opts.ValuesFileReaders <= 0 {
		opts.ValuesFileReaders = opts.Cores
		if opts.Cores > 8 {
			opts.ValuesFileReaders = 8
		}
	}
	if opts.ChecksumInterval <= 0 {
		opts.ChecksumInterval = 65532
	}
	return opts
}
// ValuesStore: See NewValuesStore.
type ValuesStore struct {
	freeableVMChan        chan *valuesMem       // flushed valuesMems awaiting a memClearer
	freeVMChan            chan *valuesMem       // reset valuesMems ready for a memWriter
	freeVWRChans          []chan *valueWriteReq // per-lane pools of reusable write requests
	pendingVWRChans       []chan *valueWriteReq // per-lane request queues into memWriter
	vfVMChan              chan *valuesMem       // filled valuesMems headed to vfWriter
	freeTOCBlockChan      chan []byte           // reusable TOC block buffers
	pendingTOCBlockChan   chan []byte           // TOC blocks headed to tocWriter
	tocWriterDoneChan     chan struct{}         // signaled when tocWriter exits
	valuesLocBlocks       []valuesLocBlock      // block ID -> block lookup (max 65536)
	atValuesLocBlocksIDer int32                 // atomic allocator for block IDs
	vlm                   *valuesLocMap         // key -> (block ID, offset, seq) map
	cores                 int
	maxValueSize          int
	memTOCPageSize        int
	memValuesPageSize     int
	valuesFileSize        int
	valuesFileReaders     int
	checksumInterval      uint32
}
// NewValuesStore creates a ValuesStore for use in storing []byte values
// referenced by 128 bit keys; opts may be nil to use the defaults.
//
// Note that a lot of buffering and multiple cores can be in use and Close
// should be called prior to the process exiting to ensure all processing is
// done and the buffers are flushed.
func NewValuesStore(opts *ValuesStoreOpts) *ValuesStore {
	if opts == nil {
		opts = NewValuesStoreOpts()
	}
	// Clamp option values to safe minimums/maximums.
	cores := opts.Cores
	if cores < 1 {
		cores = 1
	}
	maxValueSize := opts.MaxValueSize
	if maxValueSize < 0 {
		maxValueSize = 0
	}
	memTOCPageSize := opts.MemTOCPageSize
	if memTOCPageSize < 4096 {
		memTOCPageSize = 4096
	}
	memValuesPageSize := opts.MemValuesPageSize
	if memValuesPageSize < 4096 {
		memValuesPageSize = 4096
	}
	valuesFileSize := opts.ValuesFileSize
	if valuesFileSize <= 0 || valuesFileSize > math.MaxUint32 {
		valuesFileSize = math.MaxUint32
	}
	valuesFileReaders := opts.ValuesFileReaders
	if valuesFileReaders < 1 {
		valuesFileReaders = 1
	}
	checksumInterval := opts.ChecksumInterval
	if checksumInterval < 1024 {
		checksumInterval = 1024
	} else if checksumInterval >= 4294967296 {
		checksumInterval = 4294967295
	}
	// Ensure each page can hold at least checksumInterval/2+1 bytes.
	if memTOCPageSize < checksumInterval/2+1 {
		memTOCPageSize = checksumInterval/2 + 1
	}
	if memValuesPageSize < checksumInterval/2+1 {
		memValuesPageSize = checksumInterval/2 + 1
	}
	vs := &ValuesStore{
		valuesLocBlocks:       make([]valuesLocBlock, 65536),
		atValuesLocBlocksIDer: _VALUESBLOCK_IDOFFSET - 1,
		vlm:                   newValuesLocMap(opts),
		cores:                 cores,
		maxValueSize:          maxValueSize,
		memTOCPageSize:        memTOCPageSize,
		memValuesPageSize:     memValuesPageSize,
		valuesFileSize:        valuesFileSize,
		checksumInterval:      uint32(checksumInterval),
		valuesFileReaders:     valuesFileReaders,
	}
	// Channel plumbing for the memWriter -> vfWriter -> memClearer ->
	// tocWriter pipeline.
	vs.freeableVMChan = make(chan *valuesMem, vs.cores)
	vs.freeVMChan = make(chan *valuesMem, vs.cores*2)
	vs.freeVWRChans = make([]chan *valueWriteReq, vs.cores)
	vs.pendingVWRChans = make([]chan *valueWriteReq, vs.cores)
	vs.vfVMChan = make(chan *valuesMem, vs.cores)
	vs.freeTOCBlockChan = make(chan []byte, vs.cores*2)
	vs.pendingTOCBlockChan = make(chan []byte, vs.cores)
	vs.tocWriterDoneChan = make(chan struct{}, 1)
	// Pre-allocate reusable valuesMems, write requests, and TOC buffers.
	for i := 0; i < cap(vs.freeVMChan); i++ {
		vm := &valuesMem{
			vs:     vs,
			toc:    make([]byte, 0, vs.memTOCPageSize),
			values: make([]byte, 0, vs.memValuesPageSize),
		}
		vm.id = vs.addValuesLocBock(vm)
		vs.freeVMChan <- vm
	}
	for i := 0; i < len(vs.freeVWRChans); i++ {
		vs.freeVWRChans[i] = make(chan *valueWriteReq, vs.cores*2)
		for j := 0; j < vs.cores*2; j++ {
			vs.freeVWRChans[i] <- &valueWriteReq{errChan: make(chan error, 1)}
		}
	}
	for i := 0; i < len(vs.pendingVWRChans); i++ {
		vs.pendingVWRChans[i] = make(chan *valueWriteReq)
	}
	for i := 0; i < cap(vs.freeTOCBlockChan); i++ {
		vs.freeTOCBlockChan <- make([]byte, 0, vs.memTOCPageSize)
	}
	// Start the pipeline, then replay any existing TOC files on disk into
	// the location map before returning.
	go vs.tocWriter()
	go vs.vfWriter()
	for i := 0; i < vs.cores; i++ {
		go vs.memClearer()
	}
	for i := 0; i < len(vs.pendingVWRChans); i++ {
		go vs.memWriter(vs.pendingVWRChans[i])
	}
	vs.recovery()
	return vs
}
// MaxValueSize returns the largest value size, in bytes, this store accepts.
func (vs *ValuesStore) MaxValueSize() int {
	return vs.maxValueSize
}
// Close shuts down the write pipeline: a nil is sent to each memWriter, which
// cascades (memWriter -> vfWriter -> memClearer -> tocWriter) until tocWriter
// signals done; Close then waits for any in-progress location-map resize.
func (vs *ValuesStore) Close() {
	for _, c := range vs.pendingVWRChans {
		c <- nil
	}
	<-vs.tocWriterDoneChan
	for vs.vlm.isResizing() {
		time.Sleep(10 * time.Millisecond)
	}
}
// ReadValue will return value, seq, err for keyA, keyB; if an incoming value
// is provided, the read value will be appended to it and the whole returned
// (useful to reuse an existing []byte).
func (vs *ValuesStore) ReadValue(keyA uint64, keyB uint64, value []byte) ([]byte, uint64, error) {
	blockID, offset, seq := vs.vlm.get(keyA, keyB)
	if blockID < _VALUESBLOCK_IDOFFSET {
		// IDs below the offset mark "no location stored".
		return value, 0, ErrValueNotFound
	}
	block := vs.valuesLocBlock(blockID)
	return block.readValue(keyA, keyB, value, seq, offset)
}
// WriteValue stores value, seq for keyA, keyB or returns any error; a newer
// seq already in place is not reported as an error.
func (vs *ValuesStore) WriteValue(keyA uint64, keyB uint64, value []byte, seq uint64) (uint64, error) {
	// Requests for the same key always land on the same lane, serializing
	// writes per key.
	lane := int(keyA>>1) % len(vs.freeVWRChans)
	req := <-vs.freeVWRChans[lane]
	req.keyA = keyA
	req.keyB = keyB
	req.value = value
	req.seq = seq
	vs.pendingVWRChans[lane] <- req
	err := <-req.errChan
	// The memWriter replaces req.seq with the previously stored seq.
	previousSeq := req.seq
	req.value = nil
	vs.freeVWRChans[lane] <- req
	return previousSeq, err
}
// valuesLocBlock returns the block (in-memory valuesMem or on-disk
// valuesFile) registered under valuesLocBlockID.
func (vs *ValuesStore) valuesLocBlock(valuesLocBlockID uint16) valuesLocBlock {
	return vs.valuesLocBlocks[valuesLocBlockID]
}
// addValuesLocBock registers block and returns its newly allocated ID. IDs
// are handed out atomically so concurrent registration is safe; at most 65536
// blocks are supported. (The "Bock" typo is kept: renaming would touch every
// caller.)
func (vs *ValuesStore) addValuesLocBock(block valuesLocBlock) uint16 {
	id := atomic.AddInt32(&vs.atValuesLocBlocksIDer, 1)
	if id >= 65536 {
		panic("too many valuesLocBlocks")
	}
	vs.valuesLocBlocks[id] = block
	return uint16(id)
}
// memClearer consumes flushed valuesMems from freeableVMChan, re-points each
// surviving location-map entry from the in-memory block to its on-disk values
// file, batches those entries into TOC blocks for tocWriter, and finally
// resets the valuesMem and returns it to the free pool. A nil vm signals
// shutdown: any partial TOC block is flushed and a nil forwarded on.
func (vs *ValuesStore) memClearer() {
	var tb []byte    // TOC block currently being filled
	var tbTS int64   // timestamp of the values file tb belongs to
	var tbOffset int // next write position within tb
	for {
		vm := <-vs.freeableVMChan
		if vm == nil {
			if tb != nil {
				vs.pendingTOCBlockChan <- tb
			}
			vs.pendingTOCBlockChan <- nil
			break
		}
		vf := vs.valuesLocBlock(vm.vfID)
		// TOC blocks are per values file; flush if the file changed.
		if tb != nil && tbTS != vf.timestamp() {
			vs.pendingTOCBlockChan <- tb
			tb = nil
		}
		for vmTOCOffset := 0; vmTOCOffset < len(vm.toc); vmTOCOffset += 28 {
			// Each 28-byte TOC entry: 4-byte offset, keyA, keyB, seq.
			vmMemOffset := binary.BigEndian.Uint32(vm.toc[vmTOCOffset:])
			a := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+4:])
			b := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+12:])
			q := binary.BigEndian.Uint64(vm.toc[vmTOCOffset+20:])
			// Re-point the entry at its on-disk location; if a newer seq
			// replaced it in the meantime, skip persisting this entry.
			nq := vs.vlm.set(vm.vfID, vm.vfOffset+vmMemOffset, a, b, q, true)
			if nq != q {
				continue
			}
			if tb != nil && tbOffset+28 > cap(tb) {
				vs.pendingTOCBlockChan <- tb
				tb = nil
			}
			if tb == nil {
				// Start a fresh block: the first 8 bytes carry the timestamp
				// tocWriter routes on.
				tb = <-vs.freeTOCBlockChan
				tbTS = vf.timestamp()
				tb = tb[:8]
				binary.BigEndian.PutUint64(tb, uint64(tbTS))
				tbOffset = 8
			}
			tb = tb[:tbOffset+28]
			binary.BigEndian.PutUint32(tb[tbOffset:], vm.vfOffset+uint32(vmMemOffset))
			binary.BigEndian.PutUint64(tb[tbOffset+4:], a)
			binary.BigEndian.PutUint64(tb[tbOffset+12:], b)
			binary.BigEndian.PutUint64(tb[tbOffset+20:], q)
			tbOffset += 28
		}
		// Reset vm for reuse; done under discardLock to exclude concurrent
		// readers of the old contents.
		vm.discardLock.Lock()
		vm.vfID = 0
		vm.vfOffset = 0
		vm.toc = vm.toc[:0]
		vm.values = vm.values[:0]
		vm.discardLock.Unlock()
		vs.freeVMChan <- vm
	}
}
// memWriter services one pendingVWRChans lane: it packs each incoming value
// into a valuesMem (4-byte length prefix + bytes), records a 28-byte TOC
// entry, and updates the location map. A nil request signals shutdown: the
// partial valuesMem (if any) and then a nil are forwarded to the vfWriter.
func (vs *ValuesStore) memWriter(VWRChan chan *valueWriteReq) {
	var vm *valuesMem
	var vmTOCOffset int
	var vmMemOffset int
	for {
		vwr := <-VWRChan
		if vwr == nil {
			if vm != nil && len(vm.toc) > 0 {
				vs.vfVMChan <- vm
			}
			vs.vfVMChan <- nil
			break
		}
		vz := len(vwr.value)
		if vz > vs.maxValueSize {
			vwr.errChan <- fmt.Errorf("value length of %d > %d", vz, vs.maxValueSize)
			continue
		}
		// Hand the current valuesMem off when it cannot hold this entry.
		if vm != nil && (vmTOCOffset+28 > cap(vm.toc) || vmMemOffset+4+vz > cap(vm.values)) {
			vs.vfVMChan <- vm
			vm = nil
		}
		if vm == nil {
			vm = <-vs.freeVMChan
			vmTOCOffset = 0
			vmMemOffset = 0
		}
		// Extend the values slice under discardLock before writing into it.
		vm.discardLock.Lock()
		vm.values = vm.values[:vmMemOffset+4+vz]
		vm.discardLock.Unlock()
		binary.BigEndian.PutUint32(vm.values[vmMemOffset:], uint32(vz))
		copy(vm.values[vmMemOffset+4:], vwr.value)
		oldSeq := vs.vlm.set(vm.id, uint32(vmMemOffset), vwr.keyA, vwr.keyB, vwr.seq, false)
		if oldSeq < vwr.seq {
			// Our write won: append the TOC entry and advance the offsets.
			vm.toc = vm.toc[:vmTOCOffset+28]
			binary.BigEndian.PutUint32(vm.toc[vmTOCOffset:], uint32(vmMemOffset))
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+4:], vwr.keyA)
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+12:], vwr.keyB)
			binary.BigEndian.PutUint64(vm.toc[vmTOCOffset+20:], vwr.seq)
			vmTOCOffset += 28
			vmMemOffset += 4 + vz
		} else {
			// A newer seq was already stored: roll the value bytes back.
			vm.discardLock.Lock()
			vm.values = vm.values[:vmMemOffset]
			vm.discardLock.Unlock()
		}
		// Report the previously stored seq back to WriteValue via vwr.seq.
		vwr.seq = oldSeq
		vwr.errChan <- nil
	}
}
// vfWriter drains vfVMChan, flushing each filled valuesMem to the current
// values file and rotating to a new file when either the data or its TOC
// would exceed vs.valuesFileSize. Each memWriter sends one nil at shutdown;
// once all have, the open file is closed and nils stop the memClearers.
func (vs *ValuesStore) vfWriter() {
	var vf *valuesFile
	memWritersLeft := vs.cores
	// Just loosely tracks toc size to switch vfs if the toc reaches about
	// vs.valuesFileSize, in case a lot of tiny values (<32) are in use.
	tocLen := 0
	for {
		vm := <-vs.vfVMChan
		if vm == nil {
			memWritersLeft--
			if memWritersLeft < 1 {
				if vf != nil {
					vf.close()
				}
				// NOTE(review): sends vs.cores+1 nils for vs.cores
				// memClearers; the extra nil sits in the buffered channel —
				// confirm whether intended.
				for i := 0; i <= vs.cores; i++ {
					vs.freeableVMChan <- nil
				}
				break
			}
			continue
		}
		// Fixes over the original: the tocLen test is now guarded by
		// vf != nil (previously && bound tighter than ||, leaving it
		// unguarded), and tocLen is reset on rotation — before, once it
		// crossed valuesFileSize it stayed there, forcing a new file for
		// every subsequent valuesMem.
		if vf != nil && (int(atomic.LoadUint32(&vf.atOffset))+len(vm.values) > vs.valuesFileSize || tocLen >= vs.valuesFileSize) {
			vf.close()
			vf = nil
			tocLen = 0
		}
		if vf == nil {
			vf = createValuesFile(vs)
		}
		vf.write(vm)
		tocLen += len(vm.toc)
	}
}
// tocWriter consumes TOC blocks from the memClearers and appends them to
// per-timestamp ".valuestoc" files. The first 8 bytes of each block carry the
// timestamp of the values file the entries belong to; at most two TOC files
// are open at once (A: newest timestamp seen, B: the previous one). A block
// with a third, newer timestamp terminates B, shifts A to B, and opens a new
// A.
func (vs *ValuesStore) tocWriter() {
	var tsA uint64
	var writerA io.WriteCloser
	var offsetA uint64
	var tsB uint64
	var writerB io.WriteCloser
	var offsetB uint64
	// 32-byte file header: magic/version padding, then the checksum interval
	// in bytes 28-31.
	head := []byte("BRIMSTORE VALUESTOC v0          ")
	binary.BigEndian.PutUint32(head[28:], uint32(vs.checksumInterval))
	// 16-byte trailer: 4 zero bytes (size marker), 8-byte final offset, "TERM".
	term := make([]byte, 16)
	copy(term[12:], "TERM")
	memClearersLeft := vs.cores
	for {
		t := <-vs.pendingTOCBlockChan
		if t == nil {
			// Each memClearer sends one nil when it is done; after the last
			// one, write terminators, close both open files, and exit.
			memClearersLeft--
			if memClearersLeft < 1 {
				if writerB != nil {
					binary.BigEndian.PutUint64(term[4:], offsetB)
					if _, err := writerB.Write(term); err != nil {
						panic(err)
					}
					if err := writerB.Close(); err != nil {
						panic(err)
					}
				}
				if writerA != nil {
					binary.BigEndian.PutUint64(term[4:], offsetA)
					if _, err := writerA.Write(term); err != nil {
						panic(err)
					}
					if err := writerA.Close(); err != nil {
						panic(err)
					}
				}
				break
			}
			continue
		}
		if len(t) > 8 {
			ts := binary.BigEndian.Uint64(t)
			switch ts {
			case tsA:
				if _, err := writerA.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetA += uint64(len(t) - 8)
			case tsB:
				if _, err := writerB.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetB += uint64(len(t) - 8)
			default:
				// An assumption is made here: If the timestamp for this toc
				// block doesn't match the last two seen timestamps then we
				// expect no more toc blocks for the oldest timestamp and can
				// close that toc file.
				if writerB != nil {
					binary.BigEndian.PutUint64(term[4:], offsetB)
					if _, err := writerB.Write(term); err != nil {
						panic(err)
					}
					if err := writerB.Close(); err != nil {
						panic(err)
					}
				}
				tsB = tsA
				writerB = writerA
				offsetB = offsetA
				tsA = ts
				fp, err := os.Create(fmt.Sprintf("%d.valuestoc", ts))
				if err != nil {
					panic(err)
				}
				// The checksummed writer inserts a 4-byte murmur3 checksum
				// after every checksumInterval bytes; recovery() depends on
				// this layout when replaying the file.
				writerA = brimutil.NewMultiCoreChecksummedWriter(fp, int(vs.checksumInterval), murmur3.New32, vs.cores)
				if _, err := writerA.Write(head); err != nil {
					panic(err)
				}
				if _, err := writerA.Write(t[8:]); err != nil {
					panic(err)
				}
				offsetA = 32 + uint64(len(t)-8)
			}
		}
		// Return the block's storage to the pool for reuse.
		vs.freeTOCBlockChan <- t[:0]
	}
	vs.tocWriterDoneChan <- struct{}{}
}
// recovery rebuilds the in-memory values location map by replaying every
// "*.valuestoc" file found in the current working directory. Entries are
// applied with set(..., false) so the highest sequence number wins regardless
// of replay order.
//
// Fixes over the original: the per-buffer byte offset was named "i",
// shadowing the file-name index "i" of the outer loop, so the log.Printf
// calls for bad headers/terminators indexed names with a byte offset (wrong
// file reported, possible out-of-range panic); the directory handle was also
// never closed.
func (vs *ValuesStore) recovery() {
	start := time.Now()
	dfp, err := os.Open(".")
	if err != nil {
		panic(err)
	}
	names, err := dfp.Readdirnames(-1)
	dfp.Close()
	if err != nil {
		panic(err)
	}
	sort.Strings(names)
	count := 0
	for i := len(names) - 1; i >= 0; i-- {
		if !strings.HasSuffix(names[i], ".valuestoc") {
			continue
		}
		ts := int64(0)
		if ts, err = strconv.ParseInt(names[i][:len(names[i])-len(".valuestoc")], 10, 64); err != nil {
			log.Printf("bad timestamp name: %#v\n", names[i])
			continue
		}
		if ts == 0 {
			log.Printf("bad timestamp name: %#v\n", names[i])
			continue
		}
		vf := newValuesFile(vs, ts)
		fp, err := os.Open(names[i])
		if err != nil {
			log.Printf("error opening %s: %s\n", names[i], err)
			continue
		}
		// Each read covers one checksummed block: interval bytes + 4-byte sum.
		buf := make([]byte, vs.checksumInterval+4)
		checksumFailures := 0
		overflow := make([]byte, 0, 28) // partial 28-byte entry spanning blocks
		first := true
		terminated := false
		for {
			n, err := io.ReadFull(fp, buf)
			if n < 4 {
				if err != io.EOF && err != io.ErrUnexpectedEOF {
					log.Printf("error reading %s: %s\n", names[i], err)
				}
				break
			}
			n -= 4
			if murmur3.Sum32(buf[:n]) != binary.BigEndian.Uint32(buf[n:]) {
				checksumFailures++
			} else {
				// off is the byte offset within buf (was "i" — see above).
				off := 0
				if first {
					// Bytes 0-27: magic/version; bytes 28-31: checksum interval.
					if !bytes.Equal(buf[:28], []byte("BRIMSTORE VALUESTOC v0      ")) {
						log.Printf("bad header: %s\n", names[i])
						break
					}
					if binary.BigEndian.Uint32(buf[28:]) != vs.checksumInterval {
						log.Printf("bad header checksum interval: %s\n", names[i])
						break
					}
					off += 32
					first = false
				}
				if n < int(vs.checksumInterval) {
					// A short block must end with the 16-byte terminator.
					if binary.BigEndian.Uint32(buf[n-16:]) != 0 {
						log.Printf("bad terminator size marker: %s\n", names[i])
						break
					}
					if !bytes.Equal(buf[n-4:n], []byte("TERM")) {
						log.Printf("bad terminator: %s\n", names[i])
						break
					}
					n -= 16
					terminated = true
				}
				if len(overflow) > 0 {
					// Complete an entry that was split across checksum blocks.
					off += 28 - len(overflow)
					overflow = append(overflow, buf[off-28+len(overflow):off]...)
					offset := binary.BigEndian.Uint32(overflow)
					a := binary.BigEndian.Uint64(overflow[4:])
					b := binary.BigEndian.Uint64(overflow[12:])
					q := binary.BigEndian.Uint64(overflow[20:])
					vs.vlm.set(vf.id, offset, a, b, q, false)
					count++
					overflow = overflow[:0]
				}
				// Whole 28-byte entries: 4-byte offset, keyA, keyB, seq.
				for ; off+28 <= n; off += 28 {
					offset := binary.BigEndian.Uint32(buf[off:])
					a := binary.BigEndian.Uint64(buf[off+4:])
					b := binary.BigEndian.Uint64(buf[off+12:])
					q := binary.BigEndian.Uint64(buf[off+20:])
					vs.vlm.set(vf.id, offset, a, b, q, false)
					count++
				}
				if off != n {
					overflow = overflow[:n-off]
					copy(overflow, buf[off:])
				}
			}
			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
				log.Printf("error reading %s: %s\n", names[i], err)
				break
			}
		}
		fp.Close()
		if !terminated {
			log.Printf("early end of file: %s\n", names[i])
		}
		if checksumFailures > 0 {
			log.Printf("%d checksum failures for %s\n", checksumFailures, names[i])
		}
	}
	if count > 0 {
		dur := time.Since(start)
		log.Printf("%d key locations recovered in %s, %.0f/s\n", count, dur, float64(count)/(float64(dur)/float64(time.Second)))
	}
}
// valueWriteReq carries one write through the memWriter pipeline. On
// completion the memWriter overwrites seq with the previously stored sequence
// number and signals the result on errChan.
type valueWriteReq struct {
	keyA    uint64
	keyB    uint64
	value   []byte
	seq     uint64
	errChan chan error
}
// valuesLocBlock is somewhere a value can live — an in-memory valuesMem or an
// on-disk valuesFile — addressed in the location map by a uint16 block ID.
type valuesLocBlock interface {
	timestamp() int64
	readValue(keyA uint64, keyB uint64, value []byte, seq uint64, offset uint32) ([]byte, uint64, error)
}
|
package gonx
import (
"io/ioutil"
"strings"
)
// NXFile is a loaded NX archive: its file name, the raw file bytes, and the
// parsed header.
type NXFile struct {
	FileName string
	Raw      []byte
	Header   Header
}
// New reads fileName fully into memory and returns the parsed NXFile.
func New(fileName string) *NXFile {
	raw, err := ioutil.ReadFile(fileName)
	pError(err)
	nx := &NXFile{FileName: fileName, Raw: raw}
	nx.Header = nx.ParseHeader()
	return nx
}
// Root returns the archive's root node (node record 0).
func (NX *NXFile) Root() *Node {
	root := &Node{NXFile: NX}
	root.ParseNode(0)
	return root
}
// Resolve walks from the root along path, one child per separator-delimited
// segment, returning nil when any segment is missing. An empty separator
// defaults to "/"; a path equal to the separator returns the root.
func (NX *NXFile) Resolve(path string, separator string) *Node {
	if separator == "" {
		separator = "/"
	}
	if path == separator {
		return NX.Root()
	}
	cursor := NX.Root()
	for _, segment := range strings.Split(path, separator) {
		if cursor == nil {
			return nil
		}
		cursor = cursor.Child(segment)
	}
	return cursor
}
Reverted change from Mmap to ioutil (too slow) - commit:4dd6a76daa79e3fc2d4acde30beaf336e5e0e08f
package gonx
import (
"os"
"strings"
"github.com/edsrzf/mmap-go"
)
// NXFile is a loaded NX archive: its file name, the memory-mapped file
// contents, and the parsed header.
type NXFile struct {
	FileName string
	Raw      mmap.MMap
	Header   Header
}
// New memory-maps fileName read-only and returns the parsed NXFile.
func New(fileName string) (NX *NXFile) {
	file, err := os.Open(fileName)
	pError(err)
	// The original leaked the descriptor. An established mapping remains
	// valid after the file is closed (mmap semantics), so close it here —
	// NOTE(review): confirm mmap-go preserves this on Windows too.
	defer file.Close()
	buffer, err := mmap.Map(file, mmap.RDONLY, 0)
	pError(err)
	NX = new(NXFile)
	NX.FileName = fileName
	NX.Raw = buffer
	NX.Header = NX.ParseHeader()
	return
}
// Root returns the archive's root node (node record 0).
func (NX *NXFile) Root() *Node {
	root := &Node{NXFile: NX}
	root.ParseNode(0)
	return root
}
func (NX *NXFile) Resolve(path string, separator string) *Node {
if separator == "" {
separator = "/"
}
if path == separator {
return NX.Root()
}
nodes := strings.Split(path, separator)
cursor := NX.Root()
for i := 0; i < len(nodes); i++ {
if cursor == nil {
return nil
}
cursor = cursor.Child(nodes[i])
}
return cursor
} |
package cluster
import (
"net"
"strings"
"sync/atomic"
"github.com/gallir/radix.improved/redis"
"github.com/gallir/smart-relayer/lib"
)
const (
	// requestBufferSize is the capacity of each connection's queued-request
	// channel (reqCh).
	requestBufferSize = 32
)
// connHandler serves a single client connection. Async-capable commands are
// queued on reqCh and executed by sendWorker; pending counts queued requests
// so synchronous commands can be ordered behind them.
type connHandler struct {
	initialized bool // worker goroutine and channels have been created
	seq         uint64
	last        uint64
	srv         *Server
	conn        net.Conn
	reqCh       chan reqData     // requests queued for sendWorker
	respCh      chan *redis.Resp // replies for sync requests routed via the worker
	pending     int32            // atomic count of queued, unfinished requests
}
// Handle runs the read loop for one client connection: parse a request,
// process it, optionally uncompress the reply, and write it back. Timeouts
// are retried; any other I/O error ends the connection.
func Handle(srv *Server, netCon net.Conn) {
	h := &connHandler{srv: srv, conn: netCon}
	defer h.close()
	reader := redis.NewRespReader(h.conn)
	for {
		req := reader.Read()
		if req.IsType(redis.IOErr) {
			if !redis.IsTimeout(req) {
				return
			}
			continue
		}
		resp := h.process(req)
		if h.srv.config.Compress || h.srv.config.Uncompress {
			resp.Uncompress(lib.MagicSnappy)
		}
		resp.WriteTo(h.conn)
		resp.ReleaseBuffers()
	}
}
// close tears the handler down: the channels (created lazily, so nil-checked)
// are closed to stop the worker, then the network connection is closed.
func (h *connHandler) close() {
	defer h.conn.Close()
	if h.reqCh != nil {
		close(h.reqCh)
	}
	if h.respCh != nil {
		close(h.respCh)
	}
}
// process routes one request. Smart-mode commands in the async table get an
// immediate canned response and are executed by sendWorker; everything else
// runs synchronously — through the worker queue while async operations are
// pending (to preserve ordering), or directly otherwise.
//
// Fix over the original: pending was incremented only for async requests, but
// sendWorker decrements it for every queued request, so synchronous requests
// routed through the queue drove the counter negative.
func (h *connHandler) process(req *redis.Resp) *redis.Resp {
	cmd, err := req.First()
	if err != nil || strings.ToUpper(cmd) == selectCommand {
		return respBadCommand
	}
	doAsync := false
	var fastResponse *redis.Resp
	if h.srv.mode == lib.ModeSmart {
		fastResponse, doAsync = commands[strings.ToUpper(cmd)]
	}
	if doAsync {
		if !h.initialized {
			// Lazily start the worker on the first async command.
			h.initialized = true
			h.reqCh = make(chan reqData, requestBufferSize)
			h.respCh = make(chan *redis.Resp, 1)
			go h.sendWorker()
		}
		// Count before queueing so a following sync command sees pending != 0.
		atomic.AddInt32(&h.pending, 1)
		h.reqCh <- reqData{
			req:      req,
			compress: h.srv.config.Compress,
		}
		return fastResponse
	}
	p := atomic.LoadInt32(&h.pending)
	if p != 0 {
		// There are operations in queue, send by the same channel; count this
		// request too since the worker decrements for it.
		atomic.AddInt32(&h.pending, 1)
		h.reqCh <- reqData{
			req:      req,
			compress: h.srv.config.Compress,
			answerCh: h.respCh,
		}
		return <-h.respCh
	}
	// No ongoing operations, we can send directly
	resp := h.sender(req, h.srv.config.Compress, false)
	return resp
}
// sendWorker executes queued requests in order until reqCh is closed,
// forwarding the reply when the request carries an answer channel.
func (h *connHandler) sendWorker() {
	for item := range h.reqCh {
		reply := h.sender(item.req, item.compress, true)
		if ch := item.answerCh; ch != nil {
			ch <- reply
		}
	}
}
// sender executes one request against the pool: optional compression, then
// command extraction and dispatch. When async, it decrements pending on
// completion.
//
// Fix over the original: the loop converted every element including a[0] (the
// command itself) and then sliced it off — making args outgrow its len(a)-1
// capacity and reallocate. Only the arguments are converted now.
func (h *connHandler) sender(req *redis.Resp, compress, async bool) *redis.Resp {
	if compress {
		req.Compress(lib.MinCompressSize, lib.MagicSnappy)
	}
	a, err := req.Array()
	if err != nil {
		return respBadCommand
	}
	cmd, _ := a[0].Str()
	args := make([]interface{}, 0, len(a)-1)
	for _, v := range a[1:] {
		b, _ := v.Bytes()
		args = append(args, b)
	}
	// The pool client flattens a slice passed as a single variadic argument.
	resp := h.srv.pool.Cmd(cmd, args)
	if async {
		atomic.AddInt32(&h.pending, -1)
	}
	req.ReleaseBuffers()
	return resp
}
Also increment the pending counter when sending non-async requests through the worker queue
package cluster
import (
"net"
"strings"
"sync/atomic"
"github.com/gallir/radix.improved/redis"
"github.com/gallir/smart-relayer/lib"
)
const (
	// requestBufferSize is the capacity of each connection's queued-request
	// channel (reqCh).
	requestBufferSize = 32
)
// connHandler serves a single client connection. Async-capable commands are
// queued on reqCh and executed by sendWorker; pending counts queued requests
// so synchronous commands can be ordered behind them.
type connHandler struct {
	initialized bool // worker goroutine and channels have been created
	seq         uint64
	last        uint64
	srv         *Server
	conn        net.Conn
	reqCh       chan reqData     // requests queued for sendWorker
	respCh      chan *redis.Resp // replies for sync requests routed via the worker
	pending     int32            // atomic count of queued, unfinished requests
}
// Handle runs the read loop for one client connection: parse a request,
// process it, optionally uncompress the reply, and write it back. Timeouts
// are retried; any other I/O error ends the connection.
func Handle(srv *Server, netCon net.Conn) {
	h := &connHandler{srv: srv, conn: netCon}
	defer h.close()
	reader := redis.NewRespReader(h.conn)
	for {
		req := reader.Read()
		if req.IsType(redis.IOErr) {
			if !redis.IsTimeout(req) {
				return
			}
			continue
		}
		resp := h.process(req)
		if h.srv.config.Compress || h.srv.config.Uncompress {
			resp.Uncompress(lib.MagicSnappy)
		}
		resp.WriteTo(h.conn)
		resp.ReleaseBuffers()
	}
}
// close tears the handler down: the channels (created lazily, so nil-checked)
// are closed to stop the worker, then the network connection is closed.
func (h *connHandler) close() {
	defer h.conn.Close()
	if h.reqCh != nil {
		close(h.reqCh)
	}
	if h.respCh != nil {
		close(h.respCh)
	}
}
// process routes one request. Smart-mode commands in the async table get an
// immediate canned response and are executed by sendWorker; everything else
// runs synchronously — through the worker queue while async operations are
// pending (to preserve ordering), or directly otherwise.
func (h *connHandler) process(req *redis.Resp) *redis.Resp {
	cmd, err := req.First()
	if err != nil || strings.ToUpper(cmd) == selectCommand {
		return respBadCommand
	}
	doAsync := false
	var fastResponse *redis.Resp
	if h.srv.mode == lib.ModeSmart {
		fastResponse, doAsync = commands[strings.ToUpper(cmd)]
	}
	if doAsync {
		if !h.initialized {
			// Lazily start the worker on the first async command.
			h.initialized = true
			h.reqCh = make(chan reqData, requestBufferSize)
			h.respCh = make(chan *redis.Resp, 1)
			go h.sendWorker()
		}
		// Count before queueing so a following sync command sees pending != 0;
		// sendWorker decrements when the request completes.
		atomic.AddInt32(&h.pending, 1)
		h.reqCh <- reqData{
			req:      req,
			compress: h.srv.config.Compress,
		}
		return fastResponse
	}
	p := atomic.LoadInt32(&h.pending)
	if p != 0 {
		// There are operations in queue, send by the same channel
		atomic.AddInt32(&h.pending, 1)
		h.reqCh <- reqData{
			req:      req,
			compress: h.srv.config.Compress,
			answerCh: h.respCh,
		}
		return <-h.respCh
	}
	// No ongoing operations, we can send directly
	resp := h.sender(req, h.srv.config.Compress, false)
	return resp
}
// sendWorker executes queued requests in order until reqCh is closed,
// forwarding the reply when the request carries an answer channel.
func (h *connHandler) sendWorker() {
	for item := range h.reqCh {
		reply := h.sender(item.req, item.compress, true)
		if ch := item.answerCh; ch != nil {
			ch <- reply
		}
	}
}
// sender executes one request against the pool: optional compression, then
// command extraction and dispatch. When async, it decrements pending on
// completion.
//
// Fix over the original: the loop converted every element including a[0] (the
// command itself) and then sliced it off — making args outgrow its len(a)-1
// capacity and reallocate. Only the arguments are converted now.
func (h *connHandler) sender(req *redis.Resp, compress, async bool) *redis.Resp {
	if compress {
		req.Compress(lib.MinCompressSize, lib.MagicSnappy)
	}
	a, err := req.Array()
	if err != nil {
		return respBadCommand
	}
	cmd, _ := a[0].Str()
	args := make([]interface{}, 0, len(a)-1)
	for _, v := range a[1:] {
		b, _ := v.Bytes()
		args = append(args, b)
	}
	// The pool client flattens a slice passed as a single variadic argument.
	resp := h.srv.pool.Cmd(cmd, args)
	if async {
		atomic.AddInt32(&h.pending, -1)
	}
	req.ReleaseBuffers()
	return resp
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// licence that can be found in the LICENSE file.
// This file contains the implementation of the 'gomovepkg' command
// whose main function is in golang.org/x/tools/cmd/gomovepkg.
package rename
// TODO(matloob):
// - think about what happens if the package is moving across version control systems.
// - think about windows, which uses "\" as its directory separator.
// - dot imports are not supported. Make sure it's clearly documented.
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/refactor/importgraph"
)
// Move, given a package path and a destination package path, will try
// to move the given package to the new path. The Move function will
// first check for any conflicts preventing the move, such as a
// package already existing at the destination package path. If the
// move can proceed, it builds an import graph to find all imports of
// the packages whose paths need to be renamed. This includes uses of
// the subpackages of the package to be moved as those packages will
// also need to be moved. It then renames all imports to point to the
// new paths, and then moves the packages to their new paths.
//
// Fix over the original: the error from m.move() was discarded and nil
// returned unconditionally; it is now propagated to the caller.
func Move(ctxt *build.Context, from, to, moveTmpl string) error {
	srcDir, err := srcDir(ctxt, from)
	if err != nil {
		return err
	}
	// This should be the only place in the program that constructs
	// file paths.
	// TODO(matloob): test on Microsoft Windows.
	fromDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(from))
	toDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(to))
	toParent := filepath.Dir(toDir)
	if !buildutil.IsDir(ctxt, toParent) {
		return fmt.Errorf("parent directory does not exist for path %s", toDir)
	}
	// Build the import graph and figure out which packages to update.
	fwd, rev, errors := importgraph.Build(ctxt)
	if len(errors) > 0 {
		// With a large GOPATH tree, errors are inevitable.
		// Report them but proceed.
		fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n")
		for path, err := range errors {
			fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err)
		}
	}
	// Determine the affected packages---the set of packages whose import
	// statements need updating.
	affectedPackages := map[string]bool{from: true}
	destinations := map[string]string{} // maps old dir to new dir
	for pkg := range subpackages(ctxt, srcDir, from) {
		for r := range rev[pkg] {
			affectedPackages[r] = true
		}
		destinations[pkg] = strings.Replace(pkg,
			// Ensure directories have a trailing "/".
			filepath.Join(from, ""), filepath.Join(to, ""), 1)
	}
	// Load all the affected packages.
	iprog, err := loadProgram(ctxt, affectedPackages)
	if err != nil {
		return err
	}
	// Prepare the move command, if one was supplied.
	var cmd string
	if moveTmpl != "" {
		if cmd, err = moveCmd(moveTmpl, fromDir, toDir); err != nil {
			return err
		}
	}
	m := mover{
		ctxt:             ctxt,
		fwd:              fwd,
		rev:              rev,
		iprog:            iprog,
		from:             from,
		to:               to,
		fromDir:          fromDir,
		toDir:            toDir,
		affectedPackages: affectedPackages,
		destinations:     destinations,
		cmd:              cmd,
	}
	if err := m.checkValid(); err != nil {
		return err
	}
	return m.move()
}
// srcDir returns the absolute path of the srcdir containing pkg.
func srcDir(ctxt *build.Context, pkg string) (string, error) {
	for _, dir := range ctxt.SrcDirs() {
		if candidate := buildutil.JoinPath(ctxt, dir, pkg); buildutil.IsDir(ctxt, candidate) {
			return dir, nil
		}
	}
	return "", fmt.Errorf("src dir not found for package: %s", pkg)
}
// subpackages returns the set of packages in the given srcDir whose
// import paths start with dir.
//
// Fix over the original: the prefix test was a bare HasPrefix on the cleaned
// dir, which also matched sibling packages such as "dirextra" when moving
// "dir". Only dir itself and true subpackages ("dir/...") now match.
func subpackages(ctxt *build.Context, srcDir string, dir string) map[string]bool {
	subs := map[string]bool{dir: true}
	// Find all packages under srcDir whose import paths start with dir.
	buildutil.ForEachPackage(ctxt, func(pkg string, err error) {
		if err != nil {
			log.Fatalf("unexpected error in ForEachPackage: %v", err)
		}
		if pkg != dir && !strings.HasPrefix(pkg, dir+"/") {
			return
		}
		p, err := ctxt.Import(pkg, "", build.FindOnly)
		if err != nil {
			log.Fatalf("unexpected: package %s can not be located by build context: %s", pkg, err)
		}
		if p.SrcRoot == "" {
			log.Fatalf("unexpected: could not determine srcDir for package %s: %s", pkg, err)
		}
		// Skip packages that live under a different src root.
		if p.SrcRoot != srcDir {
			return
		}
		subs[pkg] = true
	})
	return subs
}
// mover holds the state for a single Move operation.
type mover struct {
	// iprog contains all packages whose contents need to be updated
	// with new package names or import paths.
	iprog *loader.Program
	ctxt  *build.Context
	// fwd and rev are the forward and reverse import graphs
	fwd, rev importgraph.Graph
	// from and to are the source and destination import
	// paths. fromDir and toDir are the source and destination
	// absolute paths that package source files will be moved between.
	from, to, fromDir, toDir string
	// affectedPackages is the set of all packages whose contents need
	// to be updated to reflect new package names or import paths.
	affectedPackages map[string]bool
	// destinations maps each subpackage to be moved to its
	// destination path.
	destinations map[string]string
	// cmd, if not empty, will be executed to move fromDir to toDir.
	cmd string
}
// checkValid rejects a move whose destination base name is not a valid Go
// identifier, collides with an existing file or directory, or whose
// subpackage destinations already exist as packages.
func (m *mover) checkValid() error {
	const prefix = "invalid move destination"
	match, err := regexp.MatchString(`^[_\pL][_\pL\p{Nd}]*$`, path.Base(m.to))
	if err != nil {
		// The pattern is a constant, so this cannot occur in practice.
		panic("regexp.MatchString failed")
	}
	if !match {
		return fmt.Errorf("%s: %s; gomvpkg does not support move destinations "+
			"whose base names are not valid go identifiers", prefix, m.to)
	}
	switch {
	case buildutil.FileExists(m.ctxt, m.toDir):
		return fmt.Errorf("%s: %s conflicts with file %s", prefix, m.to, m.toDir)
	case buildutil.IsDir(m.ctxt, m.toDir):
		return fmt.Errorf("%s: %s conflicts with directory %s", prefix, m.to, m.toDir)
	}
	for _, toSubPkg := range m.destinations {
		if _, err := m.ctxt.Import(toSubPkg, "", build.FindOnly); err == nil {
			return fmt.Errorf("%s: %s; package or subpackage %s already exists",
				prefix, m.to, toSubPkg)
		}
	}
	return nil
}
// moveCmd produces the version control move command used to move fromDir to toDir by
// executing the given template.
//
// The template may reference {{.Src}} (fromDir) and {{.Dst}} (toDir).
// On a parse or execute error the returned command is empty; previously a
// partially-executed buffer could be returned alongside a non-nil error.
func moveCmd(moveTmpl, fromDir, toDir string) (string, error) {
	tmpl, err := template.New("movecmd").Parse(moveTmpl)
	if err != nil {
		return "", err
	}

	var buf bytes.Buffer
	err = tmpl.Execute(&buf, struct {
		Src string
		Dst string
	}{fromDir, toDir})
	if err != nil {
		// Don't hand back partial output with an error.
		return "", err
	}
	return buf.String(), nil
}
// move performs the actual move: it renames the moved package's package
// declaration, rewrites imports in all affected packages, writes the
// updated files back, and finally moves the directory tree (either via
// the user-supplied version-control command or a plain rename).
func (m *mover) move() error {
	filesToUpdate := make(map[*ast.File]bool)

	// Change the moved package's "package" declaration to its new base name.
	pkg, ok := m.iprog.Imported[m.from]
	if !ok {
		log.Fatalf("unexpected: package %s is not in import map", m.from)
	}
	newName := filepath.Base(m.to)
	for _, f := range pkg.Files {
		f.Name.Name = newName // change package decl
		filesToUpdate[f] = true
	}

	// Update imports of that package to use the new import name.
	// None of the subpackages will change their name---only the from package
	// itself will.
	for p := range m.rev[m.from] {
		if err := importName(m.iprog, m.iprog.Imported[p], m.from, path.Base(m.from), newName); err != nil {
			return err
		}
	}

	// Update import paths for all imports by affected packages.
	for ap := range m.affectedPackages {
		info, ok := m.iprog.Imported[ap]
		if !ok {
			log.Fatalf("unexpected: package %s is not in import map", ap)
		}
		for _, f := range info.Files {
			for _, imp := range f.Imports {
				importPath, _ := strconv.Unquote(imp.Path.Value)
				if newPath, ok := m.destinations[importPath]; ok {
					imp.Path.Value = strconv.Quote(newPath)
					oldName := path.Base(importPath)
					if imp.Name != nil {
						oldName = imp.Name.Name
					}
					newName := path.Base(newPath)
					// Add a local alias when the effective package name
					// would otherwise change; drop an alias that has
					// become redundant with the new base name.
					if imp.Name == nil && oldName != newName {
						imp.Name = ast.NewIdent(oldName)
					} else if imp.Name == nil || imp.Name.Name == newName {
						imp.Name = nil
					}
					filesToUpdate[f] = true
				}
			}
		}
	}

	// Write every touched file back to disk in place.
	for f := range filesToUpdate {
		tokenFile := m.iprog.Fset.File(f.Pos())
		rewriteFile(m.iprog.Fset, f, tokenFile.Name())
	}

	// Move the directories.
	// If either the fromDir or toDir are contained under version control it is
	// the user's responsibility to provide a custom move command that updates
	// version control to reflect the move.
	// TODO(matloob): If the parent directory of toDir does not exist, create it.
	// For now, it's required that it does exist.
	if m.cmd != "" {
		// TODO(matloob): Verify that the windows and plan9 cases are correct.
		var cmd *exec.Cmd
		switch runtime.GOOS {
		case "windows":
			cmd = exec.Command("cmd", "/c", m.cmd)
		case "plan9":
			cmd = exec.Command("rc", "-c", m.cmd)
		default:
			cmd = exec.Command("sh", "-c", m.cmd)
		}
		cmd.Stderr = os.Stderr
		cmd.Stdout = os.Stdout
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("version control system's move command failed: %v", err)
		}

		return nil
	}

	return moveDirectory(m.fromDir, m.toDir)
}
// moveDirectory moves the tree rooted at from to to via os.Rename.
// It is a package-level variable, presumably so tests can replace it —
// TODO confirm against the test files.
var moveDirectory = func(from, to string) error {
	return os.Rename(from, to)
}
refactor/rename: fix command name in comment
Change-Id: I7a74c07b104cf1d1adb4536522ff2341d24ebd82
Reviewed-on: https://go-review.googlesource.com/12010
Reviewed-by: Ian Lance Taylor <87e9c6d529889242b7e184afb632328636553ab4@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the implementation of the 'gomvpkg' command
// whose main function is in golang.org/x/tools/cmd/gomvpkg.
package rename
// TODO(matloob):
// - think about what happens if the package is moving across version control systems.
// - think about windows, which uses "\" as its directory separator.
// - dot imports are not supported. Make sure it's clearly documented.
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/refactor/importgraph"
)
// Move, given a package path and a destination package path, will try
// to move the given package to the new path. The Move function will
// first check for any conflicts preventing the move, such as a
// package already existing at the destination package path. If the
// move can proceed, it builds an import graph to find all imports of
// the packages whose paths need to be renamed. This includes uses of
// the subpackages of the package to be moved as those packages will
// also need to be moved. It then renames all imports to point to the
// new paths, and then moves the packages to their new paths.
func Move(ctxt *build.Context, from, to, moveTmpl string) error {
	srcDir, err := srcDir(ctxt, from)
	if err != nil {
		return err
	}

	// This should be the only place in the program that constructs
	// file paths.
	// TODO(matloob): test on Microsoft Windows.
	fromDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(from))
	toDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(to))
	toParent := filepath.Dir(toDir)
	if !buildutil.IsDir(ctxt, toParent) {
		return fmt.Errorf("parent directory does not exist for path %s", toDir)
	}

	// Build the import graph and figure out which packages to update.
	fwd, rev, errors := importgraph.Build(ctxt)
	if len(errors) > 0 {
		// With a large GOPATH tree, errors are inevitable.
		// Report them but proceed.
		fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n")
		for path, err := range errors {
			fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err)
		}
	}

	// Determine the affected packages---the set of packages whose import
	// statements need updating.
	affectedPackages := map[string]bool{from: true}
	destinations := map[string]string{} // maps old import path to new import path
	for pkg := range subpackages(ctxt, srcDir, from) {
		for r := range rev[pkg] {
			affectedPackages[r] = true
		}
		destinations[pkg] = strings.Replace(pkg,
			// Ensure directories have a trailing "/".
			filepath.Join(from, ""), filepath.Join(to, ""), 1)
	}

	// Load all the affected packages.
	iprog, err := loadProgram(ctxt, affectedPackages)
	if err != nil {
		return err
	}

	// Prepare the move command, if one was supplied.
	var cmd string
	if moveTmpl != "" {
		if cmd, err = moveCmd(moveTmpl, fromDir, toDir); err != nil {
			return err
		}
	}

	m := mover{
		ctxt:             ctxt,
		fwd:              fwd,
		rev:              rev,
		iprog:            iprog,
		from:             from,
		to:               to,
		fromDir:          fromDir,
		toDir:            toDir,
		affectedPackages: affectedPackages,
		destinations:     destinations,
		cmd:              cmd,
	}

	if err := m.checkValid(); err != nil {
		return err
	}

	// Fix: propagate failures from the move itself. Previously the
	// return value of m.move() was discarded and nil was always returned,
	// so callers could not detect a failed rewrite or directory move.
	return m.move()
}
// srcDir returns the absolute path of the srcdir containing pkg.
func srcDir(ctxt *build.Context, pkg string) (string, error) {
	for _, dir := range ctxt.SrcDirs() {
		// The first source root under which pkg exists as a directory wins.
		if buildutil.IsDir(ctxt, buildutil.JoinPath(ctxt, dir, pkg)) {
			return dir, nil
		}
	}
	return "", fmt.Errorf("src dir not found for package: %s", pkg)
}
// subpackages returns the set of packages in the given srcDir whose
// import paths start with dir.
func subpackages(ctxt *build.Context, srcDir string, dir string) map[string]bool {
	subs := map[string]bool{dir: true}

	// Find all packages under srcDir whose import paths start with dir.
	buildutil.ForEachPackage(ctxt, func(pkg string, err error) {
		if err != nil {
			log.Fatalf("unexpected error in ForEachPackage: %v", err)
		}

		// Fix: require a path-segment boundary so that a sibling such as
		// "foo/barbaz" is not treated as a subpackage of "foo/bar".
		// (path.Join(dir, "") cleans dir without appending a separator.)
		clean := path.Join(dir, "")
		if pkg != clean && !strings.HasPrefix(pkg, clean+"/") {
			return
		}

		p, err := ctxt.Import(pkg, "", build.FindOnly)
		if err != nil {
			log.Fatalf("unexpected: package %s can not be located by build context: %s", pkg, err)
		}
		if p.SrcRoot == "" {
			// Fix: err is always nil on this path (checked above), so the
			// old message printed a useless "%s: <nil>".
			log.Fatalf("unexpected: could not determine srcDir for package %s", pkg)
		}
		if p.SrcRoot != srcDir {
			return
		}

		subs[pkg] = true
	})
	return subs
}
// mover holds all state for a single package-move operation; it is
// built by Move and driven via checkValid and move.
type mover struct {
	// iprog contains all packages whose contents need to be updated
	// with new package names or import paths.
	iprog *loader.Program
	ctxt  *build.Context
	// fwd and rev are the forward and reverse import graphs
	fwd, rev importgraph.Graph
	// from and to are the source and destination import
	// paths. fromDir and toDir are the source and destination
	// absolute paths that package source files will be moved between.
	from, to, fromDir, toDir string
	// affectedPackages is the set of all packages whose contents need
	// to be updated to reflect new package names or import paths.
	affectedPackages map[string]bool
	// destinations maps each subpackage to be moved to its
	// destination path.
	destinations map[string]string
	// cmd, if not empty, will be executed to move fromDir to toDir.
	cmd string
}
// checkValid reports whether the move destination is acceptable: its
// base name must be a legal Go identifier, the target directory must not
// already exist as a file or directory, and none of the destination
// subpackage paths may already resolve to an existing package.
func (m *mover) checkValid() error {
	const prefix = "invalid move destination"

	// The destination's base name must be a valid Go identifier.
	ok, err := regexp.MatchString(`^[_\pL][_\pL\p{Nd}]*$`, path.Base(m.to))
	if err != nil {
		panic("regexp.MatchString failed")
	}
	if !ok {
		return fmt.Errorf("%s: %s; gomvpkg does not support move destinations "+
			"whose base names are not valid go identifiers", prefix, m.to)
	}

	switch {
	case buildutil.FileExists(m.ctxt, m.toDir):
		return fmt.Errorf("%s: %s conflicts with file %s", prefix, m.to, m.toDir)
	case buildutil.IsDir(m.ctxt, m.toDir):
		return fmt.Errorf("%s: %s conflicts with directory %s", prefix, m.to, m.toDir)
	}

	// Every destination (sub)package path must still be free.
	for _, dst := range m.destinations {
		if _, err := m.ctxt.Import(dst, "", build.FindOnly); err == nil {
			return fmt.Errorf("%s: %s; package or subpackage %s already exists",
				prefix, m.to, dst)
		}
	}
	return nil
}
// moveCmd produces the version control move command used to move fromDir to toDir by
// executing the given template.
//
// The template may reference {{.Src}} (fromDir) and {{.Dst}} (toDir).
// On a parse or execute error the returned command is empty; previously a
// partially-executed buffer could be returned alongside a non-nil error.
func moveCmd(moveTmpl, fromDir, toDir string) (string, error) {
	tmpl, err := template.New("movecmd").Parse(moveTmpl)
	if err != nil {
		return "", err
	}

	var buf bytes.Buffer
	err = tmpl.Execute(&buf, struct {
		Src string
		Dst string
	}{fromDir, toDir})
	if err != nil {
		// Don't hand back partial output with an error.
		return "", err
	}
	return buf.String(), nil
}
// move performs the actual move: it renames the moved package's package
// declaration, rewrites imports in all affected packages, writes the
// updated files back, and finally moves the directory tree (either via
// the user-supplied version-control command or a plain rename).
func (m *mover) move() error {
	filesToUpdate := make(map[*ast.File]bool)

	// Change the moved package's "package" declaration to its new base name.
	pkg, ok := m.iprog.Imported[m.from]
	if !ok {
		log.Fatalf("unexpected: package %s is not in import map", m.from)
	}
	newName := filepath.Base(m.to)
	for _, f := range pkg.Files {
		f.Name.Name = newName // change package decl
		filesToUpdate[f] = true
	}

	// Update imports of that package to use the new import name.
	// None of the subpackages will change their name---only the from package
	// itself will.
	for p := range m.rev[m.from] {
		if err := importName(m.iprog, m.iprog.Imported[p], m.from, path.Base(m.from), newName); err != nil {
			return err
		}
	}

	// Update import paths for all imports by affected packages.
	for ap := range m.affectedPackages {
		info, ok := m.iprog.Imported[ap]
		if !ok {
			log.Fatalf("unexpected: package %s is not in import map", ap)
		}
		for _, f := range info.Files {
			for _, imp := range f.Imports {
				importPath, _ := strconv.Unquote(imp.Path.Value)
				if newPath, ok := m.destinations[importPath]; ok {
					imp.Path.Value = strconv.Quote(newPath)
					oldName := path.Base(importPath)
					if imp.Name != nil {
						oldName = imp.Name.Name
					}
					newName := path.Base(newPath)
					// Add a local alias when the effective package name
					// would otherwise change; drop an alias that has
					// become redundant with the new base name.
					if imp.Name == nil && oldName != newName {
						imp.Name = ast.NewIdent(oldName)
					} else if imp.Name == nil || imp.Name.Name == newName {
						imp.Name = nil
					}
					filesToUpdate[f] = true
				}
			}
		}
	}

	// Write every touched file back to disk in place.
	for f := range filesToUpdate {
		tokenFile := m.iprog.Fset.File(f.Pos())
		rewriteFile(m.iprog.Fset, f, tokenFile.Name())
	}

	// Move the directories.
	// If either the fromDir or toDir are contained under version control it is
	// the user's responsibility to provide a custom move command that updates
	// version control to reflect the move.
	// TODO(matloob): If the parent directory of toDir does not exist, create it.
	// For now, it's required that it does exist.
	if m.cmd != "" {
		// TODO(matloob): Verify that the windows and plan9 cases are correct.
		var cmd *exec.Cmd
		switch runtime.GOOS {
		case "windows":
			cmd = exec.Command("cmd", "/c", m.cmd)
		case "plan9":
			cmd = exec.Command("rc", "-c", m.cmd)
		default:
			cmd = exec.Command("sh", "-c", m.cmd)
		}
		cmd.Stderr = os.Stderr
		cmd.Stdout = os.Stdout
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("version control system's move command failed: %v", err)
		}

		return nil
	}

	return moveDirectory(m.fromDir, m.toDir)
}
// moveDirectory moves the tree rooted at from to to via os.Rename.
// It is a package-level variable, presumably so tests can replace it —
// TODO confirm against the test files.
var moveDirectory = func(from, to string) error {
	return os.Rename(from, to)
}
|
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azurekeyvault provides a secrets implementation backed by Azure KeyVault.
// See https://docs.microsoft.com/en-us/azure/key-vault/key-vault-whatis for more information.
// Use OpenKeeper to construct a *secrets.Keeper.
//
// URLs
//
// For secrets.OpenKeeper, azurekeyvault registers for the scheme "azurekeyvault".
// The default URL opener will use Dial, which gets default credentials from the
// environment, unless the AZURE_KEYVAULT_AUTH_VIA_CLI environment variable is
// set to true, in which case it uses DialUsingCLIAuth to get credentials from the
// "az" command line.
//
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// As
//
// azurekeyvault exposes the following type for As:
// - Error: autorest.DetailedError, see https://godoc.org/github.com/Azure/go-autorest/autorest#DetailedError
package azurekeyvault
import (
"context"
"encoding/base64"
"fmt"
"net/url"
"os"
"path"
"regexp"
"strconv"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/google/wire"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
"gocloud.dev/secrets"
)
var (
	// Map of HTTP Status Code to go-cloud ErrorCode
	errorCodeMap = map[int]gcerrors.ErrorCode{
		200: gcerrors.OK,
		400: gcerrors.InvalidArgument,
		401: gcerrors.PermissionDenied,
		403: gcerrors.PermissionDenied,
		404: gcerrors.NotFound,
		408: gcerrors.DeadlineExceeded,
		429: gcerrors.ResourceExhausted,
		500: gcerrors.Internal,
		501: gcerrors.Unimplemented,
	}
)

// init registers the Keeper URL opener for the "azurekeyvault" scheme on
// the default secrets URL mux.
func init() {
	secrets.DefaultURLMux().RegisterKeeper(Scheme, new(defaultDialer))
}

// Set holds Wire providers for this package.
var Set = wire.NewSet(
	Dial,
	wire.Struct(new(URLOpener), "Client"),
)

// defaultDialer dials Azure KeyVault from the environment on the first call to OpenKeeperURL.
type defaultDialer struct {
	init   sync.Once  // guards the one-time dial
	opener *URLOpener // set on successful dial
	err    error      // set on failed dial; returned by all later calls
}
// OpenKeeperURL opens a Keeper for u, dialing Azure KeyVault exactly once;
// the result of that first dial (client or error) is reused by every
// subsequent call.
func (o *defaultDialer) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
	o.init.Do(func() {
		// Determine the dialer to use. The default one gets
		// credentials from the environment, but an alternative is
		// to get credentials from the az CLI.
		dialer := Dial
		useCLIStr := os.Getenv("AZURE_KEYVAULT_AUTH_VIA_CLI")
		if useCLIStr != "" {
			if b, err := strconv.ParseBool(useCLIStr); err != nil {
				o.err = fmt.Errorf("invalid value %q for environment variable AZURE_KEYVAULT_AUTH_VIA_CLI: %v", useCLIStr, err)
				return
			} else if b {
				dialer = DialUsingCLIAuth
			}
		}
		client, err := dialer()
		if err != nil {
			o.err = err
			return
		}
		o.opener = &URLOpener{Client: client}
	})
	if o.err != nil {
		return nil, fmt.Errorf("open keeper %v: failed to Dial default KeyVault: %v", u, o.err)
	}
	return o.opener.OpenKeeperURL(ctx, u)
}
// Scheme is the URL scheme azurekeyvault registers its URLOpener under on secrets.DefaultMux.
const Scheme = "azurekeyvault"

// URLOpener opens Azure KeyVault URLs like
// "azurekeyvault://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}?algorithm=RSA-OAEP-256".
//
// The "azurekeyvault" URL scheme is replaced with "https" to construct an Azure
// Key Vault keyID, as described in https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates.
// The "/{key-version}" suffix is optional; it defaults to the latest version.
//
// The "algorithm" query parameter sets the algorithm to use; see
// https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
// for supported algorithms. It defaults to "RSA-OAEP-256".
//
// No other query parameters are supported.
type URLOpener struct {
	// Client must be set to a non-nil value.
	Client *keyvault.BaseClient

	// Options specifies the options to pass to OpenKeeper.
	Options KeeperOptions
}
// OpenKeeperURL opens an Azure KeyVault Keeper based on u. The optional
// "algorithm" query parameter overrides Options.Algorithm; any other
// query parameter is rejected.
func (o *URLOpener) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
	q := u.Query()
	if alg := q.Get("algorithm"); alg != "" {
		o.Options.Algorithm = keyvault.JSONWebKeyEncryptionAlgorithm(alg)
		q.Del("algorithm")
	}
	// Anything left over in the query is unsupported.
	for param := range q {
		return nil, fmt.Errorf("open keeper %v: invalid query parameter %q", u, param)
	}
	// Rebuild the https key identifier from the URL's host and path.
	keyID := "https://" + path.Join(u.Host, u.Path)
	return OpenKeeper(o.Client, keyID, &o.Options)
}
// keeper implements the secrets driver interface for a single Azure Key
// Vault key.
type keeper struct {
	client      *keyvault.BaseClient
	keyVaultURI string // vault base URI captured by keyIDRE, e.g. "https://{name}.vault.azure.net/"
	keyName     string
	keyVersion  string // empty selects the latest version
	options     *KeeperOptions
}

// KeeperOptions provides configuration options for encryption/decryption operations.
type KeeperOptions struct {
	// Algorithm sets the encryption algorithm used.
	// Defaults to "RSA-OAEP-256".
	// See https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
	// for more details.
	Algorithm keyvault.JSONWebKeyEncryptionAlgorithm
}
// Dial gets a new *keyvault.BaseClient using authorization from the environment.
// It is a thin wrapper over dial(false).
// See https://docs.microsoft.com/en-us/go/azure/azure-sdk-go-authorization#use-environment-based-authentication.
func Dial() (*keyvault.BaseClient, error) {
	return dial(false)
}

// DialUsingCLIAuth gets a new *keyvault.BaseClient using authorization from the "az" CLI.
// It is a thin wrapper over dial(true).
func DialUsingCLIAuth() (*keyvault.BaseClient, error) {
	return dial(true)
}
// dial is a helper for Dial and DialUsingCLIAuth. useCLI selects the
// "az" CLI as the credential source instead of the environment.
func dial(useCLI bool) (*keyvault.BaseClient, error) {
	// Set the resource explicitly, because the default is the "resource manager endpoint"
	// instead of the keyvault endpoint.
	// https://azidentity.azurewebsites.net/post/2018/11/30/azure-key-vault-oauth-resource-value-https-vault-azure-net-no-slash
	// has some discussion.
	resource := os.Getenv("AZURE_AD_RESOURCE")
	if resource == "" {
		resource = "https://vault.azure.net"
	}
	authorizerFn := auth.NewAuthorizerFromEnvironmentWithResource
	if useCLI {
		authorizerFn = auth.NewAuthorizerFromCLIWithResource
	}
	// Fix: the result was previously named "auth", shadowing the imported
	// auth package for the remainder of the function.
	authorizer, err := authorizerFn(resource)
	if err != nil {
		return nil, err
	}
	client := keyvault.NewWithoutDefaults()
	client.Authorizer = authorizer
	client.Sender = autorest.NewClientWithUserAgent(useragent.AzureUserAgentPrefix("secrets"))
	return &client, nil
}
var (
	// keyIDRE extracts the vault base URI and the key binding from an
	// Azure Key Vault key identifier. In addition to the public cloud
	// (vault.azure.net) it accepts the sovereign-cloud vault domains for
	// Azure China, US Government, and Germany.
	// Note that the last binding may be just a key, or key/version.
	keyIDRE = regexp.MustCompile(`^(https://.+\.vault\.(?:azure\.net|azure\.cn|usgovcloudapi\.net|microsoftazure\.de)/)keys/(.+)$`)
)
// OpenKeeper returns a *secrets.Keeper that uses Azure keyVault.
//
// client is a *keyvault.BaseClient instance, see https://godoc.org/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault#BaseClient.
//
// keyID is a Azure Key Vault key identifier like "https://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}".
// The "/{key-version}" suffix is optional; it defaults to the latest version.
// See https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates
// for more details.
//
// opts may be nil, in which case defaults are used (see openKeeper).
func OpenKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*secrets.Keeper, error) {
	drv, err := openKeeper(client, keyID, opts)
	if err != nil {
		return nil, err
	}
	return secrets.NewKeeper(drv), nil
}
// openKeeper validates keyID against keyIDRE, splits it into the vault
// URI, key name and optional key version, and returns the driver keeper
// with resolved options (nil opts gets the RSA-OAEP-256 default).
func openKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*keeper, error) {
	if opts == nil {
		opts = &KeeperOptions{}
	}
	if opts.Algorithm == "" {
		opts.Algorithm = keyvault.RSAOAEP256
	}
	matches := keyIDRE.FindStringSubmatch(keyID)
	if len(matches) != 3 {
		// Fix: the old message appended a stray "%v" for matches, which is
		// always nil here and added noise to the error.
		return nil, fmt.Errorf("invalid keyID %q; must match %v", keyID, keyIDRE)
	}
	// matches[0] is the whole keyID, [1] is the keyVaultURI, and [2] is the key or the key/version.
	keyVaultURI := matches[1]
	parts := strings.SplitN(matches[2], "/", 2)
	keyName := parts[0]
	var keyVersion string
	if len(parts) > 1 {
		keyVersion = parts[1]
	}
	return &keeper{
		client:      client,
		keyVaultURI: keyVaultURI,
		keyName:     keyName,
		keyVersion:  keyVersion,
		options:     opts,
	}, nil
}
// Encrypt encrypts the plaintext into a ciphertext.
// The plaintext is base64-encoded before being sent to the service; the
// returned ciphertext is the service's result string as raw bytes.
func (k *keeper) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) {
	b64Text := base64.StdEncoding.EncodeToString(plaintext)
	keyOpsResult, err := k.client.Encrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
		Algorithm: k.options.Algorithm,
		Value:     &b64Text,
	})
	if err != nil {
		return nil, err
	}
	return []byte(*keyOpsResult.Result), nil
}

// Decrypt decrypts the ciphertext into a plaintext.
// The service's result is base64-decoded before being returned, mirroring
// the encoding applied in Encrypt.
func (k *keeper) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) {
	cipherval := string(ciphertext)
	keyOpsResult, err := k.client.Decrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
		Algorithm: k.options.Algorithm,
		Value:     &cipherval,
	})
	if err != nil {
		return nil, err
	}
	return base64.StdEncoding.DecodeString(*keyOpsResult.Result)
}
// Close implements driver.Keeper.Close; there are no resources to release.
func (k *keeper) Close() error { return nil }

// ErrorAs implements driver.Keeper.ErrorAs, exposing the underlying
// autorest.DetailedError when i is a *autorest.DetailedError.
func (k *keeper) ErrorAs(err error, i interface{}) bool {
	detailed, ok := err.(autorest.DetailedError)
	if !ok {
		return false
	}
	if target, ok := i.(*autorest.DetailedError); ok {
		*target = detailed
		return true
	}
	return false
}
// ErrorCode implements driver.ErrorCode, mapping autorest.DetailedError
// HTTP status codes to gcerrors codes via errorCodeMap.
func (k *keeper) ErrorCode(err error) gcerrors.ErrorCode {
	de, ok := err.(autorest.DetailedError)
	if !ok {
		return gcerr.Unknown
	}
	// Fix: StatusCode is declared as interface{}; guard the assertion so a
	// non-int value cannot panic (the old code used a bare .(int)).
	code, ok := de.StatusCode.(int)
	if !ok {
		return gcerr.Unknown
	}
	ec, ok := errorCodeMap[code]
	if !ok {
		return gcerr.Unknown
	}
	return ec
}
secrets/azurekeyvault: add support for other azure keyvault regions (#2786)
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azurekeyvault provides a secrets implementation backed by Azure KeyVault.
// See https://docs.microsoft.com/en-us/azure/key-vault/key-vault-whatis for more information.
// Use OpenKeeper to construct a *secrets.Keeper.
//
// URLs
//
// For secrets.OpenKeeper, azurekeyvault registers for the scheme "azurekeyvault".
// The default URL opener will use Dial, which gets default credentials from the
// environment, unless the AZURE_KEYVAULT_AUTH_VIA_CLI environment variable is
// set to true, in which case it uses DialUsingCLIAuth to get credentials from the
// "az" command line.
//
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// As
//
// azurekeyvault exposes the following type for As:
// - Error: autorest.DetailedError, see https://godoc.org/github.com/Azure/go-autorest/autorest#DetailedError
package azurekeyvault
import (
"context"
"encoding/base64"
"fmt"
"net/url"
"os"
"path"
"regexp"
"strconv"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/google/wire"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
"gocloud.dev/secrets"
)
var (
	// Map of HTTP Status Code to go-cloud ErrorCode
	errorCodeMap = map[int]gcerrors.ErrorCode{
		200: gcerrors.OK,
		400: gcerrors.InvalidArgument,
		401: gcerrors.PermissionDenied,
		403: gcerrors.PermissionDenied,
		404: gcerrors.NotFound,
		408: gcerrors.DeadlineExceeded,
		429: gcerrors.ResourceExhausted,
		500: gcerrors.Internal,
		501: gcerrors.Unimplemented,
	}
)

// init registers the Keeper URL opener for the "azurekeyvault" scheme on
// the default secrets URL mux.
func init() {
	secrets.DefaultURLMux().RegisterKeeper(Scheme, new(defaultDialer))
}

// Set holds Wire providers for this package.
var Set = wire.NewSet(
	Dial,
	wire.Struct(new(URLOpener), "Client"),
)

// defaultDialer dials Azure KeyVault from the environment on the first call to OpenKeeperURL.
type defaultDialer struct {
	init   sync.Once  // guards the one-time dial
	opener *URLOpener // set on successful dial
	err    error      // set on failed dial; returned by all later calls
}
// OpenKeeperURL opens a Keeper for u, dialing Azure KeyVault exactly once;
// the result of that first dial (client or error) is reused by every
// subsequent call.
func (o *defaultDialer) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
	o.init.Do(func() {
		// Determine the dialer to use. The default one gets
		// credentials from the environment, but an alternative is
		// to get credentials from the az CLI.
		dialer := Dial
		useCLIStr := os.Getenv("AZURE_KEYVAULT_AUTH_VIA_CLI")
		if useCLIStr != "" {
			if b, err := strconv.ParseBool(useCLIStr); err != nil {
				o.err = fmt.Errorf("invalid value %q for environment variable AZURE_KEYVAULT_AUTH_VIA_CLI: %v", useCLIStr, err)
				return
			} else if b {
				dialer = DialUsingCLIAuth
			}
		}
		client, err := dialer()
		if err != nil {
			o.err = err
			return
		}
		o.opener = &URLOpener{Client: client}
	})
	if o.err != nil {
		return nil, fmt.Errorf("open keeper %v: failed to Dial default KeyVault: %v", u, o.err)
	}
	return o.opener.OpenKeeperURL(ctx, u)
}
// Scheme is the URL scheme azurekeyvault registers its URLOpener under on secrets.DefaultMux.
const Scheme = "azurekeyvault"

// URLOpener opens Azure KeyVault URLs like
// "azurekeyvault://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}?algorithm=RSA-OAEP-256".
//
// The "azurekeyvault" URL scheme is replaced with "https" to construct an Azure
// Key Vault keyID, as described in https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates.
// The "/{key-version}" suffix is optional; it defaults to the latest version.
//
// The "algorithm" query parameter sets the algorithm to use; see
// https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
// for supported algorithms. It defaults to "RSA-OAEP-256".
//
// No other query parameters are supported.
type URLOpener struct {
	// Client must be set to a non-nil value.
	Client *keyvault.BaseClient

	// Options specifies the options to pass to OpenKeeper.
	Options KeeperOptions
}
// OpenKeeperURL opens an Azure KeyVault Keeper based on u. The optional
// "algorithm" query parameter overrides Options.Algorithm; any other
// query parameter is rejected.
func (o *URLOpener) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
	q := u.Query()
	if alg := q.Get("algorithm"); alg != "" {
		o.Options.Algorithm = keyvault.JSONWebKeyEncryptionAlgorithm(alg)
		q.Del("algorithm")
	}
	// Anything left over in the query is unsupported.
	for param := range q {
		return nil, fmt.Errorf("open keeper %v: invalid query parameter %q", u, param)
	}
	// Rebuild the https key identifier from the URL's host and path.
	keyID := "https://" + path.Join(u.Host, u.Path)
	return OpenKeeper(o.Client, keyID, &o.Options)
}
// keeper implements the secrets driver interface for a single Azure Key
// Vault key.
type keeper struct {
	client      *keyvault.BaseClient
	keyVaultURI string // vault base URI captured by keyIDRE, e.g. "https://{name}.vault.azure.net/"
	keyName     string
	keyVersion  string // empty selects the latest version
	options     *KeeperOptions
}

// KeeperOptions provides configuration options for encryption/decryption operations.
type KeeperOptions struct {
	// Algorithm sets the encryption algorithm used.
	// Defaults to "RSA-OAEP-256".
	// See https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
	// for more details.
	Algorithm keyvault.JSONWebKeyEncryptionAlgorithm
}
// Dial gets a new *keyvault.BaseClient using authorization from the environment.
// It is a thin wrapper over dial(false).
// See https://docs.microsoft.com/en-us/go/azure/azure-sdk-go-authorization#use-environment-based-authentication.
func Dial() (*keyvault.BaseClient, error) {
	return dial(false)
}

// DialUsingCLIAuth gets a new *keyvault.BaseClient using authorization from the "az" CLI.
// It is a thin wrapper over dial(true).
func DialUsingCLIAuth() (*keyvault.BaseClient, error) {
	return dial(true)
}
// dial is a helper for Dial and DialUsingCLIAuth. useCLI selects the
// "az" CLI as the credential source instead of the environment.
func dial(useCLI bool) (*keyvault.BaseClient, error) {
	// Set the resource explicitly, because the default is the "resource manager endpoint"
	// instead of the keyvault endpoint.
	// https://azidentity.azurewebsites.net/post/2018/11/30/azure-key-vault-oauth-resource-value-https-vault-azure-net-no-slash
	// has some discussion.
	resource := os.Getenv("AZURE_AD_RESOURCE")
	if resource == "" {
		resource = "https://vault.azure.net"
	}
	authorizerFn := auth.NewAuthorizerFromEnvironmentWithResource
	if useCLI {
		authorizerFn = auth.NewAuthorizerFromCLIWithResource
	}
	// Fix: the result was previously named "auth", shadowing the imported
	// auth package for the remainder of the function.
	authorizer, err := authorizerFn(resource)
	if err != nil {
		return nil, err
	}
	client := keyvault.NewWithoutDefaults()
	client.Authorizer = authorizer
	client.Sender = autorest.NewClientWithUserAgent(useragent.AzureUserAgentPrefix("secrets"))
	return &client, nil
}
var (
	// keyIDRE extracts the vault base URI and the key binding from an
	// Azure Key Vault key identifier. In addition to the public cloud
	// (vault.azure.net) it accepts the sovereign-cloud vault domains for
	// Azure China, US Government, and Germany.
	// Note that the last binding may be just a key, or key/version.
	keyIDRE = regexp.MustCompile(`^(https://.+\.vault\.(?:azure\.net|azure\.cn|usgovcloudapi\.net|microsoftazure\.de)/)keys/(.+)$`)
)
// OpenKeeper returns a *secrets.Keeper that uses Azure keyVault.
//
// client is a *keyvault.BaseClient instance, see https://godoc.org/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault#BaseClient.
//
// keyID is a Azure Key Vault key identifier like "https://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}".
// The "/{key-version}" suffix is optional; it defaults to the latest version.
// See https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates
// for more details.
//
// opts may be nil, in which case defaults are used (see openKeeper).
func OpenKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*secrets.Keeper, error) {
	drv, err := openKeeper(client, keyID, opts)
	if err != nil {
		return nil, err
	}
	return secrets.NewKeeper(drv), nil
}
// openKeeper validates keyID against keyIDRE, splits it into the vault
// URI, key name and optional key version, and returns the driver keeper
// with resolved options (nil opts gets the RSA-OAEP-256 default).
func openKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*keeper, error) {
	if opts == nil {
		opts = &KeeperOptions{}
	}
	if opts.Algorithm == "" {
		opts.Algorithm = keyvault.RSAOAEP256
	}
	matches := keyIDRE.FindStringSubmatch(keyID)
	if len(matches) != 3 {
		// Fix: the old message appended a stray "%v" for matches, which is
		// always nil here and added noise to the error.
		return nil, fmt.Errorf("invalid keyID %q; must match %v", keyID, keyIDRE)
	}
	// matches[0] is the whole keyID, [1] is the keyVaultURI, and [2] is the key or the key/version.
	keyVaultURI := matches[1]
	parts := strings.SplitN(matches[2], "/", 2)
	keyName := parts[0]
	var keyVersion string
	if len(parts) > 1 {
		keyVersion = parts[1]
	}
	return &keeper{
		client:      client,
		keyVaultURI: keyVaultURI,
		keyName:     keyName,
		keyVersion:  keyVersion,
		options:     opts,
	}, nil
}
// Encrypt encrypts the plaintext into a ciphertext.
// The plaintext is base64-encoded before being sent to the service; the
// returned ciphertext is the service's result string as raw bytes.
func (k *keeper) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) {
	b64Text := base64.StdEncoding.EncodeToString(plaintext)
	keyOpsResult, err := k.client.Encrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
		Algorithm: k.options.Algorithm,
		Value:     &b64Text,
	})
	if err != nil {
		return nil, err
	}
	return []byte(*keyOpsResult.Result), nil
}

// Decrypt decrypts the ciphertext into a plaintext.
// The service's result is base64-decoded before being returned, mirroring
// the encoding applied in Encrypt.
func (k *keeper) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) {
	cipherval := string(ciphertext)
	keyOpsResult, err := k.client.Decrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
		Algorithm: k.options.Algorithm,
		Value:     &cipherval,
	})
	if err != nil {
		return nil, err
	}
	return base64.StdEncoding.DecodeString(*keyOpsResult.Result)
}
// Close implements driver.Keeper.Close; there are no resources to release.
func (k *keeper) Close() error { return nil }

// ErrorAs implements driver.Keeper.ErrorAs, exposing the underlying
// autorest.DetailedError when i is a *autorest.DetailedError.
func (k *keeper) ErrorAs(err error, i interface{}) bool {
	detailed, ok := err.(autorest.DetailedError)
	if !ok {
		return false
	}
	if target, ok := i.(*autorest.DetailedError); ok {
		*target = detailed
		return true
	}
	return false
}
// ErrorCode implements driver.ErrorCode, mapping autorest.DetailedError
// HTTP status codes to gcerrors codes via errorCodeMap.
func (k *keeper) ErrorCode(err error) gcerrors.ErrorCode {
	de, ok := err.(autorest.DetailedError)
	if !ok {
		return gcerr.Unknown
	}
	// Fix: StatusCode is declared as interface{}; guard the assertion so a
	// non-int value cannot panic (the old code used a bare .(int)).
	code, ok := de.StatusCode.(int)
	if !ok {
		return gcerr.Unknown
	}
	ec, ok := errorCodeMap[code]
	if !ok {
		return gcerr.Unknown
	}
	return ec
}
|
// Copyright (c) 2014, Rob Thornton
// All rights reserved.
// This source code is governed by a Simplified BSD-License. Please see the
// LICENSE included in this distribution for a copy of the full license
// or, if one is not included, you may also find a copy at
// http://opensource.org/licenses/BSD-2-Clause
package parse
import (
"os"
"github.com/rthornton128/calc/ast"
"github.com/rthornton128/calc/scan"
"github.com/rthornton128/calc/token"
)
// ParseFile parses the Calc source in src, using filename for position
// reporting. It returns the resulting AST, or nil after printing the
// accumulated errors if any were encountered.
func ParseFile(filename, src string) *ast.File {
	var p parser
	p.init(filename, src)
	file := p.parseFile()
	if p.errors.Count() == 0 {
		return file
	}
	p.errors.Print()
	return nil
}
// parser holds the scanner plus all state needed while building the AST.
type parser struct {
	file *token.File    // source handle for translating positions
	errors scan.ErrorList // accumulated parse errors
	scanner scan.Scanner  // token source
	listok bool           // whether an expression list may appear next
	curScope *ast.Scope   // innermost open scope
	topScope *ast.Scope   // file-level scope
	pos token.Pos         // position of the current token
	tok token.Token       // current token
	lit string            // literal text of the current token
}
/* Utility */
// addError records a parse error at the current token position. After
// ten errors the list is printed and the process exits, preventing an
// avalanche of follow-on errors.
func (p *parser) addError(msg string) {
	p.errors.Add(p.file.Position(p.pos), msg)
	if p.errors.Count() >= 10 {
		p.errors.Print()
		os.Exit(1)
	}
}
// expect consumes the current token, returning its position. If the
// token is not tok an error is recorded, but parsing continues.
func (p *parser) expect(tok token.Token) token.Pos {
	defer p.next()
	if tok != p.tok {
		p.addError("Expected '" + tok.String() + "' got '" + p.lit + "'")
	}
	return p.pos
}
// init prepares the parser for a fresh source file: it creates the
// token file, hooks up the scanner, opens the top-level scope and
// primes the first token.
func (p *parser) init(fname, src string) {
	p.file = token.NewFile(fname, 1, len(src))
	p.scanner.Init(p.file, src)
	p.listok = false
	p.curScope = ast.NewScope(nil)
	p.topScope = p.curScope
	p.next()
}
// next advances to the following token, refreshing lit, tok and pos.
func (p *parser) next() {
	p.lit, p.tok, p.pos = p.scanner.Scan()
}
/* Scope */
// openScope pushes a new scope nested inside the current one.
func (p *parser) openScope() {
	p.curScope = ast.NewScope(p.curScope)
}
// closeScope pops back to the enclosing scope.
func (p *parser) closeScope() {
	p.curScope = p.curScope.Parent
}
/* Parsing */
// parseAssignExpr parses "(= name value)". open is the position of the
// opening parenthesis, already consumed by the caller.
func (p *parser) parseAssignExpr(open token.Pos) *ast.AssignExpr {
	equal := p.expect(token.ASSIGN)
	name := p.parseIdent()
	value := p.parseGenExpr()
	rparen := p.expect(token.RPAREN)
	return &ast.AssignExpr{
		Expression: ast.Expression{Opening: open, Closing: rparen},
		Equal:      equal,
		Name:       name,
		Value:      value,
	}
}
// parseBasicLit consumes the current token and wraps it as a literal node.
func (p *parser) parseBasicLit() *ast.BasicLit {
	lit := &ast.BasicLit{LitPos: p.pos, Kind: p.tok, Lit: p.lit}
	p.next()
	return lit
}
// parseBinaryExpr parses "(op operand operand ...)" where the operator
// token is current. At least two operands are required; fewer is
// reported as an error.
func (p *parser) parseBinaryExpr(open token.Pos) *ast.BinaryExpr {
	opPos, op := p.pos, p.tok
	p.next()
	var operands []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		operands = append(operands, p.parseGenExpr())
	}
	if len(operands) < 2 {
		p.addError("binary expression must have at least two operands")
	}
	end := p.expect(token.RPAREN)
	return &ast.BinaryExpr{
		Expression: ast.Expression{Opening: open, Closing: end},
		Op:         op,
		OpPos:      opPos,
		List:       operands,
	}
}
// parseCallExpr parses "(name arg ...)" — a call of a declared function
// with zero or more argument expressions.
func (p *parser) parseCallExpr(open token.Pos) *ast.CallExpr {
	callPos := p.pos
	name := p.parseIdent()
	var args []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		args = append(args, p.parseGenExpr())
	}
	end := p.expect(token.RPAREN)
	return &ast.CallExpr{
		Expression: ast.Expression{Opening: open, Closing: end},
		Call:       callPos,
		Name:       name,
		Args:       args,
	}
}
// parseDeclExpr parses "(decl name [(params)] type body)" — a function
// declaration. The parameter list and body are parsed inside a fresh
// scope; the declaration object itself is inserted into the enclosing
// scope so it is callable from siblings.
func (p *parser) parseDeclExpr(open token.Pos) *ast.DeclExpr {
	pos := p.expect(token.DECL)
	nam := p.parseIdent()
	p.openScope()
	var list []*ast.Ident
	// A parenthesized parameter list is optional.
	if p.tok == token.LPAREN {
		p.next()
		list = p.parseParamList()
	}
	typ := p.parseIdent()
	bod := p.tryExprOrList()
	p.closeScope()
	end := p.expect(token.RPAREN)
	decl := &ast.DeclExpr{
		Expression: ast.Expression{
			Opening: open,
			Closing: end,
		},
		Decl: pos,
		Name: nam,
		Type: typ,
		Params: list,
		Body: bod,
	}
	// Register the declaration in the scope that encloses the body.
	ob := &ast.Object{
		NamePos: nam.NamePos,
		Name: nam.Name,
		Kind: ast.Decl,
		Type: typ,
		Value: decl,
	}
	if old := p.curScope.Insert(ob); old != nil {
		p.addError("identifier already exists")
	}
	return decl
}
// parseExpr parses a parenthesized expression, dispatching on the token
// after the opening parenthesis. When an expression list is permitted
// (listok) and a second LPAREN follows immediately, the whole construct
// is parsed as an ExprList instead.
func (p *parser) parseExpr() ast.Expr {
	var expr ast.Expr
	// Remember whether a list was allowed at entry: listok is cleared
	// below, but the error message at the bottom depends on it.
	listok := p.listok
	pos := p.expect(token.LPAREN)
	if p.listok && p.tok == token.LPAREN {
		expr = p.parseExprList(pos)
		return expr
	}
	p.listok = false
	switch p.tok {
	case token.ADD, token.SUB, token.MUL, token.QUO, token.REM,
		token.EQL, token.GTE, token.GTT, token.NEQ, token.LST, token.LTE:
		expr = p.parseBinaryExpr(pos)
	case token.ASSIGN:
		expr = p.parseAssignExpr(pos)
	case token.DECL:
		expr = p.parseDeclExpr(pos)
	case token.IDENT:
		expr = p.parseCallExpr(pos)
	case token.IF:
		expr = p.parseIfExpr(pos)
	case token.VAR:
		expr = p.parseVarExpr(pos)
	default:
		if listok {
			p.addError("Expected expression but got '" + p.lit + "'")
		} else {
			p.addError("Expected operator, keyword or identifier but got '" + p.lit +
				"'")
		}
	}
	return expr
}
// parseExprList parses a parenthesized sequence of expressions. An
// empty list is reported as an error.
func (p *parser) parseExprList(open token.Pos) ast.Expr {
	p.listok = false
	var exprs []ast.Expr
	for p.tok != token.RPAREN {
		exprs = append(exprs, p.parseGenExpr())
	}
	if len(exprs) == 0 {
		p.addError("empty expression list not allowed")
	}
	end := p.expect(token.RPAREN)
	return &ast.ExprList{
		Expression: ast.Expression{Opening: open, Closing: end},
		List:       exprs,
	}
}
// parseGenExpr parses any general expression: a parenthesized form, a
// bare identifier, or an integer literal. Anything else is an error and
// the offending token is skipped so parsing can continue.
func (p *parser) parseGenExpr() ast.Expr {
	var expr ast.Expr
	switch p.tok {
	case token.IDENT:
		expr = p.parseIdent()
	case token.INTEGER:
		expr = p.parseBasicLit()
	case token.LPAREN:
		expr = p.parseExpr()
	default:
		p.addError("Expected '" + token.LPAREN.String() + "' or '" +
			token.INTEGER.String() + "' got '" + p.lit + "'")
		p.next()
	}
	p.listok = false
	return expr
}
// parseFile consumes expressions until EOF and returns the file node.
// A file containing no top-level declarations is an error.
func (p *parser) parseFile() *ast.File {
	for p.tok != token.EOF {
		p.parseGenExpr()
	}
	if p.topScope.Size() == 0 {
		p.addError("reached end of file without any declarations")
	}
	return &ast.File{Scope: p.topScope}
}
// parseIdent consumes an IDENT token and returns it as an identifier node.
func (p *parser) parseIdent() *ast.Ident {
	lit := p.lit
	return &ast.Ident{NamePos: p.expect(token.IDENT), Name: lit}
}
// parseIfExpr parses "(if cond [type] then [else])". The type and the
// else branch are both optional; the else branch is assumed present
// whenever something other than RPAREN follows the then branch.
func (p *parser) parseIfExpr(open token.Pos) *ast.IfExpr {
	pos := p.expect(token.IF)
	cond := p.parseGenExpr()
	var typ *ast.Ident
	if p.tok == token.IDENT {
		typ = p.parseIdent()
	}
	then := p.tryExprOrList()
	var els ast.Expr
	if p.tok != token.RPAREN {
		els = p.tryExprOrList()
	}
	end := p.expect(token.RPAREN)
	return &ast.IfExpr{
		Expression: ast.Expression{
			Opening: open,
			Closing: end,
		},
		If: pos,
		Type: typ,
		Cond: cond,
		Then: then,
		Else: els,
	}
}
// parseParamList parses a comma-separated list of typed parameter
// groups, e.g. "a b int, c int". Within a group, every identifier but
// the last is a parameter; the final identifier is the group's type and
// is attached to each parameter in the group. The closing RPAREN is
// consumed. An empty list is an error.
func (p *parser) parseParamList() []*ast.Ident {
	var list []*ast.Ident
	start := 0 // index in list of the first parameter of the current group
	for p.tok != token.RPAREN {
		ident := p.parseIdent()
		if p.tok == token.COMMA || p.tok == token.RPAREN {
			// ident is the group's type, not a parameter: attach it to
			// every parameter gathered since the group began.
			for _, param := range list[start:] {
				if param.Object == nil {
					param.Object = &ast.Object{
						Kind: ast.Var,
						Name: param.Name,
					}
				}
				param.Object.Type = ident
			}
			// BUG FIX: the old code set start to a running count of ALL
			// identifiers parsed (type names included), overshooting the
			// slice and leaving parameters in later groups untyped. The
			// group boundary is simply len(list).
			start = len(list)
			// BUG FIX: consume the separating comma; previously it was
			// left for parseIdent, which reported a spurious
			// "Expected 'IDENT'" error on every multi-group list.
			if p.tok == token.COMMA {
				p.next()
			}
			continue
		}
		list = append(list, ident)
	}
	if len(list) < 1 {
		p.addError("empty param list not allowed")
	}
	p.expect(token.RPAREN)
	return list
}
// parseVarExpr parses "(var name [type] [value] [type])" — a variable
// declaration with an optional explicit type and optional initializer.
// NOTE(review): the two trailing "if p.tok != token.RPAREN" branches
// overlap oddly — if the value is itself an identifier it is consumed
// by the first typ branch; presumably the grammar intends type-first or
// type-last forms. typ may remain nil (untyped var) — verify callers
// tolerate a nil Object.Type.
func (p *parser) parseVarExpr(lparen token.Pos) *ast.VarExpr {
	var (
		typ *ast.Ident
		val ast.Expr
	)
	varpos := p.expect(token.VAR)
	nam := p.parseIdent()
	// TODO: Needs improvement; maybe a p.tryTypeOrExpression?
	if p.tok == token.RPAREN {
		p.addError("Expected type, expression or literal, got: )")
	}
	if p.tok == token.IDENT {
		typ = p.parseIdent()
	}
	if p.tok != token.RPAREN {
		val = p.parseGenExpr()
	}
	if p.tok != token.RPAREN {
		typ = p.parseIdent()
	}
	// TODO: end
	rparen := p.expect(token.RPAREN)
	// Record the variable in the current scope; duplicates are errors.
	ob := &ast.Object{
		NamePos: nam.NamePos,
		Name: nam.Name,
		Kind: ast.Var,
		Type: typ,
		Value: val,
	}
	if old := p.curScope.Insert(ob); old != nil {
		p.addError("Identifier " + nam.Name + " redeclared; original " +
			"declaration at " + p.file.Position(old.NamePos).String())
	}
	return &ast.VarExpr{
		Expression: ast.Expression{Opening: lparen, Closing: rparen},
		Var: varpos,
		Name: nam,
		Object: ob,
	}
}
// tryExprOrList parses the next expression while permitting it to be an
// expression list; the listok flag is cleared again by the callee.
func (p *parser) tryExprOrList() ast.Expr {
	p.listok = true
	return p.parseGenExpr()
}
Add type checking and declaration checking.
// Copyright (c) 2014, Rob Thornton
// All rights reserved.
// This source code is governed by a Simplified BSD License. Please see the
// LICENSE included in this distribution for a copy of the full license
// or, if one is not included, you may also find a copy at
// http://opensource.org/licenses/BSD-2-Clause
package parse
import (
"os"
"github.com/rthornton128/calc/ast"
"github.com/rthornton128/calc/scan"
"github.com/rthornton128/calc/token"
)
// ParseFile parses the Calc source in src, using filename for position
// reporting. On any parse error the errors are printed and nil is
// returned; otherwise the AST is returned.
func ParseFile(filename, src string) *ast.File {
	var p parser
	p.init(filename, src)
	file := p.parseFile()
	if p.errors.Count() == 0 {
		return file
	}
	p.errors.Print()
	return nil
}
// parser holds the scanner plus all state needed while building and
// type-checking the AST.
type parser struct {
	file *token.File    // source handle for translating positions
	errors scan.ErrorList // accumulated parse errors
	scanner scan.Scanner  // token source
	listok bool           // whether an expression list may appear next
	curScope *ast.Scope   // innermost open scope
	topScope *ast.Scope   // file-level scope
	pos token.Pos         // position of the current token
	tok token.Token       // current token
	lit string            // literal text of the current token
}
/* Utility */
// addError records a parse error at the current token position; args
// are forwarded verbatim to the error list. After ten errors the list
// is printed and the process exits.
func (p *parser) addError(args ...interface{}) {
	p.errors.Add(p.file.Position(p.pos), args...)
	if p.errors.Count() >= 10 {
		p.errors.Print()
		os.Exit(1)
	}
}
// expect consumes the current token, returning its position. If the
// token is not tok an error is recorded, but parsing continues.
func (p *parser) expect(tok token.Token) token.Pos {
	defer p.next()
	if tok != p.tok {
		p.addError("Expected '" + tok.String() + "' got '" + p.lit + "'")
	}
	return p.pos
}
// init prepares the parser for a fresh source file: it creates the
// token file, hooks up the scanner, opens the top-level scope and
// primes the first token.
func (p *parser) init(fname, src string) {
	p.file = token.NewFile(fname, 1, len(src))
	p.scanner.Init(p.file, src)
	p.listok = false
	p.curScope = ast.NewScope(nil)
	p.topScope = p.curScope
	p.next()
}
// next advances to the following token, refreshing lit, tok and pos.
func (p *parser) next() {
	p.lit, p.tok, p.pos = p.scanner.Scan()
}
// typeOf resolves the static type name of a node. Literals and binary
// expressions are always "int"; identifiers, calls, assignments and var
// expressions are resolved through the current scope; an expression
// list takes the type of its final expression.
// NOTE(review): scope lookups assume the name was declared — a nil
// result from Lookup (undeclared identifier) dereferences nil here, as
// does a declared-but-untyped object (ob.Type == nil from parseVarExpr).
// Verify callers only invoke this after declaration checks.
func (p *parser) typeOf(n ast.Node) string {
	switch t := n.(type) {
	case *ast.AssignExpr:
		ob := p.curScope.Lookup(t.Name.Name)
		return ob.Type.Name
	case *ast.BasicLit, *ast.BinaryExpr:
		// The only literal kind is INTEGER, and operators work on ints.
		return "int"
	case *ast.CallExpr:
		ob := p.curScope.Lookup(t.Name.Name)
		return ob.Type.Name
	case *ast.DeclExpr:
		return t.Type.Name
	case *ast.ExprList:
		return p.typeOf(t.List[len(t.List)-1])
	case *ast.Ident:
		ob := p.curScope.Lookup(t.Name)
		return ob.Type.Name
	case *ast.IfExpr:
		if t.Type != nil {
			return t.Type.Name
		}
		return p.typeOf(t.Then)
	case *ast.VarExpr:
		ob := p.curScope.Lookup(t.Name.Name)
		return ob.Type.Name
	}
	panic("unknown node, no return type")
}
/* Scope */
// openScope pushes a new scope nested inside the current one.
func (p *parser) openScope() {
	p.curScope = ast.NewScope(p.curScope)
}
// closeScope pops back to the enclosing scope.
func (p *parser) closeScope() {
	p.curScope = p.curScope.Parent
}
/* Parsing */
// parseAssignExpr parses "(= name value)" and checks that name was
// declared and that the value's type matches the variable's type.
func (p *parser) parseAssignExpr(open token.Pos) *ast.AssignExpr {
	pos := p.expect(token.ASSIGN)
	nam := p.parseIdent()
	val := p.parseGenExpr()
	end := p.expect(token.RPAREN)
	ob := p.curScope.Lookup(nam.Name)
	if ob == nil {
		// BUG FIX: previously the type check below still ran after this
		// error, dereferencing the nil object (addError only exits once
		// ten errors have accumulated).
		p.addError("assignment to undeclared variable")
	} else if ob.Type != nil {
		// Skip the check for untyped declarations (parseVarExpr can
		// leave Type nil); dereferencing it here used to panic.
		if vtype := p.typeOf(val); ob.Type.Name != vtype {
			p.addError("assignment to variable with incompatible type: ",
				ob.Type.Name, "vs.", vtype)
		}
	}
	return &ast.AssignExpr{
		Expression: ast.Expression{Opening: open, Closing: end},
		Equal:      pos,
		Name:       nam,
		Value:      val,
	}
}
// parseBasicLit consumes the current token and wraps it as a literal node.
func (p *parser) parseBasicLit() *ast.BasicLit {
	lit := &ast.BasicLit{LitPos: p.pos, Kind: p.tok, Lit: p.lit}
	p.next()
	return lit
}
// parseBinaryExpr parses "(op operand operand ...)" where the operator
// token is current. At least two operands are required; fewer is
// reported as an error.
func (p *parser) parseBinaryExpr(open token.Pos) *ast.BinaryExpr {
	opPos, op := p.pos, p.tok
	p.next()
	var operands []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		operands = append(operands, p.parseGenExpr())
	}
	if len(operands) < 2 {
		p.addError("binary expression must have at least two operands")
	}
	end := p.expect(token.RPAREN)
	return &ast.BinaryExpr{
		Expression: ast.Expression{Opening: open, Closing: end},
		Op:         op,
		OpPos:      opPos,
		List:       operands,
	}
}
// parseCallExpr parses "(name arg ...)" — a call of a declared function
// with zero or more argument expressions.
func (p *parser) parseCallExpr(open token.Pos) *ast.CallExpr {
	callPos := p.pos
	name := p.parseIdent()
	var args []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		args = append(args, p.parseGenExpr())
	}
	end := p.expect(token.RPAREN)
	return &ast.CallExpr{
		Expression: ast.Expression{Opening: open, Closing: end},
		Call:       callPos,
		Name:       name,
		Args:       args,
	}
}
// parseDeclExpr parses "(decl name [(params)] type body)" — a function
// declaration. The parameter list and body are parsed inside a fresh
// scope; the declaration object is inserted into the enclosing scope,
// and the body's type is checked against the declared return type.
// NOTE(review): p.typeOf(bod) is evaluated even when the body failed to
// parse (bod nil) or contains undeclared names — both can panic inside
// typeOf; confirm error recovery upstream.
func (p *parser) parseDeclExpr(open token.Pos) *ast.DeclExpr {
	pos := p.expect(token.DECL)
	nam := p.parseIdent()
	p.openScope()
	var list []*ast.Ident
	// A parenthesized parameter list is optional.
	if p.tok == token.LPAREN {
		p.next()
		list = p.parseParamList()
	}
	typ := p.parseIdent()
	bod := p.tryExprOrList()
	p.closeScope()
	end := p.expect(token.RPAREN)
	decl := &ast.DeclExpr{
		Expression: ast.Expression{
			Opening: open,
			Closing: end,
		},
		Decl: pos,
		Name: nam,
		Type: typ,
		Params: list,
		Body: bod,
	}
	// Register the declaration in the scope that encloses the body.
	ob := &ast.Object{
		NamePos: nam.NamePos,
		Name: nam.Name,
		Kind: ast.Decl,
		Type: typ,
		Value: decl,
	}
	if old := p.curScope.Insert(ob); old != nil {
		p.addError("redeclaration of function not allowed, originally declared "+
			"at: ", p.file.Position(old.NamePos))
	}
	// The body's resulting type must match the declared return type.
	if btype := p.typeOf(bod); typ.Name != btype {
		p.addError("return value type does not match return type:",
			typ.Name, " vs. ", btype)
	}
	return decl
}
// parseExpr parses a parenthesized expression, dispatching on the token
// after the opening parenthesis. When an expression list is permitted
// (listok) and a second LPAREN follows immediately, the whole construct
// is parsed as an ExprList instead.
func (p *parser) parseExpr() ast.Expr {
	var expr ast.Expr
	// Remember whether a list was allowed at entry: listok is cleared
	// below, but the error message at the bottom depends on it.
	listok := p.listok
	pos := p.expect(token.LPAREN)
	if p.listok && p.tok == token.LPAREN {
		expr = p.parseExprList(pos)
		return expr
	}
	p.listok = false
	switch p.tok {
	case token.ADD, token.SUB, token.MUL, token.QUO, token.REM,
		token.EQL, token.GTE, token.GTT, token.NEQ, token.LST, token.LTE:
		expr = p.parseBinaryExpr(pos)
	case token.ASSIGN:
		expr = p.parseAssignExpr(pos)
	case token.DECL:
		expr = p.parseDeclExpr(pos)
	case token.IDENT:
		expr = p.parseCallExpr(pos)
	case token.IF:
		expr = p.parseIfExpr(pos)
	case token.VAR:
		expr = p.parseVarExpr(pos)
	default:
		if listok {
			p.addError("Expected expression but got '" + p.lit + "'")
		} else {
			p.addError("Expected operator, keyword or identifier but got '" + p.lit +
				"'")
		}
	}
	return expr
}
// parseExprList parses a parenthesized sequence of expressions. An
// empty list is reported as an error.
func (p *parser) parseExprList(open token.Pos) ast.Expr {
	p.listok = false
	var exprs []ast.Expr
	for p.tok != token.RPAREN {
		exprs = append(exprs, p.parseGenExpr())
	}
	if len(exprs) == 0 {
		p.addError("empty expression list not allowed")
	}
	end := p.expect(token.RPAREN)
	return &ast.ExprList{
		Expression: ast.Expression{Opening: open, Closing: end},
		List:       exprs,
	}
}
// parseGenExpr parses any general expression: a parenthesized form, a
// bare identifier, or an integer literal. Anything else is an error and
// the offending token is skipped so parsing can continue.
func (p *parser) parseGenExpr() ast.Expr {
	var expr ast.Expr
	switch p.tok {
	case token.IDENT:
		expr = p.parseIdent()
	case token.INTEGER:
		expr = p.parseBasicLit()
	case token.LPAREN:
		expr = p.parseExpr()
	default:
		p.addError("Expected '" + token.LPAREN.String() + "' or '" +
			token.INTEGER.String() + "' got '" + p.lit + "'")
		p.next()
	}
	p.listok = false
	return expr
}
// parseFile consumes expressions until EOF and returns the file node.
// A file containing no top-level declarations is an error.
func (p *parser) parseFile() *ast.File {
	for p.tok != token.EOF {
		p.parseGenExpr()
	}
	if p.topScope.Size() == 0 {
		p.addError("reached end of file without any declarations")
	}
	return &ast.File{Scope: p.topScope}
}
// parseIdent consumes an IDENT token and returns it as an identifier node.
func (p *parser) parseIdent() *ast.Ident {
	lit := p.lit
	return &ast.Ident{NamePos: p.expect(token.IDENT), Name: lit}
}
// parseIfExpr parses "(if cond [type] then [else])". The type and else
// branch are optional; each branch is parsed in its own scope. When a
// type is given, both branches are checked against it.
// NOTE(review): the branch type checks run after closeScope, so typeOf
// resolves names in the enclosing scope — identifiers declared inside a
// branch would not resolve; confirm intended.
func (p *parser) parseIfExpr(open token.Pos) *ast.IfExpr {
	pos := p.expect(token.IF)
	cond := p.parseGenExpr()
	var typ *ast.Ident
	if p.tok == token.IDENT {
		typ = p.parseIdent()
	}
	p.openScope()
	then := p.tryExprOrList()
	p.closeScope()
	var els ast.Expr
	// Anything other than RPAREN after the then branch is an else branch.
	if p.tok != token.RPAREN {
		p.openScope()
		els = p.tryExprOrList()
		p.closeScope()
	}
	end := p.expect(token.RPAREN)
	if typ != nil {
		ttype := p.typeOf(then)
		if ttype != typ.Name {
			p.addError("return value of then clause does not match if type: ",
				typ.Name, " vs. ", ttype)
		}
		if els != nil {
			etype := p.typeOf(els)
			if etype != typ.Name {
				p.addError("return value of else clause does not match if type: ",
					typ.Name, " vs. ", etype)
			}
		}
	}
	return &ast.IfExpr{
		Expression: ast.Expression{
			Opening: open,
			Closing: end,
		},
		If: pos,
		Type: typ,
		Cond: cond,
		Then: then,
		Else: els,
	}
}
// parseParamList parses a comma-separated list of typed parameter
// groups, e.g. "a b int, c int". Within a group, every identifier but
// the last is a parameter; the final identifier is the group's type and
// is attached to each parameter in the group. The closing RPAREN is
// consumed. An empty list is an error.
func (p *parser) parseParamList() []*ast.Ident {
	var list []*ast.Ident
	start := 0 // index in list of the first parameter of the current group
	for p.tok != token.RPAREN {
		ident := p.parseIdent()
		if p.tok == token.COMMA || p.tok == token.RPAREN {
			// ident is the group's type, not a parameter: attach it to
			// every parameter gathered since the group began.
			for _, param := range list[start:] {
				if param.Object == nil {
					param.Object = &ast.Object{
						Kind: ast.Var,
						Name: param.Name,
					}
				}
				param.Object.Type = ident
			}
			// BUG FIX: the old code set start to a running count of ALL
			// identifiers parsed (type names included), overshooting the
			// slice and leaving parameters in later groups untyped. The
			// group boundary is simply len(list).
			start = len(list)
			// BUG FIX: consume the separating comma; previously it was
			// left for parseIdent, which reported a spurious
			// "Expected 'IDENT'" error on every multi-group list.
			if p.tok == token.COMMA {
				p.next()
			}
			continue
		}
		list = append(list, ident)
	}
	if len(list) < 1 {
		p.addError("empty param list not allowed")
	}
	p.expect(token.RPAREN)
	return list
}
// parseVarExpr parses "(var name [type] [value] [type])" — a variable
// declaration with an optional explicit type and optional initializer.
// NOTE(review): the two trailing "if p.tok != token.RPAREN" branches
// overlap oddly — if the value is itself an identifier it is consumed
// by the first typ branch; presumably the grammar intends type-first or
// type-last forms. typ may remain nil (untyped var) — a nil Object.Type
// will panic later in typeOf; verify.
func (p *parser) parseVarExpr(lparen token.Pos) *ast.VarExpr {
	var (
		typ *ast.Ident
		val ast.Expr
	)
	varpos := p.expect(token.VAR)
	nam := p.parseIdent()
	// TODO: Needs improvement; maybe a p.tryTypeOrExpression?
	if p.tok == token.RPAREN {
		p.addError("Expected type, expression or literal, got: )")
	}
	if p.tok == token.IDENT {
		typ = p.parseIdent()
	}
	if p.tok != token.RPAREN {
		val = p.parseGenExpr()
	}
	if p.tok != token.RPAREN {
		typ = p.parseIdent()
	}
	// TODO: end
	rparen := p.expect(token.RPAREN)
	// Record the variable in the current scope; duplicates are errors.
	ob := &ast.Object{
		NamePos: nam.NamePos,
		Name: nam.Name,
		Kind: ast.Var,
		Type: typ,
		Value: val,
	}
	if old := p.curScope.Insert(ob); old != nil {
		p.addError("redeclaration of variable not allowed; original "+
			"declaration at: ", p.file.Position(old.NamePos))
	}
	return &ast.VarExpr{
		Expression: ast.Expression{Opening: lparen, Closing: rparen},
		Var: varpos,
		Name: nam,
		Object: ob,
	}
}
// tryExprOrList parses the next expression while permitting it to be an
// expression list; the listok flag is cleared again by the callee.
func (p *parser) tryExprOrList() ast.Expr {
	p.listok = true
	return p.parseGenExpr()
}
|
package libxml2
import (
"regexp"
"testing"
"github.com/lestrrat/go-libxml2/parser"
"github.com/stretchr/testify/assert"
)
// stdXMLDecl is the standard XML declaration prepended to most fixtures.
const stdXMLDecl = `<?xml version="1.0"?>` + "\n"
var (
	// goodWFNSStrings are well-formed documents exercising namespaces.
	goodWFNSStrings = []string{
		stdXMLDecl + `<foobar xmlns:bar="xml://foo" bar:foo="bar"/>` + "\n",
		stdXMLDecl + `<foobar xmlns="xml://foo" foo="bar"><foo/></foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" foo="bar"><foo/></bar:foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" foo="bar"><bar:foo/></bar:foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" bar:foo="bar"><bar:foo/></bar:foobar>` + "\n",
	}
	// goodWFStrings are well-formed documents without namespaces or DTDs.
	goodWFStrings = []string{
		`<foobar/>`,
		`<foobar></foobar>`,
		`<foobar></foobar>`,
		`<?xml version="1.0" encoding="UTF-8"?>` + "\n" + `<foobar></foobar>`,
		`<?xml version="1.0" encoding="ISO-8859-1"?>` + "\n" + `<foobar></foobar>`,
		stdXMLDecl + `<foobar> </foobar>` + "\n",
		stdXMLDecl + `<foobar><foo/></foobar> `,
		stdXMLDecl + `<foobar> <foo/> </foobar> `,
		stdXMLDecl + `<foobar><![CDATA[<>&"\` + "`" + `]]></foobar>`,
		stdXMLDecl + `<foobar><>&"'</foobar>`,
		stdXMLDecl + `<foobar>  </foobar>`,
		stdXMLDecl + `<!--comment--><foobar>foo</foobar>`,
		stdXMLDecl + `<foobar>foo</foobar><!--comment-->`,
		stdXMLDecl + `<foobar>foo<!----></foobar>`,
		stdXMLDecl + `<foobar foo="bar"/>`,
		stdXMLDecl + `<foobar foo="\` + "`" + `bar>"/>`,
	}
	// goodWFDTDStrings are well-formed documents with internal DTD subsets.
	goodWFDTDStrings = []string{
		stdXMLDecl + `<!DOCTYPE foobar [` + "\n" + `<!ENTITY foo " test ">` + "\n" + `]>` + "\n" + `<foobar>&foo;</foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;</foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar="foo"">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar foo="&foo;"/>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar foo=">&foo;"/>`,
	}
	// badWFStrings must all be rejected by the parser.
	badWFStrings = []string{
		"", // totally empty document
		stdXMLDecl, // only XML Declaration
		"<!--ouch-->", // comment only is like an empty document
		`<!DOCTYPE ouch [<!ENTITY foo "bar">]>`, // no good either ...
		"<ouch>", // single tag (tag mismatch)
		"<ouch/>foo", // trailing junk
		"foo<ouch/>", // leading junk
		"<ouch foo=bar/>", // bad attribute
		`<ouch foo="bar/>`, // bad attribute
		"<ouch>&</ouch>", // bad char
		`<ouch>&//0x20;</ouch>`, // bad chart
		"<foob<e4>r/>", // bad encoding
		"<ouch>&foo;</ouch>", // undefined entity
		"<ouch>></ouch>", // unterminated entity
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar &foo;="ouch"/>`, // bad placed entity
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar="foo"">]><foobar &foo;/>`, // even worse
		"<ouch><!---></ouch>", // bad comment
		"<ouch><!-----></ouch>", // bad either... (is this conform with the spec????)
	}
)
// parseShouldSucceed asserts that every input parses cleanly under the
// given options, freeing each resulting document. It stops at the first
// failure.
func parseShouldSucceed(t *testing.T, opts parser.Option, inputs []string) {
	t.Logf("Test parsing with parser %v", opts)
	for _, src := range inputs {
		doc, err := ParseString(src, opts)
		if !assert.NoError(t, err, "Parse should succeed") {
			return
		}
		doc.Free()
	}
}
// parseShouldFail asserts that every input is rejected by the parser
// under the given options; any document produced by mistake is freed.
func parseShouldFail(t *testing.T, opts parser.Option, inputs []string) {
	for _, src := range inputs {
		doc, err := ParseString(src, opts)
		if err != nil {
			continue
		}
		doc.Free()
		t.Errorf("Expected failure to parse '%s'", src)
	}
}
// ParseOptionToString pairs a parser option with its expected name as
// rendered by Option.String (without the surrounding brackets).
type ParseOptionToString struct {
	v parser.Option // option under test
	e string        // expected name
}
// TestParseOptionStringer checks that every parser option stringifies
// as its bracketed name.
func TestParseOptionStringer(t *testing.T) {
	values := []ParseOptionToString{
		{v: parser.XMLParseRecover, e: "Recover"},
		{v: parser.XMLParseNoEnt, e: "NoEnt"},
		{v: parser.XMLParseDTDLoad, e: "DTDLoad"},
		{v: parser.XMLParseDTDAttr, e: "DTDAttr"},
		{v: parser.XMLParseDTDValid, e: "DTDValid"},
		{v: parser.XMLParseNoError, e: "NoError"},
		{v: parser.XMLParseNoWarning, e: "NoWarning"},
		{v: parser.XMLParsePedantic, e: "Pedantic"},
		{v: parser.XMLParseNoBlanks, e: "NoBlanks"},
		{v: parser.XMLParseSAX1, e: "SAX1"},
		{v: parser.XMLParseXInclude, e: "XInclude"},
		{v: parser.XMLParseNoNet, e: "NoNet"},
		{v: parser.XMLParseNoDict, e: "NoDict"},
		{v: parser.XMLParseNsclean, e: "Nsclean"},
		{v: parser.XMLParseNoCDATA, e: "NoCDATA"},
		{v: parser.XMLParseNoXIncNode, e: "NoXIncNode"},
		{v: parser.XMLParseCompact, e: "Compact"},
		{v: parser.XMLParseOld10, e: "Old10"},
		{v: parser.XMLParseNoBaseFix, e: "NoBaseFix"},
		{v: parser.XMLParseHuge, e: "Huge"},
		{v: parser.XMLParseOldSAX, e: "OldSAX"},
		{v: parser.XMLParseIgnoreEnc, e: "IgnoreEnc"},
		{v: parser.XMLParseBigLines, e: "BigLines"},
	}
	for _, d := range values {
		if d.v.String() != "["+d.e+"]" {
			t.Errorf("e '%s', got '%s'", d.e, d.v.String())
		}
	}
}
// TestParseEmpty verifies that parsing the empty string is an error.
func TestParseEmpty(t *testing.T) {
	doc, err := ParseString(``)
	if err != nil {
		return
	}
	defer doc.Free()
	t.Errorf("Parse of empty string should fail")
}
// TestParse verifies that all well-formed fixture groups parse with
// default options.
func TestParse(t *testing.T) {
	for _, input := range [][]string{goodWFStrings, goodWFNSStrings, goodWFDTDStrings} {
		parseShouldSucceed(t, 0, input)
	}
}
// TestParseBad verifies that the malformed fixtures are rejected.
func TestParseBad(t *testing.T) {
	for _, input := range [][]string{badWFStrings} {
		parseShouldFail(t, 0, input)
	}
}
// TestParseNoBlanks verifies that the well-formed fixtures also parse
// with the NoBlanks option enabled.
func TestParseNoBlanks(t *testing.T) {
	for _, input := range [][]string{goodWFStrings, goodWFNSStrings, goodWFDTDStrings} {
		parseShouldSucceed(t, parser.XMLParseNoBlanks, input)
	}
}
// TestRoundtripNoBlanks verifies that NoBlanks strips ignorable
// whitespace from the serialized output.
func TestRoundtripNoBlanks(t *testing.T) {
	doc, err := ParseString(`<a> <b/> </a>`, parser.XMLParseNoBlanks)
	if err != nil {
		t.Errorf("failed to parse string: %s", err)
		return
	}
	// FIX: free the document like every other test in this file does;
	// it was previously leaked.
	defer doc.Free()
	if !assert.Regexp(t, regexp.MustCompile(`<a><b/></a>`), doc.Dump(false), "stringified xml should have no blanks") {
		return
	}
}
Suppress errors.
package libxml2
import (
"regexp"
"testing"
"github.com/lestrrat/go-libxml2/parser"
"github.com/stretchr/testify/assert"
)
// stdXMLDecl is the standard XML declaration prepended to most fixtures.
const stdXMLDecl = `<?xml version="1.0"?>` + "\n"
var (
	// goodWFNSStrings are well-formed documents exercising namespaces.
	goodWFNSStrings = []string{
		stdXMLDecl + `<foobar xmlns:bar="xml://foo" bar:foo="bar"/>` + "\n",
		stdXMLDecl + `<foobar xmlns="xml://foo" foo="bar"><foo/></foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" foo="bar"><foo/></bar:foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" foo="bar"><bar:foo/></bar:foobar>` + "\n",
		stdXMLDecl + `<bar:foobar xmlns:bar="xml://foo" bar:foo="bar"><bar:foo/></bar:foobar>` + "\n",
	}
	// goodWFStrings are well-formed documents without namespaces or DTDs.
	goodWFStrings = []string{
		`<foobar/>`,
		`<foobar></foobar>`,
		`<foobar></foobar>`,
		`<?xml version="1.0" encoding="UTF-8"?>` + "\n" + `<foobar></foobar>`,
		`<?xml version="1.0" encoding="ISO-8859-1"?>` + "\n" + `<foobar></foobar>`,
		stdXMLDecl + `<foobar> </foobar>` + "\n",
		stdXMLDecl + `<foobar><foo/></foobar> `,
		stdXMLDecl + `<foobar> <foo/> </foobar> `,
		stdXMLDecl + `<foobar><![CDATA[<>&"\` + "`" + `]]></foobar>`,
		stdXMLDecl + `<foobar><>&"'</foobar>`,
		stdXMLDecl + `<foobar>  </foobar>`,
		stdXMLDecl + `<!--comment--><foobar>foo</foobar>`,
		stdXMLDecl + `<foobar>foo</foobar><!--comment-->`,
		stdXMLDecl + `<foobar>foo<!----></foobar>`,
		stdXMLDecl + `<foobar foo="bar"/>`,
		stdXMLDecl + `<foobar foo="\` + "`" + `bar>"/>`,
	}
	// goodWFDTDStrings are well-formed documents with internal DTD subsets.
	goodWFDTDStrings = []string{
		stdXMLDecl + `<!DOCTYPE foobar [` + "\n" + `<!ENTITY foo " test ">` + "\n" + `]>` + "\n" + `<foobar>&foo;</foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;</foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar="foo"">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar>&foo;></foobar>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar foo="&foo;"/>`,
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar foo=">&foo;"/>`,
	}
	// badWFStrings must all be rejected by the parser.
	badWFStrings = []string{
		"", // totally empty document
		stdXMLDecl, // only XML Declaration
		"<!--ouch-->", // comment only is like an empty document
		`<!DOCTYPE ouch [<!ENTITY foo "bar">]>`, // no good either ...
		"<ouch>", // single tag (tag mismatch)
		"<ouch/>foo", // trailing junk
		"foo<ouch/>", // leading junk
		"<ouch foo=bar/>", // bad attribute
		`<ouch foo="bar/>`, // bad attribute
		"<ouch>&</ouch>", // bad char
		`<ouch>&//0x20;</ouch>`, // bad chart
		"<foob<e4>r/>", // bad encoding
		"<ouch>&foo;</ouch>", // undefined entity
		"<ouch>></ouch>", // unterminated entity
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar">]><foobar &foo;="ouch"/>`, // bad placed entity
		stdXMLDecl + `<!DOCTYPE foobar [<!ENTITY foo "bar="foo"">]><foobar &foo;/>`, // even worse
		"<ouch><!---></ouch>", // bad comment
		"<ouch><!-----></ouch>", // bad either... (is this conform with the spec????)
	}
)
// parseShouldSucceed asserts that every input parses cleanly under the
// given options, freeing each resulting document. It stops at the first
// failure.
func parseShouldSucceed(t *testing.T, opts parser.Option, inputs []string) {
	t.Logf("Test parsing with parser %v", opts)
	for _, src := range inputs {
		doc, err := ParseString(src, opts)
		if !assert.NoError(t, err, "Parse should succeed") {
			return
		}
		doc.Free()
	}
}
// parseShouldFail asserts that every input is rejected by the parser
// under the given options; any document produced by mistake is freed.
func parseShouldFail(t *testing.T, opts parser.Option, inputs []string) {
	for _, src := range inputs {
		doc, err := ParseString(src, opts)
		if err != nil {
			continue
		}
		doc.Free()
		t.Errorf("Expected failure to parse '%s'", src)
	}
}
// ParseOptionToString pairs a parser option with its expected name as
// rendered by Option.String (without the surrounding brackets).
type ParseOptionToString struct {
	v parser.Option // option under test
	e string        // expected name
}
// TestParseOptionStringer checks that every parser option stringifies
// as its bracketed name.
func TestParseOptionStringer(t *testing.T) {
	values := []ParseOptionToString{
		{v: parser.XMLParseRecover, e: "Recover"},
		{v: parser.XMLParseNoEnt, e: "NoEnt"},
		{v: parser.XMLParseDTDLoad, e: "DTDLoad"},
		{v: parser.XMLParseDTDAttr, e: "DTDAttr"},
		{v: parser.XMLParseDTDValid, e: "DTDValid"},
		{v: parser.XMLParseNoError, e: "NoError"},
		{v: parser.XMLParseNoWarning, e: "NoWarning"},
		{v: parser.XMLParsePedantic, e: "Pedantic"},
		{v: parser.XMLParseNoBlanks, e: "NoBlanks"},
		{v: parser.XMLParseSAX1, e: "SAX1"},
		{v: parser.XMLParseXInclude, e: "XInclude"},
		{v: parser.XMLParseNoNet, e: "NoNet"},
		{v: parser.XMLParseNoDict, e: "NoDict"},
		{v: parser.XMLParseNsclean, e: "Nsclean"},
		{v: parser.XMLParseNoCDATA, e: "NoCDATA"},
		{v: parser.XMLParseNoXIncNode, e: "NoXIncNode"},
		{v: parser.XMLParseCompact, e: "Compact"},
		{v: parser.XMLParseOld10, e: "Old10"},
		{v: parser.XMLParseNoBaseFix, e: "NoBaseFix"},
		{v: parser.XMLParseHuge, e: "Huge"},
		{v: parser.XMLParseOldSAX, e: "OldSAX"},
		{v: parser.XMLParseIgnoreEnc, e: "IgnoreEnc"},
		{v: parser.XMLParseBigLines, e: "BigLines"},
	}
	for _, d := range values {
		if d.v.String() != "["+d.e+"]" {
			t.Errorf("e '%s', got '%s'", d.e, d.v.String())
		}
	}
}
// TestParseEmpty verifies that parsing the empty string is an error.
func TestParseEmpty(t *testing.T) {
	doc, err := ParseString(``)
	if err != nil {
		return
	}
	defer doc.Free()
	t.Errorf("Parse of empty string should fail")
}
// TestParse verifies that all well-formed fixture groups parse with
// default options.
func TestParse(t *testing.T) {
	for _, input := range [][]string{goodWFStrings, goodWFNSStrings, goodWFDTDStrings} {
		parseShouldSucceed(t, 0, input)
	}
}
// TestParseBad verifies that the malformed fixtures are rejected.
// libxml2's error reporting is silenced for the duration so the
// expected failures do not spam the test log.
func TestParseBad(t *testing.T) {
	ReportErrors(false)
	defer ReportErrors(true)
	for _, input := range [][]string{badWFStrings} {
		parseShouldFail(t, 0, input)
	}
}
// TestParseNoBlanks verifies that the well-formed fixtures also parse
// with the NoBlanks option enabled.
func TestParseNoBlanks(t *testing.T) {
	for _, input := range [][]string{goodWFStrings, goodWFNSStrings, goodWFDTDStrings} {
		parseShouldSucceed(t, parser.XMLParseNoBlanks, input)
	}
}
// TestRoundtripNoBlanks verifies that NoBlanks strips ignorable
// whitespace from the serialized output.
func TestRoundtripNoBlanks(t *testing.T) {
	doc, err := ParseString(`<a> <b/> </a>`, parser.XMLParseNoBlanks)
	if err != nil {
		t.Errorf("failed to parse string: %s", err)
		return
	}
	// FIX: free the document like every other test in this file does;
	// it was previously leaked.
	defer doc.Free()
	if !assert.Regexp(t, regexp.MustCompile(`<a><b/></a>`), doc.Dump(false), "stringified xml should have no blanks") {
		return
	}
}
|
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"net"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const (
	// MdServerTokenServer is the expected server type for mdserver authentication.
	MdServerTokenServer = "kbfs_md"
	// MdServerTokenExpireIn is the TTL (in seconds) to use when constructing an authentication token.
	MdServerTokenExpireIn = 2 * 60 * 60 // 2 hours
	// MdServerBackgroundRekeyPeriod is how long the rekey checker
	// waits between runs on average. The timer gets reset after
	// every incoming FolderNeedsRekey RPC.
	// The amount of wait is calculated in nextRekeyTime.
	MdServerBackgroundRekeyPeriod = 1 * time.Hour
	// MdServerDefaultPingIntervalSeconds is the default interval on which the
	// client should contact the MD Server
	MdServerDefaultPingIntervalSeconds = 10
	// MdServerPingTimeout is how long to wait for a ping response
	// before breaking the connection and trying to reconnect.
	MdServerPingTimeout = 30 * time.Second
)
// MDServerRemote is an implementation of the MDServer interface.
type MDServerRemote struct {
	config Config
	log traceLogger
	deferLog traceLogger
	mdSrvAddr string // remote mdserver address this client talks to
	connOpts rpc.ConnectionOpts
	rpcLogFactory *libkb.RPCLogFactory
	authToken *kbfscrypto.AuthToken
	squelchRekey bool
	pinger pinger // periodic keep-alive; see pingOnce
	authenticatedMtx sync.RWMutex // protects isAuthenticated
	isAuthenticated bool
	connMu sync.RWMutex // protects conn and client
	conn *rpc.Connection
	client keybase1.MetadataClient
	observerMu sync.Mutex // protects observers
	// chan is nil if we have unregistered locally, but not yet with
	// the server.
	observers map[tlf.ID]chan<- error
	tickerCancel context.CancelFunc
	tickerMu sync.Mutex // protects the ticker cancel function
	rekeyCancel context.CancelFunc // stops the background rekey checker
	rekeyTimer *time.Timer
	serverOffsetMu sync.RWMutex // protects the two fields below
	serverOffsetKnown bool
	serverOffset time.Duration
}
// Test that MDServerRemote fully implements the MDServer interface.
var _ MDServer = (*MDServerRemote)(nil)
// Test that MDServerRemote fully implements the KeyServer interface.
var _ KeyServer = (*MDServerRemote)(nil)
// Test that MDServerRemote fully implements the AuthTokenRefreshHandler interface.
var _ kbfscrypto.AuthTokenRefreshHandler = (*MDServerRemote)(nil)
// Test that MDServerRemote fully implements the ConnectionHandler interface.
var _ rpc.ConnectionHandler = (*MDServerRemote)(nil)
// NewMDServerRemote returns a new instance of MDServerRemote. It wires
// up the pinger, auth token, reconnect policy and initial connection,
// and starts the background rekey checker goroutine (stopped via
// rekeyCancel).
func NewMDServerRemote(config Config, srvAddr string,
	rpcLogFactory *libkb.RPCLogFactory) *MDServerRemote {
	log := config.MakeLogger("")
	deferLog := log.CloneWithAddedDepth(1)
	mdServer := &MDServerRemote{
		config: config,
		observers: make(map[tlf.ID]chan<- error),
		log: traceLogger{log},
		deferLog: traceLogger{deferLog},
		mdSrvAddr: srvAddr,
		rpcLogFactory: rpcLogFactory,
		rekeyTimer: time.NewTimer(nextRekeyTime()),
	}
	mdServer.pinger = pinger{
		name: "MDServerRemote",
		doPing: mdServer.pingOnce,
		timeout: MdServerPingTimeout,
		log: mdServer.log,
	}
	mdServer.authToken = kbfscrypto.NewAuthToken(config.Crypto(),
		MdServerTokenServer, MdServerTokenExpireIn,
		"libkbfs_mdserver_remote", VersionString(), mdServer)
	// Reconnects use a constant backoff with a randomized initial window.
	constBackoff := backoff.NewConstantBackOff(RPCReconnectInterval)
	mdServer.connOpts = rpc.ConnectionOpts{
		WrapErrorFunc: libkb.WrapError,
		TagsFunc: libkb.LogTagsFromContext,
		ReconnectBackoff: func() backoff.BackOff { return constBackoff },
		InitialReconnectBackoffWindow: mdserverReconnectBackoffWindow,
	}
	mdServer.initNewConnection()
	// Check for rekey opportunities periodically.
	rekeyCtx, rekeyCancel := context.WithCancel(context.Background())
	mdServer.rekeyCancel = rekeyCancel
	go mdServer.backgroundRekeyChecker(rekeyCtx)
	return mdServer
}
// getIsAuthenticated returns the current isAuthenticated flag under the
// read lock.
func (md *MDServerRemote) getIsAuthenticated() bool {
	md.authenticatedMtx.RLock()
	authed := md.isAuthenticated
	md.authenticatedMtx.RUnlock()
	return authed
}
// setIsAuthenticated records the outcome of an authentication
// attempt under the write lock.
func (md *MDServerRemote) setIsAuthenticated(isAuthenticated bool) {
	md.authenticatedMtx.Lock()
	md.isAuthenticated = isAuthenticated
	md.authenticatedMtx.Unlock()
}
// initNewConnection (re)establishes the TLS RPC connection to the
// mdserver, shutting down any previous connection first. It holds
// connMu for the whole swap so getClient/getConn never observe a
// half-replaced conn/client pair.
func (md *MDServerRemote) initNewConnection() {
	md.connMu.Lock()
	defer md.connMu.Unlock()

	// Tear down the old connection, if any, before replacing it.
	if md.conn != nil {
		md.conn.Shutdown()
	}

	md.conn = rpc.NewTLSConnection(
		md.mdSrvAddr, kbfscrypto.GetRootCerts(md.mdSrvAddr),
		kbfsmd.ServerErrorUnwrapper{}, md, md.rpcLogFactory,
		md.config.MakeLogger(""), md.connOpts)
	// The client must be refreshed together with the connection.
	md.client = keybase1.MetadataClient{Cli: md.conn.GetClient()}
}
// RemoteAddress returns the remote mdserver this client is talking to
func (md *MDServerRemote) RemoteAddress() string {
	addr := md.mdSrvAddr
	return addr
}
// HandlerName implements the ConnectionHandler interface.
func (*MDServerRemote) HandlerName() string {
	const name = "MDServerRemote"
	return name
}
// OnConnect implements the ConnectionHandler interface.
//
// It registers the MetadataUpdate protocol (so the server can push
// FolderNeedsRekey notifications), resets authentication on the fresh
// connection, and restarts the pinger. A NoCurrentSessionError from
// resetAuth is tolerated: the connection stays up, anonymously.
func (md *MDServerRemote) OnConnect(ctx context.Context,
	conn *rpc.Connection, client rpc.GenericClient,
	server *rpc.Server) (err error) {
	defer func() {
		if err == nil {
			md.config.Reporter().Notify(ctx,
				connectionNotification(connectionStatusConnected))
		}
	}()

	md.log.CDebugf(ctx, "OnConnect called with a new connection")

	// we'll get replies asynchronously as to not block the connection
	// for doing other active work for the user. they will be sent to
	// the FolderNeedsRekey handler.
	if err := server.Register(keybase1.MetadataUpdateProtocol(md)); err != nil {
		// Re-registration after a reconnect is expected; only a
		// different registration error is fatal.
		if _, ok := err.(rpc.AlreadyRegisteredError); !ok {
			return err
		}
	}

	// reset auth -- using md.client here would cause problematic recursion.
	c := keybase1.MetadataClient{Cli: client}
	pingIntervalSeconds, err := md.resetAuth(ctx, c)
	switch err.(type) {
	case nil:
	case NoCurrentSessionError:
		// Logged-out users may still use the connection anonymously.
		md.log.CDebugf(ctx, "Logged-out user")
	default:
		return err
	}

	// Notify listeners that the MD service is reachable again.
	md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, nil)

	// start pinging
	md.pinger.resetTicker(pingIntervalSeconds)
	return nil
}
// ctxMDServerResetKeyType is a private context-key type, so the key
// cannot collide with keys from other packages.
type ctxMDServerResetKeyType int

const (
	// ctxMDServerResetKey identifies whether the current context has
	// already passed through `MDServerRemote.resetAuth`. It is used
	// by RefreshAuthToken to break recursion.
	ctxMDServerResetKey ctxMDServerResetKeyType = iota
)
// resetAuth is called to reset the authorization on an MDServer
// connection. If this function returns NoCurrentSessionError, the
// caller should treat this as a logged-out user.
//
// On success it returns the ping interval (in seconds) the server
// asked for. The context is tagged with ctxMDServerResetKey so that a
// nested RefreshAuthToken triggered by this call does not re-enter
// resetAuth.
func (md *MDServerRemote) resetAuth(
	ctx context.Context, c keybase1.MetadataClient) (int, error) {
	ctx = context.WithValue(ctx, ctxMDServerResetKey, "1")

	// The deferred write publishes the final outcome exactly once,
	// whatever return path is taken.
	isAuthenticated := false
	defer func() {
		md.setIsAuthenticated(isAuthenticated)
	}()

	session, err := md.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		// No session: stay anonymous, but keep pinging at the
		// default rate.
		md.log.CDebugf(ctx,
			"Error getting current session (%+v), skipping resetAuth", err)
		return MdServerDefaultPingIntervalSeconds, err
	}

	// Challenge-response: fetch a server challenge, sign it with the
	// device key, and present the signature to Authenticate.
	challenge, err := c.GetChallenge(ctx)
	if err != nil {
		md.log.CWarningf(ctx, "challenge request error: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "received challenge")

	// get a new signature
	signature, err := md.authToken.Sign(ctx, session.Name, session.UID,
		session.VerifyingKey, challenge)
	if err != nil {
		md.log.CWarningf(ctx, "error signing authentication token: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "authentication token signed")

	// authenticate
	pingIntervalSeconds, err := c.Authenticate(ctx, signature)
	if err != nil {
		md.log.CWarningf(ctx, "authentication error: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "authentication successful; ping interval: %ds",
		pingIntervalSeconds)

	isAuthenticated = true

	// Only on a transition from unauthenticated to authenticated do
	// we (after releasing the lock, via defer) ask the server for the
	// list of folders needing rekey.
	md.authenticatedMtx.Lock()
	if !md.isAuthenticated {
		defer func() {
			// request a list of folders needing rekey action
			if err := md.getFoldersForRekey(ctx, c); err != nil {
				md.log.CWarningf(ctx, "getFoldersForRekey failed with %v", err)
			}
			md.deferLog.CDebugf(ctx,
				"requested list of folders for rekey")
		}()
	}
	// Need to ensure that any conflicting thread gets the updated value
	md.isAuthenticated = true
	md.authenticatedMtx.Unlock()

	return pingIntervalSeconds, nil
}
// getClient returns the current metadata RPC client under the
// connection read lock, so it is consistent with the live connection.
func (md *MDServerRemote) getClient() (c keybase1.MetadataClient) {
	md.connMu.RLock()
	c = md.client
	md.connMu.RUnlock()
	return c
}
// RefreshAuthToken implements the AuthTokenRefreshHandler interface.
//
// It re-runs resetAuth unless the context shows we are already inside
// a resetAuth call (ctxMDServerResetKey), which would recurse.
func (md *MDServerRemote) RefreshAuthToken(ctx context.Context) {
	md.log.CDebugf(ctx, "MDServerRemote: Refreshing auth token...")

	// resetAuth tags its context; bail out to avoid re-entering it.
	if v := ctx.Value(ctxMDServerResetKey); v != nil {
		md.log.CDebugf(ctx, "Avoiding resetAuth recursion")
		return
	}

	_, err := md.resetAuth(ctx, md.getClient())
	switch err.(type) {
	case nil:
		md.log.CDebugf(ctx, "MDServerRemote: auth token refreshed")
	case NoCurrentSessionError:
		// Logged-out: keep the anonymous connection.
		md.log.CDebugf(ctx,
			"MDServerRemote: no session available, connection remains anonymous")
	default:
		md.log.CDebugf(ctx,
			"MDServerRemote: error refreshing auth token: %v", err)
		// TODO: once KBFS-1982 is merged, an unknown error here
		// should just cause a complete disconnect, and we can let the
		// rpc connection do the retry.
	}
}
func (md *MDServerRemote) pingOnce(ctx context.Context) {
clock := md.config.Clock()
beforePing := clock.Now()
resp, err := md.getClient().Ping2(ctx)
if err == context.DeadlineExceeded {
if md.getIsAuthenticated() {
md.log.CDebugf(ctx, "Ping timeout -- reinitializing connection")
md.initNewConnection()
} else {
md.log.CDebugf(ctx, "Ping timeout but not reinitializing")
}
return
} else if err != nil {
md.log.CDebugf(ctx, "MDServerRemote: ping error %s", err)
return
}
afterPing := clock.Now()
pingLatency := afterPing.Sub(beforePing)
if md.serverOffset > 0 && pingLatency > 5*time.Second {
md.log.CDebugf(ctx, "Ignoring large ping time: %s",
pingLatency)
return
}
serverTimeNow :=
keybase1.FromTime(resp.Timestamp).Add(pingLatency / 2)
func() {
md.serverOffsetMu.Lock()
defer md.serverOffsetMu.Unlock()
// Estimate the server offset, assuming a balanced
// round trip latency (and 0 server processing
// latency). Calculate it so that it can be added
// to a server timestamp in order to get the local
// time of a server-timestamped event.
md.serverOffset = afterPing.Sub(serverTimeNow)
md.serverOffsetKnown = true
}()
}
// OnConnectError implements the ConnectionHandler interface.
//
// Called by the RPC layer when a (re)connection attempt fails; tears
// down per-connection state (observers, pinger, auth token refresh)
// and notifies KBFSOps of the failure.
func (md *MDServerRemote) OnConnectError(err error, wait time.Duration) {
	md.log.CWarningf(context.TODO(),
		"MDServerRemote: connection error: %q; retrying in %s", err, wait)
	// TODO: it might make sense to show something to the user if this is
	// due to authentication, for example.
	md.cancelObservers()
	md.pinger.cancelTicker()
	if md.authToken != nil {
		md.authToken.Shutdown()
	}
	md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, err)
}
// OnDoCommandError implements the ConnectionHandler interface.
//
// Invoked when a DoCommand RPC fails and will be retried after wait.
func (md *MDServerRemote) OnDoCommandError(err error, wait time.Duration) {
	md.log.CWarningf(context.TODO(),
		"MDServerRemote: DoCommand error: %q; retrying in %s", err, wait)
	// Only push errors that should not be retried as connection status changes.
	// (Throttle errors are retried internally; see ShouldRetry.)
	if !md.ShouldRetry("", err) {
		md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, err)
	}
}
// OnDisconnected implements the ConnectionHandler interface.
//
// Resets all connection-scoped state: the server-clock offset, the
// auth flag, observers, the pinger, the auth token refresher, and the
// rekey queue. User-visible notifications only fire for a real
// disconnect (StartingNonFirstConnection), not the initial connect.
func (md *MDServerRemote) OnDisconnected(ctx context.Context,
	status rpc.DisconnectStatus) {
	if status == rpc.StartingNonFirstConnection {
		md.log.CWarningf(ctx, "MDServerRemote is disconnected")
		md.config.Reporter().Notify(ctx,
			connectionNotification(connectionStatusDisconnected))
	}

	// The old offset estimate is meaningless for the next connection.
	func() {
		md.serverOffsetMu.Lock()
		defer md.serverOffsetMu.Unlock()
		md.serverOffsetKnown = false
		md.serverOffset = 0
	}()

	md.setIsAuthenticated(false)

	md.cancelObservers()
	md.pinger.cancelTicker()
	if md.authToken != nil {
		md.authToken.Shutdown()
	}
	// Replace the rekey queue wholesale; queued work belongs to the
	// dead connection.
	md.config.RekeyQueue().Shutdown()
	md.config.SetRekeyQueue(NewRekeyQueueStandard(md.config))
	// Reset the timer since we will get folders for rekey again on
	// the re-connect.
	md.resetRekeyTimer()

	if status == rpc.StartingNonFirstConnection {
		md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, errDisconnected{})
	}
}
// ShouldRetry implements the ConnectionHandler interface.
// Only server throttle errors are worth retrying.
func (md *MDServerRemote) ShouldRetry(name string, err error) bool {
	switch err.(type) {
	case kbfsmd.ServerErrorThrottle:
		return true
	default:
		return false
	}
}
// ShouldRetryOnConnect implements the ConnectionHandler interface.
// Keep retrying unless the user explicitly canceled the input prompt.
func (md *MDServerRemote) ShouldRetryOnConnect(err error) bool {
	if _, canceled := err.(libkb.InputCanceledError); canceled {
		return false
	}
	return true
}
// CheckReachability implements the MDServer interface.
//
// Probes the mdserver address with a raw TCP dial; on failure while
// authenticated, assumes the RPC connection is stale and rebuilds it.
func (md *MDServerRemote) CheckReachability(ctx context.Context) {
	conn, err := net.DialTimeout("tcp", md.mdSrvAddr, MdServerPingTimeout)
	if err != nil {
		if md.getIsAuthenticated() {
			md.log.CDebugf(ctx, "MDServerRemote: CheckReachability(): "+
				"failed to connect, reconnecting: %s", err.Error())
			md.initNewConnection()
		} else {
			md.log.CDebugf(ctx, "MDServerRemote: CheckReachability(): "+
				"failed to connect (%s), but not reconnecting", err.Error())
		}
	}
	// The probe connection itself is never used for RPC.
	if conn != nil {
		conn.Close()
	}
}
// cancelObservers signals errors and clears any registered observers.
// Each observer channel receives MDServerDisconnected and is closed
// (via signalObserverLocked), emptying the map.
func (md *MDServerRemote) cancelObservers() {
	md.observerMu.Lock()
	defer md.observerMu.Unlock()
	// fire errors for any registered observers
	for id, observerChan := range md.observers {
		md.signalObserverLocked(observerChan, id, MDServerDisconnected{})
	}
}
// CancelRegistration implements the MDServer interface for MDServerRemote.
//
// Signals and closes the local observer channel for id, but leaves a
// nil sentinel in the map: the server still believes we are
// registered, and RegisterForUpdate uses the sentinel to avoid a
// redundant re-registration RPC.
func (md *MDServerRemote) CancelRegistration(ctx context.Context, id tlf.ID) {
	md.observerMu.Lock()
	defer md.observerMu.Unlock()

	observerChan, ok := md.observers[id]
	if !ok {
		// not registered
		return
	}

	// signal that we've seen the update
	md.signalObserverLocked(
		observerChan, id, errors.New("Registration canceled"))
	// Setting nil here indicates that the remote MD server thinks
	// we're still registered, though locally no one is listening.
	md.observers[id] = nil
}
// signalObserverLocked delivers err to one observer, closes its
// channel, and removes it from the map. The observer lock must be
// held. A nil channel (canceled-but-still-server-registered entry)
// is simply dropped.
func (md *MDServerRemote) signalObserverLocked(observerChan chan<- error, id tlf.ID, err error) {
	delete(md.observers, id)
	if observerChan == nil {
		return
	}
	observerChan <- err
	close(observerChan)
}
// get is a helper used to retrieve metadata blocks from the MD
// server: it issues the GetMetadata RPC, parses the folder ID, and
// decodes every returned block into a RootMetadataSigned.
func (md *MDServerRemote) get(ctx context.Context, arg keybase1.GetMetadataArg) (
	tlfID tlf.ID, rmdses []*RootMetadataSigned, err error) {
	// Issue the request.
	resp, err := md.getClient().GetMetadata(ctx, arg)
	if err != nil {
		return tlf.ID{}, nil, err
	}

	// Parse the folder ID out of the response.
	tlfID, err = tlf.ParseID(resp.FolderID)
	if err != nil {
		return tlf.ID{}, nil, err
	}

	// Decode each serialized block, capping at our supported
	// metadata version.
	maxVer := md.config.MetadataVersion()
	rmdses = make([]*RootMetadataSigned, 0, len(resp.MdBlocks))
	for _, block := range resp.MdBlocks {
		rmds, err := DecodeRootMetadataSigned(
			md.config.Codec(), tlfID, MetadataVer(block.Version), maxVer,
			block.Block, keybase1.FromTime(block.Timestamp))
		if err != nil {
			return tlf.ID{}, nil, err
		}
		rmdses = append(rmdses, rmds)
	}
	return tlfID, rmdses, nil
}
// GetForHandle implements the MDServer interface for MDServerRemote.
// It resolves a TLF handle to its folder ID and, if present, the
// latest signed metadata for the requested merge status.
func (md *MDServerRemote) GetForHandle(ctx context.Context,
	handle tlf.Handle, mStatus MergeStatus) (
	tlfID tlf.ID, rmds *RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	// TODO: Ideally, *tlf.Handle would have a nicer String() function.
	md.log.LazyTrace(ctx, "MDServer: GetForHandle %+v %s", handle, mStatus)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetForHandle %+v %s done (err=%v)", handle, mStatus, err)
	}()

	encodedHandle, err := md.config.Codec().Encode(handle)
	if err != nil {
		return tlf.ID{}, nil, err
	}

	// BranchID needs to be present when Unmerged is true;
	// NullBranchID signals that the folder's current branch ID
	// should be looked up.
	id, rmdses, err := md.get(ctx, keybase1.GetMetadataArg{
		FolderHandle: encodedHandle,
		BranchID:     NullBranchID.String(),
		Unmerged:     mStatus == Unmerged,
	})
	switch {
	case err != nil:
		return tlf.ID{}, nil, err
	case len(rmdses) == 0:
		return id, nil, nil
	default:
		// TODO: Error if server returns more than one rmds.
		return id, rmdses[0], nil
	}
}
// GetForTLF implements the MDServer interface for MDServerRemote.
// Returns the latest signed metadata for the given folder/branch, or
// nil if the server has none.
func (md *MDServerRemote) GetForTLF(ctx context.Context, id tlf.ID,
	bid BranchID, mStatus MergeStatus) (rmds *RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetForTLF %s %s %s", id, bid, mStatus)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetForTLF %s %s %s done (err=%v)", id, bid, mStatus, err)
	}()

	_, rmdses, err := md.get(ctx, keybase1.GetMetadataArg{
		FolderID: id.String(),
		BranchID: bid.String(),
		Unmerged: mStatus == Unmerged,
	})
	switch {
	case err != nil:
		return nil, err
	case len(rmdses) == 0:
		return nil, nil
	default:
		// TODO: Error if server returns more than one rmds.
		return rmdses[0], nil
	}
}
// GetRange implements the MDServer interface for MDServerRemote.
// Fetches the signed metadata blocks for revisions start..stop
// (inclusive) of the given folder/branch.
func (md *MDServerRemote) GetRange(ctx context.Context, id tlf.ID,
	bid BranchID, mStatus MergeStatus, start, stop kbfsmd.Revision) (
	rmdses []*RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetRange %s %s %s %d-%d", id, bid, mStatus, start, stop)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetRange %s %s %s %d-%d done (err=%v)", id, bid, mStatus, start, stop, err)
	}()

	_, rmdses, err = md.get(ctx, keybase1.GetMetadataArg{
		FolderID:      id.String(),
		BranchID:      bid.String(),
		Unmerged:      mStatus == Unmerged,
		StartRevision: start.Number(),
		StopRevision:  stop.Number(),
	})
	return rmdses, err
}
// Put implements the MDServer interface for MDServerRemote.
//
// Encodes the signed metadata block and uploads it, attaching any
// *new* key bundles carried by extra (segregated-key-bundle versions
// only). Pre-SegregatedKeyBundlesVer metadata must not carry extra.
func (md *MDServerRemote) Put(ctx context.Context, rmds *RootMetadataSigned,
	extra ExtraMetadata) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: Put %s %d", rmds.MD.TlfID(), rmds.MD.RevisionNumber())
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: Put %s %d done (err=%v)", rmds.MD.TlfID(), rmds.MD.RevisionNumber(), err)
	}()

	// encode MD block
	rmdsBytes, err := EncodeRootMetadataSigned(md.config.Codec(), rmds)
	if err != nil {
		return err
	}

	// put request
	arg := keybase1.PutMetadataArg{
		MdBlock: keybase1.MDBlock{
			Version: int(rmds.Version()),
			Block:   rmdsBytes,
		},
		LogTags: nil,
	}

	if rmds.Version() < SegregatedKeyBundlesVer {
		// Old versions embed key bundles in the MD itself.
		if extra != nil {
			return fmt.Errorf("Unexpected non-nil extra: %+v", extra)
		}
	} else if extra != nil {
		// For now, if we have a non-nil extra, it must be
		// *ExtraMetadataV3, but in the future it might be
		// some other type (e.g., *ExtraMetadataV4).
		extraV3, ok := extra.(*ExtraMetadataV3)
		if !ok {
			return fmt.Errorf("Extra of unexpected type %T", extra)
		}

		// Add any new key bundles. Only bundles flagged as new are
		// uploaded; unchanged ones are already on the server.
		if extraV3.wkbNew {
			wkbBytes, err := md.config.Codec().Encode(extraV3.wkb)
			if err != nil {
				return err
			}
			arg.WriterKeyBundle = keybase1.KeyBundle{
				Version: int(rmds.Version()),
				Bundle:  wkbBytes,
			}
		}
		if extraV3.rkbNew {
			rkbBytes, err := md.config.Codec().Encode(extraV3.rkb)
			if err != nil {
				return err
			}
			arg.ReaderKeyBundle = keybase1.KeyBundle{
				Version: int(rmds.Version()),
				Bundle:  rkbBytes,
			}
		}
	}

	return md.getClient().PutMetadata(ctx, arg)
}
// PruneBranch implements the MDServer interface for MDServerRemote.
// Asks the server to discard the given unmerged branch of the folder.
func (md *MDServerRemote) PruneBranch(
	ctx context.Context, id tlf.ID, bid BranchID) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: PruneBranch %s %s", id, bid)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: PruneBranch %s %s (err=%v)", id, bid, err)
	}()

	return md.getClient().PruneBranch(ctx, keybase1.PruneBranchArg{
		FolderID: id.String(),
		BranchID: bid.String(),
		LogTags:  nil,
	})
}
// MetadataUpdate implements the MetadataUpdateProtocol interface.
// Server push: a folder we registered for has a new revision; signal
// the waiting observer, if any.
func (md *MDServerRemote) MetadataUpdate(_ context.Context, arg keybase1.MetadataUpdateArg) error {
	tlfID, err := tlf.ParseID(arg.FolderID)
	if err != nil {
		return err
	}

	md.observerMu.Lock()
	defer md.observerMu.Unlock()
	if observerChan, ok := md.observers[tlfID]; ok {
		// signal that we've seen the update
		md.signalObserverLocked(observerChan, tlfID, nil)
	}
	return nil
}
// FoldersNeedRekey implements the MetadataUpdateProtocol interface.
//
// Batch server push: enqueue each named folder for rekeying, unless
// rekey handling is squelched (test-only).
func (md *MDServerRemote) FoldersNeedRekey(ctx context.Context,
	requests []keybase1.RekeyRequest) error {
	if md.squelchRekey {
		md.log.CDebugf(ctx, "MDServerRemote: rekey updates squelched for testing")
		return nil
	}
	for _, req := range requests {
		id, err := tlf.ParseID(req.FolderID)
		if err != nil {
			return err
		}
		md.log.CDebugf(ctx, "MDServerRemote: folder needs rekey: %s", id.String())
		// queue the folder for rekeying
		md.config.RekeyQueue().Enqueue(id)
	}
	// Reset the timer in case there are a lot of rekey folders
	// dribbling in from the server still.
	md.resetRekeyTimer()
	return nil
}
// FolderNeedsRekey implements the MetadataUpdateProtocol interface.
// Single-folder server push: enqueue the folder for rekeying unless
// rekey handling is squelched (test-only).
func (md *MDServerRemote) FolderNeedsRekey(ctx context.Context,
	arg keybase1.FolderNeedsRekeyArg) error {
	tlfID, err := tlf.ParseID(arg.FolderID)
	if err != nil {
		return err
	}
	md.log.CDebugf(ctx, "MDServerRemote: folder needs rekey: %s", tlfID.String())
	if md.squelchRekey {
		md.log.CDebugf(ctx, "MDServerRemote: rekey updates squelched for testing")
		return nil
	}

	// Queue the folder for rekeying, and push the periodic check out,
	// since more of these notifications may still be arriving.
	md.config.RekeyQueue().Enqueue(tlfID)
	md.resetRekeyTimer()
	return nil
}
// getConn returns the current RPC connection under the read lock.
func (md *MDServerRemote) getConn() (conn *rpc.Connection) {
	md.connMu.RLock()
	conn = md.conn
	md.connMu.RUnlock()
	return conn
}
// RegisterForUpdate implements the MDServer interface for MDServerRemote.
//
// Registers interest in updates to id past currHead and returns a
// channel that receives exactly one error (or nil update signal).
// The body runs inside DoCommand so it is retried across reconnects;
// each retry must re-install the local observer, since disconnects
// clear the observer map.
func (md *MDServerRemote) RegisterForUpdate(ctx context.Context, id tlf.ID,
	currHead kbfsmd.Revision) (<-chan error, error) {
	arg := keybase1.RegisterForUpdatesArg{
		FolderID:     id.String(),
		CurrRevision: currHead.Number(),
		LogTags:      nil,
	}

	// register
	var c chan error
	conn := md.getConn()
	err := conn.DoCommand(ctx, "register", func(rawClient rpc.GenericClient) error {
		// set up the server to receive updates, since we may
		// get disconnected between retries.
		server := conn.GetServer()
		err := server.Register(keybase1.MetadataUpdateProtocol(md))
		if err != nil {
			if _, ok := err.(rpc.AlreadyRegisteredError); !ok {
				return err
			}
		}
		// TODO: Do something with server.Err() when server is
		// done?
		server.Run()

		// keep re-adding the observer on retries, since
		// disconnects or connection errors clear observers.
		alreadyRegistered := func() bool {
			md.observerMu.Lock()
			defer md.observerMu.Unlock()
			// It's possible for a nil channel to be in
			// `md.observers`, if we are still registered with the
			// server after a previous cancellation.
			existingCh, alreadyRegistered := md.observers[id]
			if existingCh != nil {
				panic(fmt.Sprintf(
					"Attempted double-registration for folder: %s", id))
			}
			c = make(chan error, 1)
			md.observers[id] = c
			return alreadyRegistered
		}()
		if alreadyRegistered {
			// Server-side registration survived a local cancel; no
			// RPC needed, the new local channel suffices.
			return nil
		}
		// Use this instead of md.client since we're already
		// inside a DoCommand().
		// NOTE: this `c` shadows the outer `chan error` c above —
		// it is the RPC client, not the observer channel.
		c := keybase1.MetadataClient{Cli: rawClient}
		err = c.RegisterForUpdates(ctx, arg)
		if err != nil {
			func() {
				md.observerMu.Lock()
				defer md.observerMu.Unlock()
				// we could've been canceled by a shutdown so look this up
				// again before closing and deleting.
				if updateChan, ok := md.observers[id]; ok {
					close(updateChan)
					delete(md.observers, id)
				}
			}()
		}
		return err
	})
	if err != nil {
		c = nil
	}

	return c, err
}
// TruncateLock implements the MDServer interface for MDServerRemote.
// Acquires the server-side truncate lock for the folder.
func (md *MDServerRemote) TruncateLock(ctx context.Context, id tlf.ID) (
	locked bool, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: TruncateLock %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: TruncateLock %s (err=%v)", id, err)
	}()
	locked, err = md.getClient().TruncateLock(ctx, id.String())
	return locked, err
}
// TruncateUnlock implements the MDServer interface for MDServerRemote.
// Releases the server-side truncate lock for the folder.
func (md *MDServerRemote) TruncateUnlock(ctx context.Context, id tlf.ID) (
	unlocked bool, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: TruncateUnlock %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: TruncateUnlock %s (err=%v)", id, err)
	}()
	unlocked, err = md.getClient().TruncateUnlock(ctx, id.String())
	return unlocked, err
}
// GetLatestHandleForTLF implements the MDServer interface for MDServerRemote.
// Fetches and decodes the server's current handle for the folder.
func (md *MDServerRemote) GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (
	handle tlf.Handle, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetLatestHandle %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetLatestHandle %s (err=%v)", id, err)
	}()

	var buf []byte
	if buf, err = md.getClient().GetLatestFolderHandle(ctx, id.String()); err != nil {
		return tlf.Handle{}, err
	}
	if err = md.config.Codec().Decode(buf, &handle); err != nil {
		return tlf.Handle{}, err
	}
	return handle, nil
}
// OffsetFromServerTime implements the MDServer interface for
// MDServerRemote. The bool result reports whether an estimate has
// been computed yet for this connection.
func (md *MDServerRemote) OffsetFromServerTime() (time.Duration, bool) {
	md.serverOffsetMu.RLock()
	offset, known := md.serverOffset, md.serverOffsetKnown
	md.serverOffsetMu.RUnlock()
	return offset, known
}
// CheckForRekeys implements the MDServer interface.
func (md *MDServerRemote) CheckForRekeys(ctx context.Context) <-chan error {
// Wait 5 seconds before asking for rekeys, because the server
// could have an out-of-date cache if we ask too soon. Why 5
// seconds you ask? See `pollWait` in
// github.com/keybase/client/go/auth/user_keys_api.go. We don't
// use that value directly since there's no guarantee the server
// is using the same value. TODO: the server should tell us what
// value it is using.
c := make(chan error, 1)
time.AfterFunc(5*time.Second, func() {
md.log.CInfof(ctx, "CheckForRekeys: checking for rekeys")
select {
case <-ctx.Done():
c <- ctx.Err()
default:
}
if err := md.getFoldersForRekey(ctx, md.getClient()); err != nil {
md.log.CDebugf(ctx, "getFoldersForRekey failed during "+
"CheckForRekeys: %v", err)
c <- err
}
md.resetRekeyTimer()
c <- nil
})
return c
}
// getFoldersForRekey registers to receive updates about folders
// needing rekey actions, keyed to this device's crypt public key.
func (md *MDServerRemote) getFoldersForRekey(ctx context.Context,
	client keybase1.MetadataClient) error {
	// Look up the current session to get this device's key.
	session, err := md.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}
	kid := session.CryptPublicKey.KID()
	return client.GetFoldersForRekey(ctx, kid)
}
// Shutdown implements the MDServer interface for MDServerRemote.
//
// Tears down the connection, observers, pinger, auth token refresher,
// and the background rekey checker, in that order.
func (md *MDServerRemote) Shutdown() {
	md.connMu.Lock()
	defer md.connMu.Unlock()

	// close the connection
	md.conn.Shutdown()
	// cancel pending observers
	md.cancelObservers()
	// cancel the ping ticker
	md.pinger.cancelTicker()
	// cancel the auth token ticker
	if md.authToken != nil {
		md.authToken.Shutdown()
	}
	// stop the background rekey checker goroutine, if running
	if md.rekeyCancel != nil {
		md.rekeyCancel()
	}
}
// IsConnected implements the MDServer interface for MDServerLocal
func (md *MDServerRemote) IsConnected() bool {
	if conn := md.getConn(); conn != nil {
		return conn.IsConnected()
	}
	return false
}
//
// The below methods support the MD server acting as the key server.
// This will be the case for v1 of KBFS but we may move to our own
// separate key server at some point.
//
// GetTLFCryptKeyServerHalf is an implementation of the KeyServer interface.
// Fetches and decodes the server half of a TLF crypt key for the
// given device key.
func (md *MDServerRemote) GetTLFCryptKeyServerHalf(ctx context.Context,
	serverHalfID TLFCryptKeyServerHalfID,
	cryptKey kbfscrypto.CryptPublicKey) (
	serverHalf kbfscrypto.TLFCryptKeyServerHalf, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: GetTLFCryptKeyServerHalf %s", serverHalfID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: GetTLFCryptKeyServerHalf %s (err=%v)", serverHalfID, err)
	}()

	// Encode the half ID for the wire.
	idBytes, err := md.config.Codec().Encode(serverHalfID)
	if err != nil {
		return serverHalf, err
	}

	// Fetch the key bytes from the server.
	keyBytes, err := md.getClient().GetKey(ctx, keybase1.GetKeyArg{
		KeyHalfID: idBytes,
		DeviceKID: cryptKey.KID().String(),
		LogTags:   nil,
	})
	if err != nil {
		return serverHalf, err
	}

	// Decode into the result.
	err = md.config.Codec().Decode(keyBytes, &serverHalf)
	return serverHalf, err
}
// PutTLFCryptKeyServerHalves is an implementation of the KeyServer interface.
// Uploads one encoded key half per (user, device) pair.
func (md *MDServerRemote) PutTLFCryptKeyServerHalves(ctx context.Context,
	keyServerHalves UserDeviceKeyServerHalves) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: PutTLFCryptKeyServerHalves %v", keyServerHalves)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: PutTLFCryptKeyServerHalves %v (err=%v)", keyServerHalves, err)
	}()

	// Flatten the two-level user -> device map into the wire format.
	var halves []keybase1.KeyHalf
	for user, devices := range keyServerHalves {
		for devicePubKey, serverHalf := range devices {
			encoded, err := md.config.Codec().Encode(serverHalf)
			if err != nil {
				return err
			}
			halves = append(halves, keybase1.KeyHalf{
				User:      user,
				DeviceKID: devicePubKey.KID(),
				Key:       encoded,
			})
		}
	}

	return md.getClient().PutKeys(ctx, keybase1.PutKeysArg{
		KeyHalves: halves,
		LogTags:   nil,
	})
}
// DeleteTLFCryptKeyServerHalf is an implementation of the KeyServer interface.
// Removes the given key half for one of the user's devices.
func (md *MDServerRemote) DeleteTLFCryptKeyServerHalf(ctx context.Context,
	uid keybase1.UID, key kbfscrypto.CryptPublicKey,
	serverHalfID TLFCryptKeyServerHalfID) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: DeleteTLFCryptKeyServerHalf %s %s", uid, serverHalfID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: DeleteTLFCryptKeyServerHalf %s %s done (err=%v)", uid, serverHalfID, err)
	}()

	// Encode the half ID for the wire.
	idBytes, err := md.config.Codec().Encode(serverHalfID)
	if err != nil {
		return err
	}

	return md.getClient().DeleteKey(ctx, keybase1.DeleteKeyArg{
		Uid:       uid,
		DeviceKID: key.KID(),
		KeyHalfID: idBytes,
		LogTags:   nil,
	})
}
// DisableRekeyUpdatesForTesting implements the MDServer interface.
// Silences incoming FolderNeedsRekey notifications and stops the
// periodic rekey timer. Test-only; races with readers of
// squelchRekey are deliberately accepted here.
func (md *MDServerRemote) DisableRekeyUpdatesForTesting() {
	// This doesn't need a lock for testing.
	md.squelchRekey = true
	md.rekeyTimer.Stop()
}
// CtxMDSRTagKey is the type used for unique context tags within MDServerRemote
type CtxMDSRTagKey int

const (
	// CtxMDSRIDKey is the type of the tag for unique operation IDs
	// within MDServerRemote.
	CtxMDSRIDKey CtxMDSRTagKey = iota
)

// CtxMDSROpID is the display name for the unique operation
// MDServerRemote ID tag. It appears in log lines produced by the
// background rekey checker.
const CtxMDSROpID = "MDSRID"
// backgroundRekeyChecker loops until ctx is canceled, asking the
// server for folders needing rekey each time rekeyTimer fires. The
// timer is re-armed (with jitter; see nextRekeyTime) after every
// check, and checks are skipped while disconnected.
func (md *MDServerRemote) backgroundRekeyChecker(ctx context.Context) {
	for {
		select {
		case <-md.rekeyTimer.C:
			if !md.getConn().IsConnected() {
				// Not connected; just re-arm and wait for the next tick.
				md.resetRekeyTimer()
				continue
			}

			// Assign an ID to this rekey check so we can track it.
			newCtx := CtxWithRandomIDReplayable(ctx, CtxMDSRIDKey, CtxMDSROpID, md.log)
			md.log.CDebugf(newCtx, "Checking for rekey folders")
			if err := md.getFoldersForRekey(
				newCtx, md.getClient()); err != nil {
				md.log.CWarningf(newCtx, "MDServerRemote: getFoldersForRekey "+
					"failed with %v", err)
			}
			md.resetRekeyTimer()
		case <-ctx.Done():
			return
		}
	}
}
// GetKeyBundles implements the MDServer interface for MDServerRemote.
//
// Fetches the writer and/or reader key bundles by ID, decodes them,
// and verifies each decoded bundle re-hashes to the requested ID
// before returning it. Either result may be nil if the server sent no
// bytes for that bundle.
func (md *MDServerRemote) GetKeyBundles(ctx context.Context,
	tlf tlf.ID, wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) (
	wkb *TLFWriterKeyBundleV3, rkb *TLFReaderKeyBundleV3, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: GetKeyBundles %s %s %s", tlf, wkbID, rkbID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: GetKeyBundles %s %s %s done (err=%v)", tlf, wkbID, rkbID, err)
	}()

	arg := keybase1.GetKeyBundlesArg{
		FolderID:       tlf.String(),
		WriterBundleID: wkbID.String(),
		ReaderBundleID: rkbID.String(),
	}

	response, err := md.getClient().GetKeyBundles(ctx, arg)
	if err != nil {
		return nil, nil, err
	}

	if response.WriterBundle.Bundle != nil {
		// Only the segregated-bundle wire version is understood.
		if response.WriterBundle.Version != int(SegregatedKeyBundlesVer) {
			err = fmt.Errorf("Unsupported writer bundle version: %d",
				response.WriterBundle.Version)
			return nil, nil, err
		}
		wkb = new(TLFWriterKeyBundleV3)
		err = md.config.Codec().Decode(response.WriterBundle.Bundle, wkb)
		if err != nil {
			return nil, nil, err
		}
		// Verify it's what we expect: the decoded bundle must hash
		// back to the ID we asked for.
		bundleID, err := md.config.Crypto().MakeTLFWriterKeyBundleID(*wkb)
		if err != nil {
			return nil, nil, err
		}
		if bundleID != wkbID {
			err = fmt.Errorf("Expected writer bundle ID %s, got: %s",
				wkbID, bundleID)
			return nil, nil, err
		}
	}

	if response.ReaderBundle.Bundle != nil {
		// Same validation as the writer bundle above.
		if response.ReaderBundle.Version != int(SegregatedKeyBundlesVer) {
			err = fmt.Errorf("Unsupported reader bundle version: %d",
				response.ReaderBundle.Version)
			return nil, nil, err
		}
		rkb = new(TLFReaderKeyBundleV3)
		err = md.config.Codec().Decode(response.ReaderBundle.Bundle, rkb)
		if err != nil {
			return nil, nil, err
		}
		// Verify it's what we expect.
		bundleID, err := md.config.Crypto().MakeTLFReaderKeyBundleID(*rkb)
		if err != nil {
			return nil, nil, err
		}
		if bundleID != rkbID {
			err = fmt.Errorf("Expected reader bundle ID %s, got: %s",
				rkbID, bundleID)
			return nil, nil, err
		}
	}

	return wkb, rkb, nil
}
// FastForwardBackoff implements the MDServer interface for MDServerRemote.
// Tells the current connection to skip the remainder of its initial
// reconnect backoff window; the read lock keeps md.conn stable for
// the duration of the call.
func (md *MDServerRemote) FastForwardBackoff() {
	md.connMu.RLock()
	defer md.connMu.RUnlock()
	md.conn.FastForwardInitialBackoffTimer()
}
// resetRekeyTimer re-arms the background rekey timer with a fresh
// randomized interval.
func (md *MDServerRemote) resetRekeyTimer() {
	next := nextRekeyTime()
	md.rekeyTimer.Reset(next)
}
// nextRekeyTime returns the time remaining to the next rekey.
// The time returned is random with the formula:
// MdServerBackgroundRekeyPeriod/2 + (k * (MdServerBackgroundRekeyPeriod/n))
// average: MdServerBackgroundRekeyPeriod
// minimum: MdServerBackgroundRekeyPeriod/2
// maximum: MdServerBackgroundRekeyPeriod*1.5
// k=0..n, random uniformly distributed.
func nextRekeyTime() time.Duration {
	// One random byte gives k in [0, 0xFF].
	var b [1]byte
	if err := kbfscrypto.RandRead(b[:]); err != nil {
		panic("nextRekeyTime: Random source broken!")
	}
	jitter := time.Duration(b[0]) * (MdServerBackgroundRekeyPeriod / 0xFF)
	return (MdServerBackgroundRekeyPeriod / 2) + jitter
}
mdserver_remote: don't check for rekeys in single-op mode
Issue: KBFS-2342
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"net"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const (
	// MdServerTokenServer is the expected server type for mdserver authentication.
	MdServerTokenServer = "kbfs_md"
	// MdServerTokenExpireIn is the TTL to use when constructing an authentication token.
	// NOTE(review): the value is 7200, presumably seconds — confirm
	// NewAuthToken's expected unit.
	MdServerTokenExpireIn = 2 * 60 * 60 // 2 hours
	// MdServerBackgroundRekeyPeriod is how long the rekey checker
	// waits between runs on average. The timer gets reset after
	// every incoming FolderNeedsRekey RPC.
	// The amount of wait is calculated in nextRekeyTime.
	MdServerBackgroundRekeyPeriod = 1 * time.Hour
	// MdServerDefaultPingIntervalSeconds is the default interval on which the
	// client should contact the MD Server
	MdServerDefaultPingIntervalSeconds = 10
	// MdServerPingTimeout is how long to wait for a ping response
	// before breaking the connection and trying to reconnect.
	MdServerPingTimeout = 30 * time.Second
)
// MDServerRemote is an implementation of the MDServer interface.
type MDServerRemote struct {
	config Config
	log traceLogger
	deferLog traceLogger
	mdSrvAddr string
	connOpts rpc.ConnectionOpts
	rpcLogFactory *libkb.RPCLogFactory
	authToken *kbfscrypto.AuthToken
	// squelchRekey disables rekey handling; set only by
	// DisableRekeyUpdatesForTesting.
	squelchRekey bool
	pinger pinger

	// authenticatedMtx protects isAuthenticated, which records
	// whether the last resetAuth succeeded.
	authenticatedMtx sync.RWMutex
	isAuthenticated bool

	// connMu protects conn and client, which are replaced together
	// by initNewConnection.
	connMu sync.RWMutex
	conn *rpc.Connection
	client keybase1.MetadataClient

	observerMu sync.Mutex // protects observers
	// chan is nil if we have unregistered locally, but not yet with
	// the server.
	observers map[tlf.ID]chan<- error

	tickerCancel context.CancelFunc
	tickerMu sync.Mutex // protects the ticker cancel function

	// rekeyCancel stops backgroundRekeyChecker; rekeyTimer paces it.
	rekeyCancel context.CancelFunc
	rekeyTimer *time.Timer

	// serverOffsetMu protects the estimated local-minus-server clock
	// offset computed by pingOnce.
	serverOffsetMu sync.RWMutex
	serverOffsetKnown bool
	serverOffset time.Duration
}
// Compile-time assertions that *MDServerRemote satisfies every
// interface it is used as.

// Test that MDServerRemote fully implements the MDServer interface.
var _ MDServer = (*MDServerRemote)(nil)

// Test that MDServerRemote fully implements the KeyServer interface.
var _ KeyServer = (*MDServerRemote)(nil)

// Test that MDServerRemote fully implements the AuthTokenRefreshHandler interface.
var _ kbfscrypto.AuthTokenRefreshHandler = (*MDServerRemote)(nil)

// Test that MDServerRemote fully implements the ConnectionHandler interface.
var _ rpc.ConnectionHandler = (*MDServerRemote)(nil)
// NewMDServerRemote returns a new instance of MDServerRemote.
func NewMDServerRemote(config Config, srvAddr string,
	rpcLogFactory *libkb.RPCLogFactory) *MDServerRemote {
	log := config.MakeLogger("")
	// Deferred log lines report the caller's location, not the defer's.
	deferLog := log.CloneWithAddedDepth(1)
	mdServer := &MDServerRemote{
		config:        config,
		observers:     make(map[tlf.ID]chan<- error),
		log:           traceLogger{log},
		deferLog:      traceLogger{deferLog},
		mdSrvAddr:     srvAddr,
		rpcLogFactory: rpcLogFactory,
		// First background rekey check fires at a randomized delay;
		// see nextRekeyTime().
		rekeyTimer: time.NewTimer(nextRekeyTime()),
	}
	mdServer.pinger = pinger{
		name:    "MDServerRemote",
		doPing:  mdServer.pingOnce,
		timeout: MdServerPingTimeout,
		log:     mdServer.log,
	}
	mdServer.authToken = kbfscrypto.NewAuthToken(config.Crypto(),
		MdServerTokenServer, MdServerTokenExpireIn,
		"libkbfs_mdserver_remote", VersionString(), mdServer)
	// Reconnect on a constant interval, with a random initial window.
	constBackoff := backoff.NewConstantBackOff(RPCReconnectInterval)
	mdServer.connOpts = rpc.ConnectionOpts{
		WrapErrorFunc:                 libkb.WrapError,
		TagsFunc:                      libkb.LogTagsFromContext,
		ReconnectBackoff:              func() backoff.BackOff { return constBackoff },
		InitialReconnectBackoffWindow: mdserverReconnectBackoffWindow,
	}
	mdServer.initNewConnection()
	// Check for rekey opportunities periodically.
	rekeyCtx, rekeyCancel := context.WithCancel(context.Background())
	mdServer.rekeyCancel = rekeyCancel
	if config.Mode() != InitSingleOp {
		go mdServer.backgroundRekeyChecker(rekeyCtx)
	}
	return mdServer
}
// getIsAuthenticated reports whether the most recent resetAuth against
// the server succeeded.
func (md *MDServerRemote) getIsAuthenticated() bool {
	md.authenticatedMtx.RLock()
	authed := md.isAuthenticated
	md.authenticatedMtx.RUnlock()
	return authed
}
// setIsAuthenticated records the current authentication state under the
// write lock.
func (md *MDServerRemote) setIsAuthenticated(isAuthenticated bool) {
	md.authenticatedMtx.Lock()
	md.isAuthenticated = isAuthenticated
	md.authenticatedMtx.Unlock()
}
// initNewConnection (re)creates the TLS RPC connection to the mdserver,
// shutting down any previous connection first. Both conn and client are
// swapped under connMu so concurrent getClient()/getConn() callers see a
// consistent pair.
func (md *MDServerRemote) initNewConnection() {
	md.connMu.Lock()
	defer md.connMu.Unlock()
	if md.conn != nil {
		md.conn.Shutdown()
	}
	md.conn = rpc.NewTLSConnection(
		md.mdSrvAddr, kbfscrypto.GetRootCerts(md.mdSrvAddr),
		kbfsmd.ServerErrorUnwrapper{}, md, md.rpcLogFactory,
		md.config.MakeLogger(""), md.connOpts)
	md.client = keybase1.MetadataClient{Cli: md.conn.GetClient()}
}
// RemoteAddress returns the remote mdserver this client is talking to
func (md *MDServerRemote) RemoteAddress() string {
	addr := md.mdSrvAddr
	return addr
}
// HandlerName implements the ConnectionHandler interface.
func (*MDServerRemote) HandlerName() string {
	const name = "MDServerRemote"
	return name
}
// OnConnect implements the ConnectionHandler interface.
func (md *MDServerRemote) OnConnect(ctx context.Context,
	conn *rpc.Connection, client rpc.GenericClient,
	server *rpc.Server) (err error) {
	// Only notify the reporter once the whole connect sequence succeeds.
	defer func() {
		if err == nil {
			md.config.Reporter().Notify(ctx,
				connectionNotification(connectionStatusConnected))
		}
	}()
	md.log.CDebugf(ctx, "OnConnect called with a new connection")
	// we'll get replies asynchronously as to not block the connection
	// for doing other active work for the user. they will be sent to
	// the FolderNeedsRekey handler.
	if err := server.Register(keybase1.MetadataUpdateProtocol(md)); err != nil {
		// Re-registering after a reconnect is expected and harmless.
		if _, ok := err.(rpc.AlreadyRegisteredError); !ok {
			return err
		}
	}
	// reset auth -- using md.client here would cause problematic recursion.
	c := keybase1.MetadataClient{Cli: client}
	pingIntervalSeconds, err := md.resetAuth(ctx, c)
	switch err.(type) {
	case nil:
	case NoCurrentSessionError:
		// A logged-out user keeps the connection, just unauthenticated.
		md.log.CDebugf(ctx, "Logged-out user")
	default:
		return err
	}
	// Connection (re)established: clear any stale connection error.
	md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, nil)
	// start pinging
	md.pinger.resetTicker(pingIntervalSeconds)
	return nil
}
// ctxMDServerResetKeyType is a private context-key type so this file's
// context values cannot collide with keys from other packages.
type ctxMDServerResetKeyType int
const (
	// ctxMDServerResetKey identifies whether the current context has
	// already passed through `MDServerRemote.resetAuth`.
	ctxMDServerResetKey ctxMDServerResetKeyType = iota
)
// resetAuth is called to reset the authorization on an MDServer
// connection. If this function returns NoCurrentSessionError, the
// caller should treat this as a logged-out user.
func (md *MDServerRemote) resetAuth(
	ctx context.Context, c keybase1.MetadataClient) (int, error) {
	// Tag the context so RefreshAuthToken can detect re-entry and avoid
	// recursing back into resetAuth.
	ctx = context.WithValue(ctx, ctxMDServerResetKey, "1")
	isAuthenticated := false
	defer func() {
		md.setIsAuthenticated(isAuthenticated)
	}()
	session, err := md.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		md.log.CDebugf(ctx,
			"Error getting current session (%+v), skipping resetAuth", err)
		// No session: stay anonymous but keep pinging at the default rate.
		return MdServerDefaultPingIntervalSeconds, err
	}
	challenge, err := c.GetChallenge(ctx)
	if err != nil {
		md.log.CWarningf(ctx, "challenge request error: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "received challenge")
	// get a new signature
	signature, err := md.authToken.Sign(ctx, session.Name, session.UID,
		session.VerifyingKey, challenge)
	if err != nil {
		md.log.CWarningf(ctx, "error signing authentication token: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "authentication token signed")
	// authenticate
	pingIntervalSeconds, err := c.Authenticate(ctx, signature)
	if err != nil {
		md.log.CWarningf(ctx, "authentication error: %v", err)
		return 0, err
	}
	md.log.CDebugf(ctx, "authentication successful; ping interval: %ds",
		pingIntervalSeconds)
	isAuthenticated = true
	md.authenticatedMtx.Lock()
	// Request the rekey-folder list only on the transition from
	// unauthenticated to authenticated, and never in single-op mode.
	// The defer runs after the lock below is released.
	if !md.isAuthenticated && md.config.Mode() != InitSingleOp {
		defer func() {
			// request a list of folders needing rekey action
			if err := md.getFoldersForRekey(ctx, c); err != nil {
				md.log.CWarningf(ctx, "getFoldersForRekey failed with %v", err)
			}
			md.deferLog.CDebugf(ctx,
				"requested list of folders for rekey")
		}()
	}
	// Need to ensure that any conflicting thread gets the updated value
	md.isAuthenticated = true
	md.authenticatedMtx.Unlock()
	return pingIntervalSeconds, nil
}
// getClient returns the current metadata client under the connection
// read lock.
func (md *MDServerRemote) getClient() keybase1.MetadataClient {
	md.connMu.RLock()
	client := md.client
	md.connMu.RUnlock()
	return client
}
// RefreshAuthToken implements the AuthTokenRefreshHandler interface.
func (md *MDServerRemote) RefreshAuthToken(ctx context.Context) {
	md.log.CDebugf(ctx, "MDServerRemote: Refreshing auth token...")
	if v := ctx.Value(ctxMDServerResetKey); v != nil {
		md.log.CDebugf(ctx, "Avoiding resetAuth recursion")
		return
	}
	_, err := md.resetAuth(ctx, md.getClient())
	if err == nil {
		md.log.CDebugf(ctx, "MDServerRemote: auth token refreshed")
		return
	}
	if _, noSession := err.(NoCurrentSessionError); noSession {
		md.log.CDebugf(ctx,
			"MDServerRemote: no session available, connection remains anonymous")
		return
	}
	md.log.CDebugf(ctx,
		"MDServerRemote: error refreshing auth token: %v", err)
	// TODO: once KBFS-1982 is merged, an unknown error here
	// should just cause a complete disconnect, and we can let the
	// rpc connection do the retry.
}
// pingOnce sends a single ping to the server, reinitializing the
// connection on timeout (if authenticated) and otherwise updating the
// estimated server clock offset from the round-trip latency.
func (md *MDServerRemote) pingOnce(ctx context.Context) {
	clock := md.config.Clock()
	beforePing := clock.Now()
	resp, err := md.getClient().Ping2(ctx)
	if err == context.DeadlineExceeded {
		if md.getIsAuthenticated() {
			md.log.CDebugf(ctx, "Ping timeout -- reinitializing connection")
			md.initNewConnection()
		} else {
			md.log.CDebugf(ctx, "Ping timeout but not reinitializing")
		}
		return
	} else if err != nil {
		md.log.CDebugf(ctx, "MDServerRemote: ping error %s", err)
		return
	}
	afterPing := clock.Now()
	pingLatency := afterPing.Sub(beforePing)
	// BUG FIX: the previous code read md.serverOffset here without
	// holding serverOffsetMu, racing with the locked write below. Use
	// the accessor, which takes the read lock.
	serverOffset, _ := md.OffsetFromServerTime()
	// Once we have an offset, ignore anomalously slow pings; they would
	// skew the estimate.
	if serverOffset > 0 && pingLatency > 5*time.Second {
		md.log.CDebugf(ctx, "Ignoring large ping time: %s",
			pingLatency)
		return
	}
	serverTimeNow :=
		keybase1.FromTime(resp.Timestamp).Add(pingLatency / 2)
	func() {
		md.serverOffsetMu.Lock()
		defer md.serverOffsetMu.Unlock()
		// Estimate the server offset, assuming a balanced
		// round trip latency (and 0 server processing
		// latency). Calculate it so that it can be added
		// to a server timestamp in order to get the local
		// time of a server-timestamped event.
		md.serverOffset = afterPing.Sub(serverTimeNow)
		md.serverOffsetKnown = true
	}()
}
// OnConnectError implements the ConnectionHandler interface.
func (md *MDServerRemote) OnConnectError(err error, wait time.Duration) {
	md.log.CWarningf(context.TODO(),
		"MDServerRemote: connection error: %q; retrying in %s", err, wait)
	// TODO: it might make sense to show something to the user if this is
	// due to authentication, for example.
	md.cancelObservers()
	md.pinger.cancelTicker()
	if token := md.authToken; token != nil {
		token.Shutdown()
	}
	md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, err)
}
// OnDoCommandError implements the ConnectionHandler interface.
func (md *MDServerRemote) OnDoCommandError(err error, wait time.Duration) {
	md.log.CWarningf(context.TODO(),
		"MDServerRemote: DoCommand error: %q; retrying in %s", err, wait)
	// Only push errors that should not be retried as connection status changes.
	if md.ShouldRetry("", err) {
		return
	}
	md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, err)
}
// OnDisconnected implements the ConnectionHandler interface.
func (md *MDServerRemote) OnDisconnected(ctx context.Context,
	status rpc.DisconnectStatus) {
	// Only warn/notify when we had previously been connected.
	if status == rpc.StartingNonFirstConnection {
		md.log.CWarningf(ctx, "MDServerRemote is disconnected")
		md.config.Reporter().Notify(ctx,
			connectionNotification(connectionStatusDisconnected))
	}
	// The server clock-offset estimate is only valid per connection.
	func() {
		md.serverOffsetMu.Lock()
		defer md.serverOffsetMu.Unlock()
		md.serverOffsetKnown = false
		md.serverOffset = 0
	}()
	md.setIsAuthenticated(false)
	md.cancelObservers()
	md.pinger.cancelTicker()
	if md.authToken != nil {
		md.authToken.Shutdown()
	}
	// Drop the queued rekeys; the server re-sends rekey folders on
	// reconnect (see the comment below).
	md.config.RekeyQueue().Shutdown()
	md.config.SetRekeyQueue(NewRekeyQueueStandard(md.config))
	// Reset the timer since we will get folders for rekey again on
	// the re-connect.
	md.resetRekeyTimer()
	if status == rpc.StartingNonFirstConnection {
		md.config.KBFSOps().PushConnectionStatusChange(MDServiceName, errDisconnected{})
	}
}
// ShouldRetry implements the ConnectionHandler interface.
func (md *MDServerRemote) ShouldRetry(name string, err error) bool {
	// Only server throttle errors warrant an automatic retry.
	if _, throttled := err.(kbfsmd.ServerErrorThrottle); throttled {
		return true
	}
	return false
}
// ShouldRetryOnConnect implements the ConnectionHandler interface.
func (md *MDServerRemote) ShouldRetryOnConnect(err error) bool {
	// Give up only if the user canceled an input prompt.
	if _, canceled := err.(libkb.InputCanceledError); canceled {
		return false
	}
	return true
}
// CheckReachability implements the MDServer interface.
func (md *MDServerRemote) CheckReachability(ctx context.Context) {
	conn, err := net.DialTimeout("tcp", md.mdSrvAddr, MdServerPingTimeout)
	switch {
	case err == nil:
		// Server reachable; nothing to do.
	case md.getIsAuthenticated():
		md.log.CDebugf(ctx, "MDServerRemote: CheckReachability(): "+
			"failed to connect, reconnecting: %s", err.Error())
		md.initNewConnection()
	default:
		md.log.CDebugf(ctx, "MDServerRemote: CheckReachability(): "+
			"failed to connect (%s), but not reconnecting", err.Error())
	}
	if conn != nil {
		conn.Close()
	}
}
// cancelObservers signals a disconnect error to, and clears, every
// registered observer.
func (md *MDServerRemote) cancelObservers() {
	md.observerMu.Lock()
	defer md.observerMu.Unlock()
	// signalObserverLocked deletes each entry; deleting while ranging
	// over a map is safe in Go.
	for id, ch := range md.observers {
		md.signalObserverLocked(ch, id, MDServerDisconnected{})
	}
}
// CancelRegistration implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) CancelRegistration(ctx context.Context, id tlf.ID) {
	md.observerMu.Lock()
	defer md.observerMu.Unlock()
	ch, registered := md.observers[id]
	if !registered {
		// Nothing to cancel.
		return
	}
	// Deliver a cancellation error to the waiting observer.
	md.signalObserverLocked(ch, id, errors.New("Registration canceled"))
	// Setting nil here indicates that the remote MD server thinks
	// we're still registered, though locally no one is listening.
	md.observers[id] = nil
}
// signalObserverLocked delivers err to the observer (if any) and drops
// its registration. The observer lock must be held.
func (md *MDServerRemote) signalObserverLocked(observerChan chan<- error, id tlf.ID, err error) {
	defer delete(md.observers, id)
	if observerChan == nil {
		return
	}
	observerChan <- err
	close(observerChan)
}
// get issues a GetMetadata RPC and deserializes the returned metadata
// blocks.
func (md *MDServerRemote) get(ctx context.Context, arg keybase1.GetMetadataArg) (
	tlfID tlf.ID, rmdses []*RootMetadataSigned, err error) {
	// Issue the RPC.
	response, err := md.getClient().GetMetadata(ctx, arg)
	if err != nil {
		return tlf.ID{}, nil, err
	}
	// Parse the folder ID out of the response.
	tlfID, err = tlf.ParseID(response.FolderID)
	if err != nil {
		return tlf.ID{}, nil, err
	}
	// Deserialize each returned metadata block.
	maxVer := md.config.MetadataVersion()
	rmdses = make([]*RootMetadataSigned, 0, len(response.MdBlocks))
	for _, block := range response.MdBlocks {
		rmds, err := DecodeRootMetadataSigned(
			md.config.Codec(), tlfID, MetadataVer(block.Version), maxVer,
			block.Block, keybase1.FromTime(block.Timestamp))
		if err != nil {
			return tlf.ID{}, nil, err
		}
		rmdses = append(rmdses, rmds)
	}
	return tlfID, rmdses, nil
}
// GetForHandle implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) GetForHandle(ctx context.Context,
	handle tlf.Handle, mStatus MergeStatus) (
	tlfID tlf.ID, rmds *RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	// TODO: Ideally, *tlf.Handle would have a nicer String() function.
	md.log.LazyTrace(ctx, "MDServer: GetForHandle %+v %s", handle, mStatus)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetForHandle %+v %s done (err=%v)", handle, mStatus, err)
	}()
	encodedHandle, err := md.config.Codec().Encode(handle)
	if err != nil {
		return tlf.ID{}, nil, err
	}
	// BranchID needs to be present when Unmerged is true;
	// NullBranchID signals that the folder's current branch ID
	// should be looked up.
	arg := keybase1.GetMetadataArg{
		FolderHandle: encodedHandle,
		BranchID:     NullBranchID.String(),
		Unmerged:     mStatus == Unmerged,
	}
	id, rmdses, err := md.get(ctx, arg)
	if err != nil {
		return tlf.ID{}, nil, err
	}
	// The server may return the ID with no metadata blocks at all.
	if len(rmdses) == 0 {
		return id, nil, nil
	}
	// TODO: Error if server returns more than one rmds.
	return id, rmdses[0], nil
}
// GetForTLF implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) GetForTLF(ctx context.Context, id tlf.ID,
	bid BranchID, mStatus MergeStatus) (rmds *RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetForTLF %s %s %s", id, bid, mStatus)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetForTLF %s %s %s done (err=%v)", id, bid, mStatus, err)
	}()
	_, rmdses, err := md.get(ctx, keybase1.GetMetadataArg{
		FolderID: id.String(),
		BranchID: bid.String(),
		Unmerged: mStatus == Unmerged,
	})
	switch {
	case err != nil:
		return nil, err
	case len(rmdses) == 0:
		return nil, nil
	default:
		// TODO: Error if server returns more than one rmds.
		return rmdses[0], nil
	}
}
// GetRange implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) GetRange(ctx context.Context, id tlf.ID,
	bid BranchID, mStatus MergeStatus, start, stop kbfsmd.Revision) (
	rmdses []*RootMetadataSigned, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetRange %s %s %s %d-%d", id, bid, mStatus, start, stop)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetRange %s %s %s %d-%d done (err=%v)", id, bid, mStatus, start, stop, err)
	}()
	// Delegate to the shared get() helper; the TLF ID in the reply is
	// not needed here.
	_, result, err := md.get(ctx, keybase1.GetMetadataArg{
		FolderID:      id.String(),
		BranchID:      bid.String(),
		Unmerged:      mStatus == Unmerged,
		StartRevision: start.Number(),
		StopRevision:  stop.Number(),
	})
	return result, err
}
// Put implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) Put(ctx context.Context, rmds *RootMetadataSigned,
	extra ExtraMetadata) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: Put %s %d", rmds.MD.TlfID(), rmds.MD.RevisionNumber())
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: Put %s %d done (err=%v)", rmds.MD.TlfID(), rmds.MD.RevisionNumber(), err)
	}()
	// encode MD block
	rmdsBytes, err := EncodeRootMetadataSigned(md.config.Codec(), rmds)
	if err != nil {
		return err
	}
	// put request
	arg := keybase1.PutMetadataArg{
		MdBlock: keybase1.MDBlock{
			Version: int(rmds.Version()),
			Block:   rmdsBytes,
		},
		LogTags: nil,
	}
	// Key bundles travel separately only for segregated-key-bundle
	// versions; older versions must not carry an ExtraMetadata.
	if rmds.Version() < SegregatedKeyBundlesVer {
		if extra != nil {
			return fmt.Errorf("Unexpected non-nil extra: %+v", extra)
		}
	} else if extra != nil {
		// For now, if we have a non-nil extra, it must be
		// *ExtraMetadataV3, but in the future it might be
		// some other type (e.g., *ExtraMetadataV4).
		extraV3, ok := extra.(*ExtraMetadataV3)
		if !ok {
			return fmt.Errorf("Extra of unexpected type %T", extra)
		}
		// Add any new key bundles.
		if extraV3.wkbNew {
			wkbBytes, err := md.config.Codec().Encode(extraV3.wkb)
			if err != nil {
				return err
			}
			arg.WriterKeyBundle = keybase1.KeyBundle{
				Version: int(rmds.Version()),
				Bundle:  wkbBytes,
			}
		}
		if extraV3.rkbNew {
			rkbBytes, err := md.config.Codec().Encode(extraV3.rkb)
			if err != nil {
				return err
			}
			arg.ReaderKeyBundle = keybase1.KeyBundle{
				Version: int(rmds.Version()),
				Bundle:  rkbBytes,
			}
		}
	}
	return md.getClient().PutMetadata(ctx, arg)
}
// PruneBranch implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) PruneBranch(
	ctx context.Context, id tlf.ID, bid BranchID) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: PruneBranch %s %s", id, bid)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: PruneBranch %s %s (err=%v)", id, bid, err)
	}()
	// Ask the server to drop the given branch.
	return md.getClient().PruneBranch(ctx, keybase1.PruneBranchArg{
		FolderID: id.String(),
		BranchID: bid.String(),
		LogTags:  nil,
	})
}
// MetadataUpdate implements the MetadataUpdateProtocol interface.
func (md *MDServerRemote) MetadataUpdate(_ context.Context, arg keybase1.MetadataUpdateArg) error {
	folderID, err := tlf.ParseID(arg.FolderID)
	if err != nil {
		return err
	}
	md.observerMu.Lock()
	defer md.observerMu.Unlock()
	if ch, registered := md.observers[folderID]; registered {
		// signal that we've seen the update
		md.signalObserverLocked(ch, folderID, nil)
	}
	return nil
}
// FoldersNeedRekey implements the MetadataUpdateProtocol interface.
func (md *MDServerRemote) FoldersNeedRekey(ctx context.Context,
	requests []keybase1.RekeyRequest) error {
	if md.squelchRekey {
		md.log.CDebugf(ctx, "MDServerRemote: rekey updates squelched for testing")
		return nil
	}
	for _, request := range requests {
		folderID, err := tlf.ParseID(request.FolderID)
		if err != nil {
			return err
		}
		md.log.CDebugf(ctx, "MDServerRemote: folder needs rekey: %s", folderID.String())
		// queue the folder for rekeying
		md.config.RekeyQueue().Enqueue(folderID)
	}
	// Reset the timer in case there are a lot of rekey folders
	// dribbling in from the server still.
	md.resetRekeyTimer()
	return nil
}
// FolderNeedsRekey implements the MetadataUpdateProtocol interface.
func (md *MDServerRemote) FolderNeedsRekey(ctx context.Context,
	arg keybase1.FolderNeedsRekeyArg) error {
	folderID, err := tlf.ParseID(arg.FolderID)
	if err != nil {
		return err
	}
	md.log.CDebugf(ctx, "MDServerRemote: folder needs rekey: %s", folderID.String())
	if md.squelchRekey {
		md.log.CDebugf(ctx, "MDServerRemote: rekey updates squelched for testing")
		return nil
	}
	// queue the folder for rekeying
	md.config.RekeyQueue().Enqueue(folderID)
	// Reset the timer in case there are a lot of rekey folders
	// dribbling in from the server still.
	md.resetRekeyTimer()
	return nil
}
// getConn returns the current RPC connection under the read lock.
func (md *MDServerRemote) getConn() *rpc.Connection {
	md.connMu.RLock()
	conn := md.conn
	md.connMu.RUnlock()
	return conn
}
// RegisterForUpdate implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) RegisterForUpdate(ctx context.Context, id tlf.ID,
	currHead kbfsmd.Revision) (<-chan error, error) {
	arg := keybase1.RegisterForUpdatesArg{
		FolderID:     id.String(),
		CurrRevision: currHead.Number(),
		LogTags:      nil,
	}
	// register
	var c chan error
	conn := md.getConn()
	err := conn.DoCommand(ctx, "register", func(rawClient rpc.GenericClient) error {
		// set up the server to receive updates, since we may
		// get disconnected between retries.
		server := conn.GetServer()
		err := server.Register(keybase1.MetadataUpdateProtocol(md))
		if err != nil {
			// Already registered from a previous attempt is fine.
			if _, ok := err.(rpc.AlreadyRegisteredError); !ok {
				return err
			}
		}
		// TODO: Do something with server.Err() when server is
		// done?
		server.Run()
		// keep re-adding the observer on retries, since
		// disconnects or connection errors clear observers.
		alreadyRegistered := func() bool {
			md.observerMu.Lock()
			defer md.observerMu.Unlock()
			// It's possible for a nil channel to be in
			// `md.observers`, if we are still registered with the
			// server after a previous cancellation.
			existingCh, alreadyRegistered := md.observers[id]
			if existingCh != nil {
				panic(fmt.Sprintf(
					"Attempted double-registration for folder: %s", id))
			}
			// 1-buffered so a signal never blocks the sender.
			c = make(chan error, 1)
			md.observers[id] = c
			return alreadyRegistered
		}()
		if alreadyRegistered {
			// The server still holds our registration; no RPC needed.
			return nil
		}
		// Use this instead of md.client since we're already
		// inside a DoCommand().
		c := keybase1.MetadataClient{Cli: rawClient}
		err = c.RegisterForUpdates(ctx, arg)
		if err != nil {
			func() {
				md.observerMu.Lock()
				defer md.observerMu.Unlock()
				// we could've been canceled by a shutdown so look this up
				// again before closing and deleting.
				if updateChan, ok := md.observers[id]; ok {
					close(updateChan)
					delete(md.observers, id)
				}
			}()
		}
		return err
	})
	if err != nil {
		c = nil
	}
	return c, err
}
// TruncateLock implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) TruncateLock(ctx context.Context, id tlf.ID) (
	locked bool, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: TruncateLock %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: TruncateLock %s (err=%v)", id, err)
	}()
	locked, err = md.getClient().TruncateLock(ctx, id.String())
	return locked, err
}
// TruncateUnlock implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) TruncateUnlock(ctx context.Context, id tlf.ID) (
	unlocked bool, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: TruncateUnlock %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: TruncateUnlock %s (err=%v)", id, err)
	}()
	unlocked, err = md.getClient().TruncateUnlock(ctx, id.String())
	return unlocked, err
}
// GetLatestHandleForTLF implements the MDServer interface for MDServerRemote.
func (md *MDServerRemote) GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (
	handle tlf.Handle, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "MDServer: GetLatestHandle %s", id)
	defer func() {
		md.deferLog.LazyTrace(ctx, "MDServer: GetLatestHandle %s (err=%v)", id, err)
	}()
	// Fetch the encoded handle, then decode it in place.
	var buf []byte
	if buf, err = md.getClient().GetLatestFolderHandle(ctx, id.String()); err != nil {
		return tlf.Handle{}, err
	}
	if err = md.config.Codec().Decode(buf, &handle); err != nil {
		return tlf.Handle{}, err
	}
	return handle, nil
}
// OffsetFromServerTime implements the MDServer interface for
// MDServerRemote.
func (md *MDServerRemote) OffsetFromServerTime() (time.Duration, bool) {
	md.serverOffsetMu.RLock()
	offset, known := md.serverOffset, md.serverOffsetKnown
	md.serverOffsetMu.RUnlock()
	return offset, known
}
// CheckForRekeys implements the MDServer interface.
// The returned 1-buffered channel receives exactly one value: the error
// from the deferred check (or nil on success).
func (md *MDServerRemote) CheckForRekeys(ctx context.Context) <-chan error {
	// Wait 5 seconds before asking for rekeys, because the server
	// could have an out-of-date cache if we ask too soon. Why 5
	// seconds you ask? See `pollWait` in
	// github.com/keybase/client/go/auth/user_keys_api.go. We don't
	// use that value directly since there's no guarantee the server
	// is using the same value. TODO: the server should tell us what
	// value it is using.
	c := make(chan error, 1)
	if md.config.Mode() == InitSingleOp {
		c <- nil
		return c
	}
	time.AfterFunc(5*time.Second, func() {
		md.log.CInfof(ctx, "CheckForRekeys: checking for rekeys")
		select {
		case <-ctx.Done():
			// BUG FIX: return after reporting the cancellation. The
			// previous code fell through and performed a second send
			// into the 1-buffered channel, blocking this goroutine
			// forever.
			c <- ctx.Err()
			return
		default:
		}
		if err := md.getFoldersForRekey(ctx, md.getClient()); err != nil {
			md.log.CDebugf(ctx, "getFoldersForRekey failed during "+
				"CheckForRekeys: %v", err)
			// BUG FIX: likewise, return after sending the error rather
			// than also sending nil below.
			c <- err
			return
		}
		md.resetRekeyTimer()
		c <- nil
	})
	return c
}
// getFoldersForRekey registers to receive updates about folders needing rekey actions.
func (md *MDServerRemote) getFoldersForRekey(ctx context.Context,
	client keybase1.MetadataClient) error {
	// The server needs this device's crypt public key ID.
	session, err := md.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}
	deviceKID := session.CryptPublicKey.KID()
	return client.GetFoldersForRekey(ctx, deviceKID)
}
// Shutdown implements the MDServer interface for MDServerRemote.
// It tears down the connection, observers, ping ticker, auth token
// refresher, and (if running) the background rekey checker.
func (md *MDServerRemote) Shutdown() {
	md.connMu.Lock()
	defer md.connMu.Unlock()
	// close the connection
	md.conn.Shutdown()
	// cancel pending observers
	md.cancelObservers()
	// cancel the ping ticker
	md.pinger.cancelTicker()
	// cancel the auth token ticker
	if md.authToken != nil {
		md.authToken.Shutdown()
	}
	// stop the background rekey checker goroutine, if one was started
	if md.rekeyCancel != nil {
		md.rekeyCancel()
	}
}
// IsConnected implements the MDServer interface for MDServerLocal
func (md *MDServerRemote) IsConnected() bool {
	if conn := md.getConn(); conn != nil {
		return conn.IsConnected()
	}
	return false
}
//
// The below methods support the MD server acting as the key server.
// This will be the case for v1 of KBFS but we may move to our own
// separate key server at some point.
//
// GetTLFCryptKeyServerHalf is an implementation of the KeyServer interface.
func (md *MDServerRemote) GetTLFCryptKeyServerHalf(ctx context.Context,
	serverHalfID TLFCryptKeyServerHalfID,
	cryptKey kbfscrypto.CryptPublicKey) (
	serverHalf kbfscrypto.TLFCryptKeyServerHalf, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: GetTLFCryptKeyServerHalf %s", serverHalfID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: GetTLFCryptKeyServerHalf %s (err=%v)", serverHalfID, err)
	}()
	// Encode the server-half ID for the wire.
	idBytes, err := md.config.Codec().Encode(serverHalfID)
	if err != nil {
		return serverHalf, err
	}
	// Fetch the key half for this device.
	keyBytes, err := md.getClient().GetKey(ctx, keybase1.GetKeyArg{
		KeyHalfID: idBytes,
		DeviceKID: cryptKey.KID().String(),
		LogTags:   nil,
	})
	if err != nil {
		return serverHalf, err
	}
	// Decode the returned key half.
	if err = md.config.Codec().Decode(keyBytes, &serverHalf); err != nil {
		return serverHalf, err
	}
	return serverHalf, nil
}
// PutTLFCryptKeyServerHalves is an implementation of the KeyServer interface.
func (md *MDServerRemote) PutTLFCryptKeyServerHalves(ctx context.Context,
	keyServerHalves UserDeviceKeyServerHalves) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: PutTLFCryptKeyServerHalves %v", keyServerHalves)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: PutTLFCryptKeyServerHalves %v (err=%v)", keyServerHalves, err)
	}()
	// flatten out the map into an array
	var halves []keybase1.KeyHalf
	for user, deviceMap := range keyServerHalves {
		for devicePubKey, serverHalf := range deviceMap {
			encoded, err := md.config.Codec().Encode(serverHalf)
			if err != nil {
				return err
			}
			halves = append(halves, keybase1.KeyHalf{
				User:      user,
				DeviceKID: devicePubKey.KID(),
				Key:       encoded,
			})
		}
	}
	// put the keys
	return md.getClient().PutKeys(ctx, keybase1.PutKeysArg{
		KeyHalves: halves,
		LogTags:   nil,
	})
}
// DeleteTLFCryptKeyServerHalf is an implementation of the KeyServer interface.
func (md *MDServerRemote) DeleteTLFCryptKeyServerHalf(ctx context.Context,
	uid keybase1.UID, key kbfscrypto.CryptPublicKey,
	serverHalfID TLFCryptKeyServerHalfID) (err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: DeleteTLFCryptKeyServerHalf %s %s", uid, serverHalfID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: DeleteTLFCryptKeyServerHalf %s %s done (err=%v)", uid, serverHalfID, err)
	}()
	// encode the ID
	idBytes, err := md.config.Codec().Encode(serverHalfID)
	if err != nil {
		return err
	}
	// Issue the deletion request for this device's key half.
	return md.getClient().DeleteKey(ctx, keybase1.DeleteKeyArg{
		Uid:       uid,
		DeviceKID: key.KID(),
		KeyHalfID: idBytes,
		LogTags:   nil,
	})
}
// DisableRekeyUpdatesForTesting implements the MDServer interface.
// It stops both incoming rekey notifications (squelchRekey) and the
// background rekey timer.
func (md *MDServerRemote) DisableRekeyUpdatesForTesting() {
	// This doesn't need a lock for testing.
	md.squelchRekey = true
	md.rekeyTimer.Stop()
}
// CtxMDSRTagKey is the type used for unique context tags within MDServerRemote
type CtxMDSRTagKey int
const (
	// CtxMDSRIDKey is the type of the tag for unique operation IDs
	// within MDServerRemote.
	CtxMDSRIDKey CtxMDSRTagKey = iota
)
// CtxMDSROpID is the display name for the unique operation
// MDServerRemote ID tag.
const CtxMDSROpID = "MDSRID"
// backgroundRekeyChecker periodically asks the server for folders
// needing rekey action until ctx is canceled (via md.rekeyCancel).
// The interval between checks is randomized by resetRekeyTimer.
func (md *MDServerRemote) backgroundRekeyChecker(ctx context.Context) {
	for {
		select {
		case <-md.rekeyTimer.C:
			// Skip the check while disconnected; just rearm the timer.
			if !md.getConn().IsConnected() {
				md.resetRekeyTimer()
				continue
			}
			// Assign an ID to this rekey check so we can track it.
			newCtx := CtxWithRandomIDReplayable(ctx, CtxMDSRIDKey, CtxMDSROpID, md.log)
			md.log.CDebugf(newCtx, "Checking for rekey folders")
			if err := md.getFoldersForRekey(
				newCtx, md.getClient()); err != nil {
				md.log.CWarningf(newCtx, "MDServerRemote: getFoldersForRekey "+
					"failed with %v", err)
			}
			md.resetRekeyTimer()
		case <-ctx.Done():
			return
		}
	}
}
// GetKeyBundles implements the MDServer interface for MDServerRemote.
// Each returned bundle (writer and/or reader) is decoded and then
// verified by recomputing its ID and comparing against the requested ID.
func (md *MDServerRemote) GetKeyBundles(ctx context.Context,
	tlf tlf.ID, wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) (
	wkb *TLFWriterKeyBundleV3, rkb *TLFReaderKeyBundleV3, err error) {
	ctx = rpc.WithFireNow(ctx)
	md.log.LazyTrace(ctx, "KeyServer: GetKeyBundles %s %s %s", tlf, wkbID, rkbID)
	defer func() {
		md.deferLog.LazyTrace(ctx, "KeyServer: GetKeyBundles %s %s %s done (err=%v)", tlf, wkbID, rkbID, err)
	}()
	arg := keybase1.GetKeyBundlesArg{
		FolderID:       tlf.String(),
		WriterBundleID: wkbID.String(),
		ReaderBundleID: rkbID.String(),
	}
	response, err := md.getClient().GetKeyBundles(ctx, arg)
	if err != nil {
		return nil, nil, err
	}
	// A nil Bundle means the server had nothing for that ID.
	if response.WriterBundle.Bundle != nil {
		if response.WriterBundle.Version != int(SegregatedKeyBundlesVer) {
			err = fmt.Errorf("Unsupported writer bundle version: %d",
				response.WriterBundle.Version)
			return nil, nil, err
		}
		wkb = new(TLFWriterKeyBundleV3)
		err = md.config.Codec().Decode(response.WriterBundle.Bundle, wkb)
		if err != nil {
			return nil, nil, err
		}
		// Verify it's what we expect.
		bundleID, err := md.config.Crypto().MakeTLFWriterKeyBundleID(*wkb)
		if err != nil {
			return nil, nil, err
		}
		if bundleID != wkbID {
			err = fmt.Errorf("Expected writer bundle ID %s, got: %s",
				wkbID, bundleID)
			return nil, nil, err
		}
	}
	if response.ReaderBundle.Bundle != nil {
		if response.ReaderBundle.Version != int(SegregatedKeyBundlesVer) {
			err = fmt.Errorf("Unsupported reader bundle version: %d",
				response.ReaderBundle.Version)
			return nil, nil, err
		}
		rkb = new(TLFReaderKeyBundleV3)
		err = md.config.Codec().Decode(response.ReaderBundle.Bundle, rkb)
		if err != nil {
			return nil, nil, err
		}
		// Verify it's what we expect.
		bundleID, err := md.config.Crypto().MakeTLFReaderKeyBundleID(*rkb)
		if err != nil {
			return nil, nil, err
		}
		if bundleID != rkbID {
			err = fmt.Errorf("Expected reader bundle ID %s, got: %s",
				rkbID, bundleID)
			return nil, nil, err
		}
	}
	return wkb, rkb, nil
}
// FastForwardBackoff implements the MDServer interface for MDServerRemote.
// The read lock is held across the call so the connection cannot be
// swapped out by initNewConnection underneath it.
func (md *MDServerRemote) FastForwardBackoff() {
	md.connMu.RLock()
	defer md.connMu.RUnlock()
	md.conn.FastForwardInitialBackoffTimer()
}
// resetRekeyTimer rearms the background rekey timer with a fresh
// randomized interval (see nextRekeyTime).
func (md *MDServerRemote) resetRekeyTimer() {
	md.rekeyTimer.Reset(nextRekeyTime())
}
// nextRekeyTime returns the time remaining to the next rekey.
// The time returned is random with the formula:
// MdServerBackgroundRekeyPeriod/2 + (k * (MdServerBackgroundRekeyPeriod/n))
// average: MdServerBackgroundRekeyPeriod
// minimum: MdServerBackgroundRekeyPeriod/2
// maximum: MdServerBackgroundRekeyPeriod*1.5
// k=0..n, random uniformly distributed.
func nextRekeyTime() time.Duration {
	// One random byte gives k in [0, 0xFF].
	var randByte [1]byte
	if err := kbfscrypto.RandRead(randByte[:]); err != nil {
		panic("nextRekeyTime: Random source broken!")
	}
	jitter := time.Duration(randByte[0]) * (MdServerBackgroundRekeyPeriod / 0xFF)
	return MdServerBackgroundRekeyPeriod/2 + jitter
}
|
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"context"
"sync"
"testing"
"time"
"github.com/keybase/go-codec/codec"
"github.com/stretchr/testify/require"
)
// makeRandomBlockInfo returns a BlockInfo with a random pointer and a
// fixed encoded size of 150.
func makeRandomBlockInfo(t *testing.T) BlockInfo {
	ptr := makeRandomBlockPointer(t)
	return BlockInfo{ptr, 150}
}
// makeRandomDirEntry builds a DirEntry of the given type/size/path with
// a random block info and fixed timestamps (101, 102).
func makeRandomDirEntry(
	t *testing.T, typ EntryType, size uint64, path string) DirEntry {
	info := EntryInfo{typ, size, path, 101, 102, ""}
	return DirEntry{makeRandomBlockInfo(t), info, codec.UnknownFieldSetHandler{}}
}
// makeFakeIndirectFilePtr builds an indirect file pointer at the given
// offset with a random block info.
func makeFakeIndirectFilePtr(t *testing.T, off int64) IndirectFilePtr {
	info := makeRandomBlockInfo(t)
	return IndirectFilePtr{info, off, false, codec.UnknownFieldSetHandler{}}
}
// makeFakeIndirectDirPtr builds an indirect dir pointer at the given
// offset with a random block info.
func makeFakeIndirectDirPtr(t *testing.T, off string) IndirectDirPtr {
	info := makeRandomBlockInfo(t)
	return IndirectDirPtr{info, off, codec.UnknownFieldSetHandler{}}
}
// makeFakeDirBlock returns a DirBlock containing exactly one child: a
// random directory entry of type Dir stored under the given name.
func makeFakeDirBlock(t *testing.T, name string) *DirBlock {
	children := make(map[string]DirEntry, 1)
	children[name] = makeRandomDirEntry(t, Dir, 100, name)
	return &DirBlock{Children: children}
}
// initPrefetcherTest builds a single-worker block retrieval queue backed by
// a fake block getter, so tests can release retrievals one at a time.
func initPrefetcherTest(t *testing.T) (*blockRetrievalQueue,
	*fakeBlockGetter, *testBlockRetrievalConfig) {
	// We don't want the block getter to respect cancelation, because we need
	// <-q.Prefetcher().Shutdown() to represent whether the retrieval requests
	// _actually_ completed.
	getter := newFakeBlockGetter(false)
	cfg := newTestBlockRetrievalConfig(t, getter, nil)
	queue := newBlockRetrievalQueue(1, 1, cfg)
	require.NotNil(t, queue)
	return queue, getter, cfg
}
// shutdownPrefetcherTest tears down the queue created by initPrefetcherTest.
func shutdownPrefetcherTest(q *blockRetrievalQueue) {
	q.Shutdown()
}
// testPrefetcherCheckGet asserts that bcache holds expectedBlock at ptr,
// with the expected prefetch status and cache lifetime.
func testPrefetcherCheckGet(t *testing.T, bcache BlockCache, ptr BlockPointer,
	expectedBlock Block, expectedPrefetchStatus PrefetchStatus,
	expectedLifetime BlockCacheLifetime) {
	gotBlock, gotStatus, gotLifetime, err := bcache.GetWithPrefetch(ptr)
	require.NoError(t, err)
	require.Equal(t, expectedBlock, gotBlock)
	// Compare string forms for readable failure messages.
	require.Equal(t, expectedPrefetchStatus.String(), gotStatus.String())
	require.Equal(t, expectedLifetime.String(), gotLifetime.String())
}
// waitForPrefetchOrBust waits up to one second for ch to yield a value, and
// fails the test if it doesn't.
func waitForPrefetchOrBust(t *testing.T, ch <-chan struct{}) {
	// TODO: add t.Helper() once we're on Go 1.9
	timeout := time.After(time.Second)
	select {
	case <-ch:
	case <-timeout:
		t.Fatal("Failed to wait for prefetch.")
	}
}
// TestPrefetcherIndirectFileBlock checks that fetching an indirect file
// block on demand triggers prefetches of both child data blocks: the root
// ends up marked TriggeredPrefetch and the children NoPrefetch.
func TestPrefetcherIndirectFileBlock(t *testing.T) {
	t.Log("Test indirect file block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an indirect file block pointing to 2 file data blocks.")
	ptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &FileBlock{IPtrs: ptrs}
	rootBlock.IsInd = true
	indBlock1 := makeFakeFileBlock(t, true)
	indBlock2 := makeFakeFileBlock(t, true)
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	var block Block = &FileBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	// Unblock the root retrieval; the prefetcher should then request both
	// children.
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
}
// TestPrefetcherIndirectDirBlock is the directory analogue of
// TestPrefetcherIndirectFileBlock: an indirect dir block's two children are
// prefetched once the root is fetched on demand.
func TestPrefetcherIndirectDirBlock(t *testing.T) {
	t.Log("Test indirect dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an indirect dir block pointing to 2 dir data blocks.")
	ptrs := []IndirectDirPtr{
		makeFakeIndirectDirPtr(t, "a"),
		makeFakeIndirectDirPtr(t, "b"),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &DirBlock{IPtrs: ptrs, Children: make(map[string]DirEntry)}
	rootBlock.IsInd = true
	indBlock1 := makeFakeDirBlock(t, "a")
	indBlock2 := makeFakeDirBlock(t, "b")
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	block := NewDirBlock()
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
}
// TestPrefetcherDirectDirBlock checks that fetching a direct dir block
// prefetches its immediate children only (one level deep): a canceled
// child retrieval stays out of the cache, and grandchildren are never
// prefetched.
func TestPrefetcherDirectDirBlock(t *testing.T) {
	t.Log("Test direct dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with entries pointing to 3 files.")
	fileA := makeFakeFileBlock(t, true)
	fileC := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 100, "a"),
		"b": makeRandomDirEntry(t, Dir, 60, "b"),
		"c": makeRandomDirEntry(t, Exec, 20, "c"),
	}}
	dirB := &DirBlock{Children: map[string]DirEntry{
		"d": makeRandomDirEntry(t, File, 100, "d"),
	}}
	dirBfileD := makeFakeFileBlock(t, true)
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, fileA)
	_, continueChDirB :=
		bg.setBlockToReturn(rootDir.Children["b"].BlockPointer, dirB)
	_, continueChFileC :=
		bg.setBlockToReturn(rootDir.Children["c"].BlockPointer, fileC)
	// The grandchild's continue channel is deliberately unused: it should
	// never be requested.
	_, _ = bg.setBlockToReturn(dirB.Children["d"].BlockPointer, dirBfileD)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the blocks in ascending order of their size. The largest " +
		"block will error.")
	continueChFileC <- nil
	continueChDirB <- nil
	continueChFileA <- context.Canceled
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["c"].BlockPointer, fileC, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["b"].BlockPointer, dirB, NoPrefetch, TransientEntry)
	t.Log("Ensure that the largest block isn't in the cache.")
	block, err = config.BlockCache().Get(rootDir.Children["a"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{rootDir.Children["a"].BlockPointer.ID}.Error())
	t.Log("Ensure that the second-level directory didn't cause a prefetch.")
	block, err = config.BlockCache().Get(dirB.Children["d"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirB.Children["d"].BlockPointer.ID}.Error())
}
// TestPrefetcherAlreadyCached checks prefetch bookkeeping when the
// requested dir block is already in the cache: a second request for a
// cached block still triggers a (single) child prefetch, and a third
// request after that triggers none. TogglePrefetcher is used to restart
// the prefetcher after each Shutdown-based wait.
func TestPrefetcherAlreadyCached(t *testing.T) {
	t.Log("Test direct dir block prefetching when the dir block is cached.")
	q, bg, config := initPrefetcherTest(t)
	cache := config.BlockCache()
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with an entry pointing to 1 " +
		"folder, which in turn points to 1 file.")
	fileB := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, Dir, 60, "a"),
	}}
	dirA := &DirBlock{Children: map[string]DirEntry{
		"b": makeRandomDirEntry(t, File, 100, "b"),
	}}
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChDirA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, dirA)
	_, continueChFileB :=
		bg.setBlockToReturn(dirA.Children["b"].BlockPointer, fileB)
	t.Log("Request the root block.")
	kmd := makeKMD()
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the prefetch for dirA.")
	continueChDirA <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	t.Log("Ensure that the prefetched block is in the cache.")
	block, err = cache.Get(rootDir.Children["a"].BlockPointer)
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Ensure that the second-level directory didn't cause a prefetch.")
	block, err = cache.Get(dirA.Children["b"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirA.Children["b"].BlockPointer.ID}.Error())
	t.Log("Request the already-cached second-level directory block. We don't " +
		"need to unblock this one.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd,
		rootDir.Children["a"].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Release the prefetch for fileB.")
	continueChFileB <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	testPrefetcherCheckGet(t, cache, dirA.Children["b"].BlockPointer, fileB,
		NoPrefetch, TransientEntry)
	// Check that the dir block is marked as having been prefetched.
	testPrefetcherCheckGet(t, cache, rootDir.Children["a"].BlockPointer, dirA,
		TriggeredPrefetch, TransientEntry)
	t.Log("Remove the prefetched file block from the cache.")
	cache.DeleteTransient(dirA.Children["b"].BlockPointer, kmd.TlfID())
	_, err = cache.Get(dirA.Children["b"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirA.Children["b"].BlockPointer.ID}.Error())
	t.Log("Request the second-level directory block again. No prefetches " +
		"should be triggered.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd,
		rootDir.Children["a"].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
}
// TestPrefetcherNoRepeatedPrefetch checks that a block already marked as
// prefetched does not trigger a second prefetch: after the child block is
// evicted, re-requesting the cached root must not re-fetch the child.
func TestPrefetcherNoRepeatedPrefetch(t *testing.T) {
	t.Log("Test that prefetches are only triggered once for a given block.")
	q, bg, config := initPrefetcherTest(t)
	// Concrete cache type is needed for DeleteTransient below.
	cache := config.BlockCache().(*BlockCacheStandard)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with an entry pointing to 1 file.")
	fileA := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 60, "a"),
	}}
	ptrA := rootDir.Children["a"].BlockPointer
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA := bg.setBlockToReturn(ptrA, fileA)
	t.Log("Request the root block.")
	var block Block = &DirBlock{}
	kmd := makeKMD()
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the prefetched block.")
	continueChFileA <- nil
	t.Log("Wait for the prefetch to finish, then verify that the prefetched " +
		"block is in the cache.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrA, fileA, NoPrefetch,
		TransientEntry)
	t.Log("Remove the prefetched block from the cache.")
	cache.DeleteTransient(ptrA, kmd.TlfID())
	_, err = cache.Get(ptrA)
	require.EqualError(t, err, NoSuchBlockError{ptrA.ID}.Error())
	t.Log("Request the root block again. It should be cached, so it should " +
		"return without needing to release the block.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Wait for the prefetch to finish, then verify that the child " +
		"block is still not in the cache.")
	_, err = cache.Get(ptrA)
	require.EqualError(t, err, NoSuchBlockError{ptrA.ID}.Error())
	<-q.Prefetcher().Shutdown()
}
// TestPrefetcherEmptyDirectDirBlock checks that a dir block with no
// children is immediately marked FinishedPrefetch, since there is nothing
// to prefetch beneath it.
func TestPrefetcherEmptyDirectDirBlock(t *testing.T) {
	t.Log("Test empty direct dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an empty direct dir block.")
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{}}
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Wait for prefetching to complete.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the directory block is in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
}
func notifyContinueCh(ch chan<- error, wg *sync.WaitGroup) {
go func() {
ch <- nil
wg.Done()
}()
}
// TestPrefetcherForSyncedTLF checks deep prefetching for a TLF marked as
// synced: the entire tree under the root (children, grandchildren, and
// indirect file blocks) is prefetched and everything ends up marked
// FinishedPrefetch. The prefetchSyncCh channel single-steps the prefetcher
// so each trigger happens at a deterministic point.
func TestPrefetcherForSyncedTLF(t *testing.T) {
	t.Log("Test synced TLF prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	prefetchSyncCh := make(chan struct{})
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	// Initial release so the prefetcher can start processing.
	prefetchSyncCh <- struct{}{}
	kmd := makeKMD()
	config.SetTlfSyncState(kmd.TlfID(), true)
	t.Log("Initialize a direct dir block with entries pointing to 2 files " +
		"and 1 directory. The directory has an entry pointing to another " +
		"file, which has 2 indirect blocks.")
	fileA := makeFakeFileBlock(t, true)
	fileC := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 100, "a"),
		"b": makeRandomDirEntry(t, Dir, 60, "b"),
		"c": makeRandomDirEntry(t, Exec, 20, "c"),
	}}
	dirB := &DirBlock{Children: map[string]DirEntry{
		"d": makeRandomDirEntry(t, File, 100, "d"),
	}}
	dirBfileDptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	dirBfileD := &FileBlock{IPtrs: dirBfileDptrs}
	dirBfileD.IsInd = true
	dirBfileDblock1 := makeFakeFileBlock(t, true)
	dirBfileDblock2 := makeFakeFileBlock(t, true)
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, fileA)
	_, continueChDirB :=
		bg.setBlockToReturn(rootDir.Children["b"].BlockPointer, dirB)
	_, continueChFileC :=
		bg.setBlockToReturn(rootDir.Children["c"].BlockPointer, fileC)
	_, continueChDirBfileD :=
		bg.setBlockToReturn(dirB.Children["d"].BlockPointer, dirBfileD)
	_, continueChDirBfileDblock1 :=
		bg.setBlockToReturn(dirBfileDptrs[0].BlockPointer, dirBfileDblock1)
	_, continueChDirBfileDblock2 :=
		bg.setBlockToReturn(dirBfileDptrs[1].BlockPointer, dirBfileDblock2)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release all the blocks.")
	wg := &sync.WaitGroup{}
	wg.Add(4)
	go func() {
		continueChFileC <- nil
		continueChDirB <- nil
		// After this, the prefetch worker can either pick up the third child of
		// dir1 (continueCh2), or the first child of dir2 (continueCh5).
		// TODO: The prefetcher should have a "global" prefetch priority
		// reservation system that goes down with each next set of prefetches.
		notifyContinueCh(continueChFileA, wg)
		notifyContinueCh(continueChDirBfileD, wg)
		notifyContinueCh(continueChDirBfileDblock1, wg)
		notifyContinueCh(continueChDirBfileDblock2, wg)
	}()
	t.Log("Wait for prefetching to complete.")
	// First we wait for all prefetches to be triggered.
	// Release after prefetching rootDir
	prefetchSyncCh <- struct{}{}
	// Release after prefetching fileC
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirB
	prefetchSyncCh <- struct{}{}
	// Release after prefetching fileA
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileD
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileDblock1
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileDblock2
	prefetchSyncCh <- struct{}{}
	wg.Wait()
	// Then we wait for the pending prefetches to complete.
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are all in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["c"].BlockPointer, fileC, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["b"].BlockPointer, dirB, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["a"].BlockPointer, fileA, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirB.Children["d"].BlockPointer, dirBfileD, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirBfileDptrs[0].BlockPointer, dirBfileDblock1, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirBfileDptrs[1].BlockPointer, dirBfileDblock2, FinishedPrefetch,
		TransientEntry)
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	// We don't need to release the block this time because it should be cached
	// already.
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	prefetchSyncCh <- struct{}{}
	t.Log("Wait for prefetching to complete. This shouldn't hang.")
	select {
	case <-q.Prefetcher().Shutdown():
	case <-time.After(time.Second):
		t.Fatal("Prefetching hung.")
	}
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
}
// TestPrefetcherMultiLevelIndirectFile checks that prefetching only goes
// one level deep through a multi-level indirect file: fetching the root
// prefetches the two middle indirect blocks, and each middle block's leaves
// are only prefetched once that middle block is itself fetched on demand.
//
// Fix: the two on-demand fetch results (err = <-ch) were assigned but never
// checked; they are now verified with require.NoError.
func TestPrefetcherMultiLevelIndirectFile(t *testing.T) {
	t.Skip("Isn't quite working yet.")
	t.Log("Test multi-level indirect file block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	prefetchSyncCh := make(chan struct{})
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Initialize an indirect file block pointing to 2 file data blocks.")
	ptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &FileBlock{IPtrs: ptrs}
	rootBlock.IsInd = true
	indBlock1 := &FileBlock{IPtrs: []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 10),
		makeFakeIndirectFilePtr(t, 20),
	}}
	indBlock2 := &FileBlock{IPtrs: []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 30),
		makeFakeIndirectFilePtr(t, 40),
	}}
	indBlock11 := makeFakeFileBlock(t, true)
	indBlock12 := makeFakeFileBlock(t, true)
	indBlock21 := makeFakeFileBlock(t, true)
	indBlock22 := makeFakeFileBlock(t, true)
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	_, continueChIndBlock11 :=
		bg.setBlockToReturn(indBlock1.IPtrs[0].BlockPointer, indBlock11)
	_, continueChIndBlock12 :=
		bg.setBlockToReturn(indBlock1.IPtrs[1].BlockPointer, indBlock12)
	_, continueChIndBlock21 :=
		bg.setBlockToReturn(indBlock2.IPtrs[0].BlockPointer, indBlock21)
	_, continueChIndBlock22 :=
		bg.setBlockToReturn(indBlock2.IPtrs[1].BlockPointer, indBlock22)
	var block Block = &FileBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	// Release after prefetching rootBlock
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
	t.Log("Fetch indirect block1 on-demand.")
	block = &FileBlock{}
	ch = q.Request(context.Background(), defaultOnDemandRequestPriority,
		makeKMD(), rootBlock.IPtrs[0].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	t.Log("Release the prefetch for indirect block1")
	// Release after prefetching block1
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock11 <- nil
	continueChIndBlock12 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock1.IPtrs[0].BlockPointer, indBlock11, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock1.IPtrs[1].BlockPointer, indBlock12, NoPrefetch, TransientEntry)
	t.Log("Fetch indirect block2 on-demand.")
	block = &FileBlock{}
	ch = q.Request(context.Background(), defaultOnDemandRequestPriority,
		makeKMD(), rootBlock.IPtrs[1].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	t.Log("Release the prefetch for indirect block2")
	// Release after prefetching block2
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock21 <- nil
	continueChIndBlock22 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock2.IPtrs[0].BlockPointer, indBlock21, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock2.IPtrs[1].BlockPointer, indBlock22, NoPrefetch, TransientEntry)
}
prefetcher_test: Add multi-level indirect file test.
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"context"
"sync"
"testing"
"time"
"github.com/keybase/go-codec/codec"
"github.com/stretchr/testify/require"
)
// makeRandomBlockInfo returns a BlockInfo wrapping a random block pointer.
// The literal is positional; 150 fills the remaining numeric field
// (presumably the encoded size -- confirm against the BlockInfo definition).
func makeRandomBlockInfo(t *testing.T) BlockInfo {
	return BlockInfo{
		makeRandomBlockPointer(t),
		150,
	}
}
// makeRandomDirEntry returns a DirEntry of the given entry type, size, and
// path, backed by a random BlockInfo. The EntryInfo literal is positional;
// 101, 102, and "" fill its remaining fields with fixed test values
// (presumably timestamps and a symlink target -- confirm against EntryInfo).
func makeRandomDirEntry(
	t *testing.T, typ EntryType, size uint64, path string) DirEntry {
	return DirEntry{
		makeRandomBlockInfo(t),
		EntryInfo{
			typ,
			size,
			path,
			101,
			102,
			"",
		},
		codec.UnknownFieldSetHandler{},
	}
}
// makeFakeIndirectFilePtr returns an IndirectFilePtr at the given byte
// offset with a random BlockInfo. The positional false fills the struct's
// bool field (presumably a "holes" flag -- confirm against IndirectFilePtr).
func makeFakeIndirectFilePtr(t *testing.T, off int64) IndirectFilePtr {
	return IndirectFilePtr{
		makeRandomBlockInfo(t),
		off,
		false,
		codec.UnknownFieldSetHandler{},
	}
}
// makeFakeIndirectDirPtr returns an IndirectDirPtr at the given string
// offset with a random BlockInfo.
func makeFakeIndirectDirPtr(t *testing.T, off string) IndirectDirPtr {
	return IndirectDirPtr{
		makeRandomBlockInfo(t),
		off,
		codec.UnknownFieldSetHandler{},
	}
}
// makeFakeDirBlock returns a DirBlock containing exactly one child: a
// random directory entry of type Dir stored under the given name.
func makeFakeDirBlock(t *testing.T, name string) *DirBlock {
	children := make(map[string]DirEntry, 1)
	children[name] = makeRandomDirEntry(t, Dir, 100, name)
	return &DirBlock{Children: children}
}
// initPrefetcherTest builds a single-worker block retrieval queue backed by
// a fake block getter, so tests can release retrievals one at a time.
func initPrefetcherTest(t *testing.T) (*blockRetrievalQueue,
	*fakeBlockGetter, *testBlockRetrievalConfig) {
	// We don't want the block getter to respect cancelation, because we need
	// <-q.Prefetcher().Shutdown() to represent whether the retrieval requests
	// _actually_ completed.
	getter := newFakeBlockGetter(false)
	cfg := newTestBlockRetrievalConfig(t, getter, nil)
	queue := newBlockRetrievalQueue(1, 1, cfg)
	require.NotNil(t, queue)
	return queue, getter, cfg
}
// shutdownPrefetcherTest tears down the queue created by initPrefetcherTest.
func shutdownPrefetcherTest(q *blockRetrievalQueue) {
	q.Shutdown()
}
// testPrefetcherCheckGet asserts that bcache holds expectedBlock at ptr,
// with the expected prefetch status and cache lifetime.
func testPrefetcherCheckGet(t *testing.T, bcache BlockCache, ptr BlockPointer,
	expectedBlock Block, expectedPrefetchStatus PrefetchStatus,
	expectedLifetime BlockCacheLifetime) {
	gotBlock, gotStatus, gotLifetime, err := bcache.GetWithPrefetch(ptr)
	require.NoError(t, err)
	require.Equal(t, expectedBlock, gotBlock)
	// Compare string forms for readable failure messages.
	require.Equal(t, expectedPrefetchStatus.String(), gotStatus.String())
	require.Equal(t, expectedLifetime.String(), gotLifetime.String())
}
// waitForPrefetchOrBust waits up to one second for ch to yield a value, and
// fails the test if it doesn't.
func waitForPrefetchOrBust(t *testing.T, ch <-chan struct{}) {
	// TODO: add t.Helper() once we're on Go 1.9
	timeout := time.After(time.Second)
	select {
	case <-ch:
	case <-timeout:
		t.Fatal("Failed to wait for prefetch.")
	}
}
// TestPrefetcherIndirectFileBlock checks that fetching an indirect file
// block on demand triggers prefetches of both child data blocks: the root
// ends up marked TriggeredPrefetch and the children NoPrefetch.
func TestPrefetcherIndirectFileBlock(t *testing.T) {
	t.Log("Test indirect file block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an indirect file block pointing to 2 file data blocks.")
	ptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &FileBlock{IPtrs: ptrs}
	rootBlock.IsInd = true
	indBlock1 := makeFakeFileBlock(t, true)
	indBlock2 := makeFakeFileBlock(t, true)
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	var block Block = &FileBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	// Unblock the root retrieval; the prefetcher should then request both
	// children.
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
}
// TestPrefetcherIndirectDirBlock is the directory analogue of
// TestPrefetcherIndirectFileBlock: an indirect dir block's two children are
// prefetched once the root is fetched on demand.
func TestPrefetcherIndirectDirBlock(t *testing.T) {
	t.Log("Test indirect dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an indirect dir block pointing to 2 dir data blocks.")
	ptrs := []IndirectDirPtr{
		makeFakeIndirectDirPtr(t, "a"),
		makeFakeIndirectDirPtr(t, "b"),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &DirBlock{IPtrs: ptrs, Children: make(map[string]DirEntry)}
	rootBlock.IsInd = true
	indBlock1 := makeFakeDirBlock(t, "a")
	indBlock2 := makeFakeDirBlock(t, "b")
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	block := NewDirBlock()
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
}
// TestPrefetcherDirectDirBlock checks that fetching a direct dir block
// prefetches its immediate children only (one level deep): a canceled
// child retrieval stays out of the cache, and grandchildren are never
// prefetched.
func TestPrefetcherDirectDirBlock(t *testing.T) {
	t.Log("Test direct dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with entries pointing to 3 files.")
	fileA := makeFakeFileBlock(t, true)
	fileC := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 100, "a"),
		"b": makeRandomDirEntry(t, Dir, 60, "b"),
		"c": makeRandomDirEntry(t, Exec, 20, "c"),
	}}
	dirB := &DirBlock{Children: map[string]DirEntry{
		"d": makeRandomDirEntry(t, File, 100, "d"),
	}}
	dirBfileD := makeFakeFileBlock(t, true)
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, fileA)
	_, continueChDirB :=
		bg.setBlockToReturn(rootDir.Children["b"].BlockPointer, dirB)
	_, continueChFileC :=
		bg.setBlockToReturn(rootDir.Children["c"].BlockPointer, fileC)
	// The grandchild's continue channel is deliberately unused: it should
	// never be requested.
	_, _ = bg.setBlockToReturn(dirB.Children["d"].BlockPointer, dirBfileD)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the blocks in ascending order of their size. The largest " +
		"block will error.")
	continueChFileC <- nil
	continueChDirB <- nil
	continueChFileA <- context.Canceled
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["c"].BlockPointer, fileC, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["b"].BlockPointer, dirB, NoPrefetch, TransientEntry)
	t.Log("Ensure that the largest block isn't in the cache.")
	block, err = config.BlockCache().Get(rootDir.Children["a"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{rootDir.Children["a"].BlockPointer.ID}.Error())
	t.Log("Ensure that the second-level directory didn't cause a prefetch.")
	block, err = config.BlockCache().Get(dirB.Children["d"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirB.Children["d"].BlockPointer.ID}.Error())
}
// TestPrefetcherAlreadyCached checks prefetch bookkeeping when the
// requested dir block is already in the cache: a second request for a
// cached block still triggers a (single) child prefetch, and a third
// request after that triggers none. TogglePrefetcher is used to restart
// the prefetcher after each Shutdown-based wait.
func TestPrefetcherAlreadyCached(t *testing.T) {
	t.Log("Test direct dir block prefetching when the dir block is cached.")
	q, bg, config := initPrefetcherTest(t)
	cache := config.BlockCache()
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with an entry pointing to 1 " +
		"folder, which in turn points to 1 file.")
	fileB := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, Dir, 60, "a"),
	}}
	dirA := &DirBlock{Children: map[string]DirEntry{
		"b": makeRandomDirEntry(t, File, 100, "b"),
	}}
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChDirA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, dirA)
	_, continueChFileB :=
		bg.setBlockToReturn(dirA.Children["b"].BlockPointer, fileB)
	t.Log("Request the root block.")
	kmd := makeKMD()
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the prefetch for dirA.")
	continueChDirA <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	t.Log("Ensure that the prefetched block is in the cache.")
	block, err = cache.Get(rootDir.Children["a"].BlockPointer)
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Ensure that the second-level directory didn't cause a prefetch.")
	block, err = cache.Get(dirA.Children["b"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirA.Children["b"].BlockPointer.ID}.Error())
	t.Log("Request the already-cached second-level directory block. We don't " +
		"need to unblock this one.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd,
		rootDir.Children["a"].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Release the prefetch for fileB.")
	continueChFileB <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	testPrefetcherCheckGet(t, cache, dirA.Children["b"].BlockPointer, fileB,
		NoPrefetch, TransientEntry)
	// Check that the dir block is marked as having been prefetched.
	testPrefetcherCheckGet(t, cache, rootDir.Children["a"].BlockPointer, dirA,
		TriggeredPrefetch, TransientEntry)
	t.Log("Remove the prefetched file block from the cache.")
	cache.DeleteTransient(dirA.Children["b"].BlockPointer, kmd.TlfID())
	_, err = cache.Get(dirA.Children["b"].BlockPointer)
	require.EqualError(t, err,
		NoSuchBlockError{dirA.Children["b"].BlockPointer.ID}.Error())
	t.Log("Request the second-level directory block again. No prefetches " +
		"should be triggered.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd,
		rootDir.Children["a"].BlockPointer, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, dirA, block)
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
}
// TestPrefetcherNoRepeatedPrefetch verifies that a prefetch is triggered only
// once per block: after the prefetched child is deleted from the cache,
// re-requesting the (still cached) parent must NOT prefetch the child again.
func TestPrefetcherNoRepeatedPrefetch(t *testing.T) {
	t.Log("Test that prefetches are only triggered once for a given block.")
	q, bg, config := initPrefetcherTest(t)
	cache := config.BlockCache().(*BlockCacheStandard)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize a direct dir block with an entry pointing to 1 file.")
	fileA := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 60, "a"),
	}}
	ptrA := rootDir.Children["a"].BlockPointer
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA := bg.setBlockToReturn(ptrA, fileA)
	t.Log("Request the root block.")
	var block Block = &DirBlock{}
	kmd := makeKMD()
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release the prefetched block.")
	continueChFileA <- nil
	t.Log("Wait for the prefetch to finish, then verify that the prefetched " +
		"block is in the cache.")
	// Shutdown drains the prefetcher; toggle it back on for the next phase.
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, nil)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrA, fileA, NoPrefetch,
		TransientEntry)
	t.Log("Remove the prefetched block from the cache.")
	cache.DeleteTransient(ptrA, kmd.TlfID())
	_, err = cache.Get(ptrA)
	require.EqualError(t, err, NoSuchBlockError{ptrA.ID}.Error())
	t.Log("Request the root block again. It should be cached, so it should " +
		"return without needing to release the block.")
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Wait for the prefetch to finish, then verify that the child " +
		"block is still not in the cache.")
	// The child must remain absent: serving the parent from cache must not
	// re-trigger its prefetch.
	_, err = cache.Get(ptrA)
	require.EqualError(t, err, NoSuchBlockError{ptrA.ID}.Error())
	<-q.Prefetcher().Shutdown()
}
// TestPrefetcherEmptyDirectDirBlock verifies that fetching a directory block
// with no children leaves it in the cache marked FinishedPrefetch (there is
// nothing further to prefetch).
func TestPrefetcherEmptyDirectDirBlock(t *testing.T) {
	t.Log("Test empty direct dir block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	t.Log("Initialize an empty direct dir block.")
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{}}
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Wait for prefetching to complete.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the directory block is in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
}
func notifyContinueCh(ch chan<- error, wg *sync.WaitGroup) {
go func() {
ch <- nil
wg.Done()
}()
}
// TestPrefetcherForSyncedTLF verifies that for a TLF marked as synced, a
// single root request recursively prefetches the entire tree (files,
// subdirectory, and both indirect blocks of the nested file), and that all
// blocks end up in the cache marked FinishedPrefetch. prefetchSyncCh
// single-steps the prefetcher so each prefetch can be released in order.
func TestPrefetcherForSyncedTLF(t *testing.T) {
	t.Log("Test synced TLF prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	prefetchSyncCh := make(chan struct{})
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	kmd := makeKMD()
	// Mark this TLF as synced so prefetches recurse to completion.
	config.SetTlfSyncState(kmd.TlfID(), true)
	t.Log("Initialize a direct dir block with entries pointing to 2 files " +
		"and 1 directory. The directory has an entry pointing to another " +
		"file, which has 2 indirect blocks.")
	fileA := makeFakeFileBlock(t, true)
	fileC := makeFakeFileBlock(t, true)
	rootPtr := makeRandomBlockPointer(t)
	rootDir := &DirBlock{Children: map[string]DirEntry{
		"a": makeRandomDirEntry(t, File, 100, "a"),
		"b": makeRandomDirEntry(t, Dir, 60, "b"),
		"c": makeRandomDirEntry(t, Exec, 20, "c"),
	}}
	dirB := &DirBlock{Children: map[string]DirEntry{
		"d": makeRandomDirEntry(t, File, 100, "d"),
	}}
	dirBfileDptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	dirBfileD := &FileBlock{IPtrs: dirBfileDptrs}
	dirBfileD.IsInd = true
	dirBfileDblock1 := makeFakeFileBlock(t, true)
	dirBfileDblock2 := makeFakeFileBlock(t, true)
	_, continueChRootDir := bg.setBlockToReturn(rootPtr, rootDir)
	_, continueChFileA :=
		bg.setBlockToReturn(rootDir.Children["a"].BlockPointer, fileA)
	_, continueChDirB :=
		bg.setBlockToReturn(rootDir.Children["b"].BlockPointer, dirB)
	_, continueChFileC :=
		bg.setBlockToReturn(rootDir.Children["c"].BlockPointer, fileC)
	_, continueChDirBfileD :=
		bg.setBlockToReturn(dirB.Children["d"].BlockPointer, dirBfileD)
	_, continueChDirBfileDblock1 :=
		bg.setBlockToReturn(dirBfileDptrs[0].BlockPointer, dirBfileDblock1)
	_, continueChDirBfileDblock2 :=
		bg.setBlockToReturn(dirBfileDptrs[1].BlockPointer, dirBfileDblock2)
	var block Block = &DirBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	continueChRootDir <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	t.Log("Release all the blocks.")
	wg := &sync.WaitGroup{}
	wg.Add(4)
	go func() {
		// fileC and dirB are released in a deterministic order first.
		continueChFileC <- nil
		continueChDirB <- nil
		// After this, the prefetch worker can either pick up the third child of
		// dir1 (continueCh2), or the first child of dir2 (continueCh5).
		// TODO: The prefetcher should have a "global" prefetch priority
		// reservation system that goes down with each next set of prefetches.
		notifyContinueCh(continueChFileA, wg)
		notifyContinueCh(continueChDirBfileD, wg)
		notifyContinueCh(continueChDirBfileDblock1, wg)
		notifyContinueCh(continueChDirBfileDblock2, wg)
	}()
	t.Log("Wait for prefetching to complete.")
	// First we wait for all prefetches to be triggered.
	// Release after prefetching rootDir
	prefetchSyncCh <- struct{}{}
	// Release after prefetching fileC
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirB
	prefetchSyncCh <- struct{}{}
	// Release after prefetching fileA
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileD
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileDblock1
	prefetchSyncCh <- struct{}{}
	// Release after prefetching dirBfileDblock2
	prefetchSyncCh <- struct{}{}
	wg.Wait()
	// Then we wait for the pending prefetches to complete.
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are all in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["c"].BlockPointer, fileC, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["b"].BlockPointer, dirB, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		rootDir.Children["a"].BlockPointer, fileA, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirB.Children["d"].BlockPointer, dirBfileD, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirBfileDptrs[0].BlockPointer, dirBfileDblock1, FinishedPrefetch,
		TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		dirBfileDptrs[1].BlockPointer, dirBfileDblock2, FinishedPrefetch,
		TransientEntry)
	block = &DirBlock{}
	ch = q.Request(context.Background(),
		defaultOnDemandRequestPriority, kmd, rootPtr, block, TransientEntry)
	// We don't need to release the block this time because it should be cached
	// already.
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, rootDir, block)
	prefetchSyncCh <- struct{}{}
	t.Log("Wait for prefetching to complete. This shouldn't hang.")
	select {
	case <-q.Prefetcher().Shutdown():
	case <-time.After(time.Second):
		t.Fatal("Prefetching hung.")
	}
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootDir,
		FinishedPrefetch, TransientEntry)
}
// TestPrefetcherMultiLevelIndirectFile verifies multi-level indirect file
// prefetching: fetching the root prefetches only the first level of indirect
// blocks; each indirect block must then be fetched on demand before its own
// children are prefetched.
//
// Fix: the results of the two on-demand indirect-block fetches (err = <-ch)
// were previously dropped unchecked; they are now asserted.
func TestPrefetcherMultiLevelIndirectFile(t *testing.T) {
	t.Log("Test multi-level indirect file block prefetching.")
	q, bg, config := initPrefetcherTest(t)
	defer shutdownPrefetcherTest(q)
	prefetchSyncCh := make(chan struct{})
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Initialize an indirect file block pointing to 2 file data blocks.")
	ptrs := []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 0),
		makeFakeIndirectFilePtr(t, 150),
	}
	rootPtr := makeRandomBlockPointer(t)
	rootBlock := &FileBlock{IPtrs: ptrs}
	rootBlock.IsInd = true
	indBlock1 := &FileBlock{IPtrs: []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 10),
		makeFakeIndirectFilePtr(t, 20),
	}}
	indBlock1.IsInd = true
	indBlock2 := &FileBlock{IPtrs: []IndirectFilePtr{
		makeFakeIndirectFilePtr(t, 30),
		makeFakeIndirectFilePtr(t, 40),
	}}
	indBlock2.IsInd = true
	indBlock11 := makeFakeFileBlock(t, true)
	indBlock12 := makeFakeFileBlock(t, true)
	indBlock21 := makeFakeFileBlock(t, true)
	indBlock22 := makeFakeFileBlock(t, true)
	_, continueChRootBlock := bg.setBlockToReturn(rootPtr, rootBlock)
	_, continueChIndBlock1 :=
		bg.setBlockToReturn(ptrs[0].BlockPointer, indBlock1)
	_, continueChIndBlock2 :=
		bg.setBlockToReturn(ptrs[1].BlockPointer, indBlock2)
	_, continueChIndBlock11 :=
		bg.setBlockToReturn(indBlock1.IPtrs[0].BlockPointer, indBlock11)
	_, continueChIndBlock12 :=
		bg.setBlockToReturn(indBlock1.IPtrs[1].BlockPointer, indBlock12)
	_, continueChIndBlock21 :=
		bg.setBlockToReturn(indBlock2.IPtrs[0].BlockPointer, indBlock21)
	_, continueChIndBlock22 :=
		bg.setBlockToReturn(indBlock2.IPtrs[1].BlockPointer, indBlock22)
	var block Block = &FileBlock{}
	ch := q.Request(context.Background(),
		defaultOnDemandRequestPriority, makeKMD(), rootPtr, block,
		TransientEntry)
	continueChRootBlock <- nil
	err := <-ch
	require.NoError(t, err)
	require.Equal(t, rootBlock, block)
	t.Log("Release the prefetched indirect blocks.")
	// Release after prefetching rootBlock
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock1 <- nil
	continueChIndBlock2 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(), rootPtr, rootBlock,
		TriggeredPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[0].BlockPointer,
		indBlock1, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(), ptrs[1].BlockPointer,
		indBlock2, NoPrefetch, TransientEntry)
	t.Log("Fetch indirect block1 on-demand.")
	block = &FileBlock{}
	ch = q.Request(context.Background(), defaultOnDemandRequestPriority,
		makeKMD(), rootBlock.IPtrs[0].BlockPointer, block, TransientEntry)
	err = <-ch
	// Previously dropped unchecked: assert the on-demand fetch succeeded
	// and returned the expected (already-cached) indirect block.
	require.NoError(t, err)
	require.Equal(t, indBlock1, block)
	t.Log("Release the prefetch for indirect block1")
	// Release after prefetching block1
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock11 <- nil
	continueChIndBlock12 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	q.TogglePrefetcher(context.Background(), true, prefetchSyncCh)
	prefetchSyncCh <- struct{}{}
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock1.IPtrs[0].BlockPointer, indBlock11, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock1.IPtrs[1].BlockPointer, indBlock12, NoPrefetch, TransientEntry)
	t.Log("Fetch indirect block2 on-demand.")
	block = &FileBlock{}
	ch = q.Request(context.Background(), defaultOnDemandRequestPriority,
		makeKMD(), rootBlock.IPtrs[1].BlockPointer, block, TransientEntry)
	err = <-ch
	// Previously dropped unchecked as well.
	require.NoError(t, err)
	require.Equal(t, indBlock2, block)
	t.Log("Release the prefetch for indirect block2")
	// Release after prefetching block2
	prefetchSyncCh <- struct{}{}
	// Release 2 blocks
	continueChIndBlock21 <- nil
	continueChIndBlock22 <- nil
	t.Log("Wait for the prefetch to finish.")
	<-q.Prefetcher().Shutdown()
	t.Log("Ensure that the prefetched blocks are in the cache.")
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock2.IPtrs[0].BlockPointer, indBlock21, NoPrefetch, TransientEntry)
	testPrefetcherCheckGet(t, config.BlockCache(),
		indBlock2.IPtrs[1].BlockPointer, indBlock22, NoPrefetch, TransientEntry)
}
|
package paths
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/VonC/godbg"
)
// Path represents a path always '/' separated.
// Either filename or http://...
type Path struct {
path string
}
// NewPath creates a new path.
// If it is a folder, it will end with a trailing '/'
func NewPath(p string) *Path {
res := &Path{path: p}
if strings.HasPrefix(res.path, "http") == false {
res.path = filepath.FromSlash(p)
// fmt.Printf("p '%s' vs. res.path '%s'\n", p, res.path)
// If there is no trailing '/' (after the filepath.FromSlash() call),
// check if one should be added:
if !strings.HasSuffix(res.path, string(filepath.Separator)) && res.path != "" {
if res.Exists() && res.IsDir() {
res.path = res.path + string(filepath.Separator)
}
}
}
return res
}
// NewPathDir will create a Path *always* terminated with a traling '/'.
// Handy for folders which doesn't exist yet
func NewPathDir(p string) *Path {
res := &Path{}
res.path = filepath.FromSlash(p)
if !strings.HasSuffix(res.path, string(filepath.Separator)) {
res.path = res.path + string(filepath.Separator)
}
return res
}
// EndsWithSeparator reports whether the Path ends with a filepath separator.
func (p *Path) EndsWithSeparator() bool {
	// Return the predicate directly instead of the if/return-true idiom.
	return strings.HasSuffix(p.path, string(filepath.Separator))
}
var fstat func(f *os.File) (fi os.FileInfo, err error)
func ifstat(f *os.File) (fi os.FileInfo, err error) {
return f.Stat()
}
// IsDir checks if a path is an existing directory.
// If there is any error, it is printed on Stderr, but not returned.
func (p *Path) IsDir() bool {
	f, err := os.Open(p.path)
	if err != nil {
		fmt.Fprintln(godbg.Err(), err)
		return false
	}
	defer f.Close()
	fi, err := fstat(f)
	if err != nil {
		fmt.Fprintln(godbg.Err(), err)
		return false
	}
	// fi.Mode().IsDir() replaces the original one-case switch on the mode.
	return fi.Mode().IsDir()
}
// SetDir makes sure a Path represents a folder (existing or not)
// That means it ends with a path separator
func (p *Path) SetDir() *Path {
if p.EndsWithSeparator() {
return p
}
return NewPathDir(p.path)
}
// Add adds a string path to a Path
// Makes sure the current path represents a folder first
// (existing or not it: just means making sure it ends with file separator)
func (p *Path) Add(s string) *Path {
pp := p.SetDir()
return NewPath(pp.path + s)
}
// AddP adds a Path to a Path
// no check is done regarding the absolute path of the argument
func (p *Path) AddP(path *Path) *Path {
return p.Add(path.path)
}
// NoSep makes sure the path doesn't end with a file separator.
// If it already was not ending with the file separator, it returns the same object. If it was, it returns a new Path.
func (p *Path) NoSep() *Path {
if !p.EndsWithSeparator() {
return p
}
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
res := &Path{}
res.path = filepath.FromSlash(pp)
return res
}
// AddNoSep adds a string path to a Path with no triling separator
func (p *Path) AddNoSep(s string) *Path {
pp := p.NoSep()
return NewPath(pp.path + s)
}
// AddPNoSep adds a Path to a Path, making sure the resulting path doesn't end with a file separator
// no check is done regarding the absolute path of the argument
func (p *Path) AddPNoSep(path *Path) *Path {
return p.AddNoSep(path.String())
}
var fosstat func(name string) (fi os.FileInfo, err error)
func ifosstat(name string) (fi os.FileInfo, err error) {
return os.Stat(name)
}
// Exists returns whether the given file or directory exists or not
// http://stackoverflow.com/questions/10510691/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang
func (p *Path) Exists() bool {
path := filepath.FromSlash(p.path)
_, err := fosstat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
//pdbg("Error while checking if '%v' exists: '%v'\n", path, err)
//debug.PrintStack()
//os.Exit(0)
fmt.Fprintln(godbg.Err(), err)
return false
}
// String display a (possibly abbreviated) string version of a Path.
// If nil, returns <nil>
// if too long (>200), display only the first 20 plus its length
func (p *Path) String() string {
if p == nil {
return "<nil>"
}
res := fmt.Sprint(p.path)
if len(res) > 200 {
res = res[:20] + fmt.Sprintf(" (%v)", len(res))
}
return res
}
var fosmkdirall func(path string, perm os.FileMode) error
func ifosmkdirall(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// MkdirAll creates a directory named path, along with any necessary parents,
// and return true if created, false otherwise.
// Any error is printed on Stderr
func (p *Path) MkdirAll() bool {
err := fosmkdirall(p.path, 0755)
if err != nil {
fmt.Fprintf(godbg.Err(), "Error creating folder for path '%v': '%v'\n", p.path, err)
return false
}
return true
}
var fosopenfile func(name string, flag int, perm os.FileMode) (file *os.File, err error)
func ifosopenfile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
return os.OpenFile(name, flag, perm)
}
var fosremove func(name string) error
func ifosremove(name string) error {
return os.Remove(name)
}
// MustOpenFile create or append a file, or panic if issue.
// If the Path is a Dir, returns nil.
// The caller is responsible for closing the file
func (p *Path) MustOpenFile(append bool) (file *os.File) {
if p.IsDir() {
return nil
}
var err error
if p.Exists() {
if append {
file, err = fosopenfile(p.path, os.O_APPEND|os.O_WRONLY, 0600)
} else {
err = fosremove(p.path)
}
if err != nil {
panic(err)
}
}
if file == nil {
if file, err = fosopenfile(p.path, os.O_CREATE|os.O_WRONLY, 0600); err != nil {
panic(err)
}
}
return file
}
var ffpabs func(path string) (string, error)
func iffpabs(path string) (string, error) {
return filepath.Abs(path)
}
// Abs returns the absolute path if it can, or nil if error
// The error is printed on stderr
// If the path ends with a separator, said separator is preserved
func (p *Path) Abs() *Path {
res, err := ffpabs(p.path)
if err != nil {
fmt.Fprintf(godbg.Err(), "Unable to get full absollute path for '%v'\n%v\n", p.path, err)
return nil
}
if strings.HasSuffix(p.path, string(filepath.Separator)) {
return NewPathDir(res)
}
return NewPath(res)
}
// Dir is filepath.Dir() for Path:
// It returns all but the last element of path, typically the path's directory
// Its result still ends with a file separator
func (p *Path) Dir() *Path {
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
return NewPathDir(filepath.Dir(pp))
}
// Base is filepath.Base():
// It returns the last element of path.
// Trailing path separators are removed before extracting the last element.
func (p *Path) Base() string {
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
return filepath.Base(pp)
}
// Dot return a path prefixed with ".\" (dot plus file separator)
// If it already starts with ./, returns the same path
func (p *Path) Dot() *Path {
if strings.HasPrefix(p.path, "."+string(filepath.Separator)) {
return p
}
return NewPath("." + string(filepath.Separator) + p.path)
}
var hasTarRx, _ = regexp.Compile(`\.tar(?:\.[^\.]+)?$`)
// HasTar checks if a file name ends with .tar or .tar.<ext>.
// For example a.tar.gz has tar.
func (p Path) HasTar() bool {
	// MatchString replaces FindAllStringSubmatchIndex + len check: only a
	// boolean match is needed here.
	return hasTarRx.MatchString(p.NoSep().String())
}
// isExt reports whether the path's full (possibly compound) extension equals
// ext. The whole extension chain is considered, so for "a.tar.gz" the
// extension is ".tar.gz", not ".gz". This fixes the naive filepath.Ext
// comparison, which only looked at the last extension component.
func (p *Path) isExt(ext string) bool {
	sp := p.NoSep().String()
	sext := ""
	anext := filepath.Ext(sp)
	for anext != "" {
		sp = sp[:len(sp)-len(anext)]
		sext = anext + sext
		anext = filepath.Ext(sp)
	}
	return sext == ext
}
// IsTar checks if a path ends with .tar
// For file or folder
func (p *Path) IsTar() bool {
return p.isExt(".tar")
}
// RemoveExtension removes .tar if path ends with .tar
// Preserves file separator indicating a folder
func (p *Path) RemoveExtension() *Path {
sp := p.NoSep().String()
ext := filepath.Ext(sp)
if ext != "" {
sp = sp[:len(sp)-len(ext)]
}
if p.EndsWithSeparator() {
return NewPathDir(sp)
}
return NewPath(sp)
}
// SetExtTar() add a .tar to the path after removing its current extension
// For file or folder.
// Don't add .tar if, after removing extension, its ends with .tar
// For instance a.tar.gz => a.tar
func (p *Path) SetExtTar() *Path {
return p.setExt(".tar")
}
func (p *Path) setExt(ext string) *Path {
if p.isExt(ext) {
return p
}
p = p.RemoveExtension()
if p.isExt(ext) {
return p
}
if p.EndsWithSeparator() {
return p.AddNoSep(ext).SetDir()
}
return p.AddNoSep(ext)
}
// IsGz checks if a path ends with .gz
// For file or folder
func (p *Path) IsGz() bool {
return p.isExt(".gz")
}
// SetExtGz() add a .gz to the path after removing its current extension
// For file or folder.
// Don't add .gz if, after removing extension, its ends with .gz
// For instance a.gz.xxx => a.gz
func (p *Path) SetExtGz() *Path {
return p.setExt(".gz")
}
// IsGz checks if a path ends with .7z
// For file or folder
func (p *Path) Is7z() bool {
return p.isExt(".7z")
}
// SetExtGz() add a .7z to the path after removing its current extension
// For file or folder.
// Don't add .7z if, after removing extension, its ends with .7z
// For instance a.7z.xxx => a.7z
func (p *Path) SetExt7z() *Path {
return p.setExt(".7z")
}
func init() {
fstat = ifstat
fosstat = ifosstat
fosmkdirall = ifosmkdirall
fosopenfile = ifosopenfile
ffpabs = iffpabs
}
Fix Path isExt() to take into account .xxx.yyy extensions
package paths
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/VonC/godbg"
)
// Path represents a path always '/' separated.
// Either filename or http://...
type Path struct {
path string
}
// NewPath creates a new path.
// If it is a folder, it will end with a trailing '/'
func NewPath(p string) *Path {
res := &Path{path: p}
if strings.HasPrefix(res.path, "http") == false {
res.path = filepath.FromSlash(p)
// fmt.Printf("p '%s' vs. res.path '%s'\n", p, res.path)
// If there is no trailing '/' (after the filepath.FromSlash() call),
// check if one should be added:
if !strings.HasSuffix(res.path, string(filepath.Separator)) && res.path != "" {
if res.Exists() && res.IsDir() {
res.path = res.path + string(filepath.Separator)
}
}
}
return res
}
// NewPathDir will create a Path *always* terminated with a trailing '/'.
// Handy for folders which don't exist yet.
func NewPathDir(p string) *Path {
	res := &Path{}
	res.path = filepath.FromSlash(p)
	// Append the OS separator unless the path already ends with one.
	if !strings.HasSuffix(res.path, string(filepath.Separator)) {
		res.path = res.path + string(filepath.Separator)
	}
	return res
}
// EndsWithSeparator reports whether the Path ends with a filepath separator.
func (p *Path) EndsWithSeparator() bool {
	// Return the predicate directly instead of the if/return-true idiom.
	return strings.HasSuffix(p.path, string(filepath.Separator))
}
var fstat func(f *os.File) (fi os.FileInfo, err error)
func ifstat(f *os.File) (fi os.FileInfo, err error) {
return f.Stat()
}
// IsDir checks if a path is an existing directory.
// If there is any error, it is printed on Stderr, but not returned.
func (p *Path) IsDir() bool {
	f, err := os.Open(p.path)
	if err != nil {
		fmt.Fprintln(godbg.Err(), err)
		return false
	}
	defer f.Close()
	fi, err := fstat(f)
	if err != nil {
		fmt.Fprintln(godbg.Err(), err)
		return false
	}
	// fi.Mode().IsDir() replaces the original one-case switch on the mode.
	return fi.Mode().IsDir()
}
// SetDir makes sure a Path represents a folder (existing or not)
// That means it ends with a path separator
func (p *Path) SetDir() *Path {
if p.EndsWithSeparator() {
return p
}
return NewPathDir(p.path)
}
// Add adds a string path to a Path
// Makes sure the current path represents a folder first
// (existing or not it: just means making sure it ends with file separator)
func (p *Path) Add(s string) *Path {
pp := p.SetDir()
return NewPath(pp.path + s)
}
// AddP adds a Path to a Path
// no check is done regarding the absolute path of the argument
func (p *Path) AddP(path *Path) *Path {
return p.Add(path.path)
}
// NoSep makes sure the path doesn't end with a file separator.
// If it already was not ending with the file separator, it returns the same object. If it was, it returns a new Path.
func (p *Path) NoSep() *Path {
if !p.EndsWithSeparator() {
return p
}
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
res := &Path{}
res.path = filepath.FromSlash(pp)
return res
}
// AddNoSep adds a string path to a Path with no trailing separator.
func (p *Path) AddNoSep(s string) *Path {
	pp := p.NoSep()
	return NewPath(pp.path + s)
}
// AddPNoSep adds a Path to a Path, making sure the resulting path doesn't end with a file separator
// no check is done regarding the absolute path of the argument
func (p *Path) AddPNoSep(path *Path) *Path {
return p.AddNoSep(path.String())
}
var fosstat func(name string) (fi os.FileInfo, err error)
func ifosstat(name string) (fi os.FileInfo, err error) {
return os.Stat(name)
}
// Exists returns whether the given file or directory exists or not
// http://stackoverflow.com/questions/10510691/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang
func (p *Path) Exists() bool {
path := filepath.FromSlash(p.path)
_, err := fosstat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
//pdbg("Error while checking if '%v' exists: '%v'\n", path, err)
//debug.PrintStack()
//os.Exit(0)
fmt.Fprintln(godbg.Err(), err)
return false
}
// String displays a (possibly abbreviated) string version of a Path.
// If nil, returns <nil>.
// If too long (>200), displays only the first 20 chars plus its length.
func (p *Path) String() string {
	if p == nil {
		return "<nil>"
	}
	// p.path is already a string; fmt.Sprint on it was a no-op indirection.
	res := p.path
	if len(res) > 200 {
		res = res[:20] + fmt.Sprintf(" (%v)", len(res))
	}
	return res
}
var fosmkdirall func(path string, perm os.FileMode) error
func ifosmkdirall(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// MkdirAll creates a directory named path, along with any necessary parents,
// and return true if created, false otherwise.
// Any error is printed on Stderr
func (p *Path) MkdirAll() bool {
err := fosmkdirall(p.path, 0755)
if err != nil {
fmt.Fprintf(godbg.Err(), "Error creating folder for path '%v': '%v'\n", p.path, err)
return false
}
return true
}
var fosopenfile func(name string, flag int, perm os.FileMode) (file *os.File, err error)
func ifosopenfile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
return os.OpenFile(name, flag, perm)
}
var fosremove func(name string) error
func ifosremove(name string) error {
return os.Remove(name)
}
// MustOpenFile create or append a file, or panic if issue.
// If the Path is a Dir, returns nil.
// The caller is responsible for closing the file
func (p *Path) MustOpenFile(append bool) (file *os.File) {
if p.IsDir() {
return nil
}
var err error
if p.Exists() {
if append {
file, err = fosopenfile(p.path, os.O_APPEND|os.O_WRONLY, 0600)
} else {
err = fosremove(p.path)
}
if err != nil {
panic(err)
}
}
if file == nil {
if file, err = fosopenfile(p.path, os.O_CREATE|os.O_WRONLY, 0600); err != nil {
panic(err)
}
}
return file
}
var ffpabs func(path string) (string, error)
func iffpabs(path string) (string, error) {
return filepath.Abs(path)
}
// Abs returns the absolute path if it can, or nil on error.
// The error is printed on stderr.
// If the path ends with a separator, said separator is preserved.
func (p *Path) Abs() *Path {
	res, err := ffpabs(p.path)
	if err != nil {
		// Typo fixed in the error message: "absollute" -> "absolute".
		fmt.Fprintf(godbg.Err(), "Unable to get full absolute path for '%v'\n%v\n", p.path, err)
		return nil
	}
	// Preserve the folder-marking trailing separator if the input had one.
	if strings.HasSuffix(p.path, string(filepath.Separator)) {
		return NewPathDir(res)
	}
	return NewPath(res)
}
// Dir is filepath.Dir() for Path:
// It returns all but the last element of path, typically the path's directory
// Its result still ends with a file separator
func (p *Path) Dir() *Path {
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
return NewPathDir(filepath.Dir(pp))
}
// Base is filepath.Base():
// It returns the last element of path.
// Trailing path separators are removed before extracting the last element.
func (p *Path) Base() string {
pp := p.path
for strings.HasSuffix(pp, string(filepath.Separator)) {
pp = pp[:len(pp)-1]
}
return filepath.Base(pp)
}
// Dot return a path prefixed with ".\" (dot plus file separator)
// If it already starts with ./, returns the same path
func (p *Path) Dot() *Path {
if strings.HasPrefix(p.path, "."+string(filepath.Separator)) {
return p
}
return NewPath("." + string(filepath.Separator) + p.path)
}
var hasTarRx, _ = regexp.Compile(`\.tar(?:\.[^\.]+)?$`)
// HasTar checks if a file name ends with .tar or .tar.<ext>.
// For example a.tar.gz has tar.
func (p Path) HasTar() bool {
	// MatchString replaces FindAllStringSubmatchIndex + len check: only a
	// boolean match is needed here.
	return hasTarRx.MatchString(p.NoSep().String())
}
// isExt reports whether the full, possibly compound, extension of the path
// equals ext: for "a.tar.gz" the extension considered is ".tar.gz".
func (p *Path) isExt(ext string) bool {
	name := p.NoSep().String()
	full := ""
	for {
		e := filepath.Ext(name)
		if e == "" {
			break
		}
		// Prepend each extension component, peeling it off the name.
		full = e + full
		name = name[:len(name)-len(e)]
	}
	return full == ext
}
// IsTar checks if a path ends with .tar
// For file or folder
func (p *Path) IsTar() bool {
return p.isExt(".tar")
}
// RemoveExtension removes the last extension of the path, if any
// (e.g. "a.tar.gz" => "a.tar").
// Preserves a trailing file separator indicating a folder.
func (p *Path) RemoveExtension() *Path {
	sp := p.NoSep().String()
	ext := filepath.Ext(sp)
	if ext != "" {
		sp = sp[:len(sp)-len(ext)]
	}
	// Restore folder semantics if the original path ended with a separator.
	if p.EndsWithSeparator() {
		return NewPathDir(sp)
	}
	return NewPath(sp)
}
// SetExtTar() add a .tar to the path after removing its current extension
// For file or folder.
// Don't add .tar if, after removing extension, its ends with .tar
// For instance a.tar.gz => a.tar
func (p *Path) SetExtTar() *Path {
return p.setExt(".tar")
}
func (p *Path) setExt(ext string) *Path {
if p.isExt(ext) {
return p
}
p = p.RemoveExtension()
if p.isExt(ext) {
return p
}
if p.EndsWithSeparator() {
return p.AddNoSep(ext).SetDir()
}
return p.AddNoSep(ext)
}
// IsGz checks if a path ends with .gz
// For file or folder
func (p *Path) IsGz() bool {
return p.isExt(".gz")
}
// SetExtGz() add a .gz to the path after removing its current extension
// For file or folder.
// Don't add .gz if, after removing extension, its ends with .gz
// For instance a.gz.xxx => a.gz
func (p *Path) SetExtGz() *Path {
return p.setExt(".gz")
}
// Is7z checks if a path ends with .7z.
// For file or folder.
func (p *Path) Is7z() bool {
	return p.isExt(".7z")
}
// SetExt7z adds a .7z to the path after removing its current extension.
// For file or folder.
// Doesn't add .7z if, after removing the extension, it ends with .7z.
// For instance a.7z.xxx => a.7z.
func (p *Path) SetExt7z() *Path {
	return p.setExt(".7z")
}
func init() {
fstat = ifstat
fosstat = ifosstat
fosmkdirall = ifosmkdirall
fosopenfile = ifosopenfile
ffpabs = iffpabs
}
|
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"path/filepath"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
// overallBlockState summarizes whether this TLF currently has any
// outstanding, unsynced local writes (see GetState).
type overallBlockState int

const (
	// cleanState: no outstanding local writes.
	cleanState overallBlockState = iota
	// dirtyState: there are outstanding local writes that haven't yet been
	// synced.
	dirtyState
)
// blockReqType indicates whether an operation makes block
// modifications or not, which governs locking and block-copying
// behavior in the getters below.
type blockReqType int

const (
	// blockRead: a block read request.
	blockRead blockReqType = iota
	// blockWrite: a block write request (the fetched block may be
	// deep-copied so the caller can safely modify it).
	blockWrite
	// blockReadParallel: a block read request that is happening from a
	// different goroutine than the blockLock rlock holder, using the
	// same lState.
	blockReadParallel
	// blockLookup: we are looking up a block for the purposes of
	// creating a new node in the node cache for it; avoid any unlocks
	// as part of the lookup process.
	blockLookup
)
// mdToCleanIfUnused pairs a metadata revision with the block put
// state that backed it, so the blocks can be cleaned up later if the
// revision turns out to be unused.
type mdToCleanIfUnused struct {
	// md is the (read-only) metadata revision in question.
	md ReadOnlyRootMetadata
	// bps holds the block put state associated with that revision.
	bps *blockPutState
}
// syncInfo tracks the in-progress sync state for a single dirty file:
// the operation being built up, the blocks it references/unreferences,
// and byte accounting for the eventual MD update.
type syncInfo struct {
	// oldInfo is the block info for the file's top block before the sync.
	oldInfo BlockInfo
	// op is the sync operation being accumulated.
	op *syncOp
	// unrefs collects the to-be-unreferenced block infos.
	unrefs []BlockInfo
	// bps is the block put state for this sync.
	bps *blockPutState
	// refBytes/unrefBytes sum the sizes of referenced and
	// unreferenced blocks.
	refBytes   uint64
	unrefBytes uint64
	// toCleanIfUnused lists older MD revisions (and their block put
	// states) to clean up if they end up unused.
	toCleanIfUnused []mdToCleanIfUnused
}
// DeepCopy returns a fully-independent copy of si, so the copy can be
// mutated without affecting the original. The op is round-tripped
// through the codec; slices and nested block put states are cloned.
func (si *syncInfo) DeepCopy(codec kbfscodec.Codec) (*syncInfo, error) {
	newSi := &syncInfo{
		oldInfo:    si.oldInfo,
		refBytes:   si.refBytes,
		unrefBytes: si.unrefBytes,
	}
	newSi.unrefs = make([]BlockInfo, len(si.unrefs))
	copy(newSi.unrefs, si.unrefs)
	if si.bps != nil {
		newSi.bps = si.bps.DeepCopy()
	}
	if si.op != nil {
		// Encode/decode via the codec to get an independent copy of
		// the op, rather than hand-copying its fields.
		err := kbfscodec.Update(codec, &newSi.op, si.op)
		if err != nil {
			return nil, err
		}
	}
	newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
	for i, toClean := range si.toCleanIfUnused {
		// It might be overkill to deep-copy these MDs and bpses,
		// which are probably immutable, but for now let's do the safe
		// thing.
		copyMd, err := toClean.md.deepCopy(codec)
		if err != nil {
			return nil, err
		}
		newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
		newSi.toCleanIfUnused[i].bps = toClean.bps.DeepCopy()
	}
	return newSi, nil
}
// removeReplacedBlock removes ptr from the sync op's list of
// referenced blocks (it is being replaced by a newer version), and
// zeroes out any matching unref pointer while keeping its size
// accounting intact.
func (si *syncInfo) removeReplacedBlock(ctx context.Context,
	log logger.Logger, ptr BlockPointer) {
	for i, ref := range si.op.RefBlocks {
		if ref == ptr {
			log.CDebugf(ctx, "Replacing old ref %v", ptr)
			// Splice the matching ref out of the slice.
			si.op.RefBlocks = append(si.op.RefBlocks[:i],
				si.op.RefBlocks[i+1:]...)
			for j, unref := range si.unrefs {
				if unref.BlockPointer == ptr {
					// Don't completely remove the unref,
					// since it contains size info that we
					// need to incorporate into the MD
					// usage calculations.
					si.unrefs[j].BlockPointer = zeroPtr
				}
			}
			// Only the first matching ref is removed.
			break
		}
	}
}
// mergeUnrefCache pushes all of si's unsynced unref block infos into
// the given metadata's unref list.
func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
	// Pushing the same ptr.ID/RefNonce more than once is harmless:
	// subsequent duplicates should carry a QuotaSize of 0.
	for i := range si.unrefs {
		md.AddUnrefBlock(si.unrefs[i])
	}
}
// deCacheEntry holds the dirty directory-entry state cached for a
// single BlockPointer: the modified entry itself plus the names added
// to and removed from its DirBlock.
type deCacheEntry struct {
	// dirEntry is the dirty directory entry corresponding to the
	// BlockPointer that maps to this struct.
	dirEntry DirEntry
	// adds is a map of the pointers for new entry names that have
	// been added to the DirBlock for the BlockPointer that maps to
	// this struct.
	adds map[string]BlockPointer
	// dels is a set of the names that have been removed from the
	// DirBlock for the BlockPointer that maps to this struct.
	dels map[string]bool
}
// folderBlockOps contains all the fields that must be synchronized by
// blockLock. It will eventually also contain all the methods that
// must be synchronized by blockLock, so that folderBranchOps will
// have no knowledge of blockLock.
//
// -- And now, a primer on tracking dirty bytes --
//
// The DirtyBlockCache tracks the number of bytes that are dirtied
// system-wide, as the number of bytes that haven't yet been synced
// ("unsynced"), and a number of bytes that haven't yet been resolved
// yet because the overall file Sync hasn't finished yet ("total").
// This data helps us decide when we need to block incoming Writes, in
// order to keep memory usage from exploding.
//
// It's the responsibility of folderBlockOps (and its helper struct
// dirtyFile) to update these totals in DirtyBlockCache for the
// individual files within this TLF. This is complicated by a few things:
// * New writes to a file are "deferred" while a Sync is happening, and
// are replayed after the Sync finishes.
// * Syncs can be canceled or error out halfway through syncing the blocks,
// leaving the file in a dirty state until the next Sync.
// * Syncs can fail with a /recoverable/ error, in which case they get
// retried automatically by folderBranchOps. In that case, the retried
// Sync also sucks in any outstanding deferred writes.
//
// With all that in mind, here is the rough breakdown of how this
// bytes-tracking is implemented:
// * On a Write/Truncate to a block, folderBranchOps counts all the
// newly-dirtied bytes in a file as "unsynced". That is, if the block was
// already in the dirty cache (and not already being synced), only
// extensions to the block count as "unsynced" bytes.
// * When a Sync starts, dirtyFile remembers the total of bytes being synced,
// and the size of each block being synced.
// * When each block put finishes successfully, dirtyFile subtracts the size
// of that block from "unsynced".
// * When a Sync finishes successfully, the total sum of bytes in that sync
// are subtracted from the "total" dirty bytes outstanding.
// * If a Sync fails, but some blocks were put successfully, those blocks
// are "re-dirtied", which means they count as unsynced bytes again.
// dirtyFile handles this.
// * When a Write/Truncate is deferred due to an ongoing Sync, its bytes
// still count towards the "unsynced" total. In fact, this essentially
// creates a new copy of those blocks, and the whole size of that block
// (not just the newly-dirtied bytes) count for the total. However,
// when the write gets replayed, folderBlockOps first subtracts those bytes
// from the system-wide numbers, since they are about to be replayed.
// * When a Sync is retried after a recoverable failure, dirtyFile adds
// the newly-dirtied deferred bytes to the system-wide numbers, since they
// are now being assimilated into this Sync.
// * dirtyFile also exposes a concept of "orphaned" blocks. These are child
// blocks being synced that are now referenced via a new, permanent block
// ID from the parent indirect block. This matters for when hard failures
// occur during a Sync -- the blocks will no longer be accessible under
// their previous old pointers, and so dirtyFile needs to know their old
// bytes can be cleaned up now.
type folderBlockOps struct {
	// config provides access to the caches, block ops, codec, etc.
	config Config
	// log records debug output for this folder.
	log logger.Logger
	// folderBranch identifies the TLF and branch these ops apply to.
	folderBranch FolderBranch
	// observers receives change notifications (see folder_branch_ops.go).
	observers *observerList
	// forceSyncChan can be sent on to trigger an immediate
	// Sync(). It is a blocking channel.
	forceSyncChan chan<- struct{}
	// protects access to blocks in this folder and all fields
	// below.
	blockLock blockLock
	// Which files are currently dirty and have dirty blocks that are either
	// currently syncing, or waiting to be sync'd.
	dirtyFiles map[BlockPointer]*dirtyFile
	// For writes and truncates, track the unsynced to-be-unref'd
	// block infos, per-path.
	unrefCache map[BlockRef]*syncInfo
	// For writes and truncates, track the modified (but not yet
	// committed) directory entries. Maps the entry BlockRef to a
	// modified entry.
	deCache map[BlockRef]deCacheEntry
	// Writes and truncates for blocks that were being sync'd, and
	// need to be replayed after the sync finishes on top of the new
	// versions of the blocks.
	deferredWrites []func(context.Context, *lockState, KeyMetadata, path) error
	// Blocks that need to be deleted from the dirty cache before any
	// deferred writes are replayed.
	deferredDirtyDeletes []BlockPointer
	// deferredWaitBytes counts bytes tied up in deferred writes (see
	// the dirty-bytes primer above).
	deferredWaitBytes int64
	// set to true if this write or truncate should be deferred
	doDeferWrite bool
	// nodeCache itself is goroutine-safe, but write/truncate must
	// call PathFromNode() only under blockLock (see nodeCache
	// comments in folder_branch_ops.go).
	nodeCache NodeCache
}
// Only exported methods of folderBlockOps should be used outside of this
// file.
//
// Although, temporarily, folderBranchOps is allowed to reach in and
// manipulate folderBlockOps fields and methods directly.

// id returns the TLF ID of the folder these block ops operate on.
func (fbo *folderBlockOps) id() tlf.ID {
	tlfID := fbo.folderBranch.Tlf
	return tlfID
}
// branch returns the branch these block ops operate on.
func (fbo *folderBlockOps) branch() BranchName {
	branchName := fbo.folderBranch.Branch
	return branchName
}
// GetState returns the overall block state of this TLF: dirtyState if
// any modified directory entries are cached, cleanState otherwise.
func (fbo *folderBlockOps) GetState(lState *lockState) overallBlockState {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Any cached dirty dir entry means there are unsynced local writes.
	if len(fbo.deCache) > 0 {
		return dirtyState
	}
	return cleanState
}
// getCleanEncodedBlockSizeLocked retrieves the encoded size of the
// clean block pointed to by ptr, which must be valid, either from the
// cache or from the server. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
// (The original comment named this "getCleanEncodedBlockHelperLocked";
// fixed to match the function.)
func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	rtype blockReqType) (uint32, error) {
	if rtype != blockReadParallel {
		// Write requests never make sense for a pure size query.
		if rtype == blockWrite {
			panic("Cannot get the size of a block for writing")
		}
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
			"with blockReadParallel")
	}
	if !ptr.IsValid() {
		return 0, InvalidBlockRefError{ptr.Ref()}
	}
	// Fast path: the cached block already knows its encoded size.
	if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
		return block.GetEncodedSize(), nil
	}
	if err := checkDataVersion(fbo.config, path{}, ptr); err != nil {
		return 0, err
	}
	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine. If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	bops := fbo.config.BlockOps()
	var size uint32
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			size, err = bops.GetEncodedSize(ctx, kmd, ptr)
		})
	} else {
		size, err = bops.GetEncodedSize(ctx, kmd, ptr)
	}
	if err != nil {
		return 0, err
	}
	return size, nil
}
// getBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. If
// notifyPath is valid and the block isn't cached, trigger a read
// notification. If `rtype` is `blockReadParallel`, it's assumed that
// some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
//
// This must be called only by get{File,Dir}BlockHelperLocked().
func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	newBlock makeNewBlock, lifetime BlockCacheLifetime, notifyPath path,
	rtype blockReqType) (Block, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getBlockHelperLocked " +
			"with blockReadParallel")
	}
	if !ptr.IsValid() {
		return nil, InvalidBlockRefError{ptr.Ref()}
	}
	// The dirty cache is checked before the clean cache: a dirty
	// block is the newest local version.
	if block, err := fbo.config.DirtyBlockCache().Get(
		fbo.id(), ptr, branch); err == nil {
		return block, nil
	}
	if block, hasPrefetched, lifetime, err :=
		fbo.config.BlockCache().GetWithPrefetch(ptr); err == nil {
		// If the block was cached in the past, we need to handle it as if it's
		// an on-demand request so that its downstream prefetches are triggered
		// correctly according to the new on-demand fetch priority.
		fbo.config.BlockOps().BlockRetriever().CacheAndPrefetch(ctx,
			ptr, block, kmd, defaultOnDemandRequestPriority, lifetime,
			hasPrefetched)
		return block, nil
	}
	if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
		return nil, err
	}
	if notifyPath.isValidForNotification() {
		// Notify start of read now; the deferred call notifies completion.
		fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
		defer fbo.config.Reporter().Notify(ctx,
			readNotification(notifyPath, true))
	}
	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine. If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	// fetch the block, and add to cache
	block := newBlock()
	bops := fbo.config.BlockOps()
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			err = bops.Get(ctx, kmd, ptr, block, lifetime)
		})
	} else {
		err = bops.Get(ctx, kmd, ptr, block, lifetime)
	}
	if err != nil {
		return nil, err
	}
	return block, nil
}
// getFileBlockHelperLocked retrieves the block pointed to by ptr,
// which must be valid, either from an internal cache, the block
// cache, or from the server. An error is returned if the retrieved
// block is not a file block. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// This must be called only by GetFileBlockForReading(),
// getFileBlockLocked(), and getFileLocked().
//
// p is used only when reporting errors and sending read
// notifications, and can be empty.
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (
	*FileBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getFileBlockHelperLocked " +
			"with blockReadParallel")
	}
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewFileBlock, TransientEntry, p, rtype)
	if err != nil {
		return nil, err
	}
	// The pointer must resolve to a *FileBlock; anything else is a
	// corruption/type error surfaced to the caller.
	fblock, ok := block.(*FileBlock)
	if !ok {
		return nil, NotFileBlockError{ptr, branch, p}
	}
	return fblock, nil
}
// GetBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. The
// returned block may have a generic type (not DirBlock or FileBlock).
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointer refers to. The block will not be cached, if it wasn't
// in the cache already (NoCacheEntry).
func (fbo *folderBlockOps) GetBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName) (
	Block, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getBlockHelperLocked(ctx, lState, kmd, ptr, branch,
		NewCommonBlock, NoCacheEntry, path{}, blockRead)
}
// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated. In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to. Any downloaded blocks will not be cached,
// if they weren't in the cache already.
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptrs []BlockPointer,
	ignoreRecoverableForRemovalErrors map[BlockPointer]bool,
	branch BranchName) (uint64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Buffered to len(ptrs) so no goroutine ever blocks on send.
	sumCh := make(chan uint32, len(ptrs))
	eg, groupCtx := errgroup.WithContext(ctx)
	for _, ptr := range ptrs {
		ptr := ptr // capture range variable
		eg.Go(func() error {
			// lState is nil and rtype is blockReadParallel because
			// this goroutine is not the blockLock holder.
			size, err := fbo.getCleanEncodedBlockSizeLocked(groupCtx, nil,
				kmd, ptr, branch, blockReadParallel)
			// TODO: we might be able to recover the size of the
			// top-most block of a removed file using the merged
			// directory entry, the same way we do in
			// `folderBranchOps.unrefEntry`.
			if isRecoverableBlockErrorForRemoval(err) &&
				ignoreRecoverableForRemovalErrors[ptr] {
				fbo.log.CDebugf(groupCtx, "Hit an ignorable, recoverable "+
					"error for block %v: %v", ptr, err)
				return nil
			}
			if err != nil {
				return err
			}
			sumCh <- size
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	// All senders are done; close so the range below terminates.
	close(sumCh)
	var sum uint64
	for size := range sumCh {
		sum += uint64(size)
	}
	return sum, nil
}
// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// If `rtype` is `blockReadParallel`, it's assumed that some
// coordinating goroutine is holding the correct locks, and in that
// case `lState` must be `nil`.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (*DirBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		// Consistency with getBlockHelperLocked and
		// getFileBlockHelperLocked: a parallel read must not carry a
		// lock state, since this goroutine isn't the lock holder.
		panic("Non-nil lState passed to getDirBlockHelperLocked " +
			"with blockReadParallel")
	}
	// Pass in an empty notify path because notifications should only
	// trigger for file reads.
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewDirBlock, TransientEntry, path{}, rtype)
	if err != nil {
		return nil, err
	}
	// The pointer must resolve to a *DirBlock; anything else is a
	// corruption/type error surfaced to the caller.
	dblock, ok := block.(*DirBlock)
	if !ok {
		return nil, NotDirBlockError{ptr, branch, p}
	}
	return dblock, nil
}
// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*FileBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}
// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}
// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it. A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it. If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead. (This
// differs from getDirLocked.) This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite. If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	file path, rtype blockReqType) (
	fblock *FileBlock, wasDirty bool, err error) {
	// Enforce the per-rtype lock contract described above.
	switch rtype {
	case blockRead:
		fbo.blockLock.AssertRLocked(lState)
	case blockWrite:
		fbo.blockLock.AssertLocked(lState)
	case blockReadParallel:
		// This goroutine might not be the official lock holder, so
		// don't make any assertions.
		if lState != nil {
			panic("Non-nil lState passed to getFileBlockLocked " +
				"with blockReadParallel")
		}
	case blockLookup:
		panic("blockLookup should only be used for directory blocks")
	default:
		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
	}
	fblock, err = fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, file.Branch, file, rtype)
	if err != nil {
		return nil, false, err
	}
	wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
	if rtype == blockWrite {
		// Copy the block if it's for writing, and either the
		// block is not yet dirty or the block is currently
		// being sync'd and needs a copy even though it's
		// already dirty.
		df := fbo.dirtyFiles[file.tailPointer()]
		if !wasDirty || (df != nil && df.blockNeedsCopy(ptr)) {
			fblock = fblock.DeepCopy()
		}
	}
	return fblock, wasDirty, nil
}
// getFileLocked is getFileBlockLocked called with file.tailPointer(),
// discarding the was-dirty result.
func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path,
	rtype blockReqType) (*FileBlock, error) {
	// Callers should have already validated the path, but re-check
	// defensively since an invalid path has no usable tail pointer.
	if !file.isValid() {
		return nil, InvalidPathError{file}
	}
	block, _, err := fbo.getFileBlockLocked(
		ctx, lState, kmd, file.tailPointer(), file, rtype)
	if err != nil {
		return nil, err
	}
	return block, nil
}
// GetIndirectFileBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given file. If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) ([]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfos(ctx)
}
// GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
// for all indirect blocks of the given file, starting from the given
// top-most block. If the returned error is a recoverable one (as
// determined by isRecoverableBlockErrorForRemoval), the returned list
// may still be non-empty, and holds all the BlockInfos for all found
// indirect blocks. (This will be relevant when we handle multiple
// levels of indirection.)
func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	topBlock *FileBlock) (
	[]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
}
// DeepCopyFile makes a complete copy of the given file, deduping leaf
// blocks and making new random BlockPointers for all indirect blocks.
// It returns the new top pointer of the copy, and all the new child
// pointers in the copy. It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored.
func (fbo *folderBlockOps) DeepCopyFile(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dirtyBcache DirtyBlockCache, dataVer DataVer) (
	newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error) {
	// Deep copying doesn't alter any data in use, it only makes copy,
	// so only a read lock is needed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// The copier needs the current user's UID (unlike pure reads above).
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return BlockPointer{}, nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.deepCopy(ctx, dataVer)
}
// UndupChildrenInCopy un-dedups the children of the given file copy,
// using the provided block put state and dirty cache, and returns the
// resulting block infos. NOTE(review): takes the write lock, unlike
// ReadyNonLeafBlocksInCopy — presumably because undup mutates state;
// confirm against fileData.undupChildrenInCopy.
func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.undupChildrenInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}
// ReadyNonLeafBlocksInCopy readies the non-leaf blocks of the given
// file copy (starting from topBlock), recording them in the given
// block put state, and returns the resulting block infos.
func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.readyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}
// getDirLocked retrieves the block pointed to by the tail pointer of
// the given path, which must be valid, either from the cache or from
// the server. An error is returned if the retrieved block is not a
// dir block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
//
// Note that blockLock must be either r-locked or locked, but
// independently of rtype. (This differs from getFileLocked and
// getFileBlockLocked.) File write operations (which lock blockLock)
// don't need a copy of parent dir blocks, and non-file write
// operations do need to copy dir blocks for modifications.
func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
	*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !dir.isValid() {
		return nil, InvalidPathError{dir}
	}
	// Get the block for the last element in the path.
	dblock, err := fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, dir.tailPointer(), dir.Branch, dir, rtype)
	if err != nil {
		return nil, err
	}
	if rtype == blockWrite && !fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), dir.tailPointer(), dir.Branch) {
		// Copy the block if it's for writing and the block is
		// not yet dirty.
		dblock = dblock.DeepCopy()
	}
	return dblock, nil
}
// GetDir retrieves the block pointed to by the tail pointer of the
// given path, which must be valid, either from the cache or from the
// server. An error is returned if the retrieved block is not a dir
// block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
func (fbo *folderBlockOps) GetDir(
	ctx context.Context, lState *lockState, kmd KeyMetadata, dir path,
	rtype blockReqType) (*DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
}
// addDirEntryInCacheLocked records, in the deCache entry for dir's
// tail pointer, that newName now maps to newDe's block pointer. Any
// pending cached deletion of the same name is canceled. blockLock
// must be held for writing.
func (fbo *folderBlockOps) addDirEntryInCacheLocked(lState *lockState, dir path,
	newName string, newDe DirEntry) {
	fbo.blockLock.AssertLocked(lState)
	cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
	if cacheEntry.adds == nil {
		cacheEntry.adds = make(map[string]BlockPointer)
	}
	cacheEntry.adds[newName] = newDe.BlockPointer
	// In case it was removed in the cache but not flushed yet.
	// (delete on a nil map is a no-op, so no nil check is needed.)
	delete(cacheEntry.dels, newName)
	fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// AddDirEntryInCache locks blockLock and records that newName was
// added to dir with entry newDe; if newDe has an initialized block
// pointer, the entry itself is cached under that pointer as well.
func (fbo *folderBlockOps) AddDirEntryInCache(lState *lockState, dir path,
	newName string, newDe DirEntry) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, dir, newName, newDe)
	// Add target dir entry as well.
	if newDe.IsInitialized() {
		cacheEntry := fbo.deCache[newDe.BlockPointer.Ref()]
		cacheEntry.dirEntry = newDe
		fbo.deCache[newDe.BlockPointer.Ref()] = cacheEntry
	}
}
// removeDirEntryInCacheLocked records, in the deCache entry for dir's
// tail pointer, that oldName was removed. Any pending cached addition
// of the same name is canceled. blockLock must be held for writing.
func (fbo *folderBlockOps) removeDirEntryInCacheLocked(lState *lockState,
	dir path, oldName string) {
	fbo.blockLock.AssertLocked(lState)
	cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
	if cacheEntry.dels == nil {
		cacheEntry.dels = make(map[string]bool)
	}
	cacheEntry.dels[oldName] = true
	// In case it was added in the cache but not flushed yet.
	// (delete on a nil map is a no-op, so no nil check is needed.)
	delete(cacheEntry.adds, oldName)
	fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// RemoveDirEntryInCache locks blockLock and records that oldName was
// removed from dir.
func (fbo *folderBlockOps) RemoveDirEntryInCache(lState *lockState, dir path,
	oldName string) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.removeDirEntryInCacheLocked(lState, dir, oldName)
}
// RenameDirEntryInCache updates the cached directory state for a
// rename: newName is added under newParent and oldName is removed
// from oldParent. If the entry's block already has an initialized
// cached dirEntry, only its Ctime is refreshed; otherwise the whole
// newDe is cached and deleteTargetDirEntry is returned true so the
// caller knows the cached entry was newly created (presumably so it
// can be cleaned up later — confirm against callers).
func (fbo *folderBlockOps) RenameDirEntryInCache(lState *lockState,
	oldParent path, oldName string, newParent path, newName string,
	newDe DirEntry) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, newParent, newName, newDe)
	fbo.removeDirEntryInCacheLocked(lState, oldParent, oldName)
	// If there's already an entry for the target, only update the
	// Ctime on a rename.
	cacheEntry, ok := fbo.deCache[newDe.BlockPointer.Ref()]
	if ok && cacheEntry.dirEntry.IsInitialized() {
		cacheEntry.dirEntry.Ctime = newDe.Ctime
	} else {
		cacheEntry.dirEntry = newDe
		deleteTargetDirEntry = true
	}
	// A single write-back covers both branches. (The previous version
	// also wrote the map inside the else branch, which was redundant —
	// that value was immediately overwritten here.)
	fbo.deCache[newDe.BlockPointer.Ref()] = cacheEntry
	return deleteTargetDirEntry
}
// setCachedAttrLocked applies the given attribute change from
// realEntry to the cached dir entry for ref. If no entry is cached
// and doCreate is false, nothing happens; if doCreate is true, a new
// cache entry is seeded from realEntry first. Ctime is always updated
// along with the changed attribute. blockLock must be held for
// writing.
func (fbo *folderBlockOps) setCachedAttrLocked(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.AssertLocked(lState)
	fileEntry, ok := fbo.deCache[ref]
	if !ok {
		if !doCreate {
			return
		}
		// Seed the new cache entry from the real entry before
		// applying the attribute change below.
		fileEntry.dirEntry = *realEntry
	}
	switch attr {
	case exAttr:
		fileEntry.dirEntry.Type = realEntry.Type
	case mtimeAttr:
		fileEntry.dirEntry.Mtime = realEntry.Mtime
	}
	fileEntry.dirEntry.Ctime = realEntry.Ctime
	fbo.deCache[ref] = fileEntry
}
// SetAttrInDirEntryInCache applies the given attribute change to the
// cached dir entry for newDe's ref, creating the cache entry if
// needed. It returns true when the entry had to be created (it wasn't
// cached before), so the caller knows to delete it later.
// (The original comment here was a copy-paste of the rename comment;
// replaced with one matching this method's behavior.)
func (fbo *folderBlockOps) SetAttrInDirEntryInCache(lState *lockState,
	newDe DirEntry, attr attrChange) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	// Record whether the entry exists before setCachedAttrLocked
	// creates it (doCreate=true below).
	_, ok := fbo.deCache[newDe.Ref()]
	if !ok {
		deleteTargetDirEntry = true
	}
	fbo.setCachedAttrLocked(lState, newDe.Ref(), attr, &newDe, true)
	return deleteTargetDirEntry
}
// ClearCachedAddsAndRemoves drops any cached entry additions and
// removals for the given directory.  If the directory's cache entry
// holds no dirEntry of its own, the whole entry is deleted;
// otherwise only the adds/dels maps are cleared.
func (fbo *folderBlockOps) ClearCachedAddsAndRemoves(
	lState *lockState, dir path) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	cacheEntry, ok := fbo.deCache[dir.tailPointer().Ref()]
	if !ok {
		return
	}
	// If there's no dirEntry, we can just delete the whole thing.
	if !cacheEntry.dirEntry.IsInitialized() {
		delete(fbo.deCache, dir.tailPointer().Ref())
		return
	}
	// Otherwise just nil out the adds and dels.
	cacheEntry.adds = nil
	cacheEntry.dels = nil
	fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// updateWithDirtyEntriesLocked checks if the given DirBlock has any
// entries that are in deCache (i.e., entries pointing to dirty
// files). If so, it makes a copy with all such entries replaced with
// the ones in deCache and returns it. If not, it just returns the
// given one.
func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context,
	lState *lockState, dir path, block *DirBlock) (*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	// See if this directory has any outstanding writes/truncates that
	// require an updated DirEntry.
	//
	// Save some time for the common case of having no dirty files.
	if len(fbo.deCache) == 0 {
		return block, nil
	}
	// dblockCopy is allocated lazily, on the first change needed, so
	// the original block is returned untouched when nothing applies.
	var dblockCopy *DirBlock
	dirCacheEntry := fbo.deCache[dir.tailPointer().Ref()]
	// TODO: We should get rid of deCache completely and use only
	// DirtyBlockCache to store the dirtied version of the DirBlock.
	// We can't do that yet, because there might be multiple
	// outstanding dirty files in one directory, and the KBFSOps API
	// allows for syncing one at a time, so keeping a single dirtied
	// DirBlock would accidentally sync the DirEntry of file A when a
	// sync of file B is requested.
	//
	// Soon a sync will sync everything that's dirty at once, and so
	// we can remove deCache at that point. Until then, we must
	// incrementally build it up each time.
	// Add cached additions to the copy.
	for k, ptr := range dirCacheEntry.adds {
		de, ok := fbo.deCache[ptr.Ref()]
		if !ok {
			return nil, fmt.Errorf("No cached dir entry found for new entry "+
				"%s in dir %s (%v)", k, dir, dir.tailPointer())
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		dblockCopy.Children[k] = de.dirEntry
	}
	// Remove cached removals from the copy.  (Bug fix: this loop
	// previously ranged over dirCacheEntry.adds, so cached removals
	// were never actually applied to the returned block.)
	for k := range dirCacheEntry.dels {
		_, ok := block.Children[k]
		if !ok {
			continue
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		delete(dblockCopy.Children, k)
	}
	// Update dir entries for any modified files.
	for k, v := range block.Children {
		de, ok := fbo.deCache[v.Ref()]
		if !ok {
			continue
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		dblockCopy.Children[k] = de.dirEntry
	}
	// No changes were needed; hand back the original block.
	if dblockCopy == nil {
		return block, nil
	}
	return dblockCopy, nil
}
// getDirtyDirLocked composes getDirLocked and
// updateWithDirtyEntriesLocked. Note that a dirty dir means that it
// has entries possibly pointing to dirty files, not that it's dirty
// itself.  blockLock must be held (read or write).
func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
	*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	dblock, err := fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
	if err != nil {
		return nil, err
	}
	return fbo.updateWithDirtyEntriesLocked(ctx, lState, dir, dblock)
}
// GetDirtyDirChildren returns a map of EntryInfos for the (possibly
// dirty) children entries of the given directory.
func (fbo *folderBlockOps) GetDirtyDirChildren(
	ctx context.Context, lState *lockState, kmd KeyMetadata, dir path) (
	map[string]EntryInfo, error) {
	// Fetch the (possibly dirty) dir block under the read lock; the
	// closure scopes the lock to just the fetch.
	dblock, err := func() (*DirBlock, error) {
		fbo.blockLock.RLock(lState)
		defer fbo.blockLock.RUnlock(lState)
		return fbo.getDirtyDirLocked(ctx, lState, kmd, dir, blockRead)
	}()
	if err != nil {
		return nil, err
	}
	// Project each child's DirEntry down to its EntryInfo.
	children := make(map[string]EntryInfo)
	for name, entry := range dblock.Children {
		children[name] = entry.EntryInfo
	}
	return children, nil
}
// getDirtyParentAndEntryLocked returns the (possibly dirty) parent
// DirBlock of the given file, and the file's DirEntry within it.
// file must have a valid parent.  blockLock must be held.
func (fbo *folderBlockOps) getDirtyParentAndEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	if !file.hasValidParent() {
		return nil, DirEntry{}, InvalidParentPathError{file}
	}
	parentPath := file.parentPath()
	dblock, err := fbo.getDirtyDirLocked(
		ctx, lState, kmd, *parentPath, rtype)
	if err != nil {
		return nil, DirEntry{}, err
	}
	// make sure it exists
	name := file.tailName()
	de, ok := dblock.Children[name]
	if !ok {
		return nil, DirEntry{}, NoSuchNameError{name}
	}
	return dblock, de, err
}
// GetDirtyParentAndEntry returns a copy of the parent DirBlock
// (suitable for modification) of the given file, which may contain
// entries pointing to other dirty files, and its possibly-dirty
// DirEntry in that directory. file must have a valid parent. Use
// GetDirtyEntry() if you only need the DirEntry.
func (fbo *folderBlockOps) GetDirtyParentAndEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// blockWrite so the returned block is a modifiable copy.
	return fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockWrite)
}
// getDirtyEntryLocked returns the possibly-dirty DirEntry of the
// given file in its parent DirBlock. file must have a valid parent.
// blockLock must be held.
func (fbo *folderBlockOps) getDirtyEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) {
	// TODO: Since we only need a single DirEntry, avoid having to
	// look up every entry in the DirBlock.
	_, de, err := fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockLookup)
	return de, err
}
// GetDirtyEntry returns the possibly-dirty DirEntry of the given file
// in its parent DirBlock. file must have a valid parent.
func (fbo *folderBlockOps) GetDirtyEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
}
// Lookup returns the possibly-dirty DirEntry of the given file in its
// parent DirBlock, and a Node for the file if it exists. It has to
// do all of this under the block lock to avoid races with
// UpdatePointers.
//
// For symlinks, the returned Node is nil (symlinks have no block of
// their own to point a Node at).
func (fbo *folderBlockOps) Lookup(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir Node, name string) (Node, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	dirPath := fbo.nodeCache.PathFromNode(dir)
	if !dirPath.isValid() {
		return nil, DirEntry{}, InvalidPathError{dirPath}
	}
	childPath := dirPath.ChildPathNoPtr(name)
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, DirEntry{}, err
	}
	if de.Type == Sym {
		// Symlinks are returned entry-only, with no Node.
		return nil, de, nil
	}
	// Make sure this client understands the entry's data version
	// before handing out a usable Node for it.
	err = checkDataVersion(fbo.config, childPath, de.BlockPointer)
	if err != nil {
		return nil, DirEntry{}, err
	}
	node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
	if err != nil {
		return nil, DirEntry{}, err
	}
	return node, de, nil
}
// getOrCreateDirtyFileLocked returns the dirtyFile tracking state for
// the given file, creating and registering one if it doesn't exist
// yet.  blockLock must be held.
func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(lState *lockState,
	file path) *dirtyFile {
	fbo.blockLock.AssertLocked(lState)
	tailPtr := file.tailPointer()
	if existing := fbo.dirtyFiles[tailPtr]; existing != nil {
		return existing
	}
	created := newDirtyFile(file, fbo.config.DirtyBlockCache())
	fbo.dirtyFiles[tailPtr] = created
	return created
}
// cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
// does so if the block isn't already marked as dirty in the cache.
// This is useful when operating on a dirty copy of a block that may
// already be in the cache.
func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
	lState *lockState, ptr BlockPointer, file path, block Block) error {
	fbo.blockLock.AssertLocked(lState)
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	needsCaching, isSyncing := df.setBlockDirty(ptr)
	if needsCaching {
		err := fbo.config.DirtyBlockCache().Put(fbo.id(), ptr, file.Branch,
			block)
		if err != nil {
			return err
		}
	}
	// If the block is being synced right now, the write that dirtied
	// it will need to be replayed after the sync finishes.
	if isSyncing {
		fbo.doDeferWrite = true
	}
	return nil
}
// getOrCreateSyncInfoLocked returns the syncInfo for the given entry,
// creating one (with a fresh syncOp and the entry's current
// BlockInfo) if none is cached yet in unrefCache.  blockLock must be
// held.
func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
	lState *lockState, de DirEntry) (*syncInfo, error) {
	fbo.blockLock.AssertLocked(lState)
	ref := de.Ref()
	si, ok := fbo.unrefCache[ref]
	if !ok {
		so, err := newSyncOp(de.BlockPointer)
		if err != nil {
			return nil, err
		}
		si = &syncInfo{
			oldInfo: de.BlockInfo,
			op:      so,
		}
		fbo.unrefCache[ref] = si
	}
	return si, nil
}
// GetDirtyRefs returns the references of all blocks currently
// tracked as dirty in the deCache.
func (fbo *folderBlockOps) GetDirtyRefs(lState *lockState) []BlockRef {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Collect every cached ref; remains nil when the cache is empty.
	var refs []BlockRef
	for dirtyRef := range fbo.deCache {
		refs = append(refs, dirtyRef)
	}
	return refs
}
// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
// failed with a recoverable block error on a multi-block file. It
// makes sure that any outstanding dirty versions of the file are
// fixed up to reflect the fact that some of the indirect pointers now
// need to change.
//
// redirtyOnRecoverableError maps each new block pointer created
// during the failed sync to the old pointer it replaced.
func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
	ctx context.Context, lState *lockState, file path, kmd KeyMetadata,
	redirtyOnRecoverableError map[BlockPointer]BlockPointer) {
	fbo.blockLock.AssertLocked(lState)
	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		// Un-orphan old blocks, since we are reverting back to the
		// previous state.
		for _, oldPtr := range redirtyOnRecoverableError {
			fbo.log.CDebugf(ctx, "Un-orphaning %v", oldPtr)
			df.setBlockOrphaned(oldPtr, false)
		}
	}
	dirtyBcache := fbo.config.DirtyBlockCache()
	topBlock, err := dirtyBcache.Get(fbo.id(), file.tailPointer(), fbo.branch())
	fblock, ok := topBlock.(*FileBlock)
	// Recovery is best-effort: if we can't find the dirty top block,
	// just log and bail rather than failing the caller.
	if err != nil || !ok {
		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
			"top-block for %v: %v", file.tailPointer(), err)
		return
	}
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
		return
	}
	fd := fbo.newFileData(lState, file, session.UID, kmd)
	// If a copy of the top indirect block was made, we need to
	// redirty all the sync'd blocks under their new IDs, so that
	// future syncs will know they failed.
	newPtrs := make(map[BlockPointer]bool, len(redirtyOnRecoverableError))
	for newPtr := range redirtyOnRecoverableError {
		newPtrs[newPtr] = true
	}
	found, err := fd.findIPtrsAndClearSize(ctx, fblock, newPtrs)
	if err != nil {
		fbo.log.CWarningf(
			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
		return
	}
	for newPtr, oldPtr := range redirtyOnRecoverableError {
		// Only re-dirty pointers actually present in the file's
		// indirect block tree.
		if !found[newPtr] {
			continue
		}
		fbo.log.CDebugf(ctx, "Re-dirtying %v (and deleting dirty block %v)",
			newPtr, oldPtr)
		// These blocks would have been permanent, so they're
		// definitely still in the cache.
		b, err := fbo.config.BlockCache().Get(newPtr)
		if err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
			continue
		}
		if err = fbo.cacheBlockIfNotYetDirtyLocked(
			lState, newPtr, file, b); err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
		}
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v after recoverable error",
			oldPtr)
		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
		if err != nil {
			fbo.log.CDebugf(ctx, "Couldn't del-dirty %v: %v", oldPtr, err)
		}
	}
}
// nowUnixNano returns the current time, from the config's Clock, in
// Unix nanoseconds.
func (fbo *folderBlockOps) nowUnixNano() int64 {
	return fbo.config.Clock().Now().UnixNano()
}
// PrepRename prepares the given rename operation. It returns copies
// of the old and new parent block (which may be the same), what is to
// be the new DirEntry, and a local block cache. It also modifies md,
// which must be a copy.
func (fbo *folderBlockOps) PrepRename(
	ctx context.Context, lState *lockState, md *RootMetadata,
	oldParent path, oldName string, newParent path, newName string) (
	oldPBlock, newPBlock *DirBlock, newDe DirEntry, lbc localBcache,
	err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// look up in the old path
	oldPBlock, err = fbo.getDirLocked(
		ctx, lState, md, oldParent, blockWrite)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	newDe, ok := oldPBlock.Children[oldName]
	// does the name exist?
	if !ok {
		return nil, nil, DirEntry{}, nil, NoSuchNameError{oldName}
	}
	ro, err := newRenameOp(oldName, oldParent.tailPointer(), newName,
		newParent.tailPointer(), newDe.BlockPointer, newDe.Type)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	// A renameOp doesn't have a single path to represent it, so we
	// can't call setFinalPath here unfortunately. That means any
	// rename may force a manual paths population at other layers
	// (e.g., for journal statuses). TODO: allow a way to set more
	// than one final path for renameOps?
	md.AddOp(ro)
	lbc = make(localBcache)
	// TODO: Write a SameBlock() function that can deal properly with
	// dedup'd blocks that share an ID but can be updated separately.
	if oldParent.tailPointer().ID == newParent.tailPointer().ID {
		// Same-directory rename: one block serves as both parents.
		newPBlock = oldPBlock
	} else {
		newPBlock, err = fbo.getDirLocked(
			ctx, lState, md, newParent, blockWrite)
		if err != nil {
			return nil, nil, DirEntry{}, nil, err
		}
		now := fbo.nowUnixNano()
		oldGrandparent := *oldParent.parentPath()
		if len(oldGrandparent.path) > 0 {
			// Update the old parent's mtime/ctime, unless the
			// oldGrandparent is the same as newParent (in which
			// case, the syncBlockAndCheckEmbedLocked call by the
			// caller will take care of it).
			if oldGrandparent.tailPointer().ID != newParent.tailPointer().ID {
				b, err := fbo.getDirLocked(ctx, lState, md, oldGrandparent, blockWrite)
				if err != nil {
					return nil, nil, DirEntry{}, nil, err
				}
				if de, ok := b.Children[oldParent.tailName()]; ok {
					de.Ctime = now
					de.Mtime = now
					b.Children[oldParent.tailName()] = de
					// Put this block back into the local cache as dirty
					lbc[oldGrandparent.tailPointer()] = b
				}
			}
		} else {
			// The old parent is the TLF root; update times on the
			// root dir entry in the metadata directly.
			md.data.Dir.Ctime = now
			md.data.Dir.Mtime = now
		}
	}
	return oldPBlock, newPBlock, newDe, lbc, nil
}
// newFileData constructs a fileData accessor for the given file,
// backed by getFileBlockLocked for reads and
// cacheBlockIfNotYetDirtyLocked for writes.  blockLock must be held
// (read or write) while the returned fileData is used.
func (fbo *folderBlockOps) newFileData(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			lState := lState
			// Parallel reads happen off the lock; signal that by
			// passing a nil lState.
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return fbo.cacheBlockIfNotYetDirtyLocked(
				lState, ptr, file, block)
		}, fbo.log)
}
// newFileDataWithCache is like newFileData, but reads check the given
// dirty block cache first, and writes go directly into that cache
// (rather than through cacheBlockIfNotYetDirtyLocked).
func (fbo *folderBlockOps) newFileDataWithCache(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata,
	dirtyBcache DirtyBlockCache) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			// Prefer the dirty version of the block, if any.
			block, err := dirtyBcache.Get(file.Tlf, ptr, file.Branch)
			if fblock, ok := block.(*FileBlock); ok && err == nil {
				return fblock, true, nil
			}
			lState := lState
			// Parallel reads happen off the lock; signal that by
			// passing a nil lState.
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return dirtyBcache.Put(file.Tlf, ptr, file.Branch, block)
		}, fbo.log)
}
// Read reads from the given file into the given buffer at the given
// offset. It returns the number of bytes read and nil, or 0 and the
// error if there was one.
func (fbo *folderBlockOps) Read(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dest []byte, off int64) (int64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	fbo.log.CDebugf(ctx, "Reading from %v", file.tailPointer())
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.read(ctx, dest, off)
}
// maybeWaitOnDeferredWrites blocks until the dirty block cache grants
// permission to dirty more data (signaled on c), while also watching
// for sync errors reported against the given file.  It returns nil
// once unblocked, or the first error received from the file's error
// listener.
func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
	ctx context.Context, lState *lockState, file Node,
	c DirtyPermChan) error {
	var errListener chan error
	err := func() error {
		fbo.blockLock.Lock(lState)
		defer fbo.blockLock.Unlock(lState)
		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
		if err != nil {
			return err
		}
		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
		// Buffered so the sender never blocks on us.
		errListener = make(chan error, 1)
		df.addErrListener(errListener)
		return nil
	}()
	if err != nil {
		return err
	}
	logTimer := time.After(100 * time.Millisecond)
	doLogUnblocked := false
	for {
		select {
		case <-c:
			if doLogUnblocked {
				fbo.log.CDebugf(ctx, "Write unblocked")
			}
			// Make sure there aren't any queued errors.
			select {
			case err := <-errListener:
				return err
			default:
			}
			return nil
		case <-logTimer:
			// Print a log message once if it's taking too long.
			fbo.log.CDebugf(ctx,
				"Blocking a write because of a full dirty buffer")
			doLogUnblocked = true
		case err := <-errListener:
			// XXX: should we ignore non-fatal errors (like
			// context.Canceled), or errors that are specific only to
			// some other file being sync'd (e.g., "recoverable" block
			// errors from which we couldn't recover)?
			return err
		}
	}
}
// pathFromNodeForBlockWriteLocked resolves a Node to its path for a
// block-write operation, returning InvalidPathError when the node's
// cached path is no longer valid.  blockLock must be held.
func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.blockLock.AssertLocked(lState)
	nodePath := fbo.nodeCache.PathFromNode(n)
	if nodePath.isValid() {
		return nodePath, nil
	}
	return path{}, InvalidPathError{nodePath}
}
// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc and returns the file's top
// block (for writing) along with the current session's UID.
func (fbo *folderBlockOps) writeGetFileLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*FileBlock, keybase1.UID, error) {
	fbo.blockLock.AssertLocked(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, "", err
	}
	// Refuse the write up front if the current user isn't a writer
	// of this TLF.
	if !kmd.GetTlfHandle().IsWriter(session.UID) {
		return nil, "", NewWriteAccessError(kmd.GetTlfHandle(),
			session.Name, file.String())
	}
	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, blockWrite)
	if err != nil {
		return nil, "", err
	}
	return fblock, session.UID, nil
}
// writeDataLocked writes data to the file at the given offset.
// Returns the set of blocks dirtied during this write that might need
// to be cleaned up if the write is deferred, along with the latest
// WriteRange and the number of newly-dirtied child bytes.  blockLock
// must be held for writing.
func (fbo *folderBlockOps) writeDataLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	data []byte, off int64) (latestWrite WriteRange, dirtyPtrs []BlockPointer,
	newlyDirtiedChildBytes int64, err error) {
	// Bracket the write as a dirty op for the journal, if one exists.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}
	fbo.blockLock.AssertLocked(lState)
	fbo.log.CDebugf(ctx, "writeDataLocked on file pointer %v",
		file.tailPointer())
	defer func() {
		fbo.log.CDebugf(ctx, "writeDataLocked done: %v", err)
	}()
	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	fd := fbo.newFileData(lState, file, uid, kmd)
	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	defer func() {
		// Always update unsynced bytes and potentially force a sync,
		// even on an error, since the previously-dirty bytes stay in
		// the cache.
		df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
		if dirtyBcache.ShouldForceSync(fbo.id()) {
			select {
			// If we can't send on the channel, that means a sync is
			// already in progress.
			case fbo.forceSyncChan <- struct{}{}:
				fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
			default:
			}
		}
	}()
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	if de.BlockPointer != file.tailPointer() {
		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
			"%v vs %v", de.BlockPointer, file.tailPointer())
	}
	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
		fd.write(ctx, data, off, fblock, de, df)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return WriteRange{}, nil, newlyDirtiedChildBytes, err
	}
	// Put it in the `deCache` even if the size didn't change, since
	// the `deCache` is used to determine whether there are any dirty
	// files. TODO: combine `deCache` with `dirtyFiles` and
	// `unrefCache`.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry
	if fbo.doDeferWrite {
		df.addDeferredNewBytes(bytesExtended)
	}
	latestWrite = si.op.addWrite(uint64(off), uint64(len(data)))
	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Write writes the given data to the given file. May block if there
// is too much unflushed data; in that case, it will be unblocked by a
// future sync.
func (fbo *folderBlockOps) Write(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, data []byte, off int64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
	// bound.
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(len(data)))
	if err != nil {
		return err
	}
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(len(data)), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}
	// doDeferWrite is per-operation state set by lower layers; always
	// reset it on the way out.
	defer func() {
		fbo.doDeferWrite = false
	}()
	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
		ctx, lState, kmd, filePath, data, off)
	if err != nil {
		return err
	}
	fbo.observers.localChange(ctx, file, latestWrite)
	if fbo.doDeferWrite {
		// There's an ongoing sync, and this write altered dirty
		// blocks that are in the process of syncing. So, we have to
		// redo this write once the sync is complete, using the new
		// file path.
		//
		// There is probably a less terrible way of doing this that
		// doesn't involve so much copying and rewriting, but this is
		// the most obviously correct way.
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)
		fbo.log.CDebugf(ctx, "Deferring a write to file %v off=%d len=%d",
			filePath.tailPointer(), off, len(data))
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)
				// Write the data again. We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err = fbo.writeDataLocked(
					ctx, lState, kmd, f, dataCopy, off)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}
	return nil
}
// truncateExtendLocked is called by truncateLocked to extend a file and
// creates a hole.  It returns the latest WriteRange and the pointers
// dirtied by the extension.  blockLock must be held for writing.
func (fbo *folderBlockOps) truncateExtendLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64, parentBlocks []parentBlockAndChildIndex) (
	WriteRange, []BlockPointer, error) {
	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}
	fd := fbo.newFileData(lState, file, uid, kmd)
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	newDe, dirtyPtrs, err := fd.truncateExtend(
		ctx, size, fblock, parentBlocks, de, df)
	if err != nil {
		return WriteRange{}, nil, err
	}
	// Record the updated entry in the deCache so the file shows as
	// dirty.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry
	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, err
	}
	latestWrite := si.op.addTruncate(size)
	if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
		select {
		// If we can't send on the channel, that means a sync is
		// already in progress
		case fbo.forceSyncChan <- struct{}{}:
			fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
		default:
		}
	}
	fbo.log.CDebugf(ctx, "truncateExtendLocked: done")
	return latestWrite, dirtyPtrs, nil
}
// truncateExtendCutoffPoint is the amount of data in extending
// truncate that will trigger the extending with a hole algorithm
// (128 KiB).
const truncateExtendCutoffPoint = 128 * 1024
// truncateLocked truncates or extends the file to the given size.
// Returns the set of newly-ID'd blocks created during this truncate
// that might need to be cleaned up if the truncate is deferred.  A
// nil WriteRange means the size was already correct and nothing was
// done.  blockLock must be held for writing.
func (fbo *folderBlockOps) truncateLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64) (*WriteRange, []BlockPointer, int64, error) {
	// Bracket the truncate as a dirty op for the journal, if one
	// exists.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}
	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}
	fd := fbo.newFileData(lState, file, uid, kmd)
	// find the block where the file should now end
	iSize := int64(size) // TODO: deal with overflow
	_, parentBlocks, block, nextBlockOff, startOff, _, err :=
		fd.getFileBlockAtOffset(ctx, fblock, iSize, blockWrite)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}
	currLen := int64(startOff) + int64(len(block.Contents))
	if currLen+truncateExtendCutoffPoint < iSize {
		// Extending far past the current end: punch a hole instead
		// of writing zeroes.
		latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
			ctx, lState, kmd, file, uint64(iSize), parentBlocks)
		if err != nil {
			return &latestWrite, dirtyPtrs, 0, err
		}
		return &latestWrite, dirtyPtrs, 0, err
	} else if currLen < iSize {
		// Extending by a small amount: write explicit zero bytes.
		moreNeeded := iSize - currLen
		latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
			fbo.writeDataLocked(ctx, lState, kmd, file,
				make([]byte, moreNeeded, moreNeeded), currLen)
		if err != nil {
			return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
		}
		return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
	} else if currLen == iSize && nextBlockOff < 0 {
		// same size!
		return nil, nil, 0, nil
	}
	// Otherwise we are shrinking the file.
	// update the local entry size
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return nil, nil, 0, err
	}
	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return nil, nil, 0, err
	}
	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.truncateShrink(
		ctx, size, fblock, de)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return nil, nil, newlyDirtiedChildBytes, err
	}
	// Update dirtied bytes and unrefs regardless of error.
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
	latestWrite := si.op.addTruncate(size)
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry
	return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Truncate truncates or extends the given file to the given size.
// May block if there is too much unflushed data; in that case, it
// will be unblocked by a future sync.
func (fbo *folderBlockOps) Truncate(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, size uint64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
	// bound.
	//
	// Assume the whole remaining file will be dirty after this
	// truncate. TODO: try to figure out how many bytes actually will
	// be dirtied ahead of time?
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(size))
	if err != nil {
		return err
	}
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(size), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}
	// doDeferWrite is per-operation state set by lower layers; always
	// reset it on the way out.
	defer func() {
		fbo.doDeferWrite = false
	}()
	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
		ctx, lState, kmd, filePath, size)
	if err != nil {
		return err
	}
	// A nil latestWrite means the file was already the right size.
	if latestWrite != nil {
		fbo.observers.localChange(ctx, file, *latestWrite)
	}
	if fbo.doDeferWrite {
		// There's an ongoing sync, and this truncate altered
		// dirty blocks that are in the process of syncing. So,
		// we have to redo this truncate once the sync is complete,
		// using the new file path.
		fbo.log.CDebugf(ctx, "Deferring a truncate to file %v",
			filePath.tailPointer())
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)
				// Truncate the file again. We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err := fbo.truncateLocked(
					ctx, lState, kmd, f, size)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}
	return nil
}
// IsDirty returns whether the given file is dirty; if false is
// returned, then the file doesn't need to be synced.
func (fbo *folderBlockOps) IsDirty(lState *lockState, file path) bool {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Definitely dirty if a block is dirty.
	if fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), file.tailPointer(), file.Branch) {
		return true
	}
	// The deCache entry could still be dirty, if a file had an
	// attribute set (like mtime or exec) after the file was removed.
	// Still count the file as dirty in that case; most likely, the
	// caller will next call `ClearCacheInfo` to remove this entry.
	// (See comments in `folderBranchOps.syncLocked`.)
	_, ok := fbo.deCache[file.tailPointer().Ref()]
	return ok
}
// clearCacheInfoLocked drops all cached dirty state for the given
// file: its deCache and unrefCache entries, and its dirtyFile
// tracker (after finishing any in-flight sync bookkeeping).
// blockLock must be held for writing.
func (fbo *folderBlockOps) clearCacheInfoLocked(lState *lockState,
	file path) error {
	fbo.blockLock.AssertLocked(lState)
	ref := file.tailPointer().Ref()
	delete(fbo.deCache, ref)
	delete(fbo.unrefCache, ref)
	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		err := df.finishSync()
		if err != nil {
			return err
		}
		delete(fbo.dirtyFiles, file.tailPointer())
	}
	return nil
}
// ClearCacheInfo removes any cached info for the given file.
func (fbo *folderBlockOps) ClearCacheInfo(lState *lockState, file path) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	return fbo.clearCacheInfoLocked(lState, file)
}
// revertSyncInfoAfterRecoverableError updates the saved sync info to
// include all the blocks from before the error, except for those that
// have encountered recoverable block errors themselves.
// blocksToRemove are pointers to skip on the next attempt.
func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
	blocksToRemove []BlockPointer, result fileSyncState) {
	si := result.si
	savedSi := result.savedSi
	// Save the blocks we need to clean up on the next attempt.
	toClean := si.toCleanIfUnused
	newIndirect := make(map[BlockPointer]bool)
	for _, ptr := range result.newIndirectFileBlockPtrs {
		newIndirect[ptr] = true
	}
	// Propagate all unrefs forward, except those that belong to new
	// blocks that were created during the sync.
	unrefs := make([]BlockInfo, 0, len(si.unrefs))
	for _, unref := range si.unrefs {
		if newIndirect[unref.BlockPointer] {
			fbo.log.CDebugf(nil, "Dropping unref %v", unref)
			continue
		}
		unrefs = append(unrefs, unref)
	}
	// This sync will be retried and needs new blocks, so
	// reset everything in the sync info.
	*si = *savedSi
	si.toCleanIfUnused = toClean
	si.unrefs = unrefs
	if si.bps == nil {
		return
	}
	// Rebuild the block-state list from the saved copy, skipping any
	// pointers flagged as bad.
	si.bps.blockStates = nil
	// Mark any bad pointers so they get skipped next time.
	blocksToRemoveSet := make(map[BlockPointer]bool)
	for _, ptr := range blocksToRemove {
		blocksToRemoveSet[ptr] = true
	}
	for _, bs := range savedSi.bps.blockStates {
		// Only save the good pointers
		if !blocksToRemoveSet[bs.blockPtr] {
			si.bps.blockStates = append(si.bps.blockStates, bs)
		}
	}
}
// ReadyBlock is a thin wrapper around BlockOps.Ready() that handles
// checking for duplicates.  It returns the BlockInfo (pointer plus
// encoded size), the plaintext size, and the encrypted data ready
// for upload.
func ReadyBlock(ctx context.Context, bcache BlockCache, bops BlockOps,
	crypto cryptoPure, kmd KeyMetadata, block Block, uid keybase1.UID,
	bType keybase1.BlockType) (
	info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error) {
	var ptr BlockPointer
	directType := IndirectBlock
	if fBlock, ok := block.(*FileBlock); ok && !fBlock.IsInd {
		directType = DirectBlock
		// first see if we are duplicating any known blocks in this folder
		ptr, err = bcache.CheckForKnownPtr(kmd.TlfID(), fBlock)
		if err != nil {
			return
		}
	} else if dBlock, ok := block.(*DirBlock); ok {
		if dBlock.IsInd {
			panic("Indirect directory blocks aren't supported yet")
		}
		// TODO: support indirect directory blocks.
		directType = DirectBlock
	}
	// Ready the block, even in the case where we can reuse an
	// existing block, just so that we know what the size of the
	// encrypted data will be.
	id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block)
	if err != nil {
		return
	}
	if ptr.IsInitialized() {
		// Dedup hit: reuse the existing block ID under a fresh ref
		// nonce attributed to this writer.
		ptr.RefNonce, err = crypto.MakeBlockRefNonce()
		if err != nil {
			return
		}
		ptr.SetWriter(uid)
		// In case we're deduping an old pointer with an unknown block type.
		ptr.DirectType = directType
	} else {
		ptr = BlockPointer{
			ID:         id,
			KeyGen:     kmd.LatestKeyGeneration(),
			DataVer:    block.DataVersion(),
			DirectType: directType,
			Context:    kbfsblock.MakeFirstContext(uid, bType),
		}
	}
	info = BlockInfo{
		BlockPointer: ptr,
		EncodedSize:  uint32(readyBlockData.GetEncodedSize()),
	}
	return
}
// fileSyncState holds state for a sync operation for a single
// file.  It is filled in by startSyncWrite and consumed by
// CleanupSyncState and FinishSyncLocked.
type fileSyncState struct {
	// If fblock is non-nil, the (dirty, indirect, cached) block
	// it points to will be set to savedFblock on a recoverable
	// error.
	fblock, savedFblock *FileBlock
	// redirtyOnRecoverableError, which is non-nil only when fblock is
	// non-nil (i.e., only for indirect files), contains pointers that
	// need to be re-dirtied if the top block gets copied during the
	// sync, and a recoverable error happens. Maps to the old block
	// pointer for the block, which would need a
	// DirtyBlockCache.Delete.
	redirtyOnRecoverableError map[BlockPointer]BlockPointer
	// If si is non-nil, its updated state will be reset on
	// error. Also, if the error is recoverable, it will be
	// reverted to savedSi.
	//
	// TODO: Working with si in this way is racy, since si is a
	// member of unrefCache.
	si, savedSi *syncInfo
	// oldFileBlockPtrs is a list of transient entries in the
	// block cache for the file, which should be removed when the
	// sync finishes.
	oldFileBlockPtrs []BlockPointer
	// newIndirectFileBlockPtrs is a list of permanent entries
	// added to the block cache for the file, which should be
	// removed after the blocks have been sent to the server.
	// They are not removed on an error, because in that case the
	// file is still dirty locally and may get another chance to
	// be sync'd.
	//
	// TODO: This can be a list of IDs instead.
	newIndirectFileBlockPtrs []BlockPointer
}
// startSyncWrite contains the portion of StartSync() that's done
// while write-locking blockLock. If there is no dirty de cache
// entry, dirtyDe will be nil.
//
// It collapses and registers the file's syncOp in `md`, splits and
// readies the file's dirty child blocks, marks them as syncing, and
// fills in the returned fileSyncState so CleanupSyncState /
// FinishSyncLocked can later unwind or commit the work.
func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, syncState fileSyncState,
	dirtyDe *DirEntry, err error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	// update the parent directories, and write all the new blocks out
	// to disk
	fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, blockWrite)
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	fileRef := file.tailPointer().Ref()
	si, ok := fbo.unrefCache[fileRef]
	if !ok {
		return nil, nil, syncState, nil,
			fmt.Errorf("No syncOp found for file ref %v", fileRef)
	}
	// Collapse the write range to reduce the size of the sync op.
	si.op.Writes = si.op.collapseWriteRange(nil)
	// If this function returns a success, we need to make sure the op
	// in `md` is not the same variable as the op in `unrefCache`,
	// because the latter could get updated still by local writes
	// before `md` is flushed to the server. We don't copy it here
	// because code below still needs to modify it (and by extension,
	// the one stored in `syncState.si`).
	si.op.setFinalPath(file)
	md.AddOp(si.op)
	// Fill in syncState.
	if fblock.IsInd {
		// Save a deep copy of the top block so a recoverable error
		// can restore it; see revertSyncInfoAfterRecoverableError.
		fblockCopy := fblock.DeepCopy()
		syncState.fblock = fblock
		syncState.savedFblock = fblockCopy
		syncState.redirtyOnRecoverableError = make(map[BlockPointer]BlockPointer)
	}
	syncState.si = si
	syncState.savedSi, err = si.DeepCopy(fbo.config.Codec())
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	if si.bps == nil {
		si.bps = newBlockPutState(1)
	} else {
		// reinstate byte accounting from the previous Sync
		md.SetRefBytes(si.refBytes)
		md.AddDiskUsage(si.refBytes)
		md.SetUnrefBytes(si.unrefBytes)
		md.SetMDRefBytes(0) // this will be calculated anew
		md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
	}
	// Capture the final byte counts back into the sync info on every
	// return path, so a retried sync can reinstate them above.
	defer func() {
		si.refBytes = md.RefBytes()
		si.unrefBytes = md.UnrefBytes()
	}()
	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	fd := fbo.newFileData(lState, file, uid, md.ReadOnly())
	// Note: below we add possibly updated file blocks as "unref" and
	// "ref" blocks. This is fine, since conflict resolution or
	// notifications will never happen within a file.
	// If needed, split the children blocks up along new boundaries
	// (e.g., if using a fingerprint-based block splitter).
	unrefs, err := fd.split(ctx, fbo.id(), dirtyBcache, fblock, df)
	// Preserve any unrefs before checking the error.
	for _, unref := range unrefs {
		md.AddUnrefBlock(unref)
	}
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	// Ready all children blocks, if any.
	oldPtrs, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
		fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df)
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	// NOTE(review): redirtyOnRecoverableError is only allocated when
	// fblock.IsInd; presumably oldPtrs is empty for direct files --
	// TODO confirm, otherwise the assignment below writes a nil map.
	for newInfo, oldPtr := range oldPtrs {
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
		df.setBlockOrphaned(oldPtr, true)
		// Defer the DirtyBlockCache.Delete until after the new path
		// is ready, in case anyone tries to read the dirty file in
		// the meantime.
		syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)
		md.AddRefBlock(newInfo)
		// If this block is replacing a block from a previous, failed
		// Sync, we need to take that block out of the refs list, and
		// avoid unrefing it as well.
		si.removeReplacedBlock(ctx, fbo.log, oldPtr)
		err = df.setBlockSyncing(oldPtr)
		if err != nil {
			return nil, nil, syncState, nil, err
		}
		syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
	}
	err = df.setBlockSyncing(file.tailPointer())
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	syncState.oldFileBlockPtrs = append(
		syncState.oldFileBlockPtrs, file.tailPointer())
	// Capture the current de before we release the block lock, so
	// other deferred writes don't slip in.
	if de, ok := fbo.deCache[fileRef]; ok {
		dirtyDe = &(de.dirEntry)
	}
	// Leave a copy of the syncOp in `unrefCache`, since it may be
	// modified by future local writes while the syncOp in `md` should
	// only be modified by the rest of this sync process.
	var syncOpCopy *syncOp
	err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	fbo.unrefCache[fileRef].op = syncOpCopy
	// If there are any deferred bytes, it must be because this is
	// a retried sync and some blocks snuck in between sync. Those
	// blocks will get transferred now, but they are also on the
	// deferred list and will be retried on the next sync as well.
	df.assimilateDeferredNewBytes()
	// TODO: Returning si.bps in this way is racy, since si is a
	// member of unrefCache.
	return fblock, si.bps, syncState, dirtyDe, nil
}
// makeLocalBcache assembles a local block cache holding the parent
// directory block of `file`, updated (when `dirtyDe` is non-nil) so
// that the file's directory entry reflects the cached dirty state.
// It also folds the cached unref'd blocks for `si` into `md`.
func (fbo *folderBlockOps) makeLocalBcache(ctx context.Context,
	lState *lockState, md *RootMetadata, file path, si *syncInfo,
	dirtyDe *DirEntry) (lbc localBcache, err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	parent := file.parentPath()
	parentBlock, err := fbo.getDirLocked(
		ctx, lState, md.ReadOnly(), *parent, blockWrite)
	if err != nil {
		return nil, err
	}
	// Fold the cached unref'd blocks into the MD.
	si.mergeUnrefCache(md)
	lbc = make(localBcache)
	if dirtyDe == nil {
		// No dirty directory entry; the local cache stays empty.
		return lbc, nil
	}
	// Point the file's directory entry at the cached (dirty) copy.
	dirtyDe.EncodedSize = si.oldInfo.EncodedSize
	parentBlock.Children[file.tailName()] = *dirtyDe
	lbc[parent.tailPointer()] = parentBlock
	return lbc, nil
}
// StartSync starts a sync for the given file. It returns the new
// FileBlock which has the readied top-level block which includes all
// writes since the last sync. Must be used with CleanupSyncState()
// and UpdatePointers/FinishSyncLocked() like so:
//
//	fblock, bps, lbc, syncState, err :=
//		...fbo.StartSync(ctx, lState, md, uid, file)
//	defer func() {
//		...fbo.CleanupSyncState(
//			ctx, lState, md, file, ..., syncState, err)
//	}()
//	if err != nil {
//		...
//	}
//	...
//
//	... = fbo.UpdatePointers(..., func() error {
//		...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
//	})
func (fbo *folderBlockOps) StartSync(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, lbc localBcache,
	syncState fileSyncState, err error) {
	// Tell the journal (if any) that a dirty operation is starting.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
	}
	// Phase 1: ready the file's blocks under the write lock.
	var dirtyDe *DirEntry
	fblock, bps, syncState, dirtyDe, err = fbo.startSyncWrite(
		ctx, lState, md, uid, file)
	if err != nil {
		return nil, nil, nil, syncState, err
	}
	// Phase 2: build the local bcache with the updated parent dir.
	lbc, err = fbo.makeLocalBcache(ctx, lState, md, file, syncState.savedSi,
		dirtyDe)
	if err != nil {
		return nil, nil, nil, syncState, err
	}
	return fblock, bps, lbc, syncState, nil
}
// CleanupSyncState does any clean-up for a sync of the given file,
// given an error (which may be nil) that happens during or after
// StartSync() and before FinishSync(). blocksToRemove may be nil.
//
// On a recoverable error, the sync info and top block are reverted to
// their pre-sync snapshots so the sync can be retried; on an
// unrecoverable one, deferred state is discarded.  Either way, any
// blocks left in the "syncing" state are reset to dirty.
func (fbo *folderBlockOps) CleanupSyncState(
	ctx context.Context, lState *lockState, md ReadOnlyRootMetadata,
	file path, blocksToRemove []BlockPointer,
	result fileSyncState, err error) {
	// Balance the dirtyOpStart from StartSync.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		defer jServer.dirtyOpEnd(fbo.id())
	}
	if err == nil {
		return
	}
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	// Notify error listeners before we reset the dirty blocks and
	// permissions to be granted.
	fbo.notifyErrListenersLocked(lState, file.tailPointer(), err)
	// If there was an error, we need to back out any changes that
	// might have been filled into the sync op, because it could
	// get reused again in a later Sync call.
	if result.si != nil {
		result.si.op.resetUpdateState()
		// Save this MD for later, so we can clean up its
		// newly-referenced block pointers if necessary.
		// NOTE(review): assumes result.si.bps is non-nil (or that
		// DeepCopy tolerates a nil receiver) on every error path that
		// sets syncState.si -- TODO confirm.
		result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
			mdToCleanIfUnused{md, result.si.bps.DeepCopy()})
	}
	if isRecoverableBlockError(err) {
		if result.si != nil {
			fbo.revertSyncInfoAfterRecoverableError(blocksToRemove, result)
		}
		if result.fblock != nil {
			// Restore the pre-sync top block and re-dirty children.
			result.fblock.Set(result.savedFblock)
			fbo.fixChildBlocksAfterRecoverableErrorLocked(
				ctx, lState, file, md,
				result.redirtyOnRecoverableError)
		}
	} else {
		// Since the sync has errored out unrecoverably, the deferred
		// bytes are already accounted for.
		if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
			df.updateNotYetSyncingBytes(-fbo.deferredWaitBytes)
			// Some blocks that were dirty are now clean under their
			// readied block ID, and now live in the bps rather than
			// the dirty bcache, so we can delete them from the dirty
			// bcache.
			dirtyBcache := fbo.config.DirtyBlockCache()
			for _, ptr := range result.oldFileBlockPtrs {
				if df.isBlockOrphaned(ptr) {
					fbo.log.CDebugf(ctx, "Deleting dirty orphan: %v", ptr)
					if err := dirtyBcache.Delete(fbo.id(), ptr,
						fbo.branch()); err != nil {
						fbo.log.CDebugf(ctx, "Couldn't delete %v", ptr)
					}
				}
			}
		}
		// On an unrecoverable error, the deferred writes aren't
		// needed anymore since they're already part of the
		// (still-)dirty blocks.
		fbo.deferredDirtyDeletes = nil
		fbo.deferredWrites = nil
		fbo.deferredWaitBytes = 0
	}
	// The sync is over, due to an error, so reset the map so that we
	// don't defer any subsequent writes.
	// Old syncing blocks are now just dirty
	if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
		df.resetSyncingBlocksToDirty()
	}
}
// cleanUpUnusedBlocks cleans up the blocks from any previous failed
// sync attempts, skipping blocks that the successful MD ended up
// referencing anyway.
func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
	md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
	numToClean := len(syncState.si.toCleanIfUnused)
	if numToClean == 0 {
		return nil
	}
	// What blocks are referenced in the successful MD?
	refs := make(map[BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		for _, ptr := range op.Refs() {
			if ptr == zeroPtr {
				panic("Unexpected zero ref ptr in a sync MD revision")
			}
			refs[ptr] = true
		}
		for _, update := range op.allUpdates() {
			if update.Ref == zeroPtr {
				panic("Unexpected zero update ref ptr in a sync MD revision")
			}
			refs[update.Ref] = true
		}
	}
	// For each MD to clean, clean up the old failed blocks
	// immediately if the merge status matches the successful put, if
	// they didn't get referenced in the successful put. If the merge
	// status is different (e.g., we ended up on a conflict branch),
	// clean it up only if the original revision failed. If the same
	// block appears more than once, the one with a different merged
	// status takes precedence (which will always come earlier in the
	// list of MDs).
	blocksSeen := make(map[BlockPointer]bool)
	for _, oldMD := range syncState.si.toCleanIfUnused {
		bdType := blockDeleteAlways
		if oldMD.md.MergedStatus() != md.MergedStatus() {
			bdType = blockDeleteOnMDFail
		}
		failedBps := newBlockPutState(len(oldMD.bps.blockStates))
		for _, bs := range oldMD.bps.blockStates {
			if bs.blockPtr == zeroPtr {
				panic("Unexpected zero block ptr in an old sync MD revision")
			}
			// First occurrence of a pointer wins (see precedence
			// comment above).
			if blocksSeen[bs.blockPtr] {
				continue
			}
			blocksSeen[bs.blockPtr] = true
			// Still referenced by the successful put; keep it.
			if refs[bs.blockPtr] && bdType == blockDeleteAlways {
				continue
			}
			failedBps.blockStates = append(failedBps.blockStates,
				blockState{blockPtr: bs.blockPtr})
			fbo.log.CDebugf(ctx, "Cleaning up block %v from a previous "+
				"failed revision %d (oldMD is %s, bdType=%d)", bs.blockPtr,
				oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
		}
		if len(failedBps.blockStates) > 0 {
			fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
		}
	}
	return nil
}
// doDeferredWritesLocked replays any writes or truncates that were
// deferred while a sync was in flight, after first clearing out the
// dirty blocks those deferred operations left behind.  It reports
// whether the file remains dirty (i.e., whether any writes were
// replayed).
func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, newPath path) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)
	// Snapshot and reset all deferred state before replaying anything.
	pendingDeletes := fbo.deferredDirtyDeletes
	pendingWrites := fbo.deferredWrites
	stillDirty = len(pendingWrites) > 0
	fbo.deferredDirtyDeletes = nil
	fbo.deferredWrites = nil
	fbo.deferredWaitBytes = 0
	// Drop the dirty blocks created by writes/truncates that raced
	// with the sync; the replay below re-creates whatever is needed.
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range pendingDeletes {
		fbo.log.CDebugf(ctx, "Deleting deferred dirty ptr %v", ptr)
		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
			return true, err
		}
	}
	// Re-apply the deferred operations against the new path.
	for _, write := range pendingWrites {
		if err := write(ctx, lState, kmd, newPath); err != nil {
			// It's a little weird to return an error from a deferred
			// write here. Hopefully that will never happen.
			return true, err
		}
	}
	return stillDirty, nil
}
// FinishSyncLocked finishes the sync process for a file, given the
// state from StartSync. Specifically, it re-applies any writes that
// happened since the call to StartSync.
func (fbo *folderBlockOps) FinishSyncLocked(
	ctx context.Context, lState *lockState,
	oldPath, newPath path, md ReadOnlyRootMetadata,
	syncState fileSyncState, fbm *folderBlockManager) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)
	// Drop the transient dirty-cache entries that were superseded by
	// the readied blocks (deferred from startSyncWrite).
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range syncState.oldFileBlockPtrs {
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v", ptr)
		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
			return true, err
		}
	}
	// The permanent block-cache entries for the new indirect blocks
	// are no longer needed now that the server has them.
	bcache := fbo.config.BlockCache()
	for _, ptr := range syncState.newIndirectFileBlockPtrs {
		err := bcache.DeletePermanent(ptr.ID)
		if err != nil {
			fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
				ptr.ID, err)
		}
	}
	stillDirty, err = fbo.doDeferredWritesLocked(ctx, lState, md, newPath)
	if err != nil {
		return true, err
	}
	// Clear cached info for the old path. We are guaranteed that any
	// concurrent write to this file was deferred, even if it was to a
	// block that wasn't currently being sync'd, since the top-most
	// block is always in dirtyFiles and is always dirtied during a
	// write/truncate.
	//
	// Also, we can get rid of all the sync state that might have
	// happened during the sync, since we will replay the writes
	// below anyway.
	if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
		return true, err
	}
	if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
		return true, err
	}
	return stillDirty, nil
}
// notifyErrListenersLocked notifies any write operations that are
// blocked on a file so that they can learn about unrecoverable sync
// errors.  Recoverable errors are intentionally not forwarded.
func (fbo *folderBlockOps) notifyErrListenersLocked(lState *lockState,
	ptr BlockPointer, err error) {
	fbo.blockLock.AssertLocked(lState)
	if isRecoverableBlockError(err) {
		// Don't bother any listeners with this error, since the sync
		// will be retried. Unless the sync has reached its retry
		// limit, but in that case the listeners will just proceed as
		// normal once the dirty block cache bytes are freed, and
		// that's ok since this error isn't fatal.
		return
	}
	if df := fbo.dirtyFiles[ptr]; df != nil {
		df.notifyErrListeners(err)
	}
}
// searchWithOutOfDateCacheError indicates that a node search was
// attempted with a node cache that no longer matches the MD in use;
// the caller should retry with a clean cache.
type searchWithOutOfDateCacheError struct {
}

// Error implements the error interface.
func (e searchWithOutOfDateCacheError) Error() string {
	// No formatting verbs are needed here, so a plain string avoids
	// an unnecessary fmt.Sprintf (flagged by staticcheck S1039).
	return "Search is using an out-of-date node cache; " +
		"try again with a clean cache."
}
// searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, to ptr, given the set of pointers that were
// updated in a particular operation. The keys in nodeMap make up the
// set of BlockPointers that are being searched for, and nodeMap is
// updated in place to include the corresponding discovered nodes.
//
// Returns the number of nodes found by this invocation. If the error
// it returns is searchWithOutOfDateCache, the search should be
// retried by the caller with a clean cache.
func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
	lState *lockState, cache NodeCache, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootNode Node, currDir path, nodeMap map[BlockPointer]Node,
	numNodesFoundSoFar int) (int, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	dirBlock, err := fbo.getDirLocked(
		ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return 0, err
	}
	// getDirLocked may have unlocked blockLock, which means the cache
	// could have changed out from under us. Verify that didn't
	// happen, so we can avoid messing it up with nodes from an old MD
	// version. If it did happen, return a special error that lets
	// the caller know they should retry with a fresh cache.
	if currDir.path[0].BlockPointer !=
		cache.PathFromNode(rootNode).tailPointer() {
		return 0, searchWithOutOfDateCacheError{}
	}
	// Everything has already been found; nothing left to do.
	if numNodesFoundSoFar >= len(nodeMap) {
		return 0, nil
	}
	numNodesFound := 0
	for name, de := range dirBlock.Children {
		if _, ok := nodeMap[de.BlockPointer]; ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			// make a node for every pathnode
			n := rootNode
			for i, pn := range childPath.path[1:] {
				if !pn.BlockPointer.IsValid() {
					// Temporary debugging output for KBFS-1764 -- the
					// GetOrCreate call below will panic.
					fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
						"path.path=%v (index %d), name=%s, de=%#v, "+
						"nodeMap=%v, newPtrs=%v, kmd=%#v",
						childPath, childPath.path, i, name, de, nodeMap,
						newPtrs, kmd)
				}
				n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n)
				if err != nil {
					return 0, err
				}
			}
			nodeMap[de.BlockPointer] = n
			numNodesFound++
			// Stop early once every searched-for node is resolved.
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}
		// otherwise, recurse if this represents an updated block
		if _, ok := newPtrs[de.BlockPointer]; de.Type == Dir && ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
				newPtrs, kmd, rootNode, childPath, nodeMap,
				numNodesFoundSoFar+numNodesFound)
			if err != nil {
				return 0, err
			}
			numNodesFound += n
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}
	}
	return numNodesFound, nil
}
// trySearchWithCacheLocked attempts to resolve each pointer in `ptrs`
// to a Node using the given cache, starting the search from rootPtr
// and descending only through directories whose pointers appear in
// newPtrs.  It returns searchWithOutOfDateCacheError if the cache no
// longer corresponds to rootPtr, in which case the caller should retry
// with a clean cache.
func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	// Seed the result map with nil entries for every searched-for ptr.
	nodeMap := make(map[BlockPointer]Node)
	for _, ptr := range ptrs {
		nodeMap[ptr] = nil
	}
	if len(ptrs) == 0 {
		return nodeMap, nil
	}
	var node Node
	// The node cache used by the main part of KBFS is
	// fbo.nodeCache. This basically maps from BlockPointers to
	// Nodes. Nodes are used by the callers of the library, but
	// internally we need to know the series of BlockPointers and
	// file/dir names that make up the path of the corresponding
	// file/dir. fbo.nodeCache is long-lived and never invalidated.
	//
	// As folderBranchOps gets informed of new local or remote MD
	// updates, which change the BlockPointers of some subset of the
	// nodes in this TLF, it calls nodeCache.UpdatePointer for each
	// change. Then, when a caller passes some old Node they have
	// lying around into an FBO call, we can translate it to its
	// current path using fbo.nodeCache. Note that on every TLF
	// modification, we are guaranteed that the BlockPointer of the
	// root directory will change (because of the merkle-ish tree of
	// content hashes we use to assign BlockPointers).
	//
	// fbo.nodeCache needs to maintain the absolute latest mappings
	// for the TLF, or else FBO calls won't see up-to-date data. The
	// tension in search comes from the fact that we are trying to
	// discover the BlockPointers of certain files at a specific point
	// in the MD history, which is not necessarily the same as the
	// most-recently-seen MD update. Specifically, some callers
	// process a specific range of MDs, but folderBranchOps may have
	// heard about a newer one before, or during, when the caller
	// started processing. That means fbo.nodeCache may have been
	// updated to reflect the newest BlockPointers, and is no longer
	// correct as a cache for our search for the data at the old point
	// in time.
	if cache == fbo.nodeCache {
		// Root node should already exist if we have an up-to-date md.
		node = cache.Get(rootPtr.Ref())
		if node == nil {
			return nil, searchWithOutOfDateCacheError{}
		}
	} else {
		// Root node may or may not exist.
		var err error
		node, err = cache.GetOrCreate(rootPtr,
			string(kmd.GetTlfHandle().GetCanonicalName()), nil)
		if err != nil {
			return nil, err
		}
	}
	if node == nil {
		return nil, fmt.Errorf("Cannot find root node corresponding to %v",
			rootPtr)
	}
	// are they looking for the root directory?
	numNodesFound := 0
	if _, ok := nodeMap[rootPtr]; ok {
		nodeMap[rootPtr] = node
		numNodesFound++
		if numNodesFound >= len(nodeMap) {
			return nodeMap, nil
		}
	}
	rootPath := cache.PathFromNode(node)
	if len(rootPath.path) != 1 {
		return nil, fmt.Errorf("Invalid root path for %v: %s",
			rootPtr, rootPath)
	}
	_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
		kmd, node, rootPath, nodeMap, numNodesFound)
	if err != nil {
		return nil, err
	}
	// Verify the cache didn't move out from under us mid-search.
	if rootPtr != cache.PathFromNode(node).tailPointer() {
		return nil, searchWithOutOfDateCacheError{}
	}
	return nodeMap, nil
}
// searchForNodesLocked resolves the given pointers using the passed-in
// cache, falling back to a fresh throwaway cache if the given one
// turns out to be out of date.  It returns the map of discovered nodes
// and the cache that ultimately contains them.
func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
		newPtrs, kmd, rootPtr)
	if _, outOfDate := err.(searchWithOutOfDateCacheError); outOfDate {
		// The md is out-of-date, so use a throwaway cache so we
		// don't pollute the real node cache with stale nodes.
		fbo.log.CDebugf(ctx, "Root node %v doesn't exist in the node "+
			"cache; using a throwaway node cache instead",
			rootPtr)
		cache = newNodeCacheStandard(fbo.folderBranch)
		nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
			newPtrs, kmd, rootPtr)
	}
	if err != nil {
		return nil, nil, err
	}
	// Some entries may still be nil; the caller decides whether that
	// constitutes an error.
	return nodeMap, cache, nil
}
// SearchForNodes tries to resolve all the given pointers to a Node
// object, using only the updated pointers specified in newPtrs.
// Returns an error if any subset of the pointer paths do not exist;
// it is the caller's responsibility to decide to error on particular
// unresolved nodes. It also returns the cache that ultimately
// contains the nodes -- this might differ from the passed-in cache if
// another goroutine updated that cache and it no longer contains the
// root pointer specified in md.
func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	lState := makeFBOLockState()
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
}
// SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers.
func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (map[BlockPointer]path, error) {
	lState := makeFBOLockState()
	// Hold the lock while processing the paths so they can't be changed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	nodeMap, cache, err := fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
	if err != nil {
		return nil, err
	}
	paths := make(map[BlockPointer]path, len(nodeMap))
	for ptr, node := range nodeMap {
		if node == nil {
			// Not found; record an empty path for the caller.
			paths[ptr] = path{}
			continue
		}
		nodePath := cache.PathFromNode(node)
		if nodePath.tailPointer() != ptr {
			// The cache moved on; this node no longer maps to ptr.
			return nil, NodeNotFoundError{ptr}
		}
		paths[ptr] = nodePath
	}
	return paths, nil
}
// getUndirtiedEntry returns the clean directory entry for the given
// file, but only if a dirty entry is currently cached for it.  It
// returns nil (with no error) when there is no dirty or no clean
// entry.
func (fbo *folderBlockOps) getUndirtiedEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	if _, dirty := fbo.deCache[file.tailPointer().Ref()]; !dirty {
		// Nothing dirty cached for this file, so nothing to undirty.
		return nil, nil
	}
	// Fetch the clean parent directory block and look up the entry.
	parentBlock, err := fbo.getDirLocked(
		ctx, lState, kmd, *file.parentPath(), blockRead)
	if err != nil {
		return nil, err
	}
	cleanEntry, found := parentBlock.Children[file.tailName()]
	if !found {
		return nil, nil
	}
	return &cleanEntry, nil
}
// setCachedAttr applies the given attribute change for `ref` via
// setCachedAttrLocked, holding blockLock for writing.
func (fbo *folderBlockOps) setCachedAttr(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.setCachedAttrLocked(lState, ref, attr, realEntry, doCreate)
}
// UpdateCachedEntryAttributes updates any cached entry for the given
// path according to the given op. The node for the path is returned
// if there is one.
func (fbo *folderBlockOps) UpdateCachedEntryAttributes(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir path, op *setAttrOp) (Node, error) {
	// Look up the child entry to recover its BlockPointer, which is
	// needed to find the corresponding node, unfortunately.
	de, err := fbo.GetDirtyEntry(
		ctx, lState, kmd, dir.ChildPathNoPtr(op.Name))
	if err != nil {
		return nil, err
	}
	childNode := fbo.nodeCache.Get(de.Ref())
	if childNode == nil {
		// No node exists for this entry, so the cache entry won't be
		// accessible from anywhere; nothing to do.
		return nil, nil
	}
	// If there's a cache entry, we need to update it, so try and
	// fetch the undirtied entry.
	childPath := dir.ChildPath(op.Name, de.BlockPointer)
	cleanEntry, err := fbo.getUndirtiedEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}
	if cleanEntry != nil {
		fbo.setCachedAttr(lState, de.Ref(), op.Attr, cleanEntry, false)
	}
	return childNode, nil
}
// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet. We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry.
func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
	ctx context.Context, lState *lockState, op *setAttrOp, de DirEntry) {
	// doCreate=true: create the dirty entry if it's missing.
	fbo.setCachedAttr(lState, de.Ref(), op.Attr, &de, true)
}
// getDeferredWriteCountForTest returns the number of writes currently
// deferred (i.e., waiting for an in-flight sync to finish).  For use
// in tests only.
func (fbo *folderBlockOps) getDeferredWriteCountForTest(lState *lockState) int {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return len(fbo.deferredWrites)
}
// updatePointer redirects the node cache entry for oldPtr to newPtr
// and, when requested and the block ID actually changed, kicks off a
// prefetch of the block under its new pointer.
func (fbo *folderBlockOps) updatePointer(kmd KeyMetadata, oldPtr BlockPointer, newPtr BlockPointer, shouldPrefetch bool) {
	if !fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr) {
		// Nothing in the cache referenced oldPtr.
		return
	}
	// Only prefetch if the updated pointer is a new block ID.
	if oldPtr.ID == newPtr.ID {
		return
	}
	// TODO: Remove this comment when we're done debugging because it'll be everywhere.
	fbo.log.CDebugf(context.TODO(), "Updated reference for pointer %s to %s.", oldPtr.ID, newPtr.ID)
	if !shouldPrefetch {
		return
	}
	// Prefetch the new ref, but only if the old ref already exists in
	// the block cache. Ideally we'd always prefetch it, but we need
	// the type of the block so that we can call `NewEmpty`.
	// TODO KBFS-1850: Eventually we should use the codec library's
	// ability to decode into a nil interface to no longer need to
	// pre-initialize the correct type.
	block, _, _, err := fbo.config.BlockCache().GetWithPrefetch(oldPtr)
	if err != nil {
		return
	}
	fbo.config.BlockOps().Prefetcher().PrefetchBlock(
		block.NewEmpty(),
		newPtr,
		kmd,
		updatePointerPrefetchPriority,
	)
}
// UpdatePointers updates all the pointers in the node cache
// atomically. If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated.
func (fbo *folderBlockOps) UpdatePointers(kmd KeyMetadata, lState *lockState,
	op op, shouldPrefetch bool, afterUpdateFn func() error) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	for _, update := range op.allUpdates() {
		fbo.updatePointer(kmd, update.Unref, update.Ref, shouldPrefetch)
	}
	if afterUpdateFn != nil {
		// Run the callback while still holding blockLock.
		return afterUpdateFn()
	}
	return nil
}
// unlinkDuringFastForwardLocked unlinks a cached node whose directory
// entry could not be found during a fast-forward, on the assumption
// that it has been deleted.
func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
	lState *lockState, ref BlockRef) {
	fbo.blockLock.AssertLocked(lState)
	node := fbo.nodeCache.Get(ref)
	if node == nil {
		// Not in the cache; nothing to unlink.
		return
	}
	nodePath := fbo.nodeCache.PathFromNode(node)
	fbo.log.CDebugf(ctx, "Unlinking missing node %s/%v during "+
		"fast-forward", nodePath, ref)
	fbo.nodeCache.Unlink(ref, nodePath)
}
// fastForwardDirAndChildrenLocked recursively updates the cached
// pointers for currDir's children to their current values, unlinking
// any child whose entry no longer exists.  `children` maps a parent
// path string to the set of cached child path nodes under it; entries
// are consumed (deleted) as each directory is processed.  It returns
// the node changes needed to invalidate affected nodes.
func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
	lState *lockState, currDir path, children map[string]map[pathNode]bool,
	kmd KeyMetadata) ([]NodeChange, error) {
	fbo.blockLock.AssertLocked(lState)
	dirBlock, err := fbo.getDirLocked(ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return nil, err
	}
	prefix := currDir.String()
	// TODO: parallelize me?
	var changes []NodeChange
	for child := range children[prefix] {
		entry, ok := dirBlock.Children[child.Name]
		if !ok {
			// The entry is gone; assume the child was deleted.
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
			continue
		}
		fbo.log.CDebugf(ctx, "Fast-forwarding %v -> %v",
			child.BlockPointer, entry.BlockPointer)
		fbo.updatePointer(kmd, child.BlockPointer,
			entry.BlockPointer, true)
		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
		newPath := fbo.nodeCache.PathFromNode(node)
		if entry.Type == Dir {
			if node != nil {
				// Invalidate the directory listing for this node.
				change := NodeChange{Node: node}
				for subchild := range children[newPath.String()] {
					change.DirUpdated = append(change.DirUpdated, subchild.Name)
				}
				changes = append(changes, change)
			}
			// Recurse into the subdirectory's cached children.
			childChanges, err := fbo.fastForwardDirAndChildrenLocked(
				ctx, lState, newPath, children, kmd)
			if err != nil {
				return nil, err
			}
			changes = append(changes, childChanges...)
		} else if node != nil {
			// File -- invalidate the entire file contents.
			changes = append(changes, NodeChange{
				Node: node,
				FileUpdated: []WriteRange{{Len: 0, Off: 0}},
			})
		}
	}
	// This directory's children have all been handled.
	delete(children, prefix)
	return changes, nil
}
// FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF. If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it. Returns the set
// of node changes that resulted. If there are no nodes, it returns a
// nil error because there's nothing to be done.
func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata) (
	changes []NodeChange, err error) {
	// Take a hard lock through this whole process. TODO: is there
	// any way to relax this? It could lead to file system operation
	// timeouts, even on reads, if we hold it too long.
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	nodes := fbo.nodeCache.AllNodes()
	if len(nodes) == 0 {
		// Nothing needs to be done!
		return nil, nil
	}
	fbo.log.CDebugf(ctx, "Fast-forwarding %d nodes", len(nodes))
	defer func() { fbo.log.CDebugf(ctx, "Fast-forward complete: %v", err) }()

	// Build a "tree" representation for each interesting path prefix:
	// children maps a directory's path string to the set of pathNodes
	// directly under it.
	children := make(map[string]map[pathNode]bool)
	var rootPath path
	for _, n := range nodes {
		p := fbo.nodeCache.PathFromNode(n)
		if len(p.path) == 1 {
			// A one-element path is the TLF root itself.
			rootPath = p
		}
		prevPath := ""
		for _, pn := range p.path {
			if prevPath != "" {
				childPNs := children[prevPath]
				if childPNs == nil {
					childPNs = make(map[pathNode]bool)
					children[prevPath] = childPNs
				}
				childPNs[pn] = true
			}
			prevPath = filepath.Join(prevPath, pn.Name)
		}
	}

	if !rootPath.isValid() {
		return nil, errors.New("Couldn't find the root path")
	}

	fbo.log.CDebugf(ctx, "Fast-forwarding root %v -> %v",
		rootPath.path[0].BlockPointer, md.data.Dir.BlockPointer)
	fbo.updatePointer(md, rootPath.path[0].BlockPointer,
		md.data.Dir.BlockPointer, false)
	rootPath.path[0].BlockPointer = md.data.Dir.BlockPointer
	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
	if rootNode != nil {
		change := NodeChange{Node: rootNode}
		for child := range children[rootPath.String()] {
			change.DirUpdated = append(change.DirUpdated, child.Name)
		}
		changes = append(changes, change)
	}

	// Recursively fast-forward everything under the root; this
	// consumes entries from `children` as it handles them.
	childChanges, err := fbo.fastForwardDirAndChildrenLocked(
		ctx, lState, rootPath, children, md)
	if err != nil {
		return nil, err
	}
	changes = append(changes, childChanges...)

	// Unlink any children that remain.
	for _, childPNs := range children {
		for child := range childPNs {
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
		}
	}
	return changes, nil
}
// chainsPathPopulator is the ability to fill in the paths for all the
// ops tracked by a crChains instance.
type chainsPathPopulator interface {
	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
}
// populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache.
func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
	log logger.Logger, chains *crChains, includeCreates bool) error {
	// The returned paths themselves aren't needed here; only the
	// side effect on `chains` (and any error) matters.
	if _, err := chains.getPaths(
		ctx, fbo, log, fbo.nodeCache, includeCreates); err != nil {
		return err
	}
	return nil
}

// Compile-time check that folderBlockOps satisfies chainsPathPopulator.
var _ chainsPathPopulator = (*folderBlockOps)(nil)
folder_block_ops: comments for dir cache functions, and cleanup
Issue: KBFS-2071
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"path/filepath"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
// overallBlockState describes whether a TLF has any outstanding local
// writes.
type overallBlockState int

const (
	// cleanState: no outstanding local writes.
	cleanState overallBlockState = iota

	// dirtyState: there are outstanding local writes that haven't yet been
	// synced.
	dirtyState
)
// blockReqType indicates whether an operation makes block
// modifications or not.
type blockReqType int

const (
	// A block read request.
	blockRead blockReqType = iota

	// A block write request.
	blockWrite

	// A block read request that is happening from a different
	// goroutine than the blockLock rlock holder, using the same lState.
	blockReadParallel

	// We are looking up a block for the purposes of creating a new
	// node in the node cache for it; avoid any unlocks as part of the
	// lookup process.
	blockLookup
)
// mdToCleanIfUnused pairs an MD revision with the block put state
// created for it, so the blocks can be cleaned up later if that MD
// ends up unused. (NOTE(review): semantics inferred from the name and
// from syncInfo.DeepCopy usage -- confirm against the sync code.)
type mdToCleanIfUnused struct {
	md  ReadOnlyRootMetadata
	bps *blockPutState
}
// syncInfo tracks the in-progress sync state for a single file.
type syncInfo struct {
	// oldInfo is the block info before the sync started.
	oldInfo BlockInfo
	// op is the syncOp being built up for this sync.
	op *syncOp
	// unrefs are the to-be-unreferenced block infos accumulated by
	// writes and truncates.
	unrefs []BlockInfo
	// bps holds the blocks that have been put for this sync.
	bps *blockPutState
	// refBytes/unrefBytes are running byte totals for the MD's
	// usage accounting.
	refBytes   uint64
	unrefBytes uint64
	// toCleanIfUnused lists MD revisions (and their put blocks) to
	// clean up if they end up unused.
	toCleanIfUnused []mdToCleanIfUnused
}
// DeepCopy returns a deep copy of si. The op is cloned by
// round-tripping it through the given codec; bps and the to-clean MDs
// get their own deep copies.
func (si *syncInfo) DeepCopy(codec kbfscodec.Codec) (*syncInfo, error) {
	newSi := &syncInfo{
		oldInfo:    si.oldInfo,
		refBytes:   si.refBytes,
		unrefBytes: si.unrefBytes,
	}
	newSi.unrefs = make([]BlockInfo, len(si.unrefs))
	copy(newSi.unrefs, si.unrefs)
	if si.bps != nil {
		newSi.bps = si.bps.DeepCopy()
	}
	if si.op != nil {
		// Encode/decode through the codec to get an independent op.
		err := kbfscodec.Update(codec, &newSi.op, si.op)
		if err != nil {
			return nil, err
		}
	}
	newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
	for i, toClean := range si.toCleanIfUnused {
		// It might be overkill to deep-copy these MDs and bpses,
		// which are probably immutable, but for now let's do the safe
		// thing.
		copyMd, err := toClean.md.deepCopy(codec)
		if err != nil {
			return nil, err
		}
		newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
		newSi.toCleanIfUnused[i].bps = toClean.bps.DeepCopy()
	}
	return newSi, nil
}
// removeReplacedBlock drops the first occurrence of ptr from the
// op's ref list, and zeroes out any matching unref pointers (keeping
// their size info for MD usage accounting).
func (si *syncInfo) removeReplacedBlock(ctx context.Context,
	log logger.Logger, ptr BlockPointer) {
	// Locate the first matching ref, if any.
	idx := -1
	for i, ref := range si.op.RefBlocks {
		if ref == ptr {
			idx = i
			break
		}
	}
	if idx < 0 {
		return
	}

	log.CDebugf(ctx, "Replacing old ref %v", ptr)
	si.op.RefBlocks = append(si.op.RefBlocks[:idx],
		si.op.RefBlocks[idx+1:]...)
	for j := range si.unrefs {
		if si.unrefs[j].BlockPointer == ptr {
			// Don't completely remove the unref,
			// since it contains size info that we
			// need to incorporate into the MD
			// usage calculations.
			si.unrefs[j].BlockPointer = zeroPtr
		}
	}
}
// mergeUnrefCache pushes all of si's pending unrefs into md.
func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
	// Pushing the same ptr.ID/RefNonce more than once is fine,
	// because the subsequent ones should have a QuotaSize of 0.
	for i := range si.unrefs {
		md.AddUnrefBlock(si.unrefs[i])
	}
}
// deCacheEntry holds the dirty directory-entry state for a single
// directory BlockPointer.
type deCacheEntry struct {
	// dirEntry is the dirty directory entry corresponding to the
	// BlockPointer that maps to this struct.
	dirEntry DirEntry
	// adds is a map of the pointers for new entry names that have
	// been added to the DirBlock for the BlockPointer that maps to
	// this struct.
	adds map[string]BlockPointer
	// dels is a set of the names that have been removed from the
	// DirBlock for the BlockPointer that maps to this struct.
	dels map[string]bool
}
// folderBlockOps contains all the fields that must be synchronized by
// blockLock. It will eventually also contain all the methods that
// must be synchronized by blockLock, so that folderBranchOps will
// have no knowledge of blockLock.
//
// -- And now, a primer on tracking dirty bytes --
//
// The DirtyBlockCache tracks the number of bytes that are dirtied
// system-wide, as the number of bytes that haven't yet been synced
// ("unsynced"), and a number of bytes that haven't yet been resolved
// yet because the overall file Sync hasn't finished yet ("total").
// This data helps us decide when we need to block incoming Writes, in
// order to keep memory usage from exploding.
//
// It's the responsibility of folderBlockOps (and its helper struct
// dirtyFile) to update these totals in DirtyBlockCache for the
// individual files within this TLF. This is complicated by a few things:
//   * New writes to a file are "deferred" while a Sync is happening, and
//     are replayed after the Sync finishes.
//   * Syncs can be canceled or error out halfway through syncing the blocks,
//     leaving the file in a dirty state until the next Sync.
//   * Syncs can fail with a /recoverable/ error, in which case they get
//     retried automatically by folderBranchOps. In that case, the retried
//     Sync also sucks in any outstanding deferred writes.
//
// With all that in mind, here is the rough breakdown of how this
// bytes-tracking is implemented:
//   * On a Write/Truncate to a block, folderBranchOps counts all the
//     newly-dirtied bytes in a file as "unsynced". That is, if the block was
//     already in the dirty cache (and not already being synced), only
//     extensions to the block count as "unsynced" bytes.
//   * When a Sync starts, dirtyFile remembers the total of bytes being synced,
//     and the size of each block being synced.
//   * When each block put finishes successfully, dirtyFile subtracts the size
//     of that block from "unsynced".
//   * When a Sync finishes successfully, the total sum of bytes in that sync
//     are subtracted from the "total" dirty bytes outstanding.
//   * If a Sync fails, but some blocks were put successfully, those blocks
//     are "re-dirtied", which means they count as unsynced bytes again.
//     dirtyFile handles this.
//   * When a Write/Truncate is deferred due to an ongoing Sync, its bytes
//     still count towards the "unsynced" total. In fact, this essentially
//     creates a new copy of those blocks, and the whole size of that block
//     (not just the newly-dirtied bytes) count for the total. However,
//     when the write gets replayed, folderBlockOps first subtracts those bytes
//     from the system-wide numbers, since they are about to be replayed.
//   * When a Sync is retried after a recoverable failure, dirtyFile adds
//     the newly-dirtied deferred bytes to the system-wide numbers, since they
//     are now being assimilated into this Sync.
//   * dirtyFile also exposes a concept of "orphaned" blocks. These are child
//     blocks being synced that are now referenced via a new, permanent block
//     ID from the parent indirect block. This matters for when hard failures
//     occur during a Sync -- the blocks will no longer be accessible under
//     their previous old pointers, and so dirtyFile needs to know their old
//     bytes can be cleaned up now.
type folderBlockOps struct {
	config       Config
	log          logger.Logger
	folderBranch FolderBranch
	observers    *observerList

	// forceSyncChan can be sent on to trigger an immediate
	// Sync().  It is a blocking channel.
	forceSyncChan chan<- struct{}

	// protects access to blocks in this folder and all fields
	// below.
	blockLock blockLock

	// Which files are currently dirty and have dirty blocks that are either
	// currently syncing, or waiting to be sync'd.
	dirtyFiles map[BlockPointer]*dirtyFile

	// For writes and truncates, track the unsynced to-be-unref'd
	// block infos, per-path.
	unrefCache map[BlockRef]*syncInfo
	// For writes and truncates, track the modified (but not yet
	// committed) directory entries. Maps the entry BlockRef to a
	// modified entry.
	deCache map[BlockRef]deCacheEntry

	// Writes and truncates for blocks that were being sync'd, and
	// need to be replayed after the sync finishes on top of the new
	// versions of the blocks.
	deferredWrites []func(context.Context, *lockState, KeyMetadata, path) error
	// Blocks that need to be deleted from the dirty cache before any
	// deferred writes are replayed.
	deferredDirtyDeletes []BlockPointer
	// Running count of bytes in deferred writes awaiting replay.
	// NOTE(review): inferred from the name and the deferred-write
	// accounting described above -- confirm against the sync code.
	deferredWaitBytes int64

	// set to true if this write or truncate should be deferred
	doDeferWrite bool

	// nodeCache itself is goroutine-safe, but write/truncate must
	// call PathFromNode() only under blockLock (see nodeCache
	// comments in folder_branch_ops.go).
	nodeCache NodeCache
}
// Only exported methods of folderBlockOps should be used outside of this
// file.
//
// Although, temporarily, folderBranchOps is allowed to reach in and
// manipulate folderBlockOps fields and methods directly.

// id returns the TLF ID of this folder.
func (fbo *folderBlockOps) id() tlf.ID {
	return fbo.folderBranch.Tlf
}

// branch returns the branch name of this folder.
func (fbo *folderBlockOps) branch() BranchName {
	return fbo.folderBranch.Branch
}
// GetState returns the overall block state of this TLF.
func (fbo *folderBlockOps) GetState(lState *lockState) overallBlockState {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Any entry in the dirty dir-entry cache means there are
	// outstanding local writes.
	if len(fbo.deCache) > 0 {
		return dirtyState
	}
	return cleanState
}
// getCleanEncodedBlockSizeLocked retrieves the encoded size of the
// clean block pointed to by ptr, which must be valid, either from the
// cache or from the server. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	rtype blockReqType) (uint32, error) {
	if rtype != blockReadParallel {
		if rtype == blockWrite {
			panic("Cannot get the size of a block for writing")
		}
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
			"with blockReadParallel")
	}

	if !ptr.IsValid() {
		return 0, InvalidBlockRefError{ptr.Ref()}
	}

	// A block-cache hit answers the question with no network trip.
	if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
		return block.GetEncodedSize(), nil
	}

	if err := checkDataVersion(fbo.config, path{}, ptr); err != nil {
		return 0, err
	}

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine. If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	bops := fbo.config.BlockOps()
	var size uint32
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			size, err = bops.GetEncodedSize(ctx, kmd, ptr)
		})
	} else {
		size, err = bops.GetEncodedSize(ctx, kmd, ptr)
	}
	if err != nil {
		return 0, err
	}
	return size, nil
}
// getBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. If
// notifyPath is valid and the block isn't cached, trigger a read
// notification. If `rtype` is `blockReadParallel`, it's assumed that
// some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
//
// This must be called only by get{File,Dir}BlockHelperLocked().
func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	newBlock makeNewBlock, lifetime BlockCacheLifetime, notifyPath path,
	rtype blockReqType) (Block, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getBlockHelperLocked " +
			"with blockReadParallel")
	}

	if !ptr.IsValid() {
		return nil, InvalidBlockRefError{ptr.Ref()}
	}

	// The dirty cache takes precedence over the clean block cache.
	if block, err := fbo.config.DirtyBlockCache().Get(
		fbo.id(), ptr, branch); err == nil {
		return block, nil
	}

	if block, hasPrefetched, lifetime, err :=
		fbo.config.BlockCache().GetWithPrefetch(ptr); err == nil {
		// If the block was cached in the past, we need to handle it as if it's
		// an on-demand request so that its downstream prefetches are triggered
		// correctly according to the new on-demand fetch priority.
		fbo.config.BlockOps().BlockRetriever().CacheAndPrefetch(ctx,
			ptr, block, kmd, defaultOnDemandRequestPriority, lifetime,
			hasPrefetched)
		return block, nil
	}

	if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
		return nil, err
	}

	if notifyPath.isValidForNotification() {
		fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
		defer fbo.config.Reporter().Notify(ctx,
			readNotification(notifyPath, true))
	}

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine. If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	// fetch the block, and add to cache
	block := newBlock()
	bops := fbo.config.BlockOps()
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			err = bops.Get(ctx, kmd, ptr, block, lifetime)
		})
	} else {
		err = bops.Get(ctx, kmd, ptr, block, lifetime)
	}
	if err != nil {
		return nil, err
	}

	return block, nil
}
// getFileBlockHelperLocked retrieves the block pointed to by ptr,
// which must be valid, either from an internal cache, the block
// cache, or from the server, and fails if the result is not a file
// block. If `rtype` is `blockReadParallel`, some coordinating
// goroutine is assumed to hold the right locks, and `lState` must be
// `nil`.
//
// This must be called only by GetFileBlockForReading(),
// getFileBlockLocked(), and getFileLocked().
//
// p is used only when reporting errors and sending read
// notifications, and can be empty.
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (
	*FileBlock, error) {
	if rtype == blockReadParallel {
		// This goroutine isn't the official lock holder, so it must
		// not carry a lock state.
		if lState != nil {
			panic("Non-nil lState passed to getFileBlockHelperLocked " +
				"with blockReadParallel")
		}
	} else {
		fbo.blockLock.AssertAnyLocked(lState)
	}

	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewFileBlock, TransientEntry, p, rtype)
	if err != nil {
		return nil, err
	}

	if fblock, ok := block.(*FileBlock); ok {
		return fblock, nil
	}
	return nil, NotFileBlockError{ptr, branch, p}
}
// GetBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. The
// returned block may have a generic type (not DirBlock or FileBlock).
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointer refers to. The block will not be cached, if it wasn't
// in the cache already.
func (fbo *folderBlockOps) GetBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName) (
	Block, error) {
	// A read lock suffices: nothing below mutates cached state.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getBlockHelperLocked(ctx, lState, kmd, ptr, branch,
		NewCommonBlock, NoCacheEntry, path{}, blockRead)
}
// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated. In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to. Any downloaded blocks will not be cached,
// if they weren't in the cache already.
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptrs []BlockPointer,
	ignoreRecoverableForRemovalErrors map[BlockPointer]bool,
	branch BranchName) (uint64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// Each goroutine writes only its own slot, so no extra
	// synchronization is needed; ignored pointers leave a zero.
	sizes := make([]uint32, len(ptrs))
	eg, groupCtx := errgroup.WithContext(ctx)
	for i, ptr := range ptrs {
		i, ptr := i, ptr // capture range variables
		eg.Go(func() error {
			size, err := fbo.getCleanEncodedBlockSizeLocked(groupCtx, nil,
				kmd, ptr, branch, blockReadParallel)
			// TODO: we might be able to recover the size of the
			// top-most block of a removed file using the merged
			// directory entry, the same way we do in
			// `folderBranchOps.unrefEntry`.
			if isRecoverableBlockErrorForRemoval(err) &&
				ignoreRecoverableForRemovalErrors[ptr] {
				fbo.log.CDebugf(groupCtx, "Hit an ignorable, recoverable "+
					"error for block %v: %v", ptr, err)
				return nil
			}
			if err != nil {
				return err
			}
			sizes[i] = size
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return 0, err
	}

	var sum uint64
	for _, size := range sizes {
		sum += uint64(size)
	}
	return sum, nil
}
// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (*DirBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	}
	// NOTE(review): unlike getBlockHelperLocked and
	// getFileBlockHelperLocked, this helper doesn't panic on a
	// non-nil lState with blockReadParallel -- confirm whether that
	// guard was omitted intentionally.

	// Pass in an empty notify path because notifications should only
	// trigger for file reads.
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewDirBlock, TransientEntry, path{}, rtype)
	if err != nil {
		return nil, err
	}

	dblock, ok := block.(*DirBlock)
	if !ok {
		return nil, NotDirBlockError{ptr, branch, p}
	}

	return dblock, nil
}
// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*FileBlock, error) {
	// Read-only access, so a read lock is sufficient.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}
// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*DirBlock, error) {
	// Read-only access, so a read lock is sufficient.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}
// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it. A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it. If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead. (This
// differs from getDirLocked.) This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite. If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	file path, rtype blockReqType) (
	fblock *FileBlock, wasDirty bool, err error) {
	switch rtype {
	case blockRead:
		fbo.blockLock.AssertRLocked(lState)
	case blockWrite:
		fbo.blockLock.AssertLocked(lState)
	case blockReadParallel:
		// This goroutine might not be the official lock holder, so
		// don't make any assertions.
		if lState != nil {
			panic("Non-nil lState passed to getFileBlockLocked " +
				"with blockReadParallel")
		}
	case blockLookup:
		panic("blockLookup should only be used for directory blocks")
	default:
		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
	}

	fblock, err = fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, file.Branch, file, rtype)
	if err != nil {
		return nil, false, err
	}

	// Record dirtiness before any copy below, so the caller learns
	// the pre-copy state of the cached block.
	wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
	if rtype == blockWrite {
		// Copy the block if it's for writing, and either the
		// block is not yet dirty or the block is currently
		// being sync'd and needs a copy even though it's
		// already dirty.
		df := fbo.dirtyFiles[file.tailPointer()]
		if !wasDirty || (df != nil && df.blockNeedsCopy(ptr)) {
			fblock = fblock.DeepCopy()
		}
	}
	return fblock, wasDirty, nil
}
// getFileLocked is getFileBlockLocked called with file.tailPointer().
func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path,
	rtype blockReqType) (*FileBlock, error) {
	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !file.isValid() {
		return nil, InvalidPathError{file}
	}

	// The dirtiness flag from getFileBlockLocked isn't needed here.
	block, _, err := fbo.getFileBlockLocked(
		ctx, lState, kmd, file.tailPointer(), file, rtype)
	if err != nil {
		return nil, err
	}
	return block, nil
}
// GetIndirectFileBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given file. If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) ([]BlockInfo, error) {
	// Read-only traversal of the file's block tree.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfos(ctx)
}
// GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
// for all indirect blocks of the given file, starting from the given
// top-most block. If the returned error is a recoverable one (as
// determined by isRecoverableBlockErrorForRemoval), the returned list
// may still be non-empty, and holds all the BlockInfos for all found
// indirect blocks. (This will be relevant when we handle multiple
// levels of indirection.)
func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	topBlock *FileBlock) (
	[]BlockInfo, error) {
	// Read-only traversal of the file's block tree.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
}
// DeepCopyFile makes a complete copy of the given file, deduping leaf
// blocks and making new random BlockPointers for all indirect blocks.
// It returns the new top pointer of the copy, and all the new child
// pointers in the copy. It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored.
func (fbo *folderBlockOps) DeepCopyFile(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dirtyBcache DirtyBlockCache, dataVer DataVer) (
	newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error) {
	// Deep copying doesn't alter any data in use, it only makes copy,
	// so only a read lock is needed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return BlockPointer{}, nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.deepCopy(ctx, dataVer)
}
// UndupChildrenInCopy un-deduplicates the children of the given
// file's top block, using the given block put state and dirty cache,
// and returns the resulting block infos. (NOTE(review): exact
// semantics live in fileData.undupChildrenInCopy -- confirm there.)
// Note it takes the block lock for writing, unlike the similar
// ReadyNonLeafBlocksInCopy below.
func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.undupChildrenInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}
// ReadyNonLeafBlocksInCopy readies the non-leaf blocks of the given
// file copy, using the given block put state and dirty cache, and
// returns the resulting block infos. (NOTE(review): exact semantics
// live in fileData.readyNonLeafBlocksInCopy -- confirm there.) Only
// a read lock is taken, unlike UndupChildrenInCopy above.
func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.readyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}
// getDirLocked retrieves the block pointed to by the tail pointer of
// the given path, which must be valid, either from the cache or from
// the server. An error is returned if the retrieved block is not a
// dir block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
//
// Note that blockLock must be either r-locked or locked, but
// independently of rtype. (This differs from getFileLocked and
// getFileBlockLocked.) File write operations (which lock blockLock)
// don't need a copy of parent dir blocks, and non-file write
// operations do need to copy dir blocks for modifications.
func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
	*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !dir.isValid() {
		return nil, InvalidPathError{dir}
	}

	// Get the block for the last element in the path.
	dblock, err := fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, dir.tailPointer(), dir.Branch, dir, rtype)
	if err != nil {
		return nil, err
	}

	if rtype == blockWrite && !fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), dir.tailPointer(), dir.Branch) {
		// Copy the block if it's for writing and the block is
		// not yet dirty.
		dblock = dblock.DeepCopy()
	}
	return dblock, nil
}
// GetDir retrieves the block pointed to by the tail pointer of the
// given valid path, either from the cache or from the server, taking
// blockLock for reading.  An error is returned if the retrieved
// block is not a dir block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
func (fbo *folderBlockOps) GetDir(
	ctx context.Context, lState *lockState, kmd KeyMetadata, dir path,
	rtype blockReqType) (*DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
}
// addDirEntryInCacheLocked records in dir's deCache entry that
// newName now maps to newDe's block pointer, canceling any pending
// cached removal of the same name.  blockLock must be held for
// writing.
func (fbo *folderBlockOps) addDirEntryInCacheLocked(lState *lockState, dir path,
	newName string, newDe DirEntry) {
	fbo.blockLock.AssertLocked(lState)
	ref := dir.tailPointer().Ref()
	entry := fbo.deCache[ref]
	if entry.adds == nil {
		entry.adds = make(map[string]BlockPointer)
	}
	entry.adds[newName] = newDe.BlockPointer
	// Cancel a removal of the same name that may be cached but not
	// yet flushed.
	delete(entry.dels, newName)
	fbo.deCache[ref] = entry
}
// AddDirEntryInCache adds a brand new entry to the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory. The new entry must not yet have a cache
// entry itself.
func (fbo *folderBlockOps) AddDirEntryInCache(lState *lockState, dir path,
	newName string, newDe DirEntry) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, dir, newName, newDe)

	if !newDe.IsInitialized() {
		return
	}
	// Also cache the target's own dir entry.
	targetEntry, exists := fbo.deCache[newDe.Ref()]
	if exists {
		panic("New entry shouldn't already exist")
	}
	targetEntry.dirEntry = newDe
	fbo.deCache[newDe.Ref()] = targetEntry
}
// removeDirEntryInCacheLocked records in dir's deCache entry that
// oldName has been removed, canceling any pending cached addition of
// the same name.  blockLock must be held for writing.
func (fbo *folderBlockOps) removeDirEntryInCacheLocked(lState *lockState,
	dir path, oldName string) {
	fbo.blockLock.AssertLocked(lState)
	ref := dir.tailPointer().Ref()
	entry := fbo.deCache[ref]
	if entry.dels == nil {
		entry.dels = make(map[string]bool)
	}
	entry.dels[oldName] = true
	// Cancel an addition of the same name that may be cached but not
	// yet flushed.
	delete(entry.adds, oldName)
	fbo.deCache[ref] = entry
}
// RemoveDirEntryInCache removes an entry from the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory.
func (fbo *folderBlockOps) RemoveDirEntryInCache(lState *lockState, dir path,
	oldName string) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.removeDirEntryInCacheLocked(lState, dir, oldName)
}
// RenameDirEntryInCache updates the entries of both the old and new
// parent dirs for the given target dir atomically (with respect to
// blockLock). It also updates the cache entry for the target, which
// would have its Ctime changed. The updates will get applied to the
// dirty blocks on subsequent fetches.
//
// The returned bool indicates whether or not the caller should clean
// up the target cache entry when the effects of the operation are no
// longer needed.
func (fbo *folderBlockOps) RenameDirEntryInCache(lState *lockState,
	oldParent path, oldName string, newParent path, newName string,
	newDe DirEntry) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, newParent, newName, newDe)
	fbo.removeDirEntryInCacheLocked(lState, oldParent, oldName)

	// If there's already an entry for the target, only update the
	// Ctime on a rename.
	cacheEntry, ok := fbo.deCache[newDe.Ref()]
	if ok && cacheEntry.dirEntry.IsInitialized() {
		cacheEntry.dirEntry.Ctime = newDe.Ctime
	} else {
		cacheEntry.dirEntry = newDe
		deleteTargetDirEntry = true
	}
	// Store the (possibly updated) entry exactly once; the original
	// code redundantly wrote it to the map twice on the new-entry
	// path.
	fbo.deCache[newDe.Ref()] = cacheEntry
	return deleteTargetDirEntry
}
// setCachedAttrLocked copies the attribute selected by attr (plus
// Ctime, always) from realEntry into the cached entry for ref.  When
// no cache entry exists yet, one is created from realEntry only if
// doCreate is set; otherwise the call is a no-op.  blockLock must be
// held for writing.
func (fbo *folderBlockOps) setCachedAttrLocked(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.AssertLocked(lState)
	entry, found := fbo.deCache[ref]
	if !found {
		if !doCreate {
			return
		}
		// Seed a brand-new cache entry from the real entry.
		entry.dirEntry = *realEntry
	}

	switch attr {
	case exAttr:
		entry.dirEntry.Type = realEntry.Type
	case mtimeAttr:
		entry.dirEntry.Mtime = realEntry.Mtime
	}
	entry.dirEntry.Ctime = realEntry.Ctime
	fbo.deCache[ref] = entry
}
// SetAttrInDirEntryInCache sets an attribute (and Ctime) on the
// given entry's cached DirEntry, creating the cache entry if needed;
// the change will get applied to the dirty block on subsequent
// fetches for the entry's parent directory.  (The previous comment
// claiming this "removes an entry" was a copy-paste error from
// RemoveDirEntryInCache -- the code below only sets attributes.)
//
// The returned bool indicates whether or not the caller should clean
// up the cache entry when the effects of the operation are no longer
// needed.
func (fbo *folderBlockOps) SetAttrInDirEntryInCache(lState *lockState,
	newDe DirEntry, attr attrChange) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	// Only tell the caller to clean up if this call created the
	// cache entry.
	_, ok := fbo.deCache[newDe.Ref()]
	if !ok {
		deleteTargetDirEntry = true
	}
	fbo.setCachedAttrLocked(
		lState, newDe.Ref(), attr, &newDe,
		true /* create the deCache entry if it doesn't exist yet */)
	return deleteTargetDirEntry
}
// ClearCachedAddsAndRemoves clears out any cached directory entry
// adds and removes for the given dir.
func (fbo *folderBlockOps) ClearCachedAddsAndRemoves(
	lState *lockState, dir path) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	ref := dir.tailPointer().Ref()
	entry, ok := fbo.deCache[ref]
	if !ok {
		return
	}
	if !entry.dirEntry.IsInitialized() {
		// With no dirEntry, the whole cache entry can go away.
		delete(fbo.deCache, ref)
		return
	}
	// Otherwise keep the dirEntry but drop the adds and dels.
	entry.adds = nil
	entry.dels = nil
	fbo.deCache[ref] = entry
}
// updateWithDirtyEntriesLocked checks if the given DirBlock has any
// entries that are in deCache (i.e., entries pointing to dirty
// files). If so, it makes a copy with all such entries replaced with
// the ones in deCache and returns it. If not, it just returns the
// given one.
func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context,
	lState *lockState, dir path, block *DirBlock) (*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	// see if this directory has any outstanding writes/truncates that
	// require an updated DirEntry

	// Save some time for the common case of having no dirty
	// files.
	if len(fbo.deCache) == 0 {
		return block, nil
	}

	var dblockCopy *DirBlock
	dirCacheEntry := fbo.deCache[dir.tailPointer().Ref()]

	// TODO: We should get rid of deCache completely and use only
	// DirtyBlockCache to store the dirtied version of the DirBlock.
	// We can't do that yet, because there might be multiple
	// outstanding dirty files in one directory, and the KBFSOps API
	// allows for syncing one at a time, so keeping a single dirtied
	// DirBlock would accidentally sync the DirEntry of file A when a
	// sync of file B is requested.
	//
	// Soon a sync will sync everything that's dirty at once, and so
	// we can remove deCache at that point. Until then, we must
	// incrementally build it up each time.

	// Add cached additions to the copy.
	for k, ptr := range dirCacheEntry.adds {
		de, ok := fbo.deCache[ptr.Ref()]
		if !ok {
			return nil, fmt.Errorf("No cached dir entry found for new entry "+
				"%s in dir %s (%v)", k, dir, dir.tailPointer())
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		dblockCopy.Children[k] = de.dirEntry
	}

	// Remove cached removals from the copy.  (Fixed: this loop
	// previously iterated over `adds` instead of `dels`, so cached
	// removals were never applied.)
	for k := range dirCacheEntry.dels {
		_, ok := block.Children[k]
		if !ok {
			continue
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		delete(dblockCopy.Children, k)
	}

	// Update dir entries for any modified files.
	for k, v := range block.Children {
		de, ok := fbo.deCache[v.Ref()]
		if !ok {
			continue
		}
		if dblockCopy == nil {
			dblockCopy = block.DeepCopy()
		}
		dblockCopy.Children[k] = de.dirEntry
	}

	// Nothing was dirty in this directory; hand back the original.
	if dblockCopy == nil {
		return block, nil
	}
	return dblockCopy, nil
}
// getDirtyDirLocked composes getDirLocked and
// updateWithDirtyEntriesLocked. Note that a dirty dir means that it
// has entries possibly pointing to dirty files, not that it's dirty
// itself.
func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
	*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	block, err := fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
	if err != nil {
		return nil, err
	}
	// Overlay any cached dirty entries onto the fetched block.
	return fbo.updateWithDirtyEntriesLocked(ctx, lState, dir, block)
}
// GetDirtyDirChildren returns a map of EntryInfos for the (possibly
// dirty) children entries of the given directory.
func (fbo *folderBlockOps) GetDirtyDirChildren(
	ctx context.Context, lState *lockState, kmd KeyMetadata, dir path) (
	map[string]EntryInfo, error) {
	var dblock *DirBlock
	err := func() error {
		fbo.blockLock.RLock(lState)
		defer fbo.blockLock.RUnlock(lState)
		var err error
		dblock, err = fbo.getDirtyDirLocked(ctx, lState, kmd, dir, blockRead)
		return err
	}()
	if err != nil {
		return nil, err
	}

	children := make(map[string]EntryInfo, len(dblock.Children))
	for name, de := range dblock.Children {
		children[name] = de.EntryInfo
	}
	return children, nil
}
// getDirtyParentAndEntryLocked returns the (possibly dirty) parent
// DirBlock of the given file along with the file's DirEntry within
// it.  file must have a valid parent.  blockLock must be held.
func (fbo *folderBlockOps) getDirtyParentAndEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	if !file.hasValidParent() {
		return nil, DirEntry{}, InvalidParentPathError{file}
	}

	parentPath := file.parentPath()
	dblock, err := fbo.getDirtyDirLocked(
		ctx, lState, kmd, *parentPath, rtype)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// The entry must actually exist in the parent.
	name := file.tailName()
	de, ok := dblock.Children[name]
	if !ok {
		return nil, DirEntry{}, NoSuchNameError{name}
	}
	return dblock, de, nil
}
// GetDirtyParentAndEntry returns a copy of the parent DirBlock
// (suitable for modification) of the given file, which may contain
// entries pointing to other dirty files, and its possibly-dirty
// DirEntry in that directory. file must have a valid parent. Use
// GetDirtyEntry() if you only need the DirEntry.
func (fbo *folderBlockOps) GetDirtyParentAndEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockWrite)
}
// getDirtyEntryLocked returns just the possibly-dirty DirEntry of
// the given file.  file must have a valid parent.  blockLock must be
// held.
func (fbo *folderBlockOps) getDirtyEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) {
	// TODO: Since we only need a single DirEntry, avoid having to
	// look up every entry in the DirBlock.
	_, entry, err := fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockLookup)
	return entry, err
}
// GetDirtyEntry returns the possibly-dirty DirEntry of the given file
// in its parent DirBlock. file must have a valid parent.
func (fbo *folderBlockOps) GetDirtyEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
}
// Lookup returns the possibly-dirty DirEntry of the given file in its
// parent DirBlock, and a Node for the file if it exists. It has to
// do all of this under the block lock to avoid races with
// UpdatePointers.
func (fbo *folderBlockOps) Lookup(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir Node, name string) (Node, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	dirPath := fbo.nodeCache.PathFromNode(dir)
	if !dirPath.isValid() {
		return nil, DirEntry{}, InvalidPathError{dirPath}
	}

	childPath := dirPath.ChildPathNoPtr(name)
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// Symlinks don't get a node of their own.
	if de.Type == Sym {
		return nil, de, nil
	}

	if err := checkDataVersion(fbo.config, childPath, de.BlockPointer); err != nil {
		return nil, DirEntry{}, err
	}

	node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
	if err != nil {
		return nil, DirEntry{}, err
	}
	return node, de, nil
}
// getOrCreateDirtyFileLocked returns the dirtyFile tracking the
// given file, creating and registering one if it doesn't exist yet.
// blockLock must be held for writing.
func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(lState *lockState,
	file path) *dirtyFile {
	fbo.blockLock.AssertLocked(lState)
	ptr := file.tailPointer()
	if df := fbo.dirtyFiles[ptr]; df != nil {
		return df
	}
	df := newDirtyFile(file, fbo.config.DirtyBlockCache())
	fbo.dirtyFiles[ptr] = df
	return df
}
// cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
// does so if the block isn't already marked as dirty in the cache.
// This is useful when operating on a dirty copy of a block that may
// already be in the cache.
func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
	lState *lockState, ptr BlockPointer, file path, block Block) error {
	fbo.blockLock.AssertLocked(lState)
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	needsCaching, isSyncing := df.setBlockDirty(ptr)

	if needsCaching {
		if err := fbo.config.DirtyBlockCache().Put(
			fbo.id(), ptr, file.Branch, block); err != nil {
			return err
		}
	}

	if isSyncing {
		// A sync is touching this block; later writes must be
		// deferred.
		fbo.doDeferWrite = true
	}
	return nil
}
// getOrCreateSyncInfoLocked returns the syncInfo for the given
// entry's ref, creating and registering one (with a fresh syncOp) if
// none exists yet.  blockLock must be held for writing.
func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
	lState *lockState, de DirEntry) (*syncInfo, error) {
	fbo.blockLock.AssertLocked(lState)
	ref := de.Ref()
	if si, ok := fbo.unrefCache[ref]; ok {
		return si, nil
	}

	so, err := newSyncOp(de.BlockPointer)
	if err != nil {
		return nil, err
	}
	si := &syncInfo{
		oldInfo: de.BlockInfo,
		op:      so,
	}
	fbo.unrefCache[ref] = si
	return si, nil
}
// GetDirtyRefs returns a list of references of all known dirty
// blocks.
func (fbo *folderBlockOps) GetDirtyRefs(lState *lockState) []BlockRef {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Keep the nil-when-empty behavior by not preallocating.
	var refs []BlockRef
	for ref := range fbo.deCache {
		refs = append(refs, ref)
	}
	return refs
}
// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
// failed with a recoverable block error on a multi-block file. It
// makes sure that any outstanding dirty versions of the file are
// fixed up to reflect the fact that some of the indirect pointers now
// need to change.
//
// redirtyOnRecoverableError maps new (post-sync) pointers to the old
// (pre-sync) pointers they replaced.  Failures in here are logged
// and swallowed -- this is best-effort recovery, not a fatal path.
func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
	ctx context.Context, lState *lockState, file path, kmd KeyMetadata,
	redirtyOnRecoverableError map[BlockPointer]BlockPointer) {
	fbo.blockLock.AssertLocked(lState)

	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		// Un-orphan old blocks, since we are reverting back to the
		// previous state.
		for _, oldPtr := range redirtyOnRecoverableError {
			fbo.log.CDebugf(ctx, "Un-orphaning %v", oldPtr)
			df.setBlockOrphaned(oldPtr, false)
		}
	}

	// Without a dirty top block there is nothing to fix up.
	dirtyBcache := fbo.config.DirtyBlockCache()
	topBlock, err := dirtyBcache.Get(fbo.id(), file.tailPointer(), fbo.branch())
	fblock, ok := topBlock.(*FileBlock)
	if err != nil || !ok {
		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
			"top-block for %v: %v", file.tailPointer(), err)
		return
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
		return
	}
	fd := fbo.newFileData(lState, file, session.UID, kmd)

	// If a copy of the top indirect block was made, we need to
	// redirty all the sync'd blocks under their new IDs, so that
	// future syncs will know they failed.
	newPtrs := make(map[BlockPointer]bool, len(redirtyOnRecoverableError))
	for newPtr := range redirtyOnRecoverableError {
		newPtrs[newPtr] = true
	}
	found, err := fd.findIPtrsAndClearSize(ctx, fblock, newPtrs)
	if err != nil {
		fbo.log.CWarningf(
			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
		return
	}

	// Re-dirty each new pointer that actually appears in the file's
	// indirect blocks, and drop its old dirty counterpart.
	for newPtr, oldPtr := range redirtyOnRecoverableError {
		if !found[newPtr] {
			continue
		}
		fbo.log.CDebugf(ctx, "Re-dirtying %v (and deleting dirty block %v)",
			newPtr, oldPtr)
		// These blocks would have been permanent, so they're
		// definitely still in the cache.
		b, err := fbo.config.BlockCache().Get(newPtr)
		if err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
			continue
		}
		if err = fbo.cacheBlockIfNotYetDirtyLocked(
			lState, newPtr, file, b); err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
		}
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v after recoverable error",
			oldPtr)
		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
		if err != nil {
			fbo.log.CDebugf(ctx, "Couldn't del-dirty %v: %v", oldPtr, err)
		}
	}
}
// nowUnixNano returns the current time, per the configured Clock, in
// nanoseconds since the Unix epoch.
func (fbo *folderBlockOps) nowUnixNano() int64 {
	now := fbo.config.Clock().Now()
	return now.UnixNano()
}
// PrepRename prepares the given rename operation. It returns copies
// of the old and new parent block (which may be the same), what is to
// be the new DirEntry, and a local block cache. It also modifies md,
// which must be a copy.
//
// Note that blockLock is only r-locked here: per getDirLocked's
// contract, write requests under an r-lock still get writable copies
// of clean blocks, and the caller flushes the returned blocks/lbc.
func (fbo *folderBlockOps) PrepRename(
	ctx context.Context, lState *lockState, md *RootMetadata,
	oldParent path, oldName string, newParent path, newName string) (
	oldPBlock, newPBlock *DirBlock, newDe DirEntry, lbc localBcache,
	err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// look up in the old path
	oldPBlock, err = fbo.getDirLocked(
		ctx, lState, md, oldParent, blockWrite)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	newDe, ok := oldPBlock.Children[oldName]
	// does the name exist?
	if !ok {
		return nil, nil, DirEntry{}, nil, NoSuchNameError{oldName}
	}

	// Record the rename op in the metadata.
	ro, err := newRenameOp(oldName, oldParent.tailPointer(), newName,
		newParent.tailPointer(), newDe.BlockPointer, newDe.Type)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	// A renameOp doesn't have a single path to represent it, so we
	// can't call setFinalPath here unfortunately.  That means any
	// rename may force a manual paths population at other layers
	// (e.g., for journal statuses).  TODO: allow a way to set more
	// than one final path for renameOps?
	md.AddOp(ro)

	lbc = make(localBcache)
	// TODO: Write a SameBlock() function that can deal properly with
	// dedup'd blocks that share an ID but can be updated separately.
	if oldParent.tailPointer().ID == newParent.tailPointer().ID {
		// Same-directory rename: both "parents" are one block.
		newPBlock = oldPBlock
	} else {
		newPBlock, err = fbo.getDirLocked(
			ctx, lState, md, newParent, blockWrite)
		if err != nil {
			return nil, nil, DirEntry{}, nil, err
		}
		now := fbo.nowUnixNano()

		oldGrandparent := *oldParent.parentPath()
		if len(oldGrandparent.path) > 0 {
			// Update the old parent's mtime/ctime, unless the
			// oldGrandparent is the same as newParent (in which
			// case, the syncBlockAndCheckEmbedLocked call by the
			// caller will take care of it).
			if oldGrandparent.tailPointer().ID != newParent.tailPointer().ID {
				b, err := fbo.getDirLocked(ctx, lState, md, oldGrandparent, blockWrite)
				if err != nil {
					return nil, nil, DirEntry{}, nil, err
				}
				if de, ok := b.Children[oldParent.tailName()]; ok {
					de.Ctime = now
					de.Mtime = now
					b.Children[oldParent.tailName()] = de
					// Put this block back into the local cache as dirty
					lbc[oldGrandparent.tailPointer()] = b
				}
			}
		} else {
			// The old parent is the TLF root; update times on the
			// root dir entry in the metadata directly.
			md.data.Dir.Ctime = now
			md.data.Dir.Mtime = now
		}
	}
	return oldPBlock, newPBlock, newDe, lbc, nil
}
// newFileData constructs a fileData wrapper for the given file whose
// getter reads via getFileBlockLocked and whose setter dirties
// blocks through cacheBlockIfNotYetDirtyLocked.  blockLock must be
// held (in some mode) by the caller.
func (fbo *folderBlockOps) newFileData(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			// Parallel reads happen off the main goroutine, so pass a
			// nil lState to skip lock assertions in that case.
			lState := lState
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return fbo.cacheBlockIfNotYetDirtyLocked(
				lState, ptr, file, block)
		}, fbo.log)
}
// newFileDataWithCache is like newFileData, but reads prefer the
// given dirty block cache (falling back to getFileBlockLocked on a
// miss) and writes go straight into that cache rather than dirtying
// the folder's own state.  blockLock must be held (in some mode) by
// the caller.
func (fbo *folderBlockOps) newFileDataWithCache(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata,
	dirtyBcache DirtyBlockCache) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			// Serve from the provided dirty cache when possible.
			block, err := dirtyBcache.Get(file.Tlf, ptr, file.Branch)
			if fblock, ok := block.(*FileBlock); ok && err == nil {
				return fblock, true, nil
			}
			// Parallel reads happen off the main goroutine, so pass a
			// nil lState to skip lock assertions in that case.
			lState := lState
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return dirtyBcache.Put(file.Tlf, ptr, file.Branch, block)
		}, fbo.log)
}
// Read reads from the given file into the given buffer at the given
// offset. It returns the number of bytes read and nil, or 0 and the
// error if there was one.
func (fbo *folderBlockOps) Read(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dest []byte, off int64) (int64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	fbo.log.CDebugf(ctx, "Reading from %v", file.tailPointer())

	// Data reads don't depend on the uid, so the zero value suffices.
	var uid keybase1.UID
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.read(ctx, dest, off)
}
// maybeWaitOnDeferredWrites blocks until the dirty block cache grants
// write permission on channel c, while listening for errors from the
// file's dirtyFile.  It logs a message if the wait takes longer than
// 100ms.  Returns the first queued error, if any.
func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
	ctx context.Context, lState *lockState, file Node,
	c DirtyPermChan) error {
	var errListener chan error
	// Register an error listener on the file's dirtyFile under the
	// lock, before starting to wait.
	err := func() error {
		fbo.blockLock.Lock(lState)
		defer fbo.blockLock.Unlock(lState)
		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
		if err != nil {
			return err
		}
		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
		errListener = make(chan error, 1)
		df.addErrListener(errListener)
		return nil
	}()
	if err != nil {
		return err
	}

	logTimer := time.After(100 * time.Millisecond)
	doLogUnblocked := false
	for {
		select {
		case <-c:
			// Permission granted.
			if doLogUnblocked {
				fbo.log.CDebugf(ctx, "Write unblocked")
			}
			// Make sure there aren't any queued errors.
			select {
			case err := <-errListener:
				return err
			default:
			}
			return nil
		case <-logTimer:
			// Print a log message once if it's taking too long.
			fbo.log.CDebugf(ctx,
				"Blocking a write because of a full dirty buffer")
			doLogUnblocked = true
		case err := <-errListener:
			// XXX: should we ignore non-fatal errors (like
			// context.Canceled), or errors that are specific only to
			// some other file being sync'd (e.g., "recoverable" block
			// errors from which we couldn't recover)?
			return err
		}
	}
}
// pathFromNodeForBlockWriteLocked resolves a Node to its path via the
// node cache, returning an InvalidPathError if the node no longer
// maps to a valid path.  blockLock must be held for writing.
func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.blockLock.AssertLocked(lState)
	p := fbo.nodeCache.PathFromNode(n)
	if p.isValid() {
		return p, nil
	}
	return path{}, InvalidPathError{p}
}
// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc, and returns the file's top
// block (writable) along with the current session's UID.
func (fbo *folderBlockOps) writeGetFileLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*FileBlock, keybase1.UID, error) {
	fbo.blockLock.AssertLocked(lState)

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, "", err
	}
	// Refuse the write outright if the session user isn't a TLF
	// writer.
	if !kmd.GetTlfHandle().IsWriter(session.UID) {
		accessErr := NewWriteAccessError(kmd.GetTlfHandle(),
			session.Name, file.String())
		return nil, "", accessErr
	}

	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, blockWrite)
	if err != nil {
		return nil, "", err
	}
	return fblock, session.UID, nil
}
// writeDataLocked writes data at the given offset into the file,
// dirtying blocks as needed.  Returns the set of blocks dirtied
// during this write that might need to be cleaned up if the write is
// deferred, plus the number of newly-dirtied child bytes.  blockLock
// must be held for writing.
func (fbo *folderBlockOps) writeDataLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	data []byte, off int64) (latestWrite WriteRange, dirtyPtrs []BlockPointer,
	newlyDirtiedChildBytes int64, err error) {
	// Bracket the write as a dirty op for the journal, if one exists.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}

	fbo.blockLock.AssertLocked(lState)
	fbo.log.CDebugf(ctx, "writeDataLocked on file pointer %v",
		file.tailPointer())
	defer func() {
		fbo.log.CDebugf(ctx, "writeDataLocked done: %v", err)
	}()

	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	defer func() {
		// Always update unsynced bytes and potentially force a sync,
		// even on an error, since the previously-dirty bytes stay in
		// the cache.
		df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
		if dirtyBcache.ShouldForceSync(fbo.id()) {
			select {
			// If we can't send on the channel, that means a sync is
			// already in progress.
			case fbo.forceSyncChan <- struct{}{}:
				fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
			default:
			}
		}
	}()

	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	if de.BlockPointer != file.tailPointer() {
		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
			"%v vs %v", de.BlockPointer, file.tailPointer())
	}

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
		fd.write(ctx, data, off, fblock, de, df)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return WriteRange{}, nil, newlyDirtiedChildBytes, err
	}

	// Put it in the `deCache` even if the size didn't change, since
	// the `deCache` is used to determine whether there are any dirty
	// files.  TODO: combine `deCache` with `dirtyFiles` and
	// `unrefCache`.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	if fbo.doDeferWrite {
		df.addDeferredNewBytes(bytesExtended)
	}

	latestWrite = si.op.addWrite(uint64(off), uint64(len(data)))

	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Write writes the given data to the given file. May block if there
// is too much unflushed data; in that case, it will be unblocked by a
// future sync.
func (fbo *folderBlockOps) Write(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, data []byte, off int64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flush so our memory usage doesn't grow without
	// bound.
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(len(data)))
	if err != nil {
		return err
	}
	// Release the requested bytes when done; the actual dirtied bytes
	// were accounted separately by writeDataLocked.
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(len(data)), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}

	// doDeferWrite is set by cacheBlockIfNotYetDirtyLocked during the
	// write; always reset it before releasing blockLock.
	defer func() {
		fbo.doDeferWrite = false
	}()

	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
		ctx, lState, kmd, filePath, data, off)
	if err != nil {
		return err
	}

	fbo.observers.localChange(ctx, file, latestWrite)

	if fbo.doDeferWrite {
		// There's an ongoing sync, and this write altered dirty
		// blocks that are in the process of syncing.  So, we have to
		// redo this write once the sync is complete, using the new
		// file path.
		//
		// There is probably a less terrible of doing this that
		// doesn't involve so much copying and rewriting, but this is
		// the most obviously correct way.
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)
		fbo.log.CDebugf(ctx, "Deferring a write to file %v off=%d len=%d",
			filePath.tailPointer(), off, len(data))
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)

				// Write the data again.  We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err = fbo.writeDataLocked(
					ctx, lState, kmd, f, dataCopy, off)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}

	return nil
}
// truncateExtendLocked is called by truncateLocked to extend a file
// by a large amount, creating a hole rather than writing explicit
// zeroes.  Returns the resulting WriteRange and the dirtied block
// pointers.  blockLock must be held for writing.
func (fbo *folderBlockOps) truncateExtendLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64, parentBlocks []parentBlockAndChildIndex) (
	WriteRange, []BlockPointer, error) {
	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	newDe, dirtyPtrs, err := fd.truncateExtend(
		ctx, size, fblock, parentBlocks, de, df)
	if err != nil {
		return WriteRange{}, nil, err
	}

	// Record the updated dir entry so the file shows as dirty.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, err
	}
	latestWrite := si.op.addTruncate(size)

	// Kick off a sync if the dirty buffer is full.
	if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
		select {
		// If we can't send on the channel, that means a sync is
		// already in progress
		case fbo.forceSyncChan <- struct{}{}:
			fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
		default:
		}
	}

	fbo.log.CDebugf(ctx, "truncateExtendLocked: done")
	return latestWrite, dirtyPtrs, nil
}
// truncateExtendCutoffPoint is the amount of data (128 KiB) in an
// extending truncate that will trigger the extending-with-a-hole
// algorithm (truncateExtendLocked) instead of writing explicit
// zeroes.
const truncateExtendCutoffPoint = 128 * 1024
// truncateLocked truncates or extends the given file to the given
// size.  Returns the set of newly-ID'd blocks created during this
// truncate that might need to be cleaned up if the truncate is
// deferred.  A nil *WriteRange means the truncate was a no-op (the
// file already had exactly the requested size).  blockLock must be
// held for writing.
func (fbo *folderBlockOps) truncateLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64) (*WriteRange, []BlockPointer, int64, error) {
	// Bracket the truncate as a dirty op for the journal, if one
	// exists.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}

	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	// find the block where the file should now end
	iSize := int64(size) // TODO: deal with overflow
	_, parentBlocks, block, nextBlockOff, startOff, _, err :=
		fd.getFileBlockAtOffset(ctx, fblock, iSize, blockWrite)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}

	currLen := int64(startOff) + int64(len(block.Contents))
	switch {
	case currLen+truncateExtendCutoffPoint < iSize:
		// A large extension: punch a hole instead of writing zeroes.
		// (The duplicated error/success returns from the original
		// were identical, so they're collapsed into one.)
		latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
			ctx, lState, kmd, file, uint64(iSize), parentBlocks)
		return &latestWrite, dirtyPtrs, 0, err
	case currLen < iSize:
		// A modest extension: write out explicit zero bytes.
		moreNeeded := iSize - currLen
		latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
			fbo.writeDataLocked(ctx, lState, kmd, file,
				make([]byte, moreNeeded), currLen)
		return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
	case currLen == iSize && nextBlockOff < 0:
		// same size!
		return nil, nil, 0, nil
	}

	// Otherwise we're shrinking the file; update the local entry size.
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return nil, nil, 0, err
	}

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return nil, nil, 0, err
	}

	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.truncateShrink(
		ctx, size, fblock, de)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return nil, nil, newlyDirtiedChildBytes, err
	}

	// Update dirtied bytes and unrefs regardless of error.
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)

	latestWrite := si.op.addTruncate(size)
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Truncate truncates or extends the given file to the given size.
// May block if there is too much unflushed data; in that case, it
// will be unblocked by a future sync.
func (fbo *folderBlockOps) Truncate(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, size uint64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flush so our memory usage doesn't grow without
	// bound.
	//
	// Assume the whole remaining file will be dirty after this
	// truncate.  TODO: try to figure out how many bytes actually will
	// be dirtied ahead of time?
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(size))
	if err != nil {
		return err
	}
	// Release the speculative permission grab once the truncate has
	// been applied (actual dirtied bytes were accounted separately).
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(size), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}

	// doDeferWrite is set by truncateLocked (via the write path) when
	// a sync is in flight; always reset it before we return.
	defer func() {
		fbo.doDeferWrite = false
	}()

	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
		ctx, lState, kmd, filePath, size)
	if err != nil {
		return err
	}

	// A nil latestWrite means the file was already the right size.
	if latestWrite != nil {
		fbo.observers.localChange(ctx, file, *latestWrite)
	}

	if fbo.doDeferWrite {
		// There's an ongoing sync, and this truncate altered
		// dirty blocks that are in the process of syncing.  So,
		// we have to redo this truncate once the sync is complete,
		// using the new file path.
		fbo.log.CDebugf(ctx, "Deferring a truncate to file %v",
			filePath.tailPointer())
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		// The closure captures `size` so the identical truncate can
		// be replayed against the post-sync path `f`.
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)

				// Truncate the file again.  We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err := fbo.truncateLocked(
					ctx, lState, kmd, f, size)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}

	return nil
}
// IsDirty returns whether the given file is dirty; if false is
// returned, then the file doesn't need to be synced.
func (fbo *folderBlockOps) IsDirty(lState *lockState, file path) bool {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// A dirty block in the cache means the file is definitely dirty.
	if fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), file.tailPointer(), file.Branch) {
		return true
	}
	// Even without dirty blocks, a lingering deCache entry counts as
	// dirty: a file may have had an attribute set (like mtime or
	// exec) after it was removed.  Most likely, the caller will next
	// call `ClearCacheInfo` to remove this entry.  (See comments in
	// `folderBranchOps.syncLocked`.)
	_, hasDirtyEntry := fbo.deCache[file.tailPointer().Ref()]
	return hasDirtyEntry
}
// clearCacheInfoLocked drops the cached dirty-entry and unref state
// for the given file, and finishes/removes its dirty-file record if
// one exists.  blockLock must be held for writing.
func (fbo *folderBlockOps) clearCacheInfoLocked(lState *lockState,
	file path) error {
	fbo.blockLock.AssertLocked(lState)
	tailPtr := file.tailPointer()
	ref := tailPtr.Ref()
	delete(fbo.deCache, ref)
	delete(fbo.unrefCache, ref)
	df, ok := fbo.dirtyFiles[tailPtr]
	if !ok || df == nil {
		return nil
	}
	if err := df.finishSync(); err != nil {
		return err
	}
	delete(fbo.dirtyFiles, tailPtr)
	return nil
}
// ClearCacheInfo removes any cached info for the the given file.
// It is the exported, locking wrapper around clearCacheInfoLocked.
func (fbo *folderBlockOps) ClearCacheInfo(lState *lockState, file path) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	return fbo.clearCacheInfoLocked(lState, file)
}
// revertSyncInfoAfterRecoverableError updates the saved sync info to
// include all the blocks from before the error, except for those that
// have encountered recoverable block errors themselves.
func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
	blocksToRemove []BlockPointer, result fileSyncState) {
	si := result.si
	savedSi := result.savedSi

	// Save the blocks we need to clean up on the next attempt.
	toClean := si.toCleanIfUnused

	newIndirect := make(map[BlockPointer]bool)
	for _, ptr := range result.newIndirectFileBlockPtrs {
		newIndirect[ptr] = true
	}

	// Propagate all unrefs forward, except those that belong to new
	// blocks that were created during the sync (unref'ing those would
	// be wrong, since they were never part of a successful revision).
	unrefs := make([]BlockInfo, 0, len(si.unrefs))
	for _, unref := range si.unrefs {
		if newIndirect[unref.BlockPointer] {
			// NOTE(review): nil context passed to CDebugf here —
			// presumably acceptable for this logger, but confirm.
			fbo.log.CDebugf(nil, "Dropping unref %v", unref)
			continue
		}
		unrefs = append(unrefs, unref)
	}

	// This sync will be retried and needs new blocks, so
	// reset everything in the sync info.  Note the wholesale copy
	// from savedSi, followed by re-applying the preserved fields.
	*si = *savedSi
	si.toCleanIfUnused = toClean
	si.unrefs = unrefs
	if si.bps == nil {
		return
	}
	si.bps.blockStates = nil

	// Mark any bad pointers so they get skipped next time.
	blocksToRemoveSet := make(map[BlockPointer]bool)
	for _, ptr := range blocksToRemove {
		blocksToRemoveSet[ptr] = true
	}

	for _, bs := range savedSi.bps.blockStates {
		// Only save the good pointers
		if !blocksToRemoveSet[bs.blockPtr] {
			si.bps.blockStates = append(si.bps.blockStates, bs)
		}
	}
}
// ReadyBlock is a thin wrapper around BlockOps.Ready() that handles
// checking for duplicates.  On success it returns the block's info
// (pointer plus encoded size), its plaintext size, and the readied
// (encoded/encrypted) data to be put to the server.
func ReadyBlock(ctx context.Context, bcache BlockCache, bops BlockOps,
	crypto cryptoPure, kmd KeyMetadata, block Block, uid keybase1.UID,
	bType keybase1.BlockType) (
	info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error) {
	var ptr BlockPointer
	directType := IndirectBlock
	if fBlock, ok := block.(*FileBlock); ok && !fBlock.IsInd {
		directType = DirectBlock
		// first see if we are duplicating any known blocks in this folder
		ptr, err = bcache.CheckForKnownPtr(kmd.TlfID(), fBlock)
		if err != nil {
			return
		}
	} else if dBlock, ok := block.(*DirBlock); ok {
		if dBlock.IsInd {
			panic("Indirect directory blocks aren't supported yet")
		}
		// TODO: support indirect directory blocks.
		directType = DirectBlock
	}

	// Ready the block, even in the case where we can reuse an
	// existing block, just so that we know what the size of the
	// encrypted data will be.
	id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block)
	if err != nil {
		return
	}

	if ptr.IsInitialized() {
		// Dedup hit: reuse the known pointer's ID, but give it a
		// fresh ref nonce under this writer.
		ptr.RefNonce, err = crypto.MakeBlockRefNonce()
		if err != nil {
			return
		}
		ptr.SetWriter(uid)
		// In case we're deduping an old pointer with an unknown block type.
		ptr.DirectType = directType
	} else {
		// No dedup: make a brand-new pointer for the readied block.
		ptr = BlockPointer{
			ID:         id,
			KeyGen:     kmd.LatestKeyGeneration(),
			DataVer:    block.DataVersion(),
			DirectType: directType,
			Context:    kbfsblock.MakeFirstContext(uid, bType),
		}
	}

	info = BlockInfo{
		BlockPointer: ptr,
		EncodedSize:  uint32(readyBlockData.GetEncodedSize()),
	}
	return
}
// fileSyncState holds state for a sync operation for a single
// file.  It is populated by startSyncWrite and consumed by
// CleanupSyncState and FinishSyncLocked.
type fileSyncState struct {
	// If fblock is non-nil, the (dirty, indirect, cached) block
	// it points to will be set to savedFblock on a recoverable
	// error.
	fblock, savedFblock *FileBlock

	// redirtyOnRecoverableError, which is non-nil only when fblock is
	// non-nil, contains pointers that need to be re-dirtied if the
	// top block gets copied during the sync, and a recoverable error
	// happens.  Maps to the old block pointer for the block, which
	// would need a DirtyBlockCache.Delete.
	redirtyOnRecoverableError map[BlockPointer]BlockPointer

	// If si is non-nil, its updated state will be reset on
	// error.  Also, if the error is recoverable, it will be
	// reverted to savedSi.
	//
	// TODO: Working with si in this way is racy, since si is a
	// member of unrefCache.
	si, savedSi *syncInfo

	// oldFileBlockPtrs is a list of transient entries in the
	// block cache for the file, which should be removed when the
	// sync finishes.
	oldFileBlockPtrs []BlockPointer

	// newIndirectFileBlockPtrs is a list of permanent entries
	// added to the block cache for the file, which should be
	// removed after the blocks have been sent to the server.
	// They are not removed on an error, because in that case the
	// file is still dirty locally and may get another chance to
	// be sync'd.
	//
	// TODO: This can be a list of IDs instead.
	newIndirectFileBlockPtrs []BlockPointer
}
// startSyncWrite contains the portion of StartSync() that's done
// while write-locking blockLock.  If there is no dirty de cache
// entry, dirtyDe will be nil.  It readies all the file's dirty
// blocks, records ref/unref accounting into md, and returns the
// top-level block, the block put state, and the sync state needed
// by CleanupSyncState/FinishSyncLocked.
func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, syncState fileSyncState,
	dirtyDe *DirEntry, err error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	// update the parent directories, and write all the new blocks out
	// to disk
	fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, blockWrite)
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	fileRef := file.tailPointer().Ref()
	si, ok := fbo.unrefCache[fileRef]
	if !ok {
		return nil, nil, syncState, nil,
			fmt.Errorf("No syncOp found for file ref %v", fileRef)
	}

	// Collapse the write range to reduce the size of the sync op.
	si.op.Writes = si.op.collapseWriteRange(nil)

	// If this function returns a success, we need to make sure the op
	// in `md` is not the same variable as the op in `unrefCache`,
	// because the latter could get updated still by local writes
	// before `md` is flushed to the server.  We don't copy it here
	// because code below still needs to modify it (and by extension,
	// the one stored in `syncState.si`).
	si.op.setFinalPath(file)
	md.AddOp(si.op)

	// Fill in syncState.
	if fblock.IsInd {
		// For indirect blocks, keep a deep copy so a recoverable
		// error can restore the top block's pre-sync contents.
		fblockCopy := fblock.DeepCopy()
		syncState.fblock = fblock
		syncState.savedFblock = fblockCopy
		syncState.redirtyOnRecoverableError = make(map[BlockPointer]BlockPointer)
	}
	syncState.si = si
	syncState.savedSi, err = si.DeepCopy(fbo.config.Codec())
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	if si.bps == nil {
		si.bps = newBlockPutState(1)
	} else {
		// reinstate byte accounting from the previous Sync
		md.SetRefBytes(si.refBytes)
		md.AddDiskUsage(si.refBytes)
		md.SetUnrefBytes(si.unrefBytes)
		md.SetMDRefBytes(0) // this will be calculated anew
		md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
	}
	// Capture the final ref/unref byte counts into si on the way out,
	// so a retried sync can reinstate them (see above).
	defer func() {
		si.refBytes = md.RefBytes()
		si.unrefBytes = md.UnrefBytes()
	}()

	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	fd := fbo.newFileData(lState, file, uid, md.ReadOnly())

	// Note: below we add possibly updated file blocks as "unref" and
	// "ref" blocks.  This is fine, since conflict resolution or
	// notifications will never happen within a file.

	// If needed, split the children blocks up along new boundaries
	// (e.g., if using a fingerprint-based block splitter).
	unrefs, err := fd.split(ctx, fbo.id(), dirtyBcache, fblock, df)
	// Preserve any unrefs before checking the error.
	for _, unref := range unrefs {
		md.AddUnrefBlock(unref)
	}
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	// Ready all children blocks, if any.
	oldPtrs, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
		fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df)
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	for newInfo, oldPtr := range oldPtrs {
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
		df.setBlockOrphaned(oldPtr, true)

		// Defer the DirtyBlockCache.Delete until after the new path
		// is ready, in case anyone tries to read the dirty file in
		// the meantime.
		syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)

		md.AddRefBlock(newInfo)

		// If this block is replacing a block from a previous, failed
		// Sync, we need to take that block out of the refs list, and
		// avoid unrefing it as well.
		si.removeReplacedBlock(ctx, fbo.log, oldPtr)

		err = df.setBlockSyncing(oldPtr)
		if err != nil {
			return nil, nil, syncState, nil, err
		}
		syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
	}

	err = df.setBlockSyncing(file.tailPointer())
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	syncState.oldFileBlockPtrs = append(
		syncState.oldFileBlockPtrs, file.tailPointer())

	// Capture the current de before we release the block lock, so
	// other deferred writes don't slip in.
	if de, ok := fbo.deCache[fileRef]; ok {
		dirtyDe = &(de.dirEntry)
	}

	// Leave a copy of the syncOp in `unrefCache`, since it may be
	// modified by future local writes while the syncOp in `md` should
	// only be modified by the rest of this sync process.
	var syncOpCopy *syncOp
	err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	fbo.unrefCache[fileRef].op = syncOpCopy

	// If there are any deferred bytes, it must be because this is
	// a retried sync and some blocks snuck in between sync.  Those
	// blocks will get transferred now, but they are also on the
	// deferred list and will be retried on the next sync as well.
	df.assimilateDeferredNewBytes()

	// TODO: Returning si.bps in this way is racy, since si is a
	// member of unrefCache.
	return fblock, si.bps, syncState, dirtyDe, nil
}
// makeLocalBcache builds a local block cache holding the parent
// directory block of the given file, with the file's dirty directory
// entry (if any) patched in, and merges the sync info's cached unrefs
// into md.
func (fbo *folderBlockOps) makeLocalBcache(ctx context.Context,
	lState *lockState, md *RootMetadata, file path, si *syncInfo,
	dirtyDe *DirEntry) (lbc localBcache, err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	parentPath := file.parentPath()
	dblock, err := fbo.getDirLocked(
		ctx, lState, md.ReadOnly(), *parentPath, blockWrite)
	if err != nil {
		return nil, err
	}

	// Add in the cached unref'd blocks.
	si.mergeUnrefCache(md)

	lbc = make(localBcache)
	if dirtyDe == nil {
		// Nothing dirty to patch in; return the empty cache.
		return lbc, nil
	}

	// Update the file's directory entry to the cached copy.
	dirtyDe.EncodedSize = si.oldInfo.EncodedSize
	dblock.Children[file.tailName()] = *dirtyDe
	lbc[parentPath.tailPointer()] = dblock
	return lbc, nil
}
// StartSync starts a sync for the given file. It returns the new
// FileBlock which has the readied top-level block which includes all
// writes since the last sync. Must be used with CleanupSyncState()
// and UpdatePointers/FinishSyncLocked() like so:
//
// 	fblock, bps, lbc, syncState, err :=
//		...fbo.StartSync(ctx, lState, md, uid, file)
//	defer func() {
//		...fbo.CleanupSyncState(
//			ctx, lState, md, file, ..., syncState, err)
//	}()
//	if err != nil {
//		...
//	}
//	...
//
//
//	... = fbo.UpdatePointers(..., func() error {
//		...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
//	})
func (fbo *folderBlockOps) StartSync(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, lbc localBcache,
	syncState fileSyncState, err error) {
	// NOTE(review): dirtyOpStart has no matching dirtyOpEnd here;
	// presumably CleanupSyncState's dirtyOpEnd balances it — confirm.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
	}

	fblock, bps, syncState, dirtyDe, err := fbo.startSyncWrite(
		ctx, lState, md, uid, file)
	if err != nil {
		return nil, nil, nil, syncState, err
	}

	lbc, err = fbo.makeLocalBcache(ctx, lState, md, file, syncState.savedSi,
		dirtyDe)
	if err != nil {
		return nil, nil, nil, syncState, err
	}
	return fblock, bps, lbc, syncState, err
}
// Does any clean-up for a sync of the given file, given an error
// (which may be nil) that happens during or after StartSync() and
// before FinishSync(). blocksToRemove may be nil.  On a nil error
// this is a no-op (besides ending the journal dirty op); otherwise
// it reverts the sync op state and either prepares a retry (for
// recoverable block errors) or drops deferred state (otherwise).
func (fbo *folderBlockOps) CleanupSyncState(
	ctx context.Context, lState *lockState, md ReadOnlyRootMetadata,
	file path, blocksToRemove []BlockPointer,
	result fileSyncState, err error) {
	// The `err` here intentionally shadows the parameter; this just
	// balances the dirtyOpStart done at the beginning of the sync.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		defer jServer.dirtyOpEnd(fbo.id())
	}

	if err == nil {
		return
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	// Notify error listeners before we reset the dirty blocks and
	// permissions to be granted.
	fbo.notifyErrListenersLocked(lState, file.tailPointer(), err)

	// If there was an error, we need to back out any changes that
	// might have been filled into the sync op, because it could
	// get reused again in a later Sync call.
	if result.si != nil {
		result.si.op.resetUpdateState()

		// Save this MD for later, so we can clean up its
		// newly-referenced block pointers if necessary.
		result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
			mdToCleanIfUnused{md, result.si.bps.DeepCopy()})
	}
	if isRecoverableBlockError(err) {
		// Recoverable: restore pre-sync state so the sync can retry.
		if result.si != nil {
			fbo.revertSyncInfoAfterRecoverableError(blocksToRemove, result)
		}
		if result.fblock != nil {
			result.fblock.Set(result.savedFblock)
			fbo.fixChildBlocksAfterRecoverableErrorLocked(
				ctx, lState, file, md,
				result.redirtyOnRecoverableError)
		}
	} else {
		// Since the sync has errored out unrecoverably, the deferred
		// bytes are already accounted for.
		if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
			df.updateNotYetSyncingBytes(-fbo.deferredWaitBytes)

			// Some blocks that were dirty are now clean under their
			// readied block ID, and now live in the bps rather than
			// the dirty bcache, so we can delete them from the dirty
			// bcache.
			dirtyBcache := fbo.config.DirtyBlockCache()
			for _, ptr := range result.oldFileBlockPtrs {
				if df.isBlockOrphaned(ptr) {
					fbo.log.CDebugf(ctx, "Deleting dirty orphan: %v", ptr)
					if err := dirtyBcache.Delete(fbo.id(), ptr,
						fbo.branch()); err != nil {
						fbo.log.CDebugf(ctx, "Couldn't delete %v", ptr)
					}
				}
			}
		}

		// On an unrecoverable error, the deferred writes aren't
		// needed anymore since they're already part of the
		// (still-)dirty blocks.
		fbo.deferredDirtyDeletes = nil
		fbo.deferredWrites = nil
		fbo.deferredWaitBytes = 0
	}

	// The sync is over, due to an error, so reset the map so that we
	// don't defer any subsequent writes.
	// Old syncing blocks are now just dirty
	if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
		df.resetSyncingBlocksToDirty()
	}
}
// cleanUpUnusedBlocks cleans up the blocks from any previous failed
// sync attempts.  Blocks referenced by the successful MD (md) are
// spared; the rest are handed to fbm for deletion.
func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
	md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
	numToClean := len(syncState.si.toCleanIfUnused)
	if numToClean == 0 {
		return nil
	}

	// What blocks are referenced in the successful MD?
	refs := make(map[BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		for _, ptr := range op.Refs() {
			if ptr == zeroPtr {
				panic("Unexpected zero ref ptr in a sync MD revision")
			}
			refs[ptr] = true
		}
		for _, update := range op.allUpdates() {
			if update.Ref == zeroPtr {
				panic("Unexpected zero update ref ptr in a sync MD revision")
			}
			refs[update.Ref] = true
		}
	}

	// For each MD to clean, clean up the old failed blocks
	// immediately if the merge status matches the successful put, if
	// they didn't get referenced in the successful put.  If the merge
	// status is different (e.g., we ended up on a conflict branch),
	// clean it up only if the original revision failed.  If the same
	// block appears more than once, the one with a different merged
	// status takes precedence (which will always come earlier in the
	// list of MDs).
	blocksSeen := make(map[BlockPointer]bool)
	for _, oldMD := range syncState.si.toCleanIfUnused {
		bdType := blockDeleteAlways
		if oldMD.md.MergedStatus() != md.MergedStatus() {
			bdType = blockDeleteOnMDFail
		}

		failedBps := newBlockPutState(len(oldMD.bps.blockStates))
		for _, bs := range oldMD.bps.blockStates {
			if bs.blockPtr == zeroPtr {
				panic("Unexpected zero block ptr in an old sync MD revision")
			}
			// First occurrence of a block wins (see precedence note
			// above); skip any later duplicates.
			if blocksSeen[bs.blockPtr] {
				continue
			}
			blocksSeen[bs.blockPtr] = true
			if refs[bs.blockPtr] && bdType == blockDeleteAlways {
				continue
			}
			failedBps.blockStates = append(failedBps.blockStates,
				blockState{blockPtr: bs.blockPtr})
			fbo.log.CDebugf(ctx, "Cleaning up block %v from a previous "+
				"failed revision %d (oldMD is %s, bdType=%d)", bs.blockPtr,
				oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
		}

		if len(failedBps.blockStates) > 0 {
			fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
		}
	}
	return nil
}
// doDeferredWritesLocked replays any writes or truncates that were
// deferred while a sync was in flight, against the post-sync path.
// It returns whether the file is still dirty after the replay.
// blockLock must be held for writing.
func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, newPath path) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)

	// Snapshot the deferred state, then clear it before replaying so
	// the replayed operations can safely re-defer if needed.
	pendingDeletes := fbo.deferredDirtyDeletes
	pendingWrites := fbo.deferredWrites
	stillDirty = len(pendingWrites) > 0
	fbo.deferredDirtyDeletes = nil
	fbo.deferredWrites = nil
	fbo.deferredWaitBytes = 0

	// Clear any dirty blocks that resulted from a write/truncate
	// happening during the sync, since we're redoing them below.
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range pendingDeletes {
		fbo.log.CDebugf(ctx, "Deleting deferred dirty ptr %v", ptr)
		if delErr := dirtyBcache.Delete(
			fbo.id(), ptr, fbo.branch()); delErr != nil {
			return true, delErr
		}
	}

	for _, write := range pendingWrites {
		// It's a little weird to return an error from a deferred
		// write here.  Hopefully that will never happen.
		if err = write(ctx, lState, kmd, newPath); err != nil {
			return true, err
		}
	}

	return stillDirty, nil
}
// FinishSyncLocked finishes the sync process for a file, given the
// state from StartSync. Specifically, it re-applies any writes that
// happened since the call to StartSync.  It returns whether the file
// is still dirty after replaying the deferred writes.
func (fbo *folderBlockOps) FinishSyncLocked(
	ctx context.Context, lState *lockState,
	oldPath, newPath path, md ReadOnlyRootMetadata,
	syncState fileSyncState, fbm *folderBlockManager) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)

	// Drop the transient dirty-cache entries for the old blocks,
	// which now live on the server under their readied IDs.
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range syncState.oldFileBlockPtrs {
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v", ptr)
		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
			return true, err
		}
	}

	bcache := fbo.config.BlockCache()
	for _, ptr := range syncState.newIndirectFileBlockPtrs {
		err := bcache.DeletePermanent(ptr.ID)
		if err != nil {
			// Cache cleanup failure is non-fatal; just log it.
			fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
				ptr.ID, err)
		}
	}

	stillDirty, err = fbo.doDeferredWritesLocked(ctx, lState, md, newPath)
	if err != nil {
		return true, err
	}

	// Clear cached info for the old path.  We are guaranteed that any
	// concurrent write to this file was deferred, even if it was to a
	// block that wasn't currently being sync'd, since the top-most
	// block is always in dirtyFiles and is always dirtied during a
	// write/truncate.
	//
	// Also, we can get rid of all the sync state that might have
	// happened during the sync, since we will replay the writes
	// below anyway.
	if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
		return true, err
	}

	if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
		return true, err
	}

	return stillDirty, nil
}
// notifyErrListenersLocked notifies any write operations that are
// blocked on a file so that they can learn about unrecoverable sync
// errors.  blockLock must be held.
func (fbo *folderBlockOps) notifyErrListenersLocked(lState *lockState,
	ptr BlockPointer, err error) {
	fbo.blockLock.AssertLocked(lState)
	if isRecoverableBlockError(err) {
		// Don't bother any listeners with this error, since the sync
		// will be retried.  Unless the sync has reached its retry
		// limit, but in that case the listeners will just proceed as
		// normal once the dirty block cache bytes are freed, and
		// that's ok since this error isn't fatal.
		return
	}
	if df, ok := fbo.dirtyFiles[ptr]; ok && df != nil {
		df.notifyErrListeners(err)
	}
}
// searchWithOutOfDateCacheError indicates that a node-cache-based
// search used a stale cache; the caller should retry with a clean
// cache.
type searchWithOutOfDateCacheError struct {
}

// Error implements the error interface.
func (e searchWithOutOfDateCacheError) Error() string {
	// The message is constant, so no fmt.Sprintf is needed (go vet
	// flags Sprintf calls without formatting verbs).
	return "Search is using an out-of-date node cache; " +
		"try again with a clean cache."
}
// searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, to ptr, given the set of pointers that were
// updated in a particular operation.  The keys in nodeMap make up the
// set of BlockPointers that are being searched for, and nodeMap is
// updated in place to include the corresponding discovered nodes.
//
// Returns the number of nodes found by this invocation.  If the error
// it returns is searchWithOutOfDateCache, the search should be
// retried by the caller with a clean cache.
func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
	lState *lockState, cache NodeCache, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootNode Node, currDir path, nodeMap map[BlockPointer]Node,
	numNodesFoundSoFar int) (int, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	dirBlock, err := fbo.getDirLocked(
		ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return 0, err
	}

	// getDirLocked may have unlocked blockLock, which means the cache
	// could have changed out from under us.  Verify that didn't
	// happen, so we can avoid messing it up with nodes from an old MD
	// version.  If it did happen, return a special error that lets
	// the caller know they should retry with a fresh cache.
	if currDir.path[0].BlockPointer !=
		cache.PathFromNode(rootNode).tailPointer() {
		return 0, searchWithOutOfDateCacheError{}
	}

	if numNodesFoundSoFar >= len(nodeMap) {
		// Everything requested has already been found.
		return 0, nil
	}

	numNodesFound := 0
	for name, de := range dirBlock.Children {
		if _, ok := nodeMap[de.BlockPointer]; ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			// make a node for every pathnode
			n := rootNode
			for i, pn := range childPath.path[1:] {
				if !pn.BlockPointer.IsValid() {
					// Temporary debugging output for KBFS-1764 -- the
					// GetOrCreate call below will panic.
					fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
						"path.path=%v (index %d), name=%s, de=%#v, "+
						"nodeMap=%v, newPtrs=%v, kmd=%#v",
						childPath, childPath.path, i, name, de, nodeMap,
						newPtrs, kmd)
				}
				n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n)
				if err != nil {
					return 0, err
				}
			}
			nodeMap[de.BlockPointer] = n
			numNodesFound++
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}

		// otherwise, recurse if this represents an updated block
		if _, ok := newPtrs[de.BlockPointer]; de.Type == Dir && ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
				newPtrs, kmd, rootNode, childPath, nodeMap,
				numNodesFoundSoFar+numNodesFound)
			if err != nil {
				return 0, err
			}
			numNodesFound += n
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}
	}

	return numNodesFound, nil
}
// trySearchWithCacheLocked attempts to resolve each pointer in ptrs
// to a Node using the given cache, recursing only through directories
// whose pointers appear in newPtrs.  It returns
// searchWithOutOfDateCacheError if the cache's root no longer matches
// rootPtr, in which case the caller should retry with a fresh cache.
func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	// Seed the result map with nil entries; found nodes fill them in.
	nodeMap := make(map[BlockPointer]Node)
	for _, ptr := range ptrs {
		nodeMap[ptr] = nil
	}

	if len(ptrs) == 0 {
		return nodeMap, nil
	}

	var node Node
	// The node cache used by the main part of KBFS is
	// fbo.nodeCache. This basically maps from BlockPointers to
	// Nodes. Nodes are used by the callers of the library, but
	// internally we need to know the series of BlockPointers and
	// file/dir names that make up the path of the corresponding
	// file/dir. fbo.nodeCache is long-lived and never invalidated.
	//
	// As folderBranchOps gets informed of new local or remote MD
	// updates, which change the BlockPointers of some subset of the
	// nodes in this TLF, it calls nodeCache.UpdatePointer for each
	// change. Then, when a caller passes some old Node they have
	// lying around into an FBO call, we can translate it to its
	// current path using fbo.nodeCache. Note that on every TLF
	// modification, we are guaranteed that the BlockPointer of the
	// root directory will change (because of the merkle-ish tree of
	// content hashes we use to assign BlockPointers).
	//
	// fbo.nodeCache needs to maintain the absolute latest mappings
	// for the TLF, or else FBO calls won't see up-to-date data. The
	// tension in search comes from the fact that we are trying to
	// discover the BlockPointers of certain files at a specific point
	// in the MD history, which is not necessarily the same as the
	// most-recently-seen MD update. Specifically, some callers
	// process a specific range of MDs, but folderBranchOps may have
	// heard about a newer one before, or during, when the caller
	// started processing. That means fbo.nodeCache may have been
	// updated to reflect the newest BlockPointers, and is no longer
	// correct as a cache for our search for the data at the old point
	// in time.
	if cache == fbo.nodeCache {
		// Root node should already exist if we have an up-to-date md.
		node = cache.Get(rootPtr.Ref())
		if node == nil {
			return nil, searchWithOutOfDateCacheError{}
		}
	} else {
		// Root node may or may not exist.
		var err error
		node, err = cache.GetOrCreate(rootPtr,
			string(kmd.GetTlfHandle().GetCanonicalName()), nil)
		if err != nil {
			return nil, err
		}
	}
	if node == nil {
		return nil, fmt.Errorf("Cannot find root node corresponding to %v",
			rootPtr)
	}

	// are they looking for the root directory?
	numNodesFound := 0
	if _, ok := nodeMap[rootPtr]; ok {
		nodeMap[rootPtr] = node
		numNodesFound++
		if numNodesFound >= len(nodeMap) {
			return nodeMap, nil
		}
	}

	rootPath := cache.PathFromNode(node)
	if len(rootPath.path) != 1 {
		return nil, fmt.Errorf("Invalid root path for %v: %s",
			rootPtr, rootPath)
	}

	_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
		kmd, node, rootPath, nodeMap, numNodesFound)
	if err != nil {
		return nil, err
	}

	// The search may have dropped blockLock transiently; make sure
	// the cache root still matches before trusting the results.
	if rootPtr != cache.PathFromNode(node).tailPointer() {
		return nil, searchWithOutOfDateCacheError{}
	}

	return nodeMap, nil
}
// searchForNodesLocked resolves ptrs to Nodes, first via the given
// cache and, if that cache turns out to be stale, via a throwaway
// cache.  It returns the resolved map (possibly with nil entries for
// unfound nodes) along with the cache that was ultimately used.
func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	// First try the passed-in cache.  If it doesn't work because the
	// cache is out of date, try again with a clean cache.
	nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
		newPtrs, kmd, rootPtr)
	if _, outOfDate := err.(searchWithOutOfDateCacheError); outOfDate {
		// The md is out-of-date, so use a throwaway cache so we
		// don't pollute the real node cache with stale nodes.
		fbo.log.CDebugf(ctx, "Root node %v doesn't exist in the node "+
			"cache; using a throwaway node cache instead",
			rootPtr)
		cache = newNodeCacheStandard(fbo.folderBranch)
		nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
			newPtrs, kmd, rootPtr)
	}
	if err != nil {
		return nil, nil, err
	}

	// Return the whole map even if some nodes weren't found.
	return nodeMap, cache, nil
}
// SearchForNodes tries to resolve all the given pointers to a Node
// object, using only the updated pointers specified in newPtrs.
// Returns an error if any subset of the pointer paths do not exist;
// it is the caller's responsibility to decide to error on particular
// unresolved nodes.  It also returns the cache that ultimately
// contains the nodes -- this might differ from the passed-in cache if
// another goroutine updated that cache and it no longer contains the
// root pointer specified in md.
func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	// Take a read lock for the duration of the whole search.
	lState := makeFBOLockState()
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
}
// SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers.
func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (map[BlockPointer]path, error) {
	lState := makeFBOLockState()
	// Hold the lock while processing the paths so they can't be changed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	nodeMap, cache, err := fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
	if err != nil {
		return nil, err
	}

	// Translate every resolved node into a path; unfound pointers map
	// to the empty path.
	paths := make(map[BlockPointer]path, len(nodeMap))
	for ptr, node := range nodeMap {
		if node == nil {
			paths[ptr] = path{}
			continue
		}
		nodePath := cache.PathFromNode(node)
		if nodePath.tailPointer() != ptr {
			return nil, NodeNotFoundError{ptr}
		}
		paths[ptr] = nodePath
	}

	return paths, nil
}
// getUndirtiedEntry returns the clean entry for the given path
// corresponding to a cached dirty entry. If there is no dirty or
// clean entry, nil is returned.
func (fbo *folderBlockOps) getUndirtiedEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// If there's no dirty entry cached for this file, there's nothing
	// to undirty.
	_, ok := fbo.deCache[file.tailPointer().Ref()]
	if !ok {
		return nil, nil
	}
	// Get the undirtied dir block.
	dblock, err := fbo.getDirLocked(
		ctx, lState, kmd, *file.parentPath(), blockRead)
	if err != nil {
		return nil, err
	}
	// The clean parent block may not contain this entry at all; in
	// that case report "no clean entry" rather than an error.
	undirtiedEntry, ok := dblock.Children[file.tailName()]
	if !ok {
		return nil, nil
	}
	return &undirtiedEntry, nil
}
// setCachedAttr takes blockLock for writing and delegates to
// setCachedAttrLocked to update the cached attribute for ref.
func (fbo *folderBlockOps) setCachedAttr(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.setCachedAttrLocked(lState, ref, attr, realEntry, doCreate)
}
// UpdateCachedEntryAttributes updates any cached entry for the given
// path according to the given op. The node for the path is returned
// if there is one.
func (fbo *folderBlockOps) UpdateCachedEntryAttributes(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir path, op *setAttrOp) (Node, error) {
	childPath := dir.ChildPathNoPtr(op.Name)
	// find the node for the actual change; requires looking up
	// the child entry to get the BlockPointer, unfortunately.
	de, err := fbo.GetDirtyEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}
	childNode := fbo.nodeCache.Get(de.Ref())
	if childNode == nil {
		// Nothing to do, since the cache entry won't be
		// accessible from any node.
		return nil, nil
	}
	// Now that we have the pointer, rebuild the child path with it.
	childPath = dir.ChildPath(op.Name, de.BlockPointer)
	// If there's a cache entry, we need to update it, so try and
	// fetch the undirtied entry.
	cleanEntry, err := fbo.getUndirtiedEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}
	if cleanEntry != nil {
		// doCreate=false: only update an existing cached entry.
		fbo.setCachedAttr(lState, de.Ref(), op.Attr, cleanEntry, false)
	}
	return childNode, nil
}
// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet. We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry.
func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
	ctx context.Context, lState *lockState, op *setAttrOp, de DirEntry) {
	// doCreate=true: create the dirty cache entry if it's missing.
	fbo.setCachedAttr(lState, de.Ref(), op.Attr, &de, true)
}
// getDeferredWriteCountForTest returns the number of currently
// deferred writes, under the block lock; intended for tests only.
func (fbo *folderBlockOps) getDeferredWriteCountForTest(lState *lockState) int {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return len(fbo.deferredWrites)
}
// updatePointer moves the node-cache reference for oldPtr to newPtr
// and, when the block ID actually changed, optionally kicks off a
// prefetch of the new block. Callers hold blockLock (see
// UpdatePointers and FastForwardAllNodes, which lock before calling).
func (fbo *folderBlockOps) updatePointer(kmd KeyMetadata, oldPtr BlockPointer, newPtr BlockPointer, shouldPrefetch bool) {
	updated := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
	if !updated {
		return
	}
	// Only prefetch if the updated pointer is a new block ID.
	if oldPtr.ID != newPtr.ID {
		// TODO: Remove this comment when we're done debugging because it'll be everywhere.
		fbo.log.CDebugf(context.TODO(), "Updated reference for pointer %s to %s.", oldPtr.ID, newPtr.ID)
		if shouldPrefetch {
			// Prefetch the new ref, but only if the old ref already exists in
			// the block cache. Ideally we'd always prefetch it, but we need
			// the type of the block so that we can call `NewEmpty`.
			// TODO KBFS-1850: Eventually we should use the codec library's
			// ability to decode into a nil interface to no longer need to
			// pre-initialize the correct type.
			block, _, _, err := fbo.config.BlockCache().GetWithPrefetch(oldPtr)
			if err != nil {
				// Best-effort: if the old block isn't cached, just skip
				// the prefetch rather than failing the pointer update.
				return
			}
			fbo.config.BlockOps().Prefetcher().PrefetchBlock(
				block.NewEmpty(),
				newPtr,
				kmd,
				updatePointerPrefetchPriority,
			)
		}
	}
}
// UpdatePointers updates all the pointers in the node cache
// atomically. If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated.
func (fbo *folderBlockOps) UpdatePointers(kmd KeyMetadata, lState *lockState,
	op op, shouldPrefetch bool, afterUpdateFn func() error) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	for _, u := range op.allUpdates() {
		fbo.updatePointer(kmd, u.Unref, u.Ref, shouldPrefetch)
	}
	if afterUpdateFn != nil {
		return afterUpdateFn()
	}
	return nil
}
// unlinkDuringFastForwardLocked unlinks the cached node for ref from
// the node cache, if any. Used when a fast-forward discovers a node
// that no longer exists in the current TLF. blockLock must be held.
func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
	lState *lockState, ref BlockRef) {
	fbo.blockLock.AssertLocked(lState)
	oldNode := fbo.nodeCache.Get(ref)
	if oldNode == nil {
		return
	}
	oldPath := fbo.nodeCache.PathFromNode(oldNode)
	fbo.log.CDebugf(ctx, "Unlinking missing node %s/%v during "+
		"fast-forward", oldPath, ref)
	fbo.nodeCache.Unlink(ref, oldPath)
}
// fastForwardDirAndChildrenLocked updates the pointers of all cached
// children of currDir to their current values in kmd, recursing into
// subdirectories and unlinking children that no longer exist. It
// returns the node changes needed to invalidate affected nodes.
// blockLock must be held.
func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
	lState *lockState, currDir path, children map[string]map[pathNode]bool,
	kmd KeyMetadata) ([]NodeChange, error) {
	fbo.blockLock.AssertLocked(lState)
	dirBlock, err := fbo.getDirLocked(ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return nil, err
	}
	prefix := currDir.String()
	// TODO: parallelize me?
	var changes []NodeChange
	for child := range children[prefix] {
		entry, ok := dirBlock.Children[child.Name]
		if !ok {
			// The child is gone from the current version of this dir.
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
			continue
		}
		fbo.log.CDebugf(ctx, "Fast-forwarding %v -> %v",
			child.BlockPointer, entry.BlockPointer)
		fbo.updatePointer(kmd, child.BlockPointer,
			entry.BlockPointer, true)
		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
		newPath := fbo.nodeCache.PathFromNode(node)
		if entry.Type == Dir {
			if node != nil {
				// Invalidate this directory's cached listing.
				change := NodeChange{Node: node}
				for subchild := range children[newPath.String()] {
					change.DirUpdated = append(change.DirUpdated, subchild.Name)
				}
				changes = append(changes, change)
			}
			// Recurse into the subdirectory's cached children.
			childChanges, err := fbo.fastForwardDirAndChildrenLocked(
				ctx, lState, newPath, children, kmd)
			if err != nil {
				return nil, err
			}
			changes = append(changes, childChanges...)
		} else if node != nil {
			// File -- invalidate the entire file contents.
			changes = append(changes, NodeChange{
				Node: node,
				FileUpdated: []WriteRange{{Len: 0, Off: 0}},
			})
		}
	}
	// Mark this directory's children as processed.
	delete(children, prefix)
	return changes, nil
}
// FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF. If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it. Returns the set
// of node changes that resulted. If there are no nodes, it returns a
// nil error because there's nothing to be done.
func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata) (
	changes []NodeChange, err error) {
	// Take a hard lock through this whole process. TODO: is there
	// any way to relax this? It could lead to file system operation
	// timeouts, even on reads, if we hold it too long.
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	nodes := fbo.nodeCache.AllNodes()
	if len(nodes) == 0 {
		// Nothing needs to be done!
		return nil, nil
	}
	fbo.log.CDebugf(ctx, "Fast-forwarding %d nodes", len(nodes))
	defer func() { fbo.log.CDebugf(ctx, "Fast-forward complete: %v", err) }()
	// Build a "tree" representation for each interesting path prefix:
	// children maps each directory path string to the set of its
	// cached child pathNodes.
	children := make(map[string]map[pathNode]bool)
	var rootPath path
	for _, n := range nodes {
		p := fbo.nodeCache.PathFromNode(n)
		// A single-element path is the TLF root itself.
		if len(p.path) == 1 {
			rootPath = p
		}
		prevPath := ""
		for _, pn := range p.path {
			if prevPath != "" {
				childPNs := children[prevPath]
				if childPNs == nil {
					childPNs = make(map[pathNode]bool)
					children[prevPath] = childPNs
				}
				childPNs[pn] = true
			}
			prevPath = filepath.Join(prevPath, pn.Name)
		}
	}
	if !rootPath.isValid() {
		return nil, errors.New("Couldn't find the root path")
	}
	fbo.log.CDebugf(ctx, "Fast-forwarding root %v -> %v",
		rootPath.path[0].BlockPointer, md.data.Dir.BlockPointer)
	// Move the root node to the new root pointer (no prefetch needed).
	fbo.updatePointer(md, rootPath.path[0].BlockPointer,
		md.data.Dir.BlockPointer, false)
	rootPath.path[0].BlockPointer = md.data.Dir.BlockPointer
	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
	if rootNode != nil {
		// Invalidate the root directory's cached listing.
		change := NodeChange{Node: rootNode}
		for child := range children[rootPath.String()] {
			change.DirUpdated = append(change.DirUpdated, child.Name)
		}
		changes = append(changes, change)
	}
	// Recursively fast-forward everything reachable from the root;
	// processed entries are removed from `children`.
	childChanges, err := fbo.fastForwardDirAndChildrenLocked(
		ctx, lState, rootPath, children, md)
	if err != nil {
		return nil, err
	}
	changes = append(changes, childChanges...)
	// Unlink any children that remain.
	for _, childPNs := range children {
		for child := range childPNs {
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
		}
	}
	return changes, nil
}
// chainsPathPopulator abstracts the ability to fill in the paths for
// all the ops tracked by a crChains object.
type chainsPathPopulator interface {
	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
}
// populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache.
func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
	log logger.Logger, chains *crChains, includeCreates bool) error {
	// Only the error matters here; the returned paths are discarded.
	_, err := chains.getPaths(ctx, fbo, log, fbo.nodeCache, includeCreates)
	return err
}
// Compile-time check that folderBlockOps satisfies chainsPathPopulator.
var _ chainsPathPopulator = (*folderBlockOps)(nil)
|
package throttler
import (
"fmt"
"os/exec"
"strconv"
"strings"
"syscall"
)
const (
	// tc qdisc/class location templates; %s is the network device.
	// Hierarchy: root qdisc 10: -> default class 10:1 and target
	// class 10:10 -> netem qdisc 100: attached to the target class.
	tcRootQDisc = `dev %s handle 10: root`
	tcDefaultClass = `dev %s parent 10: classid 10:1`
	// NOTE(review): the target class is parented under 10:1 here while
	// the netem rule attaches to 10:10 -- confirm this matches the
	// intended HTB hierarchy (a later revision parents it under 10:).
	tcTargetClass = `dev %s parent 10:1 classid 10:10`
	tcNetemRule = `dev %s parent 10:10 handle 100:`
	// Shaping parameter templates.
	tcRate = `rate %vkbit`
	tcDelay = `delay %vms`
	tcLoss = `loss %v%%`
	// tc command prefixes (run via sudo).
	tcAddClass = `sudo tc class add`
	tcDelClass = `sudo tc class del`
	tcAddQDisc = `sudo tc qdisc add`
	tcDelQDisc = `sudo tc qdisc del`
	// iptables templates; %s is the command name (iptables/ip6tables).
	// Matched packets are classified into the target class 10:10.
	iptAddTarget = `sudo %s -A POSTROUTING -t mangle -j CLASSIFY --set-class 10:10`
	iptDelTarget = `sudo %s -D POSTROUTING -t mangle -j CLASSIFY --set-class 10:10`
	iptDestIP = `-d %s`
	iptProto = `-p %s`
	iptDestPorts = `--match multiport --dports %s`
	iptDestPort = `--dport %s`
	// Marker used to find our rules again when deleting them.
	iptDelSearch = `class 0010:0010`
	iptList = `sudo %s -S -t mangle`
	ip4Tables = `iptables`
	ip6Tables = `ip6tables`
	iptDel = `sudo %s -t mangle -D`
	// Detection / inspection commands.
	tcExists = `sudo tc qdisc show | grep "netem"`
	tcCheck = `sudo tc -s qdisc`
)
// tcThrottler shapes traffic via the Linux tc (HTB + netem) and
// iptables CLASSIFY machinery, shelling out through the commander.
type tcThrottler struct {
	c commander
}
// setup builds the tc hierarchy and iptables rules in order: root
// qdisc, default class, target class, netem rule, then the iptables
// rules that classify traffic into the target class. The first error
// aborts the sequence.
func (t *tcThrottler) setup(cfg *Config) error {
	steps := []func(*Config, commander) error{
		addRootQDisc,     // the root node to append the filters
		addDefaultClass,  // class for all traffic that isn't classified
		addTargetClass,   // class the network emulator rule is assigned
		addNetemRule,     // netem rule with the desired behavior
		addIptablesRules, // rules that classify traffic into 10:10
	}
	for _, step := range steps {
		if err := step(cfg, t.c); err != nil {
			return err
		}
	}
	return nil
}
// addRootQDisc installs the root HTB qdisc on the configured device.
func addRootQDisc(cfg *Config, c commander) error {
	cmd := tcAddQDisc + " " + fmt.Sprintf(tcRootQDisc, cfg.Device) + " htb"
	return c.execute(cmd)
}
// addDefaultClass installs the HTB class for unclassified traffic,
// rated at DefaultBandwidth when set, otherwise an effectively
// unlimited 1000000 kbit.
func addDefaultClass(cfg *Config, c commander) error {
	rate := fmt.Sprintf(tcRate, 1000000)
	if cfg.DefaultBandwidth > 0 {
		rate = fmt.Sprintf(tcRate, cfg.DefaultBandwidth)
	}
	def := fmt.Sprintf(tcDefaultClass, cfg.Device)
	return c.execute(strings.Join([]string{tcAddClass, def, "htb", rate}, " "))
}
// addTargetClass adds the HTB class (10:10) that the netem rule is
// attached to. Its rate must come from TargetBandwidth -- the limit
// being imposed on matched traffic -- not DefaultBandwidth, which only
// applies to unclassified traffic (fixed here; the original read
// cfg.DefaultBandwidth). Falls back to an effectively unlimited
// 1000000 kbit when TargetBandwidth is unset (< 0).
func addTargetClass(cfg *Config, c commander) error {
	//Add the target Class
	tar := fmt.Sprintf(tcTargetClass, cfg.Device)
	rate := ""
	if cfg.TargetBandwidth > -1 {
		rate = fmt.Sprintf(tcRate, cfg.TargetBandwidth)
	} else {
		rate = fmt.Sprintf(tcRate, 1000000)
	}
	strs := []string{tcAddClass, tar, "htb", rate}
	cmd := strings.Join(strs, " ")
	return c.execute(cmd)
}
// addNetemRule installs the netem qdisc carrying the configured
// latency, bandwidth, and packet-loss behavior on the target class.
func addNetemRule(cfg *Config, c commander) error {
	parts := []string{tcAddQDisc, fmt.Sprintf(tcNetemRule, cfg.Device), "netem"}
	if cfg.Latency > 0 {
		parts = append(parts, fmt.Sprintf(tcDelay, cfg.Latency))
	}
	if cfg.TargetBandwidth > -1 {
		parts = append(parts, fmt.Sprintf(tcRate, cfg.TargetBandwidth))
	}
	if cfg.PacketLoss > 0 {
		loss := strconv.FormatFloat(cfg.PacketLoss, 'f', 2, 64)
		parts = append(parts, fmt.Sprintf(tcLoss, loss))
	}
	return c.execute(strings.Join(parts, " "))
}
// addIptablesRules installs the CLASSIFY rules for the configured
// IPv4 targets, then the IPv6 targets, stopping at the first error.
// The original guarded the very first step with `err == nil` on a
// freshly declared err, which was always true; that dead check is
// removed.
func addIptablesRules(cfg *Config, c commander) error {
	if len(cfg.TargetIps) > 0 {
		if err := addIptablesRulesForAddrs(cfg, c, ip4Tables, cfg.TargetIps); err != nil {
			return err
		}
	}
	if len(cfg.TargetIps6) > 0 {
		return addIptablesRulesForAddrs(cfg, c, ip6Tables, cfg.TargetIps6)
	}
	return nil
}
// addIptablesRulesForAddrs builds and executes one CLASSIFY rule per
// (protocol, address) combination for the given iptables command
// (iptables or ip6tables). Ports are appended to every non-icmp rule.
// The original contained an unreachable branch for "no rules": rules
// always holds at least the bare addTargetCmd, so that dead code (and
// the always-true len checks around it) is removed.
func addIptablesRulesForAddrs(cfg *Config, c commander, command string, addrs []string) error {
	// Destination-port fragment, if any ports are configured.
	ports := ""
	if len(cfg.TargetPorts) > 1 {
		ports = fmt.Sprintf(iptDestPorts, strings.Join(cfg.TargetPorts, ","))
	} else if len(cfg.TargetPorts) == 1 {
		ports = fmt.Sprintf(iptDestPort, cfg.TargetPorts[0])
	}
	addTargetCmd := fmt.Sprintf(iptAddTarget, command)
	// One rule per protocol (ports never apply to icmp), or a single
	// protocol-less rule when no protocols are configured.
	var rules []string
	if len(cfg.TargetProtos) > 0 {
		for _, ptc := range cfg.TargetProtos {
			rule := addTargetCmd + " " + fmt.Sprintf(iptProto, ptc)
			if ptc != "icmp" && ports != "" {
				rule += " " + ports
			}
			rules = append(rules, rule)
		}
	} else {
		rules = []string{addTargetCmd}
	}
	// Fan each rule out across the destination addresses.
	if len(addrs) > 0 {
		iprules := make([]string, 0, len(rules)*len(addrs))
		for _, ip := range addrs {
			dest := fmt.Sprintf(iptDestIP, ip)
			for _, rule := range rules {
				iprules = append(iprules, rule+" "+dest)
			}
		}
		rules = iprules
	}
	for _, rule := range rules {
		if err := c.execute(rule); err != nil {
			return err
		}
	}
	return nil
}
// teardown removes the iptables rules, then deletes the root qdisc,
// which takes the whole class/qdisc tree under it down as well.
func (t *tcThrottler) teardown(cfg *Config) error {
	if err := delIptablesRules(cfg, t.c); err != nil {
		return err
	}
	return delRootQDisc(cfg, t.c)
}
// delIptablesRules lists the mangle-table rules for both iptables and
// ip6tables, and deletes every rule we previously added (identified by
// the iptDelSearch classifier marker). Hosts without a command, or
// whose ip6tables exits with status 3 (no IPv6 capability), are
// skipped silently.
func delIptablesRules(cfg *Config, c commander) error {
	iptablesCommands := []string{ip4Tables, ip6Tables}
	for _, iptablesCommand := range iptablesCommands {
		// Skip commands not present on this host.
		if !c.commandExists(iptablesCommand) {
			continue
		}
		lines, err := c.executeGetLines(fmt.Sprintf(iptList, iptablesCommand))
		if err != nil {
			// ignore exit code 3 from iptables, which might happen if the system
			// has the ip6tables command, but no IPv6 capabilities
			werr, ok := err.(*exec.ExitError)
			if !ok {
				return err
			}
			status, ok := werr.Sys().(syscall.WaitStatus)
			if !ok {
				return err
			}
			if status.ExitStatus() == 3 {
				continue
			}
			return err
		}
		delCmdPrefix := fmt.Sprintf(iptDel, iptablesCommand)
		for _, line := range lines {
			if strings.Contains(line, iptDelSearch) {
				// Turn the listed append rule (-A ...) into a delete.
				cmd := strings.Replace(line, "-A", delCmdPrefix, 1)
				err = c.execute(cmd)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// delRootQDisc deletes the root qdisc from the configured device.
func delRootQDisc(cfg *Config, c commander) error {
	return c.execute(tcDelQDisc + " " + fmt.Sprintf(tcRootQDisc, cfg.Device))
}
// exists reports whether a netem qdisc is already installed, by
// grepping `tc qdisc show` output. Always false in dry-run mode
// (`dry` is a package-level flag declared elsewhere).
func (t *tcThrottler) exists() bool {
	if dry {
		return false
	}
	err := t.c.execute(tcExists)
	return err == nil
}
// check returns the shell command used to inspect qdisc statistics.
func (t *tcThrottler) check() string {
	return tcCheck
}
Fix shaping default bandwidth
The current code uses cfg.DefaultBandwidth in addTargetClass(), which
is wrong: the target class should be rated by cfg.TargetBandwidth.
The other issue is that HTB has a built-in default class that all
unclassified packets fall into. This can be verified via the
direct_packets_stats value in the output of "tc qdisc show dev eth0";
we need to explicitly set the default class to 10:1 to handle this.
package throttler
import (
"fmt"
"os/exec"
"strconv"
"strings"
"syscall"
)
const (
	// The root HTB qdisc (handle 10:). tcRootExtra adds "default 1"
	// so unclassified packets fall into class 10:1 rather than HTB's
	// built-in direct path (see direct_packets_stats in
	// "tc qdisc show").
	tcRootQDisc = `dev %s handle 10: root`
	tcRootExtra = `default 1`
	// Class 10:1 carries default traffic; class 10:10 is the target
	// class that the netem qdisc (handle 100:) attaches to.
	tcDefaultClass = `dev %s parent 10: classid 10:1`
	tcTargetClass = `dev %s parent 10: classid 10:10`
	tcNetemRule = `dev %s parent 10:10 handle 100:`
	// Shaping parameter templates.
	tcRate = `rate %vkbit`
	tcDelay = `delay %vms`
	tcLoss = `loss %v%%`
	// tc command prefixes (run via sudo).
	tcAddClass = `sudo tc class add`
	tcDelClass = `sudo tc class del`
	tcAddQDisc = `sudo tc qdisc add`
	tcDelQDisc = `sudo tc qdisc del`
	// iptables templates; %s is the command name (iptables/ip6tables).
	iptAddTarget = `sudo %s -A POSTROUTING -t mangle -j CLASSIFY --set-class 10:10`
	iptDelTarget = `sudo %s -D POSTROUTING -t mangle -j CLASSIFY --set-class 10:10`
	iptDestIP = `-d %s`
	iptProto = `-p %s`
	iptDestPorts = `--match multiport --dports %s`
	iptDestPort = `--dport %s`
	// Marker used to find our rules again when deleting them.
	iptDelSearch = `class 0010:0010`
	iptList = `sudo %s -S -t mangle`
	ip4Tables = `iptables`
	ip6Tables = `ip6tables`
	iptDel = `sudo %s -t mangle -D`
	// Detection / inspection commands.
	tcExists = `sudo tc qdisc show | grep "netem"`
	tcCheck = `sudo tc -s qdisc`
)
type tcThrottler struct {
c commander
}
func (t *tcThrottler) setup(cfg *Config) error {
err := addRootQDisc(cfg, t.c) //The root node to append the filters
if err != nil {
return err
}
err = addDefaultClass(cfg, t.c) //The default class for all traffic that isn't classified
if err != nil {
return err
}
err = addTargetClass(cfg, t.c) //The class that the network emulator rule is assigned
if err != nil {
return err
}
err = addNetemRule(cfg, t.c) //The network emulator rule that contains the desired behavior
if err != nil {
return err
}
return addIptablesRules(cfg, t.c) //The network emulator rule that contains the desired behavior
}
// addRootQDisc installs the root HTB qdisc on the device with
// "default 1" (tcRootExtra), so unclassified packets are sent to
// class 10:1 instead of HTB's built-in direct path.
func addRootQDisc(cfg *Config, c commander) error {
	//Add the root QDisc
	root := fmt.Sprintf(tcRootQDisc, cfg.Device)
	strs := []string{tcAddQDisc, root, "htb", tcRootExtra}
	cmd := strings.Join(strs, " ")
	return c.execute(cmd)
}
func addDefaultClass(cfg *Config, c commander) error {
//Add the default Class
def := fmt.Sprintf(tcDefaultClass, cfg.Device)
rate := ""
if cfg.DefaultBandwidth > 0 {
rate = fmt.Sprintf(tcRate, cfg.DefaultBandwidth)
} else {
rate = fmt.Sprintf(tcRate, 1000000)
}
strs := []string{tcAddClass, def, "htb", rate}
cmd := strings.Join(strs, " ")
return c.execute(cmd)
}
// addTargetClass adds the HTB class (10:10) the netem rule attaches
// to, rated by the TargetBandwidth being imposed on matched traffic,
// or an effectively unlimited 1000000 kbit when unset (< 0).
func addTargetClass(cfg *Config, c commander) error {
	//Add the target Class
	tar := fmt.Sprintf(tcTargetClass, cfg.Device)
	rate := ""
	if cfg.TargetBandwidth > -1 {
		rate = fmt.Sprintf(tcRate, cfg.TargetBandwidth)
	} else {
		rate = fmt.Sprintf(tcRate, 1000000)
	}
	strs := []string{tcAddClass, tar, "htb", rate}
	cmd := strings.Join(strs, " ")
	return c.execute(cmd)
}
func addNetemRule(cfg *Config, c commander) error {
//Add the Network Emulator rule
net := fmt.Sprintf(tcNetemRule, cfg.Device)
strs := []string{tcAddQDisc, net, "netem"}
if cfg.Latency > 0 {
strs = append(strs, fmt.Sprintf(tcDelay, cfg.Latency))
}
if cfg.TargetBandwidth > -1 {
strs = append(strs, fmt.Sprintf(tcRate, cfg.TargetBandwidth))
}
if cfg.PacketLoss > 0 {
strs = append(strs, fmt.Sprintf(tcLoss, strconv.FormatFloat(cfg.PacketLoss, 'f', 2, 64)))
}
cmd := strings.Join(strs, " ")
return c.execute(cmd)
}
// addIptablesRules installs the CLASSIFY rules for the configured
// IPv4 targets, then the IPv6 targets, stopping at the first error.
// The original guarded the very first step with `err == nil` on a
// freshly declared err, which was always true; that dead check is
// removed.
func addIptablesRules(cfg *Config, c commander) error {
	if len(cfg.TargetIps) > 0 {
		if err := addIptablesRulesForAddrs(cfg, c, ip4Tables, cfg.TargetIps); err != nil {
			return err
		}
	}
	if len(cfg.TargetIps6) > 0 {
		return addIptablesRulesForAddrs(cfg, c, ip6Tables, cfg.TargetIps6)
	}
	return nil
}
func addIptablesRulesForAddrs(cfg *Config, c commander, command string, addrs []string) error {
rules := []string{}
ports := ""
if len(cfg.TargetPorts) > 0 {
if len(cfg.TargetPorts) > 1 {
prts := strings.Join(cfg.TargetPorts, ",")
ports = fmt.Sprintf(iptDestPorts, prts)
} else {
ports = fmt.Sprintf(iptDestPort, cfg.TargetPorts[0])
}
}
addTargetCmd := fmt.Sprintf(iptAddTarget, command)
if len(cfg.TargetProtos) > 0 {
for _, ptc := range cfg.TargetProtos {
proto := fmt.Sprintf(iptProto, ptc)
rule := addTargetCmd + " " + proto
if ptc != "icmp" {
if ports != "" {
rule += " " + ports
}
}
rules = append(rules, rule)
}
} else {
rules = []string{addTargetCmd}
}
if len(addrs) > 0 {
iprules := []string{}
for _, ip := range addrs {
dest := fmt.Sprintf(iptDestIP, ip)
if len(rules) > 0 {
for _, rule := range rules {
r := rule + " " + dest
iprules = append(iprules, r)
}
} else {
iprules = append(iprules, dest)
}
}
if len(iprules) > 0 {
rules = iprules
}
}
for _, rule := range rules {
if err := c.execute(rule); err != nil {
return err
}
}
return nil
}
func (t *tcThrottler) teardown(cfg *Config) error {
if err := delIptablesRules(cfg, t.c); err != nil {
return err
}
// The root node to append the filters
if err := delRootQDisc(cfg, t.c); err != nil {
return err
}
return nil
}
func delIptablesRules(cfg *Config, c commander) error {
iptablesCommands := []string{ip4Tables, ip6Tables}
for _, iptablesCommand := range iptablesCommands {
if !c.commandExists(iptablesCommand) {
continue
}
lines, err := c.executeGetLines(fmt.Sprintf(iptList, iptablesCommand))
if err != nil {
// ignore exit code 3 from iptables, which might happen if the system
// has the ip6tables command, but no IPv6 capabilities
werr, ok := err.(*exec.ExitError)
if !ok {
return err
}
status, ok := werr.Sys().(syscall.WaitStatus)
if !ok {
return err
}
if status.ExitStatus() == 3 {
continue
}
return err
}
delCmdPrefix := fmt.Sprintf(iptDel, iptablesCommand)
for _, line := range lines {
if strings.Contains(line, iptDelSearch) {
cmd := strings.Replace(line, "-A", delCmdPrefix, 1)
err = c.execute(cmd)
if err != nil {
return err
}
}
}
}
return nil
}
func delRootQDisc(cfg *Config, c commander) error {
//Delete the root QDisc
root := fmt.Sprintf(tcRootQDisc, cfg.Device)
strs := []string{tcDelQDisc, root}
cmd := strings.Join(strs, " ")
return c.execute(cmd)
}
func (t *tcThrottler) exists() bool {
if dry {
return false
}
err := t.c.execute(tcExists)
return err == nil
}
func (t *tcThrottler) check() string {
return tcCheck
}
|
// Copyright 2014 Damjan Cvetko. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package pcapgo
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
)
// Reader wraps an underlying io.Reader to read packet data in PCAP
// format. See http://wiki.wireshark.org/Development/LibpcapFileFormat
// for information on the file format.
//
// We currently read the v2.4 file format with nanosecond and microsecond
// timestamp resolution in little-endian and big-endian encoding.
type Reader struct {
	r io.Reader
	// byteOrder is decoded from the file's magic number.
	byteOrder binary.ByteOrder
	// nanoSecsFactor is 1 for nanosecond files, 1000 for microsecond files.
	nanoSecsFactor uint32
	versionMajor uint16
	versionMinor uint16
	// timezone
	// sigfigs
	// snaplen is the per-packet capture limit from the global header.
	snaplen uint32
	linkType layers.LinkType
	// reusable buffer, sized snaplen+16 in readHeader (packet data
	// plus the 16-byte record header).
	buf []byte
}
// Magic numbers identifying the file's byte order and timestamp
// resolution. NOTE(review): magicMicroseconds, versionMajor, and
// versionMinor are referenced by readHeader but not declared in this
// chunk -- presumably defined elsewhere; confirm (the classic pcap
// values are 0xA1B2C3D4, 2, and 4).
const magicNanoseconds = 0xA1B23C4D
const magicMicrosecondsBigendian = 0xD4C3B2A1
const magicNanosecondsBigendian = 0x4D3CB2A1
// NewReader returns a new reader object, for reading packet data from
// the given reader. The reader must be open and header data is
// read from it at this point.
// If the file format is not supported an error is returned
//
//  // Create new reader:
//  f, _ := os.Open("/tmp/file.pcap")
//  defer f.Close()
//  r, err := NewReader(f)
//  data, ci, err := r.ReadPacketData()
func NewReader(r io.Reader) (*Reader, error) {
	reader := &Reader{r: r}
	if err := reader.readHeader(); err != nil {
		return nil, err
	}
	return reader, nil
}
// readHeader reads and validates the 24-byte pcap global header,
// populating byte order, timestamp resolution, version, snaplen and
// link type. Fixes the "maigc" typo in the error message, replaces
// errors.New(fmt.Sprintf(...)) with fmt.Errorf, and uses a switch for
// the magic-number dispatch.
func (r *Reader) readHeader() error {
	buf := make([]byte, 24)
	if n, err := io.ReadFull(r.r, buf); err != nil {
		return err
	} else if n < 24 {
		return errors.New("Not enough data for read")
	}
	// The magic number encodes both the byte order and the timestamp
	// resolution of the capture file.
	switch magic := binary.LittleEndian.Uint32(buf[0:4]); magic {
	case magicNanoseconds:
		r.byteOrder = binary.LittleEndian
		r.nanoSecsFactor = 1
	case magicNanosecondsBigendian:
		r.byteOrder = binary.BigEndian
		r.nanoSecsFactor = 1
	case magicMicroseconds:
		r.byteOrder = binary.LittleEndian
		r.nanoSecsFactor = 1000
	case magicMicrosecondsBigendian:
		r.byteOrder = binary.BigEndian
		r.nanoSecsFactor = 1000
	default:
		return fmt.Errorf("Unknown magic %x", magic)
	}
	if r.versionMajor = r.byteOrder.Uint16(buf[4:6]); r.versionMajor != versionMajor {
		return fmt.Errorf("Unknown major version %d", r.versionMajor)
	}
	if r.versionMinor = r.byteOrder.Uint16(buf[6:8]); r.versionMinor != versionMinor {
		return fmt.Errorf("Unknown minor version %d", r.versionMinor)
	}
	// ignore timezone 8:12 and sigfigs 12:16
	r.snaplen = r.byteOrder.Uint32(buf[16:20])
	// Reusable packet buffer: snaplen bytes of data plus the 16-byte
	// per-packet record header.
	r.buf = make([]byte, r.snaplen+16)
	r.linkType = layers.LinkType(r.byteOrder.Uint32(buf[20:24]))
	return nil
}
// ReadPacketData reads the next packet record, returning the raw
// packet bytes and their capture metadata. The returned slice aliases
// the Reader's internal buffer and is only valid until the next call.
func (r *Reader) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
	if ci, err = r.readPacketHeader(); err != nil {
		return
	}
	var n int
	// Guard against a corrupt header claiming more than snaplen bytes.
	if 16+ci.CaptureLength > len(r.buf) {
		err = fmt.Errorf("capture length with header exceeds buffer size: %d > %d", 16+ci.CaptureLength, len(r.buf))
		return
	}
	data = r.buf[16 : 16+ci.CaptureLength]
	if n, err = io.ReadFull(r.r, data); err != nil {
		return
	} else if n < ci.CaptureLength {
		err = io.ErrUnexpectedEOF
	}
	return
}
// readPacketHeader reads the 16-byte per-packet record header:
// timestamp seconds, timestamp fraction, capture length, and original
// wire length.
func (r *Reader) readPacketHeader() (ci gopacket.CaptureInfo, err error) {
	var n int
	if n, err = io.ReadFull(r.r, r.buf[0:16]); err != nil {
		return
	} else if n < 16 {
		err = io.ErrUnexpectedEOF
		return
	}
	// The fraction field is scaled to nanoseconds via nanoSecsFactor.
	ci.Timestamp = time.Unix(int64(r.byteOrder.Uint32(r.buf[0:4])), int64(r.byteOrder.Uint32(r.buf[4:8])*r.nanoSecsFactor)).UTC()
	ci.CaptureLength = int(r.byteOrder.Uint32(r.buf[8:12]))
	ci.Length = int(r.byteOrder.Uint32(r.buf[12:16]))
	return
}
// LinkType returns the network link type read from the file header,
// as a layers.LinkType.
func (r *Reader) LinkType() layers.LinkType {
	return r.linkType
}
// String implements fmt.Stringer, summarizing the file header fields.
func (r *Reader) String() string {
	return fmt.Sprintf("PcapFile maj: %x min: %x snaplen: %d linktype: %s", r.versionMajor, r.versionMinor, r.snaplen, r.linkType)
}
Fixed typo in the word "magic".
// Copyright 2014 Damjan Cvetko. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package pcapgo
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
)
// Reader wraps an underlying io.Reader to read packet data in PCAP
// format. See http://wiki.wireshark.org/Development/LibpcapFileFormat
// for information on the file format.
//
// We currenty read v2.4 file format with nanosecond and microsecdond
// timestamp resolution in little-endian and big-endian encoding.
type Reader struct {
r io.Reader
byteOrder binary.ByteOrder
nanoSecsFactor uint32
versionMajor uint16
versionMinor uint16
// timezone
// sigfigs
snaplen uint32
linkType layers.LinkType
// reusable buffer
buf []byte
}
const magicNanoseconds = 0xA1B23C4D
const magicMicrosecondsBigendian = 0xD4C3B2A1
const magicNanosecondsBigendian = 0x4D3CB2A1
// NewReader returns a new reader object, for reading packet data from
// the given reader. The reader must be open and header data is
// read from it at this point.
// If the file format is not supported an error is returned
//
// // Create new reader:
// f, _ := os.Open("/tmp/file.pcap")
// defer f.Close()
// r, err := NewReader(f)
// data, ci, err := r.ReadPacketData()
func NewReader(r io.Reader) (*Reader, error) {
ret := Reader{r: r}
if err := ret.readHeader(); err != nil {
return nil, err
}
return &ret, nil
}
// readHeader reads and validates the 24-byte pcap global header,
// populating byte order, timestamp resolution, version, snaplen and
// link type. Replaces errors.New(fmt.Sprintf(...)) with fmt.Errorf
// (staticcheck S1028) and uses a switch for the magic dispatch.
func (r *Reader) readHeader() error {
	buf := make([]byte, 24)
	if n, err := io.ReadFull(r.r, buf); err != nil {
		return err
	} else if n < 24 {
		return errors.New("Not enough data for read")
	}
	// The magic number encodes both the byte order and the timestamp
	// resolution of the capture file.
	switch magic := binary.LittleEndian.Uint32(buf[0:4]); magic {
	case magicNanoseconds:
		r.byteOrder = binary.LittleEndian
		r.nanoSecsFactor = 1
	case magicNanosecondsBigendian:
		r.byteOrder = binary.BigEndian
		r.nanoSecsFactor = 1
	case magicMicroseconds:
		r.byteOrder = binary.LittleEndian
		r.nanoSecsFactor = 1000
	case magicMicrosecondsBigendian:
		r.byteOrder = binary.BigEndian
		r.nanoSecsFactor = 1000
	default:
		return fmt.Errorf("Unknown magic %x", magic)
	}
	if r.versionMajor = r.byteOrder.Uint16(buf[4:6]); r.versionMajor != versionMajor {
		return fmt.Errorf("Unknown major version %d", r.versionMajor)
	}
	if r.versionMinor = r.byteOrder.Uint16(buf[6:8]); r.versionMinor != versionMinor {
		return fmt.Errorf("Unknown minor version %d", r.versionMinor)
	}
	// ignore timezone 8:12 and sigfigs 12:16
	r.snaplen = r.byteOrder.Uint32(buf[16:20])
	// Reusable packet buffer: snaplen bytes of data plus the 16-byte
	// per-packet record header.
	r.buf = make([]byte, r.snaplen+16)
	r.linkType = layers.LinkType(r.byteOrder.Uint32(buf[20:24]))
	return nil
}
// Read next packet from file
func (r *Reader) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
if ci, err = r.readPacketHeader(); err != nil {
return
}
var n int
if 16+ci.CaptureLength > len(r.buf) {
err = fmt.Errorf("capture length with header exceeds buffer size: %d > %d", 16+ci.CaptureLength, len(r.buf))
return
}
data = r.buf[16 : 16+ci.CaptureLength]
if n, err = io.ReadFull(r.r, data); err != nil {
return
} else if n < ci.CaptureLength {
err = io.ErrUnexpectedEOF
}
return
}
func (r *Reader) readPacketHeader() (ci gopacket.CaptureInfo, err error) {
var n int
if n, err = io.ReadFull(r.r, r.buf[0:16]); err != nil {
return
} else if n < 16 {
err = io.ErrUnexpectedEOF
return
}
ci.Timestamp = time.Unix(int64(r.byteOrder.Uint32(r.buf[0:4])), int64(r.byteOrder.Uint32(r.buf[4:8])*r.nanoSecsFactor)).UTC()
ci.CaptureLength = int(r.byteOrder.Uint32(r.buf[8:12]))
ci.Length = int(r.byteOrder.Uint32(r.buf[12:16]))
return
}
// LinkType returns network, as a layers.LinkType.
func (r *Reader) LinkType() layers.LinkType {
return r.linkType
}
// Reader formater
func (r *Reader) String() string {
return fmt.Sprintf("PcapFile maj: %x min: %x snaplen: %d linktype: %s", r.versionMajor, r.versionMinor, r.snaplen, r.linkType)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.