CombinedText stringlengths 4 3.42M |
|---|
// Copyright (c) 2015 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package db
import (
"errors"
"fmt"
"math"
"sort"
"sync"
"sync/atomic"
"github.com/couchbase/sync_gateway/base"
)
var ByteCachePollingTime = 1000 // initial polling time for notify, ms

const (
	kSequenceOffsetLength = 0   // disabled until we actually need it
	kMaxUnreadPollCount   = 10  // If the channel polls for (kMaxUnreadPollCount) times after publishing a notification without anyone calling getChanges(), terminates polling
	kMaxEmptyPollCount    = 100 // If a channel is polled and finds no changes (kMaxEmptyPollCount) times, it triggers a null update in order to trigger unused poll handling (i.e. see if anyone is still listening)
)
// kvChannelIndex manages the bucket-backed index for a single channel: it
// writes entry sets and the channel clock to the index bucket, and supports
// polling for changes on behalf of continuous changes listeners.
type kvChannelIndex struct {
	indexBucket       base.Bucket               // Database connection (used for connection queries)
	partitionMap      IndexPartitionMap         // Index partition map
	channelName       string                    // Channel name
	lastPolledChanges []*LogEntry               // Set of changes found in most recent polling. Optimization for scenario where multiple continuous changes listeners are awakened at once
	lastPolledSince   base.SequenceClock        // Since value used for most recent polling
	lastPolledClock   base.SequenceClock        // New channel clock after most recent polling
	lastPolledLock    sync.RWMutex              // Synchronization for lastPolled data
	unreadPollCount   uint32                    // Number of times the channel has polled for data since the last non-empty poll, without a getChanges call
	pollCount         uint32                    // Number of times the channel has polled for data and not found changes
	stableSequence    base.SequenceClock        // Global stable sequence
	stableSequenceCb  func() base.SequenceClock // Callback for retrieval of global stable sequence
	onChange          func(base.Set)            // Notification callback
	clock             *base.SequenceClockImpl   // Channel clock
	channelStorage    ChannelStorage            // Channel storage - manages interaction with the index format
}
// NewKvChannelIndex builds the channel index for channelName, wiring up the
// backing bucket, partition map, stable-sequence callback, and change
// notification callback, then loads the current channel clock from the bucket.
func NewKvChannelIndex(channelName string, bucket base.Bucket, partitions IndexPartitionMap, stableClockCallback func() base.SequenceClock, onChangeCallback func(base.Set)) *kvChannelIndex {

	index := &kvChannelIndex{
		channelName:      channelName,
		indexBucket:      bucket,
		partitionMap:     partitions,
		stableSequenceCb: stableClockCallback,
		onChange:         onChangeCallback,
		channelStorage:   NewChannelStorage(bucket, channelName, partitions),
	}

	// Capture the current global stable sequence.
	index.stableSequence = index.stableSequenceCb()

	// Initialize and load the channel clock from the bucket.
	index.loadClock()

	return index
}
//
// Index Writing
//

// Add writes a single entry to the channel index by delegating to AddSet.
func (k *kvChannelIndex) Add(entry *LogEntry) error {
	// Update the sequence in the appropriate cache block
	return k.AddSet([]*LogEntry{entry})
}
// AddSet writes a batch of entries to the channel index, then persists the
// resulting clock updates in a single write.
func (k *kvChannelIndex) AddSet(entries []*LogEntry) error {

	base.LogTo("DIndex+", "Adding set of %d entries to channel %s", len(entries), k.channelName)
	updates, err := k.channelStorage.AddEntrySet(entries)
	if err != nil {
		return err
	}

	// Persist the clock once per AddSet (instead of after each block update)
	// to minimize round trips.
	return k.writeClockCas(updates)
}
// Compact is a placeholder for future index-block expiry handling; it is
// currently a no-op.
func (k *kvChannelIndex) Compact() {
	// TODO: for each index block being cached, check whether expired
}
// updateIndexCount increments the per-channel index count document in the
// index bucket, creating it with an initial value of 1 if missing.
func (k *kvChannelIndex) updateIndexCount() error {

	countKey := getIndexCountKey(k.channelName)
	updated, err := k.indexBucket.Incr(countKey, 1, 1, 0)
	if err != nil {
		base.Warn("Error from Incr in updateCacheClock(%s): %v", countKey, err)
		return err
	}
	base.LogTo("DCache", "Updated clock for %s (%s) to %d", k.channelName, countKey, updated)

	return nil
}
// pollForChanges compares the stable clock and new channel clock against the
// last-polled state for the channel. Returns hasChanges=true when new changes
// were found (or when a null update should be triggered to check for
// listeners), and cancelPolling=true when nobody appears to be listening and
// polling for this channel should stop.
func (k *kvChannelIndex) pollForChanges(stableClock base.SequenceClock, newChannelClock base.SequenceClock) (hasChanges bool, cancelPolling bool) {

	changeCacheExpvars.Add(fmt.Sprintf("pollCount-%s", k.channelName), 1)
	// Increment the overall poll count since a changes request (regardless of whether there have been polled changes)
	totalPollCount := atomic.AddUint32(&k.pollCount, 1)
	unreadPollCount := atomic.LoadUint32(&k.unreadPollCount)

	if unreadPollCount > kMaxUnreadPollCount {
		// We've sent a notify, but had (kMaxUnreadPollCount) polls without anyone calling getChanges.
		// Assume nobody is listening for updates - cancel polling for this channel
		return false, true
	}

	if unreadPollCount > 0 {
		// Give listeners more time to call getChanges, but increment
		atomic.AddUint32(&k.unreadPollCount, 1)
	}

	// Everything below reads/writes the lastPolled* fields, so hold the write lock.
	k.lastPolledLock.Lock()
	defer k.lastPolledLock.Unlock()

	base.LogTo("DIndex+", "Poll for changes for channel %s", k.channelName)

	// First poll for this channel - seed lastPolled state from the channel clock.
	if k.lastPolledClock == nil {
		k.lastPolledClock = k.clock.Copy()
		k.lastPolledSince = k.clock.Copy()
	}

	// Find the minimum of stable clock and new channel clock (to ignore cases when channel clock has
	// changed but stable hasn't yet)
	combinedClock := base.GetMinimumClock(stableClock, newChannelClock)
	if !combinedClock.AnyAfter(k.lastPolledClock) {
		// No changes. Only return true if we've exceeded empty poll count (and want to trigger the "is
		// anyone listening" check)
		if totalPollCount > kMaxEmptyPollCount {
			return true, false
		} else {
			return false, false
		}
	}

	// The clock has changed - load the changes and store in last polled
	if err := k.updateLastPolled(combinedClock); err != nil {
		base.Warn("Error updating last polled for channel %s: %v", k.channelName, err)
		return false, false
	}

	// We have changes - increment unread counter if we haven't already
	if unreadPollCount == 0 {
		atomic.AddUint32(&k.unreadPollCount, 1)
	}
	return true, false
}
// updateLastPolled loads the changes between lastPolledClock and
// combinedClock and caches them in lastPolledChanges, advancing the
// lastPolledSince/lastPolledClock pair. Assumes the caller holds
// lastPolledLock for writing (see pollForChanges).
func (k *kvChannelIndex) updateLastPolled(combinedClock base.SequenceClock) error {

	// Compare counter again, in case someone has already updated last polled while we waited for the lock
	if combinedClock.AnyAfter(k.clock) {
		// Get changes since the last clock
		recentChanges, err := k.channelStorage.GetChanges(k.lastPolledClock, combinedClock)
		indexExpvars.Add("updateChannelPolled", 1)
		if err != nil {
			return err
		}
		if len(recentChanges) > 0 {
			// Advance the window: since becomes the old clock, clock becomes the new combined clock.
			k.lastPolledChanges = recentChanges
			k.lastPolledSince.SetTo(k.lastPolledClock)
			k.lastPolledClock.SetTo(combinedClock)
		} else {
			// Clock moved but storage returned nothing - treated as an error so the poller can warn.
			base.Warn("pollForChanges: channel [%s] clock changed, but no changes found in cache.", k.channelName)
			return errors.New("Expected changes based on clock, none found")
		}
		/* onChange handled by change index
		if k.onChange != nil {
			k.onChange(base.SetOf(k.channelName))
		}
		*/
	}
	return nil
}
// checkLastPolled returns a copy of the most recently polled changes when the
// caller's since/chanClock pair matches the cached polling window; otherwise
// returns nil.
func (k *kvChannelIndex) checkLastPolled(since base.SequenceClock, chanClock base.SequenceClock) (results []*LogEntry) {

	k.lastPolledLock.RLock()
	defer k.lastPolledLock.RUnlock()

	if k.lastPolledClock == nil || k.lastPolledSince == nil {
		return results
	}

	if k.lastPolledClock.Equals(chanClock) && k.lastPolledSince.AllBefore(since) {
		// Bug fix: copy into a nil slice copies zero elements, so the cached
		// changes were never returned. Allocate the destination first.
		results = make([]*LogEntry, len(k.lastPolledChanges))
		copy(results, k.lastPolledChanges)
	}
	return results
}
// Returns the set of index entries for the channel more recent than the
// specified since SequenceClock. Index entries with sequence values greater than
// the index stable sequence are not returned.
func (k *kvChannelIndex) getChanges(since base.SequenceClock) ([]*LogEntry, error) {

	var results []*LogEntry

	// Someone is still interested in this channel - reset poll counts
	atomic.StoreUint32(&k.pollCount, 0)
	atomic.StoreUint32(&k.unreadPollCount, 0)

	// TODO: pass in an option to reuse existing channel clock
	chanClock, err := k.loadChannelClock()
	if err != nil {
		// Not a warning: the clock document may simply not exist yet for this channel.
		base.LogTo("DIndex+", "Error loading channel clock for channel %s - may not exist for channel: %v", k.channelName, err)
	}

	base.LogTo("DIndex+", "[channelIndex.GetChanges] Channel clock for channel %s: %s", k.channelName, base.PrintClock(chanClock))
	// If requested clock is later than the channel clock, return empty
	if since.AllAfter(chanClock) {
		base.LogTo("DIndex+", "requested clock is later than channel clock - no new changes to report")
		return results, nil
	}

	// If the since value is more recent than the last polled clock, return the results from the
	// last polling. Has the potential to return values earlier than since and later than
	// lastPolledClock, but these duplicates will be ignored by replication. Could validate
	// greater than since inside this if clause, but leaving out as a performance optimization for
	// now
	if lastPolledResults := k.checkLastPolled(since, chanClock); len(lastPolledResults) > 0 {
		indexExpvars.Add("getChanges_lastPolled_hit", 1)
		return lastPolledResults, nil
	}
	indexExpvars.Add("getChanges_lastPolled_miss", 1)

	return k.channelStorage.GetChanges(since, chanClock)
}
// Returns the block keys needed to retrieve all changes between fromClock and toClock. When
// stableOnly is true, restricts to only those needed to retrieve changes earlier than the stable sequence
/*
func (k *kvChannelIndex) calculateBlocks(fromClock base.SequenceClock, toClock base.SequenceClock, stableOnly bool) BlockSet {
stableClock := k.stableSequence
sinceBlockSet := make(BlockSet)
for vb, sequence := range k.clock.value {
sinceSeq := since.GetSequence(vb)
if sequence > sinceSeq && (sinceSeq > toClock.GetSequence(vb) || stableOnly == false) {
sinceEntry := kvIndexEntry{vbNo: vb, sequence: sinceSeq}
blockKey := GenerateBlockKey(k.channelName, sinceSeq, k.partitionMap[vbNo])
sinceBlockSet[blockKey] = append(sinceBlockSet[blockKey], sinceEntry)
}
}
}
*/
// getIndexCounter reads the per-channel index count document. A read failure
// is deliberately swallowed and reported as count 0, since the counter may not
// have been initialized yet for this channel.
func (k *kvChannelIndex) getIndexCounter() (uint64, error) {
	key := getIndexCountKey(k.channelName)
	var intValue uint64
	_, err := k.indexBucket.Get(key, &intValue)
	if err != nil {
		// return nil here - cache clock may not have been initialized yet
		return 0, nil
	}
	return intValue, nil
}
// loadChannelClock reads and unmarshals the channel clock document from the
// index bucket. On read or unmarshal failure, returns the (possibly empty)
// clock along with the error, so callers can decide how to handle a missing
// clock.
func (k *kvChannelIndex) loadChannelClock() (base.SequenceClock, error) {

	chanClock := base.NewSyncSequenceClock()
	key := getChannelClockKey(k.channelName)
	value, _, err := k.indexBucket.GetRaw(key)
	indexExpvars.Add("get_loadChannelClock", 1)
	if err != nil {
		// May not exist yet for a new channel - logged at debug level, not warned.
		base.LogTo("DIndex+", "Error loading channel clock for key %s:%v", key, err)
		return chanClock, err
	}
	err = chanClock.Unmarshal(value)
	if err != nil {
		base.Warn("Error unmarshalling channel clock for channel %s, clock value %v", k.channelName, value)
	}
	return chanClock, err
}
// Determine the cache block index for a sequence
func (k *kvChannelIndex) getBlockIndex(sequence uint64) uint16 {
	// Compute once instead of duplicating the division in the log statement.
	blockIndex := uint16(sequence / byteCacheBlockCapacity)
	base.LogTo("DCache", "block index for sequence %d is %d", sequence, blockIndex)
	return blockIndex
}
// loadClock initializes k.clock (if needed) and populates it from the channel
// clock document in the index bucket. A missing document is treated as a new
// channel; the clock remains at its zero state in that case.
func (k *kvChannelIndex) loadClock() {

	if k.clock == nil {
		k.clock = base.NewSequenceClockImpl()
	}
	data, cas, err := k.indexBucket.GetRaw(getChannelClockKey(k.channelName))
	if err != nil {
		base.LogTo("DCache+", "Unable to find existing channel clock for channel %s - treating as new", k.channelName)
	}
	// NOTE(review): Unmarshal error is ignored here; on a GetRaw failure data is
	// nil and the clock is presumably left at its zero state - confirm Unmarshal(nil) is safe.
	k.clock.Unmarshal(data)
	k.clock.SetCas(cas)
	base.LogTo("DCache+", "Loaded channel clock: %s", base.PrintClock(k.clock))
}
// writeClockCas merges updateClock into the in-memory channel clock and
// persists it to the bucket using CAS. On CAS contention, re-reads the stored
// clock, re-applies the update, and retries (via the writeCasRaw callback).
func (k *kvChannelIndex) writeClockCas(updateClock base.SequenceClock) error {

	// Initial set, for the first cas update attempt
	k.clock.UpdateWithClock(updateClock)
	value, err := k.clock.Marshal()
	if err != nil {
		return err
	}

	casOut, err := writeCasRaw(k.indexBucket, getChannelClockKey(k.channelName), value, k.clock.Cas(), 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		if err = k.clock.Unmarshal(value); err != nil {
			// Bug fix: original call was missing the %v format verb for err.
			base.Warn("Error unmarshalling clock during update: %v", err)
			return nil, err
		}
		k.clock.UpdateWithClock(updateClock)
		return k.clock.Marshal()
	})
	// Bug fix: the error from writeCasRaw was previously ignored and nil was
	// always returned, silently hiding failed clock writes from callers.
	if err != nil {
		return err
	}

	k.clock.SetCas(casOut)
	return nil
}
// A vbCache caches a set of LogEntry values, representing the set of entries for the channel for a
// particular vbucket. Intended to cache recently accessed channel block data, to avoid repeated
// block retrieval, parsing, and deduplication.
// The validFrom and validTo specify the range for which the cache is valid.
// Additional entries can be added to the cache using appendEntries (for entries after validTo), or
// prependEntries (backfilling values prior to validFrom).
type vbCache struct {
	validFrom uint64            // Sequence the cache is valid from
	validTo   uint64            // Sequence the cache is valid to
	entries   SortedEntrySet    // Deduplicated entries for the vbucket for sequences between validFrom and validTo, ordered by sequence
	docIDs    map[string]uint64 // Map of doc ids present in entries, mapped to that doc's sequence id - used to optimize dedup processing
	cacheLock sync.RWMutex      // Lock used for updating cache from index
}
// newVbCache creates an empty vbucket cache. validFrom starts at MaxUint64
// and validTo at zero, so the first append/prepend establishes the real
// valid range.
func newVbCache() *vbCache {
	cache := &vbCache{
		docIDs: make(map[string]uint64),
	}
	cache.validFrom = math.MaxUint64
	cache.validTo = 0
	return cache
}
// Returns all cached entries between fromSequence and toSequence. When the cache doesn't have the full range, returns
// non-zero validFrom and validTo values
func (v *vbCache) getEntries(fromSequence, toSequence uint64) (validFrom uint64, validTo uint64, entries []*LogEntry) {

	// Returning a new copy (instead of subslice of entries) to protect against future deduplication within entries
	// TODO: use binary search to find starting point in entry list?

	// Invalid range - nothing to return.
	if fromSequence > toSequence {
		return 0, 0, entries
	}

	v.cacheLock.RLock()
	defer v.cacheLock.RUnlock()

	// Entries are ordered by sequence, so we can stop at the first entry past toSequence.
	for _, entry := range v.entries {
		if entry.Sequence >= fromSequence {
			if entry.Sequence <= toSequence {
				entries = append(entries, entry)
			} else {
				break
			}
		}
	}

	// Clamp the reported validity window to the intersection of the requested
	// range and the cache's valid range.
	validFrom = maxUint64(fromSequence, v.validFrom)
	validTo = minUint64(toSequence, v.validTo)
	return validFrom, validTo, entries
}
// Adds the provided set of entries to the cache. Incoming entries must be ordered by vbucket sequence, and the first
// entry must be greater than the current cache's validTo.
// Deduplicates by doc id.
func (v *vbCache) appendEntries(entries SortedEntrySet, validFrom uint64, validTo uint64) error {

	if len(entries) == 0 {
		return nil
	}

	// Reject an append that would leave an uncached gap after the current validTo.
	if v.validTo > 0 && validFrom > v.validTo+1 {
		return errors.New("Cache conflict - attempt to append entries with gap in valid range")
	}

	// NOTE(review): validateCacheUpdate takes only a read lock, then releases it
	// before the write lock below is acquired - confirm callers serialize
	// appendEntries, otherwise there is a window for concurrent mutation.
	entries, duplicateDocIDs, err := v.validateCacheUpdate(entries, false)
	if err != nil {
		return err
	}

	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()

	// Remove duplicates from cache
	// NOTE(review): Remove's error (sequence not found) is ignored here - presumably
	// docIDs and entries are always in sync; verify.
	for _, docID := range duplicateDocIDs {
		v.entries.Remove(v.docIDs[docID])
	}

	// Append entries
	v.entries = append(v.entries, entries...)

	// Update docID map
	for _, entry := range entries {
		v.docIDs[entry.DocID] = entry.Sequence
	}

	// If this is the first set appended, may need to set validFrom as well as validTo
	if validFrom < v.validFrom {
		v.validFrom = validFrom
	}
	v.validTo = validTo
	return nil
}
// Backfills the cache with entries earlier than the current validFrom.
// Incoming entries must be ordered by vbucket sequence, and the range must be
// contiguous with the current valid range (no gap before validFrom).
// Deduplicates by doc id, dropping incoming entries whose doc is already cached.
func (v *vbCache) prependEntries(entries SortedEntrySet, validFrom uint64, validTo uint64) error {

	if len(entries) == 0 {
		return nil
	}

	// Reject a prepend that would leave an uncached gap before the current validFrom.
	if v.validFrom < math.MaxUint64 && validTo < v.validFrom-1 {
		return errors.New("Cache conflict - attempt to prepend entries with gap in valid range")
	}

	// deleteDuplicates=true: entries for docs already in the cache are removed
	// from the incoming set (the cached, later revision wins).
	entries, _, err := v.validateCacheUpdate(entries, true)
	if err != nil {
		return err
	}

	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()

	// Prepend all remaining entries
	v.entries = append(entries, v.entries...)

	// Append new doc ids
	for _, entry := range entries {
		v.docIDs[entry.DocID] = entry.Sequence
	}

	v.validFrom = validFrom
	return nil
}
// Pre-processing for an incoming set of cache updates. Returns the set of already cached doc IDs that need to get
// deduplicated, the set of new DocIDs, removes any doc ID duplicates in the set
func (v *vbCache) validateCacheUpdate(entries SortedEntrySet, deleteDuplicates bool) (validEntries SortedEntrySet, duplicateEntries []string, err error) {

	// Read lock: only v.docIDs is consulted; the incoming entries slice (not
	// v.entries) is what gets mutated below.
	v.cacheLock.RLock()
	defer v.cacheLock.RUnlock()

	var prevSeq uint64
	prevSeq = math.MaxUint64

	// Iterate over entries, doing the following:
	//  - deduplicating by DocID within the set of new entries (*not* v.entries)
	//  - build list of docIDs that are already known to the cache
	// Iteration is in reverse (highest sequence first) so that index-based
	// removal doesn't disturb entries not yet visited, and so the latest
	// sequence for a duplicated doc ID is the one kept.
	entriesDocIDs := make(map[string]interface{})
	for i := len(entries) - 1; i >= 0; i-- {
		entry := entries[i]
		// validate that the sequences are ordered as expected
		if entry.Sequence > prevSeq {
			return entries, duplicateEntries, errors.New("Entries must be ordered by sequence when updating cache")
		}
		prevSeq = entry.Sequence
		// If we've already seen this doc ID in the new entry set, remove it
		if _, ok := entriesDocIDs[entry.DocID]; ok {
			entries = append(entries[:i], entries[i+1:]...)
		} else { // Otherwise, add to appropriate DocId list
			if _, ok := v.docIDs[entry.DocID]; ok {
				// Doc already present in the cache - report it, and optionally
				// drop the incoming entry (prepend case).
				duplicateEntries = append(duplicateEntries, entry.DocID)
				if deleteDuplicates {
					entries = append(entries[:i], entries[i+1:]...)
				}
			}
		}
	}

	return entries, duplicateEntries, nil
}
// Removes an entry from the cache.
func (v *vbCache) removeEntry(docID string, sequence uint64) {
	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()
	v._removeEntry(docID, sequence)
}

// Removes an entry from the cache. Assumes caller has write lock on v.cacheLock
// NOTE(review): currently unimplemented - removeEntry is effectively a no-op.
func (v *vbCache) _removeEntry(docID string, sequence uint64) {

}
// SortedEntrySet
// Optimizes removal of entries from a sorted array.
type SortedEntrySet []*LogEntry

// Remove deletes the entry with the given sequence from the set, preserving
// order. Returns an error if no entry with that sequence is present.
func (h *SortedEntrySet) Remove(x uint64) error {
	i := SearchSortedEntrySet(*h, x)
	if i < len(*h) && (*h)[i].Sequence == x {
		*h = append((*h)[:i], (*h)[i+1:]...)
		return nil
	}
	// Idiom: early return above makes the else branch unnecessary.
	return errors.New("Value not found")
}
// Skipped Sequence version of sort.SearchInts - based on http://golang.org/src/sort/search.go?s=2959:2994#L73
// Returns the smallest index i at which a[i].Sequence >= x, or len(a) if no
// such index exists.
func SearchSortedEntrySet(a SortedEntrySet, x uint64) int {
	return sort.Search(len(a), func(i int) bool { return a[i].Sequence >= x })
}
//////// Utility functions for key generation

// Get the key for the cache count doc
func getIndexCountKey(channelName string) string {
	return fmt.Sprintf("%s_count:%s", kIndexPrefix, channelName)
}
// Get the key for the cache block, based on the block index
// The key includes the vb suffix for the partition - presumably to control
// vbucket placement of block documents; confirm against vbSuffixMap usage.
func getIndexBlockKey(channelName string, blockIndex uint16, partition uint16) string {
	return fmt.Sprintf("%s:%s:block%d:%s", kIndexPrefix, channelName, blockIndex, vbSuffixMap[partition])
}
// Get the key for the channel clock doc for the channel
// (previous comment was a copy/paste of the block-key comment)
func getChannelClockKey(channelName string) string {
	return fmt.Sprintf("%s_SequenceClock:%s", kIndexPrefix, channelName)
}
// minUint64 returns the smaller of a and b.
func minUint64(a, b uint64) uint64 {
	if b < a {
		return b
	}
	return a
}
// maxUint64 returns the larger of a and b.
func maxUint64(a, b uint64) uint64 {
	if b > a {
		return b
	}
	return a
}
Reduce warning to logging when channel clock not found
// Copyright (c) 2015 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package db
import (
"errors"
"fmt"
"math"
"sort"
"sync"
"sync/atomic"
"github.com/couchbase/sync_gateway/base"
)
var ByteCachePollingTime = 1000 // initial polling time for notify, ms
const (
kSequenceOffsetLength = 0 // disabled until we actually need it
kMaxUnreadPollCount = 10 // If the channel polls for (kMaxUnusedPollCount) times after publishing a notification without anyone calling getChanges(), terminates polling
kMaxEmptyPollCount = 100 // If a channel is polled and finds no changes (kMaxEmptyPollCount) times, it triggers a null update in order to trigger unused poll handling (i.e. see if anyone is still listening)
)
type kvChannelIndex struct {
indexBucket base.Bucket // Database connection (used for connection queries)
partitionMap IndexPartitionMap // Index partition map
channelName string // Channel name
lastPolledChanges []*LogEntry // Set of changes found in most recent polling. Optimization for scenario where multiple continuous changes listeners are awakened at once
lastPolledSince base.SequenceClock // Since value used for most recent polling
lastPolledClock base.SequenceClock // New channel clock after most recent polling
lastPolledLock sync.RWMutex // Synchronization for lastPolled data
unreadPollCount uint32 // Number of times the channel has polled for data since the last non-empty poll, without a getChanges call
pollCount uint32 // Number of times the channel has polled for data and not found changes
stableSequence base.SequenceClock // Global stable sequence
stableSequenceCb func() base.SequenceClock // Callback for retrieval of global stable sequence
onChange func(base.Set) // Notification callback
clock *base.SequenceClockImpl // Channel clock
channelStorage ChannelStorage // Channel storage - manages interaction with the index format
}
func NewKvChannelIndex(channelName string, bucket base.Bucket, partitions IndexPartitionMap, stableClockCallback func() base.SequenceClock, onChangeCallback func(base.Set)) *kvChannelIndex {
channelIndex := &kvChannelIndex{
channelName: channelName,
indexBucket: bucket,
partitionMap: partitions,
stableSequenceCb: stableClockCallback,
onChange: onChangeCallback,
channelStorage: NewChannelStorage(bucket, channelName, partitions),
}
// Init stable sequence, last polled
channelIndex.stableSequence = channelIndex.stableSequenceCb()
// Initialize and load channel clock
channelIndex.loadClock()
return channelIndex
}
//
// Index Writing
//
func (k *kvChannelIndex) Add(entry *LogEntry) error {
// Update the sequence in the appropriate cache block
entries := make([]*LogEntry, 1)
entries[0] = entry
return k.AddSet(entries)
}
// Adds a set
func (k *kvChannelIndex) AddSet(entries []*LogEntry) error {
base.LogTo("DIndex+", "Adding set of %d entries to channel %s", len(entries), k.channelName)
clockUpdates, err := k.channelStorage.AddEntrySet(entries)
if err != nil {
return err
}
// Update the clock. Doing once per AddSet (instead of after each block update) to minimize the
// round trips.
return k.writeClockCas(clockUpdates)
}
func (k *kvChannelIndex) Compact() {
// TODO: for each index block being cached, check whether expired
}
func (k *kvChannelIndex) updateIndexCount() error {
// increment index count
key := getIndexCountKey(k.channelName)
newValue, err := k.indexBucket.Incr(key, 1, 1, 0)
if err != nil {
base.Warn("Error from Incr in updateCacheClock(%s): %v", key, err)
return err
}
base.LogTo("DCache", "Updated clock for %s (%s) to %d", k.channelName, key, newValue)
return nil
}
func (k *kvChannelIndex) pollForChanges(stableClock base.SequenceClock, newChannelClock base.SequenceClock) (hasChanges bool, cancelPolling bool) {
changeCacheExpvars.Add(fmt.Sprintf("pollCount-%s", k.channelName), 1)
// Increment the overall poll count since a changes request (regardless of whether there have been polled changes)
totalPollCount := atomic.AddUint32(&k.pollCount, 1)
unreadPollCount := atomic.LoadUint32(&k.unreadPollCount)
if unreadPollCount > kMaxUnreadPollCount {
// We've sent a notify, but had (kMaxUnreadPollCount) polls without anyone calling getChanges.
// Assume nobody is listening for updates - cancel polling for this channel
return false, true
}
if unreadPollCount > 0 {
// Give listeners more time to call getChanges, but increment
atomic.AddUint32(&k.unreadPollCount, 1)
}
k.lastPolledLock.Lock()
defer k.lastPolledLock.Unlock()
base.LogTo("DIndex+", "Poll for changes for channel %s", k.channelName)
if k.lastPolledClock == nil {
k.lastPolledClock = k.clock.Copy()
k.lastPolledSince = k.clock.Copy()
}
// Find the minimum of stable clock and new channel clock (to ignore cases when channel clock has
// changed but stable hasn't yet)
combinedClock := base.GetMinimumClock(stableClock, newChannelClock)
if !combinedClock.AnyAfter(k.lastPolledClock) {
// No changes. Only return true if we've exceeded empty poll count (and want to trigger the "is
// anyone listening" check)
if totalPollCount > kMaxEmptyPollCount {
return true, false
} else {
return false, false
}
}
// The clock has changed - load the changes and store in last polled
if err := k.updateLastPolled(combinedClock); err != nil {
base.Warn("Error updating last polled for channel %s: %v", k.channelName, err)
return false, false
}
// We have changes - increment unread counter if we haven't already
if unreadPollCount == 0 {
atomic.AddUint32(&k.unreadPollCount, 1)
}
return true, false
}
func (k *kvChannelIndex) updateLastPolled(combinedClock base.SequenceClock) error {
// Compare counter again, in case someone has already updated last polled while we waited for the lock
if combinedClock.AnyAfter(k.clock) {
// Get changes since the last clock
recentChanges, err := k.channelStorage.GetChanges(k.lastPolledClock, combinedClock)
indexExpvars.Add("updateChannelPolled", 1)
if err != nil {
return err
}
if len(recentChanges) > 0 {
k.lastPolledChanges = recentChanges
k.lastPolledSince.SetTo(k.lastPolledClock)
k.lastPolledClock.SetTo(combinedClock)
} else {
base.Warn("pollForChanges: channel [%s] clock changed, but no changes found in cache.", k.channelName)
return errors.New("Expected changes based on clock, none found")
}
/* onChange handled by change index
if k.onChange != nil {
k.onChange(base.SetOf(k.channelName))
}
*/
}
return nil
}
// checkLastPolled returns a copy of the most recently polled changes when the
// caller's since/chanClock pair matches the cached polling window; otherwise
// returns nil.
func (k *kvChannelIndex) checkLastPolled(since base.SequenceClock, chanClock base.SequenceClock) (results []*LogEntry) {

	k.lastPolledLock.RLock()
	defer k.lastPolledLock.RUnlock()

	if k.lastPolledClock == nil || k.lastPolledSince == nil {
		return results
	}

	if k.lastPolledClock.Equals(chanClock) && k.lastPolledSince.AllBefore(since) {
		// Bug fix: copy into a nil slice copies zero elements, so the cached
		// changes were never returned. Allocate the destination first.
		results = make([]*LogEntry, len(k.lastPolledChanges))
		copy(results, k.lastPolledChanges)
	}
	return results
}
// Returns the set of index entries for the channel more recent than the
// specified since SequenceClock. Index entries with sequence values greater than
// the index stable sequence are not returned.
func (k *kvChannelIndex) getChanges(since base.SequenceClock) ([]*LogEntry, error) {
var results []*LogEntry
// Someone is still interested in this channel - reset poll counts
atomic.StoreUint32(&k.pollCount, 0)
atomic.StoreUint32(&k.unreadPollCount, 0)
// TODO: pass in an option to reuse existing channel clock
chanClock, err := k.loadChannelClock()
if err != nil {
base.LogTo("DIndex+", "Error loading channel clock for channel %s - may not exist for channel: %v", k.channelName, err)
}
base.LogTo("DIndex+", "[channelIndex.GetChanges] Channel clock for channel %s: %s", k.channelName, base.PrintClock(chanClock))
// If requested clock is later than the channel clock, return empty
if since.AllAfter(chanClock) {
base.LogTo("DIndex+", "requested clock is later than channel clock - no new changes to report")
return results, nil
}
// If the since value is more recent than the last polled clock, return the results from the
// last polling. Has the potential to return values earlier than since and later than
// lastPolledClock, but these duplicates will be ignored by replication. Could validate
// greater than since inside this if clause, but leaving out as a performance optimization for
// now
if lastPolledResults := k.checkLastPolled(since, chanClock); len(lastPolledResults) > 0 {
indexExpvars.Add("getChanges_lastPolled_hit", 1)
return lastPolledResults, nil
}
indexExpvars.Add("getChanges_lastPolled_miss", 1)
return k.channelStorage.GetChanges(since, chanClock)
}
// Returns the block keys needed to retrieve all changes between fromClock and toClock. When
// stableOnly is true, restricts to only those needed to retrieve changes earlier than the stable sequence
/*
func (k *kvChannelIndex) calculateBlocks(fromClock base.SequenceClock, toClock base.SequenceClock, stableOnly bool) BlockSet {
stableClock := k.stableSequence
sinceBlockSet := make(BlockSet)
for vb, sequence := range k.clock.value {
sinceSeq := since.GetSequence(vb)
if sequence > sinceSeq && (sinceSeq > toClock.GetSequence(vb) || stableOnly == false) {
sinceEntry := kvIndexEntry{vbNo: vb, sequence: sinceSeq}
blockKey := GenerateBlockKey(k.channelName, sinceSeq, k.partitionMap[vbNo])
sinceBlockSet[blockKey] = append(sinceBlockSet[blockKey], sinceEntry)
}
}
}
*/
func (k *kvChannelIndex) getIndexCounter() (uint64, error) {
key := getIndexCountKey(k.channelName)
var intValue uint64
_, err := k.indexBucket.Get(key, &intValue)
if err != nil {
// return nil here - cache clock may not have been initialized yet
return 0, nil
}
return intValue, nil
}
func (k *kvChannelIndex) loadChannelClock() (base.SequenceClock, error) {
chanClock := base.NewSyncSequenceClock()
key := getChannelClockKey(k.channelName)
value, _, err := k.indexBucket.GetRaw(key)
indexExpvars.Add("get_loadChannelClock", 1)
if err != nil {
base.LogTo("DIndex+", "Error loading channel clock for key %s:%v", key, err)
return chanClock, err
}
err = chanClock.Unmarshal(value)
if err != nil {
base.Warn("Error unmarshalling channel clock for channel %s, clock value %v", k.channelName, value)
}
return chanClock, err
}
// Determine the cache block index for a sequence
func (k *kvChannelIndex) getBlockIndex(sequence uint64) uint16 {
base.LogTo("DCache", "block index for sequence %d is %d", sequence, uint16(sequence/byteCacheBlockCapacity))
return uint16(sequence / byteCacheBlockCapacity)
}
func (k *kvChannelIndex) loadClock() {
if k.clock == nil {
k.clock = base.NewSequenceClockImpl()
}
data, cas, err := k.indexBucket.GetRaw(getChannelClockKey(k.channelName))
if err != nil {
base.LogTo("DCache+", "Unable to find existing channel clock for channel %s - treating as new", k.channelName)
}
k.clock.Unmarshal(data)
k.clock.SetCas(cas)
base.LogTo("DCache+", "Loaded channel clock: %s", base.PrintClock(k.clock))
}
// writeClockCas merges updateClock into the in-memory channel clock and
// persists it to the bucket using CAS. On CAS contention, re-reads the stored
// clock, re-applies the update, and retries (via the writeCasRaw callback).
func (k *kvChannelIndex) writeClockCas(updateClock base.SequenceClock) error {

	// Initial set, for the first cas update attempt
	k.clock.UpdateWithClock(updateClock)
	value, err := k.clock.Marshal()
	if err != nil {
		return err
	}

	casOut, err := writeCasRaw(k.indexBucket, getChannelClockKey(k.channelName), value, k.clock.Cas(), 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		if err = k.clock.Unmarshal(value); err != nil {
			// Bug fix: original call was missing the %v format verb for err.
			base.Warn("Error unmarshalling clock during update: %v", err)
			return nil, err
		}
		k.clock.UpdateWithClock(updateClock)
		return k.clock.Marshal()
	})
	// Bug fix: the error from writeCasRaw was previously ignored and nil was
	// always returned, silently hiding failed clock writes from callers.
	if err != nil {
		return err
	}

	k.clock.SetCas(casOut)
	return nil
}
// A vbCache caches a set of LogEntry values, representing the set of entries for the channel for a
// particular vbucket. Intended to cache recently accessed channel block data, to avoid repeated
// block retrieval, parsing, and deduplication.
// The validFrom and validTo specify the range for which the cache is valid.
// Additional entries can be added to the cache using appendEntries (for entries after validTo), or
// prependEntries (backfilling values prior to validFrom).
type vbCache struct {
validFrom uint64 // Sequence the cache is valid from
validTo uint64 // Sequence the cache is valid to
entries SortedEntrySet // Deduplicated entries for the vbucket for sequences between validFrom and validTo, ordered by sequence
docIDs map[string]uint64 // Map of doc ids present in entries, mapped to that doc's sequence id - used to optimize dedup processing
cacheLock sync.RWMutex // Lock used for updating cache from index
}
func newVbCache() *vbCache {
return &vbCache{
validFrom: math.MaxUint64,
validTo: 0,
docIDs: make(map[string]uint64),
}
}
// Returns all cached entries between fromSequence and toSequence. When the cache doesn't hold the
// full requested range, the returned validFrom/validTo describe the subrange actually covered.
func (v *vbCache) getEntries(fromSequence, toSequence uint64) (validFrom uint64, validTo uint64, entries []*LogEntry) {
	// A reversed range is an empty request.
	if fromSequence > toSequence {
		return 0, 0, entries
	}
	v.cacheLock.RLock()
	defer v.cacheLock.RUnlock()
	// Build a new slice (instead of returning a subslice of v.entries) to protect
	// against future deduplication within v.entries.
	// TODO: use binary search to find the starting point in the entry list?
	for _, cached := range v.entries {
		if cached.Sequence < fromSequence {
			continue
		}
		if cached.Sequence > toSequence {
			break
		}
		entries = append(entries, cached)
	}
	// Clamp the reported validity window to what the cache actually covers.
	validFrom = maxUint64(fromSequence, v.validFrom)
	validTo = minUint64(toSequence, v.validTo)
	return validFrom, validTo, entries
}
// Adds the provided set of entries to the cache. Incoming entries must be ordered by vbucket sequence, and the first
// entry must be greater than the current cache's validTo.
// Deduplicates by doc id: cached entries for a doc id present in the incoming set are replaced by the newer entry.
func (v *vbCache) appendEntries(entries SortedEntrySet, validFrom uint64, validTo uint64) error {
	if len(entries) == 0 {
		return nil
	}
	// Reject appends that would leave an uncached gap between the current validTo and the new range.
	if v.validTo > 0 && validFrom > v.validTo+1 {
		return errors.New("Cache conflict - attempt to append entries with gap in valid range")
	}
	// NOTE(review): validateCacheUpdate takes (and releases) the read lock before the write
	// lock below is acquired, so another writer could interleave - confirm that callers
	// serialize appendEntries/prependEntries externally.
	entries, duplicateDocIDs, err := v.validateCacheUpdate(entries, false)
	if err != nil {
		return err
	}
	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()
	// Remove duplicates from cache (the incoming entries supersede them).
	// NOTE(review): the error from Remove is ignored - a missing sequence is silently skipped.
	for _, docID := range duplicateDocIDs {
		v.entries.Remove(v.docIDs[docID])
	}
	// Append entries
	v.entries = append(v.entries, entries...)
	// Update docID map
	for _, entry := range entries {
		v.docIDs[entry.DocID] = entry.Sequence
	}
	// If this is the first set appended, may need to set validFrom as well as validTo
	if validFrom < v.validFrom {
		v.validFrom = validFrom
	}
	v.validTo = validTo
	return nil
}
// Backfills the cache with the provided set of entries. Incoming entries must be ordered by vbucket
// sequence, and must not leave a gap below the cache's current validFrom (previous comment was
// copy-pasted from appendEntries and described the append contract).
// Deduplicates by doc id: incoming entries whose doc id is already cached are dropped, since the
// cached entry has the higher sequence.
func (v *vbCache) prependEntries(entries SortedEntrySet, validFrom uint64, validTo uint64) error {
	if len(entries) == 0 {
		return nil
	}
	// Once the cache has a real validFrom, reject prepends that would leave a gap below it.
	if v.validFrom < math.MaxUint64 && validTo < v.validFrom-1 {
		return errors.New("Cache conflict - attempt to prepend entries with gap in valid range")
	}
	// deleteDuplicates=true: drop incoming entries for doc ids the cache already holds.
	entries, _, err := v.validateCacheUpdate(entries, true)
	if err != nil {
		return err
	}
	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()
	// Prepend all remaining entries
	// NOTE(review): this append may write into the caller's backing array for 'entries' - confirm
	// callers don't reuse the slice afterwards.
	v.entries = append(entries, v.entries...)
	// Append new doc ids
	for _, entry := range entries {
		v.docIDs[entry.DocID] = entry.Sequence
	}
	v.validFrom = validFrom
	return nil
}
// Pre-processing for an incoming set of cache updates. Validates that the entries are ordered by
// sequence, deduplicates by DocID within the incoming set (keeping the highest-sequence
// occurrence), and returns the list of DocIDs that are already present in the cache. When
// deleteDuplicates is true, entries for already-cached DocIDs are also removed from the returned
// set (used by prepend, where the cached entry is the more recent one).
func (v *vbCache) validateCacheUpdate(entries SortedEntrySet, deleteDuplicates bool) (validEntries SortedEntrySet, duplicateEntries []string, err error) {
	v.cacheLock.RLock()
	defer v.cacheLock.RUnlock()
	var prevSeq uint64
	prevSeq = math.MaxUint64
	// Iterate over entries in reverse, doing the following:
	// - deduplicating by DocID within the set of new entries (*not* v.entries)
	// - build list of docIDs that are already known to the cache
	// Reverse iteration means the highest-sequence occurrence of a DocID is seen first and
	// retained, and makes in-place removal by index safe.
	entriesDocIDs := make(map[string]struct{})
	for i := len(entries) - 1; i >= 0; i-- {
		entry := entries[i]
		// validate that the sequences are ordered as expected
		if entry.Sequence > prevSeq {
			return entries, duplicateEntries, errors.New("Entries must be ordered by sequence when updating cache")
		}
		prevSeq = entry.Sequence
		// If we've already seen this doc ID in the new entry set, remove the older occurrence
		if _, ok := entriesDocIDs[entry.DocID]; ok {
			entries = append(entries[:i], entries[i+1:]...)
		} else { // Otherwise, record it and check against the cache's known doc IDs
			// Bug fix: this map was previously never populated, so intra-set
			// deduplication silently did nothing.
			entriesDocIDs[entry.DocID] = struct{}{}
			if _, ok := v.docIDs[entry.DocID]; ok {
				duplicateEntries = append(duplicateEntries, entry.DocID)
				if deleteDuplicates {
					entries = append(entries[:i], entries[i+1:]...)
				}
			}
		}
	}
	return entries, duplicateEntries, nil
}
// Removes an entry from the cache, taking the cache write lock for the duration.
func (v *vbCache) removeEntry(docID string, sequence uint64) {
	v.cacheLock.Lock()
	defer v.cacheLock.Unlock()
	v._removeEntry(docID, sequence)
}
// Removes an entry from the cache. Assumes caller has write lock on v.cacheLock
func (v *vbCache) _removeEntry(docID string, sequence uint64) {
	// NOTE(review): not yet implemented - callers (removeEntry) currently get a no-op.
}
// SortedEntrySet
// Optimizes removal of entries from a sorted array.
// Entries are kept ordered by Sequence; Remove and SearchSortedEntrySet rely on that ordering.
type SortedEntrySet []*LogEntry
// Remove deletes the entry with the given sequence from the set, preserving order.
// Returns an error when no entry with that sequence is present.
// (Restructured to drop the `else` after a terminating branch, per Go style.)
func (h *SortedEntrySet) Remove(x uint64) error {
	i := SearchSortedEntrySet(*h, x)
	if i < len(*h) && (*h)[i].Sequence == x {
		*h = append((*h)[:i], (*h)[i+1:]...)
		return nil
	}
	return errors.New("Value not found")
}
// SearchSortedEntrySet returns the smallest index i at which a[i].Sequence >= x, or len(a)
// when there is no such index. Skipped-sequence analogue of sort.SearchInts - based on
// http://golang.org/src/sort/search.go?s=2959:2994#L73
func SearchSortedEntrySet(a SortedEntrySet, x uint64) int {
	atOrAbove := func(i int) bool {
		return a[i].Sequence >= x
	}
	return sort.Search(len(a), atOrAbove)
}
//////// Utility functions for key generation
// Get the key for the cache count doc
func getIndexCountKey(channelName string) string {
return fmt.Sprintf("%s_count:%s", kIndexPrefix, channelName)
}
// Get the key for a cache block doc, based on the channel name, block index, and the
// vbucket suffix for the block's partition (looked up via vbSuffixMap).
func getIndexBlockKey(channelName string, blockIndex uint16, partition uint16) string {
	return fmt.Sprintf("%s:%s:block%d:%s", kIndexPrefix, channelName, blockIndex, vbSuffixMap[partition])
}
// Get the key for the channel's sequence clock doc. (Previous comment was copy-pasted
// from the block-key helper above.)
func getChannelClockKey(channelName string) string {
	return fmt.Sprintf("%s_SequenceClock:%s", kIndexPrefix, channelName)
}
// minUint64 returns the smaller of a and b.
func minUint64(a, b uint64) uint64 {
	if b < a {
		return b
	}
	return a
}
// maxUint64 returns the larger of a and b.
func maxUint64(a, b uint64) uint64 {
	if b > a {
		return b
	}
	return a
}
|
package messenger
import (
"encoding/json"
"fmt"
"net/http"
"time"
)
const (
	// WebhookURL is where the Messenger client should listen for webhook events.
	// New registers either the event handler or the verify handler at this path.
	WebhookURL = "/webhook"
)
// Options are the settings used when creating a Messenger client.
type Options struct {
	// Verify sets whether or not to be in the "verify" mode. Used for
	// verifying webhooks on the Facebook Developer Portal.
	// When true, New wires the verify handler instead of the event handler.
	Verify bool
	// VerifyToken is the token to be used when verifying the webhook. Is set
	// when the webhook is created.
	VerifyToken string
	// Token is the access token of the Facebook page to send messages from.
	Token string
}
// MessageHandler is a handler used for responding to a message containing text.
type MessageHandler func(Message, *Response)

// DeliveryHandler is a handler used for responding to a read receipt.
// NOTE(review): Delivery events look like delivery receipts rather than read
// receipts - confirm the intended semantics.
type DeliveryHandler func(Delivery, *Response)
// Messenger is the client which manages communication with the Messenger Platform API.
type Messenger struct {
	mux              *http.ServeMux    // routes the webhook endpoint
	messageHandlers  []MessageHandler  // run for each text message event
	deliveryHandlers []DeliveryHandler // run for each delivery event
	token            string            // page access token used when replying
}
// New creates a new Messenger. You pass in Options in order to affect settings.
// In verify mode the webhook route answers Facebook's verification challenge
// instead of dispatching events.
func New(mo Options) *Messenger {
	msgr := &Messenger{
		mux:   http.NewServeMux(),
		token: mo.Token,
	}
	handler := msgr.handle
	if mo.Verify {
		handler = newVerifyHandler(mo.VerifyToken)
	}
	msgr.mux.HandleFunc(WebhookURL, handler)
	return msgr
}
// HandleMessage adds a new MessageHandler to the Messenger which will be triggered
// when a message is received by the client.
func (m *Messenger) HandleMessage(f MessageHandler) {
	m.messageHandlers = append(m.messageHandlers, f)
}
// HandleDelivery adds a new DeliveryHandler to the Messenger which will be triggered
// when a previously sent message is read by the recipient.
// NOTE(review): dispatch fires these on Delivery events - confirm whether "read"
// here should be "delivered".
func (m *Messenger) HandleDelivery(f DeliveryHandler) {
	m.deliveryHandlers = append(m.deliveryHandlers, f)
}
// Handler returns the Messenger in HTTP client form.
// The returned handler serves the webhook route registered in New.
func (m *Messenger) Handler() http.Handler {
	return m.mux
}
// handle is the internal HTTP handler for the webhooks. It decodes the incoming
// event payload, dispatches it to the registered handlers, and acknowledges
// receipt with a small JSON status body.
func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {
	var rec Receive
	if err := json.NewDecoder(r.Body).Decode(&rec); err != nil {
		fmt.Println(err)
		// Bug fix: the previous literal `{status: 'not ok'}` was not valid JSON
		// (unquoted key, single quotes).
		fmt.Fprintln(w, `{"status": "not ok"}`)
		return
	}
	// Non-"page" objects are logged but still dispatched, preserving prior behavior.
	if rec.Object != "page" {
		fmt.Println("Object is not page, undefined behaviour. Got", rec.Object)
	}
	m.dispatch(rec)
	fmt.Fprintln(w, `{"status": "ok"}`)
}
// dispatch triggers all of the relevant handlers when a webhook event is received.
func (m *Messenger) dispatch(r Receive) {
	for _, entry := range r.Entry {
		for _, info := range entry.Messaging {
			a := m.classify(info, entry)
			if a == UnknownAction {
				fmt.Println("Unknown action:", info)
				continue
			}
			// Every handler for this event replies to the original sender using the page token.
			resp := &Response{
				to:    Recipient{info.Sender.ID},
				token: m.token,
			}
			switch a {
			case TextAction:
				for _, f := range m.messageHandlers {
					// Dereference copies the message value, then routing/timestamp
					// fields are filled in from the envelope before each handler call.
					message := *info.Message
					message.Sender = info.Sender
					message.Recipient = info.Recipient
					// NOTE(review): Timestamp is passed as Unix seconds; Messenger
					// webhooks commonly deliver milliseconds - verify.
					message.Time = time.Unix(info.Timestamp, 0)
					f(message, resp)
				}
			case DeliveryAction:
				for _, f := range m.deliveryHandlers {
					f(*info.Delivery, resp)
				}
			}
		}
	}
}
// classify determines what type of message a webhook event is.
func (m *Messenger) classify(info MessageInfo, e Entry) Action {
	switch {
	case info.Message != nil:
		return TextAction
	case info.Delivery != nil:
		return DeliveryAction
	default:
		return UnknownAction
	}
}
// newVerifyHandler returns a function which can be used to handle webhook verification
func newVerifyHandler(token string) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if r.FormValue("hub.verify_token") == token {
fmt.Fprintln(w, r.FormValue("hub.challenge"))
return
}
fmt.Fprintln(w, "Incorrect verify token.")
}
}
Fix spelling mistake
package messenger
import (
"encoding/json"
"fmt"
"net/http"
"time"
)
const (
	// WebhookURL is where the Messenger client should listen for webhook events.
	// New registers either the event handler or the verify handler at this path.
	WebhookURL = "/webhook"
)

// Options are the settings used when creating a Messenger client.
type Options struct {
	// Verify sets whether or not to be in the "verify" mode. Used for
	// verifying webhooks on the Facebook Developer Portal.
	// When true, New wires the verify handler instead of the event handler.
	Verify bool
	// VerifyToken is the token to be used when verifying the webhook. Is set
	// when the webhook is created.
	VerifyToken string
	// Token is the access token of the Facebook page to send messages from.
	Token string
}

// MessageHandler is a handler used for responding to a message containing text.
type MessageHandler func(Message, *Response)

// DeliveryHandler is a handler used for responding to a read receipt.
type DeliveryHandler func(Delivery, *Response)

// Messenger is the client which manages communication with the Messenger Platform API.
type Messenger struct {
	mux              *http.ServeMux    // routes the webhook endpoint
	messageHandlers  []MessageHandler  // run for each text message event
	deliveryHandlers []DeliveryHandler // run for each delivery event
	token            string            // page access token used when replying
}
// New creates a new Messenger. You pass in Options in order to affect settings.
// In verify mode the webhook route answers Facebook's verification challenge
// instead of dispatching events.
func New(mo Options) *Messenger {
	m := &Messenger{
		mux:   http.NewServeMux(),
		token: mo.Token,
	}
	if mo.Verify {
		m.mux.HandleFunc(WebhookURL, newVerifyHandler(mo.VerifyToken))
	} else {
		m.mux.HandleFunc(WebhookURL, m.handle)
	}
	return m
}
// HandleMessage adds a new MessageHandler to the Messenger which will be triggered
// when a message is received by the client.
// Handlers run in registration order for every text message event.
func (m *Messenger) HandleMessage(f MessageHandler) {
	m.messageHandlers = append(m.messageHandlers, f)
}
// HandleDelivery adds a new DeliveryHandler to the Messenger which will be triggered
// when a previously sent message is read by the recipient.
// NOTE(review): dispatch fires these on Delivery events - confirm whether "read"
// here should be "delivered".
func (m *Messenger) HandleDelivery(f DeliveryHandler) {
	m.deliveryHandlers = append(m.deliveryHandlers, f)
}
// Handler returns the Messenger in HTTP client form.
// The returned handler serves the webhook route registered in New.
func (m *Messenger) Handler() http.Handler {
	return m.mux
}
// handle is the internal HTTP handler for the webhooks. It decodes the incoming
// event payload, dispatches it to the registered handlers, and acknowledges
// receipt with a small JSON status body.
func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {
	var rec Receive
	if err := json.NewDecoder(r.Body).Decode(&rec); err != nil {
		fmt.Println(err)
		// Bug fix: the previous literal `{status: 'not ok'}` was not valid JSON
		// (unquoted key, single quotes).
		fmt.Fprintln(w, `{"status": "not ok"}`)
		return
	}
	// Non-"page" objects are logged but still dispatched, preserving prior behavior.
	if rec.Object != "page" {
		fmt.Println("Object is not page, undefined behaviour. Got", rec.Object)
	}
	m.dispatch(rec)
	fmt.Fprintln(w, `{"status": "ok"}`)
}
// dispatch triggers all of the relevant handlers when a webhook event is received.
func (m *Messenger) dispatch(r Receive) {
	for _, entry := range r.Entry {
		for _, info := range entry.Messaging {
			a := m.classify(info, entry)
			if a == UnknownAction {
				fmt.Println("Unknown action:", info)
				continue
			}
			// Every handler for this event replies to the original sender using the page token.
			resp := &Response{
				to:    Recipient{info.Sender.ID},
				token: m.token,
			}
			switch a {
			case TextAction:
				for _, f := range m.messageHandlers {
					// Dereference copies the message value, then routing/timestamp
					// fields are filled in from the envelope before each handler call.
					message := *info.Message
					message.Sender = info.Sender
					message.Recipient = info.Recipient
					// NOTE(review): Timestamp is passed as Unix seconds; Messenger
					// webhooks commonly deliver milliseconds - verify.
					message.Time = time.Unix(info.Timestamp, 0)
					f(message, resp)
				}
			case DeliveryAction:
				for _, f := range m.deliveryHandlers {
					f(*info.Delivery, resp)
				}
			}
		}
	}
}
// classify determines what type of message a webhook event is.
func (m *Messenger) classify(info MessageInfo, e Entry) Action {
	switch {
	case info.Message != nil:
		return TextAction
	case info.Delivery != nil:
		return DeliveryAction
	default:
		return UnknownAction
	}
}
// newVerifyHandler returns a function which can be used to handle webhook verification
func newVerifyHandler(token string) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if r.FormValue("hub.verify_token") == token {
fmt.Fprintln(w, r.FormValue("hub.challenge"))
return
}
fmt.Fprintln(w, "Incorrect verify token.")
}
}
|
package format
import (
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/plans"
"github.com/mitchellh/colorstring"
"github.com/zclconf/go-cty/cty"
)
// TestResourceChange_primitiveTypes exercises the plan diff renderer for primitive
// attribute changes: creation, deletion, in-place update, forced replacement,
// multi-line strings, sensitive values, and tainted resources. Each case pins the
// exact rendered output via ExpectedOutput (compared verbatim by runTestCases).
// NOTE(review): source formatting was mangled on import; raw-string expectations
// are reproduced as found - confirm leading whitespace against the original file.
func TestResourceChange_primitiveTypes(t *testing.T) {
	testCases := map[string]testCase{
		"creation": {
			Action: plans.Create,
			Mode: addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
}
`,
		},
		"creation (null string)": {
			Action: plans.Create,
			Mode: addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"string": cty.StringVal("null"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"string": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ string = "null"
}
`,
		},
		"creation (null string with extra whitespace)": {
			Action: plans.Create,
			Mode: addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"string": cty.StringVal("null "),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"string": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ string = "null "
}
`,
		},
		"deletion": {
			Action: plans.Delete,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
			}),
			After: cty.NullVal(cty.EmptyObject),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be destroyed
- resource "test_instance" "example" {
- id = "i-02ae66f368e8518a9" -> null
}
`,
		},
		// Empty-valued attributes are omitted from the rendered destroy diff.
		"deletion (empty string)": {
			Action: plans.Delete,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"intentionally_long": cty.StringVal(""),
			}),
			After: cty.NullVal(cty.EmptyObject),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
					"intentionally_long": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be destroyed
- resource "test_instance" "example" {
- id = "i-02ae66f368e8518a9" -> null
}
`,
		},
		"string in-place update": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
}
`,
		},
		// RequiredReplace marks "ami", so the change is rendered as replacement.
		"string force-new update": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "ami"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement
id = "i-02ae66f368e8518a9"
}
`,
		},
		"string in-place update (null values)": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"unchanged": cty.NullVal(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"unchanged": cty.NullVal(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"unchanged": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
}
`,
		},
		// Multi-line strings render as heredoc-style diffs.
		"in-place update of multi-line string field": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"more_lines": cty.StringVal(`original
`),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"more_lines": cty.StringVal(`original
new line
`),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"more_lines": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ more_lines = <<~EOT
original
+ new line
EOT
}
`,
		},
		"addition of multi-line string field": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"more_lines": cty.NullVal(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"more_lines": cty.StringVal(`original
new line
`),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"more_lines": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ more_lines = <<~EOT
original
new line
EOT
}
`,
		},
		"force-new update of multi-line string field": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"more_lines": cty.StringVal(`original
`),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"more_lines": cty.StringVal(`original
new line
`),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"more_lines": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "more_lines"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ more_lines = <<~EOT # forces replacement
original
+ new line
EOT
}
`,
		},
		// Sensitive
		"creation with sensitive field": {
			Action: plans.Create,
			Mode: addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"password": cty.StringVal("top-secret"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
					"password": {Type: cty.String, Optional: true, Sensitive: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ password = (sensitive value)
}
`,
		},
		"update with equal sensitive field": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("blah"),
				"str": cty.StringVal("before"),
				"password": cty.StringVal("top-secret"),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"str": cty.StringVal("after"),
				"password": cty.StringVal("top-secret"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
					"str": {Type: cty.String, Optional: true},
					"password": {Type: cty.String, Optional: true, Sensitive: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "blah" -> (known after apply)
password = (sensitive value)
~ str = "before" -> "after"
}
`,
		},
		// tainted resources
		"replace tainted resource": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-AFTER"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "ami"},
			}),
			Tainted: true,
			ExpectedOutput: ` # test_instance.example is tainted, so must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement
~ id = "i-02ae66f368e8518a9" -> (known after apply)
}
`,
		},
		"force replacement with empty before value": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("name"),
				"forced": cty.NullVal(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("name"),
				"forced": cty.StringVal("example"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"name": {Type: cty.String, Optional: true},
					"forced": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "forced"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
+ forced = "example" # forces replacement
name = "name"
}
`,
		},
		// Same as above, but with "" (legacy SDK empty value) instead of null.
		"force replacement with empty before value legacy": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("name"),
				"forced": cty.StringVal(""),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("name"),
				"forced": cty.StringVal("example"),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"name": {Type: cty.String, Optional: true},
					"forced": {Type: cty.String, Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "forced"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
+ forced = "example" # forces replacement
name = "name"
}
`,
		},
	}
	runTestCases(t, testCases)
}
func TestResourceChange_JSON(t *testing.T) {
testCases := map[string]testCase{
"creation": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{
"str": "value",
"list":["a","b", 234, true],
"obj": {"key": "val"}
}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ json_field = jsonencode(
{
+ list = [
+ "a",
+ "b",
+ 234,
+ true,
]
+ obj = {
+ key = "val"
}
+ str = "value"
}
)
}
`,
},
"in-place update of object": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
aaa = "value"
+ bbb = "new_value"
}
)
}
`,
},
"in-place update (from empty tuple)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": []}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": ["value"]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
+ "value",
]
}
)
}
`,
},
"in-place update (to empty tuple)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": ["value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": []}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
- "value",
]
}
)
}
`,
},
"in-place update (tuple of different types)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
42,
~ {
~ foo = "bar" -> "baz"
},
"value",
]
}
)
}
`,
},
"force-new update": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "json_field"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
aaa = "value"
+ bbb = "new_value"
} # forces replacement
)
}
`,
},
"in-place update (whitespace change)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa":"value",
"bbb":"another"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode( # whitespace changes
{
aaa = "value"
bbb = "another"
}
)
}
`,
},
"force-new update (whitespace change)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa":"value",
"bbb":"another"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "json_field"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode( # whitespace changes force replacement
{
aaa = "value"
bbb = "another"
}
)
}
`,
},
"creation (empty)": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ json_field = jsonencode({})
}
`,
},
"JSON list item removal": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`["first","second","third"]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["first","second"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
"first",
"second",
- "third",
]
)
}
`,
},
"JSON list item addition": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`["first","second"]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["first","second","third"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
"first",
"second",
+ "third",
]
)
}
`,
},
"JSON list object addition": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"first":"111"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"first":"111","second":"222"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
first = "111"
+ second = "222"
}
)
}
`,
},
"JSON object with nested list": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{
"Statement": ["first"]
}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{
"Statement": ["first", "second"]
}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ Statement = [
"first",
+ "second",
]
}
)
}
`,
},
"JSON list of objects - adding item": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`[{"one": "111"}]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
{
one = "111"
},
+ {
+ two = "222"
},
]
)
}
`,
},
"JSON list of objects - removing item": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
{
one = "111"
},
- {
- two = "222"
},
{
three = "333"
},
]
)
}
`,
},
"JSON object with list of objects": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ parent = [
{
one = "111"
},
+ {
+ two = "222"
},
]
}
)
}
`,
},
"JSON object double nested lists": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ parent = [
~ {
~ another_list = [
"111",
+ "222",
]
},
]
}
)
}
`,
},
"in-place update from object to tuple": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["aaa", 42, "something"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
- aaa = [
- 42,
- {
- foo = "bar"
},
- "value",
]
} -> [
+ "aaa",
+ 42,
+ "something",
]
)
}
`,
},
}
runTestCases(t, testCases)
}
// TestResourceChange_primitiveList verifies the rendered plan diff for a
// resource carrying a list(string) attribute, covering the interesting
// transitions visible below: list creation from null, first element added to
// an empty list, mid-list insertion and deletion, a force-new replacement,
// full-to-empty and null-to-empty changes, and unknown elements rendered as
// "(known after apply)". Each map key doubles as the subtest name.
func TestResourceChange_primitiveList(t *testing.T) {
	testCases := map[string]testCase{
		// Null before-value: the whole attribute is shown as added ("+").
		"in-place update - creation": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.NullVal(cty.List(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      + list_field = [
          + "new-element",
        ]
    }
`,
		},
		// Empty (non-null) before-value: attribute is modified ("~"), element added.
		"in-place update - first addition": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
          + "new-element",
        ]
    }
`,
		},
		"in-place update - insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
            "aaaa",
          + "bbbb",
            "cccc",
        ]
    }
`,
		},
		// Same insertion, but list_field is in RequiredReplace: annotated
		// with "# forces replacement" and rendered as a -/+ replace.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "list_field"},
			}),
			Tainted: false,
			ExpectedOutput: `  # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [ # forces replacement
            "aaaa",
          + "bbbb",
            "cccc",
        ]
    }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
          - "aaaa",
            "bbbb",
          - "cccc",
        ]
    }
`,
		},
		// An empty (non-null) list on create renders compactly as [].
		"creation - empty list": {
			Action: plans.Create,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be created
  + resource "test_instance" "example" {
      + ami        = "ami-STATIC"
      + id         = (known after apply)
      + list_field = []
    }
`,
		},
		"in-place update - full to empty": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
          - "aaaa",
          - "bbbb",
          - "cccc",
        ]
    }
`,
		},
		// null -> [] is still a visible change: rendered as adding the
		// attribute with an empty value.
		"in-place update - null to empty": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.NullVal(cty.List(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      + list_field = []
    }
`,
		},
		// An unknown element replacing a known one is rendered as a
		// deletion plus "(known after apply)" addition.
		"update to unknown element": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
            "aaaa",
          - "bbbb",
          + (known after apply),
            "cccc",
        ]
    }
`,
		},
		"update - two new unknown elements": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
					cty.UnknownVal(cty.String),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami        = "ami-STATIC"
      ~ id         = "i-02ae66f368e8518a9" -> (known after apply)
      ~ list_field = [
            "aaaa",
          - "bbbb",
          + (known after apply),
          + (known after apply),
            "cccc",
        ]
    }
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_primitiveSet verifies the rendered plan diff for a
// resource carrying a set(string) attribute, covering: set creation from
// null, first element inserted into an empty set, insertion and deletion,
// a force-new replacement, null/empty transitions, and both a wholly
// unknown set and an unknown element, rendered as "(known after apply)".
// Each map key doubles as the subtest name.
func TestResourceChange_primitiveSet(t *testing.T) {
	testCases := map[string]testCase{
		// Null before-value: the whole attribute is shown as added ("+").
		"in-place update - creation": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.NullVal(cty.Set(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      + set_field = [
          + "new-element",
        ]
    }
`,
		},
		// Empty (non-null) before-value: attribute is modified ("~"), element added.
		"in-place update - first insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
          + "new-element",
        ]
    }
`,
		},
		"in-place update - insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
            "aaaa",
          + "bbbb",
            "cccc",
        ]
    }
`,
		},
		// Same insertion, but set_field is in RequiredReplace: annotated
		// with "# forces replacement" and rendered as a -/+ replace.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "set_field"},
			}),
			Tainted: false,
			ExpectedOutput: `  # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [ # forces replacement
            "aaaa",
          + "bbbb",
            "cccc",
        ]
    }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
          - "aaaa",
            "bbbb",
          - "cccc",
        ]
    }
`,
		},
		// An empty (non-null) set on create renders compactly as [].
		"creation - empty set": {
			Action: plans.Create,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be created
  + resource "test_instance" "example" {
      + ami       = "ami-STATIC"
      + id        = (known after apply)
      + set_field = []
    }
`,
		},
		"in-place update - full to empty set": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			// Stated explicitly (zero value, so no behavior change) for
			// consistency with every other case in this table.
			Tainted: false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
          - "aaaa",
          - "bbbb",
        ]
    }
`,
		},
		// null -> [] is still a visible change: rendered as adding the
		// attribute with an empty value.
		"in-place update - null to empty set": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.NullVal(cty.Set(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      + set_field = []
    }
`,
		},
		// The whole attribute becomes unknown: old elements shown removed,
		// value replaced by "(known after apply)".
		"in-place update to unknown": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.UnknownVal(cty.Set(cty.String)),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
          - "aaaa",
          - "bbbb",
        ] -> (known after apply)
    }
`,
		},
		// A single unknown element inside a known set.
		"in-place update to unknown element": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: `  # test_instance.example will be updated in-place
  ~ resource "test_instance" "example" {
        ami       = "ami-STATIC"
      ~ id        = "i-02ae66f368e8518a9" -> (known after apply)
      ~ set_field = [
            "aaaa",
          - "bbbb",
          ~ (known after apply),
        ]
    }
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_map exercises the plan diff renderer for a resource
// schema with a map-typed attribute ("map_field"). Cases cover creating the
// attribute from null, inserting into an empty and a populated map, a
// forced replacement, deletion of elements, creation of the whole resource,
// and an element becoming unknown. Each case is rendered by runTestCases
// and compared verbatim against ExpectedOutput.
func TestResourceChange_map(t *testing.T) {
	testCases := map[string]testCase{
		// Attribute goes from null to a one-element map: rendered as "+".
		"in-place update - creation": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.NullVal(cty.Map(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"new-key": cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 + map_field = {
 + "new-key" = "new-element"
 }
 }
`,
		},
		// Empty (but non-null) map gains its first element: rendered as "~"
		// on the attribute with "+" on the element, unlike the null case above.
		"in-place update - first insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"new-key": cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 ~ map_field = {
 + "new-key" = "new-element"
 }
 }
`,
		},
		"in-place update - insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 ~ map_field = {
 "a" = "aaaa"
 + "b" = "bbbb"
 "c" = "cccc"
 }
 }
`,
		},
		// Same data change as "insertion", but map_field is in RequiredReplace,
		// so the action is DeleteThenCreate and "# forces replacement" is shown.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "map_field"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 ~ map_field = { # forces replacement
 "a" = "aaaa"
 + "b" = "bbbb"
 "c" = "cccc"
 }
 }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"b": cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 ~ map_field = {
 - "a" = "aaaa" -> null
 "b" = "bbbb"
 - "c" = "cccc" -> null
 }
 }
`,
		},
		// Whole-resource creation with an empty map: rendered inline as "{}".
		"creation - empty": {
			Action: plans.Create,
			Mode: addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be created
 + resource "test_instance" "example" {
 + ami = "ami-STATIC"
 + id = (known after apply)
 + map_field = {}
 }
`,
		},
		// One element becomes unknown: shown as "(known after apply)" for that
		// key while unchanged keys render without a change marker.
		"update to unknown element": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.UnknownVal(cty.String),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ami = "ami-STATIC"
 ~ id = "i-02ae66f368e8518a9" -> (known after apply)
 ~ map_field = {
 "a" = "aaaa"
 ~ "b" = "bbbb" -> (known after apply)
 "c" = "cccc"
 }
 }
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_nestedList exercises the plan diff renderer for nested
// blocks with NestingList (the "root_block_device" block). Cases cover an
// unchanged block, creation and insertion of blocks and attributes, forced
// replacement triggered both inside a block and on the whole block, block
// deletion, and a dynamically-typed attribute whose element types differ.
// Each case is rendered by runTestCases and compared verbatim against
// ExpectedOutput.
func TestResourceChange_nestedList(t *testing.T) {
	testCases := map[string]testCase{
		// The nested block is identical before and after; only the top-level
		// "ami" attribute changes, so the block renders with no markers.
		"in-place update - equal": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 root_block_device {
 volume_type = "gp2"
 }
 }
`,
		},
		// A block with only null attributes is added: rendered as an empty
		// "+ root_block_device {}".
		"in-place update - creation": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
				})),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.NullVal(cty.String),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device {}
 }
`,
		},
		"in-place update - first insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
				})),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device {
 + volume_type = "gp2"
 }
 }
`,
		},
		// A previously-null attribute inside an existing block gains a value:
		// the block renders "~" with "+" on just the new attribute.
		"in-place update - insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.NullVal(cty.String),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 ~ root_block_device {
 + new_field = "new_value"
 volume_type = "gp2"
 }
 }
`,
		},
		// RequiredReplace points at an attribute inside the block, so
		// "# forces replacement" appears on that attribute's line.
		"force-new update (inside block)": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("different"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "root_block_device"},
				cty.IndexStep{Key: cty.NumberIntVal(0)},
				cty.GetAttrStep{Name: "volume_type"},
			}),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 ~ root_block_device {
 ~ volume_type = "gp2" -> "different" # forces replacement
 }
 }
`,
		},
		// Same change, but RequiredReplace targets the whole block, so the
		// marker moves to the block header line.
		"force-new update (whole block)": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("different"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "root_block_device"},
			}),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 ~ root_block_device { # forces replacement
 ~ volume_type = "gp2" -> "different"
 }
 }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
					"new_field": cty.String,
				})),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 - root_block_device {
 - new_field = "new_value" -> null
 - volume_type = "gp2" -> null
 }
 }
`,
		},
		// cty.DynamicPseudoType attribute: the after value is a tuple of
		// objects whose "attr" types differ (string vs. bool), and each block
		// is rendered with its own concrete value.
		"with dynamically-typed attribute": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"block": cty.EmptyTupleVal,
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"block": cty.TupleVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"attr": cty.StringVal("foo"),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"attr": cty.True,
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"attr": {Type: cty.DynamicPseudoType, Optional: true},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 + block {
 + attr = "foo"
 }
 + block {
 + attr = true
 }
 }
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_nestedSet exercises the plan diff renderer for nested
// blocks with NestingSet. Because set elements have no stable identity, a
// modified element is rendered as an add of the new element plus a remove of
// the old one, rather than an in-place "~" change. Each case is rendered by
// runTestCases and compared verbatim against ExpectedOutput.
func TestResourceChange_nestedSet(t *testing.T) {
	testCases := map[string]testCase{
		"in-place update - creation": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
				})),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingSet,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device {
 + volume_type = "gp2"
 }
 }
`,
		},
		// Setting new_field changes the element's identity: the expected
		// output shows the new element added and the old element removed.
		"in-place update - insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.NullVal(cty.String),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingSet,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device {
 + new_field = "new_value"
 + volume_type = "gp2"
 }
 - root_block_device {
 - volume_type = "gp2" -> null
 }
 }
`,
		},
		// Whole-block RequiredReplace on a set: both the added and removed
		// elements carry the "# forces replacement" marker.
		"force-new update (whole block)": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("different"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "root_block_device"},
			}),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingSet,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device { # forces replacement
 + volume_type = "different"
 }
 - root_block_device { # forces replacement
 - volume_type = "gp2" -> null
 }
 }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.SetVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
					"new_field": cty.String,
				})),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingSet,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 - root_block_device {
 - new_field = "new_value" -> null
 - volume_type = "gp2" -> null
 }
 }
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_nestedMap exercises the plan diff renderer for nested
// blocks with NestingMap, where each block instance is labeled by its map
// key (e.g. `root_block_device "a" { ... }`). Cases cover creation,
// attribute change, insertion of a new keyed block, forced replacement of a
// single keyed block, deletion, and — as a contrast case — a NestingList
// sequence whose elements are diffed positionally. Each case is rendered by
// runTestCases and compared verbatim against ExpectedOutput.
func TestResourceChange_nestedMap(t *testing.T) {
	testCases := map[string]testCase{
		"in-place update - creation": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
				})),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingMap,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 + root_block_device "a" {
 + volume_type = "gp2"
 }
 }
`,
		},
		// Unlike NestingSet, a keyed block keeps its identity, so setting
		// new_field renders as "~" on the same "a" block.
		"in-place update - change attr": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.NullVal(cty.String),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingMap,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 ~ root_block_device "a" {
 + new_field = "new_value"
 volume_type = "gp2"
 }
 }
`,
		},
		"in-place update - insertion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.NullVal(cty.String),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.NullVal(cty.String),
					}),
					"b": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingMap,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 root_block_device "a" {
 volume_type = "gp2"
 }
 + root_block_device "b" {
 + new_field = "new_value"
 + volume_type = "gp2"
 }
 }
`,
		},
		// RequiredReplace targets only key "a", so "b" renders unchanged while
		// "a" carries the "# forces replacement" marker.
		"force-new update (whole block)": {
			Action: plans.DeleteThenCreate,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
					}),
					"b": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("standard"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("different"),
					}),
					"b": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("standard"),
					}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "root_block_device"},
				cty.IndexStep{Key: cty.StringVal("a")},
			}),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingMap,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 ~ root_block_device "a" { # forces replacement
 ~ volume_type = "gp2" -> "different"
 }
 root_block_device "b" {
 volume_type = "standard"
 }
 }
`,
		},
		"in-place update - deletion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-BEFORE"),
				"root_block_device": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"volume_type": cty.StringVal("gp2"),
						"new_field": cty.StringVal("new_value"),
					}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-AFTER"),
				"root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"volume_type": cty.String,
					"new_field": cty.String,
				})),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true, Computed: true},
					"ami": {Type: cty.String, Optional: true},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"root_block_device": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"volume_type": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
								"new_field": {
									Type: cty.String,
									Optional: true,
									Computed: true,
								},
							},
						},
						Nesting: configschema.NestingMap,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ ami = "ami-BEFORE" -> "ami-AFTER"
 id = "i-02ae66f368e8518a9"
 - root_block_device "a" {
 - new_field = "new_value" -> null
 - volume_type = "gp2" -> null
 }
 }
`,
		},
		// Contrast case using NestingList: list elements are matched by
		// position, so [x,y] -> [y,z] renders as two positional "~" edits.
		"in-place sequence update - deletion": {
			Action: plans.Update,
			Mode: addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"list": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}),
					cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"list": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}),
					cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}),
				}),
			}),
			RequiredReplace: cty.NewPathSet(),
			Tainted: false,
			Schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"attr": {
									Type: cty.String,
									Required: true,
								},
							},
						},
						Nesting: configschema.NestingList,
					},
				},
			},
			ExpectedOutput: ` # test_instance.example will be updated in-place
 ~ resource "test_instance" "example" {
 ~ list {
 ~ attr = "x" -> "y"
 }
 ~ list {
 ~ attr = "y" -> "z"
 }
 }
`,
		},
	}
	runTestCases(t, testCases)
}
// testCase describes a single table-driven test for rendering a planned
// resource change as a human-readable diff via ResourceChange.
type testCase struct {
Action plans.Action // the planned action (create, update, delete, replace, ...)
Mode addrs.ResourceMode // managed vs. data resource mode
Before cty.Value // prior state value (fixups applied by runTestCases for mistyped nulls/unknowns)
After cty.Value // planned new value (same fixups applied)
Schema *configschema.Block // resource schema used to interpret Before/After
RequiredReplace cty.PathSet // attribute paths that force replacement
Tainted bool // whether the resource is tainted (changes the header line)
ExpectedOutput string // exact rendered diff the test asserts against
}
// runTestCases runs each testCase as a subtest: it encodes the Before and
// After values against the schema's implied type, builds a planned resource
// instance change for test_instance.example, renders it with ResourceChange,
// and compares the output against ExpectedOutput.
func runTestCases(t *testing.T, testCases map[string]testCase) {
	t.Helper()
	// Disable colors so ExpectedOutput can be plain text.
	color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ty := tc.Schema.ImpliedType()

			before, err := plans.NewDynamicValue(normalizeVal(tc.Before, ty), ty)
			if err != nil {
				t.Fatal(err)
			}
			after, err := plans.NewDynamicValue(normalizeVal(tc.After, ty), ty)
			if err != nil {
				t.Fatal(err)
			}

			change := &plans.ResourceInstanceChangeSrc{
				Addr: addrs.Resource{
					Mode: tc.Mode,
					Type: "test_instance",
					Name: "example",
				}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
				ProviderAddr: addrs.AbsProviderConfig{
					Provider: addrs.NewLegacyProvider("test"),
					Module:   addrs.RootModule,
				},
				ChangeSrc: plans.ChangeSrc{
					Action: tc.Action,
					Before: before,
					After:  after,
				},
				RequiredReplace: tc.RequiredReplace,
			}

			output := ResourceChange(change, tc.Tainted, tc.Schema, color)
			if output != tc.ExpectedOutput {
				t.Fatalf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.ExpectedOutput)
			}
		})
	}
}

// normalizeVal applies some fixups to make the test cases a little easier to
// write: a null or unknown fixture value of the wrong type is re-typed to the
// schema's implied type. Any other value is returned unchanged.
func normalizeVal(val cty.Value, ty cty.Type) cty.Value {
	switch {
	case val.IsNull():
		return cty.NullVal(ty) // allow mistyped nulls
	case !val.IsKnown():
		return cty.UnknownVal(ty) // allow mistyped unknowns
	}
	return val
}
command: Add tests for format.OutputChanges
Most of the functionality for rendering output changes is covered by the
tests for ResourceChanges, as they both share the same diff renderer.
This commit adds a few tests to cover some of the output-specific code.
package format
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/plans"
"github.com/mitchellh/colorstring"
"github.com/zclconf/go-cty/cty"
)
// TestResourceChange_primitiveTypes verifies the rendered diff output for
// resources whose schema contains only primitive (string) attributes. It
// covers creation, deletion, in-place update, forced replacement (including
// tainted resources), multi-line string diffs, and sensitive values. Note
// that the ExpectedOutput raw strings are compared byte-for-byte with the
// renderer's output.
func TestResourceChange_primitiveTypes(t *testing.T) {
testCases := map[string]testCase{
"creation": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
}
`,
},
// The literal string "null" must render quoted, not as a null value.
"creation (null string)": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"string": cty.StringVal("null"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ string = "null"
}
`,
},
"creation (null string with extra whitespace)": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"string": cty.StringVal("null "),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ string = "null "
}
`,
},
"deletion": {
Action: plans.Delete,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
}),
After: cty.NullVal(cty.EmptyObject),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be destroyed
- resource "test_instance" "example" {
- id = "i-02ae66f368e8518a9" -> null
}
`,
},
// Empty-string attributes are omitted from deletion diffs entirely.
"deletion (empty string)": {
Action: plans.Delete,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"intentionally_long": cty.StringVal(""),
}),
After: cty.NullVal(cty.EmptyObject),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
"intentionally_long": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be destroyed
- resource "test_instance" "example" {
- id = "i-02ae66f368e8518a9" -> null
}
`,
},
"string in-place update": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
}
`,
},
"string force-new update": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "ami"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement
id = "i-02ae66f368e8518a9"
}
`,
},
// Null-valued attributes that stay null should not appear in the diff.
"string in-place update (null values)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"unchanged": cty.NullVal(cty.String),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"unchanged": cty.NullVal(cty.String),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
"unchanged": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
}
`,
},
// Multi-line strings render as heredoc-style (<<~EOT) line diffs.
"in-place update of multi-line string field": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"more_lines": cty.StringVal(`original
`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"more_lines": cty.StringVal(`original
new line
`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"more_lines": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ more_lines = <<~EOT
original
+ new line
EOT
}
`,
},
"addition of multi-line string field": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"more_lines": cty.NullVal(cty.String),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"more_lines": cty.StringVal(`original
new line
`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"more_lines": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ more_lines = <<~EOT
original
new line
EOT
}
`,
},
"force-new update of multi-line string field": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"more_lines": cty.StringVal(`original
`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"more_lines": cty.StringVal(`original
new line
`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"more_lines": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "more_lines"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ more_lines = <<~EOT # forces replacement
original
+ new line
EOT
}
`,
},
// Sensitive
"creation with sensitive field": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"password": cty.StringVal("top-secret"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
"password": {Type: cty.String, Optional: true, Sensitive: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ password = (sensitive value)
}
`,
},
"update with equal sensitive field": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("blah"),
"str": cty.StringVal("before"),
"password": cty.StringVal("top-secret"),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"str": cty.StringVal("after"),
"password": cty.StringVal("top-secret"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
"str": {Type: cty.String, Optional: true},
"password": {Type: cty.String, Optional: true, Sensitive: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "blah" -> (known after apply)
password = (sensitive value)
~ str = "before" -> "after"
}
`,
},
// tainted resources
"replace tainted resource": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"ami": cty.StringVal("ami-AFTER"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "ami"},
}),
Tainted: true,
ExpectedOutput: ` # test_instance.example is tainted, so must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement
~ id = "i-02ae66f368e8518a9" -> (known after apply)
}
`,
},
// A null before value renders the forcing attribute with a "+" marker.
"force replacement with empty before value": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("name"),
"forced": cty.NullVal(cty.String),
}),
After: cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("name"),
"forced": cty.StringVal("example"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"name": {Type: cty.String, Optional: true},
"forced": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "forced"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
+ forced = "example" # forces replacement
name = "name"
}
`,
},
// Legacy SDK providers use "" where a real null is meant; same rendering.
"force replacement with empty before value legacy": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("name"),
"forced": cty.StringVal(""),
}),
After: cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("name"),
"forced": cty.StringVal("example"),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"name": {Type: cty.String, Optional: true},
"forced": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "forced"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
+ forced = "example" # forces replacement
name = "name"
}
`,
},
}
runTestCases(t, testCases)
}
// TestResourceChange_JSON verifies the rendered diff output for string
// attributes whose values are JSON documents: the renderer parses both
// sides and pretty-prints a structural diff wrapped in jsonencode(...),
// including nested lists/objects, whitespace-only changes, and type changes.
func TestResourceChange_JSON(t *testing.T) {
testCases := map[string]testCase{
"creation": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{
"str": "value",
"list":["a","b", 234, true],
"obj": {"key": "val"}
}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ json_field = jsonencode(
{
+ list = [
+ "a",
+ "b",
+ 234,
+ true,
]
+ obj = {
+ key = "val"
}
+ str = "value"
}
)
}
`,
},
"in-place update of object": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
aaa = "value"
+ bbb = "new_value"
}
)
}
`,
},
"in-place update (from empty tuple)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": []}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": ["value"]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
+ "value",
]
}
)
}
`,
},
"in-place update (to empty tuple)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": ["value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": []}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
- "value",
]
}
)
}
`,
},
"in-place update (tuple of different types)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ aaa = [
42,
~ {
~ foo = "bar" -> "baz"
},
"value",
]
}
)
}
`,
},
"force-new update": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "json_field"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
aaa = "value"
+ bbb = "new_value"
} # forces replacement
)
}
`,
},
// Semantically-equal JSON with different formatting is called out
// explicitly rather than shown as a content diff.
"in-place update (whitespace change)": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa":"value",
"bbb":"another"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode( # whitespace changes
{
aaa = "value"
bbb = "another"
}
)
}
`,
},
"force-new update (whitespace change)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"aaa":"value",
"bbb":"another"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "json_field"},
}),
Tainted: false,
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode( # whitespace changes force replacement
{
aaa = "value"
bbb = "another"
}
)
}
`,
},
// An empty JSON object renders compactly on one line.
"creation (empty)": {
Action: plans.Create,
Mode: addrs.ManagedResourceMode,
Before: cty.NullVal(cty.EmptyObject),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ id = (known after apply)
+ json_field = jsonencode({})
}
`,
},
"JSON list item removal": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`["first","second","third"]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["first","second"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
"first",
"second",
- "third",
]
)
}
`,
},
"JSON list item addition": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`["first","second"]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["first","second","third"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
"first",
"second",
+ "third",
]
)
}
`,
},
"JSON list object addition": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"first":"111"}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"first":"111","second":"222"}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
first = "111"
+ second = "222"
}
)
}
`,
},
"JSON object with nested list": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{
"Statement": ["first"]
}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{
"Statement": ["first", "second"]
}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ Statement = [
"first",
+ "second",
]
}
)
}
`,
},
"JSON list of objects - adding item": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`[{"one": "111"}]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
{
one = "111"
},
+ {
+ two = "222"
},
]
)
}
`,
},
"JSON list of objects - removing item": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ [
{
one = "111"
},
- {
- two = "222"
},
{
three = "333"
},
]
)
}
`,
},
"JSON object with list of objects": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ parent = [
{
one = "111"
},
+ {
+ two = "222"
},
]
}
)
}
`,
},
"JSON object double nested lists": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
~ parent = [
~ {
~ another_list = [
"111",
+ "222",
]
},
]
}
)
}
`,
},
// A top-level type change (object -> tuple) removes the old value and
// adds the new one.
"in-place update from object to tuple": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.UnknownVal(cty.String),
"json_field": cty.StringVal(`["aaa", 42, "something"]`),
}),
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"json_field": {Type: cty.String, Optional: true},
},
},
RequiredReplace: cty.NewPathSet(),
Tainted: false,
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ json_field = jsonencode(
~ {
- aaa = [
- 42,
- {
- foo = "bar"
},
- "value",
]
} -> [
+ "aaa",
+ 42,
+ "something",
]
)
}
`,
},
}
runTestCases(t, testCases)
}
// TestResourceChange_primitiveList exercises the plan-diff renderer for a
// resource schema containing a list-of-strings attribute ("list_field").
// The cases cover: creating the list from null, first addition to an empty
// list, mid-list insertion, an insertion that forces replacement, element
// deletion, empty/null transitions, and unknown elements rendered as
// "(known after apply)". ExpectedOutput is compared verbatim by
// runTestCases, so its exact whitespace and punctuation are significant —
// do not reformat the raw strings.
func TestResourceChange_primitiveList(t *testing.T) {
	testCases := map[string]testCase{
		// Null list -> one-element list: whole attribute shown as added (+).
		"in-place update - creation": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.NullVal(cty.List(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ list_field = [
+ "new-element",
]
}
`,
		},
		// Empty list -> one element: attribute marked ~, element marked +.
		"in-place update - first addition": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
+ "new-element",
]
}
`,
		},
		// New element inserted between two unchanged elements.
		"in-place update - insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
"aaaa",
+ "bbbb",
"cccc",
]
}
`,
		},
		// Same insertion, but list_field is in RequiredReplace: rendered as
		// a replacement (-/+) with a "# forces replacement" annotation.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "list_field"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [ # forces replacement
"aaaa",
+ "bbbb",
"cccc",
]
}
`,
		},
		// First and last elements removed; middle element retained.
		"in-place update - deletion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
- "aaaa",
"bbbb",
- "cccc",
]
}
`,
		},
		// Fresh resource with an explicitly empty list: rendered as "[]".
		"creation - empty list": {
			Action: plans.Create,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ ami = "ami-STATIC"
+ id = (known after apply)
+ list_field = []
}
`,
		},
		// All elements removed but the (empty) list itself remains.
		"in-place update - full to empty": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
- "aaaa",
- "bbbb",
- "cccc",
]
}
`,
		},
		// Null -> empty list counts as adding the attribute ("+ ... = []").
		"in-place update - null to empty": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.StringVal("i-02ae66f368e8518a9"),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.NullVal(cty.List(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":         cty.UnknownVal(cty.String),
				"ami":        cty.StringVal("ami-STATIC"),
				"list_field": cty.ListValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ list_field = []
}
`,
		},
		// Known element replaced by an unknown one: rendered as a remove of
		// the old value plus an added "(known after apply)" placeholder.
		"update to unknown element": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
"aaaa",
- "bbbb",
+ (known after apply),
"cccc",
]
}
`,
		},
		// One element replaced and one inserted, both unknown: two separate
		// "(known after apply)" additions appear in the diff.
		"update - two new unknown elements": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"list_field": cty.ListVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
					cty.UnknownVal(cty.String),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":         {Type: cty.String, Optional: true, Computed: true},
					"ami":        {Type: cty.String, Optional: true},
					"list_field": {Type: cty.List(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ list_field = [
"aaaa",
- "bbbb",
+ (known after apply),
+ (known after apply),
"cccc",
]
}
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_primitiveSet exercises the plan-diff renderer for a
// resource schema containing a set-of-strings attribute ("set_field"). The
// cases cover: creating the set from null, first insertion into an empty
// set, insertion alongside unchanged elements, an insertion that forces
// replacement, element deletion, empty/null transitions, the whole set
// becoming unknown, and a single element becoming unknown. ExpectedOutput
// is compared verbatim by runTestCases, so its exact whitespace and
// punctuation are significant — do not reformat the raw strings.
func TestResourceChange_primitiveSet(t *testing.T) {
	testCases := map[string]testCase{
		// Null set -> one-element set: whole attribute shown as added (+).
		"in-place update - creation": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.NullVal(cty.Set(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ set_field = [
+ "new-element",
]
}
`,
		},
		// Empty set -> one element: attribute marked ~, element marked +.
		"in-place update - first insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
+ "new-element",
]
}
`,
		},
		// New element added to a set that already has unchanged members.
		"in-place update - insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
"aaaa",
+ "bbbb",
"cccc",
]
}
`,
		},
		// Same insertion, but set_field is in RequiredReplace: rendered as
		// a replacement (-/+) with a "# forces replacement" annotation.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "set_field"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [ # forces replacement
"aaaa",
+ "bbbb",
"cccc",
]
}
`,
		},
		// Two elements removed; the remaining member is unchanged.
		"in-place update - deletion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
					cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
- "aaaa",
"bbbb",
- "cccc",
]
}
`,
		},
		// Fresh resource with an explicitly empty set: rendered as "[]".
		"creation - empty set": {
			Action: plans.Create,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ ami = "ami-STATIC"
+ id = (known after apply)
+ set_field = []
}
`,
		},
		// All elements removed but the (empty) set itself remains.
		"in-place update - full to empty set": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			// Explicit for consistency: every other case in this table
			// states Tainted; false is also the zero value, so behavior is
			// unchanged.
			Tainted: false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
- "aaaa",
- "bbbb",
]
}
`,
		},
		// Null -> empty set counts as adding the attribute ("+ ... = []").
		"in-place update - null to empty set": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.NullVal(cty.Set(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.SetValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ set_field = []
}
`,
		},
		// The whole set becomes unknown: old members shown removed, then
		// "-> (known after apply)" for the collection as a whole.
		"in-place update to unknown": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"set_field": cty.UnknownVal(cty.Set(cty.String)),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
- "aaaa",
- "bbbb",
] -> (known after apply)
}
`,
		},
		// A single member becomes unknown: unchanged member kept, removed
		// member marked -, and a ~ "(known after apply)" placeholder added.
		"in-place update to unknown element": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.StringVal("bbbb"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"set_field": cty.SetVal([]cty.Value{
					cty.StringVal("aaaa"),
					cty.UnknownVal(cty.String),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"set_field": {Type: cty.Set(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ set_field = [
"aaaa",
- "bbbb",
~ (known after apply),
]
}
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_map exercises the plan-diff renderer for a resource
// schema containing a map-of-strings attribute ("map_field"). The cases
// cover: creating the map from null, first key insertion into an empty map,
// insertion alongside unchanged keys, an insertion that forces replacement,
// key deletion (rendered with "-> null"), creation with an empty map, and a
// value becoming unknown ("(known after apply)"). ExpectedOutput is
// compared verbatim by runTestCases, so its exact whitespace and
// punctuation are significant — do not reformat the raw strings.
func TestResourceChange_map(t *testing.T) {
	testCases := map[string]testCase{
		// Null map -> one-entry map: whole attribute shown as added (+).
		"in-place update - creation": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"map_field": cty.NullVal(cty.Map(cty.String)),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"new-key": cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
+ map_field = {
+ "new-key" = "new-element"
}
}
`,
		},
		// Empty map -> one entry: attribute marked ~, new key marked +.
		"in-place update - first insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.StringVal("i-02ae66f368e8518a9"),
				"ami":       cty.StringVal("ami-STATIC"),
				"map_field": cty.MapValEmpty(cty.String),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"new-key": cty.StringVal("new-element"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ map_field = {
+ "new-key" = "new-element"
}
}
`,
		},
		// New key inserted between two unchanged keys.
		"in-place update - insertion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ map_field = {
"a" = "aaaa"
+ "b" = "bbbb"
"c" = "cccc"
}
}
`,
		},
		// Same insertion, but map_field is in RequiredReplace: rendered as
		// a replacement (-/+) with a "# forces replacement" annotation.
		"force-new update - insertion": {
			Action: plans.DeleteThenCreate,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(cty.Path{
				cty.GetAttrStep{Name: "map_field"},
			}),
			Tainted: false,
			ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ map_field = { # forces replacement
"a" = "aaaa"
+ "b" = "bbbb"
"c" = "cccc"
}
}
`,
		},
		// Two keys removed; removed map entries render with "-> null".
		"in-place update - deletion": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"b": cty.StringVal("bbbb"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ map_field = {
- "a" = "aaaa" -> null
"b" = "bbbb"
- "c" = "cccc" -> null
}
}
`,
		},
		// Fresh resource with an explicitly empty map: rendered as "{}".
		"creation - empty": {
			Action: plans.Create,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.NullVal(cty.EmptyObject),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":        cty.UnknownVal(cty.String),
				"ami":       cty.StringVal("ami-STATIC"),
				"map_field": cty.MapValEmpty(cty.String),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be created
+ resource "test_instance" "example" {
+ ami = "ami-STATIC"
+ id = (known after apply)
+ map_field = {}
}
`,
		},
		// A value becomes unknown: its entry marked ~ with
		// `old -> (known after apply)`.
		"update to unknown element": {
			Action: plans.Update,
			Mode:   addrs.ManagedResourceMode,
			Before: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.StringVal("i-02ae66f368e8518a9"),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.StringVal("bbbb"),
					"c": cty.StringVal("cccc"),
				}),
			}),
			After: cty.ObjectVal(map[string]cty.Value{
				"id":  cty.UnknownVal(cty.String),
				"ami": cty.StringVal("ami-STATIC"),
				"map_field": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("aaaa"),
					"b": cty.UnknownVal(cty.String),
					"c": cty.StringVal("cccc"),
				}),
			}),
			Schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id":        {Type: cty.String, Optional: true, Computed: true},
					"ami":       {Type: cty.String, Optional: true},
					"map_field": {Type: cty.Map(cty.String), Optional: true},
				},
			},
			RequiredReplace: cty.NewPathSet(),
			Tainted:         false,
			ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
ami = "ami-STATIC"
~ id = "i-02ae66f368e8518a9" -> (known after apply)
~ map_field = {
"a" = "aaaa"
~ "b" = "bbbb" -> (known after apply)
"c" = "cccc"
}
}
`,
		},
	}
	runTestCases(t, testCases)
}
// TestResourceChange_nestedList verifies the human-readable diff rendering for
// resource schemas that contain nested blocks with NestingList. Each table
// entry supplies before/after values, a schema, and the exact expected
// rendered output (including the +/-/~ change markers and "forces replacement"
// annotations); runTestCases performs the comparison.
func TestResourceChange_nestedList(t *testing.T) {
testCases := map[string]testCase{
// No change inside the block: it renders without a change marker.
"in-place update - equal": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
root_block_device {
volume_type = "gp2"
}
}
`,
},
// Empty list -> one element whose attribute is null: block shown as "+ ... {}".
"in-place update - creation": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
})),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.NullVal(cty.String),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device {}
}
`,
},
// Empty list -> one fully-populated element: block and attribute shown added.
"in-place update - first insertion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
})),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device {
+ volume_type = "gp2"
}
}
`,
},
// A new attribute appears inside an existing block element.
"in-place update - insertion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.NullVal(cty.String),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
~ root_block_device {
+ new_field = "new_value"
volume_type = "gp2"
}
}
`,
},
// RequiredReplace targets an attribute inside the block: the attribute
// line carries the "# forces replacement" annotation.
"force-new update (inside block)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("different"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "root_block_device"},
cty.IndexStep{Key: cty.NumberIntVal(0)},
cty.GetAttrStep{Name: "volume_type"},
}),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
~ root_block_device {
~ volume_type = "gp2" -> "different" # forces replacement
}
}
`,
},
// RequiredReplace targets the block itself: the block header line carries
// the "# forces replacement" annotation instead of the attribute.
"force-new update (whole block)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("different"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "root_block_device"},
}),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
~ root_block_device { # forces replacement
~ volume_type = "gp2" -> "different"
}
}
`,
},
// Block removed entirely: each attribute renders as "-> null".
"in-place update - deletion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
"new_field": cty.String,
})),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
- root_block_device {
- new_field = "new_value" -> null
- volume_type = "gp2" -> null
}
}
`,
},
// DynamicPseudoType attribute: tuple-typed block values with differing
// element types (string and bool) both render correctly.
"with dynamically-typed attribute": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"block": cty.EmptyTupleVal,
}),
After: cty.ObjectVal(map[string]cty.Value{
"block": cty.TupleVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"attr": cty.StringVal("foo"),
}),
cty.ObjectVal(map[string]cty.Value{
"attr": cty.True,
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
BlockTypes: map[string]*configschema.NestedBlock{
"block": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"attr": {Type: cty.DynamicPseudoType, Optional: true},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
+ block {
+ attr = "foo"
}
+ block {
+ attr = true
}
}
`,
},
}
runTestCases(t, testCases)
}
// TestResourceChange_nestedSet verifies diff rendering for nested blocks with
// NestingSet. Because set elements have no stable identity, a modified
// element renders as a remove of the old element plus an add of the new one,
// rather than an in-place "~" update.
func TestResourceChange_nestedSet(t *testing.T) {
testCases := map[string]testCase{
// Empty set -> one element: block rendered as added.
"in-place update - creation": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
})),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingSet,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device {
+ volume_type = "gp2"
}
}
`,
},
// Changing an attribute of a set element renders as add-new + remove-old.
"in-place update - insertion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.NullVal(cty.String),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingSet,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device {
+ new_field = "new_value"
+ volume_type = "gp2"
}
- root_block_device {
- volume_type = "gp2" -> null
}
}
`,
},
// RequiredReplace on the whole set: both the added and removed element
// headers carry "# forces replacement".
"force-new update (whole block)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("different"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "root_block_device"},
}),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingSet,
},
},
},
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device { # forces replacement
+ volume_type = "different"
}
- root_block_device { # forces replacement
- volume_type = "gp2" -> null
}
}
`,
},
// Set emptied: the removed element's attributes render as "-> null".
"in-place update - deletion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.SetVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
"new_field": cty.String,
})),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingSet,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
- root_block_device {
- new_field = "new_value" -> null
- volume_type = "gp2" -> null
}
}
`,
},
}
runTestCases(t, testCases)
}
// TestResourceChange_nestedMap verifies diff rendering for nested blocks with
// NestingMap, where each block instance is labeled with its map key (e.g.
// `root_block_device "a"`). It also includes one NestingList case covering
// positional re-alignment when a list element is removed.
func TestResourceChange_nestedMap(t *testing.T) {
testCases := map[string]testCase{
// Empty map -> one keyed element: block rendered as added under its key.
"in-place update - creation": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
})),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingMap,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
+ root_block_device "a" {
+ volume_type = "gp2"
}
}
`,
},
// An attribute changes inside an existing keyed element.
"in-place update - change attr": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.NullVal(cty.String),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingMap,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
~ root_block_device "a" {
+ new_field = "new_value"
volume_type = "gp2"
}
}
`,
},
// A new key is added alongside an unchanged one; only "b" is marked added.
"in-place update - insertion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.NullVal(cty.String),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.NullVal(cty.String),
}),
"b": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingMap,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
root_block_device "a" {
volume_type = "gp2"
}
+ root_block_device "b" {
+ new_field = "new_value"
+ volume_type = "gp2"
}
}
`,
},
// RequiredReplace targets only key "a": its header carries the
// annotation while "b" renders unchanged.
"force-new update (whole block)": {
Action: plans.DeleteThenCreate,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
}),
"b": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("standard"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("different"),
}),
"b": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("standard"),
}),
}),
}),
RequiredReplace: cty.NewPathSet(cty.Path{
cty.GetAttrStep{Name: "root_block_device"},
cty.IndexStep{Key: cty.StringVal("a")},
}),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingMap,
},
},
},
ExpectedOutput: ` # test_instance.example must be replaced
-/+ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
~ root_block_device "a" { # forces replacement
~ volume_type = "gp2" -> "different"
}
root_block_device "b" {
volume_type = "standard"
}
}
`,
},
// Map emptied: removed element's attributes render as "-> null".
"in-place update - deletion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-BEFORE"),
"root_block_device": cty.MapVal(map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"volume_type": cty.StringVal("gp2"),
"new_field": cty.StringVal("new_value"),
}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("i-02ae66f368e8518a9"),
"ami": cty.StringVal("ami-AFTER"),
"root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{
"volume_type": cty.String,
"new_field": cty.String,
})),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"root_block_device": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"volume_type": {
Type: cty.String,
Optional: true,
Computed: true,
},
"new_field": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
},
Nesting: configschema.NestingMap,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ ami = "ami-BEFORE" -> "ami-AFTER"
id = "i-02ae66f368e8518a9"
- root_block_device "a" {
- new_field = "new_value" -> null
- volume_type = "gp2" -> null
}
}
`,
},
// List elements compared positionally: [x,y] -> [y,z] shows two updates,
// not a delete of x and an insert of z.
"in-place sequence update - deletion": {
Action: plans.Update,
Mode: addrs.ManagedResourceMode,
Before: cty.ObjectVal(map[string]cty.Value{
"list": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}),
cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}),
}),
}),
After: cty.ObjectVal(map[string]cty.Value{
"list": cty.ListVal([]cty.Value{
cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}),
cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}),
}),
}),
RequiredReplace: cty.NewPathSet(),
Tainted: false,
Schema: &configschema.Block{
BlockTypes: map[string]*configschema.NestedBlock{
"list": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"attr": {
Type: cty.String,
Required: true,
},
},
},
Nesting: configschema.NestingList,
},
},
},
ExpectedOutput: ` # test_instance.example will be updated in-place
~ resource "test_instance" "example" {
~ list {
~ attr = "x" -> "y"
}
~ list {
~ attr = "y" -> "z"
}
}
`,
},
}
runTestCases(t, testCases)
}
// testCase describes one resource-diff rendering scenario consumed by
// runTestCases: the planned action, the before/after values, the schema they
// conform to, and the exact rendered output expected from ResourceChange.
type testCase struct {
Action plans.Action // planned change action (Create, Update, DeleteThenCreate, ...)
Mode addrs.ResourceMode // managed vs. data resource mode for the test address
Before cty.Value // prior state value (null/unknown placeholders are normalized by runTestCases)
After cty.Value // planned new value (same normalization applies)
Schema *configschema.Block // schema both values must conform to
RequiredReplace cty.PathSet // paths whose change forces replacement ("# forces replacement" markers)
Tainted bool // whether the instance is tainted, affecting the action reason line
ExpectedOutput string // exact rendered diff expected from ResourceChange
}
// runTestCases renders every testCase through ResourceChange using a
// color-disabled Colorize, and fails the corresponding subtest when the
// rendered diff differs from ExpectedOutput in any way.
func runTestCases(t *testing.T, testCases map[string]testCase) {
color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
ty := tc.Schema.ImpliedType()
// Fixture convenience: a bare null/unknown in a test case is allowed
// to be mistyped, so coerce it to the schema's implied type here.
normalize := func(v cty.Value) cty.Value {
switch {
case v.IsNull():
return cty.NullVal(ty)
case !v.IsKnown():
return cty.UnknownVal(ty)
}
return v
}
before, err := plans.NewDynamicValue(normalize(tc.Before), ty)
if err != nil {
t.Fatal(err)
}
after, err := plans.NewDynamicValue(normalize(tc.After), ty)
if err != nil {
t.Fatal(err)
}
change := &plans.ResourceInstanceChangeSrc{
Addr: addrs.Resource{
Mode: tc.Mode,
Type: "test_instance",
Name: "example",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
ProviderAddr: addrs.AbsProviderConfig{
Provider: addrs.NewLegacyProvider("test"),
Module: addrs.RootModule,
},
ChangeSrc: plans.ChangeSrc{
Action: tc.Action,
Before: before,
After: after,
},
RequiredReplace: tc.RequiredReplace,
}
if got := ResourceChange(change, tc.Tainted, tc.Schema, color); got != tc.ExpectedOutput {
t.Fatalf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", got, tc.ExpectedOutput)
}
})
}
}
// TestOutputChanges verifies the rendering of root-module output diffs,
// covering addition, removal, scalar change, list element insertion, and the
// masking of sensitive values as "(sensitive value)".
func TestOutputChanges(t *testing.T) {
color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true}
testCases := map[string]struct {
changes []*plans.OutputChangeSrc
output string
}{
"new output value": {
[]*plans.OutputChangeSrc{
outputChange(
"foo",
cty.NullVal(cty.DynamicPseudoType),
cty.StringVal("bar"),
false,
),
},
`
+ foo = "bar"`,
},
"removed output": {
[]*plans.OutputChangeSrc{
outputChange(
"foo",
cty.StringVal("bar"),
cty.NullVal(cty.DynamicPseudoType),
false,
),
},
`
- foo = "bar" -> null`,
},
"single string change": {
[]*plans.OutputChangeSrc{
outputChange(
"foo",
cty.StringVal("bar"),
cty.StringVal("baz"),
false,
),
},
`
~ foo = "bar" -> "baz"`,
},
// Only the inserted element is marked; surrounding elements keep their
// positions without change markers.
"element added to list": {
[]*plans.OutputChangeSrc{
outputChange(
"foo",
cty.ListVal([]cty.Value{
cty.StringVal("alpha"),
cty.StringVal("beta"),
cty.StringVal("delta"),
cty.StringVal("epsilon"),
}),
cty.ListVal([]cty.Value{
cty.StringVal("alpha"),
cty.StringVal("beta"),
cty.StringVal("gamma"),
cty.StringVal("delta"),
cty.StringVal("epsilon"),
}),
false,
),
},
`
~ foo = [
"alpha",
"beta",
+ "gamma",
"delta",
"epsilon",
]`,
},
// The sensitive output "b" must not leak either value.
"multiple outputs changed, one sensitive": {
[]*plans.OutputChangeSrc{
outputChange(
"a",
cty.NumberIntVal(1),
cty.NumberIntVal(2),
false,
),
outputChange(
"b",
cty.StringVal("hunter2"),
cty.StringVal("correct-horse-battery-staple"),
true,
),
outputChange(
"c",
cty.BoolVal(false),
cty.BoolVal(true),
false,
),
},
`
~ a = 1 -> 2
~ b = (sensitive value)
~ c = false -> true`,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
output := OutputChanges(tc.changes, color)
if output != tc.output {
t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output)
}
})
}
}
// outputChange builds an encoded OutputChangeSrc for the named root output,
// transitioning from before to after with the given sensitivity flag.
// Encoding failures panic, which is acceptable in a test-only helper.
func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc {
addr := addrs.AbsOutputValue{
OutputValue: addrs.OutputValue{Name: name},
}
oc := &plans.OutputChange{
Addr: addr,
Change: plans.Change{
Before: before,
After: after,
},
Sensitive: sensitive,
}
src, err := oc.Encode()
if err != nil {
panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err))
}
return src
}
|
// Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
// Copyright (c) 2006-2015 Sippy Software, Inc. All rights reserved.
// Copyright (c) 2015 Andrii Pylypenko. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sippy_sdp
import (
"crypto/rand"
"errors"
"net"
"strings"
"strconv"
"sync/atomic"
"sippy/net"
)
// _sdp_session_id is the process-wide SDP session-id counter. It is seeded
// once at startup with 48 bits of randomness and then advanced atomically by
// NewSdpOrigin, so concurrent callers get distinct session ids.
var _sdp_session_id int64

// init seeds _sdp_session_id from 6 random bytes (48 bits), which keeps the
// value non-negative and leaves ample headroom in the int64 for increments.
func init() {
	buf := make([]byte, 6)
	// The original code ignored rand.Read's error; if the system's secure
	// randomness source is unavailable there is no sane fallback for
	// session-id seeding, so fail loudly instead of starting at zero.
	if _, err := rand.Read(buf); err != nil {
		panic("sippy_sdp: cannot seed SDP session id: " + err.Error())
	}
	for i, b := range buf {
		_sdp_session_id |= int64(b) << (uint(i) * 8)
	}
}
// SdpOrigin represents the parts of an SDP origin ("o=") field:
// "<username> <sess-id> <sess-version> <nettype> <addrtype> <address>".
type SdpOrigin struct {
username string // originator's username, "-" when unspecified
session_id string // session identifier, kept as the original string token
version int64 // session version, incremented on SDP modification
network_type string // network type, e.g. "IN"
address_type string // address type, "IP4" or "IP6"
address string // originating address literal
}
// ParseSdpOrigin parses the body of an SDP "o=" field of the form
// "<username> <sess-id> <sess-version> <nettype> <addrtype> <address>".
// It returns an error when the body does not split into exactly six
// whitespace-separated tokens or when the version token is not an integer.
func ParseSdpOrigin(body string) (*SdpOrigin, error) {
parts := strings.Fields(body)
if len(parts) != 6 {
return nil, errors.New("Malformed field: " + body)
}
version, err := strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return nil, err
}
origin := &SdpOrigin{
username : parts[0],
session_id : parts[1],
version : version,
network_type : parts[3],
address_type : parts[4],
address : parts[5],
}
return origin, nil
}
// NewSdpOrigin creates an SDP origin ("o=") description for the given literal
// IP address. The session id comes from the process-wide atomic counter and
// doubles as the initial version. It returns an error when address is not a
// valid IP literal.
func NewSdpOrigin(address string) (*SdpOrigin, error) {
	ip := net.ParseIP(address)
	if ip == nil {
		return nil, errors.New("The address is not IP address: " + address)
	}
	address_type := "IP4"
	// BUG FIX: the original tested ip.To16() != nil, which is true for every
	// parseable address (To16 also returns the 16-byte form of an IPv4
	// address), so everything was labeled "IP6". To4() returns nil only for
	// genuine IPv6 addresses.
	if ip.To4() == nil {
		address_type = "IP6"
	}
	sid := atomic.AddInt64(&_sdp_session_id, 1)
	self := &SdpOrigin{
		username:     "-",
		session_id:   strconv.FormatInt(sid, 10),
		version:      sid,
		network_type: "IN",
		address_type: address_type,
		address:      address,
	}
	return self, nil
}
// String renders the origin as an SDP "o=" field value:
// "<username> <session-id> <version> <net-type> <addr-type> <address>".
func (self *SdpOrigin) String() string {
    version := strconv.FormatInt(self.version, 10)
    return strings.Join([]string{ self.username, self.session_id, version, self.network_type, self.address_type, self.address }, " ")
}
// LocalStr renders the origin for a given local host:port.
// NOTE(review): hostport is currently unused and the output is identical to
// String(); presumably the address was meant to be rewritten — confirm intent.
func (self *SdpOrigin) LocalStr(hostport *sippy_net.HostPort) string {
    version := strconv.FormatInt(self.version, 10)
    return strings.Join([]string{ self.username, self.session_id, version, self.network_type, self.address_type, self.address }, " ")
}
// GetCopy returns a shallow copy of the origin; nil-safe (returns nil for a
// nil receiver). All fields are plain strings/ints, so shallow copy suffices.
func (self *SdpOrigin) GetCopy() *SdpOrigin {
    if self == nil {
        return nil
    }
    var ret SdpOrigin = *self
    return &ret
}
// IncVersion bumps the session version; callers do this when the SDP body
// changes within the same session. Not safe for concurrent use.
func (self *SdpOrigin) IncVersion() {
    self.version++
}
// GetSessionId returns the session id token verbatim.
func (self *SdpOrigin) GetSessionId() string {
    return self.session_id
}
// GetVersion returns the current session version.
func (self *SdpOrigin) GetVersion() int64 {
    return self.version
}
Fix IPv4/IPv6 detection in NewSdpOrigin: use To4() == nil to detect IPv6, since To16() is non-nil for IPv4 addresses as well.
// Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
// Copyright (c) 2006-2015 Sippy Software, Inc. All rights reserved.
// Copyright (c) 2015 Andrii Pylypenko. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sippy_sdp
import (
"crypto/rand"
"errors"
"net"
"strings"
"strconv"
"sync/atomic"
"sippy/net"
)
// _sdp_session_id is a process-wide counter used to mint SDP session ids,
// seeded once at startup with 48 bits of cryptographic randomness.
var _sdp_session_id int64
// init seeds _sdp_session_id from 6 random bytes.
// NOTE(review): the error from rand.Read is ignored; on failure the seed is
// zero, which only weakens collision resistance across processes.
func init() {
    buf := make([]byte, 6)
    rand.Read(buf)
    for i := 0; i < len(buf); i++ {
        _sdp_session_id |= int64(buf[i]) << (uint(i) * 8)
    }
}
// SdpOrigin represents the SDP "o=" (origin) field, e.g.
// "- 123456 1 IN IP4 10.0.0.1" (RFC 4566 section 5.2).
type SdpOrigin struct {
    username string // originator's username, "-" when not meaningful
    session_id string // session id, kept verbatim as a string
    version int64 // session version, bumped when the SDP changes
    network_type string // network type, normally "IN"
    address_type string // address family: "IP4" or "IP6"
    address string // originator's address
}
// ParseSdpOrigin parses the value of an SDP "o=" field. The field must
// consist of exactly six whitespace-separated tokens, the third of which
// (the version) must be a decimal integer.
func ParseSdpOrigin(body string) (*SdpOrigin, error) {
	tokens := strings.Fields(body)
	if len(tokens) != 6 {
		return nil, errors.New("Malformed field: " + body)
	}
	ver, err := strconv.ParseInt(tokens[2], 10, 64)
	if err != nil {
		return nil, err
	}
	origin := new(SdpOrigin)
	origin.username = tokens[0]
	origin.session_id = tokens[1]
	origin.version = ver
	origin.network_type = tokens[3]
	origin.address_type = tokens[4]
	origin.address = tokens[5]
	return origin, nil
}
// NewSdpOrigin creates an origin field for the given literal IP address,
// detecting the address family automatically. Non-IP strings are rejected.
func NewSdpOrigin(address string) (*SdpOrigin, error) {
    ip := net.ParseIP(address)
    if ip == nil {
        return nil, errors.New("The address is not IP address: " + address)
    }
    address_type := "IP4"
    // To4() returns nil exactly when the address cannot be represented as
    // IPv4, which is the correct IPv6 test (To16 is non-nil for IPv4 too).
    if ip.To4() == nil {
        address_type = "IP6"
    }
    // The freshly allocated session id doubles as the initial version.
    sid := atomic.AddInt64(&_sdp_session_id, 1)
    self := &SdpOrigin {
        username : "-",
        session_id : strconv.FormatInt(sid, 10),
        network_type : "IN",
        address_type : address_type,
        address : address,
    }
    self.version = sid
    return self, nil
}
// String renders the origin as an SDP "o=" field value:
// "<username> <session-id> <version> <net-type> <addr-type> <address>".
func (self *SdpOrigin) String() string {
    version := strconv.FormatInt(self.version, 10)
    return strings.Join([]string{ self.username, self.session_id, version, self.network_type, self.address_type, self.address }, " ")
}
// LocalStr renders the origin for a given local host:port.
// NOTE(review): hostport is currently unused and the output is identical to
// String(); presumably the address was meant to be rewritten — confirm intent.
func (self *SdpOrigin) LocalStr(hostport *sippy_net.HostPort) string {
    version := strconv.FormatInt(self.version, 10)
    return strings.Join([]string{ self.username, self.session_id, version, self.network_type, self.address_type, self.address }, " ")
}
// GetCopy returns a shallow copy of the origin; nil-safe (returns nil for a
// nil receiver). All fields are plain strings/ints, so shallow copy suffices.
func (self *SdpOrigin) GetCopy() *SdpOrigin {
    if self == nil {
        return nil
    }
    var ret SdpOrigin = *self
    return &ret
}
// IncVersion bumps the session version; callers do this when the SDP body
// changes within the same session. Not safe for concurrent use.
func (self *SdpOrigin) IncVersion() {
    self.version++
}
// GetSessionId returns the session id token verbatim.
func (self *SdpOrigin) GetSessionId() string {
    return self.session_id
}
// GetVersion returns the current session version.
func (self *SdpOrigin) GetVersion() int64 {
    return self.version
}
|
package consul
import (
"errors"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/ty/fun"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/hashicorp/consul/api"
)
const (
// DefaultWatchWaitTime is the duration to wait when polling consul
DefaultWatchWaitTime = 15 * time.Second
)
var _ provider.Provider = (*CatalogProvider)(nil)
// CatalogProvider holds configurations of the Consul catalog provider.
type CatalogProvider struct {
    provider.BaseProvider `mapstructure:",squash"`
    Endpoint string `description:"Consul server endpoint"`
    Domain string `description:"Default domain used"`
    Prefix string `description:"Prefix used for Consul catalog tags"`
    client *api.Client // Consul API client, created in Provide
}
// serviceUpdate couples a service name with the merged set of catalog tags
// found across its healthy nodes.
type serviceUpdate struct {
    ServiceName string
    Attributes []string
}
// catalogUpdate is the unit handed to the config template: one service plus
// the healthy nodes backing it.
type catalogUpdate struct {
    Service *serviceUpdate
    Nodes []*api.ServiceEntry
}
// nodeSorter implements sort.Interface over service entries so that node
// lists have a deterministic order regardless of Consul's response order.
type nodeSorter []*api.ServiceEntry

// Len reports the number of entries.
func (a nodeSorter) Len() int {
	return len(a)
}

// Swap exchanges two entries.
func (a nodeSorter) Swap(i int, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders entries by lowercase service name, then service address,
// then node address, and finally service port as the tie-breaker.
func (a nodeSorter) Less(i int, j int) bool {
	left, right := a[i], a[j]
	leftName := strings.ToLower(left.Service.Service)
	rightName := strings.ToLower(right.Service.Service)
	switch {
	case leftName != rightName:
		return leftName < rightName
	case left.Service.Address != right.Service.Address:
		return left.Service.Address < right.Service.Address
	case left.Node.Address != right.Node.Address:
		return left.Node.Address < right.Node.Address
	default:
		return left.Service.Port < right.Service.Port
	}
}
// watchServices long-polls the Consul catalog for the service list and emits
// each changed snapshot (service name -> tags) on the returned channel.
// The channel is closed when stopCh is closed or a query fails.
// NOTE(review): this watches only the catalog index, so pure health-state
// changes of existing services do not wake the watcher.
func (p *CatalogProvider) watchServices(stopCh <-chan struct{}) <-chan map[string][]string {
    watchCh := make(chan map[string][]string)
    catalog := p.client.Catalog()
    safe.Go(func() {
        defer close(watchCh)
        // WaitTime bounds each blocking query; WaitIndex makes it blocking.
        opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
        for {
            // Non-blocking stop check between polls.
            select {
            case <-stopCh:
                return
            default:
            }
            data, meta, err := catalog.Services(opts)
            if err != nil {
                log.WithError(err).Errorf("Failed to list services")
                return
            }
            // If LastIndex didn't change then it means `Get` returned
            // because of the WaitTime and the key didn't changed.
            if opts.WaitIndex == meta.LastIndex {
                continue
            }
            opts.WaitIndex = meta.LastIndex
            if data != nil {
                watchCh <- data
            }
        }
    })
    return watchCh
}
// healthyNodes queries Consul for the passing-only nodes of a service,
// filters out nodes whose tags fail the provider constraints, and returns
// the service together with the union of tags across the surviving nodes.
func (p *CatalogProvider) healthyNodes(service string) (catalogUpdate, error) {
	health := p.client.Health()
	opts := &api.QueryOptions{}
	// `true` requests only instances whose health checks are passing.
	data, _, err := health.Service(service, "", true, opts)
	if err != nil {
		// Use a format verb instead of concatenating into the format string:
		// a '%' in the service name would otherwise corrupt the log output.
		log.WithError(err).Errorf("Failed to fetch details of %s", service)
		return catalogUpdate{}, err
	}
	// Drop nodes that fail the configured constraints.
	nodes := fun.Filter(func(node *api.ServiceEntry) bool {
		constraintTags := p.getContraintTags(node.Service.Tags)
		ok, failingConstraint := p.MatchConstraints(constraintTags)
		if !ok && failingConstraint != nil {
			log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String())
		}
		return ok
	}, data).([]*api.ServiceEntry)
	//Merge tags of nodes matching constraints, in a single slice.
	tags := fun.Foldl(func(node *api.ServiceEntry, set []string) []string {
		return fun.Keys(fun.Union(
			fun.Set(set),
			fun.Set(node.Service.Tags),
		).(map[string]bool)).([]string)
	}, []string{}, nodes).([]string)
	return catalogUpdate{
		Service: &serviceUpdate{
			ServiceName: service,
			Attributes:  tags,
		},
		Nodes: nodes,
	}, nil
}
// getEntryPoints splits a comma-separated entry-point list into a slice.
func (p *CatalogProvider) getEntryPoints(list string) []string {
    return strings.Split(list, ",")
}
// getBackend derives the backend name from the lowercase service name.
func (p *CatalogProvider) getBackend(node *api.ServiceEntry) string {
    return strings.ToLower(node.Service.Service)
}
// getFrontendRule returns the tag-supplied "frontend.rule" attribute when
// present, otherwise the default Host rule "<service>.<domain>".
func (p *CatalogProvider) getFrontendRule(service serviceUpdate) string {
    customFrontendRule := p.getAttribute("frontend.rule", service.Attributes, "")
    if customFrontendRule != "" {
        return customFrontendRule
    }
    return "Host:" + service.ServiceName + "." + p.Domain
}
// getBackendAddress prefers the service-level address, falling back to the
// node address when the service did not register one.
func (p *CatalogProvider) getBackendAddress(node *api.ServiceEntry) string {
    if node.Service.Address != "" {
        return node.Service.Address
    }
    return node.Node.Address
}
// getBackendName builds a unique backend identifier from the lowercase
// service name, address, port, normalized tags, and a trailing index, with
// "." and "=" rewritten to "-" so the name is safe for config keys.
func (p *CatalogProvider) getBackendName(node *api.ServiceEntry, index int) string {
	parts := []string{
		strings.ToLower(node.Service.Service),
		node.Service.Address,
		strconv.Itoa(node.Service.Port),
	}
	for _, tag := range node.Service.Tags {
		parts = append(parts, provider.Normalize(tag))
	}
	// unique int at the end
	parts = append(parts, strconv.Itoa(index))
	name := strings.Join(parts, "--")
	name = strings.Replace(name, ".", "-", -1)
	name = strings.Replace(name, "=", "-", -1)
	return name
}
// getAttribute scans the service tags for "<prefix>.<name>=<value>" (name
// matched case-insensitively) and returns the value, or defaultValue when no
// such tag exists.
func (p *CatalogProvider) getAttribute(name string, tags []string, defaultValue string) string {
	// Hoist the prefix out of the loop; use HasPrefix/EqualFold instead of
	// Index(...)==0 and double ToLower — same matching, idiomatic and cheaper.
	prefix := p.Prefix + "."
	for _, tag := range tags {
		if strings.HasPrefix(strings.ToLower(tag), prefix) {
			if kv := strings.SplitN(tag[len(prefix):], "=", 2); len(kv) == 2 && strings.EqualFold(kv[0], name) {
				return kv[1]
			}
		}
	}
	return defaultValue
}
// getContraintTags extracts the comma-separated values of every
// "<prefix>.tags=" tag into one flat slice.
// NOTE(review): "Contraint" is a typo for "Constraint", kept because callers
// use this name.
func (p *CatalogProvider) getContraintTags(tags []string) []string {
	var list []string
	// HasPrefix replaces the non-idiomatic Index(...)==0 check.
	prefix := p.Prefix + ".tags="
	for _, tag := range tags {
		if strings.HasPrefix(strings.ToLower(tag), prefix) {
			splitedTags := strings.Split(tag[len(prefix):], ",")
			list = append(list, splitedTags...)
		}
	}
	return list
}
// buildConfig renders the traefik configuration from the catalog snapshot by
// feeding services and their (stably sorted) nodes into the consul template.
// Returns nil when template rendering fails (the error is only logged).
func (p *CatalogProvider) buildConfig(catalog []catalogUpdate) *types.Configuration {
    var FuncMap = template.FuncMap{
        "getBackend": p.getBackend,
        "getFrontendRule": p.getFrontendRule,
        "getBackendName": p.getBackendName,
        "getBackendAddress": p.getBackendAddress,
        "getAttribute": p.getAttribute,
        "getEntryPoints": p.getEntryPoints,
        "hasMaxconnAttributes": p.hasMaxconnAttributes,
    }
    allNodes := []*api.ServiceEntry{}
    services := []*serviceUpdate{}
    for _, info := range catalog {
        // A service is included once as soon as any of its nodes is not
        // explicitly disabled via the "enable=false" tag.
        for _, node := range info.Nodes {
            isEnabled := p.getAttribute("enable", node.Service.Tags, "true")
            // NOTE(review): len(info.Nodes) > 0 is always true inside this
            // range loop; it is redundant but harmless.
            if isEnabled != "false" && len(info.Nodes) > 0 {
                services = append(services, info.Service)
                allNodes = append(allNodes, info.Nodes...)
                break
            }
        }
    }
    // Ensure a stable ordering of nodes so that identical configurations may be detected
    sort.Sort(nodeSorter(allNodes))
    templateObjects := struct {
        Services []*serviceUpdate
        Nodes []*api.ServiceEntry
    }{
        Services: services,
        Nodes: allNodes,
    }
    configuration, err := p.GetConfiguration("templates/consul_catalog.tmpl", FuncMap, templateObjects)
    if err != nil {
        log.WithError(err).Error("Failed to create config")
    }
    return configuration
}
// hasMaxconnAttributes reports whether both maxconn attributes (amount and
// extractorfunc) are present in the tag set — both are required for a
// usable maxconn configuration.
func (p *CatalogProvider) hasMaxconnAttributes(attributes []string) bool {
	amount := p.getAttribute("backend.maxconn.amount", attributes, "")
	extractorfunc := p.getAttribute("backend.maxconn.extractorfunc", attributes, "")
	// Return the condition directly instead of if-true/return-false.
	return amount != "" && extractorfunc != ""
}
// getNodes resolves each service in the catalog index to its healthy nodes,
// skipping duplicate (case-folded) names and names containing spaces.
// Services with no matching healthy nodes are silently omitted.
func (p *CatalogProvider) getNodes(index map[string][]string) ([]catalogUpdate, error) {
    visited := make(map[string]bool)
    nodes := []catalogUpdate{}
    for service := range index {
        name := strings.ToLower(service)
        // Names with spaces cannot be valid service names; visited guards
        // against case-variant duplicates in the index.
        if !strings.Contains(name, " ") && !visited[name] {
            visited[name] = true
            log.WithFields(logrus.Fields{
                "service": name,
            }).Debug("Fetching service")
            healthy, err := p.healthyNodes(name)
            if err != nil {
                return nil, err
            }
            // healthy.Nodes can be empty if constraints do not match, without throwing error
            if healthy.Service != nil && len(healthy.Nodes) > 0 {
                nodes = append(nodes, healthy)
            }
        }
    }
    return nodes, nil
}
// watch consumes service-list snapshots from watchServices, rebuilds the
// configuration for each one, and pushes it to configurationChan. Returns
// nil on stop, or an error when the watch channel closes or node resolution
// fails (the caller retries with backoff).
func (p *CatalogProvider) watch(configurationChan chan<- types.ConfigMessage, stop chan bool) error {
    stopCh := make(chan struct{})
    serviceCatalog := p.watchServices(stopCh)
    // Closing stopCh terminates the watchServices goroutine on any return.
    defer close(stopCh)
    for {
        select {
        case <-stop:
            return nil
        case index, ok := <-serviceCatalog:
            // A closed channel means the watcher hit a Consul error.
            if !ok {
                return errors.New("Consul service list nil")
            }
            log.Debug("List of services changed")
            nodes, err := p.getNodes(index)
            if err != nil {
                return err
            }
            configuration := p.buildConfig(nodes)
            configurationChan <- types.ConfigMessage{
                ProviderName: "consul_catalog",
                Configuration: configuration,
            }
        }
    }
}
// Provide allows the consul catalog provider to provide configurations to traefik
// using the given configuration channel.
func (p *CatalogProvider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
    config := api.DefaultConfig()
    config.Address = p.Endpoint
    client, err := api.NewClient(config)
    if err != nil {
        return err
    }
    p.client = client
    p.Constraints = append(p.Constraints, constraints...)
    // The watch loop runs in the pool with exponential-backoff retries; this
    // function returns immediately after starting it (err is nil here).
    pool.Go(func(stop chan bool) {
        // NOTE(review): the parameter name `time` shadows the time package
        // inside this closure — legal, but confusing.
        notify := func(err error, time time.Duration) {
            log.Errorf("Consul connection error %+v, retrying in %s", err, time)
        }
        operation := func() error {
            return p.watch(configurationChan, stop)
        }
        err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
        if err != nil {
            log.Errorf("Cannot connect to consul server %+v", err)
        }
    })
    return err
}
Use a blocking Consul health-state query in addition to the catalog query, so that health check changes also trigger a configuration reload.
package consul
import (
"errors"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/ty/fun"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/hashicorp/consul/api"
)
const (
// DefaultWatchWaitTime is the duration to wait when polling consul
DefaultWatchWaitTime = 15 * time.Second
)
var _ provider.Provider = (*CatalogProvider)(nil)
// CatalogProvider holds configurations of the Consul catalog provider.
type CatalogProvider struct {
    provider.BaseProvider `mapstructure:",squash"`
    Endpoint string `description:"Consul server endpoint"`
    Domain string `description:"Default domain used"`
    Prefix string `description:"Prefix used for Consul catalog tags"`
    client *api.Client // Consul API client, created in Provide
}
// serviceUpdate couples a service name with the merged set of catalog tags
// found across its healthy nodes.
type serviceUpdate struct {
    ServiceName string
    Attributes []string
}
// catalogUpdate is the unit handed to the config template: one service plus
// the healthy nodes backing it.
type catalogUpdate struct {
    Service *serviceUpdate
    Nodes []*api.ServiceEntry
}
// nodeSorter implements sort.Interface over service entries so that node
// lists have a deterministic order regardless of Consul's response order.
type nodeSorter []*api.ServiceEntry

// Len reports the number of entries.
func (a nodeSorter) Len() int {
	return len(a)
}

// Swap exchanges two entries.
func (a nodeSorter) Swap(i int, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders entries by lowercase service name, then service address,
// then node address, and finally service port as the tie-breaker.
func (a nodeSorter) Less(i int, j int) bool {
	left, right := a[i], a[j]
	leftName := strings.ToLower(left.Service.Service)
	rightName := strings.ToLower(right.Service.Service)
	switch {
	case leftName != rightName:
		return leftName < rightName
	case left.Service.Address != right.Service.Address:
		return left.Service.Address < right.Service.Address
	case left.Node.Address != right.Node.Address:
		return left.Node.Address < right.Node.Address
	default:
		return left.Service.Port < right.Service.Port
	}
}
// watchServices long-polls both the Consul catalog (service list) and the
// aggregate health state, and emits the current catalog snapshot whenever
// either index advances. The channel is closed on stop or query failure.
func (p *CatalogProvider) watchServices(stopCh <-chan struct{}) <-chan map[string][]string {
    watchCh := make(chan map[string][]string)
    catalog := p.client.Catalog()
    health := p.client.Health()
    var lastHealthIndex uint64
    safe.Go(func() {
        defer close(watchCh)
        catalogOptions := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
        // NOTE(review): healthOptions has no WaitIndex/WaitTime set, so the
        // health query is a plain (non-blocking) read — confirm intent.
        healthOptions := &api.QueryOptions{}
        for {
            // Non-blocking stop check between polls.
            select {
            case <-stopCh:
                return
            default:
            }
            data, catalogMeta, err := catalog.Services(catalogOptions)
            if err != nil {
                log.WithError(err).Errorf("Failed to list services")
                return
            }
            // Listening to changes that leads to `passing` state or degrades from it.
            // The call is used just as a trigger for further actions
            // (intentionally there is no interest in the received data).
            _, healthMeta, err := health.State("passing", healthOptions)
            if err != nil {
                log.WithError(err).Errorf("Failed to retrieve health checks")
                return
            }
            // If LastIndex didn't change then it means `Get` returned
            // because of the WaitTime and the key didn't changed.
            sameServiceAmount := catalogOptions.WaitIndex == catalogMeta.LastIndex
            sameServiceHealth := lastHealthIndex == healthMeta.LastIndex
            if sameServiceAmount && sameServiceHealth {
                continue
            }
            catalogOptions.WaitIndex = catalogMeta.LastIndex
            lastHealthIndex = healthMeta.LastIndex
            if data != nil {
                watchCh <- data
            }
        }
    })
    return watchCh
}
// healthyNodes queries Consul for the passing-only nodes of a service,
// filters out nodes whose tags fail the provider constraints, and returns
// the service together with the union of tags across the surviving nodes.
func (p *CatalogProvider) healthyNodes(service string) (catalogUpdate, error) {
	health := p.client.Health()
	opts := &api.QueryOptions{}
	// `true` requests only instances whose health checks are passing.
	data, _, err := health.Service(service, "", true, opts)
	if err != nil {
		// Use a format verb instead of concatenating into the format string:
		// a '%' in the service name would otherwise corrupt the log output.
		log.WithError(err).Errorf("Failed to fetch details of %s", service)
		return catalogUpdate{}, err
	}
	// Drop nodes that fail the configured constraints.
	nodes := fun.Filter(func(node *api.ServiceEntry) bool {
		constraintTags := p.getContraintTags(node.Service.Tags)
		ok, failingConstraint := p.MatchConstraints(constraintTags)
		if !ok && failingConstraint != nil {
			log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String())
		}
		return ok
	}, data).([]*api.ServiceEntry)
	//Merge tags of nodes matching constraints, in a single slice.
	tags := fun.Foldl(func(node *api.ServiceEntry, set []string) []string {
		return fun.Keys(fun.Union(
			fun.Set(set),
			fun.Set(node.Service.Tags),
		).(map[string]bool)).([]string)
	}, []string{}, nodes).([]string)
	return catalogUpdate{
		Service: &serviceUpdate{
			ServiceName: service,
			Attributes:  tags,
		},
		Nodes: nodes,
	}, nil
}
// getEntryPoints splits a comma-separated entry-point list into a slice.
func (p *CatalogProvider) getEntryPoints(list string) []string {
    return strings.Split(list, ",")
}
// getBackend derives the backend name from the lowercase service name.
func (p *CatalogProvider) getBackend(node *api.ServiceEntry) string {
    return strings.ToLower(node.Service.Service)
}
// getFrontendRule returns the tag-supplied "frontend.rule" attribute when
// present, otherwise the default Host rule "<service>.<domain>".
func (p *CatalogProvider) getFrontendRule(service serviceUpdate) string {
    customFrontendRule := p.getAttribute("frontend.rule", service.Attributes, "")
    if customFrontendRule != "" {
        return customFrontendRule
    }
    return "Host:" + service.ServiceName + "." + p.Domain
}
// getBackendAddress prefers the service-level address, falling back to the
// node address when the service did not register one.
func (p *CatalogProvider) getBackendAddress(node *api.ServiceEntry) string {
    if node.Service.Address != "" {
        return node.Service.Address
    }
    return node.Node.Address
}
// getBackendName builds a unique backend identifier from the lowercase
// service name, address, port, normalized tags, and a trailing index, with
// "." and "=" rewritten to "-" so the name is safe for config keys.
func (p *CatalogProvider) getBackendName(node *api.ServiceEntry, index int) string {
	parts := []string{
		strings.ToLower(node.Service.Service),
		node.Service.Address,
		strconv.Itoa(node.Service.Port),
	}
	for _, tag := range node.Service.Tags {
		parts = append(parts, provider.Normalize(tag))
	}
	// unique int at the end
	parts = append(parts, strconv.Itoa(index))
	name := strings.Join(parts, "--")
	name = strings.Replace(name, ".", "-", -1)
	name = strings.Replace(name, "=", "-", -1)
	return name
}
// getAttribute scans the service tags for "<prefix>.<name>=<value>" (name
// matched case-insensitively) and returns the value, or defaultValue when no
// such tag exists.
func (p *CatalogProvider) getAttribute(name string, tags []string, defaultValue string) string {
	// Hoist the prefix out of the loop; use HasPrefix/EqualFold instead of
	// Index(...)==0 and double ToLower — same matching, idiomatic and cheaper.
	prefix := p.Prefix + "."
	for _, tag := range tags {
		if strings.HasPrefix(strings.ToLower(tag), prefix) {
			if kv := strings.SplitN(tag[len(prefix):], "=", 2); len(kv) == 2 && strings.EqualFold(kv[0], name) {
				return kv[1]
			}
		}
	}
	return defaultValue
}
// getContraintTags extracts the comma-separated values of every
// "<prefix>.tags=" tag into one flat slice.
// NOTE(review): "Contraint" is a typo for "Constraint", kept because callers
// use this name.
func (p *CatalogProvider) getContraintTags(tags []string) []string {
	var list []string
	// HasPrefix replaces the non-idiomatic Index(...)==0 check.
	prefix := p.Prefix + ".tags="
	for _, tag := range tags {
		if strings.HasPrefix(strings.ToLower(tag), prefix) {
			splitedTags := strings.Split(tag[len(prefix):], ",")
			list = append(list, splitedTags...)
		}
	}
	return list
}
// buildConfig renders the traefik configuration from the catalog snapshot by
// feeding services and their (stably sorted) nodes into the consul template.
// Returns nil when template rendering fails (the error is only logged).
func (p *CatalogProvider) buildConfig(catalog []catalogUpdate) *types.Configuration {
    var FuncMap = template.FuncMap{
        "getBackend": p.getBackend,
        "getFrontendRule": p.getFrontendRule,
        "getBackendName": p.getBackendName,
        "getBackendAddress": p.getBackendAddress,
        "getAttribute": p.getAttribute,
        "getEntryPoints": p.getEntryPoints,
        "hasMaxconnAttributes": p.hasMaxconnAttributes,
    }
    allNodes := []*api.ServiceEntry{}
    services := []*serviceUpdate{}
    for _, info := range catalog {
        // A service is included once as soon as any of its nodes is not
        // explicitly disabled via the "enable=false" tag.
        for _, node := range info.Nodes {
            isEnabled := p.getAttribute("enable", node.Service.Tags, "true")
            // NOTE(review): len(info.Nodes) > 0 is always true inside this
            // range loop; it is redundant but harmless.
            if isEnabled != "false" && len(info.Nodes) > 0 {
                services = append(services, info.Service)
                allNodes = append(allNodes, info.Nodes...)
                break
            }
        }
    }
    // Ensure a stable ordering of nodes so that identical configurations may be detected
    sort.Sort(nodeSorter(allNodes))
    templateObjects := struct {
        Services []*serviceUpdate
        Nodes []*api.ServiceEntry
    }{
        Services: services,
        Nodes: allNodes,
    }
    configuration, err := p.GetConfiguration("templates/consul_catalog.tmpl", FuncMap, templateObjects)
    if err != nil {
        log.WithError(err).Error("Failed to create config")
    }
    return configuration
}
// hasMaxconnAttributes reports whether both maxconn attributes (amount and
// extractorfunc) are present in the tag set — both are required for a
// usable maxconn configuration.
func (p *CatalogProvider) hasMaxconnAttributes(attributes []string) bool {
	amount := p.getAttribute("backend.maxconn.amount", attributes, "")
	extractorfunc := p.getAttribute("backend.maxconn.extractorfunc", attributes, "")
	// Return the condition directly instead of if-true/return-false.
	return amount != "" && extractorfunc != ""
}
// getNodes resolves each service in the catalog index to its healthy nodes,
// skipping duplicate (case-folded) names and names containing spaces.
// Services with no matching healthy nodes are silently omitted.
func (p *CatalogProvider) getNodes(index map[string][]string) ([]catalogUpdate, error) {
    visited := make(map[string]bool)
    nodes := []catalogUpdate{}
    for service := range index {
        name := strings.ToLower(service)
        // Names with spaces cannot be valid service names; visited guards
        // against case-variant duplicates in the index.
        if !strings.Contains(name, " ") && !visited[name] {
            visited[name] = true
            log.WithFields(logrus.Fields{
                "service": name,
            }).Debug("Fetching service")
            healthy, err := p.healthyNodes(name)
            if err != nil {
                return nil, err
            }
            // healthy.Nodes can be empty if constraints do not match, without throwing error
            if healthy.Service != nil && len(healthy.Nodes) > 0 {
                nodes = append(nodes, healthy)
            }
        }
    }
    return nodes, nil
}
// watch consumes service-list snapshots from watchServices, rebuilds the
// configuration for each one, and pushes it to configurationChan. Returns
// nil on stop, or an error when the watch channel closes or node resolution
// fails (the caller retries with backoff).
func (p *CatalogProvider) watch(configurationChan chan<- types.ConfigMessage, stop chan bool) error {
    stopCh := make(chan struct{})
    serviceCatalog := p.watchServices(stopCh)
    // Closing stopCh terminates the watchServices goroutine on any return.
    defer close(stopCh)
    for {
        select {
        case <-stop:
            return nil
        case index, ok := <-serviceCatalog:
            // A closed channel means the watcher hit a Consul error.
            if !ok {
                return errors.New("Consul service list nil")
            }
            log.Debug("List of services changed")
            nodes, err := p.getNodes(index)
            if err != nil {
                return err
            }
            configuration := p.buildConfig(nodes)
            configurationChan <- types.ConfigMessage{
                ProviderName: "consul_catalog",
                Configuration: configuration,
            }
        }
    }
}
// Provide allows the consul catalog provider to provide configurations to traefik
// using the given configuration channel.
func (p *CatalogProvider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
    config := api.DefaultConfig()
    config.Address = p.Endpoint
    client, err := api.NewClient(config)
    if err != nil {
        return err
    }
    p.client = client
    p.Constraints = append(p.Constraints, constraints...)
    // The watch loop runs in the pool with exponential-backoff retries; this
    // function returns immediately after starting it (err is nil here).
    pool.Go(func(stop chan bool) {
        // NOTE(review): the parameter name `time` shadows the time package
        // inside this closure — legal, but confusing.
        notify := func(err error, time time.Duration) {
            log.Errorf("Consul connection error %+v, retrying in %s", err, time)
        }
        operation := func() error {
            return p.watch(configurationChan, stop)
        }
        err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
        if err != nil {
            log.Errorf("Cannot connect to consul server %+v", err)
        }
    })
    return err
}
|
//
// Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sourceforge
import (
"encoding/xml"
"fmt"
"github.com/DataDrake/cuppa/results"
"net/http"
"regexp"
"time"
)
const (
    // API is the RSS endpoint for a SourceForge project's file listing;
    // parameters are the project name and the path within it.
    API = "https://sourceforge.net/projects/%s/rss?path=/%s"
)
// TarballRegex matches SourceForge sources
// (captures: project, path, name, version).
// NOTE(review): dots in the pattern are unescaped and `.+` is greedy —
// works for typical URLs but is looser than intended.
var TarballRegex = regexp.MustCompile("https?://.*sourceforge.net/projects/(.+)/files/(.+)/(.+)-([\\d]+(?:.\\d+)*).+$")
// ProjectRegex matches SourceForge sources
// using the legacy /project/ (singular) URL form.
var ProjectRegex = regexp.MustCompile("https?://.*sourceforge.net/project/(.+)/(.+)/(.+)-([\\d]+(?:.\\d+)*).+$")
// Provider is the upstream provider interface for SourceForge
type Provider struct{}
// Item is one RSS entry: a released file and its publication date.
type Item struct {
    XMLName xml.Name `xml:"item"`
    Link string `xml:"link"`
    Date string `xml:"pubDate"`
}
// Feed is the RSS document wrapper around the channel's items.
type Feed struct {
    XMLName xml.Name `xml:"rss"`
    Items []Item `xml:"channel>item"`
}
// toResults converts the RSS feed into a ResultSet, keeping only items whose
// link matches TarballRegex and using its version capture group.
func (f *Feed) toResults(name string) *results.ResultSet {
    rs := results.NewResultSet(name)
    for _, item := range f.Items {
        sm := TarballRegex.FindStringSubmatch(item.Link)
        if len(sm) != 5 {
            continue
        }
        // NOTE(review): the appended "C" presumably turns SourceForge's
        // "UT" zone suffix into "UTC" so RFC1123 parsing succeeds — confirm;
        // the parse error is deliberately ignored (zero time on failure).
        pub, _ := time.Parse(time.RFC1123, item.Date + "C")
        r := &results.Result {
            Name: name,
            Version: sm[4],
            Location: item.Link,
            Published: pub,
        }
        rs.AddResult(r)
    }
    return rs
}
// Latest finds the newest release for a SourceForge package
// by delegating to Releases and taking the first result.
func (c Provider) Latest(name string) (r *results.Result, s results.Status) {
    rs, s := c.Releases(name)
    if s != results.OK {
        return
    }
    r = rs.First()
    return
}
// Match checks to see if this provider can handle this kind of query
// (returns the full matched URL, or "" when neither regex matches).
func (c Provider) Match(query string) string {
    sm := TarballRegex.FindStringSubmatch(query)
    if len(sm) != 5 {
        sm = ProjectRegex.FindStringSubmatch(query)
        if len(sm) != 5 {
            return ""
        }
    }
    return sm[0]
}
// Name gives the name of this provider
func (c Provider) Name() string {
    return "SourceForge"
}
// Releases finds all matching releases for a SourceForge package by parsing
// the project's RSS feed. `name` must be a URL matching TarballRegex or
// ProjectRegex; otherwise NotFound is returned.
func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {
	sm := TarballRegex.FindStringSubmatch(name)
	if len(sm) != 5 {
		sm = ProjectRegex.FindStringSubmatch(name)
	}
	// BUG FIX: if neither regex matched, sm is nil and sm[1] below would
	// panic with an index-out-of-range. Bail out with NotFound instead.
	if len(sm) != 5 {
		s = results.NotFound
		return
	}
	// Query the API
	resp, err := http.Get(fmt.Sprintf(API, sm[1], sm[2]))
	if err != nil {
		// NOTE(review): panicking on a transient network error is harsh for
		// library code; kept for interface stability with the other providers.
		panic(err.Error())
	}
	defer resp.Body.Close()
	// Translate Status Code
	switch resp.StatusCode {
	case 200:
		s = results.OK
	case 404:
		s = results.NotFound
	default:
		s = results.Unavailable
	}
	// Fail if not OK
	if s != results.OK {
		return
	}
	dec := xml.NewDecoder(resp.Body)
	feed := &Feed{}
	err = dec.Decode(feed)
	if err != nil {
		panic(err.Error())
	}
	rs = feed.toResults(sm[3])
	if rs.Len() == 0 {
		s = results.NotFound
	}
	return
}
Improve SourceForge URL matching: tighten ProjectRegex with non-greedy groups and an extra path segment.
//
// Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sourceforge
import (
"encoding/xml"
"fmt"
"github.com/DataDrake/cuppa/results"
"net/http"
"regexp"
"time"
)
const (
    // API is the RSS endpoint for a SourceForge project's file listing;
    // parameters are the project name and the path within it.
    API = "https://sourceforge.net/projects/%s/rss?path=/%s"
)
// TarballRegex matches SourceForge sources
// (captures: project, path, name, version).
var TarballRegex = regexp.MustCompile("https?://.*sourceforge.net/projects/(.+)/files/(.+)/(.+)-([\\d]+(?:.\\d+)*).+$")
// ProjectRegex matches SourceForge sources
// using the legacy /project/ URL form, with non-greedy groups so the
// project name and version are captured from the right path segments.
var ProjectRegex = regexp.MustCompile("https?://.*sourceforge.net/project/(.+?)/(.+)/.+?/(.+)-([\\d]+(?:.\\d+)*).+$")
// Provider is the upstream provider interface for SourceForge
type Provider struct{}
// Item is one RSS entry: a released file and its publication date.
type Item struct {
    XMLName xml.Name `xml:"item"`
    Link string `xml:"link"`
    Date string `xml:"pubDate"`
}
// Feed is the RSS document wrapper around the channel's items.
type Feed struct {
    XMLName xml.Name `xml:"rss"`
    Items []Item `xml:"channel>item"`
}
// toResults converts the RSS feed into a ResultSet, keeping only items whose
// link matches TarballRegex and using its version capture group.
func (f *Feed) toResults(name string) *results.ResultSet {
    rs := results.NewResultSet(name)
    for _, item := range f.Items {
        sm := TarballRegex.FindStringSubmatch(item.Link)
        if len(sm) != 5 {
            continue
        }
        // NOTE(review): the appended "C" presumably turns SourceForge's
        // "UT" zone suffix into "UTC" so RFC1123 parsing succeeds — confirm;
        // the parse error is deliberately ignored (zero time on failure).
        pub, _ := time.Parse(time.RFC1123, item.Date + "C")
        r := &results.Result {
            Name: name,
            Version: sm[4],
            Location: item.Link,
            Published: pub,
        }
        rs.AddResult(r)
    }
    return rs
}
// Latest finds the newest release for a SourceForge package
// by delegating to Releases and taking the first result.
func (c Provider) Latest(name string) (r *results.Result, s results.Status) {
    rs, s := c.Releases(name)
    if s != results.OK {
        return
    }
    r = rs.First()
    return
}
// Match checks to see if this provider can handle this kind of query
// (returns the full matched URL, or "" when neither regex matches).
func (c Provider) Match(query string) string {
    sm := TarballRegex.FindStringSubmatch(query)
    if len(sm) != 5 {
        sm = ProjectRegex.FindStringSubmatch(query)
        if len(sm) != 5 {
            return ""
        }
    }
    return sm[0]
}
// Name gives the name of this provider
func (c Provider) Name() string {
    return "SourceForge"
}
// Releases finds all matching releases for a SourceForge package by parsing
// the project's RSS feed. `name` must be a URL matching TarballRegex or
// ProjectRegex; otherwise NotFound is returned.
func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {
	sm := TarballRegex.FindStringSubmatch(name)
	if len(sm) != 5 {
		sm = ProjectRegex.FindStringSubmatch(name)
	}
	// BUG FIX (x2): removed the leftover debug `fmt.Printf("%#v\n", sm)` that
	// wrote to stdout on every call, and added a guard — if neither regex
	// matched, sm is nil and sm[1] below would panic with index-out-of-range.
	if len(sm) != 5 {
		s = results.NotFound
		return
	}
	// Query the API
	resp, err := http.Get(fmt.Sprintf(API, sm[1], sm[2]))
	if err != nil {
		// NOTE(review): panicking on a transient network error is harsh for
		// library code; kept for interface stability with the other providers.
		panic(err.Error())
	}
	defer resp.Body.Close()
	// Translate Status Code
	switch resp.StatusCode {
	case 200:
		s = results.OK
	case 404:
		s = results.NotFound
	default:
		s = results.Unavailable
	}
	// Fail if not OK
	if s != results.OK {
		return
	}
	dec := xml.NewDecoder(resp.Body)
	feed := &Feed{}
	err = dec.Decode(feed)
	if err != nil {
		panic(err.Error())
	}
	rs = feed.toResults(sm[3])
	if rs.Len() == 0 {
		s = results.NotFound
	}
	return
}
|
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/testing"
dtesting "github.com/fsouza/go-dockerclient/testing"
"github.com/tsuru/config"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/tsuru/api"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/auth"
"github.com/tsuru/tsuru/auth/native"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/db/dbtest"
"github.com/tsuru/tsuru/event"
"github.com/tsuru/tsuru/event/eventtest"
tsuruIo "github.com/tsuru/tsuru/io"
"github.com/tsuru/tsuru/permission"
"github.com/tsuru/tsuru/permission/permissiontest"
"github.com/tsuru/tsuru/provision"
"github.com/tsuru/tsuru/provision/docker/container"
"github.com/tsuru/tsuru/provision/docker/types"
"github.com/tsuru/tsuru/provision/provisiontest"
"github.com/tsuru/tsuru/queue"
"gopkg.in/check.v1"
"gopkg.in/mgo.v2"
)
// HandlersSuite groups the shared fixtures for the docker provisioner
// handler tests: DB/cluster connections, auth identities, the provisioner
// under test, and a fake Docker server.
type HandlersSuite struct {
    conn *db.Storage
    user *auth.User
    token auth.Token
    team *auth.Team
    clusterSess *mgo.Session
    p *dockerProvisioner
    server *dtesting.DockerServer
}
// Register the suite with gocheck.
var _ = check.Suite(&HandlersSuite{})
// nativeScheme is the auth scheme used by all tests in this suite.
var nativeScheme = auth.ManagedScheme(native.NativeScheme{})
// SetUpSuite configures test-only databases and fake services, opens the
// shared DB and cluster-storage connections, and inserts the admin team.
// Runs once before the whole suite.
func (s *HandlersSuite) SetUpSuite(c *check.C) {
    config.Set("database:name", "docker_provision_handlers_tests_s")
    config.Set("docker:collection", "docker_handler_suite")
    config.Set("docker:run-cmd:port", 8888)
    config.Set("docker:router", "fake")
    config.Set("docker:cluster:mongo-url", "127.0.0.1:27017")
    config.Set("docker:cluster:mongo-database", "docker_provision_handlers_tests_cluster_stor")
    config.Set("docker:repository-namespace", "tsuru")
    config.Set("queue:mongo-url", "127.0.0.1:27017")
    config.Set("queue:mongo-database", "queue_provision_docker_tests_handlers")
    config.Set("iaas:default", "test-iaas")
    config.Set("iaas:node-protocol", "http")
    config.Set("iaas:node-port", 1234)
    config.Set("routers:fake:type", "fake")
    var err error
    s.conn, err = db.Conn()
    c.Assert(err, check.IsNil)
    clusterDbURL, _ := config.GetString("docker:cluster:mongo-url")
    s.clusterSess, err = mgo.Dial(clusterDbURL)
    c.Assert(err, check.IsNil)
    app.AuthScheme = nativeScheme
    s.team = &auth.Team{Name: "admin"}
    err = s.conn.Teams().Insert(s.team)
    c.Assert(err, check.IsNil)
}
// SetUpTest resets queue/cluster state, starts a fresh fake Docker server,
// rebuilds the provisioner and its cluster, clears all collections, and
// creates a fully-privileged user/token. Runs before every test.
func (s *HandlersSuite) SetUpTest(c *check.C) {
	config.Set("docker:api-timeout", 2)
	queue.ResetQueue()
	err := clearClusterStorage(s.clusterSess)
	c.Assert(err, check.IsNil)
	s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
	mainDockerProvisioner = s.p
	err = mainDockerProvisioner.Initialize()
	c.Assert(err, check.IsNil)
	s.p.cluster, err = cluster.New(nil, s.p.storage, "",
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
	)
	// BUG FIX: the error from cluster.New was silently dropped, letting later
	// tests fail confusingly if cluster creation itself failed.
	c.Assert(err, check.IsNil)
	coll := mainDockerProvisioner.Collection()
	defer coll.Close()
	err = dbtest.ClearAllCollections(coll.Database)
	c.Assert(err, check.IsNil)
	s.user, s.token = permissiontest.CustomUserWithPermission(c, nativeScheme, "provisioner-docker", permission.Permission{
		Scheme:  permission.PermAll,
		Context: permission.PermissionContext{CtxType: permission.CtxGlobal},
	})
}
// TearDownTest stops the fake docker server started in SetUpTest.
func (s *HandlersSuite) TearDownTest(c *check.C) {
	s.server.Stop()
}
// TearDownSuite drops the suite's databases and closes both mongo sessions.
func (s *HandlersSuite) TearDownSuite(c *check.C) {
	defer s.clusterSess.Close()
	defer s.conn.Close()
	coll := mainDockerProvisioner.Collection()
	defer coll.Close()
	coll.Database.DropDatabase()
	databaseName, _ := config.GetString("docker:cluster:mongo-database")
	s.clusterSess.DB(databaseName).DropDatabase()
}
// startFakeDockerNode starts a fake docker server, registers its port as
// the iaas node port, and returns the server plus a teardown func that
// blocks until the node is pinged and then resets the queue.
func startFakeDockerNode(c *check.C) (*testing.DockerServer, func()) {
	pong := make(chan struct{})
	server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		if strings.Contains(r.URL.Path, "ping") {
			close(pong)
		}
	})
	c.Assert(err, check.IsNil)
	// Fix: renamed local from "url" to serverURL so it no longer shadows
	// the net/url package inside this function.
	serverURL, err := url.Parse(server.URL())
	c.Assert(err, check.IsNil)
	_, portString, _ := net.SplitHostPort(serverURL.Host)
	port, _ := strconv.Atoi(portString)
	config.Set("iaas:node-port", port)
	return server, func() {
		<-pong
		queue.ResetQueue()
	}
}
// A POST to the move endpoint with no body must yield 400 Bad Request.
func (s *HandlersSuite) TestMoveContainersEmptyBodyHandler(c *check.C) {
	req, err := http.NewRequest("POST", "/docker/containers/move", nil)
	c.Assert(err, check.IsNil)
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	srv := api.RunServer(true)
	rec := httptest.NewRecorder()
	srv.ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusBadRequest)
}
// Moving containers with an empty "to" host is reported as invalid params.
func (s *HandlersSuite) TestMoveContainersEmptyToHandler(c *check.C) {
	form := url.Values{}
	form.Set("from", "fromhost")
	form.Set("to", "")
	req, err := http.NewRequest("POST", "/docker/containers/move", strings.NewReader(form.Encode()))
	c.Assert(err, check.IsNil)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	rec := httptest.NewRecorder()
	api.RunServer(true).ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusInternalServerError)
	c.Assert(rec.Body.String(), check.Equals, "Invalid params: from: fromhost - to: \n")
}
// TestMoveContainersHandler moves containers between two registered nodes
// and checks both the streamed JSON messages and the recorded event.
func (s *HandlersSuite) TestMoveContainersHandler(c *check.C) {
	recorder := httptest.NewRecorder()
	v := url.Values{}
	v.Set("from", "localhost")
	v.Set("to", "127.0.0.1")
	b := strings.NewReader(v.Encode())
	request, err := http.NewRequest("POST", "/docker/containers/move", b)
	c.Assert(err, check.IsNil)
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://localhost:2375"})
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://127.0.0.1:2375"})
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream")
	// The response is newline-separated JSON objects; wrap them in brackets
	// so they can be decoded as a single JSON array.
	validJson := fmt.Sprintf("[%s]", strings.Replace(strings.Trim(recorder.Body.String(), "\n "), "\n", ",", -1))
	var result []tsuruIo.SimpleJsonMessage
	err = json.Unmarshal([]byte(validJson), &result)
	c.Assert(err, check.IsNil)
	c.Assert(result, check.DeepEquals, []tsuruIo.SimpleJsonMessage{
		{Message: "No units to move in localhost\n"},
		{Message: "Containers moved successfully!\n"},
	})
	c.Assert(eventtest.EventDesc{
		Target: event.Target{Type: event.TargetTypeNode, Value: "localhost"},
		Owner:  s.token.GetUserName(),
		Kind:   "node.update.move.containers",
		StartCustomData: []map[string]interface{}{
			{"name": "from", "value": "localhost"},
			{"name": "to", "value": "127.0.0.1"},
		},
	}, eventtest.HasEvent)
}
// Moving a container whose ID does not exist must yield 404 Not Found.
func (s *HandlersSuite) TestMoveContainerNotFound(c *check.C) {
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://127.0.0.1:2375"})
	form := url.Values{}
	form.Set("to", "127.0.0.1")
	req, err := http.NewRequest("POST", "/docker/container/myid/move", strings.NewReader(form.Encode()))
	c.Assert(err, check.IsNil)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	rec := httptest.NewRecorder()
	api.RunServer(true).ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusNotFound)
}
// TestDockerLogsUpdateHandler posts three log-config updates (default pool,
// POOL1, POOL2) and verifies after each one that the stored config map and
// the recorded pool.update.logs event match.
func (s *HandlersSuite) TestDockerLogsUpdateHandler(c *check.C) {
	values1 := url.Values{
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	values2 := url.Values{
		"pool":   []string{"POOL1"},
		"Driver": []string{"bs"},
	}
	values3 := url.Values{
		"pool":                    []string{"POOL2"},
		"Driver":                  []string{"fluentd"},
		"LogOpts.fluentd-address": []string{"localhost:2222"},
	}
	// doReq posts one form to /docker/logs and asserts the response plus
	// the emitted event (custom data excludes the "pool" key, which becomes
	// the event target instead).
	doReq := func(val url.Values) {
		reader := strings.NewReader(val.Encode())
		recorder := httptest.NewRecorder()
		request, err := http.NewRequest("POST", "/docker/logs", reader)
		c.Assert(err, check.IsNil)
		request.Header.Set("Authorization", "bearer "+s.token.GetValue())
		request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		server := api.RunServer(true)
		server.ServeHTTP(recorder, request)
		c.Assert(recorder.Body.String(), check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}\n")
		c.Assert(recorder.Code, check.Equals, http.StatusOK)
		c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream")
		var pool string
		var customData []map[string]interface{}
		for k, v := range val {
			if k == "pool" {
				pool = v[0]
				continue
			}
			customData = append(customData, map[string]interface{}{"name": k, "value": v[0]})
		}
		c.Assert(eventtest.EventDesc{
			Target:          event.Target{Type: event.TargetTypePool, Value: pool},
			Owner:           s.token.GetUserName(),
			Kind:            "pool.update.logs",
			StartCustomData: customData,
		}, eventtest.HasEvent)
	}
	doReq(values1)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
	doReq(values2)
	entries, err = container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
		"POOL1": {DockerLogConfig: types.DockerLogConfig{Driver: "bs", LogOpts: map[string]string{}}},
	})
	doReq(values3)
	entries, err = container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
		"POOL1": {DockerLogConfig: types.DockerLogConfig{Driver: "bs", LogOpts: map[string]string{}}},
		"POOL2": {DockerLogConfig: types.DockerLogConfig{Driver: "fluentd", LogOpts: map[string]string{"fluentd-address": "localhost:2222"}}},
	})
}
// With restart=true but no provisioned apps, the update succeeds and only
// the success message is streamed.
func (s *HandlersSuite) TestDockerLogsUpdateHandlerWithRestartNoApps(c *check.C) {
	values := url.Values{
		"restart":                []string{"true"},
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	recorder := httptest.NewRecorder()
	reader := strings.NewReader(values.Encode())
	request, err := http.NewRequest("POST", "/docker/logs", reader)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Body.String(), check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}\n")
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
}
// With restart=true and a pool filter, only apps in that pool (app2, app3
// in pool2) are restarted, and only pool2's log config is changed.
func (s *HandlersSuite) TestDockerLogsUpdateHandlerWithRestartSomeApps(c *check.C) {
	appPools := [][]string{{"app1", "pool1"}, {"app2", "pool2"}, {"app3", "pool2"}}
	storage, err := db.Conn()
	c.Assert(err, check.IsNil)
	for _, appPool := range appPools {
		opts := provision.AddPoolOptions{Name: appPool[1]}
		provision.AddPool(opts)
		// Fix: use "=" instead of ":=" so we no longer shadow the outer
		// err inside the loop (flagged by vet's shadow analyzer).
		err = newFakeImage(s.p, "tsuru/app-"+appPool[0], nil)
		c.Assert(err, check.IsNil)
		appInstance := provisiontest.NewFakeApp(appPool[0], "python", 0)
		appStruct := &app.App{
			Name:     appInstance.GetName(),
			Platform: appInstance.GetPlatform(),
			Pool:     opts.Name,
			Router:   "fake",
		}
		err = storage.Apps().Insert(appStruct)
		c.Assert(err, check.IsNil)
		err = s.p.Provision(appStruct)
		c.Assert(err, check.IsNil)
	}
	values := url.Values{
		"pool":                   []string{"pool2"},
		"restart":                []string{"true"},
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	recorder := httptest.NewRecorder()
	reader := strings.NewReader(values.Encode())
	request, err := http.NewRequest("POST", "/docker/logs", reader)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	responseParts := strings.Split(recorder.Body.String(), "\n")
	c.Assert(responseParts, check.HasLen, 17)
	c.Assert(responseParts[0], check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}")
	c.Assert(responseParts[1], check.Equals, "{\"Message\":\"Restarting 2 applications: [app2, app3]\\n\"}")
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {},
		"pool2": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
}
// TestDockerLogsInfoHandler checks GET /docker/logs: first with only the
// empty default config, then again after saving a syslog config for "p1".
func (s *HandlersSuite) TestDockerLogsInfoHandler(c *check.C) {
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("GET", "/docker/logs", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/json")
	var conf map[string]container.DockerLogConfig
	err = json.Unmarshal(recorder.Body.Bytes(), &conf)
	c.Assert(err, check.IsNil)
	c.Assert(conf, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {},
	})
	newConf := container.DockerLogConfig{DockerLogConfig: types.DockerLogConfig{Driver: "syslog"}}
	err = newConf.Save("p1")
	c.Assert(err, check.IsNil)
	request, err = http.NewRequest("GET", "/docker/logs", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	recorder = httptest.NewRecorder()
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	var conf2 map[string]container.DockerLogConfig
	err = json.Unmarshal(recorder.Body.Bytes(), &conf2)
	c.Assert(err, check.IsNil)
	c.Assert(conf2, check.DeepEquals, map[string]container.DockerLogConfig{
		"":   {},
		"p1": {DockerLogConfig: types.DockerLogConfig{Driver: "syslog", LogOpts: map[string]string{}}},
	})
}
provision/docker: fix shadow var
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/testing"
dtesting "github.com/fsouza/go-dockerclient/testing"
"github.com/tsuru/config"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/tsuru/api"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/auth"
"github.com/tsuru/tsuru/auth/native"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/db/dbtest"
"github.com/tsuru/tsuru/event"
"github.com/tsuru/tsuru/event/eventtest"
tsuruIo "github.com/tsuru/tsuru/io"
"github.com/tsuru/tsuru/permission"
"github.com/tsuru/tsuru/permission/permissiontest"
"github.com/tsuru/tsuru/provision"
"github.com/tsuru/tsuru/provision/docker/container"
"github.com/tsuru/tsuru/provision/docker/types"
"github.com/tsuru/tsuru/provision/provisiontest"
"github.com/tsuru/tsuru/queue"
"gopkg.in/check.v1"
"gopkg.in/mgo.v2"
)
// HandlersSuite carries the shared fixtures for the docker handler tests:
// storage and cluster-mongo connections, the test user/token/team, the
// provisioner under test, and a fake docker server.
type HandlersSuite struct {
	conn        *db.Storage
	user        *auth.User
	token       auth.Token
	team        *auth.Team
	clusterSess *mgo.Session
	p           *dockerProvisioner
	server      *dtesting.DockerServer
}
// Register the suite with go-check and share one native auth scheme.
var _ = check.Suite(&HandlersSuite{})
var nativeScheme = auth.ManagedScheme(native.NativeScheme{})
// SetUpSuite configures test databases, queue and iaas settings, opens the
// storage and cluster-mongo connections, and inserts the "admin" team.
func (s *HandlersSuite) SetUpSuite(c *check.C) {
	config.Set("database:name", "docker_provision_handlers_tests_s")
	config.Set("docker:collection", "docker_handler_suite")
	config.Set("docker:run-cmd:port", 8888)
	config.Set("docker:router", "fake")
	config.Set("docker:cluster:mongo-url", "127.0.0.1:27017")
	config.Set("docker:cluster:mongo-database", "docker_provision_handlers_tests_cluster_stor")
	config.Set("docker:repository-namespace", "tsuru")
	config.Set("queue:mongo-url", "127.0.0.1:27017")
	config.Set("queue:mongo-database", "queue_provision_docker_tests_handlers")
	config.Set("iaas:default", "test-iaas")
	config.Set("iaas:node-protocol", "http")
	config.Set("iaas:node-port", 1234)
	config.Set("routers:fake:type", "fake")
	var err error
	s.conn, err = db.Conn()
	c.Assert(err, check.IsNil)
	clusterDbURL, _ := config.GetString("docker:cluster:mongo-url")
	s.clusterSess, err = mgo.Dial(clusterDbURL)
	c.Assert(err, check.IsNil)
	app.AuthScheme = nativeScheme
	s.team = &auth.Team{Name: "admin"}
	err = s.conn.Teams().Insert(s.team)
	c.Assert(err, check.IsNil)
}
// SetUpTest resets queue and cluster storage, starts a fresh fake docker
// server, and rebuilds the docker provisioner before every test.
func (s *HandlersSuite) SetUpTest(c *check.C) {
	config.Set("docker:api-timeout", 2)
	queue.ResetQueue()
	err := clearClusterStorage(s.clusterSess)
	c.Assert(err, check.IsNil)
	s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
	mainDockerProvisioner = s.p
	err = mainDockerProvisioner.Initialize()
	c.Assert(err, check.IsNil)
	s.p.cluster, err = cluster.New(nil, s.p.storage, "",
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
	)
	// Fix: the error returned by cluster.New was previously ignored.
	c.Assert(err, check.IsNil)
	coll := mainDockerProvisioner.Collection()
	defer coll.Close()
	err = dbtest.ClearAllCollections(coll.Database)
	c.Assert(err, check.IsNil)
	s.user, s.token = permissiontest.CustomUserWithPermission(c, nativeScheme, "provisioner-docker", permission.Permission{
		Scheme:  permission.PermAll,
		Context: permission.PermissionContext{CtxType: permission.CtxGlobal},
	})
}
// TearDownTest stops the fake docker server started in SetUpTest.
func (s *HandlersSuite) TearDownTest(c *check.C) {
	s.server.Stop()
}
// TearDownSuite drops the suite's databases and closes both mongo sessions.
func (s *HandlersSuite) TearDownSuite(c *check.C) {
	defer s.clusterSess.Close()
	defer s.conn.Close()
	coll := mainDockerProvisioner.Collection()
	defer coll.Close()
	coll.Database.DropDatabase()
	databaseName, _ := config.GetString("docker:cluster:mongo-database")
	s.clusterSess.DB(databaseName).DropDatabase()
}
// startFakeDockerNode starts a fake docker server, registers its port as
// the iaas node port, and returns the server plus a teardown func that
// blocks until the node is pinged and then resets the queue.
func startFakeDockerNode(c *check.C) (*testing.DockerServer, func()) {
	pong := make(chan struct{})
	server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		if strings.Contains(r.URL.Path, "ping") {
			close(pong)
		}
	})
	c.Assert(err, check.IsNil)
	// Fix: renamed local from "url" to serverURL so it no longer shadows
	// the net/url package inside this function.
	serverURL, err := url.Parse(server.URL())
	c.Assert(err, check.IsNil)
	_, portString, _ := net.SplitHostPort(serverURL.Host)
	port, _ := strconv.Atoi(portString)
	config.Set("iaas:node-port", port)
	return server, func() {
		<-pong
		queue.ResetQueue()
	}
}
// A POST to the move endpoint with no body must yield 400 Bad Request.
func (s *HandlersSuite) TestMoveContainersEmptyBodyHandler(c *check.C) {
	req, err := http.NewRequest("POST", "/docker/containers/move", nil)
	c.Assert(err, check.IsNil)
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	srv := api.RunServer(true)
	rec := httptest.NewRecorder()
	srv.ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusBadRequest)
}
// Moving containers with an empty "to" host is reported as invalid params.
func (s *HandlersSuite) TestMoveContainersEmptyToHandler(c *check.C) {
	form := url.Values{}
	form.Set("from", "fromhost")
	form.Set("to", "")
	req, err := http.NewRequest("POST", "/docker/containers/move", strings.NewReader(form.Encode()))
	c.Assert(err, check.IsNil)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	rec := httptest.NewRecorder()
	api.RunServer(true).ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusInternalServerError)
	c.Assert(rec.Body.String(), check.Equals, "Invalid params: from: fromhost - to: \n")
}
// TestMoveContainersHandler moves containers between two registered nodes
// and checks both the streamed JSON messages and the recorded event.
func (s *HandlersSuite) TestMoveContainersHandler(c *check.C) {
	recorder := httptest.NewRecorder()
	v := url.Values{}
	v.Set("from", "localhost")
	v.Set("to", "127.0.0.1")
	b := strings.NewReader(v.Encode())
	request, err := http.NewRequest("POST", "/docker/containers/move", b)
	c.Assert(err, check.IsNil)
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://localhost:2375"})
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://127.0.0.1:2375"})
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream")
	// Convert the newline-separated JSON stream into one decodable array.
	validJson := fmt.Sprintf("[%s]", strings.Replace(strings.Trim(recorder.Body.String(), "\n "), "\n", ",", -1))
	var result []tsuruIo.SimpleJsonMessage
	err = json.Unmarshal([]byte(validJson), &result)
	c.Assert(err, check.IsNil)
	c.Assert(result, check.DeepEquals, []tsuruIo.SimpleJsonMessage{
		{Message: "No units to move in localhost\n"},
		{Message: "Containers moved successfully!\n"},
	})
	c.Assert(eventtest.EventDesc{
		Target: event.Target{Type: event.TargetTypeNode, Value: "localhost"},
		Owner:  s.token.GetUserName(),
		Kind:   "node.update.move.containers",
		StartCustomData: []map[string]interface{}{
			{"name": "from", "value": "localhost"},
			{"name": "to", "value": "127.0.0.1"},
		},
	}, eventtest.HasEvent)
}
// Moving a container whose ID does not exist must yield 404 Not Found.
func (s *HandlersSuite) TestMoveContainerNotFound(c *check.C) {
	mainDockerProvisioner.Cluster().Register(cluster.Node{Address: "http://127.0.0.1:2375"})
	form := url.Values{}
	form.Set("to", "127.0.0.1")
	req, err := http.NewRequest("POST", "/docker/container/myid/move", strings.NewReader(form.Encode()))
	c.Assert(err, check.IsNil)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "bearer "+s.token.GetValue())
	rec := httptest.NewRecorder()
	api.RunServer(true).ServeHTTP(rec, req)
	c.Assert(rec.Code, check.Equals, http.StatusNotFound)
}
// TestDockerLogsUpdateHandler posts three log-config updates (default pool,
// POOL1, POOL2) and verifies after each one that the stored config map and
// the recorded pool.update.logs event match.
func (s *HandlersSuite) TestDockerLogsUpdateHandler(c *check.C) {
	values1 := url.Values{
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	values2 := url.Values{
		"pool":   []string{"POOL1"},
		"Driver": []string{"bs"},
	}
	values3 := url.Values{
		"pool":                    []string{"POOL2"},
		"Driver":                  []string{"fluentd"},
		"LogOpts.fluentd-address": []string{"localhost:2222"},
	}
	// doReq posts one form and asserts the response plus the emitted event
	// ("pool" becomes the event target, not custom data).
	doReq := func(val url.Values) {
		reader := strings.NewReader(val.Encode())
		recorder := httptest.NewRecorder()
		request, err := http.NewRequest("POST", "/docker/logs", reader)
		c.Assert(err, check.IsNil)
		request.Header.Set("Authorization", "bearer "+s.token.GetValue())
		request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		server := api.RunServer(true)
		server.ServeHTTP(recorder, request)
		c.Assert(recorder.Body.String(), check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}\n")
		c.Assert(recorder.Code, check.Equals, http.StatusOK)
		c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream")
		var pool string
		var customData []map[string]interface{}
		for k, v := range val {
			if k == "pool" {
				pool = v[0]
				continue
			}
			customData = append(customData, map[string]interface{}{"name": k, "value": v[0]})
		}
		c.Assert(eventtest.EventDesc{
			Target:          event.Target{Type: event.TargetTypePool, Value: pool},
			Owner:           s.token.GetUserName(),
			Kind:            "pool.update.logs",
			StartCustomData: customData,
		}, eventtest.HasEvent)
	}
	doReq(values1)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
	doReq(values2)
	entries, err = container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
		"POOL1": {DockerLogConfig: types.DockerLogConfig{Driver: "bs", LogOpts: map[string]string{}}},
	})
	doReq(values3)
	entries, err = container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
		"POOL1": {DockerLogConfig: types.DockerLogConfig{Driver: "bs", LogOpts: map[string]string{}}},
		"POOL2": {DockerLogConfig: types.DockerLogConfig{Driver: "fluentd", LogOpts: map[string]string{"fluentd-address": "localhost:2222"}}},
	})
}
// With restart=true but no provisioned apps, the update succeeds and only
// the success message is streamed.
func (s *HandlersSuite) TestDockerLogsUpdateHandlerWithRestartNoApps(c *check.C) {
	values := url.Values{
		"restart":                []string{"true"},
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	recorder := httptest.NewRecorder()
	reader := strings.NewReader(values.Encode())
	request, err := http.NewRequest("POST", "/docker/logs", reader)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Body.String(), check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}\n")
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
}
// With restart=true and a pool filter, only apps in that pool (app2, app3
// in pool2) are restarted, and only pool2's log config is changed.
// Note: this copy already carries the shadow-var fix (err = instead of :=).
func (s *HandlersSuite) TestDockerLogsUpdateHandlerWithRestartSomeApps(c *check.C) {
	appPools := [][]string{{"app1", "pool1"}, {"app2", "pool2"}, {"app3", "pool2"}}
	storage, err := db.Conn()
	c.Assert(err, check.IsNil)
	for _, appPool := range appPools {
		opts := provision.AddPoolOptions{Name: appPool[1]}
		provision.AddPool(opts)
		err = newFakeImage(s.p, "tsuru/app-"+appPool[0], nil)
		c.Assert(err, check.IsNil)
		appInstance := provisiontest.NewFakeApp(appPool[0], "python", 0)
		appStruct := &app.App{
			Name:     appInstance.GetName(),
			Platform: appInstance.GetPlatform(),
			Pool:     opts.Name,
			Router:   "fake",
		}
		err = storage.Apps().Insert(appStruct)
		c.Assert(err, check.IsNil)
		err = s.p.Provision(appStruct)
		c.Assert(err, check.IsNil)
	}
	values := url.Values{
		"pool":                   []string{"pool2"},
		"restart":                []string{"true"},
		"Driver":                 []string{"awslogs"},
		"LogOpts.awslogs-region": []string{"sa-east1"},
	}
	recorder := httptest.NewRecorder()
	reader := strings.NewReader(values.Encode())
	request, err := http.NewRequest("POST", "/docker/logs", reader)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	responseParts := strings.Split(recorder.Body.String(), "\n")
	c.Assert(responseParts, check.HasLen, 17)
	c.Assert(responseParts[0], check.Equals, "{\"Message\":\"Log config successfully updated.\\n\"}")
	c.Assert(responseParts[1], check.Equals, "{\"Message\":\"Restarting 2 applications: [app2, app3]\\n\"}")
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	entries, err := container.LogLoadAll()
	c.Assert(err, check.IsNil)
	c.Assert(entries, check.DeepEquals, map[string]container.DockerLogConfig{
		"":      {},
		"pool2": {DockerLogConfig: types.DockerLogConfig{Driver: "awslogs", LogOpts: map[string]string{"awslogs-region": "sa-east1"}}},
	})
}
// TestDockerLogsInfoHandler checks GET /docker/logs: first with only the
// empty default config, then again after saving a syslog config for "p1".
func (s *HandlersSuite) TestDockerLogsInfoHandler(c *check.C) {
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("GET", "/docker/logs", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/json")
	var conf map[string]container.DockerLogConfig
	err = json.Unmarshal(recorder.Body.Bytes(), &conf)
	c.Assert(err, check.IsNil)
	c.Assert(conf, check.DeepEquals, map[string]container.DockerLogConfig{
		"": {},
	})
	newConf := container.DockerLogConfig{DockerLogConfig: types.DockerLogConfig{Driver: "syslog"}}
	err = newConf.Save("p1")
	c.Assert(err, check.IsNil)
	request, err = http.NewRequest("GET", "/docker/logs", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	recorder = httptest.NewRecorder()
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	var conf2 map[string]container.DockerLogConfig
	err = json.Unmarshal(recorder.Body.Bytes(), &conf2)
	c.Assert(err, check.IsNil)
	c.Assert(conf2, check.DeepEquals, map[string]container.DockerLogConfig{
		"":   {},
		"p1": {DockerLogConfig: types.DockerLogConfig{Driver: "syslog", LogOpts: map[string]string{}}},
	})
}
|
package main
import (
"eaciit/gdrj/model"
"eaciit/gdrj/modules"
"os"
"github.com/eaciit/dbox"
"github.com/eaciit/orm/v1"
"github.com/eaciit/toolkit"
// "strings"
"time"
)
// Shared state for the migration run: the db connection, run timestamp,
// and the master-data cache filled by prepmastercalc.
var conn dbox.IConnection
var count int

var (
	t0                          time.Time
	fiscalyear, iscount, scount int
	data                        map[string]float64
	masters                     = toolkit.M{}
)
// setinitialconnection opens the shared db_godrej connection and binds it
// to the gdrj model layer; any failure terminates the process.
func setinitialconnection() {
	var err error
	conn, err = modules.GetDboxIConnection("db_godrej")
	if err != nil {
		// NOTE(review): message reads oddly — probably meant
		// "Initial connection error"; left unchanged deliberately.
		toolkit.Println("Initial connection found : ", err)
		os.Exit(1)
	}
	err = gdrj.SetDb(conn)
	if err != nil {
		toolkit.Println("Initial connection found : ", err)
		os.Exit(1)
	}
}
// plalloc accumulates P&L allocation figures for a single fiscal year:
// overall totals plus per-channel breakdowns and derived ratios.
type plalloc struct {
	Key string // fiscal year identifier
	/*
		Current float64
		Expect float64
		Total float64
	*/
	TotalSales    float64            // sum of PL8A over all rows of the fiscal year
	TotalValue    float64            // sum of PL7A over all rows
	ChannelSales  map[string]float64 // PL8A summed per report channel
	ChannelValue  map[string]float64 // PL7A summed per report channel
	ExpectedValue map[string]float64 // Ratio[c] * TotalValue per channel
	Ratio         map[string]float64 // ChannelSales[c] / TotalSales per channel
}
// plallocs maps fiscal year -> accumulated allocation; rebuilt per table by buildRatio.
var plallocs = map[string]*plalloc{}
// buildRatio scans every row of table tn and, per fiscal year, accumulates
// sales (PL8A) and value (PL7A) totals broken down by report channel; it
// then derives each channel's sales ratio and expected value share.
// Results are stored in the package-level plallocs map, which is reset at
// the start of each call.
func buildRatio(tn string) error {
	plallocs = map[string]*plalloc{}
	rconn, _ := modules.GetDboxIConnection("db_godrej")
	defer rconn.Close()
	//totalBlanks := map[string]float64{}
	ctrx, e := rconn.NewQuery().From(tn).
		Select().Cursor(nil)
	if e != nil {
		return e
	}
	defer ctrx.Close()
	count := ctrx.Count()
	i := 0
	for {
		mtrx := toolkit.M{}
		if e = ctrx.Fetch(&mtrx, 1, false); e != nil {
			break
		}
		i++
		toolkit.Printfn("Ratio 1: %d of %d in %s",
			i, count, time.Since(t0).String())
		key := mtrx.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mtrx.GetFloat64("PL8A")
		value := mtrx.GetFloat64("PL7A")
		falloc := plallocs[fiscal]
		if falloc == nil {
			falloc = new(plalloc)
			falloc.Ratio = map[string]float64{}
			falloc.ChannelValue = map[string]float64{}
			falloc.ChannelSales = map[string]float64{}
			falloc.ExpectedValue = map[string]float64{}
		}
		falloc.Key = fiscal
		falloc.TotalSales += sales
		falloc.TotalValue += value
		// Idiom: += instead of x = x + y (same behavior).
		falloc.ChannelSales[channel] += sales
		falloc.ChannelValue[channel] += value
		plallocs[fiscal] = falloc
	}
	// Second pass: derive ratios once all rows are accumulated.
	// Idiom fix: "for c := range" drops the redundant blank identifier.
	for _, falloc := range plallocs {
		for c := range falloc.ChannelSales {
			falloc.Ratio[c] = toolkit.Div(falloc.ChannelSales[c], falloc.TotalSales)
			falloc.ExpectedValue[c] = falloc.Ratio[c] * falloc.TotalValue
		}
	}
	return nil
}
// main builds per-channel ratios from the salespls-summary table and then
// rewrites each row's PL7A (and derived sums) according to those ratios.
func main() {
	t0 = time.Now()
	setinitialconnection()
	defer gdrj.CloseDb()
	prepmastercalc()
	toolkit.Println("Start data query...")
	tablenames := []string{
		"salespls-summary"}
	for _, tn := range tablenames {
		e := buildRatio(tn)
		if e != nil {
			toolkit.Printfn("Build ratio error: %s - %s", tn, e.Error())
			return
		}
		e = processTable(tn)
		if e != nil {
			toolkit.Printfn("Process table error: %s - %s", tn, e.Error())
			return
		}
	}
}
// processTable re-reads table tn row by row, reallocates PL7A using the
// ratios computed by buildRatio, forces the customer group to "RD",
// recomputes the derived sums via CalcSum, and saves each row back.
// Must run after buildRatio has populated plallocs for the same table.
func processTable(tn string) error {
	cursor, _ := conn.NewQuery().From(tn).Select().Cursor(nil)
	defer cursor.Close()
	count := cursor.Count()
	i := 0
	for {
		mr := toolkit.M{}
		ef := cursor.Fetch(&mr, 1, false)
		if ef != nil {
			break
		}
		i++
		toolkit.Printfn("Processing %s, %d of %d in %s",
			tn, i, count, time.Since(t0).String())
		key := mr.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mr.GetFloat64("PL8A")
		value := mr.GetFloat64("PL7A")
		newv := value
		// NOTE(review): falloc is nil if buildRatio never saw this fiscal
		// year, which would panic below — presumably impossible because
		// both passes read the same table; verify.
		falloc := plallocs[fiscal]
		if channel == "RD" {
			// RD rows are scaled by sales share. NOTE(review): divides by
			// ChannelSales[channel]; a zero total would yield Inf/NaN.
			newv = sales * falloc.ExpectedValue[channel] / falloc.ChannelSales[channel]
		} else {
			if value != 0 {
				newv = value * falloc.ExpectedValue[channel] / falloc.ChannelValue[channel]
			}
		}
		key.Set("customer_customergroupname", "RD")
		key.Set("customer_customergroup", "RD")
		mr.Set("key", key)
		mr.Set("PL7A", newv)
		mr = CalcSum(mr)
		esave := conn.NewQuery().From(tn).Save().Exec(toolkit.M{}.Set("data", mr))
		if esave != nil {
			return esave
		}
	}
	return nil
}
// CalcSum recomputes the derived P&L summary fields of one row. It sums
// the raw PL* account fields (classified via the plmodel master's
// PLHeader1/PLHeader2 groupings), derives margins/EBIT/EBITDA figures,
// writes them back into tkm under their PL* codes, and returns tkm.
// Fields listed in exclude are themselves derived, so they are skipped
// when accumulating to avoid double counting.
func CalcSum(tkm toolkit.M) toolkit.M {
	var netsales, cogs, grossmargin, sellingexpense,
		sga, opincome, directexpense, indirectexpense,
		royaltiestrademark, advtpromoexpense, operatingexpense,
		freightexpense, nonoprincome, ebt, taxexpense,
		percentpbt, eat, totdepreexp, damagegoods, ebitda, ebitdaroyalties, ebitsga,
		grosssales, discount, advexp, promoexp, spgexp float64
	exclude := []string{"PL8A", "PL14A", "PL74A", "PL26A", "PL32A", "PL39A", "PL41A", "PL44A",
		"PL74B", "PL74C", "PL32B", "PL94B", "PL94C", "PL39B", "PL41B", "PL41C", "PL44B", "PL44C", "PL44D", "PL44E",
		"PL44F", "PL6A", "PL0", "PL28", "PL29A", "PL31"}
	//"PL94A",
	plmodels := masters.Get("plmodel").(map[string]*gdrj.PLModel)
	// inexclude reports whether field f is one of the derived fields above.
	inexclude := func(f string) bool {
		for _, v := range exclude {
			if v == f {
				return true
			}
		}
		return false
	}
	for k, v := range tkm {
		if k == "_id" {
			continue
		}
		if inexclude(k) {
			continue
		}
		// arrk := strings.Split(k, "_")
		plmodel, exist := plmodels[k]
		if !exist {
			//toolkit.Println(k)
			continue
		}
		Amount := toolkit.ToFloat64(v, 6, toolkit.RoundingAuto)
		// PLHeader1
		// PLHeader2
		// PLHeader3
		// switch v.Group1 {
		switch plmodel.PLHeader1 {
		case "Net Sales":
			netsales += Amount
		case "Direct Expense":
			directexpense += Amount
		case "Indirect Expense":
			indirectexpense += Amount
		case "Freight Expense":
			freightexpense += Amount
		case "Royalties & Trademark Exp":
			royaltiestrademark += Amount
		case "Advt & Promo Expenses":
			advtpromoexpense += Amount
		case "G&A Expenses":
			sga += Amount
		case "Non Operating (Income) / Exp":
			nonoprincome += Amount
		case "Tax Expense":
			taxexpense += Amount
		case "Total Depreciation Exp":
			if plmodel.PLHeader2 == "Damaged Goods" {
				damagegoods += Amount
			} else {
				totdepreexp += Amount
			}
		}
		// switch v.Group2 {
		switch plmodel.PLHeader2 {
		case "Gross Sales":
			grosssales += Amount
		case "Discount":
			discount += Amount
		case "Advertising Expenses":
			advexp += Amount
		case "Promotions Expenses":
			promoexp += Amount
		case "SPG Exp / Export Cost":
			spgexp += Amount
		}
	}
	// Derived figures. Additions here rely on expenses being stored as
	// negative amounts — presumably the source data follows that sign
	// convention; verify against the loaded plmodel data.
	cogs = directexpense + indirectexpense
	grossmargin = netsales + cogs
	sellingexpense = freightexpense + royaltiestrademark + advtpromoexpense
	operatingexpense = sellingexpense + sga
	opincome = grossmargin + operatingexpense
	ebt = opincome + nonoprincome //asume nonopriceincome already minus
	percentpbt = 0
	if ebt != 0 {
		percentpbt = taxexpense / ebt * 100
	}
	eat = ebt + taxexpense
	ebitda = totdepreexp + damagegoods + opincome
	ebitdaroyalties = ebitda - royaltiestrademark
	ebitsga = opincome - sga
	ebitsgaroyalty := ebitsga - royaltiestrademark
	tkm.Set("PL0", grosssales)
	tkm.Set("PL6A", discount)
	tkm.Set("PL8A", netsales)
	tkm.Set("PL14A", directexpense)
	tkm.Set("PL74A", indirectexpense)
	tkm.Set("PL26A", royaltiestrademark)
	tkm.Set("PL32A", advtpromoexpense)
	tkm.Set("PL94A", sga)
	tkm.Set("PL39A", nonoprincome)
	tkm.Set("PL41A", taxexpense)
	tkm.Set("PL44A", totdepreexp)
	tkm.Set("PL28", advexp)
	tkm.Set("PL29A", promoexp)
	tkm.Set("PL31", spgexp)
	tkm.Set("PL74B", cogs)
	tkm.Set("PL74C", grossmargin)
	tkm.Set("PL32B", sellingexpense)
	tkm.Set("PL94B", operatingexpense)
	tkm.Set("PL94C", opincome)
	tkm.Set("PL39B", ebt)
	tkm.Set("PL41B", percentpbt)
	tkm.Set("PL41C", eat)
	tkm.Set("PL44B", opincome)
	tkm.Set("PL44C", ebitda)
	tkm.Set("PL44D", ebitdaroyalties)
	tkm.Set("PL44E", ebitsga)
	tkm.Set("PL44F", ebitsgaroyalty)
	return tkm
}
// buildmap streams every record matched by filter into holder.
// fnModel produces a fresh model instance per fetch and fnIter folds
// each fetched object into holder; the populated holder is returned.
// A cursor error aborts the whole process (exit code 100).
func buildmap(holder interface{},
	fnModel func() orm.IModel,
	filter *dbox.Filter,
	fnIter func(holder interface{}, obj interface{})) interface{} {
	cursor, err := gdrj.Find(fnModel(), filter, nil)
	if err != nil {
		toolkit.Printfn("Cursor Error: %s", err.Error())
		os.Exit(100)
	}
	defer cursor.Close()
	for {
		record := fnModel()
		if ef := cursor.Fetch(record, 1, false); ef != nil {
			break
		}
		fnIter(holder, record)
	}
	return holder
}
// prepmastercalc preloads the PL model master data into the global
// masters map under the "plmodel" key, indexed by PL model ID.
func prepmastercalc() {
	toolkit.Println("--> PL MODEL")
	newModel := func() orm.IModel { return new(gdrj.PLModel) }
	collect := func(holder, obj interface{}) {
		target := holder.(map[string]*gdrj.PLModel)
		model := obj.(*gdrj.PLModel)
		target[model.ID] = model
	}
	plmodels := buildmap(map[string]*gdrj.PLModel{}, newModel, nil, collect)
	masters.Set("plmodel", plmodels.(map[string]*gdrj.PLModel))
}
Apply only for the MT, GT and RD channels.
package main
import (
"eaciit/gdrj/model"
"eaciit/gdrj/modules"
"os"
"github.com/eaciit/dbox"
"github.com/eaciit/orm/v1"
"github.com/eaciit/toolkit"
// "strings"
"time"
)
// conn is the shared database connection used by every query below.
var conn dbox.IConnection

// count is a package-level counter.
// NOTE(review): not referenced in this chunk — confirm before removing.
var count int

var (
	// t0 marks process start; used for progress/elapsed-time output.
	t0 time.Time
	// NOTE(review): fiscalyear, iscount, scount and data are not
	// referenced in this chunk — confirm usage elsewhere.
	fiscalyear, iscount, scount int
	data                        map[string]float64
	// masters caches master data loaded at startup (e.g. "plmodel").
	masters = toolkit.M{}
)
// setinitialconnection opens the shared db_godrej connection and wires
// it into the gdrj model layer. Any failure terminates the process.
func setinitialconnection() {
	var err error
	conn, err = modules.GetDboxIConnection("db_godrej")
	if err != nil {
		// Was "Initial connection found : " — misleading for an error path.
		toolkit.Println("Initial connection error: ", err)
		os.Exit(1)
	}
	if err = gdrj.SetDb(conn); err != nil {
		toolkit.Println("Initial connection error: ", err)
		os.Exit(1)
	}
}
// plalloc accumulates, for one fiscal year, the sales and value totals
// used to reallocate value proportionally across the GT/MT/RD channels.
type plalloc struct {
	Key string // fiscal year identifier
	/*
		Current float64
		Expect float64
		Total float64
	*/
	TotalSales    float64            // sum of PL8A over participating channels
	TotalValue    float64            // sum of PL7A over participating channels
	ChannelSales  map[string]float64 // PL8A accumulated per channel
	ChannelValue  map[string]float64 // PL7A accumulated per channel
	ExpectedValue map[string]float64 // TotalValue spread by each channel's sales ratio
	Ratio         map[string]float64 // channel share of TotalSales
}

// plallocs maps fiscal year -> allocation data; rebuilt by buildRatio.
var plallocs = map[string]*plalloc{}
// buildRatio scans the summary table tn and, per fiscal year, totals
// net sales (PL8A) and value (PL7A) for the GT, MT and RD report
// channels. It then derives each channel's sales ratio and expected
// value, storing everything in the package-level plallocs map (which
// is reset on every call).
func buildRatio(tn string) error {
	plallocs = map[string]*plalloc{}
	rconn, _ := modules.GetDboxIConnection("db_godrej")
	defer rconn.Close()
	//totalBlanks := map[string]float64{}
	ctrx, e := rconn.NewQuery().From(tn).
		Select().Cursor(nil)
	if e != nil {
		return e
	}
	defer ctrx.Close()
	count := ctrx.Count()
	i := 0
	for {
		mtrx := toolkit.M{}
		// Fetch errors (including end-of-cursor) terminate the loop.
		if e = ctrx.Fetch(&mtrx, 1, false); e != nil {
			break
		}
		i++
		toolkit.Printfn("Ratio 1: %d of %d in %s",
			i, count, time.Since(t0).String())
		key := mtrx.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mtrx.GetFloat64("PL8A") // net sales
		value := mtrx.GetFloat64("PL7A") // value being reallocated
		// Only the GT, MT and RD channels participate in the allocation.
		if toolkit.HasMember([]string{"GT", "MT", "RD"}, channel) {
			falloc := plallocs[fiscal]
			if falloc == nil {
				// First row for this fiscal year: create the accumulator.
				falloc = new(plalloc)
				falloc.Ratio = map[string]float64{}
				falloc.ChannelValue = map[string]float64{}
				falloc.ChannelSales = map[string]float64{}
				falloc.ExpectedValue = map[string]float64{}
			}
			falloc.Key = fiscal
			falloc.TotalSales += sales
			falloc.TotalValue += value
			falloc.ChannelSales[channel] = falloc.ChannelSales[channel] + sales
			falloc.ChannelValue[channel] = falloc.ChannelValue[channel] + value
			plallocs[fiscal] = falloc
		}
	}
	// Second pass: ratio = channel share of sales; expected value spreads
	// the fiscal year's total value proportionally to that share.
	for _, falloc := range plallocs {
		for c, _ := range falloc.ChannelSales {
			falloc.Ratio[c] = toolkit.Div(falloc.ChannelSales[c], falloc.TotalSales)
			falloc.ExpectedValue[c] = falloc.Ratio[c] * falloc.TotalValue
		}
	}
	return nil
}
// main wires up the connection, preloads the master data, then builds
// the channel ratios and rewrites each summary table accordingly.
func main() {
	t0 = time.Now()
	setinitialconnection()
	defer gdrj.CloseDb()
	prepmastercalc()
	toolkit.Println("Start data query...")
	for _, tn := range []string{"salespls-summary"} {
		if e := buildRatio(tn); e != nil {
			toolkit.Printfn("Build ratio error: %s - %s", tn, e.Error())
			return
		}
		if e := processTable(tn); e != nil {
			toolkit.Printfn("Process table error: %s - %s", tn, e.Error())
			return
		}
	}
}
// processTable rewrites every row of the summary table tn: each row is
// re-keyed to the RD customer group and its PL7A value is reallocated
// using the fiscal-year channel ratios prepared by buildRatio, after
// which all derived P&L fields are recomputed (CalcSum) and the row is
// saved back. Returns the first save error, nil otherwise.
func processTable(tn string) error {
	cursor, _ := conn.NewQuery().From(tn).Select().Cursor(nil)
	defer cursor.Close()
	count := cursor.Count()
	i := 0
	for {
		mr := toolkit.M{}
		if ef := cursor.Fetch(&mr, 1, false); ef != nil {
			break
		}
		i++
		toolkit.Printfn("Processing %s, %d of %d in %s",
			tn, i, count, time.Since(t0).String())
		key := mr.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mr.GetFloat64("PL8A")
		value := mr.GetFloat64("PL7A")
		newv := value
		// Guard: buildRatio only records fiscal years that had GT/MT/RD
		// rows; a missing entry previously caused a nil-pointer panic here.
		if falloc := plallocs[fiscal]; falloc != nil {
			if channel == "RD" {
				// toolkit.Div (as used in buildRatio) avoids Inf/NaN when the
				// channel recorded no sales.
				newv = sales * toolkit.Div(falloc.ExpectedValue[channel], falloc.ChannelSales[channel])
			} else if value != 0 {
				newv = value * toolkit.Div(falloc.ExpectedValue[channel], falloc.ChannelValue[channel])
			}
		}
		key.Set("customer_customergroupname", "RD")
		key.Set("customer_customergroup", "RD")
		mr.Set("key", key)
		mr.Set("PL7A", newv)
		mr = CalcSum(mr)
		esave := conn.NewQuery().From(tn).Save().Exec(toolkit.M{}.Set("data", mr))
		if esave != nil {
			return esave
		}
	}
	return nil
}
// CalcSum recomputes every derived P&L aggregate for one summary record.
// It classifies each raw PL account amount in tkm via the PL model
// master (PLHeader1/PLHeader2), accumulates the class totals, derives
// the roll-up figures (COGS, margins, operating income, EBT/EAT and
// the EBITDA variants) and writes them back into tkm under fixed PL
// codes. The mutated map is returned.
func CalcSum(tkm toolkit.M) toolkit.M {
	var netsales, cogs, grossmargin, sellingexpense,
		sga, opincome, directexpense, indirectexpense,
		royaltiestrademark, advtpromoexpense, operatingexpense,
		freightexpense, nonoprincome, ebt, taxexpense,
		percentpbt, eat, totdepreexp, damagegoods, ebitda, ebitdaroyalties, ebitsga,
		grosssales, discount, advexp, promoexp, spgexp float64
	// Derived output codes are excluded on input so results stored by a
	// previous run do not feed back into the new totals.
	exclude := []string{"PL8A", "PL14A", "PL74A", "PL26A", "PL32A", "PL39A", "PL41A", "PL44A",
		"PL74B", "PL74C", "PL32B", "PL94B", "PL94C", "PL39B", "PL41B", "PL41C", "PL44B", "PL44C", "PL44D", "PL44E",
		"PL44F", "PL6A", "PL0", "PL28", "PL29A", "PL31"}
	//"PL94A",
	plmodels := masters.Get("plmodel").(map[string]*gdrj.PLModel)
	inexclude := func(f string) bool {
		for _, v := range exclude {
			if v == f {
				return true
			}
		}
		return false
	}
	for k, v := range tkm {
		if k == "_id" {
			continue
		}
		if inexclude(k) {
			continue
		}
		// arrk := strings.Split(k, "_")
		// Fields without a PL model entry are not P&L accounts; skip them.
		plmodel, exist := plmodels[k]
		if !exist {
			//toolkit.Println(k)
			continue
		}
		Amount := toolkit.ToFloat64(v, 6, toolkit.RoundingAuto)
		// PLHeader1
		// PLHeader2
		// PLHeader3
		// Classify by top-level P&L header.
		// switch v.Group1 {
		switch plmodel.PLHeader1 {
		case "Net Sales":
			netsales += Amount
		case "Direct Expense":
			directexpense += Amount
		case "Indirect Expense":
			indirectexpense += Amount
		case "Freight Expense":
			freightexpense += Amount
		case "Royalties & Trademark Exp":
			royaltiestrademark += Amount
		case "Advt & Promo Expenses":
			advtpromoexpense += Amount
		case "G&A Expenses":
			sga += Amount
		case "Non Operating (Income) / Exp":
			nonoprincome += Amount
		case "Tax Expense":
			taxexpense += Amount
		case "Total Depreciation Exp":
			// Damaged-goods depreciation is tracked separately for EBITDA.
			if plmodel.PLHeader2 == "Damaged Goods" {
				damagegoods += Amount
			} else {
				totdepreexp += Amount
			}
		}
		// Second-level classification (independent of the first switch).
		// switch v.Group2 {
		switch plmodel.PLHeader2 {
		case "Gross Sales":
			grosssales += Amount
		case "Discount":
			discount += Amount
		case "Advertising Expenses":
			advexp += Amount
		case "Promotions Expenses":
			promoexp += Amount
		case "SPG Exp / Export Cost":
			spgexp += Amount
		}
	}
	// Roll-ups. NOTE(review): expenses appear to be stored with negative
	// sign (totals use "+" where accounting would subtract) — confirm the
	// sign convention against the source data.
	cogs = directexpense + indirectexpense
	grossmargin = netsales + cogs
	sellingexpense = freightexpense + royaltiestrademark + advtpromoexpense
	operatingexpense = sellingexpense + sga
	opincome = grossmargin + operatingexpense
	ebt = opincome + nonoprincome //asume nonopriceincome already minus
	percentpbt = 0
	if ebt != 0 {
		// Tax as a percentage of earnings before tax; guarded against /0.
		percentpbt = taxexpense / ebt * 100
	}
	eat = ebt + taxexpense
	ebitda = totdepreexp + damagegoods + opincome
	ebitdaroyalties = ebitda - royaltiestrademark
	ebitsga = opincome - sga
	ebitsgaroyalty := ebitsga - royaltiestrademark
	// Persist the derived figures under their fixed PL codes.
	tkm.Set("PL0", grosssales)
	tkm.Set("PL6A", discount)
	tkm.Set("PL8A", netsales)
	tkm.Set("PL14A", directexpense)
	tkm.Set("PL74A", indirectexpense)
	tkm.Set("PL26A", royaltiestrademark)
	tkm.Set("PL32A", advtpromoexpense)
	tkm.Set("PL94A", sga)
	tkm.Set("PL39A", nonoprincome)
	tkm.Set("PL41A", taxexpense)
	tkm.Set("PL44A", totdepreexp)
	tkm.Set("PL28", advexp)
	tkm.Set("PL29A", promoexp)
	tkm.Set("PL31", spgexp)
	tkm.Set("PL74B", cogs)
	tkm.Set("PL74C", grossmargin)
	tkm.Set("PL32B", sellingexpense)
	tkm.Set("PL94B", operatingexpense)
	tkm.Set("PL94C", opincome)
	tkm.Set("PL39B", ebt)
	tkm.Set("PL41B", percentpbt)
	tkm.Set("PL41C", eat)
	tkm.Set("PL44B", opincome)
	tkm.Set("PL44C", ebitda)
	tkm.Set("PL44D", ebitdaroyalties)
	tkm.Set("PL44E", ebitsga)
	tkm.Set("PL44F", ebitsgaroyalty)
	return tkm
}
// buildmap streams every record matched by filter into holder.
// fnModel produces a fresh model instance per fetch and fnIter folds
// each fetched object into holder; the populated holder is returned.
// A cursor error aborts the whole process (exit code 100).
func buildmap(holder interface{},
	fnModel func() orm.IModel,
	filter *dbox.Filter,
	fnIter func(holder interface{}, obj interface{})) interface{} {
	cursor, err := gdrj.Find(fnModel(), filter, nil)
	if err != nil {
		toolkit.Printfn("Cursor Error: %s", err.Error())
		os.Exit(100)
	}
	defer cursor.Close()
	for {
		record := fnModel()
		if ef := cursor.Fetch(record, 1, false); ef != nil {
			break
		}
		fnIter(holder, record)
	}
	return holder
}
// prepmastercalc preloads the PL model master data into the global
// masters map under the "plmodel" key, indexed by PL model ID.
func prepmastercalc() {
	toolkit.Println("--> PL MODEL")
	newModel := func() orm.IModel { return new(gdrj.PLModel) }
	collect := func(holder, obj interface{}) {
		target := holder.(map[string]*gdrj.PLModel)
		model := obj.(*gdrj.PLModel)
		target[model.ID] = model
	}
	plmodels := buildmap(map[string]*gdrj.PLModel{}, newModel, nil, collect)
	masters.Set("plmodel", plmodels.(map[string]*gdrj.PLModel))
}
|
// Package db to connect to mongodb
package db
import (
"errors"
"fmt"
"github.com/MG-RAST/AWE/lib/conf"
"github.com/MG-RAST/AWE/vendor/github.com/MG-RAST/golib/mgo"
"time"
)
const (
	// DbTimeout bounds the initial dial to the MongoDB server(s).
	DbTimeout = time.Duration(time.Second * 10)
)

var (
	// Connection is the shared, package-wide MongoDB connection,
	// populated by Initialize.
	Connection connection
)
// connection bundles a live mgo session with the selected database.
// A single shared instance is exposed via the package-level Connection.
type connection struct {
	// NOTE(review): dbname, username and password are never assigned in
	// this file — confirm whether they are still needed.
	dbname   string
	username string
	password string
	Session  *mgo.Session  // dialed session; owns the underlying sockets
	DB       *mgo.Database // handle on conf.MONGODB_DATABASE
}
// Initialize dials MongoDB at conf.MONGODB_HOST (bounded by DbTimeout),
// selects the configured database, authenticates when credentials are
// configured, and publishes the result in the package-level Connection.
// Returns an error when no server is reachable or authentication fails.
func Initialize() (err error) {
	c := connection{}
	s, err := mgo.DialWithTimeout(conf.MONGODB_HOST, DbTimeout)
	if err != nil {
		e := errors.New(fmt.Sprintf("no reachable mongodb server(s) at %s", conf.MONGODB_HOST))
		return e
	}
	c.Session = s
	c.DB = c.Session.DB(conf.MONGODB_DATABASE)
	if conf.MONGODB_USER != "" && conf.MONGODB_PASSWD != "" {
		// The login error was previously discarded, letting the process
		// continue with an unauthenticated session; fail fast instead.
		if err = c.DB.Login(conf.MONGODB_USER, conf.MONGODB_PASSWD); err != nil {
			return fmt.Errorf("mongodb login failed for user %s: %s", conf.MONGODB_USER, err.Error())
		}
	}
	Connection = c
	return
}
// Drop removes the entire configured database. Irreversible.
func Drop() error {
	database := Connection.DB
	return database.DropDatabase()
}
The 10-second timeout prevents the AWE server from recovering existing jobs on its first restart when MongoDB was restarted as well: recovery only succeeds once the initial job-recovery query has been cached, which in practice requires multiple restarts. Increasing the timeout should fix the issue; we may want to remove this timeout entirely.
// Package db to connect to mongodb
package db
import (
"errors"
"fmt"
"github.com/MG-RAST/AWE/lib/conf"
"github.com/MG-RAST/AWE/vendor/github.com/MG-RAST/golib/mgo"
"time"
)
const (
	// DbTimeout bounds the initial dial to the MongoDB server(s).
	// Raised from 10s: a short dial timeout kept the AWE server from
	// recovering existing jobs on its first restart when MongoDB had
	// also been restarted.
	DbTimeout = time.Duration(time.Second * 1200)
)

var (
	// Connection is the shared, package-wide MongoDB connection,
	// populated by Initialize.
	Connection connection
)
// connection bundles a live mgo session with the selected database.
// A single shared instance is exposed via the package-level Connection.
type connection struct {
	// NOTE(review): dbname, username and password are never assigned in
	// this file — confirm whether they are still needed.
	dbname   string
	username string
	password string
	Session  *mgo.Session  // dialed session; owns the underlying sockets
	DB       *mgo.Database // handle on conf.MONGODB_DATABASE
}
// Initialize dials MongoDB at conf.MONGODB_HOST (bounded by DbTimeout),
// selects the configured database, authenticates when credentials are
// configured, and publishes the result in the package-level Connection.
// Returns an error when no server is reachable or authentication fails.
func Initialize() (err error) {
	c := connection{}
	s, err := mgo.DialWithTimeout(conf.MONGODB_HOST, DbTimeout)
	if err != nil {
		e := errors.New(fmt.Sprintf("no reachable mongodb server(s) at %s", conf.MONGODB_HOST))
		return e
	}
	c.Session = s
	c.DB = c.Session.DB(conf.MONGODB_DATABASE)
	if conf.MONGODB_USER != "" && conf.MONGODB_PASSWD != "" {
		// The login error was previously discarded, letting the process
		// continue with an unauthenticated session; fail fast instead.
		if err = c.DB.Login(conf.MONGODB_USER, conf.MONGODB_PASSWD); err != nil {
			return fmt.Errorf("mongodb login failed for user %s: %s", conf.MONGODB_USER, err.Error())
		}
	}
	Connection = c
	return
}
// Drop removes the entire configured database. Irreversible.
func Drop() error {
	database := Connection.DB
	return database.DropDatabase()
}
|
package data
import (
"bytes"
"time"
"appengine"
"appengine/datastore"
"bacaberita/parser"
"bacaberita/utils"
)
// Feed is the datastore entity for one subscribed feed, keyed by its
// URL (see NewKey).
type Feed struct {
	Title       string    // channel title from the RSS document
	Link        string    // channel link
	Description string    // channel description
	Date        time.Time // channel-level publication date
	ImageUrl    string    // channel image fields, when present
	ImageTitle  string
	ImageLink   string
	Url         string    // source URL; doubles as the datastore key ID
	Updated     time.Time // set on each successful UpdateFeed
	Created     time.Time // set once by RegisterFeed
}
// FeedItem is the datastore entity for a single item of a Feed.
// NOTE(review): Feed/Created/Updated/Content are not populated in this
// chunk — confirm their writers elsewhere.
type FeedItem struct {
	Title       string
	Link        string
	Guid        string
	Date        time.Time
	Description string
	MediaUrl    string // enclosure URL, when the item carries media
	MediaLength int    // enclosure length as declared by the feed
	MediaType   string // enclosure MIME type
	Feed        Feed   // embedded copy of the parent feed entity
	Created     time.Time
	Updated     time.Time
	Content     string
}
// RegisterFeed returns the existing Feed entity for url, or creates and
// stores a fresh one when none exists yet.
func RegisterFeed(c appengine.Context, url string) (*datastore.Key, *Feed, error) {
	feed := &Feed{Url: url, Created: time.Now()}
	key := feed.NewKey(c)
	switch err := datastore.Get(c, key, feed); err {
	case nil:
		// Already registered; return the stored entity untouched.
		return key, feed, nil
	case datastore.ErrNoSuchEntity:
		key, err = datastore.Put(c, key, feed)
		return key, feed, err
	default:
		return key, feed, err
	}
}
// GetFeed loads the Feed entity stored under url.
func GetFeed(c appengine.Context, url string) (*Feed, error) {
	feed := &Feed{Url: url}
	err := datastore.Get(c, feed.NewKey(c), feed)
	return feed, err
}
// UpdateFeed downloads the feed's URL, parses it as RSS, refreshes the
// entity fields and persists the result. Returns the entity key on
// success.
func UpdateFeed(c appengine.Context, feed *Feed) (*datastore.Key, error) {
	content, err := utils.Download(c, feed.Url)
	if err != nil {
		return nil, err
	}
	r := bytes.NewBuffer(content)
	data, err := parser.ParseRSS(r)
	if err != nil {
		return nil, err
	}
	feed.UpdateFromParser(data)
	feed.Updated = time.Now()
	key := feed.NewKey(c)
	// Previously the Put error was discarded, silently losing updates.
	if _, err = datastore.Put(c, key, feed); err != nil {
		return nil, err
	}
	return key, nil
}
// UpdateFromParser copies every non-nil field from the parsed feed data
// onto the entity, leaving absent fields untouched.
func (feed *Feed) UpdateFromParser(data *parser.Feed) {
	setStr := func(dst *string, src *string) {
		if src != nil {
			*dst = *src
		}
	}
	setStr(&feed.Title, data.Title)
	setStr(&feed.Link, data.Link)
	setStr(&feed.Description, data.Description)
	if data.Date != nil {
		feed.Date = *data.Date
	}
	setStr(&feed.ImageUrl, data.ImageUrl)
	setStr(&feed.ImageTitle, data.ImageTitle)
	setStr(&feed.ImageLink, data.ImageLink)
}
// NewKey builds the datastore key for this feed: kind "Feed" with the
// feed URL as the string ID.
func (feed *Feed) NewKey(c appengine.Context) *datastore.Key {
	const kind = "Feed"
	return datastore.NewKey(c, kind, feed.Url, 0, nil)
}
Insert feed items to the datastore
Ref #16
package data
import (
"bytes"
"fmt"
"time"
"appengine"
"appengine/datastore"
"bacaberita/parser"
"bacaberita/utils"
)
// Feed is the datastore entity for one subscribed feed, keyed by its
// URL (see NewKey).
type Feed struct {
	Title       string    // channel title from the RSS document
	Link        string    // channel link
	Description string    // channel description
	Date        time.Time // channel-level publication date
	ImageUrl    string    // channel image fields, when present
	ImageTitle  string
	ImageLink   string
	Url         string    // source URL; doubles as the datastore key ID
	Updated     time.Time // set on each successful UpdateFeed
	Created     time.Time // set once by RegisterFeed
}
// FeedItem is the datastore entity for a single item of a Feed; keys
// combine hashes of the parent feed ID and the item link (see NewKey).
// NOTE(review): Feed/Created/Updated/Content are not populated in this
// chunk — confirm their writers elsewhere.
type FeedItem struct {
	Title       string
	Link        string
	Guid        string
	Date        time.Time
	Description string
	MediaUrl    string // enclosure URL, when the item carries media
	MediaLength int    // enclosure length as declared by the feed
	MediaType   string // enclosure MIME type
	Feed        Feed   // embedded copy of the parent feed entity
	Created     time.Time
	Updated     time.Time
	Content     string
}
// RegisterFeed returns the existing Feed entity for url, or creates and
// stores a fresh one when none exists yet.
func RegisterFeed(c appengine.Context, url string) (*datastore.Key, *Feed, error) {
	feed := &Feed{Url: url, Created: time.Now()}
	key := feed.NewKey(c)
	switch err := datastore.Get(c, key, feed); err {
	case nil:
		// Already registered; return the stored entity untouched.
		return key, feed, nil
	case datastore.ErrNoSuchEntity:
		key, err = datastore.Put(c, key, feed)
		return key, feed, err
	default:
		return key, feed, err
	}
}
// GetFeed loads the Feed entity stored under url.
func GetFeed(c appengine.Context, url string) (*Feed, error) {
	feed := &Feed{Url: url}
	err := datastore.Get(c, feed.NewKey(c), feed)
	return feed, err
}
// UpdateFeed downloads the feed's URL, parses it as RSS, refreshes the
// entity fields, persists it, and then stores all parsed items under
// the feed's key. Returns the feed key plus any error from the item
// storage step.
func UpdateFeed(c appengine.Context, feed *Feed) (*datastore.Key, error) {
	content, err := utils.Download(c, feed.Url)
	if err != nil {
		return nil, err
	}
	r := bytes.NewBuffer(content)
	data, err := parser.ParseRSS(r)
	if err != nil {
		return nil, err
	}
	feed.UpdateFromParser(data)
	feed.Updated = time.Now()
	key := feed.NewKey(c)
	// Previously the Put error was discarded, silently losing updates.
	if _, err = datastore.Put(c, key, feed); err != nil {
		return nil, err
	}
	err = StoreFeedItems(c, data, key)
	return key, err
}
// StoreFeedItems persists every parsed item under the given feed key.
// Returns the first datastore error, or nil when all items are stored.
func StoreFeedItems(c appengine.Context, data *parser.Feed, parent *datastore.Key) error {
	for _, item := range data.Items {
		feedItem := new(FeedItem)
		feedItem.UpdateFromParser(item)
		key := feedItem.NewKey(c, parent)
		if _, err := datastore.Put(c, key, feedItem); err != nil {
			// %w is only meaningful to fmt.Errorf wrapping; in a logging
			// call it renders as "%!w(...)". Use %v here.
			c.Errorf("Error inserting item: url=%s error=%v", feedItem.Link, err)
			return err
		}
	}
	return nil
}
// UpdateFromParser copies every non-nil field from the parsed feed data
// onto the entity, leaving absent fields untouched.
func (feed *Feed) UpdateFromParser(data *parser.Feed) {
	setStr := func(dst *string, src *string) {
		if src != nil {
			*dst = *src
		}
	}
	setStr(&feed.Title, data.Title)
	setStr(&feed.Link, data.Link)
	setStr(&feed.Description, data.Description)
	if data.Date != nil {
		feed.Date = *data.Date
	}
	setStr(&feed.ImageUrl, data.ImageUrl)
	setStr(&feed.ImageTitle, data.ImageTitle)
	setStr(&feed.ImageLink, data.ImageLink)
}
// NewKey builds the datastore key for this feed: kind "Feed" with the
// feed URL as the string ID.
func (feed *Feed) NewKey(c appengine.Context) *datastore.Key {
	const kind = "Feed"
	return datastore.NewKey(c, kind, feed.Url, 0, nil)
}
// UpdateFromParser copies every non-nil field from the parsed item onto
// the entity, leaving absent fields untouched.
func (item *FeedItem) UpdateFromParser(data *parser.Item) {
	setStr := func(dst *string, src *string) {
		if src != nil {
			*dst = *src
		}
	}
	setStr(&item.Title, data.Title)
	setStr(&item.Link, data.Link)
	setStr(&item.Guid, data.Guid)
	if data.Date != nil {
		item.Date = *data.Date
	}
	setStr(&item.Description, data.Description)
	if data.Media != nil {
		item.MediaUrl = data.Media.Url
		item.MediaLength = data.Media.Length
		item.MediaType = data.Media.Type
	}
}
// NewKey builds the datastore key for this item under parent: kind
// "FeedItem" with an ID combining hashes of the parent feed's string ID
// and the item link.
func (item *FeedItem) NewKey(c appengine.Context, parent *datastore.Key) *datastore.Key {
	parentHash := utils.Sha1(parent.StringID())
	linkHash := utils.Sha1(item.Link)
	id := fmt.Sprintf("%s-%s", parentHash, linkHash)
	return datastore.NewKey(c, "FeedItem", id, 0, parent)
}
|
package model
import (
"fmt"
"time"
"strings"
"encoding/json"
"github.com/boltdb/bolt"
"../cfg"
)
const (
BUCKET_INDEX = "index"
)
// Meta summarizes the blog index: post/page/draft counts plus the
// pagination size and the time the index was last built.
type Meta struct {
	Posts     int       // number of published posts
	PerPage   int       // posts per index page, from cfg "ui.per_page"
	Pages     int       // number of index pages (at least 1 after rebuild)
	Drafts    int       // number of draft posts
	UpdatedAt time.Time // when this metadata was produced
}
// init resets every counter to zero, reloads the page size from
// configuration and stamps the update time.
func (meta *Meta) init() {
	*meta = Meta{
		PerPage:   cfg.Int("ui.per_page"),
		UpdatedAt: time.Now(),
	}
}
// Load populates meta from the persisted "meta" record in the index
// bucket, keeping the freshly initialized defaults when the bucket or
// record is absent.
func (meta *Meta) Load() {
	meta.init()
	db.View(func(tx *bolt.Tx) error {
		// Was []byte([]byte(BUCKET_INDEX)): a redundant double conversion.
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket != nil {
			jsonMeta := bucket.Get([]byte("meta"))
			// Unmarshal failure leaves the defaults in place (best effort).
			json.Unmarshal(jsonMeta, &meta)
		}
		return nil
	})
}
// RebuildIndex rebuilds all derived index data from BUCKET_POSTS:
// per-page post lists, the path->UUID map, per-tag post lists, the
// draft list and the Meta record. A read pass gathers everything, then
// one write transaction replaces the old buckets atomically.
func RebuildIndex() error {
	cfg.Log("Rebuilding index...")
	meta := new(Meta)
	meta.init()
	pathMap := make(map[string]string)
	pageMap := make(map[string][]*Post)
	tagMap := make(map[string][]*Post)
	var draftList []*Post
	if err := db.View(func(tx *bolt.Tx) error {
		bucketPosts := tx.Bucket([]byte(BUCKET_POSTS))
		c := bucketPosts.Cursor()
		currentPage := 0
		pageKey := fmt.Sprintf("page-%d", currentPage)
		// Iterate newest-first so page 0 holds the most recent posts.
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			post := new(Post)
			json.Unmarshal(v, &post)
			if post.Draft {
				meta.Drafts++
				draftList = append(draftList, post)
			} else {
				meta.Posts++
				pathMap[post.Path] = post.UUID
				pageMap[pageKey] = append(pageMap[pageKey], post)
				if len(pageMap[pageKey]) >= meta.PerPage {
					currentPage++
					meta.Pages++
					pageKey = fmt.Sprintf("page-%d", currentPage)
				}
				for _, tag := range post.Tags {
					tagKey := fmt.Sprintf("tag-%s", strings.ToLower(tag))
					tagMap[tagKey] = append(tagMap[tagKey], post)
				}
			}
		}
		return nil
	}); err != nil {
		return err
	}
	if meta.Pages == 0 {
		meta.Pages = 1
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		// Deleting a bucket that does not exist yet (fresh database) used
		// to abort the very first rebuild; tolerate ErrBucketNotFound.
		if err := tx.DeleteBucket([]byte(BUCKET_INDEX)); err != nil && err != bolt.ErrBucketNotFound {
			return err
		}
		bucketIndex, err := tx.CreateBucketIfNotExists([]byte(BUCKET_INDEX))
		if err != nil {
			return err
		}
		if err := tx.DeleteBucket([]byte(BUCKET_MAP)); err != nil && err != bolt.ErrBucketNotFound {
			return err
		}
		bucketMap, err := tx.CreateBucketIfNotExists([]byte(BUCKET_MAP))
		if err != nil {
			return err
		}
		jsonMeta, _ := json.Marshal(meta)
		cfg.Log(string(jsonMeta))
		if err := bucketIndex.Put([]byte("meta"), []byte(jsonMeta)); err != nil {
			return err
		}
		for pageKey, postsPage := range pageMap {
			jsonPage, _ := json.Marshal(postsPage)
			if err := bucketIndex.Put([]byte(pageKey), []byte(jsonPage)); err != nil {
				return err
			}
		}
		jsonDrafts, _ := json.Marshal(draftList)
		if err := bucketIndex.Put([]byte("drafts"), []byte(jsonDrafts)); err != nil {
			return err
		}
		for path, uuid := range pathMap {
			if err := bucketMap.Put([]byte(path), []byte(uuid)); err != nil {
				return err
			}
		}
		for tagKey, postsTag := range tagMap {
			jsonPosts, _ := json.Marshal(postsTag)
			if err := bucketIndex.Put([]byte(tagKey), []byte(jsonPosts)); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		return err
	}
	cfg.Log("Index rebuilt!")
	return nil
}
// GetPostsPage returns the published posts stored for the given page
// number in the index bucket.
func GetPostsPage(page int) ([]Post, error) {
	var posts []Post
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket == nil {
			panic("Bucket index not found!")
		}
		raw := bucket.Get([]byte(fmt.Sprintf("page-%d", page)))
		json.Unmarshal(raw, &posts)
		return nil
	})
	return posts, err
}
// GetTagPosts returns the published posts indexed under the given tag
// (tag keys are lowercased).
func GetTagPosts(tag string) ([]Post, error) {
	var posts []Post
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket == nil {
			panic("Bucket index not found!")
		}
		raw := bucket.Get([]byte(fmt.Sprintf("tag-%s", strings.ToLower(tag))))
		json.Unmarshal(raw, &posts)
		return nil
	})
	return posts, err
}
// GetDraftPosts returns every draft post stored in the index bucket.
func GetDraftPosts() ([]Post, error) {
	var posts []Post
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket == nil {
			panic("Bucket index not found!")
		}
		json.Unmarshal(bucket.Get([]byte("drafts")), &posts)
		return nil
	})
	return posts, err
}
Store tag indexes in a separate bucket.
package model
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/boltdb/bolt"
"../cfg"
)
const (
BUCKET_INDEX = "index"
BUCKET_TAGS = "tags"
)
// Meta summarizes the blog index: post/page/draft counts plus the
// pagination size and the time the index was last built.
type Meta struct {
	Posts     int       // number of published posts
	PerPage   int       // posts per index page, from cfg "ui.per_page"
	Pages     int       // number of index pages (at least 1 after rebuild)
	Drafts    int       // number of draft posts
	UpdatedAt time.Time // when this metadata was produced
}
// init resets every counter to zero, reloads the page size from
// configuration and stamps the update time.
func (meta *Meta) init() {
	*meta = Meta{
		PerPage:   cfg.Int("ui.per_page"),
		UpdatedAt: time.Now(),
	}
}
// Load populates meta from the persisted "meta" record in the index
// bucket, keeping the freshly initialized defaults when the bucket or
// record is absent.
func (meta *Meta) Load() {
	meta.init()
	db.View(func(tx *bolt.Tx) error {
		// Was []byte([]byte(BUCKET_INDEX)): a redundant double conversion.
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket != nil {
			jsonMeta := bucket.Get([]byte("meta"))
			// Unmarshal failure leaves the defaults in place (best effort).
			json.Unmarshal(jsonMeta, &meta)
		}
		return nil
	})
}
// RebuildIndex rebuilds all derived index data from BUCKET_POSTS:
// per-page post lists, the path->UUID map, per-tag post lists (in
// their own bucket), the draft list and the Meta record. A read pass
// gathers everything, then one write transaction replaces the old
// buckets atomically.
func RebuildIndex() error {
	cfg.Log("Rebuilding index...")
	meta := new(Meta)
	meta.init()
	pathMap := make(map[string]string)
	pageMap := make(map[string][]*Post)
	tagMap := make(map[string][]*Post)
	var draftList []*Post
	if err := db.View(func(tx *bolt.Tx) error {
		bucketPosts := tx.Bucket([]byte(BUCKET_POSTS))
		c := bucketPosts.Cursor()
		currentPage := 0
		pageKey := fmt.Sprintf("page-%d", currentPage)
		// Iterate newest-first so page 0 holds the most recent posts.
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			post := new(Post)
			json.Unmarshal(v, &post)
			if post.Draft {
				meta.Drafts++
				draftList = append(draftList, post)
			} else {
				meta.Posts++
				pathMap[post.Path] = post.UUID
				pageMap[pageKey] = append(pageMap[pageKey], post)
				if len(pageMap[pageKey]) >= meta.PerPage {
					currentPage++
					meta.Pages++
					pageKey = fmt.Sprintf("page-%d", currentPage)
				}
				// Tag bucket keys are the lowercased tag names themselves.
				for _, tag := range post.Tags {
					tagMap[strings.ToLower(tag)] = append(tagMap[strings.ToLower(tag)], post)
				}
			}
		}
		return nil
	}); err != nil {
		return err
	}
	if meta.Pages == 0 {
		meta.Pages = 1
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		// DeleteBucket errors are deliberately ignored: the buckets may
		// not exist yet on a fresh database.
		tx.DeleteBucket([]byte(BUCKET_INDEX))
		bucketIndex, err := tx.CreateBucketIfNotExists([]byte(BUCKET_INDEX))
		if err != nil {
			return err
		}
		tx.DeleteBucket([]byte(BUCKET_MAP))
		bucketMap, err := tx.CreateBucketIfNotExists([]byte(BUCKET_MAP))
		if err != nil {
			return err
		}
		tx.DeleteBucket([]byte(BUCKET_TAGS))
		bucketTags, err := tx.CreateBucketIfNotExists([]byte(BUCKET_TAGS))
		if err != nil {
			return err
		}
		jsonMeta, _ := json.Marshal(meta)
		cfg.Log(string(jsonMeta))
		if err := bucketIndex.Put([]byte("meta"), []byte(jsonMeta)); err != nil {
			return err
		}
		for pageKey, postsPage := range pageMap {
			jsonPage, _ := json.Marshal(postsPage)
			if err := bucketIndex.Put([]byte(pageKey), []byte(jsonPage)); err != nil {
				return err
			}
		}
		jsonDrafts, _ := json.Marshal(draftList)
		if err := bucketIndex.Put([]byte("drafts"), []byte(jsonDrafts)); err != nil {
			return err
		}
		for path, uuid := range pathMap {
			if err := bucketMap.Put([]byte(path), []byte(uuid)); err != nil {
				return err
			}
		}
		for tag, postsTag := range tagMap {
			jsonPosts, _ := json.Marshal(postsTag)
			if err := bucketTags.Put([]byte(tag), []byte(jsonPosts)); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		return err
	}
	cfg.Log("Index rebuilt!")
	return nil
}
// GetPostsPage returns the published posts stored for the given page
// number in the index bucket.
func GetPostsPage(page int) ([]Post, error) {
	var posts []Post
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket == nil {
			panic("Bucket index not found!")
		}
		raw := bucket.Get([]byte(fmt.Sprintf("page-%d", page)))
		json.Unmarshal(raw, &posts)
		return nil
	})
	return posts, err
}
// GetTagPosts returns the published posts stored under the given tag in
// the tags bucket (keys are lowercased tag names).
func GetTagPosts(tag string) ([]Post, error) {
	var posts []Post
	if err := db.View(func(tx *bolt.Tx) error {
		bucketTags := tx.Bucket([]byte(BUCKET_TAGS))
		if bucketTags == nil {
			// The previous message said "index", but the missing bucket
			// here is the tags bucket.
			panic("Bucket tags not found!")
		}
		jsonPosts := bucketTags.Get([]byte(strings.ToLower(tag)))
		json.Unmarshal(jsonPosts, &posts)
		return nil
	}); err != nil {
		return posts, err
	}
	return posts, nil
}
// GetDraftPosts returns every draft post stored in the index bucket.
func GetDraftPosts() ([]Post, error) {
	var posts []Post
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BUCKET_INDEX))
		if bucket == nil {
			panic("Bucket index not found!")
		}
		json.Unmarshal(bucket.Get([]byte("drafts")), &posts)
		return nil
	})
	return posts, err
}
|
// Copyright 2017 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package integration
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"time"
tsuruNet "github.com/tsuru/tsuru/net"
"gopkg.in/check.v1"
)
var (
	// T builds a tsuru CLI invocation, e.g. T("app-list").Run(env).
	T = NewCommand("tsuru").WithArgs
	// platforms, provisioners and clusterManagers are empty here;
	// NOTE(review): presumably populated by environment-specific setup
	// outside this chunk — confirm.
	platforms       = []string{}
	provisioners    = []string{}
	clusterManagers = []ClusterManager{}
	// flows lists every integration step, executed in this order.
	flows = []ExecFlow{
		platformsToInstall(),
		installerConfigTest(),
		installerComposeTest(),
		installerTest(),
		targetTest(),
		loginTest(),
		removeInstallNodes(),
		quotaTest(),
		teamTest(),
		poolAdd(),
		nodeHealer(),
		platformAdd(),
		exampleApps(),
		serviceImageSetup(),
		serviceCreate(),
		serviceBind(),
	}
	// installerConfig is the raw installer configuration written to a
	// temp file by installerConfigTest.
	installerConfig = ""
)
// platformsToInstall publishes every configured platform image through
// the "platformimages" environment key.
func platformsToInstall() ExecFlow {
	flow := ExecFlow{provides: []string{"platformimages"}}
	flow.forward = func(c *check.C, env *Environment) {
		for _, img := range platforms {
			env.Add("platformimages", img)
		}
	}
	return flow
}
// installerConfigTest writes installerConfig to a temp file and
// publishes its path as "installerconfig"; backward removes the file.
func installerConfigTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"installerconfig"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		f, err := ioutil.TempFile("", "installer-config")
		c.Assert(err, check.IsNil)
		defer f.Close()
		// The write error was previously discarded and the stale TempFile
		// error re-asserted instead; assert the actual write result.
		_, err = f.Write([]byte(installerConfig))
		c.Assert(err, check.IsNil)
		env.Set("installerconfig", f.Name())
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := NewCommand("rm", "{{.installerconfig}}").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// installerComposeTest generates a default installer compose file via
// "install-config-init" and publishes its path as "installercompose".
// The throwaway config file created alongside it is removed when the
// forward step returns (deferred); the compose file itself is removed
// by backward.
func installerComposeTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"installercompose"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		composeFile, err := ioutil.TempFile("", "installer-compose")
		c.Assert(err, check.IsNil)
		defer composeFile.Close()
		f, err := ioutil.TempFile("", "installer-config")
		c.Assert(err, check.IsNil)
		// Remove (and then close) the scratch config before returning;
		// only the compose file outlives this step.
		defer func() {
			res := NewCommand("rm", f.Name()).Run(env)
			c.Check(res, ResultOk)
			f.Close()
		}()
		res := T("install-config-init", f.Name(), composeFile.Name()).Run(env)
		c.Assert(res, ResultOk)
		env.Set("installercompose", composeFile.Name())
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := NewCommand("rm", "{{.installercompose}}").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// installerTest runs "install-create" (up to 60 minutes) and scrapes
// its output for the API address ("targetaddr"), the node addresses
// (published as TLS registration options and "installernodes") and the
// admin credentials. backward tears the installation down.
func installerTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"targetaddr"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		res := T("install-create", "--config", "{{.installerconfig}}", "--compose", "{{.installercompose}}").WithTimeout(60 * time.Minute).Run(env)
		c.Assert(res, ResultOk)
		// First "Core Hosts" IP in the output -> target host.
		regex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\d.]+)\s.*`)
		parts := regex.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		targetHost := parts[1]
		// Published port of the tsuru_tsuru service -> target port.
		regex = regexp.MustCompile(`(?si).*tsuru_tsuru.*?\|\s(\d+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		targetPort := parts[1]
		env.Set("targetaddr", fmt.Sprintf("http://%s:%s", targetHost, targetPort))
		// Every URL printed in the summary table is a node address.
		regex = regexp.MustCompile(`\| (https?[^\s]+?) \|`)
		allParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)
		certsDir := fmt.Sprintf("%s/.tsuru/installs/%s/certs", os.Getenv("HOME"), installerName(env))
		for i, parts := range allParts {
			if i == 0 && len(provisioners) == 0 {
				// Keep the first node when there's no provisioner
				continue
			}
			c.Assert(parts, check.HasLen, 2)
			env.Add("noderegisteropts", fmt.Sprintf("--register address=%s --cacert %s/ca.pem --clientcert %s/cert.pem --clientkey %s/key.pem", parts[1], certsDir, certsDir, certsDir))
			env.Add("installernodes", parts[1])
		}
		// Admin credentials printed by the installer.
		regex = regexp.MustCompile(`Username: ([[:print:]]+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		env.Set("adminuser", parts[1])
		regex = regexp.MustCompile(`Password: ([[:print:]]+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		env.Set("adminpassword", parts[1])
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("install-remove", "--config", "{{.installerconfig}}", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// targetTest registers the installed API as a CLI target, verifies it
// shows up in target-list, and makes it the active target.
func targetTest() ExecFlow {
	flow := ExecFlow{}
	flow.forward = func(c *check.C, env *Environment) {
		const targetName = "integration-target"
		result := T("target-add", targetName, "{{.targetaddr}}").Run(env)
		c.Assert(result, ResultOk)
		result = T("target-list").Run(env)
		c.Assert(result, ResultMatches, Expected{Stdout: `\s+` + targetName + ` .*`})
		result = T("target-set", targetName).Run(env)
		c.Assert(result, ResultOk)
	}
	return flow
}
// loginTest signs the CLI in as the admin user created by the installer.
func loginTest() ExecFlow {
	flow := ExecFlow{}
	flow.forward = func(c *check.C, env *Environment) {
		result := T("login", "{{.adminuser}}").WithInput("{{.adminpassword}}").Run(env)
		c.Assert(result, ResultOk)
	}
	return flow
}
// removeInstallNodes removes every node registered by the installer;
// the flow runs once per entry in "installernodes".
func removeInstallNodes() ExecFlow {
	flow := ExecFlow{
		matrix: map[string]string{
			"node": "installernodes",
		},
	}
	flow.forward = func(c *check.C, env *Environment) {
		result := T("node-remove", "-y", "--no-rebalance", "{{.node}}").Run(env)
		c.Assert(result, ResultOk)
	}
	return flow
}
// quotaTest raises the admin user's app quota to 100 and verifies the
// change through user-quota-view.
func quotaTest() ExecFlow {
	flow := ExecFlow{
		requires: []string{"adminuser"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		result := T("user-quota-change", "{{.adminuser}}", "100").Run(env)
		c.Assert(result, ResultOk)
		result = T("user-quota-view", "{{.adminuser}}").Run(env)
		c.Assert(result, ResultMatches, Expected{Stdout: `(?s)Apps usage.*/100`})
	}
	return flow
}
// teamTest creates the integration team and publishes its name as
// "team"; backward deletes the team again.
func teamTest() ExecFlow {
	const teamName = "integration-team"
	flow := ExecFlow{
		provides: []string{"team"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		result := T("team-create", teamName).Run(env)
		c.Assert(result, ResultOk)
		env.Set("team", teamName)
	}
	flow.backward = func(c *check.C, env *Environment) {
		result := T("team-remove", "-y", teamName).Run(env)
		c.Check(result, ResultOk)
	}
	return flow
}
// nodeHealer exercises automatic node healing: it adds an extra node to
// each multi-node pool, enables healing, destroys the machine backing
// the new node, and asserts a healer event starts, finishes and
// succeeds — recording the replacement node's address. backward removes
// whichever node ended up registered for the pool.
func nodeHealer() ExecFlow {
	flow := ExecFlow{
		requires: []string{"nodeopts"},
		matrix: map[string]string{
			"pool": "multinodepools",
		},
	}
	flow.forward = func(c *check.C, env *Environment) {
		poolName := env.Get("pool")
		res := T("node-add", "{{.nodeopts}}", "pool="+poolName).Run(env)
		c.Assert(res, ResultOk)
		nodeAddr := waitNewNode(c, env)
		env.Set("newnode-"+poolName, nodeAddr)
		// Heal nodes that stay unresponsive for more than 130 seconds.
		res = T("node-healing-update", "--enable", "--max-unresponsive", "130").Run(env)
		c.Assert(res, ResultOk)
		res = T("node-container-upgrade", "big-sibling", "-y").Run(env)
		c.Assert(res, ResultOk)
		// Wait BS node status upgrade
		time.Sleep(time.Minute)
		res = T("machine-list").Run(env)
		c.Assert(res, ResultOk)
		table := resultTable{raw: res.Stdout.String()}
		table.parse()
		// Locate the IaaS machine backing the node we just added.
		var machineID string
		for _, row := range table.rows {
			c.Assert(row, check.HasLen, 4)
			if tsuruNet.URLToHost(nodeAddr) == row[2] {
				machineID = row[0]
				break
			}
		}
		c.Assert(machineID, check.Not(check.Equals), "")
		res = T("machine-destroy", machineID).Run(env)
		c.Assert(res, ResultOk)
		// A healer event for the node should appear (healing started)...
		ok := retry(15*time.Minute, func() bool {
			res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr).Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() != ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("node healing did not start after 15 minutes: %v", res))
		// ...and eventually no *running* ("-r") healer event should remain.
		ok = retry(30*time.Minute, func() bool {
			res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr, "-r").Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() == ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("node healing did not finish after 30 minutes: %v", res))
		res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr).Run(env)
		c.Assert(res, ResultOk)
		table = resultTable{raw: res.Stdout.String()}
		table.parse()
		// Exactly one healer event, and it must report success.
		c.Assert(table.rows, check.HasLen, 1)
		c.Assert(table.rows[0][2], check.Equals, "true", check.Commentf("expected success, got: %v - event info: %v", res, T("event-info", table.rows[0][0]).Run(env)))
		eventId := table.rows[0][0]
		res = T("event-info", eventId).Run(env)
		c.Assert(res, ResultOk)
		// Pull the replacement node's address out of the event custom data.
		newAddrRegexp := regexp.MustCompile(`(?s)End Custom Data:.*?_id: (.*?)\s`)
		newAddrParts := newAddrRegexp.FindStringSubmatch(res.Stdout.String())
		newAddr := newAddrParts[1]
		env.Set("newnode-"+poolName, newAddr)
	}
	flow.backward = func(c *check.C, env *Environment) {
		nodeAddr := env.Get("newnode-" + env.Get("pool"))
		if nodeAddr == "" {
			return
		}
		res := T("node-remove", "-y", "--destroy", "--no-rebalance", nodeAddr).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// waitNewNode extracts the address of the most recent node.create event
// and blocks until node-list reports that node as ready (5 min max).
func waitNewNode(c *check.C, env *Environment) string {
	createRegex := regexp.MustCompile(`node.create.*?node:\s+(.*?)\s+`)
	res := T("event-list").Run(env)
	c.Assert(res, ResultOk)
	match := createRegex.FindStringSubmatch(res.Stdout.String())
	c.Assert(match, check.HasLen, 2)
	addr := match[1]
	readyRegex := regexp.MustCompile("(?i)" + addr + `.*?ready`)
	ready := retry(5*time.Minute, func() bool {
		res = T("node-list").Run(env)
		return readyRegex.MatchString(res.Stdout.String())
	})
	c.Assert(ready, check.Equals, true, check.Commentf("node not ready after 5 minutes: %v", res))
	return addr
}
// poolAdd creates one pool per configured provisioner (each with a single
// node) and one pool per cluster manager (attaching the cluster's nodes),
// publishing the pool names under "poolnames" and node addresses under
// "nodeaddrs". The backward step tears all of it down.
func poolAdd() ExecFlow {
	flow := ExecFlow{
		provides: []string{"poolnames"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		// Plain provisioners: create pool, constrain to team, add a node.
		for _, prov := range provisioners {
			poolName := "ipool-" + prov
			res := T("pool-add", "--provisioner", prov, poolName).Run(env)
			c.Assert(res, ResultOk)
			env.Add("poolnames", poolName)
			env.Add("multinodepools", poolName)
			res = T("pool-constraint-set", poolName, "team", "{{.team}}").Run(env)
			c.Assert(res, ResultOk)
			opts := nodeOrRegisterOpts(c, env)
			res = T("node-add", opts, "pool="+poolName).Run(env)
			c.Assert(res, ResultOk)
			nodeAddr := waitNewNode(c, env)
			env.Add("nodeaddrs", nodeAddr)
		}
		// Cluster managers: create pool, start the cluster, register it,
		// then move the cluster's nodes into the pool.
		for _, cluster := range clusterManagers {
			poolName := "ipool-" + cluster.Name()
			res := T("pool-add", "--provisioner", cluster.Provisioner(), poolName).Run(env)
			c.Assert(res, ResultOk)
			env.Add("poolnames", poolName)
			res = T("pool-constraint-set", poolName, "team", "{{.team}}").Run(env)
			c.Assert(res, ResultOk)
			res = cluster.Start()
			c.Assert(res, ResultOk)
			clusterName := "icluster-" + cluster.Name()
			params := []string{"cluster-add", clusterName, cluster.Provisioner(), "--pool", poolName}
			clusterParams, nodeCreate := cluster.UpdateParams()
			// Only clusters that can create nodes take part in the
			// multi-node (healing) scenarios.
			if nodeCreate {
				env.Add("multinodepools", poolName)
			}
			res = T(append(params, clusterParams...)...).Run(env)
			c.Assert(res, ResultOk)
			T("cluster-list").Run(env)
			regex := regexp.MustCompile("(?i)ready")
			// Matches an IPv4 address (optionally scheme/port-qualified)
			// inside a table cell of node-list output.
			addressRegex := regexp.MustCompile(`(?m)^ *\| *((?:https?:\/\/)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?) *\|`)
			nodeIPs := make([]string, 0)
			ok := retry(time.Minute, func() bool {
				res = T("node-list", "-f", "tsuru.io/cluster="+clusterName).Run(env)
				if regex.MatchString(res.Stdout.String()) {
					parts := addressRegex.FindAllStringSubmatch(res.Stdout.String(), -1)
					for _, part := range parts {
						if len(part) == 2 && len(part[1]) > 0 {
							nodeIPs = append(nodeIPs, part[1])
						}
					}
					return true
				}
				return false
			})
			c.Assert(ok, check.Equals, true, check.Commentf("nodes not ready after 1 minute: %v", res))
			// Move every discovered node into the new pool.
			for _, ip := range nodeIPs {
				res = T("node-update", ip, "pool="+poolName).Run(env)
				c.Assert(res, ResultOk)
			}
			res = T("event-list").Run(env)
			c.Assert(res, ResultOk)
			// Each move must have produced a node.update event.
			for _, ip := range nodeIPs {
				regex = regexp.MustCompile(`node.update.*?node:\s+` + ip)
				c.Assert(regex.MatchString(res.Stdout.String()), check.Equals, true)
			}
			// Wait for every node to report ready again after the update.
			ok = retry(time.Minute, func() bool {
				res = T("node-list").Run(env)
				for _, ip := range nodeIPs {
					regex = regexp.MustCompile("(?i)" + ip + `.*?ready`)
					if !regex.MatchString(res.Stdout.String()) {
						return false
					}
				}
				return true
			})
			c.Assert(ok, check.Equals, true, check.Commentf("nodes not ready after 1 minute: %v", res))
		}
	}
	flow.backward = func(c *check.C, env *Environment) {
		// Remove clusters (and their pools) first, then standalone nodes,
		// then the provisioner pools.
		for _, cluster := range clusterManagers {
			res := T("cluster-remove", "-y", "icluster-"+cluster.Name()).Run(env)
			c.Check(res, ResultOk)
			res = cluster.Delete()
			c.Check(res, ResultOk)
			poolName := "ipool-" + cluster.Name()
			res = T("pool-remove", "-y", poolName).Run(env)
			c.Check(res, ResultOk)
		}
		for _, node := range env.All("nodeaddrs") {
			res := T("node-remove", "-y", "--no-rebalance", node).Run(env)
			c.Check(res, ResultOk)
		}
		for _, prov := range provisioners {
			poolName := "ipool-" + prov
			res := T("pool-remove", "-y", poolName).Run(env)
			c.Check(res, ResultOk)
		}
	}
	return flow
}
// platformAdd installs one platform per configured image (the matrix
// expands "platimg" over "platformimages") and publishes the resulting
// platform name under "installedplatforms". The backward step removes
// the platform again.
func platformAdd() ExecFlow {
	// platformName derives "iplat-<basename>" from the image reference.
	platformName := func(env *Environment) string {
		image := env.Get("platimg")
		return "iplat-" + image[strings.LastIndex(image, "/")+1:]
	}
	flow := ExecFlow{
		provides: []string{"installedplatforms"},
		matrix: map[string]string{
			"platimg": "platformimages",
		},
		parallel: true,
	}
	flow.forward = func(c *check.C, env *Environment) {
		name := platformName(env)
		addRes := T("platform-add", name, "-i", env.Get("platimg")).WithTimeout(15 * time.Minute).Run(env)
		c.Assert(addRes, ResultOk)
		env.Add("installedplatforms", name)
		listRes := T("platform-list").Run(env)
		c.Assert(listRes, ResultOk)
		c.Assert(listRes, ResultMatches, Expected{Stdout: "(?s).*- " + name + ".*"})
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("platform-remove", "-y", platformName(env)).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// exampleApps creates and deploys one sample app per (platform, pool)
// pair, waits for a unit to start, checks the routed address answers
// HTTP, and publishes the app name under "appnames".
func exampleApps() ExecFlow {
	flow := ExecFlow{
		matrix: map[string]string{
			"pool": "poolnames",
			"plat": "installedplatforms",
		},
		parallel: true,
		provides: []string{"appnames"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		appName := fmt.Sprintf("iapp-%s-%s", env.Get("plat"), env.Get("pool"))
		res := T("app-create", appName, "{{.plat}}", "-t", "{{.team}}", "-o", "{{.pool}}").Run(env)
		c.Assert(res, ResultOk)
		res = T("app-info", "-a", appName).Run(env)
		c.Assert(res, ResultOk)
		// Derive the examples subdirectory from the platform name by
		// stripping the "iplat-" prefix.
		platRE := regexp.MustCompile(`(?s)Platform: (.*?)\n`)
		parts := platRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		lang := strings.Replace(parts[1], "iplat-", "", -1)
		res = T("app-deploy", "-a", appName, "{{.examplesdir}}/"+lang+"/").Run(env)
		c.Assert(res, ResultOk)
		// Wait until app-info reports a started unit.
		regex := regexp.MustCompile("started")
		ok := retry(5*time.Minute, func() bool {
			res = T("app-info", "-a", appName).Run(env)
			c.Assert(res, ResultOk)
			return regex.MatchString(res.Stdout.String())
		})
		c.Assert(ok, check.Equals, true, check.Commentf("app not ready after 5 minutes: %v", res))
		addrRE := regexp.MustCompile(`(?s)Address: (.*?)\n`)
		parts = addrRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		// The app must eventually answer HTTP on its routed address.
		cmd := NewCommand("curl", "-sSf", "http://"+parts[1])
		ok = retry(15*time.Minute, func() bool {
			res = cmd.Run(env)
			return res.ExitCode == 0
		})
		c.Assert(ok, check.Equals, true, check.Commentf("invalid result: %v", res))
		env.Add("appnames", appName)
	}
	flow.backward = func(c *check.C, env *Environment) {
		// Template placeholders are expanded by the command runner.
		appName := "iapp-{{.plat}}-{{.pool}}"
		res := T("app-remove", "-y", "-a", appName).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// serviceImageSetup publishes the docker image used to deploy the test
// service under "serviceimage".
func serviceImageSetup() ExecFlow {
	flow := ExecFlow{
		provides: []string{"serviceimage"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		env.Add("serviceimage", "tsuru/eviaas")
	}
	return flow
}
// serviceCreate deploys the eviaas test service as a tsuru app, generates
// a service manifest pointing at the app's address, and registers it as
// "integration-service", publishing the name under "servicename". The
// backward step removes both the app and the service.
func serviceCreate() ExecFlow {
	flow := ExecFlow{
		provides: []string{"servicename"},
		requires: []string{"poolnames", "installedplatforms", "serviceimage"},
	}
	appName := "integration-service-app"
	flow.forward = func(c *check.C, env *Environment) {
		res := T("app-create", appName, env.Get("installedplatforms"), "-t", "{{.team}}", "-o", env.Get("poolnames")).Run(env)
		c.Assert(res, ResultOk)
		res = T("app-info", "-a", appName).Run(env)
		c.Assert(res, ResultOk)
		res = T("env-set", "-a", appName, "EVI_ENVIRONS='{\"INTEGRATION_ENV\":\"TRUE\"}'").Run(env)
		c.Assert(res, ResultOk)
		res = T("app-deploy", "-a", appName, "-i", "{{.serviceimage}}").Run(env)
		c.Assert(res, ResultOk)
		// Wait until app-info reports a started unit.
		regex := regexp.MustCompile("started")
		ok := retry(5*time.Minute, func() bool {
			res = T("app-info", "-a", appName).Run(env)
			c.Assert(res, ResultOk)
			return regex.MatchString(res.Stdout.String())
		})
		c.Assert(ok, check.Equals, true, check.Commentf("app not ready after 5 minutes: %v", res))
		addrRE := regexp.MustCompile(`(?s)Address: (.*?)\n`)
		parts := addrRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		// Build the manifest in a scratch dir; clean it up on exit (the
		// original leaked one temp dir per run). Runs after the Chdir
		// defer below restores the working directory.
		dir, err := ioutil.TempDir("", "service")
		c.Assert(err, check.IsNil)
		defer os.RemoveAll(dir)
		currDir, err := os.Getwd()
		c.Assert(err, check.IsNil)
		err = os.Chdir(dir)
		c.Assert(err, check.IsNil)
		defer os.Chdir(currDir)
		res = T("service-template").Run(env)
		c.Assert(res, ResultOk)
		// Substitute the template placeholders in manifest.yaml.
		replaces := map[string]string{
			"team_responsible_to_provide_service": "integration-team",
			"production-endpoint.com":             "http://" + parts[1],
			"servicename":                         "integration-service",
		}
		for k, v := range replaces {
			res = NewCommand("sed", "-i", "-e", "'s~"+k+"~"+v+"~'", "manifest.yaml").Run(env)
			c.Assert(res, ResultOk)
		}
		res = T("service-create", "manifest.yaml").Run(env)
		c.Assert(res, ResultOk)
		res = T("service-info", "integration-service").Run(env)
		c.Assert(res, ResultOk)
		env.Set("servicename", "integration-service")
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("app-remove", "-y", "-a", appName).Run(env)
		c.Check(res, ResultOk)
		res = T("service-destroy", "integration-service", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// serviceBind creates one service instance per app, binds it, waits for
// the bind event to finish, and verifies the injected environment
// variable is visible both via env-get and inside a running unit.
// Publishes the instance name under "bindnames".
func serviceBind() ExecFlow {
	flow := ExecFlow{
		matrix: map[string]string{
			"app": "appnames",
		},
		parallel: true,
		requires: []string{"appnames", "servicename"},
		provides: []string{"bindnames"},
	}
	bindName := "{{.servicename}}-{{.app}}"
	flow.forward = func(c *check.C, env *Environment) {
		res := T("service-instance-add", "{{.servicename}}", bindName, "-t", "integration-team").Run(env)
		c.Assert(res, ResultOk)
		res = T("service-instance-bind", "{{.servicename}}", bindName, "-a", "{{.app}}").Run(env)
		c.Assert(res, ResultOk)
		// Wait until no running ("-r") bind event remains for the app.
		ok := retry(15*time.Minute, func() bool {
			res = T("event-list", "-k", "app.update.bind", "-v", "{{.app}}", "-r").Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() == ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("bind did not complete after 15 minutes: %v", res))
		res = T("event-list", "-k", "app.update.bind", "-v", "{{.app}}").Run(env)
		c.Assert(res, ResultOk)
		c.Assert(res, ResultMatches, Expected{Stdout: `.*true.*`}, check.Commentf("event did not succeed"))
		// The bound variable should appear in the app's environment...
		ok = retry(time.Minute, func() bool {
			res = T("env-get", "-a", "{{.app}}").Run(env)
			c.Check(res, ResultOk)
			return strings.Contains(res.Stdout.String(), "INTEGRATION_ENV=")
		})
		c.Assert(ok, check.Equals, true, check.Commentf("env not gettable after 1 minute: %v", res))
		// ...and be injected into running units.
		cmd := T("app-run", "-a", "{{.app}}", "env")
		ok = retry(time.Minute, func() bool {
			res = cmd.Run(env)
			return strings.Contains(res.Stdout.String(), "INTEGRATION_ENV=TRUE")
		})
		c.Assert(ok, check.Equals, true, check.Commentf("env not injected after 1 minute: %v", res))
		env.Add("bindnames", bindName)
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("service-instance-remove", "{{.servicename}}", bindName, "-f", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// TestBase runs every registered integration flow in order, rolling back
// the executed flows in reverse order when the test finishes.
func (s *S) TestBase(c *check.C) {
	s.config(c)
	// A nil env means the integration suite is not configured; skip.
	if s.env == nil {
		return
	}
	var executedFlows []*ExecFlow
	defer func() {
		// Roll back only what actually ran, newest first.
		for i := len(executedFlows) - 1; i >= 0; i-- {
			executedFlows[i].Rollback(c, s.env)
		}
	}()
	for i := range flows {
		f := &flows[i]
		if len(f.provides) > 0 {
			providesAll := true
			for _, envVar := range f.provides {
				if s.env.Get(envVar) == "" {
					providesAll = false
					break
				}
			}
			// Skip flows whose outputs are already present in the
			// environment (e.g. pre-seeded by the configuration).
			if providesAll {
				continue
			}
		}
		executedFlows = append(executedFlows, f)
		f.Run(c, s.env)
	}
}
integration: disable node healing once the healing test completes
// Copyright 2017 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package integration
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"time"
tsuruNet "github.com/tsuru/tsuru/net"
"gopkg.in/check.v1"
)
var (
	// T builds a tsuru CLI invocation with the given arguments.
	T = NewCommand("tsuru").WithArgs
	// platforms, provisioners and clusterManagers are populated by the
	// suite configuration before the flows run.
	platforms       = []string{}
	provisioners    = []string{}
	clusterManagers = []ClusterManager{}
	// flows lists every integration step in forward execution order;
	// rollback runs the executed subset in reverse.
	flows = []ExecFlow{
		platformsToInstall(),
		installerConfigTest(),
		installerComposeTest(),
		installerTest(),
		targetTest(),
		loginTest(),
		removeInstallNodes(),
		quotaTest(),
		teamTest(),
		poolAdd(),
		nodeHealer(),
		platformAdd(),
		exampleApps(),
		serviceImageSetup(),
		serviceCreate(),
		serviceBind(),
	}
	// installerConfig holds the raw installer configuration written to a
	// temp file by installerConfigTest.
	installerConfig = ""
)
// platformsToInstall publishes every configured platform image into the
// environment under "platformimages".
func platformsToInstall() ExecFlow {
	return ExecFlow{
		provides: []string{"platformimages"},
		forward: func(c *check.C, env *Environment) {
			for _, image := range platforms {
				env.Add("platformimages", image)
			}
		},
	}
}
// installerConfigTest writes the installer configuration to a temporary
// file and publishes its path as "installerconfig". The backward step
// removes the file.
func installerConfigTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"installerconfig"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		f, err := ioutil.TempFile("", "installer-config")
		c.Assert(err, check.IsNil)
		defer f.Close()
		// Check the Write error itself; the original discarded it and
		// re-asserted the stale error from TempFile, so a failed write
		// would have gone unnoticed.
		_, err = f.Write([]byte(installerConfig))
		c.Assert(err, check.IsNil)
		env.Set("installerconfig", f.Name())
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := NewCommand("rm", "{{.installerconfig}}").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// installerComposeTest generates a default compose file for the installer
// via "install-config-init" and publishes its path as "installercompose".
// The companion config file it also creates is deleted right away; the
// compose file itself is deleted by the backward step.
func installerComposeTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"installercompose"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		composeFile, err := ioutil.TempFile("", "installer-compose")
		c.Assert(err, check.IsNil)
		defer composeFile.Close()
		f, err := ioutil.TempFile("", "installer-config")
		c.Assert(err, check.IsNil)
		// Only the compose file is kept; remove the throwaway config
		// file once the flow step finishes.
		defer func() {
			res := NewCommand("rm", f.Name()).Run(env)
			c.Check(res, ResultOk)
			f.Close()
		}()
		res := T("install-config-init", f.Name(), composeFile.Name()).Run(env)
		c.Assert(res, ResultOk)
		env.Set("installercompose", composeFile.Name())
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := NewCommand("rm", "{{.installercompose}}").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// installerTest runs "tsuru install-create" and scrapes its output for
// the API host/port, extra node addresses (with TLS register options),
// and the admin credentials, storing them all in the environment. The
// backward step removes the whole install.
func installerTest() ExecFlow {
	flow := ExecFlow{
		provides: []string{"targetaddr"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		res := T("install-create", "--config", "{{.installerconfig}}", "--compose", "{{.installercompose}}").WithTimeout(60 * time.Minute).Run(env)
		c.Assert(res, ResultOk)
		// First IP listed under "Core Hosts:" is the API host.
		regex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\d.]+)\s.*`)
		parts := regex.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		targetHost := parts[1]
		// Published port of the tsuru_tsuru service.
		regex = regexp.MustCompile(`(?si).*tsuru_tsuru.*?\|\s(\d+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		targetPort := parts[1]
		env.Set("targetaddr", fmt.Sprintf("http://%s:%s", targetHost, targetPort))
		// Collect the docker endpoints of the created machines, building
		// node-register options pointing at the installer's cert dir.
		regex = regexp.MustCompile(`\| (https?[^\s]+?) \|`)
		allParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)
		certsDir := fmt.Sprintf("%s/.tsuru/installs/%s/certs", os.Getenv("HOME"), installerName(env))
		for i, parts := range allParts {
			if i == 0 && len(provisioners) == 0 {
				// Keep the first node when there's no provisioner
				continue
			}
			c.Assert(parts, check.HasLen, 2)
			env.Add("noderegisteropts", fmt.Sprintf("--register address=%s --cacert %s/ca.pem --clientcert %s/cert.pem --clientkey %s/key.pem", parts[1], certsDir, certsDir, certsDir))
			env.Add("installernodes", parts[1])
		}
		// Admin credentials printed at the end of install-create output.
		regex = regexp.MustCompile(`Username: ([[:print:]]+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		env.Set("adminuser", parts[1])
		regex = regexp.MustCompile(`Password: ([[:print:]]+)`)
		parts = regex.FindStringSubmatch(res.Stdout.String())
		env.Set("adminpassword", parts[1])
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("install-remove", "--config", "{{.installerconfig}}", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// targetTest registers the freshly installed tsuru API as a CLI target,
// verifies it is listed, and makes it the active target.
func targetTest() ExecFlow {
	flow := ExecFlow{}
	flow.forward = func(c *check.C, env *Environment) {
		const name = "integration-target"
		addRes := T("target-add", name, "{{.targetaddr}}").Run(env)
		c.Assert(addRes, ResultOk)
		listRes := T("target-list").Run(env)
		c.Assert(listRes, ResultMatches, Expected{Stdout: `\s+` + name + ` .*`})
		setRes := T("target-set", name).Run(env)
		c.Assert(setRes, ResultOk)
	}
	return flow
}
// loginTest authenticates the CLI using the admin credentials collected
// during installation.
func loginTest() ExecFlow {
	return ExecFlow{
		forward: func(c *check.C, env *Environment) {
			res := T("login", "{{.adminuser}}").WithInput("{{.adminpassword}}").Run(env)
			c.Assert(res, ResultOk)
		},
	}
}
// removeInstallNodes removes every node registered by the installer (the
// matrix expands "node" over "installernodes") without rebalancing.
func removeInstallNodes() ExecFlow {
	return ExecFlow{
		matrix: map[string]string{
			"node": "installernodes",
		},
		forward: func(c *check.C, env *Environment) {
			res := T("node-remove", "-y", "--no-rebalance", "{{.node}}").Run(env)
			c.Assert(res, ResultOk)
		},
	}
}
// quotaTest raises the admin user's app quota to 100 and confirms the
// new limit is reported by user-quota-view.
func quotaTest() ExecFlow {
	flow := ExecFlow{
		requires: []string{"adminuser"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		changeRes := T("user-quota-change", "{{.adminuser}}", "100").Run(env)
		c.Assert(changeRes, ResultOk)
		viewRes := T("user-quota-view", "{{.adminuser}}").Run(env)
		c.Assert(viewRes, ResultMatches, Expected{Stdout: `(?s)Apps usage.*/100`})
	}
	return flow
}
// teamTest creates the integration team and publishes its name under
// "team"; the backward step removes the team again.
func teamTest() ExecFlow {
	const name = "integration-team"
	flow := ExecFlow{
		provides: []string{"team"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		res := T("team-create", name).Run(env)
		c.Assert(res, ResultOk)
		env.Set("team", name)
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("team-remove", "-y", name).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// nodeHealer adds an extra node to each multi-node pool, enables node
// healing, destroys the machine behind the new node, and waits for the
// healer to replace it. Healing is disabled once the event completes.
// The replacement node's address is stored under "newnode-<pool>" so the
// backward step can destroy it.
func nodeHealer() ExecFlow {
	flow := ExecFlow{
		requires: []string{"nodeopts"},
		matrix: map[string]string{
			"pool": "multinodepools",
		},
	}
	flow.forward = func(c *check.C, env *Environment) {
		poolName := env.Get("pool")
		res := T("node-add", "{{.nodeopts}}", "pool="+poolName).Run(env)
		c.Assert(res, ResultOk)
		nodeAddr := waitNewNode(c, env)
		env.Set("newnode-"+poolName, nodeAddr)
		res = T("node-healing-update", "--enable", "--max-unresponsive", "130").Run(env)
		c.Assert(res, ResultOk)
		res = T("node-container-upgrade", "big-sibling", "-y").Run(env)
		c.Assert(res, ResultOk)
		// Wait BS node status upgrade
		time.Sleep(time.Minute)
		// Locate the IaaS machine backing the freshly added node by
		// matching its host column against the node address.
		res = T("machine-list").Run(env)
		c.Assert(res, ResultOk)
		table := resultTable{raw: res.Stdout.String()}
		table.parse()
		var machineID string
		for _, row := range table.rows {
			c.Assert(row, check.HasLen, 4)
			if tsuruNet.URLToHost(nodeAddr) == row[2] {
				machineID = row[0]
				break
			}
		}
		c.Assert(machineID, check.Not(check.Equals), "")
		// Kill the machine to force the healer to kick in.
		res = T("machine-destroy", machineID).Run(env)
		c.Assert(res, ResultOk)
		// Wait for a healer event for the node to appear...
		ok := retry(15*time.Minute, func() bool {
			res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr).Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() != ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("node healing did not start after 15 minutes: %v", res))
		// ...then wait until no running ("-r") healer event remains.
		ok = retry(30*time.Minute, func() bool {
			res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr, "-r").Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() == ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("node healing did not finish after 30 minutes: %v", res))
		// Healing is only needed for this scenario; switch it back off.
		res = T("node-healing-update", "--disable").Run(env)
		c.Assert(res, ResultOk)
		res = T("event-list", "-k", "healer", "-t", "node", "-v", nodeAddr).Run(env)
		c.Assert(res, ResultOk)
		table = resultTable{raw: res.Stdout.String()}
		table.parse()
		c.Assert(table.rows, check.HasLen, 1)
		c.Assert(table.rows[0][2], check.Equals, "true", check.Commentf("expected success, got: %v - event info: %v", res, T("event-info", table.rows[0][0]).Run(env)))
		// Extract the replacement node's address from the event's custom
		// data and remember it for rollback.
		eventId := table.rows[0][0]
		res = T("event-info", eventId).Run(env)
		c.Assert(res, ResultOk)
		newAddrRegexp := regexp.MustCompile(`(?s)End Custom Data:.*?_id: (.*?)\s`)
		newAddrParts := newAddrRegexp.FindStringSubmatch(res.Stdout.String())
		newAddr := newAddrParts[1]
		env.Set("newnode-"+poolName, newAddr)
	}
	flow.backward = func(c *check.C, env *Environment) {
		nodeAddr := env.Get("newnode-" + env.Get("pool"))
		if nodeAddr == "" {
			return
		}
		res := T("node-remove", "-y", "--destroy", "--no-rebalance", nodeAddr).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// waitNewNode blocks until the most recently created node (as reported by
// the latest node.create event) shows up as "ready" in node-list, and
// returns its address. The test fails if the node is not ready within
// 5 minutes.
func waitNewNode(c *check.C, env *Environment) string {
	createRE := regexp.MustCompile(`node.create.*?node:\s+(.*?)\s+`)
	result := T("event-list").Run(env)
	c.Assert(result, ResultOk)
	match := createRE.FindStringSubmatch(result.Stdout.String())
	c.Assert(match, check.HasLen, 2)
	addr := match[1]
	readyRE := regexp.MustCompile("(?i)" + addr + `.*?ready`)
	ready := retry(5*time.Minute, func() bool {
		result = T("node-list").Run(env)
		return readyRE.MatchString(result.Stdout.String())
	})
	c.Assert(ready, check.Equals, true, check.Commentf("node not ready after 5 minutes: %v", result))
	return addr
}
// poolAdd creates one pool per configured provisioner (each with a single
// node) and one pool per cluster manager (attaching the cluster's nodes),
// publishing the pool names under "poolnames" and node addresses under
// "nodeaddrs". The backward step tears all of it down.
func poolAdd() ExecFlow {
	flow := ExecFlow{
		provides: []string{"poolnames"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		// Plain provisioners: create pool, constrain to team, add a node.
		for _, prov := range provisioners {
			poolName := "ipool-" + prov
			res := T("pool-add", "--provisioner", prov, poolName).Run(env)
			c.Assert(res, ResultOk)
			env.Add("poolnames", poolName)
			env.Add("multinodepools", poolName)
			res = T("pool-constraint-set", poolName, "team", "{{.team}}").Run(env)
			c.Assert(res, ResultOk)
			opts := nodeOrRegisterOpts(c, env)
			res = T("node-add", opts, "pool="+poolName).Run(env)
			c.Assert(res, ResultOk)
			nodeAddr := waitNewNode(c, env)
			env.Add("nodeaddrs", nodeAddr)
		}
		// Cluster managers: create pool, start the cluster, register it,
		// then move the cluster's nodes into the pool.
		for _, cluster := range clusterManagers {
			poolName := "ipool-" + cluster.Name()
			res := T("pool-add", "--provisioner", cluster.Provisioner(), poolName).Run(env)
			c.Assert(res, ResultOk)
			env.Add("poolnames", poolName)
			res = T("pool-constraint-set", poolName, "team", "{{.team}}").Run(env)
			c.Assert(res, ResultOk)
			res = cluster.Start()
			c.Assert(res, ResultOk)
			clusterName := "icluster-" + cluster.Name()
			params := []string{"cluster-add", clusterName, cluster.Provisioner(), "--pool", poolName}
			clusterParams, nodeCreate := cluster.UpdateParams()
			// Only clusters that can create nodes take part in the
			// multi-node (healing) scenarios.
			if nodeCreate {
				env.Add("multinodepools", poolName)
			}
			res = T(append(params, clusterParams...)...).Run(env)
			c.Assert(res, ResultOk)
			T("cluster-list").Run(env)
			regex := regexp.MustCompile("(?i)ready")
			// Matches an IPv4 address (optionally scheme/port-qualified)
			// inside a table cell of node-list output.
			addressRegex := regexp.MustCompile(`(?m)^ *\| *((?:https?:\/\/)?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?) *\|`)
			nodeIPs := make([]string, 0)
			ok := retry(time.Minute, func() bool {
				res = T("node-list", "-f", "tsuru.io/cluster="+clusterName).Run(env)
				if regex.MatchString(res.Stdout.String()) {
					parts := addressRegex.FindAllStringSubmatch(res.Stdout.String(), -1)
					for _, part := range parts {
						if len(part) == 2 && len(part[1]) > 0 {
							nodeIPs = append(nodeIPs, part[1])
						}
					}
					return true
				}
				return false
			})
			c.Assert(ok, check.Equals, true, check.Commentf("nodes not ready after 1 minute: %v", res))
			// Move every discovered node into the new pool.
			for _, ip := range nodeIPs {
				res = T("node-update", ip, "pool="+poolName).Run(env)
				c.Assert(res, ResultOk)
			}
			res = T("event-list").Run(env)
			c.Assert(res, ResultOk)
			// Each move must have produced a node.update event.
			for _, ip := range nodeIPs {
				regex = regexp.MustCompile(`node.update.*?node:\s+` + ip)
				c.Assert(regex.MatchString(res.Stdout.String()), check.Equals, true)
			}
			// Wait for every node to report ready again after the update.
			ok = retry(time.Minute, func() bool {
				res = T("node-list").Run(env)
				for _, ip := range nodeIPs {
					regex = regexp.MustCompile("(?i)" + ip + `.*?ready`)
					if !regex.MatchString(res.Stdout.String()) {
						return false
					}
				}
				return true
			})
			c.Assert(ok, check.Equals, true, check.Commentf("nodes not ready after 1 minute: %v", res))
		}
	}
	flow.backward = func(c *check.C, env *Environment) {
		// Remove clusters (and their pools) first, then standalone nodes,
		// then the provisioner pools.
		for _, cluster := range clusterManagers {
			res := T("cluster-remove", "-y", "icluster-"+cluster.Name()).Run(env)
			c.Check(res, ResultOk)
			res = cluster.Delete()
			c.Check(res, ResultOk)
			poolName := "ipool-" + cluster.Name()
			res = T("pool-remove", "-y", poolName).Run(env)
			c.Check(res, ResultOk)
		}
		for _, node := range env.All("nodeaddrs") {
			res := T("node-remove", "-y", "--no-rebalance", node).Run(env)
			c.Check(res, ResultOk)
		}
		for _, prov := range provisioners {
			poolName := "ipool-" + prov
			res := T("pool-remove", "-y", poolName).Run(env)
			c.Check(res, ResultOk)
		}
	}
	return flow
}
// platformAdd installs one platform per configured image (the matrix
// expands "platimg" over "platformimages") and publishes the resulting
// platform name under "installedplatforms". The backward step removes
// the platform again.
func platformAdd() ExecFlow {
	// platformName derives "iplat-<basename>" from the image reference.
	platformName := func(env *Environment) string {
		image := env.Get("platimg")
		return "iplat-" + image[strings.LastIndex(image, "/")+1:]
	}
	flow := ExecFlow{
		provides: []string{"installedplatforms"},
		matrix: map[string]string{
			"platimg": "platformimages",
		},
		parallel: true,
	}
	flow.forward = func(c *check.C, env *Environment) {
		name := platformName(env)
		addRes := T("platform-add", name, "-i", env.Get("platimg")).WithTimeout(15 * time.Minute).Run(env)
		c.Assert(addRes, ResultOk)
		env.Add("installedplatforms", name)
		listRes := T("platform-list").Run(env)
		c.Assert(listRes, ResultOk)
		c.Assert(listRes, ResultMatches, Expected{Stdout: "(?s).*- " + name + ".*"})
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("platform-remove", "-y", platformName(env)).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// exampleApps creates and deploys one sample app per (platform, pool)
// pair, waits for a unit to start, checks the routed address answers
// HTTP, and publishes the app name under "appnames".
func exampleApps() ExecFlow {
	flow := ExecFlow{
		matrix: map[string]string{
			"pool": "poolnames",
			"plat": "installedplatforms",
		},
		parallel: true,
		provides: []string{"appnames"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		appName := fmt.Sprintf("iapp-%s-%s", env.Get("plat"), env.Get("pool"))
		res := T("app-create", appName, "{{.plat}}", "-t", "{{.team}}", "-o", "{{.pool}}").Run(env)
		c.Assert(res, ResultOk)
		res = T("app-info", "-a", appName).Run(env)
		c.Assert(res, ResultOk)
		// Derive the examples subdirectory from the platform name by
		// stripping the "iplat-" prefix.
		platRE := regexp.MustCompile(`(?s)Platform: (.*?)\n`)
		parts := platRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		lang := strings.Replace(parts[1], "iplat-", "", -1)
		res = T("app-deploy", "-a", appName, "{{.examplesdir}}/"+lang+"/").Run(env)
		c.Assert(res, ResultOk)
		// Wait until app-info reports a started unit.
		regex := regexp.MustCompile("started")
		ok := retry(5*time.Minute, func() bool {
			res = T("app-info", "-a", appName).Run(env)
			c.Assert(res, ResultOk)
			return regex.MatchString(res.Stdout.String())
		})
		c.Assert(ok, check.Equals, true, check.Commentf("app not ready after 5 minutes: %v", res))
		addrRE := regexp.MustCompile(`(?s)Address: (.*?)\n`)
		parts = addrRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		// The app must eventually answer HTTP on its routed address.
		cmd := NewCommand("curl", "-sSf", "http://"+parts[1])
		ok = retry(15*time.Minute, func() bool {
			res = cmd.Run(env)
			return res.ExitCode == 0
		})
		c.Assert(ok, check.Equals, true, check.Commentf("invalid result: %v", res))
		env.Add("appnames", appName)
	}
	flow.backward = func(c *check.C, env *Environment) {
		// Template placeholders are expanded by the command runner.
		appName := "iapp-{{.plat}}-{{.pool}}"
		res := T("app-remove", "-y", "-a", appName).Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// serviceImageSetup publishes the docker image used to deploy the test
// service under "serviceimage".
func serviceImageSetup() ExecFlow {
	flow := ExecFlow{
		provides: []string{"serviceimage"},
	}
	flow.forward = func(c *check.C, env *Environment) {
		env.Add("serviceimage", "tsuru/eviaas")
	}
	return flow
}
// serviceCreate deploys the eviaas test service as a tsuru app, generates
// a service manifest pointing at the app's address, and registers it as
// "integration-service", publishing the name under "servicename". The
// backward step removes both the app and the service.
func serviceCreate() ExecFlow {
	flow := ExecFlow{
		provides: []string{"servicename"},
		requires: []string{"poolnames", "installedplatforms", "serviceimage"},
	}
	appName := "integration-service-app"
	flow.forward = func(c *check.C, env *Environment) {
		res := T("app-create", appName, env.Get("installedplatforms"), "-t", "{{.team}}", "-o", env.Get("poolnames")).Run(env)
		c.Assert(res, ResultOk)
		res = T("app-info", "-a", appName).Run(env)
		c.Assert(res, ResultOk)
		res = T("env-set", "-a", appName, "EVI_ENVIRONS='{\"INTEGRATION_ENV\":\"TRUE\"}'").Run(env)
		c.Assert(res, ResultOk)
		res = T("app-deploy", "-a", appName, "-i", "{{.serviceimage}}").Run(env)
		c.Assert(res, ResultOk)
		// Wait until app-info reports a started unit.
		regex := regexp.MustCompile("started")
		ok := retry(5*time.Minute, func() bool {
			res = T("app-info", "-a", appName).Run(env)
			c.Assert(res, ResultOk)
			return regex.MatchString(res.Stdout.String())
		})
		c.Assert(ok, check.Equals, true, check.Commentf("app not ready after 5 minutes: %v", res))
		addrRE := regexp.MustCompile(`(?s)Address: (.*?)\n`)
		parts := addrRE.FindStringSubmatch(res.Stdout.String())
		c.Assert(parts, check.HasLen, 2)
		// Build the manifest in a scratch dir; clean it up on exit (the
		// original leaked one temp dir per run). Runs after the Chdir
		// defer below restores the working directory.
		dir, err := ioutil.TempDir("", "service")
		c.Assert(err, check.IsNil)
		defer os.RemoveAll(dir)
		currDir, err := os.Getwd()
		c.Assert(err, check.IsNil)
		err = os.Chdir(dir)
		c.Assert(err, check.IsNil)
		defer os.Chdir(currDir)
		res = T("service-template").Run(env)
		c.Assert(res, ResultOk)
		// Substitute the template placeholders in manifest.yaml.
		replaces := map[string]string{
			"team_responsible_to_provide_service": "integration-team",
			"production-endpoint.com":             "http://" + parts[1],
			"servicename":                         "integration-service",
		}
		for k, v := range replaces {
			res = NewCommand("sed", "-i", "-e", "'s~"+k+"~"+v+"~'", "manifest.yaml").Run(env)
			c.Assert(res, ResultOk)
		}
		res = T("service-create", "manifest.yaml").Run(env)
		c.Assert(res, ResultOk)
		res = T("service-info", "integration-service").Run(env)
		c.Assert(res, ResultOk)
		env.Set("servicename", "integration-service")
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("app-remove", "-y", "-a", appName).Run(env)
		c.Check(res, ResultOk)
		res = T("service-destroy", "integration-service", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// serviceBind creates one service instance per app, binds it, waits for
// the bind event to finish, and verifies the injected environment
// variable is visible both via env-get and inside a running unit.
// Publishes the instance name under "bindnames".
func serviceBind() ExecFlow {
	flow := ExecFlow{
		matrix: map[string]string{
			"app": "appnames",
		},
		parallel: true,
		requires: []string{"appnames", "servicename"},
		provides: []string{"bindnames"},
	}
	bindName := "{{.servicename}}-{{.app}}"
	flow.forward = func(c *check.C, env *Environment) {
		res := T("service-instance-add", "{{.servicename}}", bindName, "-t", "integration-team").Run(env)
		c.Assert(res, ResultOk)
		res = T("service-instance-bind", "{{.servicename}}", bindName, "-a", "{{.app}}").Run(env)
		c.Assert(res, ResultOk)
		// Wait until no running ("-r") bind event remains for the app.
		ok := retry(15*time.Minute, func() bool {
			res = T("event-list", "-k", "app.update.bind", "-v", "{{.app}}", "-r").Run(env)
			c.Assert(res, ResultOk)
			return res.Stdout.String() == ""
		})
		c.Assert(ok, check.Equals, true, check.Commentf("bind did not complete after 15 minutes: %v", res))
		res = T("event-list", "-k", "app.update.bind", "-v", "{{.app}}").Run(env)
		c.Assert(res, ResultOk)
		c.Assert(res, ResultMatches, Expected{Stdout: `.*true.*`}, check.Commentf("event did not succeed"))
		// The bound variable should appear in the app's environment...
		ok = retry(time.Minute, func() bool {
			res = T("env-get", "-a", "{{.app}}").Run(env)
			c.Check(res, ResultOk)
			return strings.Contains(res.Stdout.String(), "INTEGRATION_ENV=")
		})
		c.Assert(ok, check.Equals, true, check.Commentf("env not gettable after 1 minute: %v", res))
		// ...and be injected into running units.
		cmd := T("app-run", "-a", "{{.app}}", "env")
		ok = retry(time.Minute, func() bool {
			res = cmd.Run(env)
			return strings.Contains(res.Stdout.String(), "INTEGRATION_ENV=TRUE")
		})
		c.Assert(ok, check.Equals, true, check.Commentf("env not injected after 1 minute: %v", res))
		env.Add("bindnames", bindName)
	}
	flow.backward = func(c *check.C, env *Environment) {
		res := T("service-instance-remove", "{{.servicename}}", bindName, "-f", "-y").Run(env)
		c.Check(res, ResultOk)
	}
	return flow
}
// TestBase runs every registered integration flow in order, rolling back
// the executed flows in reverse order when the test finishes.
func (s *S) TestBase(c *check.C) {
	s.config(c)
	// A nil env means the integration suite is not configured; skip.
	if s.env == nil {
		return
	}
	var executedFlows []*ExecFlow
	defer func() {
		// Roll back only what actually ran, newest first.
		for i := len(executedFlows) - 1; i >= 0; i-- {
			executedFlows[i].Rollback(c, s.env)
		}
	}()
	for i := range flows {
		f := &flows[i]
		if len(f.provides) > 0 {
			providesAll := true
			for _, envVar := range f.provides {
				if s.env.Get(envVar) == "" {
					providesAll = false
					break
				}
			}
			// Skip flows whose outputs are already present in the
			// environment (e.g. pre-seeded by the configuration).
			if providesAll {
				continue
			}
		}
		executedFlows = append(executedFlows, f)
		f.Run(c, s.env)
	}
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.package recipe
package integration
import (
"bytes"
"fmt"
"reflect"
"sort"
"sync"
"testing"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/storage/storagepb"
)
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	key := []byte("foo")
	reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
	respput, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// overwrite
	reqput.Value = []byte("baz")
	respput2, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// An overwrite must be assigned a strictly newer store revision.
	if respput2.Header.Revision <= respput.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			respput2.Header.Revision, respput.Header.Revision)
	}
	reqrange := &pb.RangeRequest{Key: key}
	resprange, err := kvc.Range(context.TODO(), reqrange)
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(resprange.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
	}
	kv := resprange.Kvs[0]
	// After an overwrite the mod revision must exceed the create revision.
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	// The stored value must reflect the second put.
	if !reflect.DeepEqual(reqput.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
	}
}
// TestV3TxnTooManyOps verifies that a txn overflowing any of its three op
// lists (Compare, Success, Failure) is rejected with ErrTooManyOps.
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV

	// putUnion builds the single put operation reused by both the Success
	// and Failure branches below.
	putUnion := func() *pb.RequestUnion {
		return &pb.RequestUnion{
			Request: &pb.RequestUnion_RequestPut{
				RequestPut: &pb.PutRequest{
					Key:   []byte("bar"),
					Value: []byte("bar"),
				},
			},
		}
	}
	// Each helper appends one operation of a given kind to the txn.
	appendCompare := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			})
	}
	appendSuccess := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success, putUnion())
	}
	appendFailure := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure, putUnion())
	}

	cases := []func(txn *pb.TxnRequest){
		appendCompare,
		appendSuccess,
		appendFailure,
	}
	for i, add := range cases {
		// Build a txn with one op more than the server-side limit.
		txn := &pb.TxnRequest{}
		for j := 0; j < v3rpc.MaxOpsPerTxn+1; j++ {
			add(txn)
		}
		if _, err := kvc.Txn(context.Background(), txn); err != v3rpc.ErrTooManyOps {
			t.Errorf("#%d: err = %v, want %v", i, err, v3rpc.ErrTooManyOps)
		}
	}
}
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	key := []byte("foo")
	// Lease ID 123456 was never created, so any write attached to it must fail.
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	// Each case attempts the bad put through a different code path; after each
	// one, the loop below confirms the key was never stored.
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestRange{
					RequestRange: rreq}})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			// The txn takes the Success branch, so the bad put in Failure
			// must be ignored and the txn must succeed.
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}
	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		keySet  []string // keys written before deleting
		begin   string   // DeleteRange start key
		end     string   // DeleteRange end key (per the cases below, "" deletes only begin)
		wantSet [][]byte // keys expected to survive, in range order
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/",
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop",
			[][]byte{[]byte("fop")},
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos",
			[][]byte{[]byte("foo")},
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo",
			[][]byte{[]byte("foo")},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		kvc := clus.RandClient().KV
		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}
		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end)}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}
		// Fetch everything left and compare against the expected survivors.
		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}
		keys := [][]byte{}
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		// was: reflect.DeepEqual(...) == false — use the idiomatic negation.
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3TxnInvaildRange ensures that a txn carrying a range request with an
// invalid revision (future or already compacted) is rejected.
// NOTE(review): the name misspells "Invalid"; left as-is to avoid changing
// the exported test name.
func TestV3TxnInvaildRange(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	// Write the key three times so the store reaches revision 4, then
	// compact away revisions below 2.
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		_, err := kvc.Put(context.Background(), preq)
		if err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}
	_, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 2})
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	// future rev
	txn := &pb.TxnRequest{}
	txn.Success = append(txn.Success, &pb.RequestUnion{
		Request: &pb.RequestUnion_RequestPut{
			RequestPut: preq}})
	rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100}
	txn.Success = append(txn.Success, &pb.RequestUnion{
		Request: &pb.RequestUnion_RequestRange{
			RequestRange: rreq}})
	if _, err := kvc.Txn(context.TODO(), txn); err != v3rpc.ErrFutureRev {
		t.Errorf("err = %v, want %v", err, v3rpc.ErrFutureRev)
	}
	// compacted rev
	tv, _ := txn.Success[1].Request.(*pb.RequestUnion_RequestRange)
	tv.RequestRange.Revision = 1
	if _, err := kvc.Txn(context.TODO(), txn); err != v3rpc.ErrCompacted {
		t.Errorf("err = %v, want %v", err, v3rpc.ErrCompacted)
	}
}
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
// Each case opens a watch stream, puts putKeys concurrently, and expects the
// stream to deliver exactly wresps: a Created ack followed by event responses.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("helloworld")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("helloworld")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		wAPI := clus.RandClient().Watch
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, err := wAPI.Watch(ctx)
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}
		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}
		// Issue the puts asynchronously so the watcher observes them live.
		// NOTE(review): t.Fatalf from a non-test goroutine does not stop the
		// test function; t.Errorf + return would be safer — confirm intent.
		go func() {
			for _, k := range tt.putKeys {
				kvc := clus.RandClient().KV
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					t.Fatalf("#%d: couldn't put key (%v)", i, err)
				}
			}
		}()
		// The first response carries Created and the watch ID; all later
		// responses must reuse that same ID.
		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}
		// No further responses should arrive once all expected ones are in.
		rok, nr := WaitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	// StartRevision 0: the watcher stays in the synced group.
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	// StartRevision 1: the watcher goes through the unsynced group.
	testV3WatchCancel(t, 1)
}
// testV3WatchCancel creates a watch on "foo" starting at startRev (0 for the
// synced path, 1 for the unsynced path — see the callers above), cancels it,
// and verifies that a subsequent put produces no further watch response.
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := clus.RandClient().Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	// Cancel using the watch ID returned in the create ack.
	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}
	kvc := clus.RandClient().KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}
	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleWatchersSynced runs the multi-watcher test with the
// watchers in the synced group (StartRevision 0).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multi-watcher test with the
// watchers in the unsynced group (StartRevision 1).
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 1)
}
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	kvc := clus.RandClient().KV
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := clus.RandClient().Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	// watchKeyN watchers on key "foo" plus one watcher on prefix "fo".
	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo"), StartRevision: startRev}}}
		} else {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("fo"), StartRevision: startRev}}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}
	// Collect the watch IDs from the create acks; each later event response
	// must carry one of them (consumed exactly once).
	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}
	// "foo" matches all watchers, so every watcher gets one response.
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}
	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}
	// now Recv should block because there is no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleEventsTxnSynced runs the multi-event txn watch test on
// the synced path (StartRevision 0).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the multi-event txn watch test on
// the unsynced path (StartRevision 1).
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
// A single txn puts foo0..foo2; the prefix watcher must deliver all three
// events (possibly split across responses).
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Prefix: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// One txn with three puts: all commit at the same revision (2).
	kvc := clus.RandClient().KV
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.Request = &pb.RequestUnion_RequestPut{
			RequestPut: &pb.PutRequest{
				Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}
	// Accumulate events, skipping the create ack, until all three arrive.
	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	// Sort by key: event arrival order within a txn is not asserted here.
	sort.Sort(eventsSortByKey(events))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
// eventsSortByKey implements sort.Interface, ordering events
// lexicographically by their key bytes.
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int      { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int) { evs[j], evs[i] = evs[i], evs[j] }
func (evs eventsSortByKey) Less(i, j int) bool {
	return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0
}
// TestV3WatchMultipleEventsPutUnsynced puts foo0/foo1 before creating a
// prefix watcher at StartRevision 1, then puts both keys again, and expects
// all four events (two historical, two live) on the stream.
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Prefix: []byte("foo"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}
	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if PUT requests are committed by now, first receive would return
		// multiple events, but if not, it returns a single event. In SSD,
		// it should return 4 events at once.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleStreamsSynced runs the multi-stream watch test on the
// synced path (StartRevision 0).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream watch test on the
// unsynced path (StartRevision 1).
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 1)
}
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	wAPI := clus.RandClient().Watch
	kvc := clus.RandClient().KV
	// Open five independent watch streams, each watching key "foo".
	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, errW := wAPI.Watch(ctx)
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: startRev}}}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}
	// Drain the create ack on every stream before writing.
	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	// Every stream must deliver the same single put event.
	// NOTE(review): t.Fatalf from a non-test goroutine does not stop the test
	// function as Fatalf normally would — confirm intent.
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				t.Fatalf("wStream.Recv error: %v", err)
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there is no more events coming
			rok, nr := WaitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()
	clus.Terminate(t)
}
// WaitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// The Recv error is deliberately dropped; a failed receive surfaces
		// as a nil response on rCh.
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// A response arrived before the deadline: report it as unexpected.
		return false, nr
	case <-time.After(timeout):
	}
	// Timed out: close the send side, then consume the pending Recv result
	// so the goroutine above does not leak.
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		return false, rv
	}
	return true, nil
}
// TestV3RangeRequest puts each case's keys into a fresh cluster, issues the
// case's Range requests, and checks the returned keys, the More flag, and the
// response header revision.
func TestV3RangeRequest(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys []string          // keys written before querying
		reqs    []pb.RangeRequest // range queries to issue
		wresps  [][]string        // expected keys per query, in order
		wmores  []bool            // expected More flag per query
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		for _, k := range tt.putKeys {
			kvc := clus.RandClient().KV
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}
		for j, req := range tt.reqs {
			kvc := clus.RandClient().KV
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// Revision 0 means "current": one revision per put, plus the
			// initial revision 1.
			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)
	revoke := func(clus *ClusterV3, leaseID int64) error {
		_, err := clus.RandClient().Lease.LeaseRevoke(
			context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	}
	testLeaseRemoveLeasedKey(t, revoke)
}
// TestV3LeaseCreateByID ensures leases may be created by a given id.
func TestV3LeaseCreateByID(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	// create fixed lease
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 1 (%v)", err)
	}
	if lresp.ID != 1 {
		// was missing the "want" argument for the second %v (go vet printf).
		t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
	}
	// create duplicate fixed lease: must be rejected with ErrLeaseExists.
	lresp, err = clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != nil {
		t.Error(err)
	}
	if lresp.ID != 0 || lresp.Error != lease.ErrLeaseExists.Error() {
		t.Errorf("got id %v, wanted id 0 (%v)", lresp.ID, lresp.Error)
	}
	// create fresh fixed lease
	lresp, err = clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 2, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 2 (%v)", err)
	}
	if lresp.ID != 2 {
		// was missing the "want" argument for the second %v (go vet printf).
		t.Errorf("got id %v, wanted id %v", lresp.ID, 2)
	}
}
// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		// let lease lapse; wait for deleted key
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, err := clus.RandClient().Watch.Watch(ctx)
		if err != nil {
			return err
		}
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: 1}}}
		if err := wStream.Send(wreq); err != nil {
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'created' message
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'put' message
			return err
		}
		// The third response should be the DELETE issued when the lease
		// (TTL 1s, created by testLeaseRemoveLeasedKey) expires.
		errc := make(chan error, 1)
		go func() {
			resp, err := wStream.Recv()
			switch {
			case err != nil:
				errc <- err
			case len(resp.Events) != 1:
				fallthrough
			case resp.Events[0].Type != storagepb.DELETE:
				errc <- fmt.Errorf("expected key delete, got %v", resp)
			default:
				errc <- nil
			}
		}()
		// Bound the wait so a stuck lease doesn't hang the test.
		select {
		case <-time.After(15 * time.Second):
			return fmt.Errorf("lease expiration too slow")
		case err := <-errc:
			return err
		}
	})
}
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		lc := clus.RandClient().Lease
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		lac, err := lc.LeaseKeepAlive(ctx)
		if err != nil {
			return err
		}
		defer lac.CloseSend()
		// renew long enough so lease would've expired otherwise
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			// Sleep half the remaining TTL so the next keepalive lands
			// before the lease can lapse.
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		// Finally revoke so the attached key is removed, which is what
		// testLeaseRemoveLeasedKey verifies.
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
// TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
// client to confirm it's visible to the whole cluster.
func TestV3LeaseExists(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	// create lease
	ctx0, cancel0 := context.WithCancel(context.Background())
	defer cancel0()
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		ctx0,
		&pb.LeaseCreateRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}
	// confirm keepalive
	// RandClient may pick a different member than the one used to create the
	// lease; the keepalive must still round-trip.
	ctx1, cancel1 := context.WithCancel(context.Background())
	defer cancel1()
	lac, err := clus.RandClient().Lease.LeaseKeepAlive(ctx1)
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()
	if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}
	if _, err = lac.Recv(); err != nil {
		t.Fatal(err)
	}
}
// acquireLeaseAndKey creates a new lease and creates an attached key.
// It returns the new lease's ID, or an error if lease creation or the
// attached Put fails.
func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
	// create lease
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{TTL: 1})
	if err != nil {
		return 0, err
	}
	if lresp.Error != "" {
		// was fmt.Errorf(lresp.Error): a non-constant format string misbehaves
		// if the message contains '%' verbs (flagged by go vet's printf check).
		return 0, fmt.Errorf("%s", lresp.Error)
	}
	// attach to key
	put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
	if _, err := clus.RandClient().KV.Put(context.TODO(), put); err != nil {
		return 0, err
	}
	return lresp.ID, nil
}
// testLeaseRemoveLeasedKey performs some action while holding a lease with an
// attached key "foo", then confirms the key is gone.
func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	id, err := acquireLeaseAndKey(clus, "foo")
	if err != nil {
		t.Fatal(err)
	}
	if err = act(clus, id); err != nil {
		t.Fatal(err)
	}

	// confirm no key
	resp, err := clus.RandClient().KV.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.Kvs) != 0 {
		t.Fatalf("lease removed but key remains")
	}
}
integration: add want IDs to Errorf calls
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
	"bytes"
	"errors"
	"fmt"
	"reflect"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/coreos/etcd/etcdserver/api/v3rpc"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/storage/storagepb"
)
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV

	key := []byte("foo")
	putReq := &pb.PutRequest{Key: key, Value: []byte("bar")}
	firstPut, err := kvc.Put(context.TODO(), putReq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	// overwrite
	putReq.Value = []byte("baz")
	secondPut, err := kvc.Put(context.TODO(), putReq)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if secondPut.Header.Revision <= firstPut.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			secondPut.Header.Revision, firstPut.Header.Revision)
	}

	// the read-back value must be the overwritten one
	rangeResp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key})
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(rangeResp.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(rangeResp.Kvs))
	}
	kv := rangeResp.Kvs[0]
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	if !reflect.DeepEqual(putReq.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", putReq.Value, kv.Value)
	}
}
// TestV3TxnTooManyOps verifies that a txn exceeding the per-txn op limit in
// any of its three lists (compare, success, failure) is rejected with
// ErrTooManyOps.
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV

	// putUnion builds one put op for the success/failure lists.
	putUnion := func() *pb.RequestUnion {
		return &pb.RequestUnion{
			Request: &pb.RequestUnion_RequestPut{
				RequestPut: &pb.PutRequest{
					Key:   []byte("bar"),
					Value: []byte("bar"),
				},
			},
		}
	}
	// each helper grows one of the three txn op lists by a single entry
	appendCompare := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			})
	}
	appendSuccess := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success, putUnion())
	}
	appendFailure := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure, putUnion())
	}

	tests := []func(txn *pb.TxnRequest){
		appendCompare,
		appendSuccess,
		appendFailure,
	}
	for i, grow := range tests {
		txn := &pb.TxnRequest{}
		// one op past the limit must be rejected
		for j := 0; j < v3rpc.MaxOpsPerTxn+1; j++ {
			grow(txn)
		}
		if _, err := kvc.Txn(context.Background(), txn); err != v3rpc.ErrTooManyOps {
			t.Errorf("#%d: err = %v, want %v", i, err, v3rpc.ErrTooManyOps)
		}
	}
}
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	key := []byte("foo")
	// lease 123456 is never created, so any write attached to it must fail
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn: only the taken branch
		// is validated, so the bad put in the failure list is never checked
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestRange{
					RequestRange: rreq}})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: preq}})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}
	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
// Each case puts keySet, deletes [begin, end), and checks that exactly
// wantSet remains.
func TestV3DeleteRange(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		keySet  []string
		begin   string
		end     string
		wantSet [][]byte
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/",
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop",
			[][]byte{[]byte("fop")},
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos",
			[][]byte{[]byte("foo")},
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo",
			[][]byte{[]byte("foo")},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		kvc := clus.RandClient().KV
		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}
		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end)}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}
		// read back the entire keyspace to see what survived the delete
		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}
		keys := make([][]byte, 0, len(rresp.Kvs))
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3TxnInvaildRange tests that a txn carrying a range request at a
// future revision or an already-compacted revision is rejected.
// (Exported name keeps its historical "Invaild" typo.)
func TestV3TxnInvaildRange(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	// write some history so there is something to compact
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	for i := 0; i < 3; i++ {
		_, err := kvc.Put(context.Background(), preq)
		if err != nil {
			t.Fatalf("couldn't put key (%v)", err)
		}
	}
	// compact up to revision 2 so a read at revision 1 becomes invalid
	_, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 2})
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	// future rev
	txn := &pb.TxnRequest{}
	txn.Success = append(txn.Success, &pb.RequestUnion{
		Request: &pb.RequestUnion_RequestPut{
			RequestPut: preq}})
	rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100}
	txn.Success = append(txn.Success, &pb.RequestUnion{
		Request: &pb.RequestUnion_RequestRange{
			RequestRange: rreq}})
	if _, err := kvc.Txn(context.TODO(), txn); err != v3rpc.ErrFutureRev {
		t.Errorf("err = %v, want %v", err, v3rpc.ErrFutureRev)
	}
	// compacted rev: reuse the same txn, retargeting the range request
	tv, _ := txn.Success[1].Request.(*pb.RequestUnion_RequestRange)
	tv.RequestRange.Revision = 1
	if _, err := kvc.Txn(context.TODO(), txn); err != v3rpc.ErrCompacted {
		t.Errorf("err = %v, want %v", err, v3rpc.ErrCompacted)
	}
}
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
// Each case opens a watch stream, issues the puts concurrently, and checks
// the exact sequence of watch responses (header revision, created flag,
// watch id, and events).
func TestV3WatchFromCurrentRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("helloworld")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("helloworld")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		wAPI := clus.RandClient().Watch
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, err := wAPI.Watch(ctx)
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}
		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}
		// issue the puts concurrently with receiving watch responses;
		// i and putKeys are passed as arguments so the goroutine does not
		// race with the loop variables of later iterations
		go func(i int, putKeys []string) {
			for _, k := range putKeys {
				kvc := clus.RandClient().KV
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					// t.Fatal must not be called outside the test goroutine
					// (it would only exit this goroutine); report and stop.
					t.Errorf("#%d: couldn't put key (%v)", i, err)
					return
				}
			}
		}(i, tt.putKeys)
		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				// resp is nil on a stream error; continuing would panic below
				t.Fatalf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}
		rok, nr := WaitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
// startRev 0 means "watch from the current revision".
func TestV3WatchCancelSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
// startRev 1 forces the watcher to start from a past revision.
func TestV3WatchCancelUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 1)
}
// testV3WatchCancel creates a watcher on "foo" at startRev, cancels it, and
// verifies that a subsequent put delivers no event to the canceled stream.
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := clus.RandClient().Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// first response acknowledges watcher creation and carries its WatchId
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	// cancel using the id returned by the create ack
	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}
	kvc := clus.RandClient().KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}
	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleWatchersSynced runs the multi-watcher test with
// watchers starting at the current revision (synced).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multi-watcher test with
// watchers starting at a past revision (unsynced).
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 1)
}
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	kvc := clus.RandClient().KV
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := clus.RandClient().Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	// watchKeyN watchers on the exact key "foo", plus one on prefix "fo"
	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo"), StartRevision: startRev}}}
		} else {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Prefix: []byte("fo"), StartRevision: startRev}}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}
	// collect the watcher ids from the create acknowledgements
	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// every watcher must deliver the "foo" put exactly once; delete each id
	// as its response arrives to catch duplicates
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}
	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}
	// now Recv should block because there is no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleEventsTxnSynced runs the txn multi-event test with a
// watcher starting at the current revision (synced).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the txn multi-event test with a
// watcher starting at a past revision (unsynced).
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
// A single txn puts foo0..foo2; the prefix watcher must see all three puts.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Prefix: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// apply three puts in one transaction
	kvc := clus.RandClient().KV
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.Request = &pb.RequestUnion_RequestPut{
			RequestPut: &pb.PutRequest{
				Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}
	// the three events may be split across responses; accumulate them all,
	// skipping the create acknowledgement
	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	// sort by key for a deterministic comparison against the expectation
	sort.Sort(eventsSortByKey(events))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
// eventsSortByKey orders watch events lexicographically by the event key
// bytes; it implements sort.Interface.
type eventsSortByKey []*storagepb.Event

func (s eventsSortByKey) Len() int      { return len(s) }
func (s eventsSortByKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s eventsSortByKey) Less(i, j int) bool {
	return bytes.Compare(s[i].Kv.Key, s[j].Kv.Key) < 0
}
// TestV3WatchMultipleEventsPutUnsynced creates a prefix watcher starting at
// revision 1 (unsynced) after two puts, then issues two more puts and checks
// that all four events arrive in order.
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := clus.RandClient().KV
	// history the unsynced watcher must replay
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Prefix: []byte("foo"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// live puts issued after the watcher was registered
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}
	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if PUT requests are committed by now, first receive would return
		// multiple events, but if not, it returns a single event. In SSD,
		// it should return 4 events at once.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleStreamsSynced runs the multi-stream test with watchers
// starting at the current revision (synced).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream test with watchers
// starting at a past revision (unsynced).
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 1)
}
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
// A single put must be delivered exactly once on every stream.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	wAPI := clus.RandClient().Watch
	kvc := clus.RandClient().KV
	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, errW := wAPI.Watch(ctx)
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: startRev}}}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}
	// every stream must acknowledge its watcher creation first
	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				// t.Fatal must not be called outside the test goroutine
				// (it would only exit this goroutine); report and bail out
				// before dereferencing the nil wresp.
				t.Errorf("wStream.Recv error: %v", err)
				return
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there is no more events coming
			rok, nr := WaitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()
	clus.Terminate(t)
}
// WaitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	// rCh is deliberately unbuffered: the receiver goroutine blocks on the
	// send until either the select below or the post-timeout drain reads it.
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// on a Recv error resp is forwarded as-is (presumably nil — the
		// drain below treats a nil as "stream closed cleanly")
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// a response arrived before the deadline: unexpected traffic
		return false, nr
	case <-time.After(timeout):
	}
	// timed out; close the send side so the server ends the stream and the
	// pending Recv above unblocks
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		return false, rv
	}
	return true, nil
}
// TestV3RangeRequest exercises the Range API: single-key lookups, key
// ranges, reads at specific revisions, limits (with the More flag), and
// sorted queries.
func TestV3RangeRequest(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys []string          // keys written before issuing the requests
		reqs    []pb.RangeRequest // requests to run against a fresh cluster
		wresps  [][]string        // expected keys per request, in order
		wmores  []bool            // expected More flag per request
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		for _, k := range tt.putKeys {
			kvc := clus.RandClient().KV
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}
		for j, req := range tt.reqs {
			kvc := clus.RandClient().KV
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// Revision 0 reads at the current revision, which after the
			// puts above is len(putKeys)+1
			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		// the helper verifies the attached key is gone after this revoke
		lc := clus.RandClient().Lease
		_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
// TestV3LeaseCreateByID ensures leases may be created by a given id:
// a fixed id succeeds, a duplicate id is rejected with ErrLeaseExists,
// and a fresh id succeeds again.
func TestV3LeaseCreateByID(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	// create fixed lease
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != nil {
		// must be fatal: lresp is nil on RPC error, so continuing would
		// panic on lresp.ID below
		t.Fatalf("could not create lease 1 (%v)", err)
	}
	if lresp.ID != 1 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
	}
	// create duplicate fixed lease
	lresp, err = clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != nil {
		t.Fatal(err)
	}
	// the duplicate is reported in-band via lresp.Error, not as an RPC error
	if lresp.ID != 0 || lresp.Error != lease.ErrLeaseExists.Error() {
		t.Errorf("got id %v, wanted id 0 (%v)", lresp.ID, lresp.Error)
	}
	// create fresh fixed lease
	lresp, err = clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 2, TTL: 1})
	if err != nil {
		t.Fatalf("could not create lease 2 (%v)", err)
	}
	if lresp.ID != 2 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 2)
	}
}
// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		// let lease lapse; wait for deleted key
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		wStream, err := clus.RandClient().Watch.Watch(ctx)
		if err != nil {
			return err
		}

		// Watch "foo" from revision 1 so both the earlier put and the
		// expiry-triggered delete are delivered on this stream.
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: 1}}}
		if err := wStream.Send(wreq); err != nil {
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'created' message
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'put' message
			return err
		}

		// Receive the expected DELETE event on a goroutine so the select
		// below can bound the wait; errc is buffered so the goroutine
		// never blocks if the timeout fires first.
		errc := make(chan error, 1)
		go func() {
			resp, err := wStream.Recv()
			switch {
			case err != nil:
				errc <- err
			case len(resp.Events) != 1:
				// wrong event count reports the same error as a wrong type
				fallthrough
			case resp.Events[0].Type != storagepb.DELETE:
				errc <- fmt.Errorf("expected key delete, got %v", resp)
			default:
				errc <- nil
			}
		}()
		select {
		case <-time.After(15 * time.Second):
			return fmt.Errorf("lease expiration too slow")
		case err := <-errc:
			return err
		}
	})
}
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		lc := clus.RandClient().Lease
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		lac, err := lc.LeaseKeepAlive(ctx)
		if err != nil {
			return err
		}
		defer lac.CloseSend()

		// renew long enough so lease would've expired otherwise;
		// each round sleeps half the returned TTL before renewing again.
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		// finally revoke so the caller observes the key disappearing
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
// TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
// client to confirm it's visible to the whole cluster.
func TestV3LeaseExists(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create lease on one (randomly chosen) member
	ctx0, cancel0 := context.WithCancel(context.Background())
	defer cancel0()
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		ctx0,
		&pb.LeaseCreateRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// confirm keepalive: a second random client must already know the
	// lease, proving it propagated through the cluster.
	ctx1, cancel1 := context.WithCancel(context.Background())
	defer cancel1()
	lac, err := clus.RandClient().Lease.LeaseKeepAlive(ctx1)
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()
	if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}
	if _, err = lac.Recv(); err != nil {
		t.Fatal(err)
	}
}
// acquireLeaseAndKey creates a new short-TTL lease, attaches key to it, and
// returns the lease ID. A non-empty in-band lresp.Error is surfaced as an
// ordinary error.
func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
	// create lease
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{TTL: 1})
	if err != nil {
		return 0, err
	}
	if lresp.Error != "" {
		// BUG FIX: lresp.Error is server-supplied text; it must never be
		// used as a format string (a stray '%' would corrupt the message).
		return 0, fmt.Errorf("%s", lresp.Error)
	}
	// attach to key
	put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
	if _, err := clus.RandClient().KV.Put(context.TODO(), put); err != nil {
		return 0, err
	}
	return lresp.ID, nil
}
// testLeaseRemoveLeasedKey performs some action while holding a lease with an
// attached key "foo", then confirms the key is gone.
func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	leaseID, err := acquireLeaseAndKey(clus, "foo")
	if err != nil {
		t.Fatal(err)
	}

	// act is expected to end the lease (revoke it, let it expire, ...).
	if err = act(clus, leaseID); err != nil {
		t.Fatal(err)
	}

	// confirm no key: the range over "foo" must come back empty.
	rreq := &pb.RangeRequest{Key: []byte("foo")}
	rresp, err := clus.RandClient().KV.Range(context.TODO(), rreq)
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 0 {
		t.Fatalf("lease removed but key remains")
	}
}
|
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/crc64"
"hash/fnv"
"io"
"reflect"
"sort"
)
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to CRC-64. CRC probably isn't the best hash function to use
// but it is in the Go standard library and there is a lot of support
// for hardware acceleration.
Hasher hash.Hash64
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
	// Create default options
	if opts == nil {
		opts = &HashOptions{}
	}
	// Default to CRC-64 (ECMA polynomial) when no hasher is supplied.
	if opts.Hasher == nil {
		opts.Hasher = crc64.New(crc64.MakeTable(crc64.ECMA))
	}

	// Reset the hash so a caller-supplied (possibly reused) hasher starts clean.
	opts.Hasher.Reset()

	// Create our walker and walk the structure
	w := &walker{w: opts.Hasher}
	if err := w.visit(reflect.ValueOf(v)); err != nil {
		return 0, err
	}

	return opts.Hasher.Sum64(), nil
}
type walker struct {
w io.Writer
}
// visit hashes a single reflect.Value into the walker's writer. Pointers and
// interfaces are dereferenced first, invalid (nil) values hash as a zero
// int8, and machine-sized int/uint are widened to 64 bits so the binary
// encoding is stable. Composite kinds recurse; unsupported kinds error.
func (w *walker) visit(v reflect.Value) error {
	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = reflect.Indirect(v)
			continue
		}
		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		var tmp int8
		v = reflect.ValueOf(tmp)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		return binary.Write(w.w, binary.LittleEndian, v.Interface())
	}

	switch k {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.Map:
		var err error

		// We first need to order the keys so it is a deterministic walk
		var hasher hash.Hash64
		m := make(map[uint64]reflect.Value)
		ks := make([]uint64, v.Len())
		keys := v.MapKeys()
		for i := 0; i < len(keys); i++ {
			k := keys[i]
			ks[i], err = Hash(k.Interface(), &HashOptions{Hasher: hasher})
			if err != nil {
				return err
			}

			// Hash collision! We use a secondary hash function. Restart
			// the loop and start over. If we already are trying a second
			// hash function, panic.
			if _, ok := m[ks[i]]; ok {
				if hasher != nil {
					panic(fmt.Sprintf("unresolvable hash collision: %#v", k.Interface()))
				}

				hasher = fnv.New64()
				// BUG FIX: the original fell through and recorded the
				// colliding key. Restart cleanly: clear the partial index
				// so stale CRC-based hashes cannot trigger false
				// collisions, and set i so the loop's i++ lands on 0 and
				// every key is rehashed with the secondary function.
				m = make(map[uint64]reflect.Value)
				i = -1
				continue
			}

			m[ks[i]] = k
		}

		// Go through the sorted keys and hash
		sort.Sort(uint64Slice(ks))
		for _, hashKey := range ks {
			k := m[hashKey]
			v := v.MapIndex(k)
			if err := w.visit(k); err != nil {
				return err
			}
			if err := w.visit(v); err != nil {
				return err
			}
		}

	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// Skip fields named "_" (blank-identifier padding).
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				if err := w.visit(v); err != nil {
					return err
				}
			}
		}

	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.String:
		_, err := w.w.Write([]byte(v.String()))
		return err

	default:
		return fmt.Errorf("unknown kind to hash: %s", k)
	}

	return nil
}
// uint64Slice implements sort.Interface over a slice of uint64 hash values.
type uint64Slice []uint64

func (s uint64Slice) Len() int           { return len(s) }
func (s uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
Add the missing `continue` after resetting the loop on a hash collision.
This didn't affect functionality here, but it could have in the future.
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/crc64"
"hash/fnv"
"io"
"reflect"
"sort"
)
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to CRC-64. CRC probably isn't the best hash function to use
// but it is in the Go standard library and there is a lot of support
// for hardware acceleration.
Hasher hash.Hash64
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = crc64.New(crc64.MakeTable(crc64.ECMA))
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{w: opts.Hasher}
if err := w.visit(reflect.ValueOf(v)); err != nil {
return 0, err
}
return opts.Hasher.Sum64(), nil
}
type walker struct {
w io.Writer
}
// visit hashes a single reflect.Value into the walker's writer. Pointers and
// interfaces are dereferenced first, invalid (nil) values hash as a zero
// int8, and machine-sized int/uint are widened to 64 bits so the binary
// encoding is stable. Composite kinds recurse; unsupported kinds error.
func (w *walker) visit(v reflect.Value) error {
	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = reflect.Indirect(v)
			continue
		}
		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		var tmp int8
		v = reflect.ValueOf(tmp)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		return binary.Write(w.w, binary.LittleEndian, v.Interface())
	}

	switch k {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.Map:
		var err error

		// We first need to order the keys so it is a deterministic walk
		var hasher hash.Hash64
		m := make(map[uint64]reflect.Value)
		ks := make([]uint64, v.Len())
		keys := v.MapKeys()
		for i := 0; i < len(keys); i++ {
			k := keys[i]
			ks[i], err = Hash(k.Interface(), &HashOptions{Hasher: hasher})
			if err != nil {
				return err
			}

			// Hash collision! We use a secondary hash function. Restart
			// the loop and start over. If we already are trying a second
			// hash function, report an error instead of panicking:
			// libraries should not panic on data-dependent conditions.
			if _, ok := m[ks[i]]; ok {
				if hasher != nil {
					return fmt.Errorf("unresolvable hash collision: %#v", k.Interface())
				}

				hasher = fnv.New64()
				// BUG FIX: restart cleanly. Clear the partial index so
				// stale CRC-based hashes cannot trigger false collisions,
				// and set i so the loop's i++ lands on 0 and every key
				// (including keys[0]) is rehashed with the secondary
				// function; `i = 0; continue` skipped index 0.
				m = make(map[uint64]reflect.Value)
				i = -1
				continue
			}

			m[ks[i]] = k
		}

		// Go through the sorted keys and hash
		sort.Sort(uint64Slice(ks))
		for _, hashKey := range ks {
			k := m[hashKey]
			v := v.MapIndex(k)
			if err := w.visit(k); err != nil {
				return err
			}
			if err := w.visit(v); err != nil {
				return err
			}
		}

	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// Skip fields named "_" (blank-identifier padding).
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				if err := w.visit(v); err != nil {
					return err
				}
			}
		}

	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.String:
		_, err := w.w.Write([]byte(v.String()))
		return err

	default:
		return fmt.Errorf("unknown kind to hash: %s", k)
	}

	return nil
}
// uint64Slice is a sortable uint64 slice
type uint64Slice []uint64
func (p uint64Slice) Len() int { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/crc64"
"hash/fnv"
"io"
"reflect"
"sort"
)
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to CRC-64. CRC probably isn't the best hash function to use
// but it is in the Go standard library and there is a lot of support
// for hardware acceleration.
Hasher hash.Hash64
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = crc64.New(crc64.MakeTable(crc64.ECMA))
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{w: opts.Hasher}
if err := w.visit(reflect.ValueOf(v)); err != nil {
return 0, err
}
return opts.Hasher.Sum64(), nil
}
type walker struct {
w io.Writer
}
// visit hashes a single reflect.Value into the walker's writer. Pointers and
// interfaces are dereferenced first, invalid (nil) values hash as a zero
// int8, and machine-sized int/uint are widened to 64 bits so the binary
// encoding is stable. Composite kinds recurse; unsupported kinds error.
func (w *walker) visit(v reflect.Value) error {
	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = reflect.Indirect(v)
			continue
		}
		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		var tmp int8
		v = reflect.ValueOf(tmp)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		return binary.Write(w.w, binary.LittleEndian, v.Interface())
	}

	switch k {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.Map:
		var err error

		// We first need to order the keys so it is a deterministic walk
		var hasher hash.Hash64
		m := make(map[uint64]reflect.Value)
		ks := make([]uint64, v.Len())
		keys := v.MapKeys()
		for i := 0; i < len(keys); i++ {
			k := keys[i]
			ks[i], err = Hash(k.Interface(), &HashOptions{Hasher: hasher})
			if err != nil {
				return err
			}

			// Hash collision! We use a secondary hash function. Restart
			// the loop and start over. If we already are trying a second
			// hash function, give up.
			if _, ok := m[ks[i]]; ok {
				if hasher != nil {
					return fmt.Errorf("unresolvable hash collision: %#v", k.Interface())
				}

				hasher = fnv.New64()
				// BUG FIX: the old `i = 0; continue` skipped index 0 after
				// the post-increment (leaving keys[0] with its CRC hash and
				// making the result depend on random MapKeys order), and
				// kept stale CRC entries in m that could trigger spurious
				// "unresolvable" collisions. Clear m and restart at 0.
				m = make(map[uint64]reflect.Value)
				i = -1
				continue
			}

			m[ks[i]] = k
		}

		// Go through the sorted keys and hash
		sort.Sort(uint64Slice(ks))
		for _, hashKey := range ks {
			k := m[hashKey]
			v := v.MapIndex(k)
			if err := w.visit(k); err != nil {
				return err
			}
			if err := w.visit(v); err != nil {
				return err
			}
		}

	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// Skip fields named "_" (blank-identifier padding).
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				if err := w.visit(v); err != nil {
					return err
				}
			}
		}

	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.String:
		_, err := w.w.Write([]byte(v.String()))
		return err

	default:
		return fmt.Errorf("unknown kind to hash: %s", k)
	}

	return nil
}
// uint64Slice is a sortable uint64 slice
type uint64Slice []uint64
func (p uint64Slice) Len() int { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
Only rehash the keys that actually collided, instead of restarting the loop and rehashing every key.
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/crc64"
"hash/fnv"
"io"
"reflect"
"sort"
)
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to CRC-64. CRC probably isn't the best hash function to use
// but it is in the Go standard library and there is a lot of support
// for hardware acceleration.
Hasher hash.Hash64
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = crc64.New(crc64.MakeTable(crc64.ECMA))
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{w: opts.Hasher}
if err := w.visit(reflect.ValueOf(v)); err != nil {
return 0, err
}
return opts.Hasher.Sum64(), nil
}
type walker struct {
w io.Writer
}
// visit hashes a single reflect.Value into the walker's writer. Pointers and
// interfaces are dereferenced first, invalid (nil) values hash as a zero
// int8, and machine-sized int/uint are widened to 64 bits so the binary
// encoding is stable. Composite kinds recurse; unsupported kinds error.
func (w *walker) visit(v reflect.Value) error {
	// Loop since these can be wrapped in multiple layers of pointers
	// and interfaces.
	for {
		// If we have an interface, dereference it. We have to do this up
		// here because it might be a nil in there and the check below must
		// catch that.
		if v.Kind() == reflect.Interface {
			v = v.Elem()
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = reflect.Indirect(v)
			continue
		}
		break
	}

	// If it is nil, treat it like a zero.
	if !v.IsValid() {
		var tmp int8
		v = reflect.ValueOf(tmp)
	}

	// Binary writing can use raw ints, we have to convert to
	// a sized-int, we'll choose the largest...
	switch v.Kind() {
	case reflect.Int:
		v = reflect.ValueOf(int64(v.Int()))
	case reflect.Uint:
		v = reflect.ValueOf(uint64(v.Uint()))
	case reflect.Bool:
		var tmp int8
		if v.Bool() {
			tmp = 1
		}
		v = reflect.ValueOf(tmp)
	}

	k := v.Kind()

	// We can shortcut numeric values by directly binary writing them
	if k >= reflect.Int && k <= reflect.Complex64 {
		return binary.Write(w.w, binary.LittleEndian, v.Interface())
	}

	switch k {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.Map:
		// We first need to order the keys so it is a deterministic walk:
		// sortValues returns index order by hash, independent of Go's
		// randomized map iteration order.
		keys := v.MapKeys()
		idxs, err := sortValues(keys)
		if err != nil {
			return err
		}
		// Hash key/value pairs in that deterministic order.
		for _, idx := range idxs {
			k := keys[idx]
			v := v.MapIndex(k)
			if err := w.visit(k); err != nil {
				return err
			}
			if err := w.visit(v); err != nil {
				return err
			}
		}

	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// Skip fields named "_" (blank-identifier padding).
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				if err := w.visit(v); err != nil {
					return err
				}
			}
		}

	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			if err := w.visit(v.Index(i)); err != nil {
				return err
			}
		}

	case reflect.String:
		_, err := w.w.Write([]byte(v.String()))
		return err

	default:
		return fmt.Errorf("unknown kind to hash: %s", k)
	}

	return nil
}
// sortValues sorts arbitrary reflection values and returns the ordering
// of that they should be accessed. Given the same set of reflect values,
// this will always return the same int slice.
func sortValues(vs []reflect.Value) ([]int, error) {
	// This stores any values that have collisions and need to be
	// recomputed. Because this is so rare, we don't allocate anything here.
	var collision []int

	// Get the hash values for all the keys. m maps each hash to the index
	// of the value that produced it.
	var err error
	ks := make([]uint64, len(vs))
	m := make(map[uint64]int)
	for i, v := range vs {
		ks[i], err = Hash(v.Interface(), nil)
		if err != nil {
			return nil, err
		}

		if v, ok := m[ks[i]]; ok {
			if v >= 0 {
				// Store the original collision and mark the index as -1
				// which means we already recorded it, but that it was a
				// collision for the future.
				collision = append(collision, v)
				m[ks[i]] = -1
			}
			// Record this colliding index too; both sides of the
			// collision get rehashed below.
			collision = append(collision, i)
			continue
		}

		m[ks[i]] = i
	}

	// If we have any collisions, hash those now using FNV. Only the
	// colliding values are rehashed; everything else keeps its CRC hash.
	if len(collision) > 0 {
		hasher := fnv.New64()
		for _, c := range collision {
			ks[c], err = Hash(vs[c].Interface(), &HashOptions{Hasher: hasher})
			if err != nil {
				return nil, err
			}
			// A second collision (against CRC or FNV hashes) is fatal.
			if _, ok := m[ks[c]]; ok {
				return nil, fmt.Errorf(
					"unresolvable hash collision: %#v", vs[c].Interface())
			}
			m[ks[c]] = c
		}
	}

	// Sort the keys
	sort.Sort(uint64Slice(ks))

	// Build the result: for each hash in sorted order, look up the index
	// of the value that produced it.
	result := make([]int, len(vs))
	for i, v := range ks {
		result[i] = m[v]
	}
	return result, nil
}
// uint64Slice implements sort.Interface over a slice of uint64 hash values.
type uint64Slice []uint64

func (s uint64Slice) Len() int           { return len(s) }
func (s uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
package docker
import (
"bufio"
"bytes"
"context"
"fmt"
"github.com/ViBiOh/docker-deploy/jsonHttp"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client"
"gopkg.in/yaml.v2"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strings"
)
const host = `DOCKER_HOST`
const version = `DOCKER_VERSION`
const configurationFile = `./users`
const admin = `admin`
const ownerLabel = `owner`
const appLabel = `app`
const minMemory = 67108864
const maxMemory = 536870912
var commaByte = []byte(`,`)
var splitLogs = regexp.MustCompile(`.{8}(.*?)\n`)
var networkConfig = network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
`traefik`: &network.EndpointSettings{},
},
}
var containersRequest = regexp.MustCompile(`/containers/?$`)
var containerRequest = regexp.MustCompile(`/containers/([^/]+)/?$`)
var startRequest = regexp.MustCompile(`/containers/([^/]+)/start`)
var stopRequest = regexp.MustCompile(`/containers/([^/]+)/stop`)
var restartRequest = regexp.MustCompile(`/containers/([^/]+)/restart`)
var logRequest = regexp.MustCompile(`/containers/([^/]+)/logs`)
type results struct {
Results interface{} `json:"results"`
}
type user struct {
username string
password string
role string
}
type dockerComposeService struct {
Image string
Command string
Environment map[string]string
Labels map[string]string
ReadOnly bool `yaml:"read_only"`
CPUShares int64 `yaml:"cpu_shares"`
MemoryLimit int64 `yaml:"mem_limit"`
}
type dockerCompose struct {
Version string
Services map[string]dockerComposeService
}
var docker *client.Client
var users map[string]*user
func errorHandler(w http.ResponseWriter, err error) {
log.Print(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// init loads the user configuration file and connects the package-level
// docker client from the DOCKER_HOST / DOCKER_VERSION environment variables.
// NOTE(review): a failed connection is fatal at import time.
func init() {
	users = readConfiguration(configurationFile)

	// `client` here shadows the imported package name for the rest of init.
	client, err := client.NewClient(os.Getenv(host), os.Getenv(version), nil, nil)
	if err != nil {
		log.Fatal(err)
	} else {
		docker = client
	}
}
// readConfiguration loads the users file at path. Each line has the form
// `username,password,role`; the returned map is keyed by lowercased
// username. Returns nil when the file cannot be opened.
func readConfiguration(path string) map[string]*user {
	configFile, err := os.Open(path)
	if err != nil {
		log.Print(err)
		return nil
	}
	// BUG FIX: defer only after the error check; the old code deferred
	// Close on a possibly-nil *os.File.
	defer configFile.Close()

	users := make(map[string]*user)

	scanner := bufio.NewScanner(configFile)
	for scanner.Scan() {
		parts := bytes.Split(scanner.Bytes(), commaByte)
		// BUG FIX: a malformed line with fewer than three fields used to
		// panic on the index below; skip it with a log entry instead.
		if len(parts) < 3 {
			log.Printf(`invalid configuration line: %s`, scanner.Bytes())
			continue
		}
		user := user{string(parts[0]), string(parts[1]), string(parts[2])}
		users[strings.ToLower(user.username)] = &user
	}
	// Surface scanner failures (e.g. over-long lines) instead of silently
	// returning a truncated map.
	if err := scanner.Err(); err != nil {
		log.Print(err)
	}

	return users
}
// isAllowed reports whether loggedUser may act on containerID. Admins may
// act on any container; other users only on containers whose `owner` label
// equals their username. The error is non-nil only when the container
// inspection itself fails.
func isAllowed(loggedUser *user, containerID string) (bool, error) {
	if loggedUser.role != admin {
		container, err := inspectContainer(string(containerID))
		if err != nil {
			return false, err
		}

		owner, ok := container.Config.Labels[ownerLabel]
		if !ok || owner != loggedUser.username {
			return false, nil
		}
	}

	return true, nil
}
// listContainers lists all containers (running or not). Non-admin users are
// restricted to containers carrying their own `owner` label; otherwise, when
// appName is non-empty, the list is filtered by the `app` label. The owner
// filter takes precedence over the app filter.
func listContainers(loggedUser *user, appName *string) ([]types.Container, error) {
	options := types.ContainerListOptions{All: true}
	options.Filters = filters.NewArgs()

	if loggedUser != nil && loggedUser.role != admin {
		if _, err := filters.ParseFlag(`label=`+ownerLabel+`=`+loggedUser.username, options.Filters); err != nil {
			return nil, err
		}
	} else if appName != nil && *appName != `` {
		if _, err := filters.ParseFlag(`label=`+appLabel+`=`+*appName, options.Filters); err != nil {
			return nil, err
		}
	}

	return docker.ContainerList(context.Background(), options)
}
// inspectContainer returns the full inspection document for containerID.
func inspectContainer(containerID string) (types.ContainerJSON, error) {
	return docker.ContainerInspect(context.Background(), containerID)
}
// startContainer starts the container identified by containerID.
func startContainer(containerID string) error {
	// containerID is already a string; the former string() conversion was a no-op.
	return docker.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
}
// stopContainer stops containerID using the daemon's default timeout (nil).
func stopContainer(containerID string) error {
	return docker.ContainerStop(context.Background(), containerID, nil)
}
// restartContainer restarts containerID using the daemon's default timeout (nil).
func restartContainer(containerID string) error {
	return docker.ContainerRestart(context.Background(), containerID, nil)
}
// rmContainer force-removes containerID together with its anonymous volumes.
func rmContainer(containerID string) error {
	return docker.ContainerRemove(context.Background(), containerID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
}
// inspectContainerHandler writes the JSON inspection of containerID, or a 500
// on failure. NOTE(review): no ownership check is performed here, unlike
// basicActionHandler — confirm whether inspection is intentionally public.
func inspectContainerHandler(w http.ResponseWriter, containerID []byte) {
	if container, err := inspectContainer(string(containerID)); err != nil {
		errorHandler(w, err)
	} else {
		jsonHttp.ResponseJSON(w, container)
	}
}
// basicActionHandler authorizes loggedUser for containerID and, if allowed,
// applies handle to it, answering with an empty 200 on success.
func basicActionHandler(w http.ResponseWriter, loggedUser *user, containerID []byte, handle func(string) error) {
	id := string(containerID)

	allowed, err := isAllowed(loggedUser, id)
	// BUG FIX: check the error before the allowed flag. isAllowed returns
	// (false, err) when the ownership lookup fails, which previously
	// surfaced as a misleading 403 instead of the underlying error.
	if err != nil {
		errorHandler(w, err)
		return
	}
	if !allowed {
		forbidden(w)
		return
	}

	if err := handle(id); err != nil {
		errorHandler(w, err)
		return
	}
	w.Write(nil)
}
// logContainerHandler fetches the container's stdout+stderr logs and returns
// them as a JSON array of lines.
func logContainerHandler(w http.ResponseWriter, containerID []byte) {
	logs, err := docker.ContainerLogs(context.Background(), string(containerID), types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: false})
	if err != nil {
		errorHandler(w, err)
		return
	}
	defer logs.Close()

	if logLines, err := ioutil.ReadAll(logs); err != nil {
		errorHandler(w, err)
	} else {
		// splitLogs drops the first 8 bytes of each line — presumably the
		// docker multiplexed-stream header; confirm against the stream format.
		matches := splitLogs.FindAllSubmatch(logLines, -1)
		cleanLogs := make([]string, 0, len(matches))
		for _, match := range matches {
			cleanLogs = append(cleanLogs, string(match[1]))
		}

		jsonHttp.ResponseJSON(w, results{cleanLogs})
	}
}
// listContainersHandler writes the caller's visible containers as JSON
// (filtered by ownership inside listContainers).
func listContainersHandler(w http.ResponseWriter, loggerUser *user) {
	if containers, err := listContainers(loggerUser, nil); err != nil {
		errorHandler(w, err)
	} else {
		jsonHttp.ResponseJSON(w, results{containers})
	}
}
func readBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
return ioutil.ReadAll(body)
}
// getConfig builds the container.Config for a compose service, forcing the
// owner and app labels onto the container so later listing and authorization
// filters can find it.
func getConfig(service *dockerComposeService, loggedUser *user, appName string) *container.Config {
	// BUG FIX: make([]string, len) followed by append produced len leading
	// empty environment entries; allocate capacity only.
	environments := make([]string, 0, len(service.Environment))
	for key, value := range service.Environment {
		environments = append(environments, key+`=`+value)
	}

	if service.Labels == nil {
		service.Labels = make(map[string]string)
	}
	// Ownership labels always win over whatever the compose file declared.
	service.Labels[ownerLabel] = loggedUser.username
	service.Labels[appLabel] = appName

	config := container.Config{
		Image:  service.Image,
		Labels: service.Labels,
		Env:    environments,
	}

	if service.Command != `` {
		config.Cmd = strslice.StrSlice([]string{service.Command})
	}

	return &config
}
// getHostConfig builds the container.HostConfig for a compose service:
// bounded json-file logging, restart-on-failure, conservative default
// resources, and no privilege escalation. User-requested memory is clamped
// to maxMemory.
func getHostConfig(service *dockerComposeService) *container.HostConfig {
	hostConfig := container.HostConfig{
		LogConfig: container.LogConfig{Type: `json-file`, Config: map[string]string{
			`max-size`: `50m`,
		}},
		RestartPolicy: container.RestartPolicy{Name: `on-failure`, MaximumRetryCount: 5},
		Resources: container.Resources{
			CPUShares: 128,
			Memory:    minMemory,
		},
		SecurityOpt: []string{`no-new-privileges`},
	}

	if service.ReadOnly {
		hostConfig.ReadonlyRootfs = service.ReadOnly
	}

	if service.CPUShares != 0 {
		hostConfig.Resources.CPUShares = service.CPUShares
	}

	// Honor the requested memory limit up to the hard cap.
	if service.MemoryLimit != 0 {
		if service.MemoryLimit < maxMemory {
			hostConfig.Resources.Memory = service.MemoryLimit
		} else {
			hostConfig.Resources.Memory = maxMemory
		}
	}

	return &hostConfig
}
// createAppHandler deploys a docker-compose payload for loggedUser under
// appName: it tears down the user's existing containers for that app, then
// pulls, creates and starts one container per compose service, answering
// with the list of created container IDs.
func createAppHandler(w http.ResponseWriter, loggedUser *user, appName []byte, composeFile []byte) {
	if len(appName) == 0 || len(composeFile) == 0 {
		http.Error(w, `An application name and a compose file are required`, http.StatusBadRequest)
		return
	}

	compose := dockerCompose{}
	if err := yaml.Unmarshal(composeFile, &compose); err != nil {
		errorHandler(w, err)
		return
	}

	appNameStr := string(appName)
	log.Print(loggedUser.username + ` deploys ` + appNameStr)

	ownerContainers, err := listContainers(loggedUser, &appNameStr)
	if err != nil {
		errorHandler(w, err)
		return
	}

	// Best-effort teardown of the previous deployment: log failures but
	// keep going so a half-dead container cannot block a redeploy.
	for _, container := range ownerContainers {
		log.Print(loggedUser.username + ` stops ` + strings.Join(container.Names, `, `))
		if err := stopContainer(container.ID); err != nil {
			log.Print(err)
		}
		log.Print(loggedUser.username + ` rm ` + strings.Join(container.Names, `, `))
		if err := rmContainer(container.ID); err != nil {
			log.Print(err)
		}
	}

	// BUG FIX: make([]string, len) + append returned len(compose.Services)
	// empty leading IDs in the JSON response; allocate capacity only.
	ids := make([]string, 0, len(compose.Services))
	for serviceName, service := range compose.Services {
		log.Print(loggedUser.username + ` pulls ` + service.Image)
		pull, err := docker.ImagePull(context.Background(), service.Image, types.ImagePullOptions{})
		if err != nil {
			errorHandler(w, err)
			return
		}
		// Drain the pull stream so the pull completes before create.
		readBody(pull)

		log.Print(loggedUser.username + ` starts ` + serviceName)
		id, err := docker.ContainerCreate(context.Background(), getConfig(&service, loggedUser, appNameStr), getHostConfig(&service), &networkConfig, appNameStr+`_`+serviceName)
		if err != nil {
			errorHandler(w, err)
			return
		}
		// Log (rather than abort on) a failed start so remaining services
		// still deploy; the old code dropped this error silently.
		if err := startContainer(id.ID); err != nil {
			log.Print(err)
		}
		ids = append(ids, id.ID)
	}

	jsonHttp.ResponseJSON(w, results{ids})
}
// isAuthenticated resolves the request's HTTP Basic credentials against the
// configured users map and returns the matching user.
func isAuthenticated(r *http.Request) (*user, error) {
	username, password, ok := r.BasicAuth()
	if !ok {
		return nil, fmt.Errorf(`Unable to read basic authentication`)
	}

	loggedUser, found := users[strings.ToLower(username)]
	if !found || loggedUser.password != password {
		// BUG FIX: username is attacker-controlled; pass it as an argument,
		// never concatenated into the format string (a '%' in the username
		// would corrupt the message — go vet printf violation).
		return nil, fmt.Errorf(`Invalid credentials for %s`, username)
	}
	return loggedUser, nil
}
func unauthorized(w http.ResponseWriter, err error) {
http.Error(w, err.Error(), http.StatusUnauthorized)
}
func forbidden(w http.ResponseWriter) {
http.Error(w, `Forbidden`, http.StatusForbidden)
}
// Handler is the net/http entrypoint of the container-management API; it
// authenticates and routes every request in ServeHTTP below.
// NOTE(review): previous comment said "Hello request" — looked copy-pasted.
type Handler struct {
}
// ServeHTTP sets CORS/security headers, answers OPTIONS preflights, then
// authenticates via HTTP Basic auth and dispatches by URL pattern + method
// to the matching container handler. Note the dispatch order matters:
// more specific patterns (start/stop/restart/logs) are matched before the
// generic /containers/{id} patterns.
func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Add(`Access-Control-Allow-Origin`, `*`)
	w.Header().Add(`Access-Control-Allow-Headers`, `Content-Type, Authorization`)
	w.Header().Add(`Access-Control-Allow-Methods`, `GET, POST, DELETE`)
	w.Header().Add(`X-Content-Type-Options`, `nosniff`)

	// CORS preflight: answer with headers only, no auth required.
	if r.Method == http.MethodOptions {
		w.Write(nil)
		return
	}

	loggedUser, err := isAuthenticated(r)
	if err != nil {
		unauthorized(w, err)
		return
	}

	urlPath := []byte(r.URL.Path)

	if containersRequest.Match(urlPath) && r.Method == http.MethodGet {
		listContainersHandler(w, loggedUser)
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodGet {
		inspectContainerHandler(w, containerRequest.FindSubmatch(urlPath)[1])
	} else if startRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, startRequest.FindSubmatch(urlPath)[1], startContainer)
	} else if stopRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, stopRequest.FindSubmatch(urlPath)[1], stopContainer)
	} else if restartRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, restartRequest.FindSubmatch(urlPath)[1], restartContainer)
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodDelete {
		basicActionHandler(w, loggedUser, containerRequest.FindSubmatch(urlPath)[1], rmContainer)
	} else if logRequest.Match(urlPath) && r.Method == http.MethodGet {
		logContainerHandler(w, logRequest.FindSubmatch(urlPath)[1])
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodPost {
		// POST /containers/{appName} deploys a compose file from the body.
		if composeBody, err := readBody(r.Body); err != nil {
			errorHandler(w, err)
		} else {
			createAppHandler(w, loggedUser, containerRequest.FindSubmatch(urlPath)[1], composeBody)
		}
	}
}
docker: add a default `latest` image tag constant and a regexp for detecting images that already specify a tag
package docker
import (
"bufio"
"bytes"
"context"
"fmt"
"github.com/ViBiOh/docker-deploy/jsonHttp"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client"
"gopkg.in/yaml.v2"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strings"
)
// Environment variable names used to reach the Docker daemon.
const host = `DOCKER_HOST`
const version = `DOCKER_VERSION`

// configurationFile is the `username,password,role` CSV users file.
const configurationFile = `./users`

// admin is the role allowed to act on every container.
const admin = `admin`

// Labels stamped on deployed containers.
const ownerLabel = `owner`
const appLabel = `app`

// Memory bounds for deployed containers, in bytes (64 MiB and 512 MiB).
const minMemory = 67108864
const maxMemory = 536870912

// defaultTag is the image tag used when the compose file gives none.
const defaultTag = `latest`

// commaByte separates fields in the users file.
var commaByte = []byte(`,`)

// splitLogs captures each log line after its 8-byte stream prefix.
var splitLogs = regexp.MustCompile(`.{8}(.*?)\n`)

// networkConfig attaches every deployed container to the traefik network.
var networkConfig = network.NetworkingConfig{
	EndpointsConfig: map[string]*network.EndpointSettings{
		`traefik`: &network.EndpointSettings{},
	},
}

// imageTag matches an image reference that already carries a tag.
var imageTag = regexp.MustCompile(`^\S*?:\S+$`)

// URL patterns of the HTTP API.
var containersRequest = regexp.MustCompile(`/containers/?$`)
var containerRequest = regexp.MustCompile(`/containers/([^/]+)/?$`)
var startRequest = regexp.MustCompile(`/containers/([^/]+)/start`)
var stopRequest = regexp.MustCompile(`/containers/([^/]+)/stop`)
var restartRequest = regexp.MustCompile(`/containers/([^/]+)/restart`)
var logRequest = regexp.MustCompile(`/containers/([^/]+)/logs`)

// results wraps every JSON list response.
type results struct {
	Results interface{} `json:"results"`
}

// user is one entry of the users configuration file.
type user struct {
	username string
	password string
	role     string
}

// dockerComposeService is the subset of a docker-compose service
// definition supported by this deployer.
type dockerComposeService struct {
	Image       string
	Command     string
	Environment map[string]string
	Labels      map[string]string
	ReadOnly    bool  `yaml:"read_only"`
	CPUShares   int64 `yaml:"cpu_shares"`
	MemoryLimit int64 `yaml:"mem_limit"`
}

// dockerCompose mirrors the top level of a docker-compose file.
type dockerCompose struct {
	Version  string
	Services map[string]dockerComposeService
}

// docker is the shared Docker client, created in init.
var docker *client.Client

// users indexes configured users by lowercased username.
var users map[string]*user
// errorHandler logs the error and answers 500 with its message.
func errorHandler(w http.ResponseWriter, err error) {
	log.Print(err)
	http.Error(w, err.Error(), http.StatusInternalServerError)
}
// init loads the users file and connects to the Docker daemon configured
// through the DOCKER_HOST / DOCKER_VERSION environment variables.
// The process aborts if the Docker client cannot be created.
func init() {
	users = readConfiguration(configurationFile)

	// Name the local differently from the imported `client` package to
	// avoid shadowing it inside this function.
	dockerClient, err := client.NewClient(os.Getenv(host), os.Getenv(version), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	docker = dockerClient
}
// readConfiguration parses the `username,password,role` CSV file at path
// and returns the users indexed by lowercased username.
// It returns nil when the file cannot be opened; malformed lines are
// logged and skipped instead of panicking on a missing field.
func readConfiguration(path string) map[string]*user {
	configFile, err := os.Open(path)
	if err != nil {
		log.Print(err)
		return nil
	}
	// Defer only after the error check: deferring Close on a failed Open
	// would operate on a nil file.
	defer configFile.Close()

	users := make(map[string]*user)

	scanner := bufio.NewScanner(configFile)
	for scanner.Scan() {
		parts := bytes.Split(scanner.Bytes(), commaByte)
		if len(parts) != 3 {
			// The original indexed parts[2] unconditionally and would
			// panic on a short line.
			log.Print(`invalid configuration line, expected username,password,role`)
			continue
		}

		user := user{string(parts[0]), string(parts[1]), string(parts[2])}
		users[strings.ToLower(user.username)] = &user
	}

	// Surface read errors that terminate the scan loop silently.
	if err := scanner.Err(); err != nil {
		log.Print(err)
	}

	return users
}
// isAllowed reports whether loggedUser may act on the given container:
// admins always may; other users only when the container's owner label
// matches their username. A non-nil error comes from container inspection.
func isAllowed(loggedUser *user, containerID string) (bool, error) {
	if loggedUser.role != admin {
		container, err := inspectContainer(string(containerID))
		if err != nil {
			return false, err
		}

		owner, ok := container.Config.Labels[ownerLabel]
		if !ok || owner != loggedUser.username {
			return false, nil
		}
	}

	return true, nil
}
// listContainers lists all containers (running or not). Non-admin users
// are restricted to containers carrying their owner label; otherwise,
// when appName is set, results are filtered by the app label.
func listContainers(loggedUser *user, appName *string) ([]types.Container, error) {
	options := types.ContainerListOptions{All: true}

	options.Filters = filters.NewArgs()
	if loggedUser != nil && loggedUser.role != admin {
		if _, err := filters.ParseFlag(`label=`+ownerLabel+`=`+loggedUser.username, options.Filters); err != nil {
			return nil, err
		}
	} else if appName != nil && *appName != `` {
		if _, err := filters.ParseFlag(`label=`+appLabel+`=`+*appName, options.Filters); err != nil {
			return nil, err
		}
	}

	return docker.ContainerList(context.Background(), options)
}
// inspectContainer returns the full inspect data for the container.
func inspectContainer(containerID string) (types.ContainerJSON, error) {
	return docker.ContainerInspect(context.Background(), containerID)
}
// startContainer starts the given container.
func startContainer(containerID string) error {
	// containerID is already a string; the redundant string(...) conversion
	// of the original is dropped.
	return docker.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
}
// stopContainer stops the container with no explicit timeout (nil lets
// the daemon apply its default).
func stopContainer(containerID string) error {
	return docker.ContainerStop(context.Background(), containerID, nil)
}

// restartContainer restarts the container with no explicit timeout.
func restartContainer(containerID string) error {
	return docker.ContainerRestart(context.Background(), containerID, nil)
}

// rmContainer force-removes the container together with its volumes.
func rmContainer(containerID string) error {
	return docker.ContainerRemove(context.Background(), containerID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
}
// inspectContainerHandler writes the container's inspect data as JSON.
func inspectContainerHandler(w http.ResponseWriter, containerID []byte) {
	if container, err := inspectContainer(string(containerID)); err != nil {
		errorHandler(w, err)
	} else {
		jsonHttp.ResponseJSON(w, container)
	}
}
// basicActionHandler checks that loggedUser may act on the container,
// runs the given action, and writes an empty 200 response on success.
func basicActionHandler(w http.ResponseWriter, loggedUser *user, containerID []byte, handle func(string) error) {
	id := string(containerID)

	// Check the error before the permission flag: isAllowed returns
	// (false, err) on inspection failure, so the original answered 403
	// and silently dropped the underlying error.
	allowed, err := isAllowed(loggedUser, id)
	if err != nil {
		errorHandler(w, err)
		return
	}
	if !allowed {
		forbidden(w)
		return
	}

	if err = handle(id); err != nil {
		errorHandler(w, err)
		return
	}
	w.Write(nil)
}
// logContainerHandler fetches the container's stdout and stderr logs,
// strips the 8-byte prefix from each line via splitLogs, and writes the
// cleaned lines as a JSON array.
func logContainerHandler(w http.ResponseWriter, containerID []byte) {
	logs, err := docker.ContainerLogs(context.Background(), string(containerID), types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: false})
	if err != nil {
		errorHandler(w, err)
		return
	}
	// The log stream must be closed once consumed.
	defer logs.Close()

	if logLines, err := ioutil.ReadAll(logs); err != nil {
		errorHandler(w, err)
	} else {
		matches := splitLogs.FindAllSubmatch(logLines, -1)
		cleanLogs := make([]string, 0, len(matches))
		for _, match := range matches {
			cleanLogs = append(cleanLogs, string(match[1]))
		}

		jsonHttp.ResponseJSON(w, results{cleanLogs})
	}
}
// listContainersHandler writes the containers visible to the logged
// user as JSON.
func listContainersHandler(w http.ResponseWriter, loggedUser *user) {
	// Parameter renamed from `loggerUser` to `loggedUser` for consistency
	// with the rest of the file.
	containers, err := listContainers(loggedUser, nil)
	if err != nil {
		errorHandler(w, err)
		return
	}

	jsonHttp.ResponseJSON(w, results{containers})
}
func readBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
return ioutil.ReadAll(body)
}
// getConfig builds the container configuration for a compose service,
// stamping the container with its owner and application labels.
func getConfig(service *dockerComposeService, loggedUser *user, appName string) *container.Config {
	// Length 0 with pre-sized capacity: the original used
	// make([]string, len(...)) followed by append, which left
	// len(service.Environment) empty leading entries in Env.
	environments := make([]string, 0, len(service.Environment))
	for key, value := range service.Environment {
		environments = append(environments, key+`=`+value)
	}

	if service.Labels == nil {
		service.Labels = make(map[string]string)
	}
	service.Labels[ownerLabel] = loggedUser.username
	service.Labels[appLabel] = appName

	config := container.Config{
		Image:  service.Image,
		Labels: service.Labels,
		Env:    environments,
	}

	if service.Command != `` {
		config.Cmd = strslice.StrSlice([]string{service.Command})
	}

	return &config
}
// getHostConfig builds the host-side container configuration with
// conservative defaults (log rotation, restart policy, resource limits,
// no-new-privileges), overridden by the compose service where allowed.
func getHostConfig(service *dockerComposeService) *container.HostConfig {
	hostConfig := container.HostConfig{
		// Cap json-file logs so containers cannot fill the disk.
		LogConfig: container.LogConfig{Type: `json-file`, Config: map[string]string{
			`max-size`: `50m`,
		}},
		RestartPolicy: container.RestartPolicy{Name: `on-failure`, MaximumRetryCount: 5},
		Resources: container.Resources{
			CPUShares: 128,
			Memory:    minMemory,
		},
		SecurityOpt: []string{`no-new-privileges`},
	}

	if service.ReadOnly {
		hostConfig.ReadonlyRootfs = service.ReadOnly
	}

	if service.CPUShares != 0 {
		hostConfig.Resources.CPUShares = service.CPUShares
	}

	if service.MemoryLimit != 0 {
		// The requested memory limit is clamped to maxMemory.
		if service.MemoryLimit < maxMemory {
			hostConfig.Resources.Memory = service.MemoryLimit
		} else {
			hostConfig.Resources.Memory = maxMemory
		}
	}

	return &hostConfig
}
// createAppHandler deploys a docker-compose file for the logged user: it
// stops and removes the user's previous containers for the app, then
// pulls and starts every service described in the compose file, and
// finally answers with the created container IDs as JSON.
func createAppHandler(w http.ResponseWriter, loggedUser *user, appName []byte, composeFile []byte) {
	if len(appName) == 0 || len(composeFile) == 0 {
		http.Error(w, `An application name and a compose file are required`, http.StatusBadRequest)
		return
	}

	compose := dockerCompose{}
	if err := yaml.Unmarshal(composeFile, &compose); err != nil {
		errorHandler(w, err)
		return
	}

	appNameStr := string(appName)
	log.Print(loggedUser.username + ` deploys ` + appNameStr)

	ownerContainers, err := listContainers(loggedUser, &appNameStr)
	if err != nil {
		errorHandler(w, err)
		return
	}

	// Best-effort cleanup: log failures but keep deploying.
	for _, container := range ownerContainers {
		log.Print(loggedUser.username + ` stops ` + strings.Join(container.Names, `, `))
		if err := stopContainer(container.ID); err != nil {
			log.Print(err)
		}
		log.Print(loggedUser.username + ` rm ` + strings.Join(container.Names, `, `))
		if err := rmContainer(container.ID); err != nil {
			log.Print(err)
		}
	}

	// Length 0 with capacity: make([]string, len) + append would leave
	// empty leading entries in the JSON response.
	ids := make([]string, 0, len(compose.Services))
	for serviceName, service := range compose.Services {
		image := service.Image
		// MatchString takes the string directly; when no tag is present,
		// append the default one WITH its `:` separator — the original
		// produced e.g. `nginxlatest` instead of `nginx:latest`.
		if !imageTag.MatchString(image) {
			image = image + `:` + defaultTag
		}

		log.Print(loggedUser.username + ` pulls ` + image)
		pull, err := docker.ImagePull(context.Background(), image, types.ImagePullOptions{})
		if err != nil {
			errorHandler(w, err)
			return
		}
		// Drain the pull stream so the pull completes before creating.
		readBody(pull)
		log.Print(loggedUser.username + ` pulls ended for ` + image)

		log.Print(loggedUser.username + ` starts ` + serviceName)
		id, err := docker.ContainerCreate(context.Background(), getConfig(&service, loggedUser, appNameStr), getHostConfig(&service), &networkConfig, appNameStr+`_`+serviceName)
		if err != nil {
			errorHandler(w, err)
			return
		}
		if err := startContainer(id.ID); err != nil {
			log.Print(err)
		}

		ids = append(ids, id.ID)
	}

	jsonHttp.ResponseJSON(w, results{ids})
}
// isAuthenticated resolves the request's Basic Auth credentials against
// the configured users, returning the matching user or an error suitable
// for a 401 response.
func isAuthenticated(r *http.Request) (*user, error) {
	username, password, ok := r.BasicAuth()
	if !ok {
		return nil, fmt.Errorf(`Unable to read basic authentication`)
	}

	user, ok := users[strings.ToLower(username)]
	if !ok || user.password != password {
		// Use an explicit %s verb: concatenating the username into the
		// format string would let a `%` in user input corrupt the message.
		return nil, fmt.Errorf(`Invalid credentials for %s`, username)
	}

	return user, nil
}
// unauthorized answers 401 with the authentication error message.
func unauthorized(w http.ResponseWriter, err error) {
	http.Error(w, err.Error(), http.StatusUnauthorized)
}

// forbidden answers 403 Forbidden.
func forbidden(w http.ResponseWriter) {
	http.Error(w, `Forbidden`, http.StatusForbidden)
}

// Handler is the net/http handler exposing the Docker management API.
// The zero value is ready to use.
type Handler struct {
}
// ServeHTTP routes the request: it sets CORS headers, answers OPTIONS
// pre-flight immediately, authenticates the caller with Basic Auth, then
// dispatches on the URL pattern and HTTP method to the container handlers.
// Unmatched requests fall through with only the headers written.
func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// CORS and content-sniffing headers are set on every response.
	w.Header().Add(`Access-Control-Allow-Origin`, `*`)
	w.Header().Add(`Access-Control-Allow-Headers`, `Content-Type, Authorization`)
	w.Header().Add(`Access-Control-Allow-Methods`, `GET, POST, DELETE`)
	w.Header().Add(`X-Content-Type-Options`, `nosniff`)

	// Pre-flight requests get an empty 200 without authentication.
	if r.Method == http.MethodOptions {
		w.Write(nil)
		return
	}

	loggedUser, err := isAuthenticated(r)
	if err != nil {
		unauthorized(w, err)
		return
	}

	// Dispatch on (pattern, method); the compose-deploy POST on the
	// generic container pattern is tested last, after the action routes.
	urlPath := []byte(r.URL.Path)
	if containersRequest.Match(urlPath) && r.Method == http.MethodGet {
		listContainersHandler(w, loggedUser)
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodGet {
		inspectContainerHandler(w, containerRequest.FindSubmatch(urlPath)[1])
	} else if startRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, startRequest.FindSubmatch(urlPath)[1], startContainer)
	} else if stopRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, stopRequest.FindSubmatch(urlPath)[1], stopContainer)
	} else if restartRequest.Match(urlPath) && r.Method == http.MethodPost {
		basicActionHandler(w, loggedUser, restartRequest.FindSubmatch(urlPath)[1], restartContainer)
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodDelete {
		basicActionHandler(w, loggedUser, containerRequest.FindSubmatch(urlPath)[1], rmContainer)
	} else if logRequest.Match(urlPath) && r.Method == http.MethodGet {
		logContainerHandler(w, logRequest.FindSubmatch(urlPath)[1])
	} else if containerRequest.Match(urlPath) && r.Method == http.MethodPost {
		if composeBody, err := readBody(r.Body); err != nil {
			errorHandler(w, err)
		} else {
			createAppHandler(w, loggedUser, containerRequest.FindSubmatch(urlPath)[1], composeBody)
		}
	}
}
|
package model
import (
"encoding/json"
"fmt"
"io"
"strings"
)
// Metadata holds the service-wide settings from the API definition file.
type Metadata struct {
	APIVersion          string
	EndpointPrefix      string
	JSONVersion         string
	ServiceAbbreviation string
	ServiceFullName     string
	SignatureVersion    string
	TargetPrefix        string
	Protocol            string
	ChecksumFormat      string
	GlobalEndpoint      string
	TimestampFormat     string
}

// HTTPOptions describes how an operation maps onto an HTTP request.
type HTTPOptions struct {
	Method     string
	RequestURI string
}

// Operation is a single API operation with optional input/output shapes.
type Operation struct {
	Name          string
	Documentation string
	HTTP          HTTPOptions
	InputRef      *ShapeRef `json:"Input"`
	OutputRef     *ShapeRef `json:"Output"`
}

// Input resolves the operation's input shape (nil when absent).
func (o Operation) Input() *Shape {
	return o.InputRef.Shape()
}

// Output resolves the operation's output shape (nil when absent).
func (o Operation) Output() *Shape {
	return o.OutputRef.Shape()
}
// Error describes a modeled service error.
type Error struct {
	Code           string
	HTTPStatusCode int
	SenderFault    bool
}

// ShapeRef is a reference to a named shape, plus per-use metadata.
type ShapeRef struct {
	ShapeName     string `json:"Shape"`
	Documentation string
	Location      string
	LocationName  string
	Wrapper       bool
	ResultWrapper string
	Streaming     bool
}

// WrappedType returns the Go type of the reference, preferring the
// generated result-wrapper type when one is declared.
func (ref *ShapeRef) WrappedType() string {
	if ref.ResultWrapper != "" {
		return "*" + exportable(ref.ResultWrapper)
	}
	return ref.Shape().Type()
}

// WrappedLiteral returns a Go literal allocating the referenced type,
// preferring the result-wrapper type when one is declared.
func (ref *ShapeRef) WrappedLiteral() string {
	if ref.ResultWrapper != "" {
		return "&" + exportable(ref.ResultWrapper) + "{}"
	}
	return ref.Shape().Literal()
}

// Shape resolves the reference against the loaded service.
// A nil receiver resolves to nil, so optional refs chain safely.
func (ref *ShapeRef) Shape() *Shape {
	if ref == nil {
		return nil
	}
	return service.Shapes[ref.ShapeName]
}
// Member is a named, possibly required, member of a structure shape.
type Member struct {
	ShapeRef
	Name     string
	Required bool
}

// JSONTag returns the member's json struct tag; optional members are
// marked omitempty.
func (m Member) JSONTag() string {
	if !m.Required {
		return fmt.Sprintf("`json:\"%s,omitempty\"`", m.Name)
	}
	return fmt.Sprintf("`json:\"%s\"`", m.Name)
}

// XMLTag returns the member's xml struct tag: an optional wrapper
// element, the member's wire name (LocationName, falling back to Name),
// and for non-rest-xml lists the inner element name (default "member").
func (m Member) XMLTag(wrapper string) string {
	var path []string
	if wrapper != "" {
		path = append(path, wrapper)
	}

	if m.LocationName != "" {
		path = append(path, m.LocationName)
	} else {
		path = append(path, m.Name)
	}

	if m.Shape().ShapeType == "list" && service.Metadata.Protocol != "rest-xml" {
		loc := m.Shape().MemberRef.LocationName
		if loc == "" {
			loc = "member"
		}
		path = append(path, loc)
	}

	return fmt.Sprintf("`xml:\"%s\"`", strings.Join(path, ">"))
}

// EC2Tag returns the member's combined ec2/xml struct tag, using the
// wire LocationName for the ec2 query parameter name when present.
func (m Member) EC2Tag() string {
	var path []string
	if m.LocationName != "" {
		path = append(path, m.LocationName)
	} else {
		path = append(path, m.Name)
	}

	if m.Shape().ShapeType == "list" {
		loc := m.Shape().MemberRef.LocationName
		if loc == "" {
			loc = "member"
		}
		path = append(path, loc)
	}

	name := m.LocationName
	if name == "" {
		name = m.Name
	}
	return fmt.Sprintf("`ec2:%q xml:%q`", name, strings.Join(path, ">"))
}

// Shape resolves the member's shape.
func (m Member) Shape() *Shape {
	return m.ShapeRef.Shape()
}

// Type returns the member's Go type; streaming members are exposed as
// io.ReadCloser.
func (m Member) Type() string {
	if m.Streaming {
		return "io.ReadCloser"
	}
	return m.Shape().Type()
}
// Shape is a type definition from the API model: a structure, list,
// map, or scalar.
type Shape struct {
	Name          string
	ShapeType     string `json:"Type"`
	Required      []string
	MemberRefs    map[string]ShapeRef `json:"Members"`
	MemberRef     *ShapeRef           `json:"Member"`
	KeyRef        *ShapeRef           `json:"Key"`
	ValueRef      *ShapeRef           `json:"Value"`
	Error         Error
	Exception     bool
	Documentation string
	Min           int
	Max           int
	Pattern       string
	Sensitive     bool
	Wrapper       bool
	Payload       string
}

// Message reports whether the shape is a "...Message" output carrying a
// single result wrapper.
func (s *Shape) Message() bool {
	return strings.HasSuffix(s.Name, "Message") && s.ResultWrapper() != ""
}

// MessageTag returns the xml tag for a message shape, derived from its
// result wrapper ("FooResult" becomes "FooResponse").
func (s *Shape) MessageTag() string {
	tag := strings.TrimSuffix(s.ResultWrapper(), "Result") + "Response"
	return fmt.Sprintf("`xml:\"%s\"`", tag)
}

// Key resolves the key shape of a map shape.
func (s *Shape) Key() *Shape {
	return s.KeyRef.Shape()
}

// Member resolves the element shape of a list shape.
func (s *Shape) Member() *Shape {
	return s.MemberRef.Shape()
}

// Members returns the structure's members by name, with the Required
// flag resolved from the shape's Required list.
func (s *Shape) Members() map[string]Member {
	required := func(v string) bool {
		for _, s := range s.Required {
			if s == v {
				return true
			}
		}
		return false
	}

	members := map[string]Member{}
	for name, ref := range s.MemberRefs {
		members[name] = Member{
			Name:     name,
			Required: required(name),
			ShapeRef: ref,
		}
	}
	return members
}
// ResultWrapper returns the shape's result wrapper name when exactly one
// operation outputs this shape; otherwise it returns "".
func (s *Shape) ResultWrapper() string {
	var wrappers []string

	for _, op := range service.Operations {
		if op.OutputRef != nil && op.OutputRef.ShapeName == s.Name {
			wrappers = append(wrappers, op.OutputRef.ResultWrapper)
		}
	}

	if len(wrappers) == 1 {
		return wrappers[0]
	}
	return ""
}

// Value resolves the value shape of a map shape.
func (s *Shape) Value() *Shape {
	return s.ValueRef.Shape()
}

// Literal returns a Go literal allocating the shape; only structures
// have literals, any other shape type panics.
func (s *Shape) Literal() string {
	if s.ShapeType == "structure" {
		return "&" + s.Type()[1:] + "{}"
	}
	panic("trying to make a literal non-structure for " + s.Name)
}

// ElementType returns the shape's Go type when used as a collection
// element (plain value types, no aws.* wrappers). It panics on an
// unknown shape type.
func (s *Shape) ElementType() string {
	switch s.ShapeType {
	case "structure":
		return exportable(s.Name)
	case "integer":
		return "int"
	case "long":
		return "int64"
	case "float":
		return "float32"
	case "double":
		return "float64"
	case "string":
		return "string"
	case "map":
		return "map[" + s.Key().ElementType() + "]" + s.Value().ElementType()
	case "list":
		return "[]" + s.Member().ElementType()
	case "boolean":
		return "bool"
	case "blob":
		return "[]byte"
	case "timestamp":
		return "time.Time"
	}

	panic(fmt.Errorf("type %q (%q) not found", s.Name, s.ShapeType))
}

// Type returns the shape's Go type when used as a field: structures are
// pointers and scalars use the aws.* value wrapper types. It panics on
// an unknown shape type.
func (s *Shape) Type() string {
	switch s.ShapeType {
	case "structure":
		return "*" + exportable(s.Name)
	case "integer":
		return "aws.IntegerValue"
	case "long":
		return "aws.LongValue"
	case "float":
		return "aws.FloatValue"
	case "double":
		return "aws.DoubleValue"
	case "string":
		return "aws.StringValue"
	case "map":
		return "map[" + s.Key().ElementType() + "]" + s.Value().ElementType()
	case "list":
		return "[]" + s.Member().ElementType()
	case "boolean":
		return "aws.BooleanValue"
	case "blob":
		return "[]byte"
	case "timestamp":
		return "time.Time"
	}

	panic(fmt.Errorf("type %q (%q) not found", s.Name, s.ShapeType))
}
// Service is the root of a loaded API definition.
type Service struct {
	Name          string
	FullName      string
	PackageName   string
	Metadata      Metadata
	Documentation string
	Operations    map[string]Operation
	Shapes        map[string]*Shape
}

// Wrappers returns the result-wrapper shapes keyed by wrapper name,
// excluding wrappers whose name collides with an explicitly modeled shape.
func (s Service) Wrappers() map[string]*Shape {
	wrappers := map[string]*Shape{}

	// collect all wrapper types
	for _, op := range s.Operations {
		if op.InputRef != nil && op.InputRef.ResultWrapper != "" {
			wrappers[op.InputRef.ResultWrapper] = op.Input()
		}
		if op.OutputRef != nil && op.OutputRef.ResultWrapper != "" {
			wrappers[op.OutputRef.ResultWrapper] = op.Output()
		}
	}

	// drop wrappers shadowed by an existing shape of the same name
	for name := range wrappers {
		if _, ok := s.Shapes[name]; ok {
			delete(wrappers, name)
		}
	}

	return wrappers
}

// service is the package-level definition the generator works from;
// it is populated by Load.
var service Service

// Load decodes an API definition from r into the package-level service,
// back-fills shape names, and derives the naming fields from name.
func Load(name string, r io.Reader) error {
	service = Service{}
	if err := json.NewDecoder(r).Decode(&service); err != nil {
		return err
	}

	// Shape names are the map keys in the JSON; copy them onto the shapes.
	for name, shape := range service.Shapes {
		shape.Name = name
	}

	service.FullName = service.Metadata.ServiceFullName
	service.PackageName = strings.ToLower(name)
	service.Name = name

	return nil
}
Fix EC2 request param names.
Lord only knows what locationName means for that.
package model
import (
"encoding/json"
"fmt"
"io"
"strings"
)
type Metadata struct {
APIVersion string
EndpointPrefix string
JSONVersion string
ServiceAbbreviation string
ServiceFullName string
SignatureVersion string
TargetPrefix string
Protocol string
ChecksumFormat string
GlobalEndpoint string
TimestampFormat string
}
type HTTPOptions struct {
Method string
RequestURI string
}
type Operation struct {
Name string
Documentation string
HTTP HTTPOptions
InputRef *ShapeRef `json:"Input"`
OutputRef *ShapeRef `json:"Output"`
}
func (o Operation) Input() *Shape {
return o.InputRef.Shape()
}
func (o Operation) Output() *Shape {
return o.OutputRef.Shape()
}
type Error struct {
Code string
HTTPStatusCode int
SenderFault bool
}
type ShapeRef struct {
ShapeName string `json:"Shape"`
Documentation string
Location string
LocationName string
Wrapper bool
ResultWrapper string
Streaming bool
}
func (ref *ShapeRef) WrappedType() string {
if ref.ResultWrapper != "" {
return "*" + exportable(ref.ResultWrapper)
}
return ref.Shape().Type()
}
func (ref *ShapeRef) WrappedLiteral() string {
if ref.ResultWrapper != "" {
return "&" + exportable(ref.ResultWrapper) + "{}"
}
return ref.Shape().Literal()
}
func (ref *ShapeRef) Shape() *Shape {
if ref == nil {
return nil
}
return service.Shapes[ref.ShapeName]
}
type Member struct {
ShapeRef
Name string
Required bool
}
func (m Member) JSONTag() string {
if !m.Required {
return fmt.Sprintf("`json:\"%s,omitempty\"`", m.Name)
}
return fmt.Sprintf("`json:\"%s\"`", m.Name)
}
func (m Member) XMLTag(wrapper string) string {
var path []string
if wrapper != "" {
path = append(path, wrapper)
}
if m.LocationName != "" {
path = append(path, m.LocationName)
} else {
path = append(path, m.Name)
}
if m.Shape().ShapeType == "list" && service.Metadata.Protocol != "rest-xml" {
loc := m.Shape().MemberRef.LocationName
if loc == "" {
loc = "member"
}
path = append(path, loc)
}
return fmt.Sprintf("`xml:\"%s\"`", strings.Join(path, ">"))
}
// EC2Tag returns the member's combined ec2/xml struct tag.
func (m Member) EC2Tag() string {
	var path []string
	if m.LocationName != "" {
		path = append(path, m.LocationName)
	} else {
		path = append(path, m.Name)
	}

	if m.Shape().ShapeType == "list" {
		loc := m.Shape().MemberRef.LocationName
		if loc == "" {
			loc = "member"
		}
		path = append(path, loc)
	}

	// Use the wire LocationName for the ec2 query parameter name when
	// present, falling back to the member name — using m.Name alone
	// produced wrong EC2 request parameter names (matches the fixed
	// implementation elsewhere in this source).
	name := m.LocationName
	if name == "" {
		name = m.Name
	}
	return fmt.Sprintf("`ec2:%q xml:%q`", name, strings.Join(path, ">"))
}
func (m Member) Shape() *Shape {
return m.ShapeRef.Shape()
}
func (m Member) Type() string {
if m.Streaming {
return "io.ReadCloser"
}
return m.Shape().Type()
}
type Shape struct {
Name string
ShapeType string `json:"Type"`
Required []string
MemberRefs map[string]ShapeRef `json:"Members"`
MemberRef *ShapeRef `json:"Member"`
KeyRef *ShapeRef `json:"Key"`
ValueRef *ShapeRef `json:"Value"`
Error Error
Exception bool
Documentation string
Min int
Max int
Pattern string
Sensitive bool
Wrapper bool
Payload string
}
func (s *Shape) Message() bool {
return strings.HasSuffix(s.Name, "Message") && s.ResultWrapper() != ""
}
func (s *Shape) MessageTag() string {
tag := strings.TrimSuffix(s.ResultWrapper(), "Result") + "Response"
return fmt.Sprintf("`xml:\"%s\"`", tag)
}
func (s *Shape) Key() *Shape {
return s.KeyRef.Shape()
}
func (s *Shape) Member() *Shape {
return s.MemberRef.Shape()
}
func (s *Shape) Members() map[string]Member {
required := func(v string) bool {
for _, s := range s.Required {
if s == v {
return true
}
}
return false
}
members := map[string]Member{}
for name, ref := range s.MemberRefs {
members[name] = Member{
Name: name,
Required: required(name),
ShapeRef: ref,
}
}
return members
}
func (s *Shape) ResultWrapper() string {
var wrappers []string
for _, op := range service.Operations {
if op.OutputRef != nil && op.OutputRef.ShapeName == s.Name {
wrappers = append(wrappers, op.OutputRef.ResultWrapper)
}
}
if len(wrappers) == 1 {
return wrappers[0]
}
return ""
}
func (s *Shape) Value() *Shape {
return s.ValueRef.Shape()
}
func (s *Shape) Literal() string {
if s.ShapeType == "structure" {
return "&" + s.Type()[1:] + "{}"
}
panic("trying to make a literal non-structure for " + s.Name)
}
func (s *Shape) ElementType() string {
switch s.ShapeType {
case "structure":
return exportable(s.Name)
case "integer":
return "int"
case "long":
return "int64"
case "float":
return "float32"
case "double":
return "float64"
case "string":
return "string"
case "map":
return "map[" + s.Key().ElementType() + "]" + s.Value().ElementType()
case "list":
return "[]" + s.Member().ElementType()
case "boolean":
return "bool"
case "blob":
return "[]byte"
case "timestamp":
return "time.Time"
}
panic(fmt.Errorf("type %q (%q) not found", s.Name, s.ShapeType))
}
func (s *Shape) Type() string {
switch s.ShapeType {
case "structure":
return "*" + exportable(s.Name)
case "integer":
return "aws.IntegerValue"
case "long":
return "aws.LongValue"
case "float":
return "aws.FloatValue"
case "double":
return "aws.DoubleValue"
case "string":
return "aws.StringValue"
case "map":
return "map[" + s.Key().ElementType() + "]" + s.Value().ElementType()
case "list":
return "[]" + s.Member().ElementType()
case "boolean":
return "aws.BooleanValue"
case "blob":
return "[]byte"
case "timestamp":
return "time.Time"
}
panic(fmt.Errorf("type %q (%q) not found", s.Name, s.ShapeType))
}
type Service struct {
Name string
FullName string
PackageName string
Metadata Metadata
Documentation string
Operations map[string]Operation
Shapes map[string]*Shape
}
func (s Service) Wrappers() map[string]*Shape {
wrappers := map[string]*Shape{}
// collect all wrapper types
for _, op := range s.Operations {
if op.InputRef != nil && op.InputRef.ResultWrapper != "" {
wrappers[op.InputRef.ResultWrapper] = op.Input()
}
if op.OutputRef != nil && op.OutputRef.ResultWrapper != "" {
wrappers[op.OutputRef.ResultWrapper] = op.Output()
}
}
// remove all existing types?
for name := range wrappers {
if _, ok := s.Shapes[name]; ok {
delete(wrappers, name)
}
}
return wrappers
}
var service Service
func Load(name string, r io.Reader) error {
service = Service{}
if err := json.NewDecoder(r).Decode(&service); err != nil {
return err
}
for name, shape := range service.Shapes {
shape.Name = name
}
service.FullName = service.Metadata.ServiceFullName
service.PackageName = strings.ToLower(name)
service.Name = name
return nil
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"github.com/pingcap/tidb/util/types"
)
// SchemaState is the state for schema elements.
type SchemaState byte

const (
	// StateNone means this schema element is absent and can't be used.
	StateNone SchemaState = iota
	// StateDeleteOnly means we can only delete items for this schema element.
	StateDeleteOnly
	// StateWriteOnly means we can use any write operation on this schema element,
	// but outer can't read the changed data.
	StateWriteOnly
	// StateReorganization means we are reorganizing the whole data for this schema change.
	StateReorganization
	// StatePublic means this schema element is ok for all write and read operations.
	StatePublic
)

// String implements fmt.Stringer interface.
func (state SchemaState) String() string {
	switch state {
	case StatePublic:
		return "public"
	case StateReorganization:
		return "reorganization"
	case StateWriteOnly:
		return "write only"
	case StateDeleteOnly:
		return "delete only"
	default:
		return "none"
	}
}
// ColumnInfo provides meta data describing a table column.
type ColumnInfo struct {
	ID           int64       `json:"id"`
	Name         CIStr       `json:"name"`
	Offset       int         `json:"offset"`
	DefaultValue interface{} `json:"default"`
	types.FieldType `json:"type"`
	State SchemaState `json:"state"`
}

// Clone clones ColumnInfo. The copy is shallow; embedded and interface
// values are copied as-is.
func (c *ColumnInfo) Clone() *ColumnInfo {
	nc := *c
	return &nc
}
// TableInfo provides meta data describing a DB table.
type TableInfo struct {
	ID      int64  `json:"id"`
	Name    CIStr  `json:"name"`
	Charset string `json:"charset"`
	Collate string `json:"collate"`
	// Columns are listed in the order in which they appear in the schema.
	Columns []*ColumnInfo `json:"cols"`
	Indices []*IndexInfo  `json:"index_info"`
	State   SchemaState   `json:"state"`
}

// Clone clones TableInfo: the column and index slices are deep-copied so
// mutating the clone cannot affect the original.
func (t *TableInfo) Clone() *TableInfo {
	nt := *t
	nt.Columns = make([]*ColumnInfo, len(t.Columns))
	nt.Indices = make([]*IndexInfo, len(t.Indices))

	for i := range t.Columns {
		nt.Columns[i] = t.Columns[i].Clone()
	}

	for i := range t.Indices {
		nt.Indices[i] = t.Indices[i].Clone()
	}

	return &nt
}
// IndexColumn provides index column info.
type IndexColumn struct {
	Name   CIStr `json:"name"`   // Index name
	Offset int   `json:"offset"` // Index offset
	Length int   `json:"length"` // Index length
}

// Clone clones IndexColumn (a shallow copy of a plain value struct).
func (i *IndexColumn) Clone() *IndexColumn {
	ni := *i
	return &ni
}
// IndexInfo provides meta data describing a DB index.
// It corresponds to the statement `CREATE INDEX Name ON Table (Column);`
// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type IndexInfo struct {
	Name    CIStr          `json:"idx_name"`   // Index name.
	Table   CIStr          `json:"tbl_name"`   // Table name.
	Columns []*IndexColumn `json:"idx_cols"`   // Index columns.
	Unique  bool           `json:"is_unique"`  // Whether the index is unique.
	Primary bool           `json:"is_primary"` // Whether the index is primary key.
	State   SchemaState    `json:"state"`
}

// Clone clones IndexInfo: the column slice is deep-copied.
func (index *IndexInfo) Clone() *IndexInfo {
	ni := *index
	ni.Columns = make([]*IndexColumn, len(index.Columns))
	for i := range index.Columns {
		ni.Columns[i] = index.Columns[i].Clone()
	}
	return &ni
}
// DBInfo provides meta data describing a DB.
type DBInfo struct {
	ID      int64        `json:"id"`      // Database ID
	Name    CIStr        `json:"db_name"` // DB name.
	Charset string       `json:"charset"`
	Collate string       `json:"collate"`
	Tables  []*TableInfo `json:"-"` // Tables in the DB.
	State   SchemaState  `json:"state"`
}

// Clone clones DBInfo: the table slice is deep-copied.
func (db *DBInfo) Clone() *DBInfo {
	newInfo := *db
	newInfo.Tables = make([]*TableInfo, len(db.Tables))
	for i := range db.Tables {
		newInfo.Tables[i] = db.Tables[i].Clone()
	}
	return &newInfo
}
// CIStr is a case-insensitive string: it keeps the original spelling
// alongside a lowercased form used for comparisons.
type CIStr struct {
	O string `json:"O"` // Original string.
	L string `json:"L"` // Lower case string.
}

// String implements fmt.Stringer interface, returning the original spelling.
func (cis CIStr) String() string {
	return cis.O
}

// NewCIStr creates a new CIStr.
func NewCIStr(s string) CIStr {
	return CIStr{
		O: s,
		L: strings.ToLower(s),
	}
}
model: rename meas to means
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"github.com/pingcap/tidb/util/types"
)
// SchemaState is the state for schema elements.
type SchemaState byte

const (
	// StateNone means this schema element is absent and can't be used.
	StateNone SchemaState = iota
	// StateDeleteOnly means we can only delete items for this schema element.
	StateDeleteOnly
	// StateWriteOnly means we can use any write operation on this schema element,
	// but outer can't read the changed data.
	StateWriteOnly
	// StateReorganization means we are reorganizing the whole data for this schema change.
	StateReorganization
	// StatePublic means this schema element is ok for all write and read operations.
	StatePublic
)
// String implements fmt.Stringer interface.
func (s SchemaState) String() string {
switch s {
case StateDeleteOnly:
return "delete only"
case StateWriteOnly:
return "write only"
case StateReorganization:
return "reorganization"
case StatePublic:
return "public"
default:
return "none"
}
}
// ColumnInfo provides meta data describing of a table column.
type ColumnInfo struct {
ID int64 `json:"id"`
Name CIStr `json:"name"`
Offset int `json:"offset"`
DefaultValue interface{} `json:"default"`
types.FieldType `json:"type"`
State SchemaState `json:"state"`
}
// Clone clones ColumnInfo.
func (c *ColumnInfo) Clone() *ColumnInfo {
nc := *c
return &nc
}
// TableInfo provides meta data describing a DB table.
type TableInfo struct {
ID int64 `json:"id"`
Name CIStr `json:"name"`
Charset string `json:"charset"`
Collate string `json:"collate"`
// Columns are listed in the order in which they appear in the schema.
Columns []*ColumnInfo `json:"cols"`
Indices []*IndexInfo `json:"index_info"`
State SchemaState `json:"state"`
}
// Clone clones TableInfo.
func (t *TableInfo) Clone() *TableInfo {
nt := *t
nt.Columns = make([]*ColumnInfo, len(t.Columns))
nt.Indices = make([]*IndexInfo, len(t.Indices))
for i := range t.Columns {
nt.Columns[i] = t.Columns[i].Clone()
}
for i := range t.Indices {
nt.Indices[i] = t.Indices[i].Clone()
}
return &nt
}
// IndexColumn provides index column info.
type IndexColumn struct {
Name CIStr `json:"name"` // Index name
Offset int `json:"offset"` // Index offset
Length int `json:"length"` // Index length
}
// Clone clones IndexColumn.
func (i *IndexColumn) Clone() *IndexColumn {
ni := *i
return &ni
}
// IndexInfo provides meta data describing a DB index.
// It corresponds to the statement `CREATE INDEX Name ON Table (Column);`
// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type IndexInfo struct {
Name CIStr `json:"idx_name"` // Index name.
Table CIStr `json:"tbl_name"` // Table name.
Columns []*IndexColumn `json:"idx_cols"` // Index columns.
Unique bool `json:"is_unique"` // Whether the index is unique.
Primary bool `json:"is_primary"` // Whether the index is primary key.
State SchemaState `json:"state"`
}
// Clone clones IndexInfo.
func (index *IndexInfo) Clone() *IndexInfo {
ni := *index
ni.Columns = make([]*IndexColumn, len(index.Columns))
for i := range index.Columns {
ni.Columns[i] = index.Columns[i].Clone()
}
return &ni
}
// DBInfo provides meta data describing a DB.
type DBInfo struct {
ID int64 `json:"id"` // Database ID
Name CIStr `json:"db_name"` // DB name.
Charset string `json:"charset"`
Collate string `json:"collate"`
Tables []*TableInfo `json:"-"` // Tables in the DB.
State SchemaState `json:"state"`
}
// Clone returns a deep copy of the DBInfo, cloning each contained
// table so the copy can be mutated independently.
func (db *DBInfo) Clone() *DBInfo {
	cloned := *db
	cloned.Tables = make([]*TableInfo, 0, len(db.Tables))
	for _, tbl := range db.Tables {
		cloned.Tables = append(cloned.Tables, tbl.Clone())
	}
	return &cloned
}
// CIStr is case insensitve string. The lower-cased form is cached so
// case-insensitive comparisons do not re-lower on every use.
type CIStr struct {
	O string `json:"O"` // Original string.
	L string `json:"L"` // Lower case string.
}
// String implements the fmt.Stringer interface by returning the
// original, case-preserved form of the string.
func (cis CIStr) String() string { return cis.O }
// NewCIStr creates a CIStr from s, storing both the original and the
// lower-cased representation.
func NewCIStr(s string) (cs CIStr) {
	return CIStr{
		O: s,
		L: strings.ToLower(s),
	}
}
|
package models
import (
"fmt"
"github.com/jinzhu/gorm"
"net/url"
"time"
)
// Image is a stored picture posted in a chat, tagged for later lookup.
type Image struct {
	gorm.Model
	Src    string    `json:"src"`                   // Source URL/path of the picture.
	Tags   []Tag     `gorm:"many2many:images_tags"` // Tags linked via the images_tags join table.
	Date   time.Time `gorm:"not null" json:"date"`  // When the image was posted.
	ChatID int64     `gorm:"not null" json:"-"`     // Chat identifier (matches Chat.TGID below).
	Chat   Chat      `gorm:"ForeignKey:ChatID;AssociationForeignKey:TGID"` // Owning chat, joined on TGID.
}
// GetImgByParams returns images matching the optional query parameters
// "chat_id", "date_from" (exclusive lower bound) and "date_to"
// (exclusive upper bound). Tags and Chat associations are preloaded.
func (img *Image) GetImgByParams(db *gorm.DB, params url.Values) ([]Image, error) {
	images := []Image{}
	q := db.Model(&Image{}).Preload("Tags").Preload("Chat")

	if chatID, ok := params["chat_id"]; ok {
		q = q.Where("chat_id = ?", chatID[0])
	}
	// BUG FIX: this previously read params["date_to"], so the
	// "date_from" lower bound was silently ignored.
	if dateFrom, ok := params["date_from"]; ok {
		q = q.Where("date > ?", dateFrom[0])
	}
	if dateTo, ok := params["date_to"]; ok {
		q = q.Where("date < ?", dateTo[0])
	}

	// BUG FIX: inspect the error state of the executed query, not the
	// base db handle, which never carries this query's error.
	res := q.Find(&images)
	if res.RecordNotFound() {
		return nil, res.Error
	}
	if err := res.Error; err != nil {
		return nil, err
	}
	return images, nil
}
// CreateImageWithTags persists img plus the given tag names inside a
// single transaction: the image row, each (unique) tag, and the
// tag<->image and tag<->chat associations. Any failure rolls the whole
// transaction back and returns a wrapped error.
func (img *Image) CreateImageWithTags(db *gorm.DB, ts []string) error {
	var tags []Tag
	for _, t := range ts {
		tags = append(tags, Tag{Name: t})
	}
	// Chat record the tags get associated with; matched on the TGID
	// association key rather than the surrogate primary key.
	ch := Chat{
		TGID: img.ChatID,
		Tags: tags,
	}
	tx := db.Begin()
	if err := tx.Create(img).Error; err != nil {
		tx.Rollback()
		return fmt.Errorf("unable to save image: %s", err)
	}
	for _, t := range tags {
		// NOTE(review): t is a copy of the slice element, so any ID that
		// CreateIfUnique assigns to it is not written back to tags —
		// confirm the association calls below do not depend on that.
		if err := t.CreateIfUnique(tx); err != nil {
			tx.Rollback()
			return fmt.Errorf("unable to save tag: %s", err)
		}
		if err := tx.Model(&t).
			Association("Images").
			Append(img).Error; err != nil {
			tx.Rollback()
			return fmt.Errorf("unable to save img-tag: %s", err)
		}
		// NOTE(review): error text says "img-tag" but this appends the
		// chat association; message kept byte-identical here.
		if err := tx.Model(&t).
			Association("Chats").
			Append(&ch).Error; err != nil {
			tx.Rollback()
			return fmt.Errorf("unable to save img-tag: %s", err)
		}
	}
	return tx.Commit().Error
}
Update GetImgByParams: restrict results to the requesting user's chats
package models
import (
"fmt"
"github.com/jinzhu/gorm"
"net/url"
"time"
)
// Image is a stored picture posted in a chat, tagged for later lookup.
type Image struct {
	gorm.Model
	Src    string    `json:"src"`                   // Source URL/path of the picture.
	Tags   []Tag     `gorm:"many2many:images_tags"` // Tags linked via the images_tags join table.
	Date   time.Time `gorm:"not null" json:"date"`  // When the image was posted.
	ChatID int64     `gorm:"not null" json:"-"`     // Chat identifier (matches Chat.TGID below).
	Chat   Chat      `gorm:"ForeignKey:ChatID;AssociationForeignKey:TGID"` // Owning chat, joined on TGID.
}
// GetImgByParams returns the images visible to user — limited to the
// user's chats — optionally filtered by the query parameters
// "chat_id", "date_from" and "date_to". Tags and Chat are preloaded.
func (img *Image) GetImgByParams(db *gorm.DB, params url.Values, user *User) ([]Image, error) {
	images := []Image{}
	// BUG FIX: classic gorm requires "IN (?)" to expand a slice; a bare
	// "IN ?" produces invalid SQL.
	q := db.Model(&Image{}).
		Preload("Tags").
		Preload("Chat").
		Where("chat_id IN (?)", user.Chats)

	if chatID, ok := params["chat_id"]; ok {
		q = q.Where("chat_id = ?", chatID[0])
	}
	// BUG FIX: this previously read params["date_to"], so the
	// "date_from" lower bound was silently ignored.
	if dateFrom, ok := params["date_from"]; ok {
		q = q.Where("date > ?", dateFrom[0])
	}
	if dateTo, ok := params["date_to"]; ok {
		q = q.Where("date < ?", dateTo[0])
	}

	// BUG FIX: inspect the error state of the executed query, not the
	// base db handle, which never carries this query's error.
	res := q.Find(&images)
	if res.RecordNotFound() {
		return nil, res.Error
	}
	if err := res.Error; err != nil {
		return nil, err
	}
	return images, nil
}
// CreateImageWithTags stores img and the given tag names in one
// transaction: the image row, each unique tag, and the tag<->image and
// tag<->chat associations. Any failure aborts the whole transaction.
func (img *Image) CreateImageWithTags(db *gorm.DB, ts []string) error {
	var tagList []Tag
	for _, name := range ts {
		tagList = append(tagList, Tag{Name: name})
	}
	chat := Chat{
		TGID: img.ChatID,
		Tags: tagList,
	}

	txn := db.Begin()
	// abort rolls the transaction back and wraps err with format.
	abort := func(format string, err error) error {
		txn.Rollback()
		return fmt.Errorf(format, err)
	}

	if err := txn.Create(img).Error; err != nil {
		return abort("unable to save image: %s", err)
	}
	for i := range tagList {
		tag := tagList[i]
		if err := tag.CreateIfUnique(txn); err != nil {
			return abort("unable to save tag: %s", err)
		}
		if err := txn.Model(&tag).Association("Images").Append(img).Error; err != nil {
			return abort("unable to save img-tag: %s", err)
		}
		if err := txn.Model(&tag).Association("Chats").Append(&chat).Error; err != nil {
			return abort("unable to save img-tag: %s", err)
		}
	}
	return txn.Commit().Error
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"unicode/utf8"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/gogits/git"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/bin"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/process"
"github.com/gogits/gogs/modules/setting"
)
const (
	// TPL_UPDATE_HOOK is the template for the git "update" hook script:
	// interpreter, binary path, then the three hook arguments.
	TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3\n"
)

// Sentinel errors returned by the repository model functions.
var (
	ErrRepoAlreadyExist  = errors.New("Repository already exist")
	ErrRepoNotExist      = errors.New("Repository does not exist")
	ErrRepoFileNotExist  = errors.New("Repository file does not exist")
	ErrRepoNameIllegal   = errors.New("Repository name contains illegal characters")
	ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
	ErrMirrorNotExist    = errors.New("Mirror does not exist")
)

var (
	// LanguageIgns and Licenses hold the available .gitignore and
	// license template names; populated by LoadRepoConfig.
	LanguageIgns, Licenses []string
)
// getAssetList returns the names of all bundled assets under prefix,
// with the "prefix/" part stripped from each name.
func getAssetList(prefix string) []string {
	matched := make([]string, 0, 15)
	for _, asset := range bin.AssetNames() {
		if !strings.HasPrefix(asset, prefix) {
			continue
		}
		matched = append(matched, strings.TrimPrefix(asset, prefix+"/"))
	}
	return matched
}
// LoadRepoConfig loads the available .gitignore and license templates,
// merging bundled assets with any files found under the custom
// configuration directory, and sorts the resulting name lists into
// the package-level LanguageIgns and Licenses variables.
func LoadRepoConfig() {
	// Load .gitignore and license files.
	types := []string{"gitignore", "license"}
	typeFiles := make([][]string, 2)
	for i, t := range types {
		files := getAssetList(path.Join("conf", t))
		customPath := path.Join(setting.CustomPath, "conf", t)
		if com.IsDir(customPath) {
			customFiles, err := com.StatDir(customPath)
			if err != nil {
				log.Fatal("Fail to get custom %s files: %v", t, err)
			}
			// Custom files extend (but do not duplicate) bundled ones.
			for _, f := range customFiles {
				if !com.IsSliceContainsStr(files, f) {
					files = append(files, f)
				}
			}
		}
		typeFiles[i] = files
	}
	LanguageIgns = typeFiles[0]
	Licenses = typeFiles[1]
	sort.Strings(LanguageIgns)
	sort.Strings(Licenses)
}
// NewRepoContext prepares the process-wide git environment: disables
// verbose zip output, ensures a global git user.name/user.email exist,
// and makes sure the bundled git-bare.zip skeleton is on disk.
func NewRepoContext() {
	zip.Verbose = false
	// Check if server has basic git setting.
	stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", "user.name")
	if strings.Contains(stderr, "fatal:") {
		log.Fatal("repo.NewRepoContext(fail to get git user.name): %s", stderr)
	} else if err != nil || len(strings.TrimSpace(stdout)) == 0 {
		// No identity configured yet — install a default one so commits
		// made by the server do not fail.
		if _, stderr, err = process.Exec("NewRepoContext(set email)", "git", "config", "--global", "user.email", "gogitservice@gmail.com"); err != nil {
			log.Fatal("repo.NewRepoContext(fail to set git user.email): %s", stderr)
		} else if _, stderr, err = process.Exec("NewRepoContext(set name)", "git", "config", "--global", "user.name", "Gogs"); err != nil {
			log.Fatal("repo.NewRepoContext(fail to set git user.name): %s", stderr)
		}
	}
	// Extract the bare-repository skeleton asset if it is missing.
	barePath := path.Join(setting.RepoRootPath, "git-bare.zip")
	if !com.IsExist(barePath) {
		data, err := bin.Asset("conf/content/git-bare.zip")
		if err != nil {
			log.Fatal("Fail to get asset 'git-bare.zip': %v", err)
		} else if err := ioutil.WriteFile(barePath, data, os.ModePerm); err != nil {
			log.Fatal("Fail to write asset 'git-bare.zip': %v", err)
		}
	}
}
// Repository represents a git repository.
type Repository struct {
	Id                  int64
	OwnerId             int64 `xorm:"UNIQUE(s)"` // Owning user or organization.
	Owner               *User `xorm:"-"`         // Loaded on demand via GetOwner.
	ForkId              int64
	LowerName           string `xorm:"UNIQUE(s) INDEX NOT NULL"` // Lower-cased Name, used for lookups.
	Name                string `xorm:"INDEX NOT NULL"`
	Description         string
	Website             string
	NumWatches          int
	NumStars            int
	NumForks            int
	NumIssues           int
	NumClosedIssues     int
	NumOpenIssues       int `xorm:"-"` // Derived; not persisted.
	NumMilestones       int `xorm:"NOT NULL DEFAULT 0"`
	NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
	NumOpenMilestones   int `xorm:"-"` // Derived; not persisted.
	NumTags             int `xorm:"-"` // Derived; not persisted.
	IsPrivate           bool
	IsMirror            bool
	IsBare              bool
	IsGoget             bool
	DefaultBranch       string
	Created             time.Time `xorm:"CREATED"`
	Updated             time.Time `xorm:"UPDATED"`
}
// GetOwner loads the repository owner (repo.OwnerId) from the database
// into repo.Owner.
func (repo *Repository) GetOwner() error {
	owner, err := GetUserById(repo.OwnerId)
	repo.Owner = owner
	return err
}
// IsRepositoryExist returns true if the repository with given name under user has already existed.
func IsRepositoryExist(u *User, repoName string) (bool, error) {
	repo := Repository{OwnerId: u.Id}
	// Case-insensitive lookup on the stored lower_name column.
	has, err := x.Where("lower_name = ?", strings.ToLower(repoName)).Get(&repo)
	if err != nil {
		return has, err
	} else if !has {
		return false, nil
	}
	// The DB row alone is not enough: the bare repository directory
	// must also exist on disk.
	return com.IsDir(RepoPath(u.Name, repoName)), nil
}
var (
	// illegalEquals lists names that would collide with reserved routes
	// and therefore cannot be used verbatim as repository names.
	illegalEquals = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"}
	// illegalSuffixs lists forbidden repository-name suffixes.
	illegalSuffixs = []string{".git"}
)
// IsLegalName reports whether repoName is allowed: it must not equal a
// reserved word and must not carry a forbidden suffix. The check is
// case-insensitive.
func IsLegalName(repoName string) bool {
	name := strings.ToLower(repoName)
	for _, reserved := range illegalEquals {
		if name == reserved {
			return false
		}
	}
	for _, suffix := range illegalSuffixs {
		if strings.HasSuffix(name, suffix) {
			return false
		}
	}
	return true
}
// Mirror represents a mirror information of repository.
type Mirror struct {
	Id         int64
	RepoId     int64
	RepoName   string // <user name>/<repo name>
	Interval   int    // Hour.
	Updated    time.Time `xorm:"UPDATED"`
	NextUpdate time.Time // When the next "git remote update" is due.
}
// MirrorRepository creates a mirror repository from source.
// It clones url with --mirror into repoPath, records a Mirror row with
// a 24-hour update interval, and unpacks packed refs.
func MirrorRepository(repoId int64, userName, repoName, repoPath, url string) error {
	// TODO: need timeout.
	_, stderr, err := process.Exec(fmt.Sprintf("MirrorRepository: %s/%s", userName, repoName),
		"git", "clone", "--mirror", url, repoPath)
	if err != nil {
		return errors.New("git clone --mirror: " + stderr)
	}
	if _, err = x.InsertOne(&Mirror{
		RepoId:     repoId,
		RepoName:   strings.ToLower(userName + "/" + repoName),
		Interval:   24,
		NextUpdate: time.Now().Add(24 * time.Hour),
	}); err != nil {
		return err
	}
	return git.UnpackRefs(repoPath)
}
// GetMirror fetches the mirror record for the repository with the
// given id; ErrMirrorNotExist is returned when none is stored.
func GetMirror(repoId int64) (*Mirror, error) {
	mirror := &Mirror{RepoId: repoId}
	has, err := x.Get(mirror)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrMirrorNotExist
	}
	return mirror, nil
}
// UpdateMirror persists the given mirror record by its primary key.
func UpdateMirror(m *Mirror) error {
	if _, err := x.Id(m.Id).Update(m); err != nil {
		return err
	}
	return nil
}
// MirrorUpdate checks and updates mirror repositories.
// For every mirror whose NextUpdate is due it runs "git remote update",
// unpacks refs, and reschedules according to the mirror's interval.
// Errors are logged, not returned.
func MirrorUpdate() {
	if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error {
		m := bean.(*Mirror)
		if m.NextUpdate.After(time.Now()) {
			// Not due yet; skip this mirror.
			return nil
		}
		// TODO: need timeout.
		repoPath := filepath.Join(setting.RepoRootPath, m.RepoName+".git")
		if _, stderr, err := process.ExecDir(
			repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath),
			"git", "remote", "update"); err != nil {
			return errors.New("git remote update: " + stderr)
		} else if err = git.UnpackRefs(repoPath); err != nil {
			return errors.New("UnpackRefs: " + err.Error())
		}
		// Schedule the next refresh.
		m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour)
		return UpdateMirror(m)
	}); err != nil {
		log.Error("repo.MirrorUpdate: %v", err)
	}
}
// MigrateRepository migrates an existing repository from another
// project hosting service: either as a live mirror, or via a one-time
// clone / pull-from-source / push-to-local sequence.
func MigrateRepository(u *User, name, desc string, private, mirror bool, url string) (*Repository, error) {
	repo, err := CreateRepository(u, name, desc, "", "", private, mirror, false)
	if err != nil {
		return nil, err
	}

	// Clone to temporary path and do the init commit.
	tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond()))
	// FIX: the MkdirAll error was silently ignored and the temporary
	// directory was never removed afterwards (disk-space leak).
	if err = os.MkdirAll(tmpDir, os.ModePerm); err != nil {
		return repo, err
	}
	defer os.RemoveAll(tmpDir)

	repoPath := RepoPath(u.Name, name)
	repo.IsBare = false
	if mirror {
		if err = MirrorRepository(repo.Id, u.Name, repo.Name, repoPath, url); err != nil {
			return repo, err
		}
		repo.IsMirror = true
		return repo, UpdateRepository(repo)
	}

	// TODO: need timeout.
	// Clone from local repository.
	_, stderr, err := process.Exec(
		fmt.Sprintf("MigrateRepository(git clone): %s", repoPath),
		"git", "clone", repoPath, tmpDir)
	if err != nil {
		return repo, errors.New("git clone: " + stderr)
	}

	// TODO: need timeout.
	// Pull data from source.
	if _, stderr, err = process.ExecDir(
		tmpDir, fmt.Sprintf("MigrateRepository(git pull): %s", repoPath),
		"git", "pull", url); err != nil {
		return repo, errors.New("git pull: " + stderr)
	}

	// TODO: need timeout.
	// Push data to local repository.
	if _, stderr, err = process.ExecDir(
		tmpDir, fmt.Sprintf("MigrateRepository(git push): %s", repoPath),
		"git", "push", "origin", "master"); err != nil {
		return repo, errors.New("git push: " + stderr)
	}

	return repo, UpdateRepository(repo)
}
// extractGitBareZip unpacks the bundled git-bare.zip skeleton into
// repoPath to initialize a bare repository.
func extractGitBareZip(repoPath string) error {
	archive, err := zip.Open(filepath.Join(setting.RepoRootPath, "git-bare.zip"))
	if err != nil {
		return err
	}
	defer archive.Close()
	return archive.ExtractTo(repoPath)
}
// initRepoCommit temporarily changes with work directory.
// It stages everything in tmpPath, creates the initial commit with the
// given author signature, and pushes it to origin/master.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
	var stderr string
	if _, stderr, err = process.ExecDir(
		tmpPath, fmt.Sprintf("initRepoCommit(git add): %s", tmpPath),
		"git", "add", "--all"); err != nil {
		return errors.New("git add: " + stderr)
	}
	if _, stderr, err = process.ExecDir(
		tmpPath, fmt.Sprintf("initRepoCommit(git commit): %s", tmpPath),
		"git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
		"-m", "Init commit"); err != nil {
		return errors.New("git commit: " + stderr)
	}
	if _, stderr, err = process.ExecDir(
		tmpPath, fmt.Sprintf("initRepoCommit(git push): %s", tmpPath),
		"git", "push", "origin", "master"); err != nil {
		return errors.New("git push: " + stderr)
	}
	return nil
}
func createHookUpdate(hookPath, content string) error {
pu, err := os.OpenFile(hookPath, os.O_CREATE|os.O_WRONLY, 0777)
if err != nil {
return err
}
defer pu.Close()
_, err = pu.WriteString(content)
return err
}
// SetRepoEnvs exports the current user/repository identifiers as
// environment variables for the git update hook to consume.
func SetRepoEnvs(userId int64, userName, repoName, repoUserName string) {
	envs := map[string]string{
		"userId":       base.ToStr(userId),
		"userName":     userName,
		"repoName":     repoName,
		"repoUserName": repoUserName,
	}
	for key, value := range envs {
		os.Setenv(key, value)
	}
}
// InitRepository initializes README and .gitignore if needed.
// It extracts the bare skeleton into the repository path, installs the
// update hook, then (optionally) seeds README / .gitignore / LICENSE
// files via a temporary clone followed by an initial commit.
// NOTE(review): the first parameter f is unused — confirm before removing.
func initRepository(f string, user *User, repo *Repository, initReadme bool, repoLang, license string) error {
	repoPath := RepoPath(user.Name, repo.Name)
	// Create bare new repository.
	if err := extractGitBareZip(repoPath); err != nil {
		return err
	}
	// Escape the application path for embedding in the hook script.
	rp := strings.NewReplacer("\\", "/", " ", "\\ ")
	// hook/post-update
	if err := createHookUpdate(filepath.Join(repoPath, "hooks", "update"),
		fmt.Sprintf(TPL_UPDATE_HOOK, setting.ScriptType,
			rp.Replace(appPath))); err != nil {
		return err
	}
	// Initialize repository according to user's choice.
	fileName := map[string]string{}
	if initReadme {
		fileName["readme"] = "README.md"
	}
	if repoLang != "" {
		fileName["gitign"] = ".gitignore"
	}
	if license != "" {
		fileName["license"] = "LICENSE"
	}
	// Clone to temporary path and do the init commit.
	tmpDir := filepath.Join(os.TempDir(), base.ToStr(time.Now().Nanosecond()))
	os.MkdirAll(tmpDir, os.ModePerm)
	_, stderr, err := process.Exec(
		fmt.Sprintf("initRepository(git clone): %s", repoPath),
		"git", "clone", repoPath, tmpDir)
	if err != nil {
		return errors.New("initRepository(git clone): " + stderr)
	}
	// README: title underlined with '=' (rune count, so non-ASCII names
	// still align), then the description.
	if initReadme {
		defaultReadme := repo.Name + "\n" + strings.Repeat("=",
			utf8.RuneCountInString(repo.Name)) + "\n\n" + repo.Description
		if err := ioutil.WriteFile(filepath.Join(tmpDir, fileName["readme"]),
			[]byte(defaultReadme), 0644); err != nil {
			return err
		}
	}
	// .gitignore: prefer the bundled asset, fall back to custom files.
	if repoLang != "" {
		filePath := "conf/gitignore/" + repoLang
		targetPath := path.Join(tmpDir, fileName["gitign"])
		data, err := bin.Asset(filePath)
		if err == nil {
			if err = ioutil.WriteFile(targetPath, data, os.ModePerm); err != nil {
				return err
			}
		} else {
			// Check custom files.
			filePath = path.Join(setting.CustomPath, "conf/gitignore", repoLang)
			if com.IsFile(filePath) {
				if err := com.Copy(filePath, targetPath); err != nil {
					return err
				}
			}
		}
	}
	// LICENSE: same bundled-then-custom lookup as above.
	if license != "" {
		filePath := "conf/license/" + license
		targetPath := path.Join(tmpDir, fileName["license"])
		data, err := bin.Asset(filePath)
		if err == nil {
			if err = ioutil.WriteFile(targetPath, data, os.ModePerm); err != nil {
				return err
			}
		} else {
			// Check custom files.
			filePath = path.Join(setting.CustomPath, "conf/license", license)
			if com.IsFile(filePath) {
				if err := com.Copy(filePath, targetPath); err != nil {
					return err
				}
			}
		}
	}
	if len(fileName) == 0 {
		// Nothing to seed: leave the repository bare.
		return nil
	}
	SetRepoEnvs(user.Id, user.Name, repo.Name, user.Name)
	// Apply changes and commit.
	return initRepoCommit(tmpDir, user.NewGitSig())
}
// CreateRepository creates a repository for given user or organization.
// Inside one transaction it inserts the repository row, grants access
// (to every owner-team member for organizations, otherwise to the
// owner), bumps counters and team bookkeeping. After commit it sets up
// watches and the news feed, then initializes the on-disk repository
// (skipped for mirrors, which are populated by MirrorRepository).
func CreateRepository(u *User, name, desc, lang, license string, private, mirror, initReadme bool) (*Repository, error) {
	if !IsLegalName(name) {
		return nil, ErrRepoNameIllegal
	}
	isExist, err := IsRepositoryExist(u, name)
	if err != nil {
		return nil, err
	} else if isExist {
		return nil, ErrRepoAlreadyExist
	}
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return nil, err
	}
	repo := &Repository{
		OwnerId:     u.Id,
		Owner:       u,
		Name:        name,
		LowerName:   strings.ToLower(name),
		Description: desc,
		IsPrivate:   private,
		// Bare unless any initial content (ignore/license/readme) is requested.
		IsBare: lang == "" && license == "" && !initReadme,
	}
	if !repo.IsBare {
		repo.DefaultBranch = "master"
	}
	if _, err = sess.Insert(repo); err != nil {
		sess.Rollback()
		return nil, err
	}
	var t *Team // Owner team.
	// Mirrors are read-only; everything else gets write access.
	mode := WRITABLE
	if mirror {
		mode = READABLE
	}
	access := &Access{
		UserName: u.LowerName,
		RepoName: strings.ToLower(path.Join(u.Name, repo.Name)),
		Mode:     mode,
	}
	// Give access to all members in owner team.
	if u.IsOrganization() {
		t, err = u.GetOwnerTeam()
		if err != nil {
			sess.Rollback()
			return nil, err
		}
		us, err := GetTeamMembers(u.Id, t.Id)
		if err != nil {
			sess.Rollback()
			return nil, err
		}
		// Reuse the access template, only swapping the user name.
		for _, u := range us {
			access.UserName = u.LowerName
			if _, err = sess.Insert(access); err != nil {
				sess.Rollback()
				return nil, err
			}
		}
	} else {
		if _, err = sess.Insert(access); err != nil {
			sess.Rollback()
			return nil, err
		}
	}
	rawSql := "UPDATE `user` SET num_repos = num_repos + 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, u.Id); err != nil {
		sess.Rollback()
		return nil, err
	}
	// Update owner team info and count.
	if u.IsOrganization() {
		// RepoIds is a "$<id>|"-delimited list of repositories the team owns.
		t.RepoIds += "$" + base.ToStr(repo.Id) + "|"
		t.NumRepos++
		if _, err = sess.Id(t.Id).AllCols().Update(t); err != nil {
			sess.Rollback()
			return nil, err
		}
	}
	if err = sess.Commit(); err != nil {
		return nil, err
	}
	// Post-commit bookkeeping: failures below are logged, not fatal.
	if u.IsOrganization() {
		ous, err := GetOrgUsersByOrgId(u.Id)
		if err != nil {
			log.Error("repo.CreateRepository(GetOrgUsersByOrgId): %v", err)
		} else {
			for _, ou := range ous {
				if err = WatchRepo(ou.Uid, repo.Id, true); err != nil {
					log.Error("repo.CreateRepository(WatchRepo): %v", err)
				}
			}
		}
	}
	if err = WatchRepo(u.Id, repo.Id, true); err != nil {
		log.Error("repo.CreateRepository(WatchRepo2): %v", err)
	}
	if err = NewRepoAction(u, repo); err != nil {
		log.Error("repo.CreateRepository(NewRepoAction): %v", err)
	}
	// No need for init for mirror.
	if mirror {
		return repo, nil
	}
	repoPath := RepoPath(u.Name, repo.Name)
	if err = initRepository(repoPath, u, repo, initReadme, lang, license); err != nil {
		// Initialization failed: remove the on-disk repository again.
		if err2 := os.RemoveAll(repoPath); err2 != nil {
			log.Error("repo.CreateRepository(initRepository): %v", err)
			return nil, errors.New(fmt.Sprintf(
				"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2))
		}
		return nil, err
	}
	_, stderr, err := process.ExecDir(
		repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
		"git", "update-server-info")
	if err != nil {
		return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
	}
	return repo, nil
}
// GetRepositoriesWithUsers returns given number of repository objects with offset.
// It also auto-gets corresponding users.
func GetRepositoriesWithUsers(num, offset int) ([]*Repository, error) {
	repos := make([]*Repository, 0, num)
	if err := x.Limit(num, offset).Asc("id").Find(&repos); err != nil {
		return nil, err
	}
	// Resolve the owner of each repository.
	for _, repo := range repos {
		repo.Owner = &User{Id: repo.OwnerId}
		has, err := x.Get(repo.Owner)
		if err != nil {
			return nil, err
		} else if !has {
			// Owner row is missing; surface as user-not-exist.
			return nil, ErrUserNotExist
		}
	}
	return repos, nil
}
// RepoPath returns the on-disk path of a repository: the owner's user
// directory plus the lower-cased repository name with a ".git" suffix.
func RepoPath(userName, repoName string) string {
	dirName := strings.ToLower(repoName) + ".git"
	return filepath.Join(UserPath(userName), dirName)
}
// TransferOwnership transfers all corresponding setting from old user to new one.
// Inside one transaction it rewrites access records, updates the
// repository row, adjusts both users' repository counters, ensures the
// new owner watches the repository, records the transfer action, and
// renames the on-disk repository directory.
// FIX: removed leftover commented-out debug statements
// (fmt.Println("0").."9") and a dead commented-out code block.
func TransferOwnership(u *User, newOwner string, repo *Repository) (err error) {
	newUser, err := GetUserByName(newOwner)
	if err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	// Update accesses: point the old owner's personal access row at the
	// new owner, then rewrite the repo name on the remaining rows.
	access := &Access{
		RepoName: newUser.LowerName + "/" + repo.LowerName,
		UserName: newUser.LowerName,
	}
	sess.Where("repo_name = ?", u.LowerName+"/"+repo.LowerName)
	_, err = sess.And("user_name = ?", u.LowerName).Update(&Access{UserName: newUser.LowerName})
	if err != nil {
		sess.Rollback()
		return err
	}
	_, err = sess.Where("repo_name = ?", u.LowerName+"/"+repo.LowerName).Update(access)
	if err != nil {
		sess.Rollback()
		return err
	}

	// Update repository.
	repo.OwnerId = newUser.Id
	if _, err := sess.Id(repo.Id).Update(repo); err != nil {
		sess.Rollback()
		return err
	}

	// Update user repository number.
	rawSql := "UPDATE `user` SET num_repos = num_repos + 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, newUser.Id); err != nil {
		sess.Rollback()
		return err
	}
	rawSql = "UPDATE `user` SET num_repos = num_repos - 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, u.Id); err != nil {
		sess.Rollback()
		return err
	}

	// Add watch of new owner to repository.
	if !IsWatching(newUser.Id, repo.Id) {
		if err = WatchRepo(newUser.Id, repo.Id, true); err != nil {
			sess.Rollback()
			return err
		}
	}

	if err = TransferRepoAction(u, newUser, repo); err != nil {
		sess.Rollback()
		return err
	}

	// Change repository directory name.
	if err = os.Rename(RepoPath(u.Name, repo.Name), RepoPath(newUser.Name, repo.Name)); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}
// ChangeRepositoryName changes all corresponding setting from old repository name to new one.
// It rewrites every access record to the new name inside a transaction
// and renames the on-disk repository directory.
func ChangeRepositoryName(userName, oldRepoName, newRepoName string) (err error) {
	// Update accesses.
	accesses := make([]Access, 0, 10)
	if err = x.Find(&accesses, &Access{RepoName: strings.ToLower(userName + "/" + oldRepoName)}); err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	for i := range accesses {
		// NOTE(review): the lookup above lower-cases the name but this
		// assignment does not — confirm whether the stored RepoName is
		// expected to be lower-cased.
		accesses[i].RepoName = userName + "/" + newRepoName
		if err = UpdateAccessWithSession(sess, &accesses[i]); err != nil {
			// FIX: previously returned without rolling back, leaving the
			// open transaction to be torn down only by the deferred Close.
			sess.Rollback()
			return err
		}
	}

	// Change repository directory name.
	if err = os.Rename(RepoPath(userName, oldRepoName), RepoPath(userName, newRepoName)); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}
// UpdateRepository refreshes the stored row for repo, keeping the
// lower-cased name in sync and clamping over-long text columns to the
// 255-byte column width.
func UpdateRepository(repo *Repository) error {
	repo.LowerName = strings.ToLower(repo.Name)

	const maxTextLen = 255
	if len(repo.Description) > maxTextLen {
		repo.Description = repo.Description[:maxTextLen]
	}
	if len(repo.Website) > maxTextLen {
		repo.Website = repo.Website[:maxTextLen]
	}
	_, err := x.Id(repo.Id).AllCols().Update(repo)
	return err
}
// DeleteRepository deletes a repository for a user or orgnaztion.
// Inside one transaction it removes the repository row and every
// dependent record (accesses, actions, watches, mirrors, issue links,
// milestones, releases, comments, issues), decrements the owner's
// repository counter, and finally deletes the on-disk repository.
func DeleteRepository(userId, repoId int64, userName string) error {
	repo := &Repository{Id: repoId, OwnerId: userId}
	has, err := x.Get(repo)
	if err != nil {
		return err
	} else if !has {
		return ErrRepoNotExist
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	if _, err = sess.Delete(&Repository{Id: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Delete(&Access{RepoName: strings.ToLower(path.Join(userName, repo.Name))}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Delete(&Action{RepoId: repo.Id}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Watch{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Mirror{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&IssueUser{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Milestone{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Release{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	// Delete comments.
	// NOTE(review): the iteration runs on the engine (x), not the
	// session, so it reads outside the transaction — confirm intended.
	if err = x.Iterate(&Issue{RepoId: repoId}, func(idx int, bean interface{}) error {
		issue := bean.(*Issue)
		if _, err = sess.Delete(&Comment{IssueId: issue.Id}); err != nil {
			sess.Rollback()
			return err
		}
		return nil
	}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Issue{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	rawSql := "UPDATE `user` SET num_repos = num_repos - 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, userId); err != nil {
		sess.Rollback()
		return err
	}
	// Remove the repository from disk only after the DB work succeeded.
	if err = os.RemoveAll(RepoPath(userName, repo.Name)); err != nil {
		sess.Rollback()
		return err
	}
	return sess.Commit()
}
// GetRepositoryByName returns the repository by given name under user if exists.
// The lookup is case-insensitive (matched on LowerName).
func GetRepositoryByName(userId int64, repoName string) (*Repository, error) {
	repo := &Repository{
		OwnerId:   userId,
		LowerName: strings.ToLower(repoName),
	}
	has, err := x.Get(repo)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrRepoNotExist
	}
	// FIX: return an explicit nil on success instead of the stale err
	// variable (consistent with GetRepositoryById).
	return repo, nil
}
// GetRepositoryById fetches the repository with the given primary key,
// returning ErrRepoNotExist when no such row exists.
func GetRepositoryById(id int64) (*Repository, error) {
	var repo Repository
	has, err := x.Id(id).Get(&repo)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrRepoNotExist
	}
	return &repo, nil
}
// GetRepositories returns a list of repositories of given user,
// newest-updated first. When private is false, private repositories
// are excluded.
func GetRepositories(uid int64, private bool) ([]*Repository, error) {
	repos := make([]*Repository, 0, 10)
	sess := x.Desc("updated")
	if !private {
		// Caller may not see private repositories.
		sess.Where("is_private=?", false)
	}
	err := sess.Find(&repos, &Repository{OwnerId: uid})
	return repos, err
}
// GetRecentUpdatedRepositories returns the five most recently updated
// public repositories.
func GetRecentUpdatedRepositories() ([]*Repository, error) {
	var repos []*Repository
	err := x.Where("is_private=?", false).Limit(5).Desc("updated").Find(&repos)
	return repos, err
}
// GetRepositoryCount returns the total number of repositories owned by
// the given user.
func GetRepositoryCount(user *User) (int64, error) {
	owned := &Repository{OwnerId: user.Id}
	return x.Count(owned)
}
// GetCollaboratorNames returns the user names of everyone holding an
// access record on the given repository.
func GetCollaboratorNames(repoName string) ([]string, error) {
	accesses := make([]*Access, 0, 10)
	if err := x.Find(&accesses, &Access{RepoName: strings.ToLower(repoName)}); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(accesses))
	for _, a := range accesses {
		names = append(names, a.UserName)
	}
	return names, nil
}
// GetCollaborativeRepos returns a list of repositories that user is collaborator.
// Repositories the user owns personally (access name starting with the
// user's own name) are excluded.
func GetCollaborativeRepos(uname string) ([]*Repository, error) {
	uname = strings.ToLower(uname)
	accesses := make([]*Access, 0, 10)
	if err := x.Find(&accesses, &Access{UserName: uname}); err != nil {
		return nil, err
	}

	repos := make([]*Repository, 0, 10)
	for _, access := range accesses {
		infos := strings.Split(access.RepoName, "/")
		// FIX: guard against malformed access records — a RepoName
		// without the "owner/name" shape used to panic on infos[1].
		if len(infos) != 2 {
			continue
		}
		if infos[0] == uname {
			continue
		}

		u, err := GetUserByName(infos[0])
		if err != nil {
			return nil, err
		}
		repo, err := GetRepositoryByName(u.Id, infos[1])
		if err != nil {
			return nil, err
		}
		repo.Owner = u
		repos = append(repos, repo)
	}
	return repos, nil
}
// GetCollaborators resolves the full user records of everyone holding
// an access record on the given repository.
func GetCollaborators(repoName string) (us []*User, err error) {
	accesses := make([]*Access, 0, 10)
	if err = x.Find(&accesses, &Access{RepoName: strings.ToLower(repoName)}); err != nil {
		return nil, err
	}
	us = make([]*User, len(accesses))
	for i, a := range accesses {
		user, lookupErr := GetUserByName(a.UserName)
		if lookupErr != nil {
			return nil, lookupErr
		}
		us[i] = user
	}
	return us, nil
}
// Watch is connection request for receiving repository notifycation.
// Each row links one user to one repository they watch; the pair is
// unique via the shared UNIQUE(watch) index.
type Watch struct {
	Id     int64
	UserId int64 `xorm:"UNIQUE(watch)"` // Watching user.
	RepoId int64 `xorm:"UNIQUE(watch)"` // Watched repository.
}
// WatchRepo watches (watch=true) or unwatches the repository for the
// given user, keeping the repository's num_watches counter in sync.
func WatchRepo(uid, rid int64, watch bool) (err error) {
	if watch {
		if _, err = x.Insert(&Watch{RepoId: rid, UserId: uid}); err != nil {
			return err
		}
		rawSql := "UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?"
		_, err = x.Exec(rawSql, rid)
	} else {
		if _, err = x.Delete(&Watch{0, uid, rid}); err != nil {
			return err
		}
		rawSql := "UPDATE `repository` SET num_watches = num_watches - 1 WHERE id = ?"
		_, err = x.Exec(rawSql, rid)
	}
	// NOTE(review): the insert/delete and the counter update are not in
	// one transaction, so the counter can drift on partial failure.
	return err
}
// GetWatchers returns every watch record registered for the given
// repository.
func GetWatchers(rid int64) ([]*Watch, error) {
	list := make([]*Watch, 0, 10)
	err := x.Find(&list, &Watch{RepoId: rid})
	return list, err
}
// NotifyWatchers creates batch of actions for every watcher.
// The action row is inserted once for the actor and once per watcher
// (skipping the actor), resetting Id each time so InsertOne creates a
// fresh row.
func NotifyWatchers(act *Action) error {
	// Add feeds for user self and all watchers.
	watches, err := GetWatchers(act.RepoId)
	if err != nil {
		return errors.New("repo.NotifyWatchers(get watches): " + err.Error())
	}
	// Add feed for actioner.
	act.UserId = act.ActUserId
	if _, err = x.InsertOne(act); err != nil {
		return errors.New("repo.NotifyWatchers(create action): " + err.Error())
	}
	for i := range watches {
		if act.ActUserId == watches[i].UserId {
			// The actor already got its feed entry above.
			continue
		}
		act.Id = 0 // reset primary key so the next insert creates a new row
		act.UserId = watches[i].UserId
		if _, err = x.InsertOne(act); err != nil {
			return errors.New("repo.NotifyWatchers(create action): " + err.Error())
		}
	}
	return nil
}
// IsWatching checks if user has watched given repository.
func IsWatching(uid, rid int64) bool {
has, _ := x.Get(&Watch{0, uid, rid})
return has
}
// ForkRepository is a placeholder for repository forking; it is not
// yet implemented and currently does nothing.
func ForkRepository(repoName string, uid int64) {
}
Remove commented-out debug statements and dead code from TransferOwnership
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"unicode/utf8"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/gogits/git"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/bin"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/process"
"github.com/gogits/gogs/modules/setting"
)
const (
	// TPL_UPDATE_HOOK is the template for the git "update" hook script:
	// interpreter, binary path, then the three hook arguments.
	TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3\n"
)

// Sentinel errors returned by the repository model functions.
var (
	ErrRepoAlreadyExist  = errors.New("Repository already exist")
	ErrRepoNotExist      = errors.New("Repository does not exist")
	ErrRepoFileNotExist  = errors.New("Repository file does not exist")
	ErrRepoNameIllegal   = errors.New("Repository name contains illegal characters")
	ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
	ErrMirrorNotExist    = errors.New("Mirror does not exist")
)

var (
	// LanguageIgns and Licenses hold the available .gitignore and
	// license template names; populated by LoadRepoConfig.
	LanguageIgns, Licenses []string
)
// getAssetList returns corresponding asset list in 'conf'.
func getAssetList(prefix string) []string {
	assets := make([]string, 0, 15)
	for _, name := range bin.AssetNames() {
		// Keep only assets under the requested prefix, stripping the
		// prefix (and its trailing slash) from the reported name.
		if strings.HasPrefix(name, prefix) {
			assets = append(assets, strings.TrimPrefix(name, prefix+"/"))
		}
	}
	return assets
}
// LoadRepoConfig loads the available .gitignore and license templates,
// merging bundled assets with any files found under the custom
// configuration directory, and sorts the resulting name lists into
// the package-level LanguageIgns and Licenses variables.
func LoadRepoConfig() {
	// Load .gitignore and license files.
	types := []string{"gitignore", "license"}
	typeFiles := make([][]string, 2)
	for i, t := range types {
		files := getAssetList(path.Join("conf", t))
		customPath := path.Join(setting.CustomPath, "conf", t)
		if com.IsDir(customPath) {
			customFiles, err := com.StatDir(customPath)
			if err != nil {
				log.Fatal("Fail to get custom %s files: %v", t, err)
			}
			// Custom files extend (but do not duplicate) bundled ones.
			for _, f := range customFiles {
				if !com.IsSliceContainsStr(files, f) {
					files = append(files, f)
				}
			}
		}
		typeFiles[i] = files
	}
	LanguageIgns = typeFiles[0]
	Licenses = typeFiles[1]
	sort.Strings(LanguageIgns)
	sort.Strings(Licenses)
}
// NewRepoContext prepares the process-wide git environment: disables
// verbose zip output, ensures a global git user.name/user.email exist,
// and makes sure the bundled git-bare.zip skeleton is on disk.
func NewRepoContext() {
	zip.Verbose = false
	// Check if server has basic git setting.
	stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", "user.name")
	if strings.Contains(stderr, "fatal:") {
		log.Fatal("repo.NewRepoContext(fail to get git user.name): %s", stderr)
	} else if err != nil || len(strings.TrimSpace(stdout)) == 0 {
		// No identity configured yet — install a default one so commits
		// made by the server do not fail.
		if _, stderr, err = process.Exec("NewRepoContext(set email)", "git", "config", "--global", "user.email", "gogitservice@gmail.com"); err != nil {
			log.Fatal("repo.NewRepoContext(fail to set git user.email): %s", stderr)
		} else if _, stderr, err = process.Exec("NewRepoContext(set name)", "git", "config", "--global", "user.name", "Gogs"); err != nil {
			log.Fatal("repo.NewRepoContext(fail to set git user.name): %s", stderr)
		}
	}
	// Extract the bare-repository skeleton asset if it is missing.
	barePath := path.Join(setting.RepoRootPath, "git-bare.zip")
	if !com.IsExist(barePath) {
		data, err := bin.Asset("conf/content/git-bare.zip")
		if err != nil {
			log.Fatal("Fail to get asset 'git-bare.zip': %v", err)
		} else if err := ioutil.WriteFile(barePath, data, os.ModePerm); err != nil {
			log.Fatal("Fail to write asset 'git-bare.zip': %v", err)
		}
	}
}
// Repository represents a git repository.
type Repository struct {
	Id      int64
	OwnerId int64 `xorm:"UNIQUE(s)"`
	Owner   *User `xorm:"-"` // loaded on demand via GetOwner; not persisted
	ForkId  int64
	// LowerName is the lower-cased Name used for unique lookups.
	LowerName   string `xorm:"UNIQUE(s) INDEX NOT NULL"`
	Name        string `xorm:"INDEX NOT NULL"`
	Description string
	Website     string
	// Denormalized counters; num_repos/num_watches style counters are
	// kept in sync by raw UPDATE statements elsewhere in this file.
	NumWatches          int
	NumStars            int
	NumForks            int
	NumIssues           int
	NumClosedIssues     int
	NumOpenIssues       int `xorm:"-"` // derived, not persisted
	NumMilestones       int `xorm:"NOT NULL DEFAULT 0"`
	NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
	NumOpenMilestones   int `xorm:"-"`
	NumTags             int `xorm:"-"`
	IsPrivate           bool
	IsMirror            bool
	// IsBare is true when the repo was created without readme,
	// .gitignore or license (no initial commit).
	IsBare        bool
	IsGoget       bool
	DefaultBranch string
	Created       time.Time `xorm:"CREATED"`
	Updated       time.Time `xorm:"UPDATED"`
}
// GetOwner loads the owner user of the repository into repo.Owner.
func (repo *Repository) GetOwner() (err error) {
	owner, err := GetUserById(repo.OwnerId)
	repo.Owner = owner
	return err
}
// IsRepositoryExist reports whether user 'u' already owns a repository
// called repoName: the database record must exist AND the bare
// repository directory must be present on disk.
func IsRepositoryExist(u *User, repoName string) (bool, error) {
	bean := Repository{OwnerId: u.Id}
	has, err := x.Where("lower_name = ?", strings.ToLower(repoName)).Get(&bean)
	if err != nil {
		return has, err
	}
	if !has {
		return false, nil
	}
	return com.IsDir(RepoPath(u.Name, repoName)), nil
}
var (
	// Names that can never be used for a repository because they
	// collide with reserved routes.
	illegalEquals = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"}
	// Suffixes a repository name may not end with.
	illegalSuffixs = []string{".git"}
)

// IsLegalName reports whether repoName is acceptable as a repository
// name: it must not (case-insensitively) equal a reserved word nor end
// with a reserved suffix.
func IsLegalName(repoName string) bool {
	lower := strings.ToLower(repoName)
	for _, reserved := range illegalEquals {
		if lower == reserved {
			return false
		}
	}
	for _, suffix := range illegalSuffixs {
		if strings.HasSuffix(lower, suffix) {
			return false
		}
	}
	return true
}
// Mirror represents a mirror information of repository.
type Mirror struct {
	Id       int64
	RepoId   int64
	RepoName string // "<user name>/<repo name>", stored lower-cased (see MirrorRepository)
	Interval int    // sync interval in hours
	Updated  time.Time `xorm:"UPDATED"`
	// NextUpdate is the earliest time MirrorUpdate will sync this
	// mirror again.
	NextUpdate time.Time
}
// MirrorRepository creates a mirror clone of 'url' at repoPath and
// records a Mirror row scheduling the next sync 24 hours from now.
func MirrorRepository(repoId int64, userName, repoName, repoPath, url string) error {
	// TODO: need timeout.
	_, stderr, err := process.Exec(fmt.Sprintf("MirrorRepository: %s/%s", userName, repoName),
		"git", "clone", "--mirror", url, repoPath)
	if err != nil {
		return errors.New("git clone --mirror: " + stderr)
	}

	mirror := &Mirror{
		RepoId:     repoId,
		RepoName:   strings.ToLower(userName + "/" + repoName),
		Interval:   24,
		NextUpdate: time.Now().Add(24 * time.Hour),
	}
	if _, err = x.InsertOne(mirror); err != nil {
		return err
	}
	return git.UnpackRefs(repoPath)
}
// GetMirror returns the mirror record of the repository identified by
// repoId, or ErrMirrorNotExist when no such record exists.
func GetMirror(repoId int64) (*Mirror, error) {
	mirror := &Mirror{RepoId: repoId}
	switch has, err := x.Get(mirror); {
	case err != nil:
		return nil, err
	case !has:
		return nil, ErrMirrorNotExist
	}
	return mirror, nil
}
// UpdateMirror persists the given mirror record, addressed by its Id.
func UpdateMirror(m *Mirror) error {
	if _, err := x.Id(m.Id).Update(m); err != nil {
		return err
	}
	return nil
}
// MirrorUpdate iterates over all mirror records and, for each one
// whose NextUpdate time has passed, runs `git remote update` in the
// mirror's repository, unpacks refs, and reschedules the next sync.
// A failure on any single mirror aborts the iteration; errors are
// only logged, never returned.
func MirrorUpdate() {
	if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error {
		m := bean.(*Mirror)
		// Skip mirrors that are not due yet.
		if m.NextUpdate.After(time.Now()) {
			return nil
		}

		// TODO: need timeout.
		repoPath := filepath.Join(setting.RepoRootPath, m.RepoName+".git")
		if _, stderr, err := process.ExecDir(
			repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath),
			"git", "remote", "update"); err != nil {
			return errors.New("git remote update: " + stderr)
		} else if err = git.UnpackRefs(repoPath); err != nil {
			return errors.New("UnpackRefs: " + err.Error())
		}

		m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour)
		return UpdateMirror(m)
	}); err != nil {
		log.Error("repo.MirrorUpdate: %v", err)
	}
}
// MigrateRepository migrates an existing repository from another
// project hosting service reachable at 'url'.
//
// Mirror migrations perform a bare `git clone --mirror` and return.
// Non-mirror migrations clone the freshly created local repository
// into a temporary directory, pull from the source URL, and push the
// result back to the local repository.
func MigrateRepository(u *User, name, desc string, private, mirror bool, url string) (*Repository, error) {
	repo, err := CreateRepository(u, name, desc, "", "", private, mirror, false)
	if err != nil {
		return nil, err
	}

	// Clone to temporary path and do the init commit.
	tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond()))
	// FIX: the MkdirAll error used to be silently ignored, and the
	// temporary directory was leaked on every migration.
	if err = os.MkdirAll(tmpDir, os.ModePerm); err != nil {
		return repo, err
	}
	defer os.RemoveAll(tmpDir)

	repoPath := RepoPath(u.Name, name)
	repo.IsBare = false
	if mirror {
		if err = MirrorRepository(repo.Id, u.Name, repo.Name, repoPath, url); err != nil {
			return repo, err
		}
		repo.IsMirror = true
		return repo, UpdateRepository(repo)
	}

	// TODO: need timeout.
	// Clone from local repository.
	_, stderr, err := process.Exec(
		fmt.Sprintf("MigrateRepository(git clone): %s", repoPath),
		"git", "clone", repoPath, tmpDir)
	if err != nil {
		return repo, errors.New("git clone: " + stderr)
	}

	// TODO: need timeout.
	// Pull data from source.
	if _, stderr, err = process.ExecDir(
		tmpDir, fmt.Sprintf("MigrateRepository(git pull): %s", repoPath),
		"git", "pull", url); err != nil {
		return repo, errors.New("git pull: " + stderr)
	}

	// TODO: need timeout.
	// Push data to local repository.
	if _, stderr, err = process.ExecDir(
		tmpDir, fmt.Sprintf("MigrateRepository(git push): %s", repoPath),
		"git", "push", "origin", "master"); err != nil {
		return repo, errors.New("git push: " + stderr)
	}

	return repo, UpdateRepository(repo)
}
// extractGitBareZip unpacks the bundled git-bare.zip skeleton into
// repoPath, creating the initial bare repository layout.
func extractGitBareZip(repoPath string) error {
	archive, err := zip.Open(filepath.Join(setting.RepoRootPath, "git-bare.zip"))
	if err != nil {
		return err
	}
	defer archive.Close()
	return archive.ExtractTo(repoPath)
}
// initRepoCommit stages everything inside tmpPath, creates the
// "Init commit" authored by sig, and pushes it to origin/master.
func initRepoCommit(tmpPath string, sig *git.Signature) error {
	run := func(desc string, args ...string) error {
		_, stderr, err := process.ExecDir(
			tmpPath, fmt.Sprintf("initRepoCommit(%s): %s", desc, tmpPath),
			"git", args...)
		if err != nil {
			return errors.New(desc + ": " + stderr)
		}
		return nil
	}

	if err := run("git add", "add", "--all"); err != nil {
		return err
	}
	if err := run("git commit", "commit",
		fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
		"-m", "Init commit"); err != nil {
		return err
	}
	return run("git push", "push", "origin", "master")
}
// createHookUpdate writes 'content' as the executable hook script at
// hookPath, replacing any previous content entirely.
func createHookUpdate(hookPath, content string) error {
	// FIX: O_TRUNC is required here. Without it, rewriting an existing
	// hook with a shorter script left trailing bytes of the old script
	// in the file, producing a corrupt hook.
	f, err := os.OpenFile(hookPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0777)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString(content)
	return err
}
// SetRepoEnvs sets the environment variables consumed by the spawned
// 'update' hook process (see TPL_UPDATE_HOOK) to identify the acting
// user and the repository being pushed to.
// NOTE(review): os.Setenv errors are ignored here — presumably
// acceptable on supported platforms, but worth confirming.
func SetRepoEnvs(userId int64, userName, repoName, repoUserName string) {
	os.Setenv("userId", base.ToStr(userId))
	os.Setenv("userName", userName)
	os.Setenv("repoName", repoName)
	os.Setenv("repoUserName", repoUserName)
}
// initRepository populates a freshly created bare repository: it
// extracts the git-bare.zip skeleton, installs the update hook, and —
// when a readme, .gitignore language, or license was requested — makes
// the initial commit from a temporary clone.
// NOTE(review): parameter 'f' is never used in the body — confirm it
// can be dropped at the call sites.
func initRepository(f string, user *User, repo *Repository, initReadme bool, repoLang, license string) error {
	repoPath := RepoPath(user.Name, repo.Name)

	// Create bare new repository.
	if err := extractGitBareZip(repoPath); err != nil {
		return err
	}

	// Escape backslashes and spaces in the binary path for the hook
	// shell script.
	rp := strings.NewReplacer("\\", "/", " ", "\\ ")
	// hook/post-update
	if err := createHookUpdate(filepath.Join(repoPath, "hooks", "update"),
		fmt.Sprintf(TPL_UPDATE_HOOK, setting.ScriptType,
			rp.Replace(appPath))); err != nil {
		return err
	}

	// Initialize repository according to user's choice.
	fileName := map[string]string{}
	if initReadme {
		fileName["readme"] = "README.md"
	}
	if repoLang != "" {
		fileName["gitign"] = ".gitignore"
	}
	if license != "" {
		fileName["license"] = "LICENSE"
	}

	// Clone to temporary path and do the init commit.
	// NOTE(review): tmpDir is never removed afterwards — looks like a
	// temp-directory leak; confirm and clean up.
	tmpDir := filepath.Join(os.TempDir(), base.ToStr(time.Now().Nanosecond()))
	os.MkdirAll(tmpDir, os.ModePerm)

	_, stderr, err := process.Exec(
		fmt.Sprintf("initRepository(git clone): %s", repoPath),
		"git", "clone", repoPath, tmpDir)
	if err != nil {
		return errors.New("initRepository(git clone): " + stderr)
	}

	// README: "<name>\n====\n\n<description>".
	if initReadme {
		defaultReadme := repo.Name + "\n" + strings.Repeat("=",
			utf8.RuneCountInString(repo.Name)) + "\n\n" + repo.Description
		if err := ioutil.WriteFile(filepath.Join(tmpDir, fileName["readme"]),
			[]byte(defaultReadme), 0644); err != nil {
			return err
		}
	}

	// .gitignore: embedded asset first, then the custom directory.
	if repoLang != "" {
		filePath := "conf/gitignore/" + repoLang
		targetPath := path.Join(tmpDir, fileName["gitign"])
		data, err := bin.Asset(filePath)
		if err == nil {
			if err = ioutil.WriteFile(targetPath, data, os.ModePerm); err != nil {
				return err
			}
		} else {
			// Check custom files.
			filePath = path.Join(setting.CustomPath, "conf/gitignore", repoLang)
			if com.IsFile(filePath) {
				if err := com.Copy(filePath, targetPath); err != nil {
					return err
				}
			}
		}
	}

	// LICENSE: same lookup order as .gitignore.
	if license != "" {
		filePath := "conf/license/" + license
		targetPath := path.Join(tmpDir, fileName["license"])
		data, err := bin.Asset(filePath)
		if err == nil {
			if err = ioutil.WriteFile(targetPath, data, os.ModePerm); err != nil {
				return err
			}
		} else {
			// Check custom files.
			filePath = path.Join(setting.CustomPath, "conf/license", license)
			if com.IsFile(filePath) {
				if err := com.Copy(filePath, targetPath); err != nil {
					return err
				}
			}
		}
	}

	// Nothing requested: leave the repository bare.
	if len(fileName) == 0 {
		return nil
	}

	SetRepoEnvs(user.Id, user.Name, repo.Name, user.Name)

	// Apply changes and commit.
	return initRepoCommit(tmpDir, user.NewGitSig())
}
// CreateRepository creates a repository for given user or organization.
//
// The database work (repository row, access rows, owner counters) runs
// inside one transaction. Watches, the news-feed action, and on-disk
// initialization happen after the commit; their failures are logged
// rather than returned, except for initRepository which triggers a
// best-effort cleanup of the created directory.
func CreateRepository(u *User, name, desc, lang, license string, private, mirror, initReadme bool) (*Repository, error) {
	if !IsLegalName(name) {
		return nil, ErrRepoNameIllegal
	}

	isExist, err := IsRepositoryExist(u, name)
	if err != nil {
		return nil, err
	} else if isExist {
		return nil, ErrRepoAlreadyExist
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return nil, err
	}

	repo := &Repository{
		OwnerId:     u.Id,
		Owner:       u,
		Name:        name,
		LowerName:   strings.ToLower(name),
		Description: desc,
		IsPrivate:   private,
		// Bare until a readme/.gitignore/license provides an initial commit.
		IsBare: lang == "" && license == "" && !initReadme,
	}
	if !repo.IsBare {
		repo.DefaultBranch = "master"
	}

	if _, err = sess.Insert(repo); err != nil {
		sess.Rollback()
		return nil, err
	}

	var t *Team // Owner team.

	// Mirrors are read-only, even for the owner.
	mode := WRITABLE
	if mirror {
		mode = READABLE
	}
	access := &Access{
		UserName: u.LowerName,
		RepoName: strings.ToLower(path.Join(u.Name, repo.Name)),
		Mode:     mode,
	}
	// Give access to all members in owner team.
	if u.IsOrganization() {
		t, err = u.GetOwnerTeam()
		if err != nil {
			sess.Rollback()
			return nil, err
		}
		us, err := GetTeamMembers(u.Id, t.Id)
		if err != nil {
			sess.Rollback()
			return nil, err
		}
		for _, u := range us {
			access.UserName = u.LowerName
			if _, err = sess.Insert(access); err != nil {
				sess.Rollback()
				return nil, err
			}
		}
	} else {
		if _, err = sess.Insert(access); err != nil {
			sess.Rollback()
			return nil, err
		}
	}

	rawSql := "UPDATE `user` SET num_repos = num_repos + 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, u.Id); err != nil {
		sess.Rollback()
		return nil, err
	}

	// Update owner team info and count.
	if u.IsOrganization() {
		t.RepoIds += "$" + base.ToStr(repo.Id) + "|"
		t.NumRepos++
		if _, err = sess.Id(t.Id).AllCols().Update(t); err != nil {
			sess.Rollback()
			return nil, err
		}
	}

	if err = sess.Commit(); err != nil {
		return nil, err
	}

	// From here on the repository row exists, so secondary bookkeeping
	// failures are logged instead of returned.
	if u.IsOrganization() {
		ous, err := GetOrgUsersByOrgId(u.Id)
		if err != nil {
			log.Error("repo.CreateRepository(GetOrgUsersByOrgId): %v", err)
		} else {
			for _, ou := range ous {
				if err = WatchRepo(ou.Uid, repo.Id, true); err != nil {
					log.Error("repo.CreateRepository(WatchRepo): %v", err)
				}
			}
		}
	}
	if err = WatchRepo(u.Id, repo.Id, true); err != nil {
		log.Error("repo.CreateRepository(WatchRepo2): %v", err)
	}
	if err = NewRepoAction(u, repo); err != nil {
		log.Error("repo.CreateRepository(NewRepoAction): %v", err)
	}

	// No need for init for mirror.
	if mirror {
		return repo, nil
	}

	repoPath := RepoPath(u.Name, repo.Name)
	if err = initRepository(repoPath, u, repo, initReadme, lang, license); err != nil {
		// Best-effort cleanup of the half-created directory.
		if err2 := os.RemoveAll(repoPath); err2 != nil {
			log.Error("repo.CreateRepository(initRepository): %v", err)
			return nil, errors.New(fmt.Sprintf(
				"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2))
		}
		return nil, err
	}

	_, stderr, err := process.ExecDir(
		repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
		"git", "update-server-info")
	if err != nil {
		return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
	}

	return repo, nil
}
// GetRepositoriesWithUsers returns 'num' repositories starting at
// 'offset', ordered by ascending id, with each Owner field populated.
func GetRepositoriesWithUsers(num, offset int) ([]*Repository, error) {
	repos := make([]*Repository, 0, num)
	if err := x.Limit(num, offset).Asc("id").Find(&repos); err != nil {
		return nil, err
	}

	for _, r := range repos {
		owner := &User{Id: r.OwnerId}
		has, err := x.Get(owner)
		if err != nil {
			return nil, err
		}
		if !has {
			return nil, ErrUserNotExist
		}
		r.Owner = owner
	}
	return repos, nil
}
// RepoPath returns the filesystem path of the bare repository owned
// by userName and named repoName.
func RepoPath(userName, repoName string) string {
	dirName := strings.ToLower(repoName) + ".git"
	return filepath.Join(UserPath(userName), dirName)
}
// TransferOwnership transfers all corresponding setting from old user to new one.
// It rewrites access records, repoints the repository row, adjusts
// both users' repository counters, and renames the on-disk directory,
// all inside one transaction.
// NOTE(review): IsWatching/WatchRepo and TransferRepoAction below run
// through the global engine, not 'sess', so their effects are outside
// the rollback — confirm this is acceptable.
func TransferOwnership(u *User, newOwner string, repo *Repository) (err error) {
	newUser, err := GetUserByName(newOwner)
	if err != nil {
		return err
	}

	// Update accesses.
	/*accesses := make([]Access, 0, 10)
	if err = x.Find(&accesses, &Access{RepoName: u.LowerName + "/" + repo.LowerName}); err != nil {
		return err
	}*/

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	access := &Access{
		RepoName: newUser.LowerName + "/" + repo.LowerName,
	}
	// First rename the old owner's own access record to the new user...
	sess.Where("repo_name = ?", u.LowerName+"/"+repo.LowerName)
	_, err = sess.And("user_name = ?", u.LowerName).Update(&Access{UserName: newUser.LowerName})
	if err != nil {
		sess.Rollback()
		return err
	}
	// ...then repoint every remaining access record to the new repo name.
	_, err = sess.Where("repo_name = ?", u.LowerName+"/"+repo.LowerName).Update(access)
	if err != nil {
		sess.Rollback()
		return err
	}

	/*
		for i := range accesses {
			accesses[i].RepoName = newUser.LowerName + "/" + repo.LowerName
			if accesses[i].UserName == u.LowerName {
				accesses[i].UserName = newUser.LowerName
			}
			if err = UpdateAccessWithSession(sess, &accesses[i]); err != nil {
				return err
			}
		}*/

	// Update repository.
	repo.OwnerId = newUser.Id
	if _, err := sess.Id(repo.Id).Update(repo); err != nil {
		sess.Rollback()
		return err
	}

	// Update user repository number.
	rawSql := "UPDATE `user` SET num_repos = num_repos + 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, newUser.Id); err != nil {
		sess.Rollback()
		return err
	}

	rawSql = "UPDATE `user` SET num_repos = num_repos - 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, u.Id); err != nil {
		sess.Rollback()
		return err
	}

	// Add watch of new owner to repository.
	if !IsWatching(newUser.Id, repo.Id) {
		if err = WatchRepo(newUser.Id, repo.Id, true); err != nil {
			sess.Rollback()
			return err
		}
	}

	if err = TransferRepoAction(u, newUser, repo); err != nil {
		sess.Rollback()
		return err
	}

	// Change repository directory name.
	if err = os.Rename(RepoPath(u.Name, repo.Name), RepoPath(newUser.Name, repo.Name)); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}
// ChangeRepositoryName rewrites every access record referencing the
// old repository name and renames the repository directory on disk.
// The access updates and the rename are tied together in one
// transaction.
func ChangeRepositoryName(userName, oldRepoName, newRepoName string) (err error) {
	// Update accesses.
	accesses := make([]Access, 0, 10)
	if err = x.Find(&accesses, &Access{RepoName: strings.ToLower(userName + "/" + oldRepoName)}); err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	for i := range accesses {
		// FIX: access RepoName values are stored and queried
		// lower-cased everywhere else in this file (see the Find above
		// and DeleteRepository); the old code wrote the mixed-case
		// form here, breaking subsequent lookups.
		accesses[i].RepoName = strings.ToLower(userName + "/" + newRepoName)
		if err = UpdateAccessWithSession(sess, &accesses[i]); err != nil {
			// FIX: the transaction was previously left open on this
			// error path.
			sess.Rollback()
			return err
		}
	}

	// Change repository directory name.
	if err = os.Rename(RepoPath(userName, oldRepoName), RepoPath(userName, newRepoName)); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}
// UpdateRepository syncs the lower-cased name, clamps Description and
// Website to 255 bytes, and writes all columns back to the database.
func UpdateRepository(repo *Repository) error {
	repo.LowerName = strings.ToLower(repo.Name)

	const limit = 255
	if len(repo.Description) > limit {
		repo.Description = repo.Description[:limit]
	}
	if len(repo.Website) > limit {
		repo.Website = repo.Website[:limit]
	}

	_, err := x.Id(repo.Id).AllCols().Update(repo)
	return err
}
// DeleteRepository deletes a repository of a user or organization:
// the repository row plus all dependent records (accesses, actions,
// watches, mirrors, issue-user links, milestones, releases, issues
// and their comments), decrements the owner's repository counter, and
// finally removes the on-disk directory — all inside one transaction.
func DeleteRepository(userId, repoId int64, userName string) error {
	repo := &Repository{Id: repoId, OwnerId: userId}
	has, err := x.Get(repo)
	if err != nil {
		return err
	} else if !has {
		return ErrRepoNotExist
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	if _, err = sess.Delete(&Repository{Id: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Delete(&Access{RepoName: strings.ToLower(path.Join(userName, repo.Name))}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Delete(&Action{RepoId: repo.Id}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Watch{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Mirror{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&IssueUser{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Milestone{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err = sess.Delete(&Release{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}

	// Delete comments.
	// NOTE(review): the iteration runs on the global engine 'x' while
	// the deletes go through 'sess' — confirm this mix is intended.
	if err = x.Iterate(&Issue{RepoId: repoId}, func(idx int, bean interface{}) error {
		issue := bean.(*Issue)
		if _, err = sess.Delete(&Comment{IssueId: issue.Id}); err != nil {
			sess.Rollback()
			return err
		}
		return nil
	}); err != nil {
		sess.Rollback()
		return err
	}

	if _, err = sess.Delete(&Issue{RepoId: repoId}); err != nil {
		sess.Rollback()
		return err
	}

	rawSql := "UPDATE `user` SET num_repos = num_repos - 1 WHERE id = ?"
	if _, err = sess.Exec(rawSql, userId); err != nil {
		sess.Rollback()
		return err
	}

	if err = os.RemoveAll(RepoPath(userName, repo.Name)); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}
// GetRepositoryByName returns the repository owned by userId whose
// lower-cased name matches repoName, or ErrRepoNotExist.
func GetRepositoryByName(userId int64, repoName string) (*Repository, error) {
	repo := &Repository{
		OwnerId:   userId,
		LowerName: strings.ToLower(repoName),
	}
	has, err := x.Get(repo)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrRepoNotExist
	}
	return repo, nil
}
// GetRepositoryById returns the repository with the given id, or
// ErrRepoNotExist when it is absent.
func GetRepositoryById(id int64) (*Repository, error) {
	repo := new(Repository)
	switch has, err := x.Id(id).Get(repo); {
	case err != nil:
		return nil, err
	case !has:
		return nil, ErrRepoNotExist
	}
	return repo, nil
}
// GetRepositories returns the repositories owned by uid, most recently
// updated first. Private repositories are included only when 'private'
// is true.
func GetRepositories(uid int64, private bool) ([]*Repository, error) {
	result := make([]*Repository, 0, 10)
	query := x.Desc("updated")
	if !private {
		query.Where("is_private=?", false)
	}
	err := query.Find(&result, &Repository{OwnerId: uid})
	return result, err
}
// GetRecentUpdatedRepositories returns the five most recently updated
// public repositories.
func GetRecentUpdatedRepositories() ([]*Repository, error) {
	var repos []*Repository
	err := x.Where("is_private=?", false).Limit(5).Desc("updated").Find(&repos)
	return repos, err
}
// GetRepositoryCount returns how many repositories the user owns.
func GetRepositoryCount(user *User) (int64, error) {
	bean := Repository{OwnerId: user.Id}
	return x.Count(&bean)
}
// GetCollaboratorNames returns the user names of everyone holding an
// access record on the named repository.
func GetCollaboratorNames(repoName string) ([]string, error) {
	accesses := make([]*Access, 0, 10)
	if err := x.Find(&accesses, &Access{RepoName: strings.ToLower(repoName)}); err != nil {
		return nil, err
	}

	names := make([]string, 0, len(accesses))
	for _, a := range accesses {
		names = append(names, a.UserName)
	}
	return names, nil
}
// GetCollaborativeRepos returns the repositories 'uname' has access to
// but does not own, with each Owner field populated.
func GetCollaborativeRepos(uname string) ([]*Repository, error) {
	uname = strings.ToLower(uname)
	accesses := make([]*Access, 0, 10)
	if err := x.Find(&accesses, &Access{UserName: uname}); err != nil {
		return nil, err
	}

	repos := make([]*Repository, 0, 10)
	for _, access := range accesses {
		infos := strings.Split(access.RepoName, "/")
		// FIX: guard against malformed access records; the old code
		// indexed infos[1] unconditionally and would panic on a
		// RepoName that contains no '/'.
		if len(infos) != 2 {
			continue
		}
		// Skip repositories the user owns themselves.
		if infos[0] == uname {
			continue
		}

		u, err := GetUserByName(infos[0])
		if err != nil {
			return nil, err
		}
		repo, err := GetRepositoryByName(u.Id, infos[1])
		if err != nil {
			return nil, err
		}
		repo.Owner = u
		repos = append(repos, repo)
	}
	return repos, nil
}
// GetCollaborators returns the User objects of everyone holding an
// access record on the named repository.
func GetCollaborators(repoName string) ([]*User, error) {
	accesses := make([]*Access, 0, 10)
	if err := x.Find(&accesses, &Access{RepoName: strings.ToLower(repoName)}); err != nil {
		return nil, err
	}

	users := make([]*User, len(accesses))
	for i, a := range accesses {
		u, err := GetUserByName(a.UserName)
		if err != nil {
			return nil, err
		}
		users[i] = u
	}
	return users, nil
}
// Watch is the join record connecting a user to a repository whose
// activity they receive notifications for. The (UserId, RepoId) pair
// is unique.
type Watch struct {
	Id     int64
	UserId int64 `xorm:"UNIQUE(watch)"`
	RepoId int64 `xorm:"UNIQUE(watch)"`
}
// WatchRepo makes user uid watch (watch == true) or unwatch the
// repository rid, keeping the repository's num_watches counter in sync.
func WatchRepo(uid, rid int64, watch bool) (err error) {
	if watch {
		if _, err = x.Insert(&Watch{RepoId: rid, UserId: uid}); err != nil {
			return err
		}
		_, err = x.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", rid)
		return err
	}

	if _, err = x.Delete(&Watch{0, uid, rid}); err != nil {
		return err
	}
	_, err = x.Exec("UPDATE `repository` SET num_watches = num_watches - 1 WHERE id = ?", rid)
	return err
}
// GetWatchers returns every watch record of the given repository.
func GetWatchers(rid int64) ([]*Watch, error) {
	result := make([]*Watch, 0, 10)
	err := x.Find(&result, &Watch{RepoId: rid})
	return result, err
}
// NotifyWatchers inserts one copy of act into the news feed of the
// acting user and of every watcher of the repository (excluding the
// actor themselves).
func NotifyWatchers(act *Action) error {
	// Add feeds for user self and all watchers.
	watches, err := GetWatchers(act.RepoId)
	if err != nil {
		return errors.New("repo.NotifyWatchers(get watches): " + err.Error())
	}

	// Feed for the actor itself.
	act.UserId = act.ActUserId
	if _, err = x.InsertOne(act); err != nil {
		return errors.New("repo.NotifyWatchers(create action): " + err.Error())
	}

	// One copy per watcher; resetting Id makes xorm insert a new row.
	for _, w := range watches {
		if w.UserId == act.ActUserId {
			continue
		}
		act.Id = 0
		act.UserId = w.UserId
		if _, err = x.InsertOne(act); err != nil {
			return errors.New("repo.NotifyWatchers(create action): " + err.Error())
		}
	}
	return nil
}
// IsWatching reports whether user uid watches repository rid; lookup
// errors are deliberately treated as "not watching".
func IsWatching(uid, rid int64) bool {
	watching, _ := x.Get(&Watch{0, uid, rid})
	return watching
}
// ForkRepository forks a repository.
// NOTE(review): unimplemented stub — the body is intentionally empty.
func ForkRepository(repoName string, uid int64) {
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"code.gitea.io/git"
"code.gitea.io/gitea/modules/bindata"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/sync"
api "code.gitea.io/sdk/gitea"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
version "github.com/mcuadros/go-version"
ini "gopkg.in/ini.v1"
"code.gitea.io/gitea/modules/scrub"
)
const (
	// tplUpdateHook is the shell template of the git 'update' hook: it
	// runs the server binary's update command with the config path and
	// then any custom update script installed under custom/bin.
	tplUpdateHook = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n\"%s/custom/bin/update\" $1 $2 $3 \"%s\"\n"
)

// repoWorkingPool serializes operations that share a repository's
// local working copy.
var repoWorkingPool = sync.NewExclusivePool()

var (
	// ErrRepoFileNotExist repository file does not exist error
	ErrRepoFileNotExist = errors.New("Repository file does not exist")
	// ErrRepoFileNotLoaded repository file not loaded error
	ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
	// ErrMirrorNotExist mirror does not exist error
	ErrMirrorNotExist = errors.New("Mirror does not exist")
	// ErrInvalidReference invalid reference specified error
	ErrInvalidReference = errors.New("Invalid reference specified")
	// ErrNameEmpty name is empty error
	ErrNameEmpty = errors.New("Name is empty")
)

var (
	// Gitignores contains the gitignore files
	Gitignores []string
	// Licenses contains the license files
	Licenses []string
	// Readmes contains the readme files
	Readmes []string
	// LabelTemplates contains the label template files
	LabelTemplates []string
	// ItemsPerPage maximum items per page in forks, watchers and stars of a repo
	ItemsPerPage = 40
)
// LoadRepoConfig loads the repository template configuration: the
// .gitignore, license, readme and label template name lists, merged
// from embedded assets and the custom conf directory, each sorted.
// Preferred licenses are moved to the front of the Licenses list.
func LoadRepoConfig() {
	// Load .gitignore and license files and readme templates.
	types := []string{"gitignore", "license", "readme", "label"}
	typeFiles := make([][]string, 4)
	for i, t := range types {
		files, err := bindata.AssetDir("conf/" + t)
		if err != nil {
			log.Fatal(4, "Fail to get %s files: %v", t, err)
		}
		// Custom files extend the built-in set (deduplicated by name).
		customPath := path.Join(setting.CustomPath, "conf", t)
		if com.IsDir(customPath) {
			customFiles, err := com.StatDir(customPath)
			if err != nil {
				log.Fatal(4, "Fail to get custom %s files: %v", t, err)
			}
			for _, f := range customFiles {
				if !com.IsSliceContainsStr(files, f) {
					files = append(files, f)
				}
			}
		}
		typeFiles[i] = files
	}

	Gitignores = typeFiles[0]
	Licenses = typeFiles[1]
	Readmes = typeFiles[2]
	LabelTemplates = typeFiles[3]
	sort.Strings(Gitignores)
	sort.Strings(Licenses)
	sort.Strings(Readmes)
	sort.Strings(LabelTemplates)

	// Promote the administrator's preferred licenses to the front of
	// the list, preserving relative order within each group.
	sortedLicenses := make([]string, 0, len(Licenses))
	for _, name := range setting.Repository.PreferredLicenses {
		if com.IsSliceContainsStr(Licenses, name) {
			sortedLicenses = append(sortedLicenses, name)
		}
	}
	for _, name := range Licenses {
		if !com.IsSliceContainsStr(setting.Repository.PreferredLicenses, name) {
			sortedLicenses = append(sortedLicenses, name)
		}
	}
	Licenses = sortedLicenses
}
// NewRepoContext validates the Git environment at startup: the git
// binary must be on PATH and at least version 1.7.1, user.name and
// user.email must be configured (defaults are installed when absent),
// core.quotepath is disabled, and leftover repository temp data is
// cleaned up. Any failure is fatal.
func NewRepoContext() {
	zip.Verbose = false

	// Check Git installation.
	if _, err := exec.LookPath("git"); err != nil {
		log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err)
	}

	// Check Git version.
	gitVer, err := git.BinVersion()
	if err != nil {
		log.Fatal(4, "Fail to get Git version: %v", err)
	}
	log.Info("Git Version: %s", gitVer)
	if version.Compare("1.7.1", gitVer, ">") {
		log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1")
	}

	// Git requires setting user.name and user.email in order to commit changes.
	for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "gogs@fake.local"} {
		if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
			// ExitError indicates this config is not set
			if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
				if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
					log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr)
				}
				log.Info("Git config %s set to %s", configKey, defaultValue)
			} else {
				log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr)
			}
		}
	}

	// Set git some configurations.
	if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)",
		"git", "config", "--global", "core.quotepath", "false"); err != nil {
		log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr)
	}

	RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
}
// Repository represents a git repository.
type Repository struct {
	ID      int64 `xorm:"pk autoincr"`
	OwnerID int64 `xorm:"UNIQUE(s)"`
	Owner   *User `xorm:"-"` // loaded lazily via getOwner; not persisted
	// LowerName is the lower-cased Name used for unique lookups.
	LowerName     string `xorm:"UNIQUE(s) INDEX NOT NULL"`
	Name          string `xorm:"INDEX NOT NULL"`
	Description   string
	Website       string
	DefaultBranch string

	// Denormalized counters; the NumOpen* fields are derived in
	// AfterSet and never stored.
	NumWatches          int
	NumStars            int
	NumForks            int
	NumIssues           int
	NumClosedIssues     int
	NumOpenIssues       int `xorm:"-"`
	NumPulls            int
	NumClosedPulls      int
	NumOpenPulls        int `xorm:"-"`
	NumMilestones       int `xorm:"NOT NULL DEFAULT 0"`
	NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
	NumOpenMilestones   int `xorm:"-"`
	NumTags             int `xorm:"-"`

	IsPrivate bool
	IsBare    bool
	IsMirror  bool
	*Mirror   `xorm:"-"` // embedded mirror record; not persisted

	// Advanced settings
	EnableWiki            bool `xorm:"NOT NULL DEFAULT true"`
	EnableExternalWiki    bool
	ExternalWikiURL       string
	EnableIssues          bool `xorm:"NOT NULL DEFAULT true"`
	EnableExternalTracker bool
	ExternalTrackerURL    string
	ExternalTrackerFormat string
	ExternalTrackerStyle  string
	ExternalMetas         map[string]string `xorm:"-"` // cached by ComposeMetas
	EnablePulls           bool              `xorm:"NOT NULL DEFAULT true"`

	IsFork   bool `xorm:"NOT NULL DEFAULT false"`
	ForkID   int64
	BaseRepo *Repository `xorm:"-"`

	// Timestamps: the *Unix fields are what is stored; the time.Time
	// views are reconstructed in AfterSet.
	Created     time.Time `xorm:"-"`
	CreatedUnix int64
	Updated     time.Time `xorm:"-"`
	UpdatedUnix int64
}
// BeforeInsert is invoked by XORM before inserting a Repository; it
// stamps both Unix timestamps with the current time.
func (repo *Repository) BeforeInsert() {
	now := time.Now().Unix()
	repo.CreatedUnix = now
	repo.UpdatedUnix = now
}
// BeforeUpdate is invoked by XORM before updating a Repository; it
// refreshes the UpdatedUnix timestamp.
func (repo *Repository) BeforeUpdate() {
	now := time.Now()
	repo.UpdatedUnix = now.Unix()
}
// AfterSet is invoked from XORM after setting the value of a field of
// this object. It derives the non-persisted fields: open counters,
// the time.Time views of the Unix timestamps, and defaults for the
// default branch and external tracker style.
func (repo *Repository) AfterSet(colName string, _ xorm.Cell) {
	switch colName {
	case "default_branch":
		// FIXME: use models migration to solve all at once.
		if len(repo.DefaultBranch) == 0 {
			repo.DefaultBranch = "master"
		}
	case "num_closed_issues":
		repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
	case "num_closed_pulls":
		repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
	case "num_closed_milestones":
		repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
	case "external_tracker_style":
		if len(repo.ExternalTrackerStyle) == 0 {
			repo.ExternalTrackerStyle = markdown.IssueNameStyleNumeric
		}
	case "created_unix":
		repo.Created = time.Unix(repo.CreatedUnix, 0).Local()
	case "updated_unix":
		// NOTE(review): Created is converted with .Local() above but
		// Updated is not — confirm whether this asymmetry is intended.
		repo.Updated = time.Unix(repo.UpdatedUnix, 0)
	}
}
// MustOwner always returns a valid *User object to avoid
// conceptually impossible error handling.
// It creates a fake object that contains error detail
// when error occurs.
func (repo *Repository) MustOwner() *User {
	return repo.mustOwner(x)
}
// FullName returns "<owner>/<name>" for the repository.
func (repo *Repository) FullName() string {
	owner := repo.MustOwner()
	return owner.Name + "/" + repo.Name
}
// HTMLURL returns the absolute web URL of the repository, built from
// the configured application URL and the repository's full name.
func (repo *Repository) HTMLURL() string {
	base := setting.AppURL
	return base + repo.FullName()
}
// APIFormat converts a Repository to api.Repository for the REST API.
// The caller's access mode determines the permission flags embedded in
// the result. repo.Owner must already be loaded (it is dereferenced
// for the owner's API representation).
func (repo *Repository) APIFormat(mode AccessMode) *api.Repository {
	cloneLink := repo.CloneLink()
	permission := &api.Permission{
		Admin: mode >= AccessModeAdmin,
		Push:  mode >= AccessModeWrite,
		Pull:  mode >= AccessModeRead,
	}
	return &api.Repository{
		ID:            repo.ID,
		Owner:         repo.Owner.APIFormat(),
		Name:          repo.Name,
		FullName:      repo.FullName(),
		Description:   repo.Description,
		Private:       repo.IsPrivate,
		Fork:          repo.IsFork,
		HTMLURL:       repo.HTMLURL(),
		SSHURL:        cloneLink.SSH,
		CloneURL:      cloneLink.HTTPS,
		Website:       repo.Website,
		Stars:         repo.NumStars,
		Forks:         repo.NumForks,
		Watchers:      repo.NumWatches,
		OpenIssues:    repo.NumOpenIssues,
		DefaultBranch: repo.DefaultBranch,
		Created:       repo.Created,
		Updated:       repo.Updated,
		Permissions:   permission,
	}
}
// getOwner lazily loads the owner user into repo.Owner using the given
// engine. It is a no-op when the owner is already populated.
func (repo *Repository) getOwner(e Engine) (err error) {
	if repo.Owner == nil {
		repo.Owner, err = getUserByID(e, repo.OwnerID)
	}
	return err
}
// GetOwner loads the repository owner into repo.Owner using the default
// engine. It returns an error only when the database lookup fails.
func (repo *Repository) GetOwner() error {
	return repo.getOwner(x)
}
// mustOwner returns the repository owner, or a placeholder *User carrying
// the error text in its FullName when the owner cannot be loaded.
func (repo *Repository) mustOwner(e Engine) *User {
	err := repo.getOwner(e)
	if err == nil {
		return repo.Owner
	}
	return &User{
		Name:     "error",
		FullName: err.Error(),
	}
}
// ComposeMetas composes a map of metas for rendering external issue tracker URL.
// It returns nil when the external tracker is disabled, and memoizes the map
// on repo.ExternalMetas after the first call.
func (repo *Repository) ComposeMetas() map[string]string {
	// Guard clause instead of else-after-return keeps the happy path flat.
	if !repo.EnableExternalTracker {
		return nil
	}
	if repo.ExternalMetas == nil {
		repo.ExternalMetas = map[string]string{
			"format": repo.ExternalTrackerFormat,
			"user":   repo.MustOwner().Name,
			"repo":   repo.Name,
		}
		// Any unknown style falls back to numeric.
		switch repo.ExternalTrackerStyle {
		case markdown.IssueNameStyleAlphanumeric:
			repo.ExternalMetas["style"] = markdown.IssueNameStyleAlphanumeric
		default:
			repo.ExternalMetas["style"] = markdown.IssueNameStyleNumeric
		}
	}
	return repo.ExternalMetas
}
// DeleteWiki removes the actual and local copy of repository wiki.
// Failures are reported through RemoveAllWithNotice rather than returned.
func (repo *Repository) DeleteWiki() {
	for _, p := range []string{repo.WikiPath(), repo.LocalWikiPath()} {
		RemoveAllWithNotice("Delete repository wiki", p)
	}
}
// getAssignees returns all users with at least write access to the
// repository — the set of valid issue assignees. An individual owner is
// appended to the result; an organization owner is not.
func (repo *Repository) getAssignees(e Engine) (_ []*User, err error) {
	if err = repo.getOwner(e); err != nil {
		return nil, err
	}
	accesses := make([]*Access, 0, 10)
	if err = e.
		Where("repo_id = ? AND mode >= ?", repo.ID, AccessModeWrite).
		Find(&accesses); err != nil {
		return nil, err
	}
	// Leave a seat for owner itself to append later, but if owner is an organization
	// and just waste 1 unit is cheaper than re-allocate memory once.
	users := make([]*User, 0, len(accesses)+1)
	if len(accesses) > 0 {
		userIDs := make([]int64, len(accesses))
		for i := 0; i < len(accesses); i++ {
			userIDs[i] = accesses[i].UserID
		}
		if err = e.In("id", userIDs).Find(&users); err != nil {
			return nil, err
		}
	}
	// Organizations cannot be assigned to issues; individuals can.
	if !repo.Owner.IsOrganization() {
		users = append(users, repo.Owner)
	}
	return users, nil
}
// GetAssignees returns all users that have write access and can be assigned to issues
// of the repository, using the default engine.
func (repo *Repository) GetAssignees() (_ []*User, err error) {
	return repo.getAssignees(x)
}
// GetAssigneeByID returns the user that has write access of repository by given ID.
// It delegates to the package-level GetAssigneeByID helper.
func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) {
	return GetAssigneeByID(repo, userID)
}
// GetMilestoneByID returns the milestone belonging to this repository with
// the given ID; the lookup is scoped by repo.ID.
func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) {
	return GetMilestoneByRepoID(repo.ID, milestoneID)
}
// IssueStats returns the number of open and closed issues (or pull requests,
// when isPull is true) of the repository for the given user and filter mode.
func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) {
	return GetRepoIssueStats(repo.ID, uid, filterMode, isPull)
}
// GetMirror loads the mirror record into repo.Mirror, returning an error
// upon lookup failure.
func (repo *Repository) GetMirror() (err error) {
	repo.Mirror, err = GetMirrorByRepoID(repo.ID)
	return err
}
// GetBaseRepo loads the fork's base repository into repo.BaseRepo.
// It is a no-op (and returns nil) for non-fork repositories.
func (repo *Repository) GetBaseRepo() (err error) {
	if repo.IsFork {
		repo.BaseRepo, err = GetRepositoryByID(repo.ForkID)
	}
	return err
}
// repoPath returns the on-disk path of the bare repository, resolving the
// owner through the given engine (placeholder owner on lookup failure).
func (repo *Repository) repoPath(e Engine) string {
	return RepoPath(repo.mustOwner(e).Name, repo.Name)
}
// RepoPath returns the on-disk repository path using the default engine.
func (repo *Repository) RepoPath() string {
	return repo.repoPath(x)
}
// GitConfigPath returns the path of the "config" file inside the bare
// repository directory.
func (repo *Repository) GitConfigPath() string {
	return filepath.Join(repo.RepoPath(), "config")
}
// RelLink returns the root-relative link of the repository ("/owner/name").
func (repo *Repository) RelLink() string {
	return fmt.Sprintf("/%s", repo.FullName())
}
// Link returns the repository link under the application sub-URL.
func (repo *Repository) Link() string {
	return setting.AppSubURL + "/" + repo.FullName()
}
// ComposeCompareURL returns the repository comparison URL between two commits.
// NOTE(review): the result has no scheme/host prefix ("owner/name/compare/...");
// confirm callers prepend the base URL where an absolute URL is needed.
func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
	return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID)
}
// HasAccess returns true when user has at least read access to this
// repository. A lookup error is deliberately ignored and treated as "no
// access".
func (repo *Repository) HasAccess(u *User) bool {
	has, _ := HasAccess(u, repo, AccessModeRead)
	return has
}
// IsOwnedBy reports whether the user with the given ID owns this repository.
func (repo *Repository) IsOwnedBy(userID int64) bool {
	return userID == repo.OwnerID
}
// CanBeForked returns true if repository meets the requirements of being forked
// (i.e. it is not a bare repository).
func (repo *Repository) CanBeForked() bool {
	return !repo.IsBare
}
// CanEnablePulls returns true if repository meets the requirements of accepting
// pulls (mirrors cannot accept pull requests).
func (repo *Repository) CanEnablePulls() bool {
	return !repo.IsMirror
}
// AllowsPulls returns true if the repository can accept pull requests and
// has the feature enabled.
func (repo *Repository) AllowsPulls() bool {
	if !repo.CanEnablePulls() {
		return false
	}
	return repo.EnablePulls
}
// CanEnableEditor returns true if repository meets the requirements of the web
// editor (mirrors are read-only and excluded).
func (repo *Repository) CanEnableEditor() bool {
	return !repo.IsMirror
}
// NextIssueIndex returns the next issue index. Issues and pull requests share
// one index sequence, hence the sum of both counters.
// FIXME: should have a mutex to prevent producing same index for two issues that are created
// closely enough.
func (repo *Repository) NextIssueIndex() int64 {
	return int64(repo.NumIssues+repo.NumPulls) + 1
}
var (
	// descPattern matches bare http/https URLs inside the description text.
	descPattern = regexp.MustCompile(`https?://\S+`)
)

// DescriptionHTML does special handles to description and return HTML string.
// The description is sanitized first, then every URL is wrapped in an anchor
// that opens in a new tab with rel="noopener".
func (repo *Repository) DescriptionHTML() template.HTML {
	sanitize := func(s string) string {
		return fmt.Sprintf(`<a href="%[1]s" target="_blank" rel="noopener">%[1]s</a>`, s)
	}
	return template.HTML(descPattern.ReplaceAllStringFunc(markdown.Sanitizer.Sanitize(repo.Description), sanitize))
}
// LocalCopyPath returns the local working-copy path of the repository.
// NOTE(review): "local-rpeo" is a historical typo baked into on-disk paths of
// existing installations — do NOT "fix" it without a migration.
func (repo *Repository) LocalCopyPath() string {
	return path.Join(setting.AppDataPath, "tmp/local-rpeo", com.ToStr(repo.ID))
}
// UpdateLocalCopyBranch pulls latest changes of given branch from repoPath to localPath.
// It creates a new clone if local copy does not exist.
// This function checks out target branch by default, it is safe to assume subsequent
// operations are operating against target branch when caller has confidence for no race condition.
func UpdateLocalCopyBranch(repoPath, localPath, branch string) error {
	if !com.IsExist(localPath) {
		// First use: clone the target branch directly.
		if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{
			Timeout: time.Duration(setting.Git.Timeout.Clone) * time.Second,
			Branch:  branch,
		}); err != nil {
			return fmt.Errorf("git clone %s: %v", branch, err)
		}
	} else {
		// Existing copy: switch to the branch, then fast-forward from origin.
		if err := git.Checkout(localPath, git.CheckoutOptions{
			Branch: branch,
		}); err != nil {
			return fmt.Errorf("git checkout %s: %v", branch, err)
		}
		if err := git.Pull(localPath, git.PullRemoteOptions{
			Timeout: time.Duration(setting.Git.Timeout.Pull) * time.Second,
			Remote:  "origin",
			Branch:  branch,
		}); err != nil {
			return fmt.Errorf("git pull origin %s: %v", branch, err)
		}
	}
	return nil
}
// UpdateLocalCopyBranch makes sure the local copy of the repository in the
// given branch is up-to-date, cloning or pulling as needed.
func (repo *Repository) UpdateLocalCopyBranch(branch string) error {
	return UpdateLocalCopyBranch(repo.RepoPath(), repo.LocalCopyPath(), branch)
}
// PatchPath returns the patch file path of the pull request with the given
// issue index, loading the owner first so the path can be composed.
func (repo *Repository) PatchPath(index int64) (string, error) {
	if err := repo.GetOwner(); err != nil {
		return "", err
	}
	fileName := com.ToStr(index) + ".patch"
	return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", fileName), nil
}
// SavePatch saves patch data to corresponding location by given issue ID,
// creating the parent directory when necessary.
func (repo *Repository) SavePatch(index int64, patch []byte) error {
	patchPath, err := repo.PatchPath(index)
	if err != nil {
		return fmt.Errorf("PatchPath: %v", err)
	}
	dir := filepath.Dir(patchPath)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("Fail to create dir %s: %v", dir, err)
	}
	if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil {
		return fmt.Errorf("WriteFile: %v", err)
	}
	return nil
}
// isRepositoryExist reports whether user u has a repository named repoName.
// A repository "exists" only when both the database row and the on-disk
// directory are present.
func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) {
	has, err := e.Get(&Repository{
		OwnerID:   u.ID,
		LowerName: strings.ToLower(repoName),
	})
	return has && com.IsDir(RepoPath(u.Name, repoName)), err
}
// IsRepositoryExist returns true if the repository with given name under user
// already exists (both database record and on-disk directory).
func IsRepositoryExist(u *User, repoName string) (bool, error) {
	return isRepositoryExist(x, u, repoName)
}
// CloneLink represents different types of clone URLs of repository.
type CloneLink struct {
	SSH   string // SSH clone URL
	HTTPS string // HTTPS clone URL
	Git   string // git protocol clone URL (not populated by cloneLink)
}
// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
func ComposeHTTPSCloneURL(owner, repo string) string {
return fmt.Sprintf("%s%s/%s.git", setting.AppURL, owner, repo)
}
// cloneLink builds SSH and HTTPS clone URLs for the repository (or its wiki
// when isWiki is true). The Git field of the result is left empty.
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
	repoName := repo.Name
	if isWiki {
		repoName += ".wiki"
	}
	repo.Owner = repo.MustOwner()
	cl := new(CloneLink)
	// Non-standard SSH port requires the explicit ssh:// URL form.
	if setting.SSH.Port != 22 {
		cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSH.Domain, setting.SSH.Port, repo.Owner.Name, repoName)
	} else {
		cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSH.Domain, repo.Owner.Name, repoName)
	}
	cl.HTTPS = ComposeHTTPSCloneURL(repo.Owner.Name, repoName)
	return cl
}
// CloneLink returns clone URLs of the repository itself (not its wiki).
func (repo *Repository) CloneLink() (cl *CloneLink) {
	return repo.cloneLink(false)
}
// MigrateRepoOptions contains the repository migrate options.
type MigrateRepoOptions struct {
	Name        string // target repository name
	Description string
	IsPrivate   bool
	IsMirror    bool   // keep the repository synchronized as a mirror
	RemoteAddr  string // source URL to migrate from
}
/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}

// wikiRemoteURL returns accessible repository URL for wiki if exists.
// Otherwise, it returns an empty string. Each candidate suffix is probed by
// an actual network accessibility check.
func wikiRemoteURL(remote string) string {
	remote = strings.TrimSuffix(remote, ".git")
	for _, suffix := range commonWikiURLSuffixes {
		wikiURL := remote + suffix
		if git.IsRepoURLAccessible(wikiURL) {
			return wikiURL
		}
	}
	return ""
}
// MigrateRepository migrates an existing repository from another project
// hosting service: it creates the database record, replaces the on-disk repo
// with a mirror clone of the remote (and of its wiki when reachable), detects
// bareness and HEAD branch, then either registers a Mirror record or runs the
// final cleanup for one-off migrations.
func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) {
	repo, err := CreateRepository(u, CreateRepoOptions{
		Name:        opts.Name,
		Description: opts.Description,
		IsPrivate:   opts.IsPrivate,
		IsMirror:    opts.IsMirror,
	})
	if err != nil {
		return nil, err
	}
	repoPath := RepoPath(u.Name, opts.Name)
	wikiPath := WikiPath(u.Name, opts.Name)
	// Seed watcher count: owner-team members for orgs, just the user otherwise.
	if u.IsOrganization() {
		t, err := u.GetOwnerTeam()
		if err != nil {
			return nil, err
		}
		repo.NumWatches = t.NumMembers
	} else {
		repo.NumWatches = 1
	}
	migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
	// CreateRepository initialized a repo on disk; replace it with a mirror clone.
	if err := os.RemoveAll(repoPath); err != nil {
		return repo, fmt.Errorf("Fail to remove %s: %v", repoPath, err)
	}
	if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{
		Mirror:  true,
		Quiet:   true,
		Timeout: migrateTimeout,
	}); err != nil {
		return repo, fmt.Errorf("Clone: %v", err)
	}
	// Wiki migration is best-effort: a failed clone is only logged.
	wikiRemotePath := wikiRemoteURL(opts.RemoteAddr)
	if len(wikiRemotePath) > 0 {
		if err := os.RemoveAll(wikiPath); err != nil {
			return repo, fmt.Errorf("Fail to remove %s: %v", wikiPath, err)
		}
		if err = git.Clone(wikiRemotePath, wikiPath, git.CloneRepoOptions{
			Mirror:  true,
			Quiet:   true,
			Timeout: migrateTimeout,
		}); err != nil {
			log.Info("Clone wiki: %v", err)
		}
	}
	// Check if repository is empty.
	_, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1")
	if err != nil {
		if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") {
			repo.IsBare = true
		} else {
			return repo, fmt.Errorf("check bare: %v - %s", err, stderr)
		}
	}
	if !repo.IsBare {
		// Try to get HEAD branch and set it as default branch.
		gitRepo, err := git.OpenRepository(repoPath)
		if err != nil {
			return repo, fmt.Errorf("OpenRepository: %v", err)
		}
		headBranch, err := gitRepo.GetHEADBranch()
		if err != nil {
			return repo, fmt.Errorf("GetHEADBranch: %v", err)
		}
		if headBranch != nil {
			repo.DefaultBranch = headBranch.Name
		}
	}
	if opts.IsMirror {
		// Mirrors keep their remote config and get a periodic-update record.
		if _, err = x.InsertOne(&Mirror{
			RepoID:      repo.ID,
			Interval:    setting.Mirror.DefaultInterval,
			EnablePrune: true,
			NextUpdate:  time.Now().Add(time.Duration(setting.Mirror.DefaultInterval) * time.Hour),
		}); err != nil {
			return repo, fmt.Errorf("InsertOne: %v", err)
		}
		repo.IsMirror = true
		return repo, UpdateRepository(repo, false)
	}
	// One-off migrations get hooks installed and mirror remotes stripped.
	return CleanUpMigrateInfo(repo)
}
// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials, since the origin remote URL
// (which may embed them) is deleted from the git config file.
func cleanUpMigrateGitConfig(configPath string) error {
	cfg, err := ini.Load(configPath)
	if err != nil {
		return fmt.Errorf("open config file: %v", err)
	}
	cfg.DeleteSection("remote \"origin\"")
	if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
		return fmt.Errorf("save config file: %v", err)
	}
	return nil
}
// createUpdateHook installs the git update hook script into the repository,
// rendered from tplUpdateHook with the configured script type and app paths.
func createUpdateHook(repoPath string) error {
	return git.SetUpdateHook(repoPath,
		fmt.Sprintf(tplUpdateHook, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf, path.Dir(setting.AppPath), setting.CustomConf))
}
// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that
// don't need to be done for mirrors: it installs update hooks and strips the
// mirror "origin" remote from the git config of both repo and wiki.
func CleanUpMigrateInfo(repo *Repository) (*Repository, error) {
	repoPath := repo.RepoPath()
	if err := createUpdateHook(repoPath); err != nil {
		return repo, fmt.Errorf("createUpdateHook: %v", err)
	}
	if repo.HasWiki() {
		if err := createUpdateHook(repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("createUpdateHook (wiki): %v", err)
		}
	}
	if err := cleanUpMigrateGitConfig(repo.GitConfigPath()); err != nil {
		return repo, fmt.Errorf("cleanUpMigrateGitConfig: %v", err)
	}
	if repo.HasWiki() {
		if err := cleanUpMigrateGitConfig(path.Join(repo.WikiPath(), "config")); err != nil {
			return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %v", err)
		}
	}
	return repo, UpdateRepository(repo, false)
}
// initRepoCommit stages everything in tmpPath, makes the initial commit with
// the given author signature, and pushes it to origin/master. Each git
// subcommand runs with tmpPath as its working directory.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
	var stderr string
	if _, stderr, err = process.ExecDir(-1,
		tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath),
		"git", "add", "--all"); err != nil {
		return fmt.Errorf("git add: %s", stderr)
	}
	if _, stderr, err = process.ExecDir(-1,
		tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath),
		"git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
		"-m", "Initial commit"); err != nil {
		return fmt.Errorf("git commit: %s", stderr)
	}
	if _, stderr, err = process.ExecDir(-1,
		tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath),
		"git", "push", "origin", "master"); err != nil {
		return fmt.Errorf("git push: %s", stderr)
	}
	return nil
}
// CreateRepoOptions contains the create repository options.
type CreateRepoOptions struct {
	Name        string
	Description string
	Gitignores  string // comma-separated .gitignore template names
	License     string // license template name
	Readme      string // readme template name
	IsPrivate   bool
	IsMirror    bool
	AutoInit    bool // create the initial commit (readme/gitignore/license)
}
// getRepoInitFile loads a repository init template (readme, gitignore or
// license) by type and name, preferring a custom on-disk override over the
// embedded asset.
func getRepoInitFile(tp, name string) ([]byte, error) {
	relPath := path.Join("conf", tp, strings.TrimLeft(name, "./"))
	customPath := path.Join(setting.CustomPath, relPath)
	// Fall back to the compiled-in asset when no custom file is present.
	if !com.IsFile(customPath) {
		return bindata.Asset(relPath)
	}
	return ioutil.ReadFile(customPath)
}
// prepareRepoCommit clones the bare repository into tmpDir and writes the
// initial files (README.md, .gitignore, LICENSE) selected by opts, ready for
// the initial commit to be made by initRepoCommit.
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
	// Clone to temporary path and do the init commit.
	_, stderr, err := process.Exec(
		fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
	if err != nil {
		return fmt.Errorf("git clone: %v - %s", err, stderr)
	}
	// README
	data, err := getRepoInitFile("readme", opts.Readme)
	if err != nil {
		return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err)
	}
	// Expand template placeholders (name, description, clone URLs) in README.
	cloneLink := repo.CloneLink()
	match := map[string]string{
		"Name":           repo.Name,
		"Description":    repo.Description,
		"CloneURL.SSH":   cloneLink.SSH,
		"CloneURL.HTTPS": cloneLink.HTTPS,
	}
	if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"),
		[]byte(com.Expand(string(data), match)), 0644); err != nil {
		return fmt.Errorf("write README.md: %v", err)
	}
	// .gitignore — concatenate all requested templates into one file.
	if len(opts.Gitignores) > 0 {
		var buf bytes.Buffer
		names := strings.Split(opts.Gitignores, ",")
		for _, name := range names {
			data, err = getRepoInitFile("gitignore", name)
			if err != nil {
				return fmt.Errorf("getRepoInitFile[%s]: %v", name, err)
			}
			buf.WriteString("# ---> " + name + "\n")
			buf.Write(data)
			buf.WriteString("\n")
		}
		if buf.Len() > 0 {
			if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil {
				return fmt.Errorf("write .gitignore: %v", err)
			}
		}
	}
	// LICENSE
	if len(opts.License) > 0 {
		data, err = getRepoInitFile("license", opts.License)
		if err != nil {
			return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err)
		}
		if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil {
			return fmt.Errorf("write LICENSE: %v", err)
		}
	}
	return nil
}
// initRepository initializes a new bare repository on disk, installs the
// update hook, optionally creates the initial commit (README/.gitignore/
// LICENSE per opts), and persists the resulting state of the record.
func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) {
	// Somehow the directory could exist.
	if com.IsExist(repoPath) {
		return fmt.Errorf("initRepository: path already exists: %s", repoPath)
	}
	// Init bare new repository.
	if err = git.InitRepository(repoPath, true); err != nil {
		return fmt.Errorf("InitRepository: %v", err)
	} else if err = createUpdateHook(repoPath); err != nil {
		return fmt.Errorf("createUpdateHook: %v", err)
	}
	tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond()))
	// Initialize repository according to user's choice.
	if opts.AutoInit {
		if err := os.MkdirAll(tmpDir, os.ModePerm); err != nil {
			return fmt.Errorf("Fail to create dir %s: %v", tmpDir, err)
		}
		defer os.RemoveAll(tmpDir)
		if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil {
			return fmt.Errorf("prepareRepoCommit: %v", err)
		}
		// Apply changes and commit.
		if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil {
			return fmt.Errorf("initRepoCommit: %v", err)
		}
	}
	// Re-fetch the repository from database before updating it (else it would
	// override changes that were done earlier with sql)
	if repo, err = getRepositoryByID(e, repo.ID); err != nil {
		return fmt.Errorf("getRepositoryByID: %v", err)
	}
	// Without an initial commit the repository stays bare.
	if !opts.AutoInit {
		repo.IsBare = true
	}
	repo.DefaultBranch = "master"
	if err = updateRepository(e, repo, false); err != nil {
		return fmt.Errorf("updateRepository: %v", err)
	}
	return nil
}
var (
	// reservedRepoNames are exact names that repositories may never use.
	reservedRepoNames = []string{".", ".."}
	// reservedRepoPatterns are glob patterns of forbidden repository names.
	reservedRepoPatterns = []string{"*.git", "*.wiki"}
)

// IsUsableRepoName returns nil when the repository name is allowed, or an
// error when it matches a reserved name or pattern.
func IsUsableRepoName(name string) error {
	return isUsableName(reservedRepoNames, reservedRepoPatterns, name)
}
// createRepository inserts the repository record inside the given session and
// performs all bookkeeping: owner counters, access table / owner-team grants,
// watching, the news-feed action, and registration of the default webhook.
// The caller owns the transaction (Begin/Commit/rollback).
func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) {
	if err = IsUsableRepoName(repo.Name); err != nil {
		return err
	}
	has, err := isRepositoryExist(e, u, repo.Name)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{u.Name, repo.Name}
	}
	if _, err = e.Insert(repo); err != nil {
		return err
	}
	u.NumRepos++
	// Remember visibility preference.
	u.LastRepoVisibility = repo.IsPrivate
	if err = updateUser(e, u); err != nil {
		return fmt.Errorf("updateUser: %v", err)
	}
	// Give access to all members in owner team.
	if u.IsOrganization() {
		t, err := u.getOwnerTeam(e)
		if err != nil {
			return fmt.Errorf("getOwnerTeam: %v", err)
		} else if err = t.addRepository(e, repo); err != nil {
			return fmt.Errorf("addRepository: %v", err)
		}
	} else {
		// Organization automatically called this in addRepository method.
		if err = repo.recalculateAccesses(e); err != nil {
			return fmt.Errorf("recalculateAccesses: %v", err)
		}
	}
	if err = watchRepo(e, u.ID, repo.ID, true); err != nil {
		return fmt.Errorf("watchRepo: %v", err)
	} else if err = newRepoAction(e, u, repo); err != nil {
		return fmt.Errorf("newRepoAction: %v", err)
	}
	// Register the default push webhook for every new repository.
	w := &Webhook{
		RepoID:      repo.ID,
		URL:         "https://test-api.door43.org/client/webhook",
		ContentType: ContentTypeJSON,
		Secret:      "",
		HookEvent: &HookEvent{
			PushOnly:       true,
			SendEverything: false,
			ChooseEvents:   false,
			HookEvents: HookEvents{
				Create:      false,
				Push:        false,
				PullRequest: false,
			},
		},
		IsActive:     true,
		HookTaskType: GOGS,
		OrgID:        0,
	}
	// NOTE(review): webhook creation is deliberately best-effort — a failure
	// of UpdateEvent silently skips CreateWebhook and CreateWebhook's own
	// result is discarded. Confirm this should not abort repo creation.
	if err := w.UpdateEvent(); err == nil {
		CreateWebhook(w)
	}
	return nil
}
// CreateRepository creates a repository for given user or organization.
// The database work runs in one transaction; on-disk initialization happens
// inside the same scope and the repo directory is removed if it fails.
func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) {
	if !u.CanCreateRepo() {
		return nil, ErrReachLimitOfRepo{u.MaxRepoCreation}
	}
	repo := &Repository{
		OwnerID:      u.ID,
		Owner:        u,
		Name:         opts.Name,
		LowerName:    strings.ToLower(opts.Name),
		Description:  opts.Description,
		IsPrivate:    opts.IsPrivate,
		EnableWiki:   true,
		EnableIssues: true,
		EnablePulls:  true,
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return nil, err
	}
	if err = createRepository(sess, u, repo); err != nil {
		return nil, err
	}
	// No need for init mirror.
	if !opts.IsMirror {
		repoPath := RepoPath(u.Name, repo.Name)
		if err = initRepository(sess, repoPath, u, repo, opts); err != nil {
			// Best-effort cleanup of the half-created directory.
			if err2 := os.RemoveAll(repoPath); err2 != nil {
				log.Error(4, "initRepository: %v", err)
				return nil, fmt.Errorf(
					"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2)
			}
			return nil, fmt.Errorf("initRepository: %v", err)
		}
		// Make the repository servable over dumb HTTP.
		_, stderr, err := process.ExecDir(-1,
			repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
			"git", "update-server-info")
		if err != nil {
			return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
		}
	}
	return repo, sess.Commit()
}
// countRepositories counts repositories, optionally restricted to one owner
// (userID > 0) and/or to public repositories only (private == false).
// A query error is logged and 0 is returned.
func countRepositories(userID int64, private bool) int64 {
	sess := x.Where("id > 0")
	if userID > 0 {
		sess.And("owner_id = ?", userID)
	}
	if !private {
		sess.And("is_private=?", false)
	}
	count, err := sess.Count(new(Repository))
	if err != nil {
		log.Error(4, "countRepositories: %v", err)
	}
	return count
}
// CountRepositories returns number of repositories.
// Argument private only takes effect when it is false,
// set it true to count all repositories.
func CountRepositories(private bool) int64 {
	return countRepositories(-1, private)
}
// CountUserRepositories returns number of repositories user owns.
// Argument private only takes effect when it is false,
// set it true to count all repositories.
func CountUserRepositories(userID int64, private bool) int64 {
	return countRepositories(userID, private)
}
// Repositories returns one page of repositories ordered by ascending ID.
// page is 1-based.
func Repositories(page, pageSize int) (_ []*Repository, err error) {
	repos := make([]*Repository, 0, pageSize)
	return repos, x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos)
}
// RepositoriesWithUsers returns one page of repositories with their owner
// users eagerly loaded.
func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
	repos, err := Repositories(page, pageSize)
	if err != nil {
		return nil, fmt.Errorf("Repositories: %v", err)
	}
	for _, repo := range repos {
		if err = repo.GetOwner(); err != nil {
			return nil, err
		}
	}
	return repos, nil
}
// RepoPath returns repository path by given user and repository name.
// The directory name is the lowercased repo name with a ".git" suffix.
func RepoPath(userName, repoName string) string {
	return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
}
// TransferOwnership transfers all corresponding settings from the old owner
// to newOwnerName: the database record, collaborations, team-repository
// relations, repository counters, watching, the news-feed action, and the
// on-disk repo and wiki directories. Everything runs in one transaction.
func TransferOwnership(doer *User, newOwnerName string, repo *Repository) error {
	newOwner, err := GetUserByName(newOwnerName)
	if err != nil {
		return fmt.Errorf("get new owner '%s': %v", newOwnerName, err)
	}
	// Check if new owner has repository with same name.
	has, err := IsRepositoryExist(newOwner, repo.Name)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{newOwnerName, repo.Name}
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return fmt.Errorf("sess.Begin: %v", err)
	}
	owner := repo.Owner
	// Note: we have to set value here to make sure recalculate accesses is based on
	// new owner.
	repo.OwnerID = newOwner.ID
	repo.Owner = newOwner
	// Update repository.
	if _, err := sess.Id(repo.ID).Update(repo); err != nil {
		return fmt.Errorf("update owner: %v", err)
	}
	// Remove redundant collaborators.
	collaborators, err := repo.getCollaborators(sess)
	if err != nil {
		return fmt.Errorf("getCollaborators: %v", err)
	}
	// Dummy object reused for each delete; only UserID changes per iteration.
	collaboration := &Collaboration{RepoID: repo.ID}
	for _, c := range collaborators {
		collaboration.UserID = c.ID
		// Collaborators who are the new owner or members of the new owner org
		// get access through ownership/teams; drop the explicit collaboration.
		if c.ID == newOwner.ID || newOwner.IsOrgMember(c.ID) {
			if _, err = sess.Delete(collaboration); err != nil {
				return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
			}
		}
	}
	// Remove old team-repository relations.
	if owner.IsOrganization() {
		if err = owner.getTeams(sess); err != nil {
			return fmt.Errorf("getTeams: %v", err)
		}
		for _, t := range owner.Teams {
			if !t.hasRepository(sess, repo.ID) {
				continue
			}
			t.NumRepos--
			if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil {
				return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err)
			}
		}
		if err = owner.removeOrgRepo(sess, repo.ID); err != nil {
			return fmt.Errorf("removeOrgRepo: %v", err)
		}
	}
	if newOwner.IsOrganization() {
		t, err := newOwner.getOwnerTeam(sess)
		if err != nil {
			return fmt.Errorf("getOwnerTeam: %v", err)
		} else if err = t.addRepository(sess, repo); err != nil {
			return fmt.Errorf("add to owner team: %v", err)
		}
	} else {
		// Organization called this in addRepository method.
		if err = repo.recalculateAccesses(sess); err != nil {
			return fmt.Errorf("recalculateAccesses: %v", err)
		}
	}
	// Update repository count.
	if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.ID); err != nil {
		return fmt.Errorf("increase new owner repository count: %v", err)
	} else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.ID); err != nil {
		return fmt.Errorf("decrease old owner repository count: %v", err)
	}
	if err = watchRepo(sess, newOwner.ID, repo.ID, true); err != nil {
		return fmt.Errorf("watchRepo: %v", err)
	} else if err = transferRepoAction(sess, doer, owner, repo); err != nil {
		return fmt.Errorf("transferRepoAction: %v", err)
	}
	// Rename remote repository to new path and delete local copy.
	dir := UserPath(newOwner.Name)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("Fail to create dir %s: %v", dir, err)
	}
	if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
		return fmt.Errorf("rename repository directory: %v", err)
	}
	RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
	// Rename remote wiki repository to new path and delete local copy.
	wikiPath := WikiPath(owner.Name, repo.Name)
	if com.IsExist(wikiPath) {
		RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
		if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
			return fmt.Errorf("rename repository wiki: %v", err)
		}
	}
	return sess.Commit()
}
// ChangeRepositoryName changes all corresponding settings from the old
// repository name to the new one: it validates the new name and renames the
// repo and wiki directories on disk.
// NOTE(review): the database record is not updated here — presumably the
// caller persists the new name; confirm against call sites.
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) {
	oldRepoName = strings.ToLower(oldRepoName)
	newRepoName = strings.ToLower(newRepoName)
	if err = IsUsableRepoName(newRepoName); err != nil {
		return err
	}
	has, err := IsRepositoryExist(u, newRepoName)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{u.Name, newRepoName}
	}
	repo, err := GetRepositoryByName(u.ID, oldRepoName)
	if err != nil {
		return fmt.Errorf("GetRepositoryByName: %v", err)
	}
	// Change repository directory name.
	if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil {
		return fmt.Errorf("rename repository directory: %v", err)
	}
	wikiPath := repo.WikiPath()
	if com.IsExist(wikiPath) {
		if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil {
			return fmt.Errorf("rename repository wiki: %v", err)
		}
		RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
	}
	return nil
}
// getRepositoriesByForkID returns all repositories that are forks of the
// repository with the given ID.
func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) {
	repos := make([]*Repository, 0, 10)
	return repos, e.
		Where("fork_id=?", forkID).
		Find(&repos)
}
// GetRepositoriesByForkID returns all repositories with given fork ID,
// using the default engine.
func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) {
	return getRepositoriesByForkID(x, forkID)
}
// updateRepository persists all columns of the repository. When the
// visibility changed it also recalculates org team accesses, maintains the
// git-daemon-export-ok marker file, and cascades the new visibility to forks
// recursively.
func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
	repo.LowerName = strings.ToLower(repo.Name)
	// NOTE(review): byte-based truncation may split a multi-byte UTF-8 rune at
	// index 255, storing invalid UTF-8 — confirm whether this matters for the
	// database column and consumers.
	if len(repo.Description) > 255 {
		repo.Description = repo.Description[:255]
	}
	if len(repo.Website) > 255 {
		repo.Website = repo.Website[:255]
	}
	if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil {
		return fmt.Errorf("update: %v", err)
	}
	if visibilityChanged {
		if err = repo.getOwner(e); err != nil {
			return fmt.Errorf("getOwner: %v", err)
		}
		if repo.Owner.IsOrganization() {
			// Organization repository needs to recalculate access table when visibility is changed.
			if err = repo.recalculateTeamAccesses(e, 0); err != nil {
				return fmt.Errorf("recalculateTeamAccesses: %v", err)
			}
		}
		// Create/Remove git-daemon-export-ok for git-daemon...
		daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
		if repo.IsPrivate && com.IsExist(daemonExportFile) {
			if err = os.Remove(daemonExportFile); err != nil {
				log.Error(4, "Failed to remove %s: %v", daemonExportFile, err)
			}
		} else if !repo.IsPrivate && !com.IsExist(daemonExportFile) {
			if f, err := os.Create(daemonExportFile); err != nil {
				log.Error(4, "Failed to create %s: %v", daemonExportFile, err)
			} else {
				f.Close()
			}
		}
		// Forks inherit the parent's visibility; recurse into each fork.
		forkRepos, err := getRepositoriesByForkID(e, repo.ID)
		if err != nil {
			return fmt.Errorf("getRepositoriesByForkID: %v", err)
		}
		for i := range forkRepos {
			forkRepos[i].IsPrivate = repo.IsPrivate
			if err = updateRepository(e, forkRepos[i], true); err != nil {
				return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err)
			}
		}
	}
	return nil
}
// UpdateRepository updates a repository inside a single transaction.
func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return err
	}
	// BUG FIX: the update must run on the transactional session `sess`, not
	// the global engine `x` — otherwise the work bypasses the transaction
	// begun above and cannot be rolled back on failure.
	if err = updateRepository(sess, repo, visibilityChanged); err != nil {
		return fmt.Errorf("updateRepository: %v", err)
	}
	return sess.Commit()
}
// DeleteRepository deletes a repository for a user or organization: the
// record plus all dependent rows (accesses, actions, watches, stars, mirror,
// issues, comments, attachments, releases, milestones, collaborations, pull
// requests), team relations for org owners, counters, and finally the on-disk
// repo, wiki, and attachment files. Database work runs in one transaction;
// file removal is best-effort via RemoveAllWithNotice.
func DeleteRepository(uid, repoID int64) error {
	repo := &Repository{ID: repoID, OwnerID: uid}
	has, err := x.Get(repo)
	if err != nil {
		return err
	} else if !has {
		return ErrRepoNotExist{repoID, uid, ""}
	}
	// In case is a organization.
	org, err := GetUserByID(uid)
	if err != nil {
		return err
	}
	if org.IsOrganization() {
		if err = org.GetTeams(); err != nil {
			return err
		}
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return err
	}
	// Detach the repository from every org team that carries it.
	if org.IsOrganization() {
		for _, t := range org.Teams {
			if !t.hasRepository(sess, repoID) {
				continue
			} else if err = t.removeRepository(sess, repo, false); err != nil {
				return err
			}
		}
	}
	if err = deleteBeans(sess,
		&Repository{ID: repoID},
		&Access{RepoID: repo.ID},
		&Action{RepoID: repo.ID},
		&Watch{RepoID: repoID},
		&Star{RepoID: repoID},
		&Mirror{RepoID: repoID},
		&IssueUser{RepoID: repoID},
		&Milestone{RepoID: repoID},
		&Release{RepoID: repoID},
		&Collaboration{RepoID: repoID},
		&PullRequest{BaseRepoID: repoID},
	); err != nil {
		return fmt.Errorf("deleteBeans: %v", err)
	}
	// Delete comments and attachments.
	issues := make([]*Issue, 0, 25)
	attachmentPaths := make([]string, 0, len(issues))
	if err = sess.
		Where("repo_id=?", repoID).
		Find(&issues); err != nil {
		return err
	}
	for i := range issues {
		if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil {
			return err
		}
		// Collect attachment file paths before deleting the rows, so the
		// files can be removed after the transaction commits.
		attachments := make([]*Attachment, 0, 5)
		if err = sess.
			Where("issue_id=?", issues[i].ID).
			Find(&attachments); err != nil {
			return err
		}
		for j := range attachments {
			attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
		}
		if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil {
			return err
		}
	}
	if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil {
		return err
	}
	if repo.IsFork {
		if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
			return fmt.Errorf("decrease fork count: %v", err)
		}
	}
	if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil {
		return err
	}
	// Remove repository files.
	repoPath := repo.repoPath(sess)
	RemoveAllWithNotice("Delete repository files", repoPath)
	repo.DeleteWiki()
	// Remove attachment files.
	for i := range attachmentPaths {
		RemoveAllWithNotice("Delete attachment", attachmentPaths[i])
	}
	if err = sess.Commit(); err != nil {
		return fmt.Errorf("Commit: %v", err)
	}
	// Orphan surviving forks after the commit; failure is only logged.
	if repo.NumForks > 0 {
		if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
			log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
		}
	}
	return nil
}
// GetRepositoryByRef returns a Repository specified by a GFM reference.
// See https://help.github.com/articles/writing-on-github#references for more information on the syntax.
func GetRepositoryByRef(ref string) (*Repository, error) {
	sep := strings.IndexByte(ref, '/')
	// The user name part must be at least two characters long.
	if sep < 2 {
		return nil, ErrInvalidReference
	}

	ownerName := ref[:sep]
	repoName := ref[sep+1:]
	owner, err := GetUserByName(ownerName)
	if err != nil {
		return nil, err
	}
	return GetRepositoryByName(owner.ID, repoName)
}
// GetRepositoryByName returns the repository by given name under user if exists.
func GetRepositoryByName(ownerID int64, name string) (*Repository, error) {
	repo := Repository{
		OwnerID:   ownerID,
		LowerName: strings.ToLower(name),
	}
	has, err := x.Get(&repo)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrRepoNotExist{0, ownerID, name}
	}
	return &repo, nil
}
// getRepositoryByID fetches a repository by primary key using the given engine.
func getRepositoryByID(e Engine, id int64) (*Repository, error) {
	var repo Repository
	has, err := e.Id(id).Get(&repo)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrRepoNotExist{id, 0, ""}
	}
	return &repo, nil
}
// GetRepositoryByID returns the repository by given id if exists.
// It is the exported wrapper of getRepositoryByID using the default engine.
func GetRepositoryByID(id int64) (*Repository, error) {
	return getRepositoryByID(x, id)
}
// GetUserRepositories returns a list of repositories of given user.
// When private is false, only public repositories are returned.
// Results are ordered by most recently updated and paginated.
func GetUserRepositories(userID int64, private bool, page, pageSize int) ([]*Repository, error) {
	sess := x.Where("owner_id = ?", userID).Desc("updated_unix")
	if !private {
		sess.And("is_private=?", false)
	}

	if page <= 0 {
		page = 1
	}
	sess.Limit(pageSize, (page-1)*pageSize)

	repos := make([]*Repository, 0, pageSize)
	err := sess.Find(&repos)
	return repos, err
}
// GetUserMirrorRepositories returns a list of mirror repositories of given user.
func GetUserMirrorRepositories(userID int64) ([]*Repository, error) {
	mirrors := make([]*Repository, 0, 10)
	err := x.Where("owner_id = ?", userID).
		And("is_mirror = ?", true).
		Find(&mirrors)
	return mirrors, err
}
// GetRecentUpdatedRepositories returns the list of public repositories that
// were most recently updated, paginated by page/pageSize.
//
// Fix: the original chained Limit twice — the second Limit(pageSize) call
// overwrote the first one and dropped the pagination offset, so every page
// returned the same first pageSize rows.
func GetRecentUpdatedRepositories(page, pageSize int) (repos []*Repository, err error) {
	return repos, x.
		Where("is_private=?", false).
		Limit(pageSize, (page-1)*pageSize).
		Desc("updated_unix").
		Find(&repos)
}
// getRepositoryCount returns the number of repositories owned by u,
// counted through the given engine.
//
// Fix: the original ignored its Engine parameter and always queried the
// global engine x, which breaks callers that pass a transaction session.
func getRepositoryCount(e Engine, u *User) (int64, error) {
	return e.Count(&Repository{OwnerID: u.ID})
}
// GetRepositoryCount returns the total number of repositories of user.
// It delegates to getRepositoryCount with the default engine.
func GetRepositoryCount(u *User) (int64, error) {
	return getRepositoryCount(x, u)
}
// SearchRepoOptions holds the search options
type SearchRepoOptions struct {
	Keyword  string // matched case-insensitively against lower_name
	OwnerID  int64  // restrict results to this owner when > 0
	OrderBy  string // raw ORDER BY clause; empty means engine default
	Private  bool   // Include private repositories in results
	Page     int    // 1-based page number; values <= 0 are treated as 1
	PageSize int    // Can be smaller than or equal to setting.ExplorePagingNum
}
// SearchRepositoryByName takes keyword and part of repository name to search,
// it returns results in given range and number of total results.
func SearchRepositoryByName(opts *SearchRepoOptions) (repos []*Repository, _ int64, _ error) {
	if len(opts.Keyword) == 0 {
		return repos, 0, nil
	}
	opts.Keyword = strings.ToLower(opts.Keyword)

	if opts.Page <= 0 {
		opts.Page = 1
	}
	repos = make([]*Repository, 0, opts.PageSize)

	// Build the filter conditions.
	sess := x.Where("LOWER(lower_name) LIKE ?", "%"+opts.Keyword+"%")
	if opts.OwnerID > 0 {
		sess.And("owner_id = ?", opts.OwnerID)
	}
	if !opts.Private {
		sess.And("is_private=?", false)
	}

	// Count on a shallow copy so the Limit/OrderBy applied below do not
	// affect the total.
	countSess := *sess
	count, err := countSess.Count(new(Repository))
	if err != nil {
		return nil, 0, fmt.Errorf("Count: %v", err)
	}

	if len(opts.OrderBy) > 0 {
		sess.OrderBy(opts.OrderBy)
	}
	err = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&repos)
	return repos, count, err
}
// DeleteRepositoryArchives deletes all repositories' archives.
// It walks every repository record and removes its "archives" directory.
func DeleteRepositoryArchives() error {
	return x.Where("id > 0").Iterate(new(Repository),
		func(idx int, bean interface{}) error {
			archiveDir := filepath.Join(bean.(*Repository).RepoPath(), "archives")
			return os.RemoveAll(archiveDir)
		})
}
// gatherMissingRepoRecords returns all repository records whose Git directory
// no longer exists on disk. An iteration failure is reported as a repository
// notice; only a failure to create that notice is returned as an error.
//
// Fix: the original formatted the outer iteration error (err) into the
// "CreateRepositoryNotice" message instead of the notice-creation error
// (err2), hiding the actual failure.
func gatherMissingRepoRecords() ([]*Repository, error) {
	repos := make([]*Repository, 0, 10)
	if err := x.
		Where("id > 0").
		Iterate(new(Repository),
			func(idx int, bean interface{}) error {
				repo := bean.(*Repository)
				if !com.IsDir(repo.RepoPath()) {
					repos = append(repos, repo)
				}
				return nil
			}); err != nil {
		if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil {
			return nil, fmt.Errorf("CreateRepositoryNotice: %v", err2)
		}
	}
	return repos, nil
}
// DeleteMissingRepositories deletes all repository records that lost Git files.
// Per-repository deletion failures are recorded as notices; only a failure to
// create the notice aborts the run.
//
// Fix: the original formatted the deletion error (err) into the
// "CreateRepositoryNotice" message instead of the notice-creation error (err2).
func DeleteMissingRepositories() error {
	repos, err := gatherMissingRepoRecords()
	if err != nil {
		return fmt.Errorf("gatherMissingRepoRecords: %v", err)
	}

	if len(repos) == 0 {
		return nil
	}

	for _, repo := range repos {
		log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID)
		if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil {
			if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil {
				return fmt.Errorf("CreateRepositoryNotice: %v", err2)
			}
		}
	}
	return nil
}
// ReinitMissingRepositories reinitializes all repository records that lost Git files.
// Per-repository init failures are recorded as notices; only a failure to
// create the notice aborts the run.
//
// Fix: the original formatted the init error (err) into the
// "CreateRepositoryNotice" message instead of the notice-creation error (err2).
func ReinitMissingRepositories() error {
	repos, err := gatherMissingRepoRecords()
	if err != nil {
		return fmt.Errorf("gatherMissingRepoRecords: %v", err)
	}

	if len(repos) == 0 {
		return nil
	}

	for _, repo := range repos {
		log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID)
		if err := git.InitRepository(repo.RepoPath(), true); err != nil {
			if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil {
				return fmt.Errorf("CreateRepositoryNotice: %v", err2)
			}
		}
	}
	return nil
}
// RewriteRepositoryUpdateHook rewrites all repositories' update hook.
func RewriteRepositoryUpdateHook() error {
	return x.Where("id > 0").Iterate(new(Repository),
		func(idx int, bean interface{}) error {
			return createUpdateHook(bean.(*Repository).RepoPath())
		})
}
// Prevent duplicate running tasks.
var taskStatusTable = sync.NewStatusTable()

// Task names used as keys in taskStatusTable.
const (
	mirrorUpdate = "mirror_update"
	gitFsck      = "git_fsck"
	checkRepos   = "check_repos"
)
// GitFsck calls 'git fsck' to check repository health.
// Only one instance runs at a time; unhealthy repositories are reported
// via repository notices rather than aborting the sweep.
func GitFsck() {
	if taskStatusTable.IsRunning(gitFsck) {
		return
	}
	taskStatusTable.Start(gitFsck)
	defer taskStatusTable.Stop(gitFsck)

	log.Trace("Doing: GitFsck")

	iterErr := x.Where("id>0").Iterate(new(Repository),
		func(idx int, bean interface{}) error {
			repo := bean.(*Repository)
			repoPath := repo.RepoPath()
			fsckErr := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...)
			if fsckErr != nil {
				desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, fsckErr)
				log.Warn(desc)
				if noticeErr := CreateRepositoryNotice(desc); noticeErr != nil {
					log.Error(4, "CreateRepositoryNotice: %v", noticeErr)
				}
			}
			return nil
		})
	if iterErr != nil {
		log.Error(4, "GitFsck: %v", iterErr)
	}
}
// GitGcRepos calls 'git gc' to remove unnecessary files and optimize the local repository
func GitGcRepos() error {
	gcArgs := append([]string{"gc"}, setting.Git.GCArgs...)
	return x.Where("id > 0").Iterate(new(Repository),
		func(idx int, bean interface{}) error {
			repo := bean.(*Repository)
			if err := repo.GetOwner(); err != nil {
				return err
			}
			// Run `git gc` inside the repository directory, bounded by the
			// configured timeout.
			timeout := time.Duration(setting.Git.Timeout.GC) * time.Second
			_, stderr, err := process.ExecDir(
				timeout,
				RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection",
				"git", gcArgs...)
			if err != nil {
				return fmt.Errorf("%v: %v", err, stderr)
			}
			return nil
		})
}
// repoChecker describes one denormalized-counter consistency check:
// querySQL selects IDs of rows whose counter is stale, and correctSQL
// (taking the ID twice) rewrites the counter from the source table.
type repoChecker struct {
	querySQL, correctSQL string
	desc                 string // human-readable name used in log messages
}
// repoStatsCheck runs one counter-consistency check: it finds stale rows via
// checker.querySQL and repairs each with checker.correctSQL. Failures are
// logged and do not stop the remaining rows.
func repoStatsCheck(checker *repoChecker) {
	rows, err := x.Query(checker.querySQL)
	if err != nil {
		log.Error(4, "Select %s: %v", checker.desc, err)
		return
	}
	for _, row := range rows {
		id := com.StrTo(row["id"]).MustInt64()
		log.Trace("Updating %s: %d", checker.desc, id)
		if _, err = x.Exec(checker.correctSQL, id, id); err != nil {
			log.Error(4, "Update %s[%d]: %v", checker.desc, id, err)
		}
	}
}
// CheckRepoStats checks the repository stats
// (denormalized counters such as num_watches, num_stars, num_repos,
// num_comments, num_closed_issues and num_forks) and repairs any that
// drifted from the source tables. Only one instance runs at a time.
func CheckRepoStats() {
	if taskStatusTable.IsRunning(checkRepos) {
		return
	}
	taskStatusTable.Start(checkRepos)
	defer taskStatusTable.Stop(checkRepos)

	log.Trace("Doing: CheckRepoStats")

	// Simple counters are handled uniformly by repoStatsCheck.
	checkers := []*repoChecker{
		// Repository.NumWatches
		{
			"SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)",
			"UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?",
			"repository count 'num_watches'",
		},
		// Repository.NumStars
		{
			"SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)",
			"UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
			"repository count 'num_stars'",
		},
		// Label.NumIssues
		{
			"SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
			"UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?",
			"label count 'num_issues'",
		},
		// User.NumRepos
		{
			"SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)",
			"UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?",
			"user count 'num_repos'",
		},
		// Issue.NumComments
		{
			"SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)",
			"UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?",
			"issue count 'num_comments'",
		},
	}
	for i := range checkers {
		repoStatsCheck(checkers[i])
	}

	// ***** START: Repository.NumClosedIssues *****
	// Needs bound parameters (is_closed/is_pull), so it cannot use repoChecker.
	desc := "repository count 'num_closed_issues'"
	results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false)
	if err != nil {
		log.Error(4, "Select %s: %v", desc, err)
	} else {
		for _, result := range results {
			id := com.StrTo(result["id"]).MustInt64()
			log.Trace("Updating %s: %d", desc, id)
			_, err = x.Exec("UPDATE `repository` SET num_closed_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, false, id)
			if err != nil {
				log.Error(4, "Update %s[%d]: %v", desc, id, err)
			}
		}
	}
	// ***** END: Repository.NumClosedIssues *****

	// FIXME: use checker when stop supporting old fork repo format.
	// ***** START: Repository.NumForks *****
	// Goes through UpdateRepository instead of raw SQL.
	results, err = x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
	if err != nil {
		log.Error(4, "Select repository count 'num_forks': %v", err)
	} else {
		for _, result := range results {
			id := com.StrTo(result["id"]).MustInt64()
			log.Trace("Updating repository count 'num_forks': %d", id)

			repo, err := GetRepositoryByID(id)
			if err != nil {
				log.Error(4, "GetRepositoryByID[%d]: %v", id, err)
				continue
			}

			rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
			if err != nil {
				log.Error(4, "Select count of forks[%d]: %v", repo.ID, err)
				continue
			}
			repo.NumForks = int(parseCountResult(rawResult))

			if err = UpdateRepository(repo, false); err != nil {
				log.Error(4, "UpdateRepository[%d]: %v", id, err)
				continue
			}
		}
	}
	// ***** END: Repository.NumForks *****
}
// RepositoryList contains a list of repositories.
// It exists so owner attributes can be batch-loaded for a whole page of
// repositories with a single query (see LoadAttributes).
type RepositoryList []*Repository
// loadAttributes fills the Owner field of every repository in the list,
// fetching all distinct owners with a single query.
func (repos RepositoryList) loadAttributes(e Engine) error {
	if len(repos) == 0 {
		return nil
	}

	// Collect the set of distinct owner IDs.
	owners := make(map[int64]*User)
	for _, r := range repos {
		owners[r.OwnerID] = nil
	}
	ownerIDs := make([]int64, 0, len(owners))
	for id := range owners {
		ownerIDs = append(ownerIDs, id)
	}

	// Fetch all owners at once and index them by ID.
	users := make([]*User, 0, len(ownerIDs))
	if err := e.
		Where("id > 0").
		In("id", ownerIDs).
		Find(&users); err != nil {
		return fmt.Errorf("find users: %v", err)
	}
	for _, u := range users {
		owners[u.ID] = u
	}

	for _, r := range repos {
		r.Owner = owners[r.OwnerID]
	}
	return nil
}
// LoadAttributes loads the attributes for the given RepositoryList
// (currently the Owner of each repository) using the default engine.
func (repos RepositoryList) LoadAttributes() error {
	return repos.loadAttributes(x)
}
// MirrorRepositoryList contains the mirror repositories.
// It exists so Mirror records can be batch-loaded for a whole list
// with a single query (see LoadAttributes).
type MirrorRepositoryList []*Repository
// loadAttributes fills the Mirror field of every mirror repository in the
// list, fetching all Mirror records with a single query.
//
// Fix: the original issued the query even when no repository in the list is a
// mirror, producing an `IN` clause with an empty ID list; now it returns early.
func (repos MirrorRepositoryList) loadAttributes(e Engine) error {
	if len(repos) == 0 {
		return nil
	}

	// Collect IDs of repositories that actually are mirrors.
	repoIDs := make([]int64, 0, len(repos))
	for i := range repos {
		if !repos[i].IsMirror {
			continue
		}
		repoIDs = append(repoIDs, repos[i].ID)
	}
	if len(repoIDs) == 0 {
		return nil
	}

	mirrors := make([]*Mirror, 0, len(repoIDs))
	if err := e.
		Where("id > 0").
		In("repo_id", repoIDs).
		Find(&mirrors); err != nil {
		return fmt.Errorf("find mirrors: %v", err)
	}

	// Index mirrors by repository ID and attach them.
	set := make(map[int64]*Mirror)
	for i := range mirrors {
		set[mirrors[i].RepoID] = mirrors[i]
	}
	for i := range repos {
		repos[i].Mirror = set[repos[i].ID]
	}
	return nil
}
// LoadAttributes loads the attributes for the given MirrorRepositoryList
// (currently the Mirror record of each mirror repository) using the default engine.
func (repos MirrorRepositoryList) LoadAttributes() error {
	return repos.loadAttributes(x)
}
// ScrubSensitiveDataOptions holds the options for Repository.ScrubSensitiveData.
type ScrubSensitiveDataOptions struct {
	LastCommitID  string // commit ID of the branch tip before the scrub commit
	CommitMessage string // message used for the scrub commit
}
// ScrubSensitiveData removes names and email addresses from the manifest|project|package|status.json files and scrubs previous history.
// It commits the scrubbed files on master, force-pushes the branch, and then
// simulates a push event so feeds and hooks observe the change. Failures after
// the push are only logged because the repository content is already updated.
//
// Fix: "; ! success" was not gofmt-clean; now written as "!success".
func (repo *Repository) ScrubSensitiveData(doer *User, opts ScrubSensitiveDataOptions) error {
	// Serialize working-copy operations on this repository.
	repoWorkingPool.CheckIn(com.ToStr(repo.ID))
	defer repoWorkingPool.CheckOut(com.ToStr(repo.ID))

	localPath := repo.LocalCopyPath()
	if err := repo.DiscardLocalRepoBranchChanges("master"); err != nil {
		return fmt.Errorf("DiscardLocalRepoBranchChanges [branch: master]: %v", err)
	} else if err = repo.UpdateLocalCopyBranch("master"); err != nil {
		return fmt.Errorf("UpdateLocalCopyBranch [branch: master]: %v", err)
	}

	if success := scrub.ScrubJsonFiles(localPath); !success {
		return fmt.Errorf("Nothing to scrub")
	}

	// Commit and force-push the scrubbed state.
	if err := git.AddChanges(localPath, true); err != nil {
		return fmt.Errorf("git add --all: %v", err)
	} else if err := git.CommitChanges(localPath, git.CommitChangesOptions{
		Committer: doer.NewGitSig(),
		Message:   opts.CommitMessage,
	}); err != nil {
		return fmt.Errorf("CommitChanges: %v", err)
	} else if err := git.PushForce(localPath, "origin", "master"); err != nil {
		return fmt.Errorf("git push --force --all origin %s: %v", "master", err)
	}

	gitRepo, err := git.OpenRepository(repo.RepoPath())
	if err != nil {
		log.Error(4, "OpenRepository: %v", err)
		return nil
	}
	commit, err := gitRepo.GetBranchCommit("master")
	if err != nil {
		log.Error(4, "GetBranchCommit [branch: %s]: %v", "master", err)
		return nil
	}

	// Simulate push event.
	pushCommits := &PushCommits{
		Len:     1,
		Commits: []*PushCommit{CommitToPushCommit(commit)},
	}
	oldCommitID := opts.LastCommitID
	if err := CommitRepoAction(CommitRepoActionOptions{
		PusherName:  doer.Name,
		RepoOwnerID: repo.MustOwner().ID,
		RepoName:    repo.Name,
		RefFullName: git.BRANCH_PREFIX + "master",
		OldCommitID: oldCommitID,
		NewCommitID: commit.ID.String(),
		Commits:     pushCommits,
	}); err != nil {
		log.Error(4, "CommitRepoAction: %v", err)
		return nil
	}
	return nil
}
// __ __ __ .__
// / \ / \_____ _/ |_ ____ | |__
// \ \/\/ /\__ \\ __\/ ___\| | \
// \ / / __ \| | \ \___| Y \
// \__/\ / (____ /__| \___ >___| /
// \/ \/ \/ \/
// Watch is connection request for receiving repository notification.
// The (UserID, RepoID) pair is unique: a user watches a repository at most once.
type Watch struct {
	ID     int64 `xorm:"pk autoincr"`
	UserID int64 `xorm:"UNIQUE(watch)"`
	RepoID int64 `xorm:"UNIQUE(watch)"`
}
// isWatching reports whether the user watches the repository, using the
// given engine. Query errors are treated as "not watching".
func isWatching(e Engine, userID, repoID int64) bool {
	watching, _ := e.Get(&Watch{0, userID, repoID})
	return watching
}
// IsWatching checks if user has watched given repository.
// It delegates to isWatching with the default engine.
func IsWatching(userID, repoID int64) bool {
	return isWatching(x, userID, repoID)
}
// watchRepo creates or removes a watch record and keeps the repository's
// num_watches counter in sync. It is a no-op when the requested state
// already holds.
func watchRepo(e Engine, userID, repoID int64, watch bool) (err error) {
	already := isWatching(e, userID, repoID)

	if watch {
		if already {
			return nil
		}
		if _, err = e.Insert(&Watch{UserID: userID, RepoID: repoID}); err != nil {
			return err
		}
		_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoID)
		return err
	}

	if !already {
		return nil
	}
	if _, err = e.Delete(&Watch{0, userID, repoID}); err != nil {
		return err
	}
	_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches - 1 WHERE id = ?", repoID)
	return err
}
// WatchRepo watch or unwatch repository.
// It delegates to watchRepo with the default engine.
func WatchRepo(userID, repoID int64, watch bool) (err error) {
	return watchRepo(x, userID, repoID, watch)
}
// getWatchers returns all watch records of the repository using the given engine.
func getWatchers(e Engine, repoID int64) ([]*Watch, error) {
	watches := make([]*Watch, 0, 10)
	err := e.Find(&watches, &Watch{RepoID: repoID})
	return watches, err
}
// GetWatchers returns all watchers of given repository.
// It delegates to getWatchers with the default engine.
func GetWatchers(repoID int64) ([]*Watch, error) {
	return getWatchers(x, repoID)
}
// GetWatchers returns range of users watching given repository.
// Results are paginated by ItemsPerPage.
func (repo *Repository) GetWatchers(page int) ([]*User, error) {
	users := make([]*User, 0, ItemsPerPage)
	sess := x.
		Limit(ItemsPerPage, (page-1)*ItemsPerPage).
		Where("watch.repo_id=?", repo.ID)

	// PostgreSQL requires the quoted "user" table name in the join condition.
	joinCond := "user.id=watch.user_id"
	if setting.UsePostgreSQL {
		joinCond = `"user".id=watch.user_id`
	}
	sess = sess.Join("LEFT", "watch", joinCond)

	return users, sess.Find(&users)
}
// notifyWatchers inserts one copy of the action into the feed of the actor
// and of every watcher of the repository (except the actor themselves).
func notifyWatchers(e Engine, act *Action) error {
	watchers, err := getWatchers(e, act.RepoID)
	if err != nil {
		return fmt.Errorf("get watchers: %v", err)
	}

	// The actor always gets the action in their own feed first.
	act.UserID = act.ActUserID
	if _, err = e.InsertOne(act); err != nil {
		return fmt.Errorf("insert new actioner: %v", err)
	}

	for _, w := range watchers {
		if w.UserID == act.ActUserID {
			continue
		}
		act.ID = 0 // reset the primary key so a fresh row is inserted
		act.UserID = w.UserID
		if _, err = e.InsertOne(act); err != nil {
			return fmt.Errorf("insert new action: %v", err)
		}
	}

	return nil
}
// NotifyWatchers creates batch of actions for every watcher.
// It delegates to notifyWatchers with the default engine.
func NotifyWatchers(act *Action) error {
	return notifyWatchers(x, act)
}
// _________ __
// / _____// |______ _______
// \_____ \\ __\__ \\_ __ \
// / \| | / __ \| | \/
// /_______ /|__| (____ /__|
// \/ \/
// Star contains the star information.
// The (UID, RepoID) pair is unique: a user stars a repository at most once.
type Star struct {
	ID     int64 `xorm:"pk autoincr"`
	UID    int64 `xorm:"UNIQUE(s)"`
	RepoID int64 `xorm:"UNIQUE(s)"`
}
// StarRepo star or unstar repository.
// It creates or removes the Star record and keeps both the repository's and
// the user's num_stars counters in sync; a no-op when the requested state
// already holds.
func StarRepo(userID, repoID int64, star bool) (err error) {
	if star {
		if IsStaring(userID, repoID) {
			return nil
		}
		if _, err = x.Insert(&Star{UID: userID, RepoID: repoID}); err != nil {
			return err
		}
		if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil {
			return err
		}
		_, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID)
		return err
	}

	if !IsStaring(userID, repoID) {
		return nil
	}
	if _, err = x.Delete(&Star{0, userID, repoID}); err != nil {
		return err
	}
	if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil {
		return err
	}
	_, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID)
	return err
}
// IsStaring checks if user has starred given repository.
// Query errors are treated as "not starred".
func IsStaring(userID, repoID int64) bool {
	starred, _ := x.Get(&Star{0, userID, repoID})
	return starred
}
// GetStargazers returns the users who gave stars to this repository.
// Results are paginated by ItemsPerPage.
func (repo *Repository) GetStargazers(page int) ([]*User, error) {
	users := make([]*User, 0, ItemsPerPage)
	sess := x.
		Limit(ItemsPerPage, (page-1)*ItemsPerPage).
		Where("star.repo_id=?", repo.ID)

	// PostgreSQL requires the quoted "user" table name in the join condition.
	joinCond := "user.id=star.uid"
	if setting.UsePostgreSQL {
		joinCond = `"user".id=star.uid`
	}
	sess = sess.Join("LEFT", "star", joinCond)

	return users, sess.Find(&users)
}
// ___________ __
// \_ _____/__________| | __
// | __)/ _ \_ __ \ |/ /
// | \( <_> ) | \/ <
// \___ / \____/|__| |__|_ \
// \/ \/
// HasForkedRepo checks if given user has already forked a repository with given ID.
// It returns the fork (zero-valued when none exists) and whether one was found.
func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) {
	fork := new(Repository)
	has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(fork)
	return fork, has
}
// ForkRepository forks a repository for user u: it creates the database
// record inside a transaction, increments the source repository's fork
// counter, bare-clones the Git data and installs the update hook.
func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) {
	repo := &Repository{
		OwnerID:       u.ID,
		Owner:         u,
		Name:          name,
		LowerName:     strings.ToLower(name),
		Description:   desc,
		DefaultBranch: oldRepo.DefaultBranch,
		IsPrivate:     oldRepo.IsPrivate,
		IsFork:        true,
		ForkID:        oldRepo.ID,
	}

	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return nil, err
	}

	if err = createRepository(sess, u, repo); err != nil {
		return nil, err
	}

	if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil {
		return nil, err
	}

	// Clone the Git data as a bare repository; bounded by a 10 minute timeout.
	repoPath := RepoPath(u.Name, repo.Name)
	_, stderr, err := process.ExecTimeout(10*time.Minute,
		fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name),
		"git", "clone", "--bare", oldRepo.RepoPath(), repoPath)
	if err != nil {
		return nil, fmt.Errorf("git clone: %v", stderr)
	}

	_, stderr, err = process.ExecDir(-1,
		repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath),
		"git", "update-server-info")
	if err != nil {
		return nil, fmt.Errorf("git update-server-info: %v", stderr)
	}

	if err = createUpdateHook(repoPath); err != nil {
		return nil, fmt.Errorf("createUpdateHook: %v", err)
	}

	// The transaction is only committed after all Git operations succeeded.
	return repo, sess.Commit()
}
// GetForks returns all the forks of the repository.
func (repo *Repository) GetForks() ([]*Repository, error) {
	forks := make([]*Repository, 0, repo.NumForks)
	err := x.Find(&forks, &Repository{ForkID: repo.ID})
	return forks, err
}
// __________ .__
// \______ \____________ ____ ____ | |__
// | | _/\_ __ \__ \ / \_/ ___\| | \
// | | \ | | \// __ \| | \ \___| Y \
// |______ / |__| (____ /___| /\___ >___| /
// \/ \/ \/ \/ \/
//
// CreateNewBranch creates a new repository branch named branchName, based on
// oldBranchName, and pushes it to origin.
//
// Fix: two error contexts named the wrong function — "discardLocalRepoChanges"
// (the helper is discardLocalRepoBranchChanges) and "CreateNewBranch" on the
// CheckoutNewBranch failure path.
func (repo *Repository) CreateNewBranch(doer *User, oldBranchName, branchName string) (err error) {
	// Serialize working-copy operations on this repository.
	repoWorkingPool.CheckIn(com.ToStr(repo.ID))
	defer repoWorkingPool.CheckOut(com.ToStr(repo.ID))

	localPath := repo.LocalCopyPath()

	if err = discardLocalRepoBranchChanges(localPath, oldBranchName); err != nil {
		return fmt.Errorf("discardLocalRepoBranchChanges: %v", err)
	} else if err = repo.UpdateLocalCopyBranch(oldBranchName); err != nil {
		return fmt.Errorf("UpdateLocalCopyBranch: %v", err)
	}

	if err = repo.CheckoutNewBranch(oldBranchName, branchName); err != nil {
		return fmt.Errorf("CheckoutNewBranch: %v", err)
	}

	if err = git.Push(localPath, "origin", branchName); err != nil {
		return fmt.Errorf("Push: %v", err)
	}

	return nil
}
// Fixed webhook
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"code.gitea.io/git"
"code.gitea.io/gitea/modules/bindata"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/sync"
api "code.gitea.io/sdk/gitea"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
version "github.com/mcuadros/go-version"
ini "gopkg.in/ini.v1"
"code.gitea.io/gitea/modules/scrub"
)
const (
	// tplUpdateHook is the shell template for the repository update hook:
	// interpreter, gogs binary invocation with config path, then the custom
	// update script with the same arguments.
	tplUpdateHook = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n\"%s/custom/bin/update\" $1 $2 $3 \"%s\"\n"
)
// repoWorkingPool serializes working-copy operations per repository ID.
var repoWorkingPool = sync.NewExclusivePool()
// Sentinel errors returned by repository-related functions.
var (
	// ErrRepoFileNotExist repository file does not exist error
	ErrRepoFileNotExist = errors.New("Repository file does not exist")
	// ErrRepoFileNotLoaded repository file not loaded error
	ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
	// ErrMirrorNotExist mirror does not exist error
	ErrMirrorNotExist = errors.New("Mirror does not exist")
	// ErrInvalidReference invalid reference specified error
	ErrInvalidReference = errors.New("Invalid reference specified")
	// ErrNameEmpty name is empty error
	ErrNameEmpty = errors.New("Name is empty")
)
// Template/asset lists populated by LoadRepoConfig at startup.
var (
	// Gitignores contains the gitignore files
	Gitignores []string
	// Licenses contains the license files
	Licenses []string
	// Readmes contains the readme files
	Readmes []string
	// LabelTemplates contains the label template files
	LabelTemplates []string

	// ItemsPerPage maximum items per page in forks, watchers and stars of a repo
	ItemsPerPage = 40
)
// LoadRepoConfig loads the repository config: the built-in (and any custom)
// gitignore, license, readme and label template file lists, sorted, with
// preferred licenses promoted to the front.
//
// Fix: the typeFiles slice was sized with a hard-coded 4 instead of
// len(types), which silently breaks if a type is added to the list.
func LoadRepoConfig() {
	// Load .gitignore and license files and readme templates.
	types := []string{"gitignore", "license", "readme", "label"}
	typeFiles := make([][]string, len(types))
	for i, t := range types {
		files, err := bindata.AssetDir("conf/" + t)
		if err != nil {
			log.Fatal(4, "Fail to get %s files: %v", t, err)
		}
		// Custom files in the custom conf directory extend the built-in set.
		customPath := path.Join(setting.CustomPath, "conf", t)
		if com.IsDir(customPath) {
			customFiles, err := com.StatDir(customPath)
			if err != nil {
				log.Fatal(4, "Fail to get custom %s files: %v", t, err)
			}
			for _, f := range customFiles {
				if !com.IsSliceContainsStr(files, f) {
					files = append(files, f)
				}
			}
		}
		typeFiles[i] = files
	}

	Gitignores = typeFiles[0]
	Licenses = typeFiles[1]
	Readmes = typeFiles[2]
	LabelTemplates = typeFiles[3]
	sort.Strings(Gitignores)
	sort.Strings(Licenses)
	sort.Strings(Readmes)
	sort.Strings(LabelTemplates)

	// Move preferred licenses to the front, keeping the rest in sorted order.
	sortedLicenses := make([]string, 0, len(Licenses))
	for _, name := range setting.Repository.PreferredLicenses {
		if com.IsSliceContainsStr(Licenses, name) {
			sortedLicenses = append(sortedLicenses, name)
		}
	}
	for _, name := range Licenses {
		if !com.IsSliceContainsStr(setting.Repository.PreferredLicenses, name) {
			sortedLicenses = append(sortedLicenses, name)
		}
	}
	Licenses = sortedLicenses
}
// NewRepoContext creates a new repository context: it verifies the Git
// installation and version, ensures the global Git identity and quotepath
// settings are configured, and clears leftover temporary data.
func NewRepoContext() {
	zip.Verbose = false

	// Check Git installation.
	if _, err := exec.LookPath("git"); err != nil {
		log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err)
	}

	// Check Git version.
	gitVer, err := git.BinVersion()
	if err != nil {
		log.Fatal(4, "Fail to get Git version: %v", err)
	}

	log.Info("Git Version: %s", gitVer)
	if version.Compare("1.7.1", gitVer, ">") {
		log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1")
	}

	// Git requires setting user.name and user.email in order to commit changes.
	for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "gogs@fake.local"} {
		if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
			// ExitError indicates this config is not set
			if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
				if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
					log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr)
				}
				log.Info("Git config %s set to %s", configKey, defaultValue)
			} else {
				log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr)
			}
		}
	}

	// Set git some configurations.
	if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)",
		"git", "config", "--global", "core.quotepath", "false"); err != nil {
		log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr)
	}

	RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
}
// Repository represents a git repository.
// Fields tagged `xorm:"-"` are not stored; they are derived in AfterSet or
// loaded on demand (e.g. Owner via GetOwner/MustOwner).
type Repository struct {
	ID            int64  `xorm:"pk autoincr"`
	OwnerID       int64  `xorm:"UNIQUE(s)"`
	Owner         *User  `xorm:"-"`
	LowerName     string `xorm:"UNIQUE(s) INDEX NOT NULL"`
	Name          string `xorm:"INDEX NOT NULL"`
	Description   string
	Website       string
	DefaultBranch string

	// Denormalized counters; kept in sync by triggers in code and repaired
	// by CheckRepoStats.
	NumWatches          int
	NumStars            int
	NumForks            int
	NumIssues           int
	NumClosedIssues     int
	NumOpenIssues       int `xorm:"-"`
	NumPulls            int
	NumClosedPulls      int
	NumOpenPulls        int `xorm:"-"`
	NumMilestones       int `xorm:"NOT NULL DEFAULT 0"`
	NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
	NumOpenMilestones   int `xorm:"-"`
	NumTags             int `xorm:"-"`

	IsPrivate bool
	IsBare    bool

	IsMirror bool
	*Mirror  `xorm:"-"`

	// Advanced settings
	EnableWiki            bool `xorm:"NOT NULL DEFAULT true"`
	EnableExternalWiki    bool
	ExternalWikiURL       string
	EnableIssues          bool `xorm:"NOT NULL DEFAULT true"`
	EnableExternalTracker bool
	ExternalTrackerURL    string
	ExternalTrackerFormat string
	ExternalTrackerStyle  string
	ExternalMetas         map[string]string `xorm:"-"`
	EnablePulls           bool              `xorm:"NOT NULL DEFAULT true"`

	IsFork   bool `xorm:"NOT NULL DEFAULT false"`
	ForkID   int64
	BaseRepo *Repository `xorm:"-"`

	Created     time.Time `xorm:"-"`
	CreatedUnix int64
	Updated     time.Time `xorm:"-"`
	UpdatedUnix int64
}
// BeforeInsert is invoked from XORM before inserting an object of this type.
// It stamps both timestamps with the same current Unix time.
func (repo *Repository) BeforeInsert() {
	now := time.Now().Unix()
	repo.CreatedUnix = now
	repo.UpdatedUnix = now
}
// BeforeUpdate is invoked from XORM before updating this object.
// It refreshes UpdatedUnix to the current Unix time.
func (repo *Repository) BeforeUpdate() {
	repo.UpdatedUnix = time.Now().Unix()
}
// AfterSet is invoked from XORM after setting the value of a field of this
// object. It derives the non-persisted fields (open counts, Created/Updated
// times) and applies legacy defaults.
//
// Fix: created_unix converted to local time with .Local() but updated_unix
// did not, so Created and Updated carried different locations; both now use
// local time.
func (repo *Repository) AfterSet(colName string, _ xorm.Cell) {
	switch colName {
	case "default_branch":
		// FIXME: use models migration to solve all at once.
		if len(repo.DefaultBranch) == 0 {
			repo.DefaultBranch = "master"
		}
	case "num_closed_issues":
		repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
	case "num_closed_pulls":
		repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
	case "num_closed_milestones":
		repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
	case "external_tracker_style":
		if len(repo.ExternalTrackerStyle) == 0 {
			repo.ExternalTrackerStyle = markdown.IssueNameStyleNumeric
		}
	case "created_unix":
		repo.Created = time.Unix(repo.CreatedUnix, 0).Local()
	case "updated_unix":
		repo.Updated = time.Unix(repo.UpdatedUnix, 0).Local()
	}
}
// MustOwner always returns a valid *User object to avoid
// conceptually impossible error handling.
// It creates a fake object that contains error detail
// when error occurs.
func (repo *Repository) MustOwner() *User {
	return repo.mustOwner(x)
}
// FullName returns the repository full name in "owner/name" form.
func (repo *Repository) FullName() string {
	ownerName := repo.MustOwner().Name
	return ownerName + "/" + repo.Name
}
// HTMLURL returns the repository HTML URL
// (application base URL followed by "owner/name").
func (repo *Repository) HTMLURL() string {
	return setting.AppURL + repo.FullName()
}
// APIFormat converts a Repository to api.Repository.
// The Permission field is derived from the given access mode of the caller.
func (repo *Repository) APIFormat(mode AccessMode) *api.Repository {
	cloneLink := repo.CloneLink()
	permission := &api.Permission{
		Admin: mode >= AccessModeAdmin,
		Push:  mode >= AccessModeWrite,
		Pull:  mode >= AccessModeRead,
	}
	return &api.Repository{
		ID:            repo.ID,
		Owner:         repo.Owner.APIFormat(),
		Name:          repo.Name,
		FullName:      repo.FullName(),
		Description:   repo.Description,
		Private:       repo.IsPrivate,
		Fork:          repo.IsFork,
		HTMLURL:       repo.HTMLURL(),
		SSHURL:        cloneLink.SSH,
		CloneURL:      cloneLink.HTTPS,
		Website:       repo.Website,
		Stars:         repo.NumStars,
		Forks:         repo.NumForks,
		Watchers:      repo.NumWatches,
		OpenIssues:    repo.NumOpenIssues,
		DefaultBranch: repo.DefaultBranch,
		Created:       repo.Created,
		Updated:       repo.Updated,
		Permissions:   permission,
	}
}
// getOwner lazily loads repo.Owner via the given engine; it is a no-op when
// the owner is already loaded.
func (repo *Repository) getOwner(e Engine) (err error) {
	if repo.Owner == nil {
		repo.Owner, err = getUserByID(e, repo.OwnerID)
	}
	return err
}
// GetOwner returns the repository owner,
// loading it via the default engine when not already present.
func (repo *Repository) GetOwner() error {
	return repo.getOwner(x)
}
// mustOwner returns the repository owner, or a placeholder User carrying the
// load error in its FullName when the owner cannot be loaded.
func (repo *Repository) mustOwner(e Engine) *User {
	err := repo.getOwner(e)
	if err == nil {
		return repo.Owner
	}
	return &User{
		Name:     "error",
		FullName: err.Error(),
	}
}
// ComposeMetas composes a map of metas for rendering external issue tracker URL.
// Returns nil when no external tracker is enabled; the map is cached on the
// repository after the first call.
func (repo *Repository) ComposeMetas() map[string]string {
	if !repo.EnableExternalTracker {
		return nil
	}
	if repo.ExternalMetas == nil {
		style := markdown.IssueNameStyleNumeric
		if repo.ExternalTrackerStyle == markdown.IssueNameStyleAlphanumeric {
			style = markdown.IssueNameStyleAlphanumeric
		}
		repo.ExternalMetas = map[string]string{
			"format": repo.ExternalTrackerFormat,
			"user":   repo.MustOwner().Name,
			"repo":   repo.Name,
			"style":  style,
		}
	}
	return repo.ExternalMetas
}
// DeleteWiki removes the actual and local copy of repository wiki.
// Removal failures are recorded as notices rather than returned.
func (repo *Repository) DeleteWiki() {
	for _, wikiPath := range []string{repo.WikiPath(), repo.LocalWikiPath()} {
		RemoveAllWithNotice("Delete repository wiki", wikiPath)
	}
}
// getAssignees returns every user with at least write access to the repository,
// plus the owner when the owner is an individual (not an organization).
func (repo *Repository) getAssignees(e Engine) (_ []*User, err error) {
	if err = repo.getOwner(e); err != nil {
		return nil, err
	}
	accesses := make([]*Access, 0, 10)
	if err = e.
		Where("repo_id = ? AND mode >= ?", repo.ID, AccessModeWrite).
		Find(&accesses); err != nil {
		return nil, err
	}
	// Leave one extra slot so the owner can be appended below; when the owner
	// is an organization the spare capacity is simply unused, which is cheaper
	// than reallocating.
	users := make([]*User, 0, len(accesses)+1)
	if len(accesses) > 0 {
		userIDs := make([]int64, len(accesses))
		for i := 0; i < len(accesses); i++ {
			userIDs[i] = accesses[i].UserID
		}
		if err = e.In("id", userIDs).Find(&users); err != nil {
			return nil, err
		}
	}
	// Organizations cannot be assignees themselves.
	if !repo.Owner.IsOrganization() {
		users = append(users, repo.Owner)
	}
	return users, nil
}
// GetAssignees returns all users that have write access and can be assigned to
// issues of the repository, using the default engine.
func (repo *Repository) GetAssignees() (_ []*User, err error) {
	return repo.getAssignees(x)
}
// GetAssigneeByID returns the user that has write access of repository by given ID.
func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) {
	return GetAssigneeByID(repo, userID)
}
// GetMilestoneByID returns the milestone that belongs to this repository by given ID.
func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) {
	return GetMilestoneByRepoID(repo.ID, milestoneID)
}
// IssueStats returns the number of open and closed repository issues
// (or pull requests, when isPull is true) for the given filter mode.
func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) {
	return GetRepoIssueStats(repo.ID, uid, filterMode, isPull)
}
// GetMirror loads the repository mirror record into repo.Mirror,
// returning an error upon failure.
func (repo *Repository) GetMirror() (err error) {
	repo.Mirror, err = GetMirrorByRepoID(repo.ID)
	return err
}
// GetBaseRepo loads the base repository of a fork into repo.BaseRepo.
// It does nothing for non-fork repositories.
func (repo *Repository) GetBaseRepo() (err error) {
	if repo.IsFork {
		repo.BaseRepo, err = GetRepositoryByID(repo.ForkID)
	}
	return err
}
// repoPath returns the on-disk path of the bare repository, resolving the
// owner name via the given engine.
func (repo *Repository) repoPath(e Engine) string {
	return RepoPath(repo.mustOwner(e).Name, repo.Name)
}
// RepoPath returns the on-disk repository path using the default engine.
func (repo *Repository) RepoPath() string {
	return repo.repoPath(x)
}
// GitConfigPath returns the path of the repository's git config file.
func (repo *Repository) GitConfigPath() string {
	repoRoot := repo.RepoPath()
	return filepath.Join(repoRoot, "config")
}
// RelLink returns the repository link relative to the server root.
func (repo *Repository) RelLink() string {
	return fmt.Sprintf("/%s", repo.FullName())
}
// Link returns the repository link under the configured sub-URL.
func (repo *Repository) Link() string {
	return fmt.Sprintf("%s/%s", setting.AppSubURL, repo.FullName())
}
// ComposeCompareURL returns the repository comparison URL path in the form
// "owner/repo/compare/old...new".
// NOTE(review): the result carries no scheme/host prefix — presumably callers
// prepend the application URL; confirm at call sites.
func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
	return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID)
}
// HasAccess returns true when user has at least read access to this repository.
// The underlying lookup error is deliberately discarded; failure reads as "no access".
func (repo *Repository) HasAccess(u *User) bool {
	has, _ := HasAccess(u, repo, AccessModeRead)
	return has
}
// IsOwnedBy returns true when the given user ID owns this repository.
func (repo *Repository) IsOwnedBy(userID int64) bool {
	return repo.OwnerID == userID
}
// CanBeForked returns true if repository meets the requirements of being forked
// (i.e. it is not a bare repository).
func (repo *Repository) CanBeForked() bool {
	return !repo.IsBare
}
// CanEnablePulls returns true if repository meets the requirements of accepting
// pull requests (mirrors cannot accept them).
func (repo *Repository) CanEnablePulls() bool {
	return !repo.IsMirror
}
// AllowsPulls returns true if repository meets the requirements of accepting
// pull requests and has them enabled.
func (repo *Repository) AllowsPulls() bool {
	return repo.CanEnablePulls() && repo.EnablePulls
}
// CanEnableEditor returns true if repository meets the requirements of the web
// editor (mirrors are read-only and excluded).
func (repo *Repository) CanEnableEditor() bool {
	return !repo.IsMirror
}
// NextIssueIndex returns the next per-repository issue index, counting both
// issues and pull requests.
// FIXME: should have a mutex to prevent producing same index for two issues
// that are created closely enough.
func (repo *Repository) NextIssueIndex() int64 {
	return int64(repo.NumIssues+repo.NumPulls) + 1
}
var (
	// descPattern matches plain http/https URLs inside repository descriptions
	// so DescriptionHTML can turn them into anchors.
	descPattern = regexp.MustCompile(`https?://\S+`)
)
// DescriptionHTML sanitizes the repository description and converts every
// embedded URL into an HTML anchor, returning the result as template.HTML.
func (repo *Repository) DescriptionHTML() template.HTML {
	linkify := func(url string) string {
		return fmt.Sprintf(`<a href="%[1]s" target="_blank" rel="noopener">%[1]s</a>`, url)
	}
	sanitized := markdown.Sanitizer.Sanitize(repo.Description)
	return template.HTML(descPattern.ReplaceAllStringFunc(sanitized, linkify))
}
// LocalCopyPath returns the local repository working-copy path.
// NOTE(review): "local-rpeo" is a misspelling of "local-repo", but the string
// is load-bearing — existing installs have data under this path, so renaming
// it would orphan those copies. Leave as-is unless paired with a migration.
func (repo *Repository) LocalCopyPath() string {
	return path.Join(setting.AppDataPath, "tmp/local-rpeo", com.ToStr(repo.ID))
}
// UpdateLocalCopyBranch pulls latest changes of given branch from repoPath to localPath.
// It creates a new clone if local copy does not exist.
// This function checks out target branch by default, it is safe to assume subsequent
// operations are operating against target branch when caller has confidence for no race condition.
func UpdateLocalCopyBranch(repoPath, localPath, branch string) error {
	// Fresh clone when the local copy is missing; otherwise checkout + pull.
	if !com.IsExist(localPath) {
		if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{
			Timeout: time.Duration(setting.Git.Timeout.Clone) * time.Second,
			Branch:  branch,
		}); err != nil {
			return fmt.Errorf("git clone %s: %v", branch, err)
		}
	} else {
		if err := git.Checkout(localPath, git.CheckoutOptions{
			Branch: branch,
		}); err != nil {
			return fmt.Errorf("git checkout %s: %v", branch, err)
		}
		if err := git.Pull(localPath, git.PullRemoteOptions{
			Timeout: time.Duration(setting.Git.Timeout.Pull) * time.Second,
			Remote:  "origin",
			Branch:  branch,
		}); err != nil {
			return fmt.Errorf("git pull origin %s: %v", branch, err)
		}
	}
	return nil
}
// UpdateLocalCopyBranch makes sure the local copy of this repository in the
// given branch is up-to-date, cloning it on first use.
func (repo *Repository) UpdateLocalCopyBranch(branch string) error {
	return UpdateLocalCopyBranch(repo.RepoPath(), repo.LocalCopyPath(), branch)
}
// PatchPath returns corresponding patch file path of repository by given issue index,
// i.e. "<repo>/pulls/<index>.patch". The owner is loaded first because the
// repository path depends on the owner name.
func (repo *Repository) PatchPath(index int64) (string, error) {
	if err := repo.GetOwner(); err != nil {
		return "", err
	}
	return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil
}
// SavePatch saves patch data to its corresponding location by given issue index,
// creating the parent directory when necessary.
func (repo *Repository) SavePatch(index int64, patch []byte) error {
	patchPath, err := repo.PatchPath(index)
	if err != nil {
		return fmt.Errorf("PatchPath: %v", err)
	}
	patchDir := filepath.Dir(patchPath)
	if err = os.MkdirAll(patchDir, os.ModePerm); err != nil {
		return fmt.Errorf("Fail to create dir %s: %v", patchDir, err)
	}
	if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil {
		return fmt.Errorf("WriteFile: %v", err)
	}
	return nil
}
// isRepositoryExist reports whether the user already has a repository with the
// given name, requiring both a database record and the bare directory on disk.
func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) {
	bean := &Repository{
		OwnerID:   u.ID,
		LowerName: strings.ToLower(repoName),
	}
	has, err := e.Get(bean)
	return has && com.IsDir(RepoPath(u.Name, repoName)), err
}
// IsRepositoryExist returns true if the repository with given name under user
// already exists (both in the database and on disk).
func IsRepositoryExist(u *User, repoName string) (bool, error) {
	return isRepositoryExist(x, u, repoName)
}
// CloneLink represents different types of clone URLs of repository.
type CloneLink struct {
	SSH   string // SSH clone URL (scp-like or ssh:// form depending on port)
	HTTPS string // HTTPS clone URL
	Git   string // git:// clone URL; not populated by cloneLink in this file
}
// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
func ComposeHTTPSCloneURL(owner, repo string) string {
return fmt.Sprintf("%s%s/%s.git", setting.AppURL, owner, repo)
}
// cloneLink builds the SSH and HTTPS clone URLs for the repository, or for its
// wiki companion repository when isWiki is true.
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
	repoName := repo.Name
	if isWiki {
		repoName += ".wiki"
	}
	repo.Owner = repo.MustOwner()
	cl := &CloneLink{
		HTTPS: ComposeHTTPSCloneURL(repo.Owner.Name, repoName),
	}
	// Standard port uses the compact scp-like syntax; any other port needs the
	// explicit ssh:// form so the port can be expressed.
	if setting.SSH.Port == 22 {
		cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSH.Domain, repo.Owner.Name, repoName)
	} else {
		cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSH.Domain, setting.SSH.Port, repo.Owner.Name, repoName)
	}
	return cl
}
// CloneLink returns clone URLs of the repository (not its wiki).
func (repo *Repository) CloneLink() (cl *CloneLink) {
	return repo.cloneLink(false)
}
// MigrateRepoOptions contains the repository migrate options.
type MigrateRepoOptions struct {
	Name        string // Name of the new local repository
	Description string // Description shown on the repository page
	IsPrivate   bool   // Whether the migrated repository is private
	IsMirror    bool   // Whether to keep mirroring from the remote
	RemoteAddr  string // URL of the remote repository to migrate from
}
/*
	Known wiki URL layouts of popular hosting services:
	GitHub, GitLab, Gogs: *.wiki.git
	BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}
// wikiRemoteURL returns an accessible wiki repository URL derived from the
// given remote URL, trying each known hosting-service suffix in turn.
// It returns an empty string when no candidate is reachable.
func wikiRemoteURL(remote string) string {
	base := strings.TrimSuffix(remote, ".git")
	for _, suffix := range commonWikiURLSuffixes {
		if candidate := base + suffix; git.IsRepoURLAccessible(candidate) {
			return candidate
		}
	}
	return ""
}
// MigrateRepository migrates an existing repository from another project
// hosting service: it creates the database record, mirror-clones the remote
// (plus its wiki when reachable), detects bareness and default branch, and
// finishes as either a live mirror or a plain migrated repository.
func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) {
	repo, err := CreateRepository(u, CreateRepoOptions{
		Name:        opts.Name,
		Description: opts.Description,
		IsPrivate:   opts.IsPrivate,
		IsMirror:    opts.IsMirror,
	})
	if err != nil {
		return nil, err
	}
	repoPath := RepoPath(u.Name, opts.Name)
	wikiPath := WikiPath(u.Name, opts.Name)
	// Seed the watch count: every owner-team member for organizations,
	// otherwise just the owner.
	if u.IsOrganization() {
		t, err := u.GetOwnerTeam()
		if err != nil {
			// NOTE(review): later failure paths return the created repo so the
			// caller can clean up; this one returns nil — confirm intent.
			return nil, err
		}
		repo.NumWatches = t.NumMembers
	} else {
		repo.NumWatches = 1
	}
	migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
	// Clear any leftover directory before mirror-cloning the remote.
	if err := os.RemoveAll(repoPath); err != nil {
		return repo, fmt.Errorf("Fail to remove %s: %v", repoPath, err)
	}
	if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{
		Mirror:  true,
		Quiet:   true,
		Timeout: migrateTimeout,
	}); err != nil {
		return repo, fmt.Errorf("Clone: %v", err)
	}
	// Wiki clone failure is logged but does not abort the migration.
	wikiRemotePath := wikiRemoteURL(opts.RemoteAddr)
	if len(wikiRemotePath) > 0 {
		if err := os.RemoveAll(wikiPath); err != nil {
			return repo, fmt.Errorf("Fail to remove %s: %v", wikiPath, err)
		}
		if err = git.Clone(wikiRemotePath, wikiPath, git.CloneRepoOptions{
			Mirror:  true,
			Quiet:   true,
			Timeout: migrateTimeout,
		}); err != nil {
			log.Info("Clone wiki: %v", err)
		}
	}
	// Check if repository is empty (no HEAD commit means bare).
	_, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1")
	if err != nil {
		if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") {
			repo.IsBare = true
		} else {
			return repo, fmt.Errorf("check bare: %v - %s", err, stderr)
		}
	}
	if !repo.IsBare {
		// Try to get HEAD branch and set it as default branch.
		gitRepo, err := git.OpenRepository(repoPath)
		if err != nil {
			return repo, fmt.Errorf("OpenRepository: %v", err)
		}
		headBranch, err := gitRepo.GetHEADBranch()
		if err != nil {
			return repo, fmt.Errorf("GetHEADBranch: %v", err)
		}
		if headBranch != nil {
			repo.DefaultBranch = headBranch.Name
		}
	}
	// Mirrors keep their remote configuration and get a sync schedule;
	// plain migrations have mirror remnants cleaned up by CleanUpMigrateInfo.
	if opts.IsMirror {
		if _, err = x.InsertOne(&Mirror{
			RepoID:      repo.ID,
			Interval:    setting.Mirror.DefaultInterval,
			EnablePrune: true,
			NextUpdate:  time.Now().Add(time.Duration(setting.Mirror.DefaultInterval) * time.Hour),
		}); err != nil {
			return repo, fmt.Errorf("InsertOne: %v", err)
		}
		repo.IsMirror = true
		return repo, UpdateRepository(repo, false)
	}
	return CleanUpMigrateInfo(repo)
}
// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials embedded in the remote URL.
func cleanUpMigrateGitConfig(configPath string) error {
	cfg, err := ini.Load(configPath)
	if err != nil {
		return fmt.Errorf("open config file: %v", err)
	}
	cfg.DeleteSection(`remote "origin"`)
	if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
		return fmt.Errorf("save config file: %v", err)
	}
	return nil
}
// createUpdateHook installs the git "update" hook script for the repository at
// repoPath, rendered from tplUpdateHook with the configured script type,
// application path, and custom configuration path.
func createUpdateHook(repoPath string) error {
	return git.SetUpdateHook(repoPath,
		fmt.Sprintf(tplUpdateHook, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf, path.Dir(setting.AppPath), setting.CustomConf))
}
// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that
// don't need to be done for mirrors: it installs update hooks and strips the
// mirror "origin" remote from the git config of both repository and wiki.
func CleanUpMigrateInfo(repo *Repository) (*Repository, error) {
	repoPath := repo.RepoPath()
	if err := createUpdateHook(repoPath); err != nil {
		return repo, fmt.Errorf("createUpdateHook: %v", err)
	}
	if repo.HasWiki() {
		if err := createUpdateHook(repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("createUpdateHook (wiki): %v", err)
		}
	}
	if err := cleanUpMigrateGitConfig(repo.GitConfigPath()); err != nil {
		return repo, fmt.Errorf("cleanUpMigrateGitConfig: %v", err)
	}
	if repo.HasWiki() {
		if err := cleanUpMigrateGitConfig(path.Join(repo.WikiPath(), "config")); err != nil {
			return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %v", err)
		}
	}
	return repo, UpdateRepository(repo, false)
}
// initRepoCommit stages, commits, and pushes the initial content of the
// temporary working directory to origin/master. It runs the three git
// commands in sequence, returning stderr of the first failing step.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
	steps := []struct {
		name string
		args []string
	}{
		{"git add", []string{"git", "add", "--all"}},
		{"git commit", []string{"git", "commit",
			fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
			"-m", "Initial commit"}},
		{"git push", []string{"git", "push", "origin", "master"}},
	}
	for _, step := range steps {
		var stderr string
		if _, stderr, err = process.ExecDir(-1,
			tmpPath, fmt.Sprintf("initRepoCommit (%s): %s", step.name, tmpPath),
			step.args[0], step.args[1:]...); err != nil {
			return fmt.Errorf("%s: %s", step.name, stderr)
		}
	}
	return nil
}
// CreateRepoOptions contains the create repository options.
type CreateRepoOptions struct {
	Name        string // Repository name
	Description string // Repository description
	Gitignores  string // Comma-separated .gitignore template names
	License     string // License template name
	Readme      string // README template name
	IsPrivate   bool   // Whether the repository is private
	IsMirror    bool   // Whether the repository is a mirror
	AutoInit    bool   // Whether to create the initial commit (README etc.)
}
// getRepoInitFile loads a repository init template (readme/gitignore/license)
// by type and name, preferring a user-provided custom file over the bundled asset.
func getRepoInitFile(tp, name string) ([]byte, error) {
	relPath := path.Join("conf", tp, strings.TrimLeft(name, "./"))
	// A custom file under the custom path overrides the bundled asset.
	if customPath := path.Join(setting.CustomPath, relPath); com.IsFile(customPath) {
		return ioutil.ReadFile(customPath)
	}
	return bindata.Asset(relPath)
}
// prepareRepoCommit clones the bare repository into tmpDir and writes the
// initial files (README.md, optional .gitignore and LICENSE) selected by opts,
// ready to be committed by initRepoCommit.
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
	// Clone to temporary path and do the init commit.
	_, stderr, err := process.Exec(
		fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
	if err != nil {
		return fmt.Errorf("git clone: %v - %s", err, stderr)
	}
	// README: rendered from the chosen template with repository metadata.
	data, err := getRepoInitFile("readme", opts.Readme)
	if err != nil {
		return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err)
	}
	cloneLink := repo.CloneLink()
	match := map[string]string{
		"Name":           repo.Name,
		"Description":    repo.Description,
		"CloneURL.SSH":   cloneLink.SSH,
		"CloneURL.HTTPS": cloneLink.HTTPS,
	}
	if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"),
		[]byte(com.Expand(string(data), match)), 0644); err != nil {
		return fmt.Errorf("write README.md: %v", err)
	}
	// .gitignore: concatenation of all selected templates, each under a header.
	if len(opts.Gitignores) > 0 {
		var buf bytes.Buffer
		names := strings.Split(opts.Gitignores, ",")
		for _, name := range names {
			data, err = getRepoInitFile("gitignore", name)
			if err != nil {
				return fmt.Errorf("getRepoInitFile[%s]: %v", name, err)
			}
			buf.WriteString("# ---> " + name + "\n")
			buf.Write(data)
			buf.WriteString("\n")
		}
		if buf.Len() > 0 {
			if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil {
				return fmt.Errorf("write .gitignore: %v", err)
			}
		}
	}
	// LICENSE: copied verbatim from the chosen template.
	if len(opts.License) > 0 {
		data, err = getRepoInitFile("license", opts.License)
		if err != nil {
			return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err)
		}
		if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil {
			return fmt.Errorf("write LICENSE: %v", err)
		}
	}
	return nil
}
// initRepository creates the bare repository on disk, installs the update hook,
// and — when opts.AutoInit is set — produces the initial commit (README,
// .gitignore, LICENSE). It then refreshes the database record and marks the
// repository bare when no initial commit was made.
func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) {
	// Somehow the directory could exist.
	if com.IsExist(repoPath) {
		return fmt.Errorf("initRepository: path already exists: %s", repoPath)
	}
	// Init bare new repository.
	if err = git.InitRepository(repoPath, true); err != nil {
		return fmt.Errorf("InitRepository: %v", err)
	} else if err = createUpdateHook(repoPath); err != nil {
		return fmt.Errorf("createUpdateHook: %v", err)
	}
	tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond()))
	// Initialize repository according to user's choice.
	if opts.AutoInit {
		if err := os.MkdirAll(tmpDir, os.ModePerm); err != nil {
			return fmt.Errorf("Fail to create dir %s: %v", tmpDir, err)
		}
		defer os.RemoveAll(tmpDir)
		if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil {
			return fmt.Errorf("prepareRepoCommit: %v", err)
		}
		// Apply changes and commit.
		if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil {
			return fmt.Errorf("initRepoCommit: %v", err)
		}
	}
	// Re-fetch the repository from database before updating it (else it would
	// override changes that were done earlier with sql)
	if repo, err = getRepositoryByID(e, repo.ID); err != nil {
		return fmt.Errorf("getRepositoryByID: %v", err)
	}
	if !opts.AutoInit {
		repo.IsBare = true
	}
	repo.DefaultBranch = "master"
	if err = updateRepository(e, repo, false); err != nil {
		return fmt.Errorf("updateRepository: %v", err)
	}
	return nil
}
var (
	// reservedRepoNames are exact repository names that can never be used.
	reservedRepoNames = []string{".", ".."}
	// reservedRepoPatterns are glob patterns repository names must not match,
	// to avoid clashing with on-disk ".git"/".wiki" directory suffixes.
	reservedRepoPatterns = []string{"*.git", "*.wiki"}
)
// IsUsableRepoName returns nil when the repository name is usable, or an error
// describing why it is reserved.
func IsUsableRepoName(name string) error {
	return isUsableName(reservedRepoNames, reservedRepoPatterns, name)
}
// createRepository inserts the repository record within the given session,
// updates the owner's counters and visibility preference, wires up access
// (owner team for organizations, direct access otherwise), records the
// creation action, and registers the default door43 notification webhook.
// Fix: removed the stray semicolon after CreateWebhook(w) (not gofmt-clean).
func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) {
	if err = IsUsableRepoName(repo.Name); err != nil {
		return err
	}
	has, err := isRepositoryExist(e, u, repo.Name)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{u.Name, repo.Name}
	}
	if _, err = e.Insert(repo); err != nil {
		return err
	}
	u.NumRepos++
	// Remember visibility preference.
	u.LastRepoVisibility = repo.IsPrivate
	if err = updateUser(e, u); err != nil {
		return fmt.Errorf("updateUser: %v", err)
	}
	// Give access to all members in owner team.
	if u.IsOrganization() {
		t, err := u.getOwnerTeam(e)
		if err != nil {
			return fmt.Errorf("getOwnerTeam: %v", err)
		} else if err = t.addRepository(e, repo); err != nil {
			return fmt.Errorf("addRepository: %v", err)
		}
	} else {
		// Organization automatically called this in addRepository method.
		if err = repo.recalculateAccesses(e); err != nil {
			return fmt.Errorf("recalculateAccesses: %v", err)
		}
	}
	if err = watchRepo(e, u.ID, repo.ID, true); err != nil {
		return fmt.Errorf("watchRepo: %v", err)
	} else if err = newRepoAction(e, u, repo); err != nil {
		return fmt.Errorf("newRepoAction: %v", err)
	}
	// Register the hard-coded door43 push webhook for every new repository.
	// NOTE(review): the endpoint URL is baked in here — consider moving it to
	// configuration if other deployments use this code.
	w := &Webhook{
		RepoID:      repo.ID,
		URL:         "https://api.door43.org/client/webhook",
		ContentType: ContentTypeJSON,
		Secret:      "",
		HookEvent: &HookEvent{
			PushOnly:       true,
			SendEverything: false,
			ChooseEvents:   false,
			HookEvents: HookEvents{
				Create:      false,
				Push:        false,
				PullRequest: false,
			},
		},
		IsActive:     true,
		HookTaskType: GOGS,
		OrgID:        0,
	}
	// Best-effort: webhook registration failure must not abort repository
	// creation, so errors from UpdateEvent/CreateWebhook are deliberately
	// not propagated.
	if err := w.UpdateEvent(); err == nil {
		CreateWebhook(w)
	}
	return nil
}
// CreateRepository creates a repository for given user or organization inside
// a transaction: it inserts the database record, initializes the bare git
// repository on disk (unless it is a mirror), and runs "git update-server-info"
// so the repository is servable over dumb HTTP.
func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) {
	if !u.CanCreateRepo() {
		return nil, ErrReachLimitOfRepo{u.MaxRepoCreation}
	}
	repo := &Repository{
		OwnerID:      u.ID,
		Owner:        u,
		Name:         opts.Name,
		LowerName:    strings.ToLower(opts.Name),
		Description:  opts.Description,
		IsPrivate:    opts.IsPrivate,
		EnableWiki:   true,
		EnableIssues: true,
		EnablePulls:  true,
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return nil, err
	}
	if err = createRepository(sess, u, repo); err != nil {
		return nil, err
	}
	// No need for init mirror.
	if !opts.IsMirror {
		repoPath := RepoPath(u.Name, repo.Name)
		if err = initRepository(sess, repoPath, u, repo, opts); err != nil {
			// On failure, remove the half-created directory before reporting.
			if err2 := os.RemoveAll(repoPath); err2 != nil {
				log.Error(4, "initRepository: %v", err)
				return nil, fmt.Errorf(
					"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2)
			}
			return nil, fmt.Errorf("initRepository: %v", err)
		}
		_, stderr, err := process.ExecDir(-1,
			repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
			"git", "update-server-info")
		if err != nil {
			return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
		}
	}
	return repo, sess.Commit()
}
// countRepositories counts repositories, optionally restricted to one owner
// (userID > 0) and optionally excluding private ones. A query error is logged
// and the count returned by the engine is passed through unchanged.
func countRepositories(userID int64, private bool) int64 {
	query := x.Where("id > 0")
	if userID > 0 {
		query.And("owner_id = ?", userID)
	}
	if !private {
		query.And("is_private=?", false)
	}
	n, err := query.Count(new(Repository))
	if err != nil {
		log.Error(4, "countRepositories: %v", err)
	}
	return n
}
// CountRepositories returns number of repositories.
// Argument private only takes effect when it is false,
// set it true to count all repositories.
func CountRepositories(private bool) int64 {
	return countRepositories(-1, private)
}
// CountUserRepositories returns number of repositories user owns.
// Argument private only takes effect when it is false,
// set it true to count all repositories.
func CountUserRepositories(userID int64, private bool) int64 {
	return countRepositories(userID, private)
}
// Repositories returns one page of repositories ordered by ascending ID.
func Repositories(page, pageSize int) (_ []*Repository, err error) {
	repos := make([]*Repository, 0, pageSize)
	err = x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos)
	return repos, err
}
// RepositoriesWithUsers returns one page of repositories with their owner
// users loaded.
func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
	repos, err := Repositories(page, pageSize)
	if err != nil {
		return nil, fmt.Errorf("Repositories: %v", err)
	}
	for _, repo := range repos {
		if err = repo.GetOwner(); err != nil {
			return nil, err
		}
	}
	return repos, nil
}
// RepoPath returns the on-disk repository path for the given user and
// repository name; the repository name is lower-cased and suffixed ".git".
func RepoPath(userName, repoName string) string {
	return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
}
// TransferOwnership transfers all corresponding settings from the old owner to
// the new one inside a transaction: it rewrites ownership, prunes redundant
// collaborators, moves team-repository relations, fixes repository counters,
// records the transfer action, and finally renames the on-disk repository and
// wiki directories.
func TransferOwnership(doer *User, newOwnerName string, repo *Repository) error {
	newOwner, err := GetUserByName(newOwnerName)
	if err != nil {
		return fmt.Errorf("get new owner '%s': %v", newOwnerName, err)
	}
	// Check if new owner has repository with same name.
	has, err := IsRepositoryExist(newOwner, repo.Name)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{newOwnerName, repo.Name}
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return fmt.Errorf("sess.Begin: %v", err)
	}
	owner := repo.Owner
	// Note: we have to set value here to make sure recalculate accesses is based on
	// new owner.
	repo.OwnerID = newOwner.ID
	repo.Owner = newOwner
	// Update repository.
	if _, err := sess.Id(repo.ID).Update(repo); err != nil {
		return fmt.Errorf("update owner: %v", err)
	}
	// Remove redundant collaborators: the new owner and members of the new
	// owner organization get access anyway.
	collaborators, err := repo.getCollaborators(sess)
	if err != nil {
		return fmt.Errorf("getCollaborators: %v", err)
	}
	// Dummy object reused for each delete.
	collaboration := &Collaboration{RepoID: repo.ID}
	for _, c := range collaborators {
		collaboration.UserID = c.ID
		if c.ID == newOwner.ID || newOwner.IsOrgMember(c.ID) {
			if _, err = sess.Delete(collaboration); err != nil {
				return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
			}
		}
	}
	// Remove old team-repository relations.
	if owner.IsOrganization() {
		if err = owner.getTeams(sess); err != nil {
			return fmt.Errorf("getTeams: %v", err)
		}
		for _, t := range owner.Teams {
			if !t.hasRepository(sess, repo.ID) {
				continue
			}
			t.NumRepos--
			if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil {
				return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err)
			}
		}
		if err = owner.removeOrgRepo(sess, repo.ID); err != nil {
			return fmt.Errorf("removeOrgRepo: %v", err)
		}
	}
	if newOwner.IsOrganization() {
		t, err := newOwner.getOwnerTeam(sess)
		if err != nil {
			return fmt.Errorf("getOwnerTeam: %v", err)
		} else if err = t.addRepository(sess, repo); err != nil {
			return fmt.Errorf("add to owner team: %v", err)
		}
	} else {
		// Organization called this in addRepository method.
		if err = repo.recalculateAccesses(sess); err != nil {
			return fmt.Errorf("recalculateAccesses: %v", err)
		}
	}
	// Update repository count.
	if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.ID); err != nil {
		return fmt.Errorf("increase new owner repository count: %v", err)
	} else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.ID); err != nil {
		return fmt.Errorf("decrease old owner repository count: %v", err)
	}
	if err = watchRepo(sess, newOwner.ID, repo.ID, true); err != nil {
		return fmt.Errorf("watchRepo: %v", err)
	} else if err = transferRepoAction(sess, doer, owner, repo); err != nil {
		return fmt.Errorf("transferRepoAction: %v", err)
	}
	// Rename remote repository to new path and delete local copy.
	// NOTE(review): the filesystem renames happen before sess.Commit(); a
	// commit failure after a successful rename leaves disk and DB out of sync.
	dir := UserPath(newOwner.Name)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("Fail to create dir %s: %v", dir, err)
	}
	if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
		return fmt.Errorf("rename repository directory: %v", err)
	}
	RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
	// Rename remote wiki repository to new path and delete local copy.
	wikiPath := WikiPath(owner.Name, repo.Name)
	if com.IsExist(wikiPath) {
		RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
		if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
			return fmt.Errorf("rename repository wiki: %v", err)
		}
	}
	return sess.Commit()
}
// ChangeRepositoryName renames the on-disk repository and wiki directories
// from the old name to the new one after validating the new name.
// NOTE(review): this function does not update the database record itself —
// presumably the caller persists repo.Name/LowerName afterwards; verify.
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) {
	oldRepoName = strings.ToLower(oldRepoName)
	newRepoName = strings.ToLower(newRepoName)
	if err = IsUsableRepoName(newRepoName); err != nil {
		return err
	}
	has, err := IsRepositoryExist(u, newRepoName)
	if err != nil {
		return fmt.Errorf("IsRepositoryExist: %v", err)
	} else if has {
		return ErrRepoAlreadyExist{u.Name, newRepoName}
	}
	repo, err := GetRepositoryByName(u.ID, oldRepoName)
	if err != nil {
		return fmt.Errorf("GetRepositoryByName: %v", err)
	}
	// Change repository directory name.
	if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil {
		return fmt.Errorf("rename repository directory: %v", err)
	}
	wikiPath := repo.WikiPath()
	if com.IsExist(wikiPath) {
		if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil {
			return fmt.Errorf("rename repository wiki: %v", err)
		}
		RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
	}
	return nil
}
// getRepositoriesByForkID returns all repositories that are forks of the
// repository with the given ID.
func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) {
	repos := make([]*Repository, 0, 10)
	err := e.Where("fork_id=?", forkID).Find(&repos)
	return repos, err
}
// GetRepositoriesByForkID returns all repositories with given fork ID,
// using the default engine.
func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) {
	return getRepositoriesByForkID(x, forkID)
}
// updateRepository persists all columns of the repository after normalizing
// name and truncating over-long description/website. When visibility changed
// it also recalculates organization team accesses, maintains the
// git-daemon-export-ok marker file, and recursively propagates the new
// visibility to every fork.
func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
	repo.LowerName = strings.ToLower(repo.Name)
	// Column limits: description and website are capped at 255 characters.
	if len(repo.Description) > 255 {
		repo.Description = repo.Description[:255]
	}
	if len(repo.Website) > 255 {
		repo.Website = repo.Website[:255]
	}
	if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil {
		return fmt.Errorf("update: %v", err)
	}
	if visibilityChanged {
		if err = repo.getOwner(e); err != nil {
			return fmt.Errorf("getOwner: %v", err)
		}
		if repo.Owner.IsOrganization() {
			// Organization repository need to recalculate access table when visibility is changed.
			if err = repo.recalculateTeamAccesses(e, 0); err != nil {
				return fmt.Errorf("recalculateTeamAccesses: %v", err)
			}
		}
		// Create/Remove git-daemon-export-ok for git-daemon: the marker file
		// must exist exactly when the repository is public.
		daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
		if repo.IsPrivate && com.IsExist(daemonExportFile) {
			if err = os.Remove(daemonExportFile); err != nil {
				log.Error(4, "Failed to remove %s: %v", daemonExportFile, err)
			}
		} else if !repo.IsPrivate && !com.IsExist(daemonExportFile) {
			if f, err := os.Create(daemonExportFile); err != nil {
				log.Error(4, "Failed to create %s: %v", daemonExportFile, err)
			} else {
				f.Close()
			}
		}
		// Forks inherit the new visibility recursively.
		forkRepos, err := getRepositoriesByForkID(e, repo.ID)
		if err != nil {
			return fmt.Errorf("getRepositoriesByForkID: %v", err)
		}
		for i := range forkRepos {
			forkRepos[i].IsPrivate = repo.IsPrivate
			if err = updateRepository(e, forkRepos[i], true); err != nil {
				return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err)
			}
		}
	}
	return nil
}
// UpdateRepository updates a repository inside a transaction.
func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return err
	}
	// Bug fix: the update must run on the transaction session "sess", not on
	// the global engine "x" — otherwise the work executed outside the
	// transaction and the surrounding Begin/Commit protected nothing.
	if err = updateRepository(sess, repo, visibilityChanged); err != nil {
		return fmt.Errorf("updateRepository: %v", err)
	}
	return sess.Commit()
}
// DeleteRepository deletes a repository for a user or organization: it removes
// team relations, all dependent rows (accesses, actions, watches, stars,
// mirror, issues with comments and attachments, milestones, releases,
// collaborations, pull requests), decrements counters, and finally removes the
// repository, wiki, and attachment files from disk.
func DeleteRepository(uid, repoID int64) error {
	repo := &Repository{ID: repoID, OwnerID: uid}
	has, err := x.Get(repo)
	if err != nil {
		return err
	} else if !has {
		return ErrRepoNotExist{repoID, uid, ""}
	}
	// In case owner is an organization, its teams must be loaded so the
	// team-repository relations can be removed below.
	org, err := GetUserByID(uid)
	if err != nil {
		return err
	}
	if org.IsOrganization() {
		if err = org.GetTeams(); err != nil {
			return err
		}
	}
	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return err
	}
	if org.IsOrganization() {
		for _, t := range org.Teams {
			if !t.hasRepository(sess, repoID) {
				continue
			} else if err = t.removeRepository(sess, repo, false); err != nil {
				return err
			}
		}
	}
	if err = deleteBeans(sess,
		&Repository{ID: repoID},
		&Access{RepoID: repo.ID},
		&Action{RepoID: repo.ID},
		&Watch{RepoID: repoID},
		&Star{RepoID: repoID},
		&Mirror{RepoID: repoID},
		&IssueUser{RepoID: repoID},
		&Milestone{RepoID: repoID},
		&Release{RepoID: repoID},
		&Collaboration{RepoID: repoID},
		&PullRequest{BaseRepoID: repoID},
	); err != nil {
		return fmt.Errorf("deleteBeans: %v", err)
	}
	// Delete comments and attachments.
	issues := make([]*Issue, 0, 25)
	attachmentPaths := make([]string, 0, len(issues))
	if err = sess.
		Where("repo_id=?", repoID).
		Find(&issues); err != nil {
		return err
	}
	for i := range issues {
		if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil {
			return err
		}
		// Collect attachment file paths now; the files are removed from disk
		// only after the transaction commits.
		attachments := make([]*Attachment, 0, 5)
		if err = sess.
			Where("issue_id=?", issues[i].ID).
			Find(&attachments); err != nil {
			return err
		}
		for j := range attachments {
			attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
		}
		if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil {
			return err
		}
	}
	if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil {
		return err
	}
	if repo.IsFork {
		if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
			return fmt.Errorf("decrease fork count: %v", err)
		}
	}
	if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil {
		return err
	}
	// Remove repository files.
	repoPath := repo.repoPath(sess)
	RemoveAllWithNotice("Delete repository files", repoPath)
	repo.DeleteWiki()
	// Remove attachment files.
	for i := range attachmentPaths {
		RemoveAllWithNotice("Delete attachment", attachmentPaths[i])
	}
	if err = sess.Commit(); err != nil {
		return fmt.Errorf("Commit: %v", err)
	}
	// Detach forks of the deleted repository; failure here is only logged
	// because the main deletion has already committed.
	if repo.NumForks > 0 {
		if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
			log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
		}
	}
	return nil
}
// GetRepositoryByRef returns a Repository specified by a GFM reference.
// See https://help.github.com/articles/writing-on-github#references for more information on the syntax.
func GetRepositoryByRef(ref string) (*Repository, error) {
	sep := strings.IndexByte(ref, '/')
	// A valid "owner/repo" reference needs at least two characters before the slash.
	if sep < 2 {
		return nil, ErrInvalidReference
	}

	ownerName := ref[:sep]
	repoName := ref[sep+1:]

	owner, err := GetUserByName(ownerName)
	if err != nil {
		return nil, err
	}

	return GetRepositoryByName(owner.ID, repoName)
}
// GetRepositoryByName returns the repository by given name under user if exists.
// It returns ErrRepoNotExist when no repository with that (owner, lower name)
// pair is found.
func GetRepositoryByName(ownerID int64, name string) (*Repository, error) {
	repo := &Repository{
		OwnerID:   ownerID,
		LowerName: strings.ToLower(name),
	}
	has, err := x.Get(repo)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrRepoNotExist{0, ownerID, name}
	}
	// err is guaranteed nil at this point; return nil explicitly rather than
	// the stale variable for clarity.
	return repo, nil
}
// getRepositoryByID loads a repository by primary key using the given Engine,
// so it can participate in an ongoing session. Returns ErrRepoNotExist when
// no row matches the ID.
func getRepositoryByID(e Engine, id int64) (*Repository, error) {
	var repo Repository
	has, err := e.Id(id).Get(&repo)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrRepoNotExist{id, 0, ""}
	}
	return &repo, nil
}
// GetRepositoryByID returns the repository by given id if exists.
// Thin wrapper around getRepositoryByID using the default engine.
func GetRepositoryByID(id int64) (*Repository, error) {
	return getRepositoryByID(x, id)
}

// GetUserRepositories returns a list of repositories of given user.
// When private is false, only public repositories are returned.
// page is clamped to a minimum of 1; results are newest-updated first.
func GetUserRepositories(userID int64, private bool, page, pageSize int) ([]*Repository, error) {
	sess := x.
		Where("owner_id = ?", userID).
		Desc("updated_unix")
	if !private {
		sess.And("is_private=?", false)
	}

	if page <= 0 {
		page = 1
	}
	sess.Limit(pageSize, (page-1)*pageSize)

	repos := make([]*Repository, 0, pageSize)
	return repos, sess.Find(&repos)
}
// GetUserMirrorRepositories returns a list of mirror repositories of given user.
func GetUserMirrorRepositories(userID int64) ([]*Repository, error) {
	mirrors := make([]*Repository, 0, 10)
	err := x.
		Where("owner_id = ?", userID).
		And("is_mirror = ?", true).
		Find(&mirrors)
	return mirrors, err
}
// GetRecentUpdatedRepositories returns the list of public repositories that
// are recently updated, paginated by (page, pageSize).
func GetRecentUpdatedRepositories(page, pageSize int) (repos []*Repository, err error) {
	// BUG FIX: the original chained a second bare Limit(pageSize) after
	// Limit(pageSize, offset), which discarded the offset and broke
	// pagination beyond the first page.
	return repos, x.
		Where("is_private=?", false).
		Limit(pageSize, (page-1)*pageSize).
		Desc("updated_unix").
		Find(&repos)
}
// getRepositoryCount returns the number of repositories owned by u, using the
// given Engine so callers can count inside an ongoing session/transaction.
func getRepositoryCount(e Engine, u *User) (int64, error) {
	// BUG FIX: the original queried the global engine x and ignored the e
	// parameter, breaking callers that pass a transactional session.
	return e.Count(&Repository{OwnerID: u.ID})
}
// GetRepositoryCount returns the total number of repositories of user.
func GetRepositoryCount(u *User) (int64, error) {
	return getRepositoryCount(x, u)
}

// SearchRepoOptions holds the search options
type SearchRepoOptions struct {
	Keyword  string // lowercased substring matched against lower_name
	OwnerID  int64  // restrict results to this owner when > 0
	OrderBy  string // raw ORDER BY clause; empty means engine default order
	Private  bool   // Include private repositories in results
	Page     int    // 1-based page number; values <= 0 are treated as 1
	PageSize int    // Can be smaller than or equal to setting.ExplorePagingNum
}
// SearchRepositoryByName takes keyword and part of repository name to search,
// it returns results in given range and number of total results.
func SearchRepositoryByName(opts *SearchRepoOptions) (repos []*Repository, _ int64, _ error) {
	// An empty keyword matches nothing by design.
	if len(opts.Keyword) == 0 {
		return repos, 0, nil
	}
	opts.Keyword = strings.ToLower(opts.Keyword)

	if opts.Page <= 0 {
		opts.Page = 1
	}

	repos = make([]*Repository, 0, opts.PageSize)

	// Append conditions
	sess := x.Where("LOWER(lower_name) LIKE ?", "%"+opts.Keyword+"%")
	if opts.OwnerID > 0 {
		sess.And("owner_id = ?", opts.OwnerID)
	}
	if !opts.Private {
		sess.And("is_private=?", false)
	}

	// Count on a value copy of the session so the accumulated conditions are
	// not consumed before the Find below runs with the same filter.
	var countSess xorm.Session
	countSess = *sess
	count, err := countSess.Count(new(Repository))
	if err != nil {
		return nil, 0, fmt.Errorf("Count: %v", err)
	}

	if len(opts.OrderBy) > 0 {
		sess.OrderBy(opts.OrderBy)
	}
	return repos, count, sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&repos)
}
// DeleteRepositoryArchives deletes all repositories' archives.
func DeleteRepositoryArchives() error {
return x.
Where("id > 0").
Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives"))
})
}
// gatherMissingRepoRecords returns repository records whose Git directory is
// missing from disk. An iteration failure is reported via an admin notice
// rather than aborting, so a partial list may be returned.
func gatherMissingRepoRecords() ([]*Repository, error) {
	repos := make([]*Repository, 0, 10)
	if err := x.
		Where("id > 0").
		Iterate(new(Repository),
			func(idx int, bean interface{}) error {
				repo := bean.(*Repository)
				if !com.IsDir(repo.RepoPath()) {
					repos = append(repos, repo)
				}
				return nil
			}); err != nil {
		if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil {
			// BUG FIX: wrap err2 (the notice failure) — the original wrapped
			// the unrelated iteration error under this message.
			return nil, fmt.Errorf("CreateRepositoryNotice: %v", err2)
		}
	}
	return repos, nil
}
// DeleteMissingRepositories deletes all repository records that lost Git files.
func DeleteMissingRepositories() error {
	repos, err := gatherMissingRepoRecords()
	if err != nil {
		return fmt.Errorf("gatherMissingRepoRecords: %v", err)
	}

	if len(repos) == 0 {
		return nil
	}

	for _, repo := range repos {
		log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID)
		if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil {
			// A single failed deletion is downgraded to an admin notice so one
			// bad repository does not abort the whole cleanup pass.
			if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil {
				// BUG FIX: report err2 (the notice failure), not the original err.
				return fmt.Errorf("CreateRepositoryNotice: %v", err2)
			}
		}
	}
	return nil
}
// ReinitMissingRepositories reinitializes all repository records that lost Git files.
func ReinitMissingRepositories() error {
	repos, err := gatherMissingRepoRecords()
	if err != nil {
		return fmt.Errorf("gatherMissingRepoRecords: %v", err)
	}

	if len(repos) == 0 {
		return nil
	}

	for _, repo := range repos {
		log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID)
		if err := git.InitRepository(repo.RepoPath(), true); err != nil {
			// Init failures become admin notices so the loop keeps going.
			if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil {
				// BUG FIX: report err2 (the notice failure), not the original err.
				return fmt.Errorf("CreateRepositoryNotice: %v", err2)
			}
		}
	}
	return nil
}
// RewriteRepositoryUpdateHook rewrites all repositories' update hook.
func RewriteRepositoryUpdateHook() error {
return x.
Where("id > 0").
Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return createUpdateHook(repo.RepoPath())
})
}
// Prevent duplicate running tasks.
var taskStatusTable = sync.NewStatusTable()

// Keys used in taskStatusTable to guard each background task.
const (
	mirrorUpdate = "mirror_update"
	gitFsck = "git_fsck"
	checkRepos = "check_repos"
)
// GitFsck calls 'git fsck' to check repository health.
func GitFsck() {
	// Skip if another fsck pass is already running.
	if taskStatusTable.IsRunning(gitFsck) {
		return
	}
	taskStatusTable.Start(gitFsck)
	defer taskStatusTable.Stop(gitFsck)

	log.Trace("Doing: GitFsck")

	if err := x.
		Where("id>0").
		Iterate(new(Repository),
			func(idx int, bean interface{}) error {
				repo := bean.(*Repository)
				repoPath := repo.RepoPath()
				if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
					// A failing repository is reported via warning + admin
					// notice; the closure returns nil so iteration continues.
					desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, err)
					log.Warn(desc)
					if err = CreateRepositoryNotice(desc); err != nil {
						log.Error(4, "CreateRepositoryNotice: %v", err)
					}
				}
				return nil
			}); err != nil {
		log.Error(4, "GitFsck: %v", err)
	}
}
// GitGcRepos calls 'git gc' to remove unnecessary files and optimize the local repository
func GitGcRepos() error {
	// Build the gc argument list once; extra flags come from configuration.
	args := append([]string{"gc"}, setting.Git.GCArgs...)
	return x.
		Where("id > 0").
		Iterate(new(Repository),
			func(idx int, bean interface{}) error {
				repo := bean.(*Repository)
				// Owner is needed to resolve the on-disk repository path.
				if err := repo.GetOwner(); err != nil {
					return err
				}
				_, stderr, err := process.ExecDir(
					time.Duration(setting.Git.Timeout.GC)*time.Second,
					RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection",
					"git", args...)
				if err != nil {
					// Returning an error aborts the iteration over repositories.
					return fmt.Errorf("%v: %v", err, stderr)
				}
				return nil
			})
}
// repoChecker pairs a query that finds rows with a stale denormalized counter
// with the SQL that corrects one row. correctSQL receives (id, id) arguments.
type repoChecker struct {
	querySQL, correctSQL string
	desc string // human-readable name used in log messages
}
// repoStatsCheck runs one consistency checker: it selects the IDs of rows
// whose cached counter is stale, then rewrites each counter in place.
// Per-row failures are logged and do not stop the loop.
func repoStatsCheck(checker *repoChecker) {
	rows, err := x.Query(checker.querySQL)
	if err != nil {
		log.Error(4, "Select %s: %v", checker.desc, err)
		return
	}
	for _, row := range rows {
		id := com.StrTo(row["id"]).MustInt64()
		log.Trace("Updating %s: %d", checker.desc, id)
		if _, err = x.Exec(checker.correctSQL, id, id); err != nil {
			log.Error(4, "Update %s[%d]: %v", checker.desc, id, err)
		}
	}
}
// CheckRepoStats checks the repository stats
func CheckRepoStats() {
	// Guard against concurrent runs of this task.
	if taskStatusTable.IsRunning(checkRepos) {
		return
	}
	taskStatusTable.Start(checkRepos)
	defer taskStatusTable.Stop(checkRepos)

	log.Trace("Doing: CheckRepoStats")

	// Simple counters handled by the generic repoChecker mechanism.
	checkers := []*repoChecker{
		// Repository.NumWatches
		{
			"SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)",
			"UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?",
			"repository count 'num_watches'",
		},
		// Repository.NumStars
		{
			"SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)",
			"UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
			"repository count 'num_stars'",
		},
		// Label.NumIssues
		{
			"SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
			"UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?",
			"label count 'num_issues'",
		},
		// User.NumRepos
		{
			"SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)",
			"UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?",
			"user count 'num_repos'",
		},
		// Issue.NumComments
		{
			"SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)",
			"UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?",
			"issue count 'num_comments'",
		},
	}
	for i := range checkers {
		repoStatsCheck(checkers[i])
	}

	// ***** START: Repository.NumClosedIssues *****
	// Handled outside repoChecker because the repair SQL needs extra bind
	// parameters (is_closed, is_pull) beyond the (id, id) pair.
	desc := "repository count 'num_closed_issues'"
	results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false)
	if err != nil {
		log.Error(4, "Select %s: %v", desc, err)
	} else {
		for _, result := range results {
			id := com.StrTo(result["id"]).MustInt64()
			log.Trace("Updating %s: %d", desc, id)
			_, err = x.Exec("UPDATE `repository` SET num_closed_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, false, id)
			if err != nil {
				log.Error(4, "Update %s[%d]: %v", desc, id, err)
			}
		}
	}
	// ***** END: Repository.NumClosedIssues *****

	// FIXME: use checker when stop supporting old fork repo format.
	// ***** START: Repository.NumForks *****
	// Goes through UpdateRepository instead of raw SQL; failures skip the row.
	results, err = x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
	if err != nil {
		log.Error(4, "Select repository count 'num_forks': %v", err)
	} else {
		for _, result := range results {
			id := com.StrTo(result["id"]).MustInt64()
			log.Trace("Updating repository count 'num_forks': %d", id)

			repo, err := GetRepositoryByID(id)
			if err != nil {
				log.Error(4, "GetRepositoryByID[%d]: %v", id, err)
				continue
			}

			rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
			if err != nil {
				log.Error(4, "Select count of forks[%d]: %v", repo.ID, err)
				continue
			}
			repo.NumForks = int(parseCountResult(rawResult))

			if err = UpdateRepository(repo, false); err != nil {
				log.Error(4, "UpdateRepository[%d]: %v", id, err)
				continue
			}
		}
	}
	// ***** END: Repository.NumForks *****
}
// RepositoryList contains a list of repositories and supports batch-loading
// of their related attributes (e.g. owners).
type RepositoryList []*Repository
// loadAttributes populates the Owner field of every repository in the list
// with a single batched user query on the given Engine.
func (repos RepositoryList) loadAttributes(e Engine) error {
	if len(repos) == 0 {
		return nil
	}

	// Collect the distinct owner IDs.
	ownersByID := make(map[int64]*User)
	for _, repo := range repos {
		ownersByID[repo.OwnerID] = nil
	}
	ownerIDs := make([]int64, 0, len(ownersByID))
	for ownerID := range ownersByID {
		ownerIDs = append(ownerIDs, ownerID)
	}

	// Fetch all owners in one query.
	owners := make([]*User, 0, len(ownerIDs))
	if err := e.
		Where("id > 0").
		In("id", ownerIDs).
		Find(&owners); err != nil {
		return fmt.Errorf("find users: %v", err)
	}
	for _, owner := range owners {
		ownersByID[owner.ID] = owner
	}

	// Attach the loaded owners back onto the repositories.
	for _, repo := range repos {
		repo.Owner = ownersByID[repo.OwnerID]
	}
	return nil
}
// LoadAttributes loads the attributes for the given RepositoryList
// using the default engine.
func (repos RepositoryList) LoadAttributes() error {
	return repos.loadAttributes(x)
}

// MirrorRepositoryList contains the mirror repositories
type MirrorRepositoryList []*Repository
// loadAttributes populates the Mirror field of every mirror repository in the
// list with a single batched query on the given Engine.
func (repos MirrorRepositoryList) loadAttributes(e Engine) error {
	if len(repos) == 0 {
		return nil
	}

	// Load mirrors.
	repoIDs := make([]int64, 0, len(repos))
	for i := range repos {
		if !repos[i].IsMirror {
			continue
		}

		repoIDs = append(repoIDs, repos[i].ID)
	}
	// Robustness: nothing in the list is a mirror — avoid issuing an
	// IN-query with an empty ID list.
	if len(repoIDs) == 0 {
		return nil
	}
	mirrors := make([]*Mirror, 0, len(repoIDs))
	if err := e.
		Where("id > 0").
		In("repo_id", repoIDs).
		Find(&mirrors); err != nil {
		return fmt.Errorf("find mirrors: %v", err)
	}

	set := make(map[int64]*Mirror)
	for i := range mirrors {
		set[mirrors[i].RepoID] = mirrors[i]
	}
	for i := range repos {
		repos[i].Mirror = set[repos[i].ID]
	}
	return nil
}
// LoadAttributes loads the attributes for the given MirrorRepositoryList
// using the default engine.
func (repos MirrorRepositoryList) LoadAttributes() error {
	return repos.loadAttributes(x)
}

// ScrubSensitiveDataOptions carries commit metadata used by ScrubSensitiveData.
type ScrubSensitiveDataOptions struct {
	LastCommitID  string // commit ID reported as OldCommitID in the simulated push
	CommitMessage string // message for the scrub commit
}
// ScrubSensitiveData removes names and email addresses from the manifest|project|package|status.json files and scrubs previous history.
// The rewritten branch is force-pushed to origin/master and a push event is
// simulated so feeds reflect the new head.
func (repo *Repository) ScrubSensitiveData(doer *User, opts ScrubSensitiveDataOptions) error {
	// Serialize working-copy operations for this repository.
	repoWorkingPool.CheckIn(com.ToStr(repo.ID))
	defer repoWorkingPool.CheckOut(com.ToStr(repo.ID))

	// Reset the local copy to a clean checkout of master before editing.
	localPath := repo.LocalCopyPath()
	if err := repo.DiscardLocalRepoBranchChanges("master"); err != nil {
		return fmt.Errorf("DiscardLocalRepoBranchChanges [branch: master]: %v", err)
	} else if err = repo.UpdateLocalCopyBranch("master"); err != nil {
		return fmt.Errorf("UpdateLocalCopyBranch [branch: master]: %v", err)
	}

	// gofmt fix: "! success" -> "!success" (no behavior change).
	if success := scrub.ScrubJsonFiles(localPath); !success {
		return fmt.Errorf("Nothing to scrub")
	}

	// Commit the scrubbed files and force-push, rewriting remote history.
	if err := git.AddChanges(localPath, true); err != nil {
		return fmt.Errorf("git add --all: %v", err)
	} else if err := git.CommitChanges(localPath, git.CommitChangesOptions{
		Committer: doer.NewGitSig(),
		Message:   opts.CommitMessage,
	}); err != nil {
		return fmt.Errorf("CommitChanges: %v", err)
	} else if err := git.PushForce(localPath, "origin", "master"); err != nil {
		return fmt.Errorf("git push --force --all origin %s: %v", "master", err)
	}

	// From here on failures are logged but not returned: the push already
	// succeeded, so the feed simulation is best-effort.
	gitRepo, err := git.OpenRepository(repo.RepoPath())
	if err != nil {
		log.Error(4, "OpenRepository: %v", err)
		return nil
	}
	commit, err := gitRepo.GetBranchCommit("master")
	if err != nil {
		log.Error(4, "GetBranchCommit [branch: %s]: %v", "master", err)
		return nil
	}

	// Simulate push event.
	pushCommits := &PushCommits{
		Len:     1,
		Commits: []*PushCommit{CommitToPushCommit(commit)},
	}
	oldCommitID := opts.LastCommitID
	if err := CommitRepoAction(CommitRepoActionOptions{
		PusherName:  doer.Name,
		RepoOwnerID: repo.MustOwner().ID,
		RepoName:    repo.Name,
		RefFullName: git.BRANCH_PREFIX + "master",
		OldCommitID: oldCommitID,
		NewCommitID: commit.ID.String(),
		Commits:     pushCommits,
	}); err != nil {
		log.Error(4, "CommitRepoAction: %v", err)
		return nil
	}

	return nil
}
// __ __ __ .__
// / \ / \_____ _/ |_ ____ | |__
// \ \/\/ /\__ \\ __\/ ___\| | \
// \ / / __ \| | \ \___| Y \
// \__/\ / (____ /__| \___ >___| /
// \/ \/ \/ \/
// Watch is connection request for receiving repository notification.
// (UserID, RepoID) is enforced unique via the composite index.
type Watch struct {
	ID int64 `xorm:"pk autoincr"`
	UserID int64 `xorm:"UNIQUE(watch)"`
	RepoID int64 `xorm:"UNIQUE(watch)"`
}
// isWatching reports whether the user watches the repository, via the given
// Engine. Lookup errors are deliberately treated as "not watching".
func isWatching(e Engine, userID, repoID int64) bool {
	watching, _ := e.Get(&Watch{0, userID, repoID})
	return watching
}

// IsWatching checks if user has watched given repository.
func IsWatching(userID, repoID int64) bool {
	return isWatching(x, userID, repoID)
}
// watchRepo adds or removes a watch record and keeps the repository's
// num_watches counter in sync. Both operations are no-ops when the watch
// state already matches the requested one.
func watchRepo(e Engine, userID, repoID int64, watch bool) (err error) {
	if watch {
		if isWatching(e, userID, repoID) {
			return nil
		}
		if _, err = e.Insert(&Watch{RepoID: repoID, UserID: userID}); err != nil {
			return err
		}
		_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoID)
		return err
	}

	if !isWatching(e, userID, repoID) {
		return nil
	}
	if _, err = e.Delete(&Watch{0, userID, repoID}); err != nil {
		return err
	}
	_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches - 1 WHERE id = ?", repoID)
	return err
}
// WatchRepo watch or unwatch repository.
func WatchRepo(userID, repoID int64, watch bool) (err error) {
	return watchRepo(x, userID, repoID, watch)
}

// getWatchers returns all watch records of the repository via the given Engine.
func getWatchers(e Engine, repoID int64) ([]*Watch, error) {
	watches := make([]*Watch, 0, 10)
	err := e.Find(&watches, &Watch{RepoID: repoID})
	return watches, err
}

// GetWatchers returns all watchers of given repository.
func GetWatchers(repoID int64) ([]*Watch, error) {
	return getWatchers(x, repoID)
}
// GetWatchers returns range of users watching given repository.
func (repo *Repository) GetWatchers(page int) ([]*User, error) {
	users := make([]*User, 0, ItemsPerPage)
	sess := x.
		Limit(ItemsPerPage, (page-1)*ItemsPerPage).
		Where("watch.repo_id=?", repo.ID)
	// PostgreSQL requires the reserved word "user" to be quoted.
	joinCond := "user.id=watch.user_id"
	if setting.UsePostgreSQL {
		joinCond = `"user".id=watch.user_id`
	}
	return users, sess.Join("LEFT", "watch", joinCond).Find(&users)
}
// notifyWatchers inserts one feed entry for the actor and one copy of the
// action for every watcher of the repository (except the actor themselves).
func notifyWatchers(e Engine, act *Action) error {
	// Add feeds for user self and all watchers.
	watches, err := getWatchers(e, act.RepoID)
	if err != nil {
		return fmt.Errorf("get watchers: %v", err)
	}

	// Add feed for actioner.
	act.UserID = act.ActUserID
	if _, err = e.InsertOne(act); err != nil {
		return fmt.Errorf("insert new actioner: %v", err)
	}

	for i := range watches {
		if act.ActUserID == watches[i].UserID {
			continue
		}

		// Reset the primary key so InsertOne creates a fresh row for this
		// watcher instead of reusing the previously assigned ID.
		act.ID = 0
		act.UserID = watches[i].UserID
		if _, err = e.InsertOne(act); err != nil {
			return fmt.Errorf("insert new action: %v", err)
		}
	}
	return nil
}
// NotifyWatchers creates batch of actions for every watcher.
// Thin wrapper around notifyWatchers using the default engine.
func NotifyWatchers(act *Action) error {
	return notifyWatchers(x, act)
}
// _________ __
// / _____// |______ _______
// \_____ \\ __\__ \\_ __ \
// / \| | / __ \| | \/
// /_______ /|__| (____ /__|
// \/ \/
// Star contains the star information
// (UID, RepoID) is enforced unique via the composite index.
type Star struct {
	ID int64 `xorm:"pk autoincr"`
	UID int64 `xorm:"UNIQUE(s)"`
	RepoID int64 `xorm:"UNIQUE(s)"`
}
// StarRepo star or unstar repository.
// Keeps both the repository's and the user's num_stars counters in sync;
// a request matching the current state is a no-op.
func StarRepo(userID, repoID int64, star bool) (err error) {
	if star {
		if IsStaring(userID, repoID) {
			return nil
		}
		if _, err = x.Insert(&Star{UID: userID, RepoID: repoID}); err != nil {
			return err
		}
		if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil {
			return err
		}
		_, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID)
		return err
	}

	if !IsStaring(userID, repoID) {
		return nil
	}
	if _, err = x.Delete(&Star{0, userID, repoID}); err != nil {
		return err
	}
	if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil {
		return err
	}
	_, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID)
	return err
}
// IsStaring checks if user has starred given repository.
// Lookup errors are deliberately treated as "not starred".
func IsStaring(userID, repoID int64) bool {
	starred, _ := x.Get(&Star{0, userID, repoID})
	return starred
}

// GetStargazers returns the users who gave stars to this repository
func (repo *Repository) GetStargazers(page int) ([]*User, error) {
	users := make([]*User, 0, ItemsPerPage)
	sess := x.
		Limit(ItemsPerPage, (page-1)*ItemsPerPage).
		Where("star.repo_id=?", repo.ID)
	// PostgreSQL requires the reserved word "user" to be quoted.
	joinCond := "user.id=star.uid"
	if setting.UsePostgreSQL {
		joinCond = `"user".id=star.uid`
	}
	return users, sess.Join("LEFT", "star", joinCond).Find(&users)
}
// ___________ __
// \_ _____/__________| | __
// | __)/ _ \_ __ \ |/ /
// | \( <_> ) | \/ <
// \___ / \____/|__| |__|_ \
// \/ \/
// HasForkedRepo checks if given user has already forked a repository with given ID.
// It returns the fork record (zero-valued when absent) and whether it exists;
// lookup errors are treated as "no fork".
func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) {
	fork := new(Repository)
	has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(fork)
	return fork, has
}
// ForkRepository forks a repository
// The database work (new record + parent fork counter) runs in one session;
// the git clone/update-server-info/hook steps shell out before commit, so a
// git failure aborts the transaction via the deferred sessionRelease.
func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) {
	repo := &Repository{
		OwnerID: u.ID,
		Owner: u,
		Name: name,
		LowerName: strings.ToLower(name),
		Description: desc,
		DefaultBranch: oldRepo.DefaultBranch,
		IsPrivate: oldRepo.IsPrivate,
		IsFork: true,
		ForkID: oldRepo.ID,
	}

	sess := x.NewSession()
	defer sessionRelease(sess)
	if err = sess.Begin(); err != nil {
		return nil, err
	}

	if err = createRepository(sess, u, repo); err != nil {
		return nil, err
	}

	// Bump the parent repository's fork counter inside the same transaction.
	if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil {
		return nil, err
	}

	repoPath := RepoPath(u.Name, repo.Name)
	// Bare-clone the parent repository into the fork's path (10 min budget).
	_, stderr, err := process.ExecTimeout(10*time.Minute,
		fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name),
		"git", "clone", "--bare", oldRepo.RepoPath(), repoPath)
	if err != nil {
		return nil, fmt.Errorf("git clone: %v", stderr)
	}

	_, stderr, err = process.ExecDir(-1,
		repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath),
		"git", "update-server-info")
	if err != nil {
		return nil, fmt.Errorf("git update-server-info: %v", stderr)
	}

	if err = createUpdateHook(repoPath); err != nil {
		return nil, fmt.Errorf("createUpdateHook: %v", err)
	}

	return repo, sess.Commit()
}
// GetForks returns all the forks of the repository
func (repo *Repository) GetForks() ([]*Repository, error) {
	forks := make([]*Repository, 0, repo.NumForks)
	err := x.Find(&forks, &Repository{ForkID: repo.ID})
	return forks, err
}
// __________ .__
// \______ \____________ ____ ____ | |__
// | | _/\_ __ \__ \ / \_/ ___\| | \
// | | \ | | \// __ \| | \ \___| Y \
// |______ / |__| (____ /___| /\___ >___| /
// \/ \/ \/ \/ \/
//
// CreateNewBranch creates a new repository branch
// doer is part of the API surface but not used directly in this function.
func (repo *Repository) CreateNewBranch(doer *User, oldBranchName, branchName string) (err error) {
	// Serialize working-copy operations for this repository.
	repoWorkingPool.CheckIn(com.ToStr(repo.ID))
	defer repoWorkingPool.CheckOut(com.ToStr(repo.ID))

	// Reset the local copy to a clean checkout of the base branch.
	localPath := repo.LocalCopyPath()

	if err = discardLocalRepoBranchChanges(localPath, oldBranchName); err != nil {
		return fmt.Errorf("discardLocalRepoChanges: %v", err)
	} else if err = repo.UpdateLocalCopyBranch(oldBranchName); err != nil {
		return fmt.Errorf("UpdateLocalCopyBranch: %v", err)
	}

	if err = repo.CheckoutNewBranch(oldBranchName, branchName); err != nil {
		return fmt.Errorf("CreateNewBranch: %v", err)
	}

	// Publish the new branch to the bare repository.
	if err = git.Push(localPath, "origin", branchName); err != nil {
		return fmt.Errorf("Push: %v", err)
	}

	return nil
}
|
package models
import (
"database/sql"
"fmt"
e "github.com/techjanitor/pram-get/errors"
u "github.com/techjanitor/pram-get/utils"
)
// TagsModel holds the parameters from the request and also the key for the cache
type TagsModel struct {
	Ib uint // imageboard id the search is scoped to
	Term string // substring matched against tag names (LIKE)
	Result TagsType // populated by Get
}

// TagsType is the top level of the JSON response
type TagsType struct {
	Body []Tags `json:"tags"`
}

// Taglist struct
type Tags struct {
	Id uint `json:"id"`
	Tag string `json:"tag"`
	Total uint `json:"total"` // number of images carrying this tag
	Type uint `json:"type"`
}
// Get will gather the information from the database and return it as JSON serialized data
// Tags are matched by substring against i.Term within imageboard i.Ib and
// ordered by usage count. Returns e.ErrNotFound when nothing matches.
func (i *TagsModel) Get() (err error) {

	// Initialize response header
	response := TagsType{}

	// Get Database handle
	db, err := u.GetDb()
	if err != nil {
		return
	}

	tags := []Tags{}

	// Wrap the term in wildcards for a LIKE substring match.
	searchterm := fmt.Sprintf("%%%s%%", i.Term)

	rows, err := db.Query(`select count,tag_id,tag_name,tagtype_id
FROM (select count(image_id) as count,ib_id,tags.tag_id,tag_name,tagtype_id
FROM tags left join tagmap on tags.tag_id = tagmap.tag_id group by tag_id) as a
WHERE ib_id = ? AND tag_name LIKE ?
ORDER BY count DESC`, i.Ib, searchterm)
	if err != nil {
		return err
	}
	// BUG FIX: the rows cursor was never closed; without Close the
	// underlying connection leaks whenever Scan returns early.
	defer rows.Close()

	for rows.Next() {
		// Initialize posts struct
		tag := Tags{}
		// Scan rows and place column into struct
		err := rows.Scan(&tag.Total, &tag.Id, &tag.Tag, &tag.Type)
		if err != nil {
			return err
		}
		// Append rows to info struct
		tags = append(tags, tag)
	}
	err = rows.Err()
	if err != nil {
		return
	}

	// Return 404 if there are no threads in ib
	if len(tags) == 0 {
		return e.ErrNotFound
	}

	// Add pagedresponse to the response struct
	response.Body = tags

	// This is the data we will serialize
	i.Result = response

	return
}
add search ability to tags
package models
import (
"fmt"
e "github.com/techjanitor/pram-get/errors"
u "github.com/techjanitor/pram-get/utils"
)
// TagsModel holds the parameters from the request and also the key for the cache
type TagsModel struct {
Ib uint
Term string
Result TagsType
}
// TagsType is the top level of the JSON response
type TagsType struct {
Body []Tags `json:"tags"`
}
// Taglist struct
type Tags struct {
Id uint `json:"id"`
Tag string `json:"tag"`
Total uint `json:"total"`
Type uint `json:"type"`
}
// Get will gather the information from the database and return it as JSON serialized data
// Tags are matched by substring against i.Term within imageboard i.Ib and
// ordered by usage count. Returns e.ErrNotFound when nothing matches.
func (i *TagsModel) Get() (err error) {

	// Initialize response header
	response := TagsType{}

	// Get Database handle
	db, err := u.GetDb()
	if err != nil {
		return
	}

	tags := []Tags{}

	// Wrap the term in wildcards for a LIKE substring match.
	searchterm := fmt.Sprintf("%%%s%%", i.Term)

	rows, err := db.Query(`select count,tag_id,tag_name,tagtype_id
FROM (select count(image_id) as count,ib_id,tags.tag_id,tag_name,tagtype_id
FROM tags left join tagmap on tags.tag_id = tagmap.tag_id group by tag_id) as a
WHERE ib_id = ? AND tag_name LIKE ?
ORDER BY count DESC`, i.Ib, searchterm)
	if err != nil {
		return err
	}
	// BUG FIX: the rows cursor was never closed; without Close the
	// underlying connection leaks whenever Scan returns early.
	defer rows.Close()

	for rows.Next() {
		// Initialize posts struct
		tag := Tags{}
		// Scan rows and place column into struct
		err := rows.Scan(&tag.Total, &tag.Id, &tag.Tag, &tag.Type)
		if err != nil {
			return err
		}
		// Append rows to info struct
		tags = append(tags, tag)
	}
	err = rows.Err()
	if err != nil {
		return
	}

	// Return 404 if there are no threads in ib
	if len(tags) == 0 {
		return e.ErrNotFound
	}

	// Add pagedresponse to the response struct
	response.Body = tags

	// This is the data we will serialize
	i.Result = response

	return
}
|
package models
import (
"time"
"krypton-server/options"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
var (
	// Todo is the package-level accessor for todo persistence helpers.
	// A nil pointer is usable because _Todo carries no state.
	Todo *_Todo
	// todoCollection is the MongoDB collection name.
	todoCollection = "todo"
	// todoIndexes is passed to the shared Mongo query helper (currently empty).
	todoIndexes = []mgo.Index{}
)

// _Todo is a stateless namespace for todo collection operations.
type _Todo struct{}

// TodoModel is a single todo document.
type TodoModel struct {
	Id bson.ObjectId `bson:"_id" json:"-"`
	Uid string `bson:"uid" json:"uid"`
	Title string `bson:"title" json:"title"`
	Content string `bson:"content" json:"content"`
	Due time.Time `bson:"due" json:"due"`
	Finished bool `bson:"finished" json:"finished"`
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	UpdatedAt time.Time `bson:"updated_at" json:"updated_at"`
	isNewRecord bool `bson:"-"` // true until the first successful insert
}
// NewModel builds an unsaved TodoModel with a fresh ObjectId. The record is
// flagged as new so the first Save performs an insert and stamps timestamps.
func (_ *_Todo) NewModel(uid, title, content string, due time.Time) *TodoModel {
	m := new(TodoModel)
	m.Id = bson.NewObjectId()
	m.Uid = uid
	m.Title = title
	m.Content = content
	m.Due = due
	m.Finished = false
	m.isNewRecord = true
	return m
}
// Save persists the todo. New records are inserted (stamping CreatedAt and
// UpdatedAt); existing records get a partial $set update of mutable fields.
func (todo *TodoModel) Save() (err error) {
	if !todo.Id.Valid() {
		return ErrInvalidId
	}
	Todo.Query(func(c *mgo.Collection) {
		t := time.Now()
		if todo.isNewRecord {
			todo.CreatedAt = t
			todo.UpdatedAt = t
			if err = c.Insert(todo); err == nil {
				// Mark persisted so a later Save performs an update.
				todo.isNewRecord = false
			}
		} else {
			// NOTE(review): only the stored document's updated_at is refreshed
			// here; todo.UpdatedAt on the struct keeps its old value — confirm
			// whether callers rely on it after Save.
			migration := bson.M{
				"$set": bson.M{
					"title": todo.Title,
					"content": todo.Content,
					"due": todo.Due,
					"finished": todo.Finished,
					"updated_at": t,
				},
			}
			err = c.UpdateId(todo.Id, migration)
		}
	})
	return
}
// Find loads a single todo by its hex ObjectId string.
// Returns ErrInvalidId when the string is not a valid ObjectId.
func (_ *_Todo) Find(id string) (todo *TodoModel, err error) {
	if !bson.IsObjectIdHex(id) {
		err = ErrInvalidId
		return
	}
	oid := bson.ObjectIdHex(id)
	Todo.Query(func(c *mgo.Collection) {
		err = c.FindId(oid).One(&todo)
	})
	return
}
// List returns a page of todos matching the filter options, together with the
// total count of matching documents. Results are sorted by due date.
func (_ *_Todo) List(opts *options.ListTodoOpts) (total int, todos []*TodoModel, err error) {
	offset := (opts.Page - 1) * opts.Limit
	if offset < 0 {
		offset = 0
	}
	query := bson.M{}
	// Reject an inverted time window up front.
	if opts.From.After(opts.To) {
		return 0, nil, ErrInvalidParams
	}
	// Optional due-date range; zero times mean "unbounded" on that side.
	dueRange := bson.M{}
	if !opts.From.IsZero() {
		dueRange["$gte"] = opts.From
	}
	if !opts.To.IsZero() {
		dueRange["$lte"] = opts.To
	}
	if len(dueRange) > 0 {
		query["due"] = dueRange
	}
	if opts.Uid != "" {
		query["uid"] = opts.Uid
	}
	// Unless IsAll is set, restrict to the requested finished state.
	if !opts.IsAll {
		query["finished"] = opts.IsFinished
	}
	Todo.Query(func(c *mgo.Collection) {
		err = c.Find(query).Skip(offset).Limit(opts.Limit).Sort("due").All(&todos)
		if err == nil {
			// Count reuses the same filter so total matches the page's query.
			total, err = c.Find(query).Count()
		}
	})
	return
}
// Delete removes the todo with the given hex ObjectId string.
// Returns ErrInvalidId when the string is not a valid ObjectId.
func (_ *_Todo) Delete(id string) (err error) {
	if !bson.IsObjectIdHex(id) {
		return ErrInvalidId
	}
	oid := bson.ObjectIdHex(id)
	Todo.Query(func(c *mgo.Collection) {
		err = c.RemoveId(oid)
	})
	return
}

// Query hands the todo collection (with todoIndexes) to the callback via the
// shared Mongo helper.
func (_ *_Todo) Query(query func(c *mgo.Collection)) {
	Mongo().Query(todoCollection, todoIndexes, query)
}
fix bug
package models
import (
"time"
"krypton-server/options"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
var (
Todo *_Todo
todoCollection = "todo"
todoIndexes = []mgo.Index{}
)
type _Todo struct{}
type TodoModel struct {
Id bson.ObjectId `bson:"_id" json:"-"`
Uid string `bson:"uid" json:"uid"`
Title string `bson:"title" json:"title"`
Content string `bson:"content" json:"content"`
Due time.Time `bson:"due" json:"due"`
Finished bool `bson:"finished" json:"finished"`
CreatedAt time.Time `bson:"created_at" json:"created_at"`
UpdatedAt time.Time `bson:"updated_at" json:"updated_at"`
isNewRecord bool `bson:"-"`
}
func (_ *_Todo) NewModel(uid, title, content string, due time.Time) *TodoModel {
return &TodoModel{
Id: bson.NewObjectId(),
Uid: uid,
Title: title,
Content: content,
Due: due,
Finished: false,
isNewRecord: true,
}
}
func (todo *TodoModel) Save() (err error) {
if !todo.Id.Valid() {
return ErrInvalidId
}
Todo.Query(func(c *mgo.Collection) {
t := time.Now()
if todo.isNewRecord {
todo.CreatedAt = t
todo.UpdatedAt = t
if err = c.Insert(todo); err == nil {
todo.isNewRecord = false
}
} else {
migration := bson.M{
"$set": bson.M{
"title": todo.Title,
"content": todo.Content,
"due": todo.Due,
"finished": todo.Finished,
"updated_at": t,
},
}
err = c.UpdateId(todo.Id, migration)
}
})
return
}
func (_ *_Todo) Find(id string) (todo *TodoModel, err error) {
if !bson.IsObjectIdHex(id) {
return nil, ErrInvalidId
}
bsonID := bson.ObjectIdHex(id)
Todo.Query(func(c *mgo.Collection) {
err = c.FindId(bsonID).One(&todo)
})
return
}
// List returns a page of todos matching the filter options, together with the
// total count of matching documents. Results are sorted by due date.
func (_ *_Todo) List(opts *options.ListTodoOpts) (total int, todos []*TodoModel, err error) {
	offset := (opts.Page - 1) * opts.Limit
	if offset < 0 {
		offset = 0
	}
	query := bson.M{}
	// Reject an inverted time window up front.
	if opts.From.After(opts.To) {
		return 0, nil, ErrInvalidParams
	}
	// Optional due-date range; zero times mean "unbounded" on that side.
	dueRange := bson.M{}
	if !opts.From.IsZero() {
		dueRange["$gte"] = opts.From
	}
	if !opts.To.IsZero() {
		dueRange["$lte"] = opts.To
	}
	if len(dueRange) > 0 {
		query["due"] = dueRange
	}
	if opts.Uid != "" {
		query["uid"] = opts.Uid
	}
	Todo.Query(func(c *mgo.Collection) {
		err = c.Find(query).Skip(offset).Limit(opts.Limit).Sort("due").All(&todos)
		if err == nil {
			// Count reuses the same filter so total matches the page's query.
			total, err = c.Find(query).Count()
		}
	})
	return
}
func (_ *_Todo) Delete(id string) (err error) {
if !bson.IsObjectIdHex(id) {
return ErrInvalidId
}
Todo.Query(func(c *mgo.Collection) {
err = c.RemoveId(bson.ObjectIdHex(id))
})
return
}
func (_ *_Todo) Query(query func(c *mgo.Collection)) {
Mongo().Query(todoCollection, todoIndexes, query)
}
|
package models
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"time"
"github.com/dockercn/docker-bucket/utils"
)
const (
ORG_MEMBER = "M"
ORG_OWNER = "O"
)
// User is an account record persisted in LedisDB.
type User struct {
	Username string // login name
	Password string //
	Repositories string // all repositories of the user
	Organizations string // all organizations the user belongs to
	Email string // email address; changeable, globally unique
	Fullname string //
	Company string //
	Location string //
	Mobile string //
	URL string //
	Gravatar string // gravatar.org API avatar when an email address; otherwise the URL of an uploaded avatar
	Created int64 // creation time (unix seconds; see Put)
	Updated int64 // last update time (unix seconds; see Put)
}
// Has looks up the user's key in the global user hash, then verifies the
// stored Username field under that key. Returns (exists, key, error).
func (user *User) Has(username string) (bool, []byte, error) {
	if key, err := LedisDB.HGet([]byte(GetServerKeys("user")), []byte(GetObjectKey("user", username))); err != nil {
		return false, []byte(""), err
	} else if key != nil {
		if name, err := LedisDB.HGet(key, []byte("Username")); err != nil {
			return false, []byte(""), err
		} else if name != nil {
			// The key exists; confirm the stored username actually matches.
			if string(name) != username {
				return true, key, fmt.Errorf("已经存在了 Key,但是用户名不相同 %s ", string(name))
			}
			return true, key, nil
		} else {
			// Key present but no Username field stored under it.
			return false, []byte(""), nil
		}
	} else {
		return false, []byte(""), nil
	}
}
// Put creates a new user record. It rejects the call when the username
// already exists (as a user or as an organization), or when the
// username, password, or email fails validation. On success the record
// is saved and its key registered in the global user hash.
func (user *User) Put(username string, passwd string, email string) error {
	// Reject if the user key already exists.
	if has, _, err := user.Has(username); err != nil {
		return err
	} else if has == true {
		return fmt.Errorf("已经存在用户 %s", username)
	} else {
		// The username must not collide with an organization name.
		org := new(Organization)
		if h, _, e := org.Has(username); e != nil {
			// BUG FIX: previously returned the outer (nil) err here,
			// silently swallowing the lookup failure; return the real error.
			return e
		} else if h == true {
			return fmt.Errorf("已经存在相同名称的组织 %s", username)
		}
		// Username rules follow the docker registry namespace convention:
		// https://github.com/docker/docker/blob/28f09f06326848f4117baf633ec9fc542108f051/registry/registry.go#L27
		validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
		if !validNamespace.MatchString(username) {
			return fmt.Errorf("用户名必须是 4 - 30 位之间,且只能由 a-z,0-9 和 下划线组成")
		}
		// Minimum password length.
		if len(passwd) < 5 {
			return fmt.Errorf("密码必须等于或大于 5 位字符以上")
		}
		// Basic e-mail syntax check.
		validEmail := regexp.MustCompile(`^[a-z0-9A-Z]+([\-_\.][a-z0-9A-Z]+)*@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)*\.)+[a-zA-Z]+$`)
		if !validEmail.MatchString(email) {
			return fmt.Errorf("Email 格式不合法")
		}
		// Generate a random object key and persist the record.
		key := utils.GeneralKey(username)
		user.Username = username
		user.Password = passwd
		user.Email = email
		user.Updated = time.Now().Unix()
		user.Created = time.Now().Unix()
		if err := user.Save(key); err != nil {
			return err
		}
		// Register the key in the global user hash.
		if _, err := LedisDB.HSet([]byte(GetServerKeys("user")), []byte(GetObjectKey("user", username)), key); err != nil {
			return err
		}
		return nil
	}
}
// Get verifies a username/password pair against the stored record.
// Returns (true, nil) on a match, (false, nil) on a wrong password,
// and (false, err) when the user does not exist or storage fails.
func (user *User) Get(username, passwd string) (bool, error) {
	has, key, err := user.Has(username)
	if err != nil {
		return false, err
	}
	if !has {
		// No key exists for this user.
		return false, fmt.Errorf("不存在 %s 的用户数据", username)
	}
	password, err := LedisDB.HGet(key, []byte("Password"))
	if err != nil {
		return false, err
	}
	// NOTE(review): the stored password is compared as-is — presumably
	// hashing (if any) happens before Put; confirm with the callers.
	return string(password) == passwd, nil
}
// ResetPasswd overwrites the stored password of an existing user and
// bumps the Updated timestamp.
func (user *User) ResetPasswd(username, password string) error {
	has, key, err := user.Has(username)
	if err != nil {
		return err
	}
	if !has {
		// No key exists for this user.
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	user.Password = password
	user.Updated = time.Now().Unix()
	return user.Save(key)
}
// AddRepository records a newly created repository in the user's
// Repositories JSON map (repository name -> global repository key).
func (user *User) AddRepository(username, repository, key string) error {
	var (
		u   []byte
		has bool
		err error
	)
	r := make(map[string]string, 0)
	// The user must exist.
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	if repo, err := LedisDB.HGet(u, []byte("Repositories")); err != nil {
		return err
	} else if repo != nil {
		// BUG FIX: a failed unmarshal used to `return nil`, silently
		// discarding the existing map; propagate the error instead.
		if e := json.Unmarshal(repo, &r); e != nil {
			return e
		}
		if value, exist := r[repository]; exist == true && value == key {
			return fmt.Errorf("已经存在了镜像仓库数据")
		}
	}
	// Add the entry and persist the serialized map.
	r[repository] = key
	repo, _ := json.Marshal(r)
	user.Repositories = string(repo)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// RemoveRepository drops a repository entry from the user's
// Repositories JSON map after the repository is deleted.
func (user *User) RemoveRepository(username, repository string) error {
	var (
		u   []byte
		has bool
		err error
	)
	r := make(map[string]string, 0)
	// The user must exist.
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	if repo, err := LedisDB.HGet(u, []byte("Repositories")); err != nil {
		return err
	} else if repo != nil {
		// BUG FIX: a failed unmarshal used to `return nil`; propagate it.
		if e := json.Unmarshal(repo, &r); e != nil {
			return e
		}
		if _, exist := r[repository]; exist == false {
			return fmt.Errorf("不存在要删除的镜像仓库数据")
		}
	}
	// Remove the entry and persist the serialized map.
	delete(r, repository)
	repo, _ := json.Marshal(r)
	user.Repositories = string(repo)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// AddOrganization records a membership (org name -> role, one of
// ORG_MEMBER/ORG_OWNER) in the user's Organizations JSON map.
func (user *User) AddOrganization(username, org, member string) error {
	var (
		u   []byte
		has bool
		err error
	)
	o := make(map[string]string, 0)
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	} else if has == true {
		if organization, err := LedisDB.HGet(u, []byte("Organizations")); err != nil {
			// BUG FIX: the read error used to be swallowed (`return nil`).
			return err
		} else if organization != nil {
			if e := json.Unmarshal(organization, &o); e != nil {
				return e
			}
			if value, exist := o[org]; exist == true && value == member {
				return fmt.Errorf("已经存在了组织的数据")
			}
		}
	}
	// Add/overwrite the membership and persist the serialized map.
	o[org] = member
	os, _ := json.Marshal(o)
	user.Organizations = string(os)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// RemoveOrganization drops a membership entry from the user's
// Organizations JSON map.
func (user *User) RemoveOrganization(username, org string) error {
	var (
		u   []byte
		has bool
		err error
	)
	o := make(map[string]string, 0)
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	} else if has == true {
		if organization, err := LedisDB.HGet(u, []byte("Organizations")); err != nil {
			// BUG FIX: the read error used to be swallowed (`return nil`).
			return err
		} else if organization != nil {
			if e := json.Unmarshal(organization, &o); e != nil {
				return e
			}
			if _, exist := o[org]; exist == false {
				return fmt.Errorf("不存在要移除的用户数据")
			}
		}
	}
	// Remove the membership and persist the serialized map.
	delete(o, org)
	os, _ := json.Marshal(o)
	user.Organizations = string(os)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// Save writes every non-empty field of the User struct into the Ledis
// hash at key, using the Go field name as the hash field name.
// Only string, bool, and int64 fields are supported.
func (user *User) Save(key []byte) error {
	t := reflect.TypeOf(user).Elem()
	v := reflect.ValueOf(user).Elem()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := v.Field(field.Index[0])
		// Skip zero-valued fields so partial records stay sparse.
		if utils.IsEmptyValue(value) {
			continue
		}
		// Serialize the field according to its kind.
		var data []byte
		switch value.Kind() {
		case reflect.String:
			data = []byte(value.String())
		case reflect.Bool:
			data = utils.BoolToBytes(value.Bool())
		case reflect.Int64:
			data = utils.Int64ToBytes(value.Int())
		default:
			return fmt.Errorf("不支持的数据类型 %s:%s", field.Name, value.Kind().String())
		}
		if _, err := LedisDB.HSet(key, []byte(field.Name), data); err != nil {
			return err
		}
	}
	return nil
}
// Organization is the persisted organization record, stored the same
// way as User (a Ledis hash addressed through the global "org" hash).
type Organization struct {
	Owner        string //key of the creating user; the owner has read/write access to every repository by default
	Name         string //
	Description  string //stored as Markdown
	Repositories string //
	Privileges   string //
	Users        string //
	Created      int64  //
	Updated      int64  //
}
// Has looks up the organization's storage key in the global org hash,
// then verifies the record by reading back its Name field.
// Returns (exists, key, err); a non-nil error together with
// exists==true means the key is present but holds a different name.
func (org *Organization) Has(name string) (bool, []byte, error) {
	key, err := LedisDB.HGet([]byte(GetServerKeys("org")), []byte(GetObjectKey("org", name)))
	if err != nil {
		return false, []byte(""), err
	}
	if key == nil {
		return false, []byte(""), nil
	}
	n, err := LedisDB.HGet(key, []byte("Name"))
	if err != nil {
		return false, []byte(""), err
	}
	if n == nil {
		return false, []byte(""), nil
	}
	// The key exists; make sure it actually belongs to this org name.
	if string(n) != name {
		return true, key, fmt.Errorf("已经存在了 Key,但是组织名称不相同 %s", string(n))
	}
	return true, key, nil
}
// Put creates a new organization owned by user. It checks that the
// name is free (against both orgs and users), that the owner exists,
// and that the name is well-formed; on success it registers the org
// key globally, adds the owner to the org's user list, and records the
// org on the owner's user record.
func (org *Organization) Put(user, name, description string) error {
	if has, _, err := org.Has(name); err != nil {
		return err
	} else if has == true {
		return fmt.Errorf("组织 %s 已经存在", name)
	} else {
		// The org name must not collide with an existing username.
		u := new(User)
		if has, _, err := u.Has(name); err != nil {
			return err
		} else if has == true {
			return fmt.Errorf("已经存在相同名称的用户 %s", name)
		}
		// The owning user must exist.
		if has, _, err := u.Has(user); err != nil {
			return err
		} else if has == false {
			return fmt.Errorf("不存在用户的数据 %s", user)
		}
		key := utils.GeneralKey(name)
		// Name rules follow the docker registry namespace convention:
		// https://github.com/docker/docker/blob/28f09f06326848f4117baf633ec9fc542108f051/registry/registry.go#L27
		validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
		if !validNamespace.MatchString(name) {
			return fmt.Errorf("组织名必须是 4 - 30 位之间,且只能由 a-z,0-9 和 下划线组成")
		}
		org.Owner = user
		org.Name = name
		org.Description = description
		org.Updated = time.Now().Unix()
		org.Created = time.Now().Unix()
		if err := org.Save(key); err != nil {
			return err
		} else {
			// Register the key in the global org hash.
			if _, err := LedisDB.HSet([]byte(GetServerKeys("org")), []byte(GetObjectKey("org", name)), key); err != nil {
				return err
			}
			// Record the creator as the organization's owner.
			if e := org.AddUser(name, user, ORG_OWNER); e != nil {
				return e
			}
			// NOTE(review): the owner is added to the org as ORG_OWNER but
			// recorded on the user side as ORG_MEMBER — confirm this
			// asymmetry is intentional.
			if e := u.AddOrganization(user, name, ORG_MEMBER); e != nil {
				return e
			}
			return nil
		}
	}
}
// AddUser adds a user to the organization's Users JSON map with the
// given role (member is ORG_OWNER or ORG_MEMBER).
func (org *Organization) AddUser(name, user, member string) error {
	var (
		o   []byte
		has bool
		err error
	)
	users := make(map[string]string, 0)
	// The organization must exist.
	if has, o, err = org.Has(name); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The user must exist.
	u := new(User)
	if has, _, err = u.Has(user); err != nil {
		return err
	} else if has == false {
		// BUG FIX: the format verb was %S (invalid); use %s.
		return fmt.Errorf("不存在用户的数据 %s", user)
	}
	if us, err := LedisDB.HGet(o, []byte("Users")); err != nil {
		// BUG FIX: the read error used to be swallowed (`return nil`).
		return err
	} else if us != nil {
		if e := json.Unmarshal(us, &users); e != nil {
			return e
		}
		if value, exist := users[user]; exist == true && value == member {
			return fmt.Errorf("已经存在了用户的数据")
		}
	}
	// Add/overwrite the membership and persist the serialized map.
	users[user] = member
	us, _ := json.Marshal(users)
	org.Users = string(us)
	org.Updated = time.Now().Unix()
	if err = org.Save(o); err != nil {
		return err
	}
	return nil
}
// RemoveUser drops a user from the organization's Users JSON map.
func (org *Organization) RemoveUser(name, user string) error {
	var (
		o   []byte
		has bool
		err error
	)
	users := make(map[string]string, 0)
	// The organization must exist.
	if has, o, err = org.Has(name); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The user must exist.
	u := new(User)
	if has, _, err = u.Has(user); err != nil {
		return err
	} else if has == false {
		// BUG FIX: the format verb was %S (invalid); use %s.
		return fmt.Errorf("不存在用户的数据 %s", user)
	}
	if us, err := LedisDB.HGet(o, []byte("Users")); err != nil {
		return err
	} else if us != nil {
		if e := json.Unmarshal(us, &users); e != nil {
			return e
		}
		if _, exist := users[user]; exist == false {
			return fmt.Errorf("在组织中不存在要移除的用户数据")
		}
	}
	// Remove the membership and persist the serialized map.
	delete(users, user)
	us, _ := json.Marshal(users)
	org.Users = string(us)
	org.Updated = time.Now().Unix()
	if err = org.Save(o); err != nil {
		return err
	}
	return nil
}
// AddRepository records a repository (name -> repository key) in the
// organization's Repositories JSON map.
func (org *Organization) AddRepository(name, repository, key string) error {
	has, o, err := org.Has(name)
	if err != nil {
		return err
	}
	if !has {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The repository itself must already exist.
	repo := new(Repository)
	if exists, err := repo.Has(repository); err != nil {
		return err
	} else if !exists {
		return fmt.Errorf("不存在镜像仓库的数据: %s", repository)
	}
	repos := make(map[string]string, 0)
	if raw, err := LedisDB.HGet(o, []byte("Repositories")); err != nil {
		return err
	} else if raw != nil {
		if e := json.Unmarshal(raw, &repos); e != nil {
			return e
		}
		if v, ok := repos[repository]; ok && v == key {
			return fmt.Errorf("在组织中已经存在要添加的镜像仓库数据")
		}
	}
	// Add the entry and persist the serialized map.
	repos[repository] = key
	serialized, _ := json.Marshal(repos)
	org.Repositories = string(serialized)
	org.Updated = time.Now().Unix()
	return org.Save(o)
}
// RemoveRepository drops a repository entry from the organization's
// Repositories JSON map (counterpart of AddRepository).
// Previously an unimplemented stub that always returned nil.
func (org *Organization) RemoveRepository(name, repository string) error {
	var (
		o   []byte
		has bool
		err error
	)
	repos := make(map[string]string, 0)
	// The organization must exist.
	if has, o, err = org.Has(name); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The repository itself must exist.
	repo := new(Repository)
	if has, err = repo.Has(repository); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在镜像仓库的数据: %s", repository)
	}
	if r, err := LedisDB.HGet(o, []byte("Repositories")); err != nil {
		return err
	} else if r != nil {
		if e := json.Unmarshal(r, &repos); e != nil {
			return e
		}
		if _, exist := repos[repository]; exist == false {
			return fmt.Errorf("在组织中不存在要删除的仓库数据")
		}
	}
	// Remove the entry and persist the serialized map.
	delete(repos, repository)
	rs, _ := json.Marshal(repos)
	org.Repositories = string(rs)
	org.Updated = time.Now().Unix()
	if err = org.Save(o); err != nil {
		return err
	}
	return nil
}
// AddPrivilege grants a user read/write access to a repository within
// the organization.
// TODO: not implemented — currently a no-op that always returns nil.
func (org *Organization) AddPrivilege(name, user, repository, key string) error {
	return nil
}
// RemovePrivilege revokes a user's read/write access to a repository
// within the organization.
// TODO: not implemented — currently a no-op that always returns nil.
func (org *Organization) RemovePrivilege(name, user, repository string) error {
	return nil
}
// Save writes every non-empty field of the Organization struct into
// the Ledis hash at key, using the Go field name as the hash field.
// Only string, bool, and int64 fields are supported.
func (org *Organization) Save(key []byte) error {
	t := reflect.TypeOf(org).Elem()
	v := reflect.ValueOf(org).Elem()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := v.Field(field.Index[0])
		// Skip zero-valued fields so partial records stay sparse.
		if utils.IsEmptyValue(value) {
			continue
		}
		// Serialize the field according to its kind.
		var data []byte
		switch value.Kind() {
		case reflect.String:
			data = []byte(value.String())
		case reflect.Bool:
			data = utils.BoolToBytes(value.Bool())
		case reflect.Int64:
			data = utils.Int64ToBytes(value.Int())
		default:
			return fmt.Errorf("不支持的数据类型 %s:%s", field.Name, value.Kind().String())
		}
		if _, err := LedisDB.HSet(key, []byte(field.Name), data); err != nil {
			return err
		}
	}
	return nil
}
增加从组织中删除 Repository 的方法 (Add a method for removing a Repository from an Organization)
package models
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"time"
"github.com/dockercn/docker-bucket/utils"
)
// Membership roles stored in User.Organizations / Organization.Users.
const (
	ORG_MEMBER = "M" // ordinary member
	ORG_OWNER  = "O" // owner
)
// User is the persisted account record. Instances are stored as Ledis
// hashes under a generated object key; the global "user" hash maps
// usernames to those keys (see Has / Put / Save).
type User struct {
	Username      string //
	Password      string //
	Repositories  string //JSON map of the user's repositories (name -> repository key)
	Organizations string //JSON map of the organizations the user belongs to (name -> role)
	Email         string //may be changed; globally unique
	Fullname      string //
	Company       string //
	Location      string //
	Mobile        string //
	URL           string //
	Gravatar      string //if an email address, the avatar is shown via the gravatar.org API; otherwise the URL of an uploaded avatar image
	Created       int64  //
	Updated       int64  //
}
// Has looks up the user's storage key in the global user hash, then
// verifies the record by reading back its Username field.
// Returns (exists, key, err); a non-nil error together with
// exists==true means the key is present but holds a different username.
func (user *User) Has(username string) (bool, []byte, error) {
	key, err := LedisDB.HGet([]byte(GetServerKeys("user")), []byte(GetObjectKey("user", username)))
	if err != nil {
		return false, []byte(""), err
	}
	if key == nil {
		return false, []byte(""), nil
	}
	name, err := LedisDB.HGet(key, []byte("Username"))
	if err != nil {
		return false, []byte(""), err
	}
	if name == nil {
		return false, []byte(""), nil
	}
	// The key exists; make sure it actually belongs to this username.
	if string(name) != username {
		return true, key, fmt.Errorf("已经存在了 Key,但是用户名不相同 %s ", string(name))
	}
	return true, key, nil
}
// Put creates a new user record. It rejects the call when the username
// already exists (as a user or as an organization), or when the
// username, password, or email fails validation. On success the record
// is saved and its key registered in the global user hash.
func (user *User) Put(username string, passwd string, email string) error {
	// Reject if the user key already exists.
	if has, _, err := user.Has(username); err != nil {
		return err
	} else if has == true {
		return fmt.Errorf("已经存在用户 %s", username)
	} else {
		// The username must not collide with an organization name.
		org := new(Organization)
		if h, _, e := org.Has(username); e != nil {
			// BUG FIX: previously returned the outer (nil) err here,
			// silently swallowing the lookup failure; return the real error.
			return e
		} else if h == true {
			return fmt.Errorf("已经存在相同名称的组织 %s", username)
		}
		// Username rules follow the docker registry namespace convention:
		// https://github.com/docker/docker/blob/28f09f06326848f4117baf633ec9fc542108f051/registry/registry.go#L27
		validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
		if !validNamespace.MatchString(username) {
			return fmt.Errorf("用户名必须是 4 - 30 位之间,且只能由 a-z,0-9 和 下划线组成")
		}
		// Minimum password length.
		if len(passwd) < 5 {
			return fmt.Errorf("密码必须等于或大于 5 位字符以上")
		}
		// Basic e-mail syntax check.
		validEmail := regexp.MustCompile(`^[a-z0-9A-Z]+([\-_\.][a-z0-9A-Z]+)*@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)*\.)+[a-zA-Z]+$`)
		if !validEmail.MatchString(email) {
			return fmt.Errorf("Email 格式不合法")
		}
		// Generate a random object key and persist the record.
		key := utils.GeneralKey(username)
		user.Username = username
		user.Password = passwd
		user.Email = email
		user.Updated = time.Now().Unix()
		user.Created = time.Now().Unix()
		if err := user.Save(key); err != nil {
			return err
		}
		// Register the key in the global user hash.
		if _, err := LedisDB.HSet([]byte(GetServerKeys("user")), []byte(GetObjectKey("user", username)), key); err != nil {
			return err
		}
		return nil
	}
}
// Get verifies a username/password pair against the stored record.
// Returns (true, nil) on a match, (false, nil) on a wrong password,
// and (false, err) when the user does not exist or storage fails.
func (user *User) Get(username, passwd string) (bool, error) {
	has, key, err := user.Has(username)
	if err != nil {
		return false, err
	}
	if !has {
		// No key exists for this user.
		return false, fmt.Errorf("不存在 %s 的用户数据", username)
	}
	password, err := LedisDB.HGet(key, []byte("Password"))
	if err != nil {
		return false, err
	}
	// NOTE(review): the stored password is compared as-is — presumably
	// hashing (if any) happens before Put; confirm with the callers.
	return string(password) == passwd, nil
}
// ResetPasswd overwrites the stored password of an existing user and
// bumps the Updated timestamp.
func (user *User) ResetPasswd(username, password string) error {
	has, key, err := user.Has(username)
	if err != nil {
		return err
	}
	if !has {
		// No key exists for this user.
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	user.Password = password
	user.Updated = time.Now().Unix()
	return user.Save(key)
}
// AddRepository records a newly created repository in the user's
// Repositories JSON map (repository name -> global repository key).
func (user *User) AddRepository(username, repository, key string) error {
	var (
		u   []byte
		has bool
		err error
	)
	r := make(map[string]string, 0)
	// The user must exist.
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	if repo, err := LedisDB.HGet(u, []byte("Repositories")); err != nil {
		return err
	} else if repo != nil {
		// BUG FIX: a failed unmarshal used to `return nil`, silently
		// discarding the existing map; propagate the error instead.
		if e := json.Unmarshal(repo, &r); e != nil {
			return e
		}
		if value, exist := r[repository]; exist == true && value == key {
			return fmt.Errorf("已经存在了镜像仓库数据")
		}
	}
	// Add the entry and persist the serialized map.
	r[repository] = key
	repo, _ := json.Marshal(r)
	user.Repositories = string(repo)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// RemoveRepository drops a repository entry from the user's
// Repositories JSON map after the repository is deleted.
func (user *User) RemoveRepository(username, repository string) error {
	var (
		u   []byte
		has bool
		err error
	)
	r := make(map[string]string, 0)
	// The user must exist.
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	}
	if repo, err := LedisDB.HGet(u, []byte("Repositories")); err != nil {
		return err
	} else if repo != nil {
		// BUG FIX: a failed unmarshal used to `return nil`; propagate it.
		if e := json.Unmarshal(repo, &r); e != nil {
			return e
		}
		if _, exist := r[repository]; exist == false {
			return fmt.Errorf("不存在要删除的镜像仓库数据")
		}
	}
	// Remove the entry and persist the serialized map.
	delete(r, repository)
	repo, _ := json.Marshal(r)
	user.Repositories = string(repo)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// AddOrganization records a membership (org name -> role, one of
// ORG_MEMBER/ORG_OWNER) in the user's Organizations JSON map.
func (user *User) AddOrganization(username, org, member string) error {
	var (
		u   []byte
		has bool
		err error
	)
	o := make(map[string]string, 0)
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	} else if has == true {
		if organization, err := LedisDB.HGet(u, []byte("Organizations")); err != nil {
			// BUG FIX: the read error used to be swallowed (`return nil`).
			return err
		} else if organization != nil {
			if e := json.Unmarshal(organization, &o); e != nil {
				return e
			}
			if value, exist := o[org]; exist == true && value == member {
				return fmt.Errorf("已经存在了组织的数据")
			}
		}
	}
	// Add/overwrite the membership and persist the serialized map.
	o[org] = member
	os, _ := json.Marshal(o)
	user.Organizations = string(os)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// RemoveOrganization drops a membership entry from the user's
// Organizations JSON map.
func (user *User) RemoveOrganization(username, org string) error {
	var (
		u   []byte
		has bool
		err error
	)
	o := make(map[string]string, 0)
	if has, u, err = user.Has(username); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在 %s 的用户数据", username)
	} else if has == true {
		if organization, err := LedisDB.HGet(u, []byte("Organizations")); err != nil {
			// BUG FIX: the read error used to be swallowed (`return nil`).
			return err
		} else if organization != nil {
			if e := json.Unmarshal(organization, &o); e != nil {
				return e
			}
			if _, exist := o[org]; exist == false {
				return fmt.Errorf("不存在要移除的用户数据")
			}
		}
	}
	// Remove the membership and persist the serialized map.
	delete(o, org)
	os, _ := json.Marshal(o)
	user.Organizations = string(os)
	user.Updated = time.Now().Unix()
	if err := user.Save(u); err != nil {
		return err
	}
	return nil
}
// Save writes every non-empty field of the User struct into the Ledis
// hash at key, using the Go field name as the hash field name.
// Only string, bool, and int64 fields are supported.
func (user *User) Save(key []byte) error {
	t := reflect.TypeOf(user).Elem()
	v := reflect.ValueOf(user).Elem()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := v.Field(field.Index[0])
		// Skip zero-valued fields so partial records stay sparse.
		if utils.IsEmptyValue(value) {
			continue
		}
		// Serialize the field according to its kind.
		var data []byte
		switch value.Kind() {
		case reflect.String:
			data = []byte(value.String())
		case reflect.Bool:
			data = utils.BoolToBytes(value.Bool())
		case reflect.Int64:
			data = utils.Int64ToBytes(value.Int())
		default:
			return fmt.Errorf("不支持的数据类型 %s:%s", field.Name, value.Kind().String())
		}
		if _, err := LedisDB.HSet(key, []byte(field.Name), data); err != nil {
			return err
		}
	}
	return nil
}
// Organization is the persisted organization record, stored the same
// way as User (a Ledis hash addressed through the global "org" hash).
type Organization struct {
	Owner        string //key of the creating user; the owner has read/write access to every repository by default
	Name         string //
	Description  string //stored as Markdown
	Repositories string //
	Privileges   string //
	Users        string //
	Created      int64  //
	Updated      int64  //
}
// Has looks up the organization's storage key in the global org hash,
// then verifies the record by reading back its Name field.
// Returns (exists, key, err); a non-nil error together with
// exists==true means the key is present but holds a different name.
func (org *Organization) Has(name string) (bool, []byte, error) {
	key, err := LedisDB.HGet([]byte(GetServerKeys("org")), []byte(GetObjectKey("org", name)))
	if err != nil {
		return false, []byte(""), err
	}
	if key == nil {
		return false, []byte(""), nil
	}
	n, err := LedisDB.HGet(key, []byte("Name"))
	if err != nil {
		return false, []byte(""), err
	}
	if n == nil {
		return false, []byte(""), nil
	}
	// The key exists; make sure it actually belongs to this org name.
	if string(n) != name {
		return true, key, fmt.Errorf("已经存在了 Key,但是组织名称不相同 %s", string(n))
	}
	return true, key, nil
}
// Put creates a new organization owned by user. It checks that the
// name is free (against both orgs and users), that the owner exists,
// and that the name is well-formed; on success it registers the org
// key globally, adds the owner to the org's user list, and records the
// org on the owner's user record.
func (org *Organization) Put(user, name, description string) error {
	if has, _, err := org.Has(name); err != nil {
		return err
	} else if has == true {
		return fmt.Errorf("组织 %s 已经存在", name)
	} else {
		// The org name must not collide with an existing username.
		u := new(User)
		if has, _, err := u.Has(name); err != nil {
			return err
		} else if has == true {
			return fmt.Errorf("已经存在相同名称的用户 %s", name)
		}
		// The owning user must exist.
		if has, _, err := u.Has(user); err != nil {
			return err
		} else if has == false {
			return fmt.Errorf("不存在用户的数据 %s", user)
		}
		key := utils.GeneralKey(name)
		// Name rules follow the docker registry namespace convention:
		// https://github.com/docker/docker/blob/28f09f06326848f4117baf633ec9fc542108f051/registry/registry.go#L27
		validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
		if !validNamespace.MatchString(name) {
			return fmt.Errorf("组织名必须是 4 - 30 位之间,且只能由 a-z,0-9 和 下划线组成")
		}
		org.Owner = user
		org.Name = name
		org.Description = description
		org.Updated = time.Now().Unix()
		org.Created = time.Now().Unix()
		if err := org.Save(key); err != nil {
			return err
		} else {
			// Register the key in the global org hash.
			if _, err := LedisDB.HSet([]byte(GetServerKeys("org")), []byte(GetObjectKey("org", name)), key); err != nil {
				return err
			}
			// Record the creator as the organization's owner.
			if e := org.AddUser(name, user, ORG_OWNER); e != nil {
				return e
			}
			// NOTE(review): the owner is added to the org as ORG_OWNER but
			// recorded on the user side as ORG_MEMBER — confirm this
			// asymmetry is intentional.
			if e := u.AddOrganization(user, name, ORG_MEMBER); e != nil {
				return e
			}
			return nil
		}
	}
}
// AddUser adds a user to the organization's Users JSON map with the
// given role (member is ORG_OWNER or ORG_MEMBER).
func (org *Organization) AddUser(name, user, member string) error {
	var (
		o   []byte
		has bool
		err error
	)
	users := make(map[string]string, 0)
	// The organization must exist.
	if has, o, err = org.Has(name); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The user must exist.
	u := new(User)
	if has, _, err = u.Has(user); err != nil {
		return err
	} else if has == false {
		// BUG FIX: the format verb was %S (invalid); use %s.
		return fmt.Errorf("不存在用户的数据 %s", user)
	}
	if us, err := LedisDB.HGet(o, []byte("Users")); err != nil {
		// BUG FIX: the read error used to be swallowed (`return nil`).
		return err
	} else if us != nil {
		if e := json.Unmarshal(us, &users); e != nil {
			return e
		}
		if value, exist := users[user]; exist == true && value == member {
			return fmt.Errorf("已经存在了用户的数据")
		}
	}
	// Add/overwrite the membership and persist the serialized map.
	users[user] = member
	us, _ := json.Marshal(users)
	org.Users = string(us)
	org.Updated = time.Now().Unix()
	if err = org.Save(o); err != nil {
		return err
	}
	return nil
}
// RemoveUser drops a user from the organization's Users JSON map.
func (org *Organization) RemoveUser(name, user string) error {
	var (
		o   []byte
		has bool
		err error
	)
	users := make(map[string]string, 0)
	// The organization must exist.
	if has, o, err = org.Has(name); err != nil {
		return err
	} else if has == false {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The user must exist.
	u := new(User)
	if has, _, err = u.Has(user); err != nil {
		return err
	} else if has == false {
		// BUG FIX: the format verb was %S (invalid); use %s.
		return fmt.Errorf("不存在用户的数据 %s", user)
	}
	if us, err := LedisDB.HGet(o, []byte("Users")); err != nil {
		return err
	} else if us != nil {
		if e := json.Unmarshal(us, &users); e != nil {
			return e
		}
		if _, exist := users[user]; exist == false {
			return fmt.Errorf("在组织中不存在要移除的用户数据")
		}
	}
	// Remove the membership and persist the serialized map.
	delete(users, user)
	us, _ := json.Marshal(users)
	org.Users = string(us)
	org.Updated = time.Now().Unix()
	if err = org.Save(o); err != nil {
		return err
	}
	return nil
}
// AddRepository records a repository (name -> repository key) in the
// organization's Repositories JSON map.
func (org *Organization) AddRepository(name, repository, key string) error {
	has, o, err := org.Has(name)
	if err != nil {
		return err
	}
	if !has {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The repository itself must already exist.
	repo := new(Repository)
	if exists, err := repo.Has(repository); err != nil {
		return err
	} else if !exists {
		return fmt.Errorf("不存在镜像仓库的数据: %s", repository)
	}
	repos := make(map[string]string, 0)
	if raw, err := LedisDB.HGet(o, []byte("Repositories")); err != nil {
		return err
	} else if raw != nil {
		if e := json.Unmarshal(raw, &repos); e != nil {
			return e
		}
		if v, ok := repos[repository]; ok && v == key {
			return fmt.Errorf("在组织中已经存在要添加的镜像仓库数据")
		}
	}
	// Add the entry and persist the serialized map.
	repos[repository] = key
	serialized, _ := json.Marshal(repos)
	org.Repositories = string(serialized)
	org.Updated = time.Now().Unix()
	return org.Save(o)
}
// RemoveRepository drops a repository entry from the organization's
// Repositories JSON map (counterpart of AddRepository).
func (org *Organization) RemoveRepository(name, repository string) error {
	has, o, err := org.Has(name)
	if err != nil {
		return err
	}
	if !has {
		return fmt.Errorf("不存在组织的数据 %s", name)
	}
	// The repository itself must exist.
	repo := new(Repository)
	if exists, err := repo.Has(repository); err != nil {
		return err
	} else if !exists {
		return fmt.Errorf("不存在镜像仓库的数据: %s", repository)
	}
	repos := make(map[string]string, 0)
	if raw, err := LedisDB.HGet(o, []byte("Repositories")); err != nil {
		return err
	} else if raw != nil {
		if e := json.Unmarshal(raw, &repos); e != nil {
			return e
		}
		if _, ok := repos[repository]; !ok {
			return fmt.Errorf("在组织中不存在要删除的仓库数据")
		}
	}
	// Remove the entry and persist the serialized map.
	delete(repos, repository)
	serialized, _ := json.Marshal(repos)
	org.Repositories = string(serialized)
	org.Updated = time.Now().Unix()
	return org.Save(o)
}
// AddPrivilege grants a user read/write access to a repository within
// the organization.
// TODO: not implemented — currently a no-op that always returns nil.
func (org *Organization) AddPrivilege(name, user, repository, key string) error {
	return nil
}
// RemovePrivilege revokes a user's read/write access to a repository
// within the organization.
// TODO: not implemented — currently a no-op that always returns nil.
func (org *Organization) RemovePrivilege(name, user, repository string) error {
	return nil
}
// Save writes every non-empty field of the Organization struct into
// the Ledis hash at key, using the Go field name as the hash field.
// Only string, bool, and int64 fields are supported.
func (org *Organization) Save(key []byte) error {
	t := reflect.TypeOf(org).Elem()
	v := reflect.ValueOf(org).Elem()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := v.Field(field.Index[0])
		// Skip zero-valued fields so partial records stay sparse.
		if utils.IsEmptyValue(value) {
			continue
		}
		// Serialize the field according to its kind.
		var data []byte
		switch value.Kind() {
		case reflect.String:
			data = []byte(value.String())
		case reflect.Bool:
			data = utils.BoolToBytes(value.Bool())
		case reflect.Int64:
			data = utils.Int64ToBytes(value.Int())
		default:
			return fmt.Errorf("不支持的数据类型 %s:%s", field.Name, value.Kind().String())
		}
		if _, err := LedisDB.HSet(key, []byte(field.Name), data); err != nil {
			return err
		}
	}
	return nil
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"container/list"
"context"
"crypto/sha256"
"crypto/subtle"
"encoding/hex"
"errors"
"fmt"
_ "image/jpeg" // Needed for jpeg support
"os"
"path/filepath"
"regexp"
"strings"
"time"
"unicode/utf8"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/generate"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/public"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/bcrypt"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
"golang.org/x/crypto/ssh"
"xorm.io/builder"
)
// UserType defines the user type
type UserType int

const (
	// UserTypeIndividual defines an individual user
	UserTypeIndividual UserType = iota // Historic reason to make it starts at 0.
	// UserTypeOrganization defines an organization
	UserTypeOrganization
)

// Identifiers of the supported password hashing algorithms; these are
// the values stored in User.PasswdHashAlgo.
const (
	algoBcrypt = "bcrypt"
	algoScrypt = "scrypt"
	algoArgon2 = "argon2"
	algoPbkdf2 = "pbkdf2"
)

// AvailableHashAlgorithms represents the available password hashing algorithms
var AvailableHashAlgorithms = []string{
	algoPbkdf2,
	algoArgon2,
	algoScrypt,
	algoBcrypt,
}

// Valid values of User.EmailNotificationsPreference.
const (
	// EmailNotificationsEnabled indicates that the user would like to receive all email notifications
	EmailNotificationsEnabled = "enabled"
	// EmailNotificationsOnMention indicates that the user would like to be notified via email when mentioned.
	EmailNotificationsOnMention = "onmention"
	// EmailNotificationsDisabled indicates that the user would not like to be notified via email.
	EmailNotificationsDisabled = "disabled"
)

// Sentinel errors and shared patterns for user lookup/authentication.
var (
	// ErrUserNotKeyOwner user does not own this key error
	ErrUserNotKeyOwner = errors.New("User does not own this public key")
	// ErrEmailNotExist e-mail does not exist error
	ErrEmailNotExist = errors.New("E-mail does not exist")
	// ErrEmailNotActivated e-mail address has not been activated error
	ErrEmailNotActivated = errors.New("E-mail address has not been activated")
	// ErrUserNameIllegal user name contains illegal characters error
	ErrUserNameIllegal = errors.New("User name contains illegal characters")
	// ErrLoginSourceNotActived login source is not actived error
	ErrLoginSourceNotActived = errors.New("Login source is not actived")
	// ErrUnsupportedLoginType login source is unknown error
	ErrUnsupportedLoginType = errors.New("Login source is unknown")
	// Characters prohibited in a user name (anything except A-Za-z0-9_.-)
	alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`)
)
// User represents the object of individual and member of organization.
// A single table backs both individuals and organizations; Type
// distinguishes them, and the trailing field groups apply only to one
// kind or the other.
type User struct {
	ID        int64  `xorm:"pk autoincr"`
	LowerName string `xorm:"UNIQUE NOT NULL"` // lowercased Name, maintained in BeforeUpdate
	Name      string `xorm:"UNIQUE NOT NULL"`
	FullName  string
	// Email is the primary email address (to be used for communication)
	Email                        string `xorm:"NOT NULL"`
	KeepEmailPrivate             bool
	EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT NULL DEFAULT 'enabled'"`
	Passwd                       string `xorm:"NOT NULL"`
	PasswdHashAlgo               string `xorm:"NOT NULL DEFAULT 'argon2'"`
	// MustChangePassword is an attribute that determines if a user
	// is to change his/her password after registration.
	MustChangePassword bool `xorm:"NOT NULL DEFAULT false"`
	LoginType          LoginType
	LoginSource        int64 `xorm:"NOT NULL DEFAULT 0"`
	LoginName          string
	Type               UserType
	OwnedOrgs          []*User       `xorm:"-"`
	Orgs               []*User       `xorm:"-"`
	Repos              []*Repository `xorm:"-"`
	Location           string
	Website            string
	Rands              string `xorm:"VARCHAR(10)"`
	Salt               string `xorm:"VARCHAR(10)"`
	Language           string `xorm:"VARCHAR(5)"`
	Description        string
	CreatedUnix        timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix        timeutil.TimeStamp `xorm:"INDEX updated"`
	LastLoginUnix      timeutil.TimeStamp `xorm:"INDEX"`
	// Remember visibility choice for convenience, true for private
	LastRepoVisibility bool
	// Maximum repository creation limit, -1 means use global default
	MaxRepoCreation int `xorm:"NOT NULL DEFAULT -1"`
	// Permissions
	IsActive                bool `xorm:"INDEX"` // Activate primary email
	IsAdmin                 bool
	IsRestricted            bool `xorm:"NOT NULL DEFAULT false"`
	AllowGitHook            bool
	AllowImportLocal        bool // Allow migrate repository by local path
	AllowCreateOrganization bool `xorm:"DEFAULT true"`
	ProhibitLogin           bool `xorm:"NOT NULL DEFAULT false"`
	// Avatar
	Avatar          string `xorm:"VARCHAR(2048) NOT NULL"`
	AvatarEmail     string `xorm:"NOT NULL"`
	UseCustomAvatar bool
	// Counters
	NumFollowers int
	NumFollowing int `xorm:"NOT NULL DEFAULT 0"`
	NumStars     int
	NumRepos     int
	// For organization
	NumTeams                  int
	NumMembers                int
	Teams                     []*Team             `xorm:"-"`
	Members                   UserList            `xorm:"-"`
	MembersIsPublic           map[int64]bool      `xorm:"-"`
	Visibility                structs.VisibleType `xorm:"NOT NULL DEFAULT 0"`
	RepoAdminChangeTeamAccess bool                `xorm:"NOT NULL DEFAULT false"`
	// Preferences
	DiffViewStyle       string `xorm:"NOT NULL DEFAULT ''"`
	Theme               string `xorm:"NOT NULL DEFAULT ''"`
	KeepActivityPrivate bool   `xorm:"NOT NULL DEFAULT false"`
}
// SearchOrganizationsOptions options to filter organizations
type SearchOrganizationsOptions struct {
	ListOptions
	All bool
}
// ColorFormat writes a colored string to identify this struct:
// the user's ID and name, each wrapped in a colored log value.
func (u *User) ColorFormat(s fmt.State) {
	log.ColorFprintf(s, "%d:%s",
		log.NewColoredIDValue(u.ID),
		log.NewColoredValue(u.Name))
}
// BeforeUpdate is invoked from XORM before updating this object.
// It normalizes mutable fields into their canonical stored form.
func (u *User) BeforeUpdate() {
	// Clamp the per-user repo limit to the "use global default" sentinel.
	if u.MaxRepoCreation < -1 {
		u.MaxRepoCreation = -1
	}

	u.Email = strings.ToLower(u.Email)
	// Organizations do not need an avatar e-mail fallback.
	if !u.IsOrganization() && len(u.AvatarEmail) == 0 {
		u.AvatarEmail = u.Email
	}

	u.LowerName = strings.ToLower(u.Name)
	u.Location = base.TruncateString(u.Location, 255)
	u.Website = base.TruncateString(u.Website, 255)
	u.Description = base.TruncateString(u.Description, 255)
}
// AfterLoad is invoked from XORM after filling all the fields of this object.
// Users created before themes existed have an empty Theme column; fall back
// to the site default so templates always get a usable value.
func (u *User) AfterLoad() {
	if len(u.Theme) == 0 {
		u.Theme = setting.UI.DefaultTheme
	}
}
// SetLastLogin set time to last login
// Only the in-memory struct is mutated; persisting the field is left to
// the caller (e.g. via UpdateUserCols).
func (u *User) SetLastLogin() {
	u.LastLoginUnix = timeutil.TimeStampNow()
}
// UpdateDiffViewStyle updates the users diff view style
// and immediately persists only the diff_view_style column.
func (u *User) UpdateDiffViewStyle(style string) error {
	u.DiffViewStyle = style
	return UpdateUserCols(u, "diff_view_style")
}
// UpdateTheme updates a users' theme irrespective of the site wide theme
// and immediately persists only the theme column.
func (u *User) UpdateTheme(themeName string) error {
	u.Theme = themeName
	return UpdateUserCols(u, "theme")
}
// GetEmail returns an noreply email, if the user has set to keep his
// email address private, otherwise the primary email address.
func (u *User) GetEmail() string {
	if !u.KeepEmailPrivate {
		return u.Email
	}
	// Synthesize a "<lowername>@<noreply-domain>" address instead of
	// exposing the real one.
	return fmt.Sprintf("%s@%s", u.LowerName, setting.Service.NoReplyAddress)
}
// GetAllUsers returns a slice of all users found in DB, ordered by ID.
func GetAllUsers() ([]*User, error) {
	// A non-nil empty slice is returned even when there are no rows.
	users := make([]*User, 0)
	err := x.OrderBy("id").Find(&users)
	return users, err
}
// IsLocal returns true if user login type is LoginPlain.
// The comparison is <=, so any login type ordered at or below LoginPlain
// also counts as local.
func (u *User) IsLocal() bool {
	return u.LoginType <= LoginPlain
}
// IsOAuth2 returns true if user login type is LoginOAuth2.
// Unlike IsLocal, this is an exact match on the login type.
func (u *User) IsOAuth2() bool {
	return u.LoginType == LoginOAuth2
}
// HasForkedRepo checks if user has already forked a repository with given ID.
// The forked repository itself (first return of the package-level
// HasForkedRepo) is discarded; only existence matters here.
func (u *User) HasForkedRepo(repoID int64) bool {
	_, has := HasForkedRepo(u.ID, repoID)
	return has
}
// MaxCreationLimit returns the number of repositories a user is allowed
// to create. A per-user value of -1 (or lower) means "use the global
// site-wide limit".
func (u *User) MaxCreationLimit() int {
	if u.MaxRepoCreation > -1 {
		return u.MaxRepoCreation
	}
	return setting.Repository.MaxCreationLimit
}
// CanCreateRepo returns if user login can create a repository.
// Admins are never limited; otherwise the per-user limit applies, falling
// back to the global limit, where -1 means unlimited.
// NOTE: functions calling this assume a failure due to repository count limit; if new checks are added, those functions should be revised
func (u *User) CanCreateRepo() bool {
	if u.IsAdmin {
		return true
	}
	limit := u.MaxRepoCreation
	if limit <= -1 {
		limit = setting.Repository.MaxCreationLimit
		if limit <= -1 {
			return true // unlimited globally as well
		}
	}
	return u.NumRepos < limit
}
// CanCreateOrganization returns true if user can create organisation.
// Admins bypass the site-wide DisableRegularOrgCreation switch.
func (u *User) CanCreateOrganization() bool {
	if u.IsAdmin {
		return true
	}
	return u.AllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation
}
// CanEditGitHook returns true if user can edit Git hooks.
// The site-wide DisableGitHooks switch overrides per-user permission.
func (u *User) CanEditGitHook() bool {
	if setting.DisableGitHooks {
		return false
	}
	return u.IsAdmin || u.AllowGitHook
}
// CanImportLocal returns true if user can migrate repository by local path.
// Requires the site-wide ImportLocalPaths switch plus either admin status
// or the per-user AllowImportLocal flag.
func (u *User) CanImportLocal() bool {
	return setting.ImportLocalPaths && (u.IsAdmin || u.AllowImportLocal)
}
// DashboardLink returns the user dashboard page link.
// Organizations get their dedicated dashboard route; regular users get
// the application root.
func (u *User) DashboardLink() string {
	if !u.IsOrganization() {
		return setting.AppSubURL + "/"
	}
	return setting.AppSubURL + "/org/" + u.Name + "/dashboard/"
}
// HomeLink returns the user or organization home page link
// (relative to the configured application sub-URL).
func (u *User) HomeLink() string {
	return setting.AppSubURL + "/" + u.Name
}
// HTMLURL returns the user or organization's full link.
// NOTE(review): no separator is inserted, so this assumes setting.AppURL
// ends with a slash — confirm against the setting's definition.
func (u *User) HTMLURL() string {
	return setting.AppURL + u.Name
}
// GenerateEmailActivateCode generates an activate code based on user information and given e-mail.
func (u *User) GenerateEmailActivateCode(email string) string {
	payload := fmt.Sprintf("%d%s%s%s%s", u.ID, email, u.LowerName, u.Passwd, u.Rands)
	code := base.CreateTimeLimitCode(payload, setting.Service.ActiveCodeLives, nil)

	// Append the hex-encoded lowercase username so the account can be
	// looked up again when the code is verified.
	return code + hex.EncodeToString([]byte(u.LowerName))
}
// GenerateActivateCode generates an activate code based on user information.
// Convenience wrapper around GenerateEmailActivateCode using the primary e-mail.
func (u *User) GenerateActivateCode() string {
	return u.GenerateEmailActivateCode(u.Email)
}
// GetFollowers returns range of user's followers.
// When listOptions.Page is non-zero the result is paginated; otherwise all
// followers are returned.
func (u *User) GetFollowers(listOptions ListOptions) ([]*User, error) {
	sess := x.
		Where("follow.follow_id=?", u.ID).
		Join("LEFT", "follow", "`user`.id=follow.user_id")

	// Single allocate-and-find path; only the capacity hint differs
	// between the paginated and unpaginated cases.
	capacity := 8
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}

	users := make([]*User, 0, capacity)
	return users, sess.Find(&users)
}
// IsFollowing returns true if user is following followID.
// Thin wrapper around the package-level IsFollowing.
func (u *User) IsFollowing(followID int64) bool {
	return IsFollowing(u.ID, followID)
}
// GetFollowing returns range of user's following.
// When listOptions.Page is non-zero the result is paginated; otherwise all
// followed users are returned.
func (u *User) GetFollowing(listOptions ListOptions) ([]*User, error) {
	sess := x.
		Where("follow.user_id=?", u.ID).
		Join("LEFT", "follow", "`user`.id=follow.follow_id")

	// Single allocate-and-find path; only the capacity hint differs
	// between the paginated and unpaginated cases.
	capacity := 8
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}

	users := make([]*User, 0, capacity)
	return users, sess.Find(&users)
}
// NewGitSig generates and returns the signature of given user,
// using the git-safe name and the (possibly noreply) e-mail address.
func (u *User) NewGitSig() *git.Signature {
	sig := new(git.Signature)
	sig.Name = u.GitName()
	sig.Email = u.GetEmail()
	sig.When = time.Now()
	return sig
}
// hashPassword derives a hash of passwd with the given salt using the
// requested algorithm and returns it hex-encoded. Unknown algorithm names
// fall back to PBKDF2.
//
// NOTE: bcrypt manages its own salt internally, so the salt argument is
// ignored for algoBcrypt and the raw bcrypt string (not hex) is returned.
// The bcrypt/scrypt errors are deliberately discarded — with these fixed,
// valid parameters they should not occur in practice (TODO confirm).
func hashPassword(passwd, salt, algo string) string {
	var tempPasswd []byte

	switch algo {
	case algoBcrypt:
		tempPasswd, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)
		return string(tempPasswd)
	case algoScrypt:
		tempPasswd, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50)
	case algoArgon2:
		tempPasswd = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50)
	case algoPbkdf2:
		fallthrough
	default:
		tempPasswd = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New)
	}

	return fmt.Sprintf("%x", tempPasswd)
}
// SetPassword hashes a password using the algorithm defined in the config value of PASSWORD_HASH_ALGO
// change passwd, salt and passwd_hash_algo fields
func (u *User) SetPassword(passwd string) (err error) {
	// An empty password clears the stored credentials entirely.
	if passwd == "" {
		u.Passwd = ""
		u.Salt = ""
		u.PasswdHashAlgo = ""
		return nil
	}

	salt, err := GetUserSalt()
	if err != nil {
		return err
	}
	u.Salt = salt
	u.PasswdHashAlgo = setting.PasswordHashAlgo
	u.Passwd = hashPassword(passwd, salt, setting.PasswordHashAlgo)
	return nil
}
// ValidatePassword checks if given password matches the one belongs to the user.
// Non-bcrypt hashes are re-derived and compared with a constant-time
// comparison to avoid timing side channels; bcrypt performs its own
// salted comparison internally via CompareHashAndPassword.
func (u *User) ValidatePassword(passwd string) bool {
	tempHash := hashPassword(passwd, u.Salt, u.PasswdHashAlgo)

	if u.PasswdHashAlgo != algoBcrypt && subtle.ConstantTimeCompare([]byte(u.Passwd), []byte(tempHash)) == 1 {
		return true
	}
	if u.PasswdHashAlgo == algoBcrypt && bcrypt.CompareHashAndPassword([]byte(u.Passwd), []byte(passwd)) == nil {
		return true
	}
	return false
}
// IsPasswordSet checks if the password is set or left empty.
func (u *User) IsPasswordSet() bool {
	return u.Passwd != ""
}
// IsOrganization returns true if user is actually a organization,
// i.e. its Type field equals UserTypeOrganization.
func (u *User) IsOrganization() bool {
	return u.Type == UserTypeOrganization
}
// IsUserOrgOwner returns true if user is in the owner team of given organization.
// Lookup failures are logged and reported as "not an owner".
func (u *User) IsUserOrgOwner(orgID int64) bool {
	owner, err := IsOrganizationOwner(orgID, u.ID)
	if err != nil {
		log.Error("IsOrganizationOwner: %v", err)
		return false
	}
	return owner
}
// HasMemberWithUserID returns true if user with userID is part of the u organisation.
// Uses the default engine x; see hasMemberWithUserID for the engine-parameterized form.
func (u *User) HasMemberWithUserID(userID int64) bool {
	return u.hasMemberWithUserID(x, userID)
}
// hasMemberWithUserID reports whether userID is a member of organization u,
// using the supplied engine. Lookup failures are logged and reported as
// "not a member".
func (u *User) hasMemberWithUserID(e Engine, userID int64) bool {
	member, err := isOrganizationMember(e, u.ID, userID)
	if err != nil {
		log.Error("IsOrganizationMember: %v", err)
		return false
	}
	return member
}
// IsPublicMember returns true if user public his/her membership in given organization.
// Lookup failures are logged and reported as "not public".
func (u *User) IsPublicMember(orgID int64) bool {
	public, err := IsPublicMembership(orgID, u.ID)
	if err != nil {
		log.Error("IsPublicMembership: %v", err)
		return false
	}
	return public
}
// getOrganizationCount counts the org_user rows whose uid matches this
// user, i.e. the number of organizations the user belongs to.
func (u *User) getOrganizationCount(e Engine) (int64, error) {
	return e.
		Where("uid=?", u.ID).
		Count(new(OrgUser))
}
// GetOrganizationCount returns count of membership of organization of user.
// Wrapper around getOrganizationCount using the default engine x.
func (u *User) GetOrganizationCount() (int64, error) {
	return u.getOrganizationCount(x)
}
// GetRepositories returns repositories that user owns, including private repositories.
// The result is cached on u.Repos; the total count returned by
// GetUserRepositories is discarded.
func (u *User) GetRepositories(listOpts ListOptions, names ...string) (err error) {
	u.Repos, _, err = GetUserRepositories(&SearchRepoOptions{Actor: u, Private: true, ListOptions: listOpts, LowerNames: names})
	return err
}
// GetRepositoryIDs returns repositories IDs where user owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetRepositoryIDs(units ...UnitType) ([]int64, error) {
	sess := x.Table("repository").Cols("repository.id")

	// Restrict to repositories carrying at least one of the requested units.
	if len(units) > 0 {
		sess = sess.
			Join("INNER", "repo_unit", "repository.id = repo_unit.repo_id").
			In("repo_unit.type", units)
	}

	var ids []int64
	return ids, sess.Where("owner_id = ?", u.ID).Find(&ids)
}
// GetActiveRepositoryIDs returns non-archived repositories IDs where user owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetActiveRepositoryIDs(units ...UnitType) ([]int64, error) {
	sess := x.Table("repository").Cols("repository.id")

	// Restrict to repositories carrying at least one of the requested units.
	if len(units) > 0 {
		sess = sess.
			Join("INNER", "repo_unit", "repository.id = repo_unit.repo_id").
			In("repo_unit.type", units)
	}

	sess = sess.Where(builder.Eq{"is_archived": false})

	var ids []int64
	return ids, sess.Where("owner_id = ?", u.ID).GroupBy("repository.id").Find(&ids)
}
// GetOrgRepositoryIDs returns repositories IDs where user's team owned and has unittypes
// Caller shall check that units is not globally disabled
//
// The team_repo join condition admits a repository either because it is
// public and the user is not restricted (the "? != ? and is_private != ?"
// arm binds to: true != u.IsRestricted AND is_private != true), or because
// one of the user's teams was explicitly granted that repository.
func (u *User) GetOrgRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64

	if err := x.Table("repository").
		Cols("repository.id").
		Join("INNER", "team_user", "repository.owner_id = team_user.org_id").
		Join("INNER", "team_repo", "(? != ? and repository.is_private != ?) OR (team_user.team_id = team_repo.team_id AND repository.id = team_repo.repo_id)", true, u.IsRestricted, true).
		Where("team_user.uid = ?", u.ID).
		GroupBy("repository.id").Find(&ids); err != nil {
		return nil, err
	}

	// Unit filtering happens after the fact, in Go, not in the query.
	if len(units) > 0 {
		return FilterOutRepoIdsWithoutUnitAccess(u, ids, units...)
	}

	return ids, nil
}
// GetActiveOrgRepositoryIDs returns non-archived repositories IDs where user's team owned and has unittypes
// Caller shall check that units is not globally disabled
//
// Identical to GetOrgRepositoryIDs except for the extra is_archived filter.
// See that function for an explanation of the team_repo join condition.
func (u *User) GetActiveOrgRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64

	if err := x.Table("repository").
		Cols("repository.id").
		Join("INNER", "team_user", "repository.owner_id = team_user.org_id").
		Join("INNER", "team_repo", "(? != ? and repository.is_private != ?) OR (team_user.team_id = team_repo.team_id AND repository.id = team_repo.repo_id)", true, u.IsRestricted, true).
		Where("team_user.uid = ?", u.ID).
		Where(builder.Eq{"is_archived": false}).
		GroupBy("repository.id").Find(&ids); err != nil {
		return nil, err
	}

	// Unit filtering happens after the fact, in Go, not in the query.
	if len(units) > 0 {
		return FilterOutRepoIdsWithoutUnitAccess(u, ids, units...)
	}

	return ids, nil
}
// GetAccessRepoIDs returns all repositories IDs where user's or user is a team member organizations
// Caller shall check that units is not globally disabled
func (u *User) GetAccessRepoIDs(units ...UnitType) ([]int64, error) {
	owned, err := u.GetRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}

	viaTeams, err := u.GetOrgRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}

	// The two sets are concatenated as-is (no dedup performed here).
	return append(owned, viaTeams...), nil
}
// GetActiveAccessRepoIDs returns all non-archived repositories IDs where user's or user is a team member organizations
// Caller shall check that units is not globally disabled
func (u *User) GetActiveAccessRepoIDs(units ...UnitType) ([]int64, error) {
	owned, err := u.GetActiveRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}

	viaTeams, err := u.GetActiveOrgRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}

	// The two sets are concatenated as-is (no dedup performed here).
	return append(owned, viaTeams...), nil
}
// GetMirrorRepositories returns mirror repositories that user owns, including private repositories.
// Thin wrapper around the package-level GetUserMirrorRepositories.
func (u *User) GetMirrorRepositories() ([]*Repository, error) {
	return GetUserMirrorRepositories(u.ID)
}
// GetOwnedOrganizations returns all organizations that user owns.
// The result is cached on u.OwnedOrgs.
func (u *User) GetOwnedOrganizations() (err error) {
	u.OwnedOrgs, err = GetOwnedOrgsByUserID(u.ID)
	return err
}
// GetOrganizations returns paginated organizations that user belongs to,
// storing them on u.Orgs with each org's NumRepos overwritten by the count
// of repositories visible to u (not the org's real total).
// TODO: does not respect All and show orgs you privately participate
func (u *User) GetOrganizations(opts *SearchOrganizationsOptions) error {
	sess := x.NewSession()
	defer sess.Close()

	// Build an explicit GROUP BY over every user column: some databases
	// require all selected non-aggregate columns to appear in GROUP BY.
	schema, err := x.TableInfo(new(User))
	if err != nil {
		return err
	}
	groupByCols := &strings.Builder{}
	for _, col := range schema.Columns() {
		fmt.Fprintf(groupByCols, "`%s`.%s,", schema.Name, col.Name)
	}
	groupByStr := groupByCols.String()
	// Drop the trailing comma added by the loop above.
	groupByStr = groupByStr[0 : len(groupByStr)-1]

	// Count, per organization, the repositories u can actually see; the
	// LEFT join keeps orgs with zero visible repos in the result.
	sess.Select("`user`.*, count(repo_id) as org_count").
		Table("user").
		Join("INNER", "org_user", "`org_user`.org_id=`user`.id").
		Join("LEFT", builder.
			Select("id as repo_id, owner_id as repo_owner_id").
			From("repository").
			Where(accessibleRepositoryCondition(u)), "`repository`.repo_owner_id = `org_user`.org_id").
		And("`org_user`.uid=?", u.ID).
		GroupBy(groupByStr)
	if opts.PageSize != 0 {
		sess = opts.setSessionPagination(sess)
	}

	// OrgCount embeds User so xorm can scan both the user columns and the
	// aggregate into one struct.
	type OrgCount struct {
		User     `xorm:"extends"`
		OrgCount int
	}
	orgCounts := make([]*OrgCount, 0, 10)

	if err := sess.
		Asc("`user`.name").
		Find(&orgCounts); err != nil {
		return err
	}

	orgs := make([]*User, len(orgCounts))
	for i, orgCount := range orgCounts {
		orgCount.User.NumRepos = orgCount.OrgCount
		orgs[i] = &orgCount.User
	}

	u.Orgs = orgs

	return nil
}
// DisplayName returns full name if it's not empty,
// returns username otherwise.
func (u *User) DisplayName() string {
	if full := strings.TrimSpace(u.FullName); full != "" {
		return full
	}
	return u.Name
}
// GetDisplayName returns full name if it's not empty and DEFAULT_SHOW_FULL_NAME is set,
// returns username otherwise.
func (u *User) GetDisplayName() string {
	if !setting.UI.DefaultShowFullName {
		return u.Name
	}
	if full := strings.TrimSpace(u.FullName); full != "" {
		return full
	}
	return u.Name
}
// gitSafeName strips characters that are not permitted in a git signature
// name (newlines and angle brackets) and trims surrounding whitespace.
func gitSafeName(name string) string {
	sanitizer := strings.NewReplacer(
		"\n", "",
		"<", "",
		">", "",
	)
	return strings.TrimSpace(sanitizer.Replace(name))
}
// GitName returns a git safe name.
// Preference order: sanitized full name, then sanitized username, then a
// synthetic "user-<id>" fallback. Although u.Name should be safe if created
// in our system, LDAP users may have bad names.
func (u *User) GitName() string {
	for _, candidate := range []string{u.FullName, u.Name} {
		if safe := gitSafeName(candidate); len(safe) > 0 {
			return safe
		}
	}
	// Totally pathological name so it's got to be:
	return fmt.Sprintf("user-%d", u.ID)
}
// ShortName ellipses username to length
// via base.EllipsisString.
func (u *User) ShortName(length int) string {
	return base.EllipsisString(u.Name, length)
}
// IsMailable checks if a user is eligible
// to receive emails. Only the IsActive flag is consulted here;
// prohibit_login is checked separately by callers that care.
func (u *User) IsMailable() bool {
	return u.IsActive
}
// EmailNotifications returns the User's email notification preference
// (the raw EmailNotificationsPreference field).
func (u *User) EmailNotifications() string {
	return u.EmailNotificationsPreference
}
// SetEmailNotifications sets the user's email notification preference
// and immediately persists only the email_notifications_preference column.
func (u *User) SetEmailNotifications(set string) error {
	u.EmailNotificationsPreference = set

	err := UpdateUserCols(u, "email_notifications_preference")
	if err != nil {
		log.Error("SetEmailNotifications: %v", err)
	}
	return err
}
// isUserExist reports whether a user other than uid already holds the
// given name (case-insensitive). An empty name never exists.
func isUserExist(e Engine, uid int64, name string) (bool, error) {
	if name == "" {
		return false, nil
	}
	probe := &User{LowerName: strings.ToLower(name)}
	return e.Where("id!=?", uid).Get(probe)
}
// IsUserExist checks if given user name exist,
// the user name should be noncased unique.
// If uid is presented, then check will rule out that one,
// it is used when update a user name in settings page.
// Wrapper around isUserExist using the default engine x.
func IsUserExist(uid int64, name string) (bool, error) {
	return isUserExist(x, uid, name)
}
// GetUserSalt returns a random user salt token:
// a 10-character random string (matching the VARCHAR(10) Salt/Rands columns).
func GetUserSalt() (string, error) {
	return generate.GetRandomString(10)
}
// NewGhostUser creates and returns a fake user for someone has deleted his/her account.
// The sentinel values here must stay in sync with IsGhost.
func NewGhostUser() *User {
	ghost := new(User)
	ghost.ID = -1
	ghost.Name = "Ghost"
	ghost.LowerName = "ghost"
	return ghost
}
// NewReplaceUser creates and returns a fake user for external user,
// carrying the given name but the same -1 sentinel ID as the ghost user.
func NewReplaceUser(name string) *User {
	replacement := new(User)
	replacement.ID = -1
	replacement.Name = name
	replacement.LowerName = strings.ToLower(name)
	return replacement
}
// IsGhost check if user is fake user for a deleted account.
// A nil receiver is safe and reports false.
func (u *User) IsGhost() bool {
	return u != nil && u.ID == -1 && u.Name == "Ghost"
}
var (
	// reservedUsernames lists names that may not be used as a user or
	// organization name, presumably because they collide with fixed
	// top-level routes and served files — confirm against the router.
	// public.KnownPublicEntries extends the list with entries from the
	// public assets directory.
	reservedUsernames = append([]string{
		".",
		"..",
		".well-known",
		"admin",
		"api",
		"assets",
		"attachments",
		"avatars",
		"commits",
		"debug",
		"error",
		"explore",
		"ghost",
		"help",
		"install",
		"issues",
		"less",
		"login",
		"manifest.json",
		"metrics",
		"milestones",
		"new",
		"notifications",
		"org",
		"plugins",
		"pulls",
		"raw",
		"repo",
		"robots.txt",
		"search",
		"stars",
		"template",
		"user",
	}, public.KnownPublicEntries...)

	// reservedUserPatterns blocks names that would shadow per-user
	// endpoints such as "<name>.keys" / "<name>.gpg".
	reservedUserPatterns = []string{"*.keys", "*.gpg"}
)
// isUsableName checks if name is reserved or pattern of name is not allowed
// based on given reserved names and patterns.
// Names are exact match, patterns can be prefix or suffix match with placeholder '*'.
// Returns ErrNameEmpty for a blank name, ErrNameReserved / ErrNamePatternNotAllowed
// on a match, and nil when the name is usable.
func isUsableName(names, patterns []string, name string) error {
	name = strings.TrimSpace(strings.ToLower(name))
	if utf8.RuneCountInString(name) == 0 {
		return ErrNameEmpty
	}

	for i := range names {
		if name == names[i] {
			return ErrNameReserved{name}
		}
	}

	for _, pat := range patterns {
		// Defensive: an empty pattern would make pat[0] panic below.
		if len(pat) == 0 {
			continue
		}
		if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) ||
			(pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) {
			return ErrNamePatternNotAllowed{pat}
		}
	}

	return nil
}
// IsUsableUsername returns an error when a username is reserved
// or contains characters rejected by alphaDashDotPattern.
func IsUsableUsername(name string) error {
	// Validate username make sure it satisfies requirement.
	if alphaDashDotPattern.MatchString(name) {
		// Note: usually this error is normally caught up earlier in the UI
		return ErrNameCharsNotAllowed{Name: name}
	}

	return isUsableName(reservedUsernames, reservedUserPatterns, name)
}
// CreateUser creates record of a new user.
// The uniqueness checks and the insert run inside one transaction so a
// concurrent registration cannot slip in between them. Several fields
// (KeepEmailPrivate, AvatarEmail, Theme, …) are overwritten with site
// defaults regardless of what the caller set.
func CreateUser(u *User) (err error) {
	if err = IsUsableUsername(u.Name); err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	isExist, err := isUserExist(sess, 0, u.Name)
	if err != nil {
		return err
	} else if isExist {
		return ErrUserAlreadyExist{u.Name}
	}

	// Reclaim the name if it currently only exists as a redirect left
	// over from a rename.
	if err = deleteUserRedirect(sess, u.Name); err != nil {
		return err
	}

	u.Email = strings.ToLower(u.Email)
	isExist, err = sess.
		Where("email=?", u.Email).
		Get(new(User))
	if err != nil {
		return err
	} else if isExist {
		return ErrEmailAlreadyUsed{u.Email}
	}

	if err = ValidateEmail(u.Email); err != nil {
		return err
	}

	// Also reject the address if isEmailUsed reports it taken (this
	// presumably covers additional e-mail addresses — verify).
	isExist, err = isEmailUsed(sess, u.Email)
	if err != nil {
		return err
	} else if isExist {
		return ErrEmailAlreadyUsed{u.Email}
	}

	// Apply site defaults before inserting.
	u.KeepEmailPrivate = setting.Service.DefaultKeepEmailPrivate
	u.LowerName = strings.ToLower(u.Name)
	u.AvatarEmail = u.Email
	if u.Rands, err = GetUserSalt(); err != nil {
		return err
	}
	if err = u.SetPassword(u.Passwd); err != nil {
		return err
	}
	u.AllowCreateOrganization = setting.Service.DefaultAllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation
	u.EmailNotificationsPreference = setting.Admin.DefaultEmailNotification
	u.MaxRepoCreation = -1
	u.Theme = setting.UI.DefaultTheme

	if _, err = sess.Insert(u); err != nil {
		return err
	}

	return sess.Commit()
}
// countUsers counts rows with type=0 (presumably UserTypeIndividual —
// verify against the enum). Query errors are deliberately swallowed and
// reported as a zero count.
func countUsers(e Engine) int64 {
	count, _ := e.
		Where("type=0").
		Count(new(User))
	return count
}
// CountUsers returns number of users.
// Wrapper around countUsers using the default engine x.
func CountUsers() int64 {
	return countUsers(x)
}
// get user by verify code
//
// The bytes after the fixed-length time-limited code are the hex-encoded
// lowercase username (appended by GenerateEmailActivateCode); they are
// decoded here to look the user up again.
func getVerifyUser(code string) (user *User) {
	if len(code) <= base.TimeLimitCodeLength {
		return nil
	}

	// use tail hex username query user
	hexStr := code[base.TimeLimitCodeLength:]
	if b, err := hex.DecodeString(hexStr); err == nil {
		// NOTE: success is judged on user != nil, not err == nil; the
		// error is only logged when no user came back.
		if user, err = GetUserByName(string(b)); user != nil {
			return user
		}
		log.Error("user.getVerifyUser: %v", err)
	}

	return nil
}
// VerifyUserActiveCode verifies active code when active account.
// Returns the matched user on success, nil otherwise. The verified data
// string must be built exactly as in GenerateEmailActivateCode.
func VerifyUserActiveCode(code string) (user *User) {
	// Validity window in minutes, from site configuration.
	minutes := setting.Service.ActiveCodeLives

	if user = getVerifyUser(code); user != nil {
		// time limit code
		prefix := code[:base.TimeLimitCodeLength]
		data := fmt.Sprintf("%d%s%s%s%s", user.ID, user.Email, user.LowerName, user.Passwd, user.Rands)

		if base.VerifyTimeLimitCode(data, minutes, prefix) {
			return user
		}
	}
	return nil
}
// VerifyActiveEmailCode verifies active email code when active account.
// Unlike VerifyUserActiveCode, the signed data embeds the candidate email
// address, and the corresponding EmailAddress row must already exist.
func VerifyActiveEmailCode(code, email string) *EmailAddress {
	// Validity window in minutes, from site configuration.
	minutes := setting.Service.ActiveCodeLives

	if user := getVerifyUser(code); user != nil {
		// time limit code
		prefix := code[:base.TimeLimitCodeLength]
		data := fmt.Sprintf("%d%s%s%s%s", user.ID, email, user.LowerName, user.Passwd, user.Rands)

		if base.VerifyTimeLimitCode(data, minutes, prefix) {
			emailAddress := &EmailAddress{UID: user.ID, Email: email}
			// Get-lookup errors are deliberately ignored (treated as "not found").
			if has, _ := x.Get(emailAddress); has {
				return emailAddress
			}
		}
	}
	return nil
}
// ChangeUserName changes all corresponding setting from old user name to new one.
// The DB updates run in a transaction; the on-disk repository directory is
// renamed outside of it, and renamed back if the commit fails.
// NOTE: u.Name/u.LowerName themselves are not updated here — the caller is
// expected to persist those separately.
func ChangeUserName(u *User, newUserName string) (err error) {
	oldUserName := u.Name
	if err = IsUsableUsername(newUserName); err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	isExist, err := isUserExist(sess, 0, newUserName)
	if err != nil {
		return err
	} else if isExist {
		return ErrUserAlreadyExist{newUserName}
	}

	// Keep the denormalized owner_name column on repositories in sync.
	if _, err = sess.Exec("UPDATE `repository` SET owner_name=? WHERE owner_name=?", newUserName, oldUserName); err != nil {
		return fmt.Errorf("Change repo owner name: %v", err)
	}

	// Do not fail if directory does not exist
	if err = os.Rename(UserPath(oldUserName), UserPath(newUserName)); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("Rename user directory: %v", err)
	}

	// Leave a redirect so old links keep resolving to the renamed user.
	if err = newUserRedirect(sess, u.ID, oldUserName, newUserName); err != nil {
		return err
	}

	if err = sess.Commit(); err != nil {
		// Best-effort rollback of the filesystem rename; if even that
		// fails the two errors are reported together.
		if err2 := os.Rename(UserPath(newUserName), UserPath(oldUserName)); err2 != nil && !os.IsNotExist(err2) {
			log.Critical("Unable to rollback directory change during failed username change from: %s to: %s. DB Error: %v. Filesystem Error: %v", oldUserName, newUserName, err, err2)
			return fmt.Errorf("failed to rollback directory change during failed username change from: %s to: %s. DB Error: %w. Filesystem Error: %v", oldUserName, newUserName, err, err2)
		}
		return err
	}

	return nil
}
// checkDupEmail checks whether there are the same email with the user.
// As a side effect u.Email is lowercased. Returns ErrEmailAlreadyUsed when
// another account of the same type already holds the address.
func checkDupEmail(e Engine, u *User) error {
	u.Email = strings.ToLower(u.Email)

	has, err := e.
		Where("id!=?", u.ID).
		And("type=?", u.Type).
		And("email=?", u.Email).
		Get(new(User))
	if err != nil {
		return err
	}
	if has {
		return ErrEmailAlreadyUsed{u.Email}
	}
	return nil
}
// updateUser lowercases and validates the e-mail, then writes ALL columns
// of u back to the database (AllCols — zero values overwrite too).
func updateUser(e Engine, u *User) (err error) {
	u.Email = strings.ToLower(u.Email)
	if err = ValidateEmail(u.Email); err != nil {
		return err
	}

	_, err = e.ID(u.ID).AllCols().Update(u)
	return err
}
// UpdateUser updates user's information.
// Wrapper around updateUser using the default engine x (updates ALL columns).
func UpdateUser(u *User) error {
	return updateUser(x, u)
}
// UpdateUserCols update user according special columns
// (only the named columns are written), using the default engine x.
func UpdateUserCols(u *User, cols ...string) error {
	return updateUserCols(x, u, cols...)
}
// updateUserCols writes only the named columns of u using the given engine.
// No e-mail normalization/validation happens here, unlike updateUser.
func updateUserCols(e Engine, u *User, cols ...string) error {
	_, err := e.ID(u.ID).Cols(cols...).Update(u)
	return err
}
// UpdateUserSetting updates user's settings.
// Runs the duplicate-email check (skipped for organizations, which have
// no e-mail) and the full-column update in one transaction.
func UpdateUserSetting(u *User) (err error) {
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	if !u.IsOrganization() {
		if err = checkDupEmail(sess, u); err != nil {
			return err
		}
	}
	if err = updateUser(sess, u); err != nil {
		return err
	}
	return sess.Commit()
}
// deleteBeans deletes all given beans, beans should contain delete conditions.
// Deletion stops at the first error, leaving later beans untouched.
func deleteBeans(e Engine, beans ...interface{}) (err error) {
	for _, bean := range beans {
		if _, err = e.Delete(bean); err != nil {
			return err
		}
	}
	return nil
}
// deleteUser removes a user row and every dependent record using the given
// engine (normally a transaction session — see DeleteUser). It refuses to
// delete users that still own repositories or belong to organizations.
// The final filesystem cleanup steps cannot be rolled back; their failures
// are recorded as admin notices.
func deleteUser(e Engine, u *User) error {
	// Note: A user owns any repository or belongs to any organization
	//	cannot perform delete operation.

	// Check ownership of repository.
	count, err := getRepositoryCount(e, u)
	if err != nil {
		return fmt.Errorf("GetRepositoryCount: %v", err)
	} else if count > 0 {
		return ErrUserOwnRepos{UID: u.ID}
	}

	// Check membership of organization.
	count, err = u.getOrganizationCount(e)
	if err != nil {
		return fmt.Errorf("GetOrganizationCount: %v", err)
	} else if count > 0 {
		return ErrUserHasOrgs{UID: u.ID}
	}

	// ***** START: Watch *****
	// Fix up the denormalized num_watches counters on watched repositories
	// before the watch rows themselves are removed below.
	watchedRepoIDs := make([]int64, 0, 10)
	if err = e.Table("watch").Cols("watch.repo_id").
		Where("watch.user_id = ?", u.ID).And("watch.mode <>?", RepoWatchModeDont).Find(&watchedRepoIDs); err != nil {
		return fmt.Errorf("get all watches: %v", err)
	}
	if _, err = e.Decr("num_watches").In("id", watchedRepoIDs).NoAutoTime().Update(new(Repository)); err != nil {
		return fmt.Errorf("decrease repository num_watches: %v", err)
	}
	// ***** END: Watch *****

	// ***** START: Star *****
	starredRepoIDs := make([]int64, 0, 10)
	if err = e.Table("star").Cols("star.repo_id").
		Where("star.uid = ?", u.ID).Find(&starredRepoIDs); err != nil {
		return fmt.Errorf("get all stars: %v", err)
	} else if _, err = e.Decr("num_stars").In("id", starredRepoIDs).NoAutoTime().Update(new(Repository)); err != nil {
		return fmt.Errorf("decrease repository num_stars: %v", err)
	}
	// ***** END: Star *****

	// ***** START: Follow *****
	// Fix both directions of the follow relation: followers of u and users
	// u was following.
	followeeIDs := make([]int64, 0, 10)
	if err = e.Table("follow").Cols("follow.follow_id").
		Where("follow.user_id = ?", u.ID).Find(&followeeIDs); err != nil {
		return fmt.Errorf("get all followees: %v", err)
	} else if _, err = e.Decr("num_followers").In("id", followeeIDs).Update(new(User)); err != nil {
		return fmt.Errorf("decrease user num_followers: %v", err)
	}

	followerIDs := make([]int64, 0, 10)
	if err = e.Table("follow").Cols("follow.user_id").
		Where("follow.follow_id = ?", u.ID).Find(&followerIDs); err != nil {
		return fmt.Errorf("get all followers: %v", err)
	} else if _, err = e.Decr("num_following").In("id", followerIDs).Update(new(User)); err != nil {
		return fmt.Errorf("decrease user num_following: %v", err)
	}
	// ***** END: Follow *****

	// Bulk-delete the simple dependent rows keyed on this user.
	if err = deleteBeans(e,
		&AccessToken{UID: u.ID},
		&Collaboration{UserID: u.ID},
		&Access{UserID: u.ID},
		&Watch{UserID: u.ID},
		&Star{UID: u.ID},
		&Follow{UserID: u.ID},
		&Follow{FollowID: u.ID},
		&Action{UserID: u.ID},
		&IssueUser{UID: u.ID},
		&EmailAddress{UID: u.ID},
		&UserOpenID{UID: u.ID},
		&Reaction{UserID: u.ID},
		&TeamUser{UID: u.ID},
		&Collaboration{UserID: u.ID},
		&Stopwatch{UserID: u.ID},
	); err != nil {
		return fmt.Errorf("deleteBeans: %v", err)
	}

	// Accounts younger than USER_DELETE_WITH_COMMENTS_MAX_TIME also take
	// their comments and reactions with them (anti-spam measure).
	if setting.Service.UserDeleteWithCommentsMaxTime != 0 &&
		u.CreatedUnix.AsTime().Add(setting.Service.UserDeleteWithCommentsMaxTime).After(time.Now()) {

		// Delete Comments
		const batchSize = 50
		for start := 0; ; start += batchSize {
			comments := make([]*Comment, 0, batchSize)
			if err = e.Where("type=? AND poster_id=?", CommentTypeComment, u.ID).Limit(batchSize, start).Find(&comments); err != nil {
				return err
			}
			if len(comments) == 0 {
				break
			}
			for _, comment := range comments {
				if err = deleteComment(e, comment); err != nil {
					return err
				}
			}
		}

		// Delete Reactions
		if err = deleteReaction(e, &ReactionOptions{Doer: u}); err != nil {
			return err
		}
	}

	// ***** START: PublicKey *****
	if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil {
		return fmt.Errorf("deletePublicKeys: %v", err)
	}
	// Regenerate authorized_keys / principals files now that keys are gone.
	err = rewriteAllPublicKeys(e)
	if err != nil {
		return err
	}
	err = rewriteAllPrincipalKeys(e)
	if err != nil {
		return err
	}
	// ***** END: PublicKey *****

	// ***** START: GPGPublicKey *****
	keys, err := listGPGKeys(e, u.ID, ListOptions{})
	if err != nil {
		return fmt.Errorf("ListGPGKeys: %v", err)
	}
	// Delete GPGKeyImport(s).
	for _, key := range keys {
		if _, err = e.Delete(&GPGKeyImport{KeyID: key.KeyID}); err != nil {
			return fmt.Errorf("deleteGPGKeyImports: %v", err)
		}
	}
	if _, err = e.Delete(&GPGKey{OwnerID: u.ID}); err != nil {
		return fmt.Errorf("deleteGPGKeys: %v", err)
	}
	// ***** END: GPGPublicKey *****

	// Clear assignee.
	if err = clearAssigneeByUserID(e, u.ID); err != nil {
		return fmt.Errorf("clear assignee: %v", err)
	}

	// ***** START: ExternalLoginUser *****
	if err = removeAllAccountLinks(e, u); err != nil {
		return fmt.Errorf("ExternalLoginUser: %v", err)
	}
	// ***** END: ExternalLoginUser *****

	// Finally remove the user row itself.
	if _, err = e.ID(u.ID).Delete(new(User)); err != nil {
		return fmt.Errorf("Delete: %v", err)
	}

	// Note: There are something just cannot be roll back,
	//	so just keep error logs of those operations.
	path := UserPath(u.Name)
	if err = util.RemoveAll(path); err != nil {
		err = fmt.Errorf("Failed to RemoveAll %s: %v", path, err)
		_ = createNotice(e, NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
		return err
	}

	if len(u.Avatar) > 0 {
		avatarPath := u.CustomAvatarRelativePath()
		if err = storage.Avatars.Delete(avatarPath); err != nil {
			err = fmt.Errorf("Failed to remove %s: %v", avatarPath, err)
			_ = createNotice(e, NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
			return err
		}
	}

	return nil
}
// DeleteUser completely and permanently deletes everything of a user,
// but issues/comments/pulls will be kept and shown as someone has been deleted,
// unless the user is younger than USER_DELETE_WITH_COMMENTS_MAX_DAYS.
// Organizations must be deleted through their own path and are rejected here.
// The heavy lifting happens in deleteUser, wrapped in a single transaction.
func DeleteUser(u *User) (err error) {
	if u.IsOrganization() {
		return fmt.Errorf("%s is an organization not a user", u.Name)
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	if err = deleteUser(sess, u); err != nil {
		// Note: don't wrapper error here.
		return err
	}

	return sess.Commit()
}
// DeleteInactiveUsers deletes all inactive users and email addresses.
// When olderThan is positive, only accounts created before now-olderThan
// are considered; otherwise every inactive account is. Deletion honors
// ctx cancellation between users, and users blocked by owned repos or
// org membership (typically deactivated by an admin) are skipped.
func DeleteInactiveUsers(ctx context.Context, olderThan time.Duration) (err error) {
	users := make([]*User, 0, 10)
	if olderThan > 0 {
		if err = x.
			Where("is_active = ? and created_unix < ?", false, time.Now().Add(-olderThan).Unix()).
			Find(&users); err != nil {
			return fmt.Errorf("get all inactive users: %v", err)
		}
	} else {
		if err = x.
			Where("is_active = ?", false).
			Find(&users); err != nil {
			return fmt.Errorf("get all inactive users: %v", err)
		}
	}

	// FIXME: should only update authorized_keys file once after all deletions.
	for _, u := range users {
		select {
		case <-ctx.Done():
			return ErrCancelledf("Before delete inactive user %s", u.Name)
		default:
		}
		if err = DeleteUser(u); err != nil {
			// Ignore users that were set inactive by admin.
			if IsErrUserOwnRepos(err) || IsErrUserHasOrgs(err) {
				continue
			}
			return err
		}
	}

	// Clean up unactivated e-mail addresses regardless of age.
	_, err = x.
		Where("is_activated = ?", false).
		Delete(new(EmailAddress))
	return err
}
// UserPath returns the path absolute path of user repositories.
// The username is lowercased and joined under setting.RepoRootPath.
func UserPath(userName string) string {
	return filepath.Join(setting.RepoRootPath, strings.ToLower(userName))
}
// getUserByID loads the user with the given primary key via the supplied
// engine, returning ErrUserNotExist when no such row exists.
func getUserByID(e Engine, id int64) (*User, error) {
	user := new(User)
	has, err := e.ID(id).Get(user)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrUserNotExist{id, "", 0}
	}
	return user, nil
}
// GetUserByID returns the user object by given ID if exists.
// Wrapper around getUserByID using the default engine x.
func GetUserByID(id int64) (*User, error) {
	return getUserByID(x, id)
}
// GetUserByName returns user by given name (case-insensitive).
// Wrapper around getUserByName using the default engine x.
func GetUserByName(name string) (*User, error) {
	return getUserByName(x, name)
}
// getUserByName loads a user by its lowercased name via the supplied
// engine. Empty names and missing rows both yield ErrUserNotExist.
func getUserByName(e Engine, name string) (*User, error) {
	if name == "" {
		return nil, ErrUserNotExist{0, name, 0}
	}

	user := &User{LowerName: strings.ToLower(name)}
	has, err := e.Get(user)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrUserNotExist{0, name, 0}
	}
	return user, nil
}
// GetUserEmailsByNames returns a list of e-mails corresponds to names of users
// that have their email notifications set to enabled or onmention.
// Wrapper around getUserEmailsByNames using the default engine x.
func GetUserEmailsByNames(names []string) []string {
	return getUserEmailsByNames(x, names)
}
// getUserEmailsByNames collects the primary e-mail addresses of the named
// users, skipping unknown users, unmailable users, and users who disabled
// e-mail notifications.
func getUserEmailsByNames(e Engine, names []string) []string {
	mails := make([]string, 0, len(names))
	for _, name := range names {
		user, err := getUserByName(e, name)
		if err != nil {
			// Unresolvable names are silently skipped.
			continue
		}
		if !user.IsMailable() || user.EmailNotifications() == EmailNotificationsDisabled {
			continue
		}
		mails = append(mails, user.Email)
	}
	return mails
}
// GetMaileableUsersByIDs gets users from ids, but only if they can receive mails.
// When isMention is true, users with the "onmention" notification preference
// also qualify; otherwise only "enabled" does.
func GetMaileableUsersByIDs(ids []int64, isMention bool) ([]*User, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	ous := make([]*User, 0, len(ids))
	// Common filters shared by both preference variants.
	sess := x.In("id", ids).
		Where("`type` = ?", UserTypeIndividual).
		And("`prohibit_login` = ?", false).
		And("`is_active` = ?", true)
	if isMention {
		sess = sess.And("`email_notifications_preference` IN ( ?, ?)", EmailNotificationsEnabled, EmailNotificationsOnMention)
	} else {
		sess = sess.And("`email_notifications_preference` = ?", EmailNotificationsEnabled)
	}
	return ous, sess.Find(&ous)
}
// GetUserNamesByIDs returns usernames for all resolved users from a list of Ids,
// sorted alphabetically.
func GetUserNamesByIDs(ids []int64) ([]string, error) {
	names := make([]string, 0, len(ids))
	err := x.In("id", ids).
		Table("user").
		Asc("name").
		Cols("name").
		Find(&names)
	return names, err
}
// GetUsersByIDs returns all resolved users from a list of Ids, sorted by name.
func GetUsersByIDs(ids []int64) (UserList, error) {
	users := make([]*User, 0, len(ids))
	if len(ids) == 0 {
		// Avoid issuing an empty IN() query.
		return users, nil
	}
	return users, x.In("id", ids).Asc("name").Find(&users)
}
// GetUserIDsByNames returns a slice of ids corresponds to names.
// When ignoreNonExistent is true, unknown names are skipped; otherwise the
// first lookup failure is returned.
func GetUserIDsByNames(names []string, ignoreNonExistent bool) ([]int64, error) {
	ids := make([]int64, 0, len(names))
	for _, name := range names {
		u, err := GetUserByName(name)
		if err != nil {
			if !ignoreNonExistent {
				return nil, err
			}
			continue
		}
		ids = append(ids, u.ID)
	}
	return ids, nil
}
// UserCommit represents a commit with validation of user.
type UserCommit struct {
	User *User // resolved author account; nil when no matching user was found
	*git.Commit
}
// ValidateCommitWithEmail check if author's e-mail of commit is corresponding to a user.
// Returns nil when the commit has no author or no user matches the author e-mail.
func ValidateCommitWithEmail(c *git.Commit) *User {
	if c.Author == nil {
		return nil
	}
	user, err := GetUserByEmail(c.Author.Email)
	if err != nil {
		return nil
	}
	return user
}
// ValidateCommitsWithEmails checks if authors' e-mails of commits are corresponding to users.
// Each input commit is wrapped in a UserCommit; the resolved user is nil when
// the author e-mail cannot be matched. Lookups are memoized per e-mail so each
// address hits the database at most once.
func ValidateCommitsWithEmails(oldCommits *list.List) *list.List {
	emails := map[string]*User{}
	newCommits := list.New()
	for e := oldCommits.Front(); e != nil; e = e.Next() {
		c := e.Value.(*git.Commit)
		var u *User
		if c.Author != nil {
			v, ok := emails[c.Author.Email]
			if !ok {
				// Cache the result even when lookup fails (v stays nil).
				v, _ = GetUserByEmail(c.Author.Email)
				emails[c.Author.Email] = v
			}
			u = v
		}
		newCommits.PushBack(UserCommit{
			User:   u,
			Commit: c,
		})
	}
	return newCommits
}
// GetUserByEmail returns the user object by given e-mail if exists.
// Thin wrapper around GetUserByEmailContext with the default DB context.
func GetUserByEmail(email string) (*User, error) {
	return GetUserByEmailContext(DefaultDBContext(), email)
}
// GetUserByEmailContext returns the user object by given e-mail if exists with db context.
// Resolution order: primary e-mail, then activated alternative addresses, then
// the synthetic no-reply address derived from the user name.
func GetUserByEmailContext(ctx DBContext, email string) (*User, error) {
	if len(email) == 0 {
		return nil, ErrUserNotExist{0, email, 0}
	}
	email = strings.ToLower(email)
	// First try to find the user by primary email
	user := &User{Email: email}
	has, err := ctx.e.Get(user)
	if err != nil {
		return nil, err
	}
	if has {
		return user, nil
	}
	// Otherwise, check in alternative list for activated email addresses
	emailAddress := &EmailAddress{Email: email, IsActivated: true}
	has, err = ctx.e.Get(emailAddress)
	if err != nil {
		return nil, err
	}
	if has {
		return getUserByID(ctx.e, emailAddress.UID)
	}
	// Finally, if email address is the protected email address:
	// build the suffix once instead of formatting it twice.
	noReplySuffix := "@" + setting.Service.NoReplyAddress
	if strings.HasSuffix(email, noReplySuffix) {
		username := strings.TrimSuffix(email, noReplySuffix)
		user := &User{}
		has, err := ctx.e.Where("lower_name=?", username).Get(user)
		if err != nil {
			return nil, err
		}
		if has {
			return user, nil
		}
	}
	return nil, ErrUserNotExist{0, email, 0}
}
// GetUser checks if a user already exists.
// NOTE(review): presumably xorm uses the non-zero fields of user as the query
// condition and fills the struct on a hit — confirm against xorm Get semantics.
func GetUser(user *User) (bool, error) {
	return x.Get(user)
}
// SearchUserOptions contains the options for searching
type SearchUserOptions struct {
	ListOptions
	Keyword string // matched against lower_name and full_name (and email, see SearchByEmail)
	Type    UserType
	UID     int64 // restrict the result to this single user ID when > 0
	OrderBy SearchOrderBy
	Visible []structs.VisibleType
	Actor   *User // The user doing the search
	IsActive util.OptionalBool
	SearchByEmail bool // Search by email as well as username/full name
}
// toConds translates the search options into an xorm/builder condition tree.
// The condition always filters on user type; keyword, visibility, actor
// permissions, UID and active state are added when set.
func (opts *SearchUserOptions) toConds() builder.Cond {
	var cond builder.Cond = builder.Eq{"type": opts.Type}
	if len(opts.Keyword) > 0 {
		lowerKeyword := strings.ToLower(opts.Keyword)
		keywordCond := builder.Or(
			builder.Like{"lower_name", lowerKeyword},
			builder.Like{"LOWER(full_name)", lowerKeyword},
		)
		if opts.SearchByEmail {
			keywordCond = keywordCond.Or(builder.Like{"LOWER(email)", lowerKeyword})
		}
		cond = cond.And(keywordCond)
	}
	if len(opts.Visible) > 0 {
		cond = cond.And(builder.In("visibility", opts.Visible))
	} else {
		// Without explicit visibility filters, only public users are returned.
		cond = cond.And(builder.In("visibility", structs.VisibleTypePublic))
	}
	if opts.Actor != nil {
		// The quoting of the reserved word "user" differs per database dialect.
		var exprCond builder.Cond
		if setting.Database.UseMySQL {
			exprCond = builder.Expr("org_user.org_id = user.id")
		} else if setting.Database.UseMSSQL {
			exprCond = builder.Expr("org_user.org_id = [user].id")
		} else {
			exprCond = builder.Expr("org_user.org_id = \"user\".id")
		}
		var accessCond = builder.NewCond()
		if !opts.Actor.IsRestricted {
			// Non-restricted actors see public/limited users plus private orgs
			// they belong to.
			accessCond = builder.Or(
				builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),
				builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))
		} else {
			// restricted users only see orgs they are a member of
			accessCond = builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID})))
		}
		cond = cond.And(accessCond)
	}
	if opts.UID > 0 {
		cond = cond.And(builder.Eq{"id": opts.UID})
	}
	if !opts.IsActive.IsNone() {
		cond = cond.And(builder.Eq{"is_active": opts.IsActive.IsTrue()})
	}
	return cond
}
// SearchUsers takes options i.e. keyword and part of user name to search,
// it returns results in given range and number of total results.
func SearchUsers(opts *SearchUserOptions) (users []*User, _ int64, _ error) {
	cond := opts.toConds()
	// Total count is computed before pagination is applied.
	count, err := x.Where(cond).Count(new(User))
	if err != nil {
		return nil, 0, fmt.Errorf("Count: %v", err)
	}
	if len(opts.OrderBy) == 0 {
		opts.OrderBy = SearchOrderByAlphabetically
	}
	sess := x.Where(cond).OrderBy(opts.OrderBy.String())
	if opts.Page != 0 {
		sess = opts.setSessionPagination(sess)
	}
	users = make([]*User, 0, opts.PageSize)
	return users, count, sess.Find(&users)
}
// GetStarredRepos returns the repos starred by a particular user.
// Private repositories are excluded unless private is true. Pagination is
// applied when listOptions.Page is non-zero.
func GetStarredRepos(userID int64, private bool, listOptions ListOptions) ([]*Repository, error) {
	sess := x.Where("star.uid=?", userID).
		Join("LEFT", "star", "`repository`.id=`star`.repo_id")
	if !private {
		sess = sess.And("is_private=?", false)
	}
	// Default slice capacity when no pagination is requested.
	capacity := 10
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	repos := make([]*Repository, 0, capacity)
	return repos, sess.Find(&repos)
}
// GetWatchedRepos returns the repos watched by a particular user.
// Watches in "don't watch" mode are excluded; private repositories are excluded
// unless private is true. Pagination applies when listOptions.Page is non-zero.
func GetWatchedRepos(userID int64, private bool, listOptions ListOptions) ([]*Repository, error) {
	sess := x.Where("watch.user_id=?", userID).
		And("`watch`.mode<>?", RepoWatchModeDont).
		Join("LEFT", "watch", "`repository`.id=`watch`.repo_id")
	if !private {
		sess = sess.And("is_private=?", false)
	}
	// Default slice capacity when no pagination is requested.
	capacity := 10
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	repos := make([]*Repository, 0, capacity)
	return repos, sess.Find(&repos)
}
// deleteKeysMarkedForDeletion returns true if ssh keys needs update.
// All deletions run inside a single transaction; individual lookup/delete
// failures are logged and skipped so the remaining keys are still processed.
func deleteKeysMarkedForDeletion(keys []string) (bool, error) {
	// Start session
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return false, err
	}
	// Delete keys marked for deletion
	var sshKeysNeedUpdate bool
	for _, KeyToDelete := range keys {
		key, err := searchPublicKeyByContentWithEngine(sess, KeyToDelete)
		if err != nil {
			// Key not found (or lookup failed) — nothing to delete for it.
			log.Error("SearchPublicKeyByContent: %v", err)
			continue
		}
		if err = deletePublicKeys(sess, key.ID); err != nil {
			log.Error("deletePublicKeys: %v", err)
			continue
		}
		sshKeysNeedUpdate = true
	}
	if err := sess.Commit(); err != nil {
		return false, err
	}
	return sshKeysNeedUpdate, nil
}
// addLdapSSHPublicKeys add a users public keys. Returns true if there are changes.
// Each entry of sshPublicKeys may contain multiple authorized_keys lines; they
// are parsed one by one and stored under a name derived from the login source
// and the key fingerprint. Already-existing keys are ignored.
func addLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
	var sshKeysNeedUpdate bool
	for _, sshKey := range sshPublicKeys {
		var err error
		found := false
		keys := []byte(sshKey)
	loop:
		// ParseAuthorizedKey consumes one key per call and returns the rest in keys.
		for len(keys) > 0 && err == nil {
			var out ssh.PublicKey
			// We ignore options as they are not relevant to Gitea
			out, _, _, keys, err = ssh.ParseAuthorizedKey(keys)
			if err != nil {
				break loop
			}
			found = true
			marshalled := string(ssh.MarshalAuthorizedKey(out))
			// Strip the trailing newline added by MarshalAuthorizedKey.
			marshalled = marshalled[:len(marshalled)-1]
			sshKeyName := fmt.Sprintf("%s-%s", s.Name, ssh.FingerprintSHA256(out))
			if _, err := AddPublicKey(usr.ID, sshKeyName, marshalled, s.ID); err != nil {
				if IsErrKeyAlreadyExist(err) {
					log.Trace("addLdapSSHPublicKeys[%s]: LDAP Public SSH Key %s already exists for user", sshKeyName, usr.Name)
				} else {
					log.Error("addLdapSSHPublicKeys[%s]: Error adding LDAP Public SSH Key for user %s: %v", sshKeyName, usr.Name, err)
				}
			} else {
				log.Trace("addLdapSSHPublicKeys[%s]: Added LDAP Public SSH Key for user %s", sshKeyName, usr.Name)
				sshKeysNeedUpdate = true
			}
		}
		if !found && err != nil {
			// The entry produced no valid key at all.
			log.Warn("addLdapSSHPublicKeys[%s]: Skipping invalid LDAP Public SSH Key for user %s: %v", s.Name, usr.Name, sshKey)
		}
	}
	return sshKeysNeedUpdate
}
// synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes.
// It diffs the keys currently stored for this login source against the keys
// reported by LDAP, adding the missing ones and deleting the stale ones.
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
	var sshKeysNeedUpdate bool
	log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name)
	// Get Public Keys from DB with current LDAP source
	var giteaKeys []string
	keys, err := ListPublicLdapSSHKeys(usr.ID, s.ID)
	if err != nil {
		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error listing LDAP Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
	}
	for _, v := range keys {
		giteaKeys = append(giteaKeys, v.OmitEmail())
	}
	// Get Public Keys from LDAP and skip duplicate keys
	var ldapKeys []string
	for _, v := range sshPublicKeys {
		sshKeySplit := strings.Split(v, " ")
		if len(sshKeySplit) > 1 {
			// Keep only "type key" — drop the comment so keys compare equal
			// to the DB representation.
			ldapKey := strings.Join(sshKeySplit[:2], " ")
			if !util.ExistsInSlice(ldapKey, ldapKeys) {
				ldapKeys = append(ldapKeys, ldapKey)
			}
		}
	}
	// Check if Public Key sync is needed
	if util.IsEqualSlice(giteaKeys, ldapKeys) {
		log.Trace("synchronizeLdapSSHPublicKeys[%s]: LDAP Public Keys are already in sync for %s (LDAP:%v/DB:%v)", s.Name, usr.Name, len(ldapKeys), len(giteaKeys))
		return false
	}
	log.Trace("synchronizeLdapSSHPublicKeys[%s]: LDAP Public Key needs update for user %s (LDAP:%v/DB:%v)", s.Name, usr.Name, len(ldapKeys), len(giteaKeys))
	// Add LDAP Public SSH Keys that doesn't already exist in DB
	var newLdapSSHKeys []string
	for _, LDAPPublicSSHKey := range ldapKeys {
		if !util.ExistsInSlice(LDAPPublicSSHKey, giteaKeys) {
			newLdapSSHKeys = append(newLdapSSHKeys, LDAPPublicSSHKey)
		}
	}
	if addLdapSSHPublicKeys(usr, s, newLdapSSHKeys) {
		sshKeysNeedUpdate = true
	}
	// Mark LDAP keys from DB that doesn't exist in LDAP for deletion
	var giteaKeysToDelete []string
	for _, giteaKey := range giteaKeys {
		if !util.ExistsInSlice(giteaKey, ldapKeys) {
			log.Trace("synchronizeLdapSSHPublicKeys[%s]: Marking LDAP Public SSH Key for deletion for user %s: %v", s.Name, usr.Name, giteaKey)
			giteaKeysToDelete = append(giteaKeysToDelete, giteaKey)
		}
	}
	// Delete LDAP keys from DB that doesn't exist in LDAP
	needUpd, err := deleteKeysMarkedForDeletion(giteaKeysToDelete)
	if err != nil {
		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error deleting LDAP Public SSH Keys marked for deletion for user %s: %v", s.Name, usr.Name, err)
	}
	if needUpd {
		sshKeysNeedUpdate = true
	}
	return sshKeysNeedUpdate
}
// SyncExternalUsers is used to synchronize users with external authorization source.
// For every active, sync-enabled LDAP login source it creates missing users,
// optionally updates existing ones (updateExisting), and deactivates users no
// longer present in LDAP. Cancellation via ctx is checked at several points;
// the authorized_keys file is rewritten when any SSH key changed.
func SyncExternalUsers(ctx context.Context, updateExisting bool) error {
	log.Trace("Doing: SyncExternalUsers")
	ls, err := LoginSources()
	if err != nil {
		log.Error("SyncExternalUsers: %v", err)
		return err
	}
	for _, s := range ls {
		if !s.IsActived || !s.IsSyncEnabled {
			continue
		}
		select {
		case <-ctx.Done():
			log.Warn("SyncExternalUsers: Cancelled before update of %s", s.Name)
			return ErrCancelledf("Before update of %s", s.Name)
		default:
		}
		if s.IsLDAP() {
			log.Trace("Doing: SyncExternalUsers[%s]", s.Name)
			var existingUsers []int64
			var isAttributeSSHPublicKeySet = len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
			var sshKeysNeedUpdate bool
			// Find all users with this login type
			var users []*User
			err = x.Where("login_type = ?", LoginLDAP).
				And("login_source = ?", s.ID).
				Find(&users)
			if err != nil {
				log.Error("SyncExternalUsers: %v", err)
				return err
			}
			select {
			case <-ctx.Done():
				log.Warn("SyncExternalUsers: Cancelled before update of %s", s.Name)
				return ErrCancelledf("Before update of %s", s.Name)
			default:
			}
			sr, err := s.LDAP().SearchEntries()
			if err != nil {
				// A failing source is skipped rather than aborting the whole sync.
				log.Error("SyncExternalUsers LDAP source failure [%s], skipped", s.Name)
				continue
			}
			if len(sr) == 0 {
				// Guard against accidentally deactivating everyone when the
				// search silently returns nothing.
				if !s.LDAP().AllowDeactivateAll {
					log.Error("LDAP search found no entries but did not report an error. Refusing to deactivate all users")
					continue
				} else {
					log.Warn("LDAP search found no entries but did not report an error. All users will be deactivated as per settings")
				}
			}
			for _, su := range sr {
				select {
				case <-ctx.Done():
					log.Warn("SyncExternalUsers: Cancelled at update of %s before completed update of users", s.Name)
					// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
					if sshKeysNeedUpdate {
						err = RewriteAllPublicKeys()
						if err != nil {
							log.Error("RewriteAllPublicKeys: %v", err)
						}
					}
					return ErrCancelledf("During update of %s before completed update of users", s.Name)
				default:
				}
				if len(su.Username) == 0 {
					continue
				}
				if len(su.Mail) == 0 {
					// Fall back to a synthetic local address when LDAP has no mail.
					su.Mail = fmt.Sprintf("%s@localhost", su.Username)
				}
				var usr *User
				// Search for existing user
				for _, du := range users {
					if du.LowerName == strings.ToLower(su.Username) {
						usr = du
						break
					}
				}
				fullName := composeFullName(su.Name, su.Surname, su.Username)
				// If no existing user found, create one
				if usr == nil {
					log.Trace("SyncExternalUsers[%s]: Creating user %s", s.Name, su.Username)
					usr = &User{
						LowerName:    strings.ToLower(su.Username),
						Name:         su.Username,
						FullName:     fullName,
						LoginType:    s.Type,
						LoginSource:  s.ID,
						LoginName:    su.Username,
						Email:        su.Mail,
						IsAdmin:      su.IsAdmin,
						IsRestricted: su.IsRestricted,
						IsActive:     true,
					}
					err = CreateUser(usr)
					if err != nil {
						log.Error("SyncExternalUsers[%s]: Error creating user %s: %v", s.Name, su.Username, err)
					} else if isAttributeSSHPublicKeySet {
						log.Trace("SyncExternalUsers[%s]: Adding LDAP Public SSH Keys for user %s", s.Name, usr.Name)
						if addLdapSSHPublicKeys(usr, s, su.SSHPublicKey) {
							sshKeysNeedUpdate = true
						}
					}
				} else if updateExisting {
					existingUsers = append(existingUsers, usr.ID)
					// Synchronize SSH Public Key if that attribute is set
					if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(usr, s, su.SSHPublicKey) {
						sshKeysNeedUpdate = true
					}
					// Check if user data has changed
					if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) ||
						(len(s.LDAP().RestrictedFilter) > 0 && usr.IsRestricted != su.IsRestricted) ||
						!strings.EqualFold(usr.Email, su.Mail) ||
						usr.FullName != fullName ||
						!usr.IsActive {
						log.Trace("SyncExternalUsers[%s]: Updating user %s", s.Name, usr.Name)
						usr.FullName = fullName
						usr.Email = su.Mail
						// Change existing admin flag only if AdminFilter option is set
						if len(s.LDAP().AdminFilter) > 0 {
							usr.IsAdmin = su.IsAdmin
						}
						// Change existing restricted flag only if RestrictedFilter option is set
						if !usr.IsAdmin && len(s.LDAP().RestrictedFilter) > 0 {
							usr.IsRestricted = su.IsRestricted
						}
						usr.IsActive = true
						err = UpdateUserCols(usr, "full_name", "email", "is_admin", "is_restricted", "is_active")
						if err != nil {
							log.Error("SyncExternalUsers[%s]: Error updating user %s: %v", s.Name, usr.Name, err)
						}
					}
				}
			}
			// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
			if sshKeysNeedUpdate {
				err = RewriteAllPublicKeys()
				if err != nil {
					log.Error("RewriteAllPublicKeys: %v", err)
				}
			}
			select {
			case <-ctx.Done():
				log.Warn("SyncExternalUsers: Cancelled during update of %s before delete users", s.Name)
				return ErrCancelledf("During update of %s before delete users", s.Name)
			default:
			}
			// Deactivate users not present in LDAP
			if updateExisting {
				for _, usr := range users {
					found := false
					for _, uid := range existingUsers {
						if usr.ID == uid {
							found = true
							break
						}
					}
					if !found {
						log.Trace("SyncExternalUsers[%s]: Deactivating user %s", s.Name, usr.Name)
						usr.IsActive = false
						err = UpdateUserCols(usr, "is_active")
						if err != nil {
							log.Error("SyncExternalUsers[%s]: Error deactivating user %s: %v", s.Name, usr.Name, err)
						}
					}
				}
			}
		}
	}
	return nil
}
// IterateUser iterates over all users in database batches, invoking f for each
// user. Iteration stops at the first error from f or from the database.
func IterateUser(f func(user *User) error) error {
	batchSize := setting.Database.IterateBufferSize
	for start := 0; ; {
		batch := make([]*User, 0, batchSize)
		if err := x.Limit(batchSize, start).Find(&batch); err != nil {
			return err
		}
		if len(batch) == 0 {
			// Exhausted all rows.
			return nil
		}
		start += len(batch)
		for _, user := range batch {
			if err := f(user); err != nil {
				return err
			}
		}
	}
}
Add "captcha" to list of reserved usernames (#14929)
Signed-off-by: Otto Richter <46f1a0bd5592a2f9244ca321b129902a06b53e03@fralix.ovh>
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"container/list"
"context"
"crypto/sha256"
"crypto/subtle"
"encoding/hex"
"errors"
"fmt"
_ "image/jpeg" // Needed for jpeg support
"os"
"path/filepath"
"regexp"
"strings"
"time"
"unicode/utf8"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/generate"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/public"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/bcrypt"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
"golang.org/x/crypto/ssh"
"xorm.io/builder"
)
// UserType defines the user type
type UserType int
const (
	// UserTypeIndividual defines an individual user
	UserTypeIndividual UserType = iota // Historic reason to make it starts at 0.
	// UserTypeOrganization defines an organization
	UserTypeOrganization
)
// Identifiers of the supported password hashing algorithms, as stored in
// User.PasswdHashAlgo.
const (
	algoBcrypt = "bcrypt"
	algoScrypt = "scrypt"
	algoArgon2 = "argon2"
	algoPbkdf2 = "pbkdf2"
)
// AvailableHashAlgorithms represents the available password hashing algorithms
var AvailableHashAlgorithms = []string{
	algoPbkdf2,
	algoArgon2,
	algoScrypt,
	algoBcrypt,
}
// Values stored in User.EmailNotificationsPreference.
const (
	// EmailNotificationsEnabled indicates that the user would like to receive all email notifications
	EmailNotificationsEnabled = "enabled"
	// EmailNotificationsOnMention indicates that the user would like to be notified via email when mentioned.
	EmailNotificationsOnMention = "onmention"
	// EmailNotificationsDisabled indicates that the user would not like to be notified via email.
	EmailNotificationsDisabled = "disabled"
)
// Sentinel errors and validation helpers for user handling.
var (
	// ErrUserNotKeyOwner user does not own this key error
	ErrUserNotKeyOwner = errors.New("User does not own this public key")
	// ErrEmailNotExist e-mail does not exist error
	ErrEmailNotExist = errors.New("E-mail does not exist")
	// ErrEmailNotActivated e-mail address has not been activated error
	ErrEmailNotActivated = errors.New("E-mail address has not been activated")
	// ErrUserNameIllegal user name contains illegal characters error
	ErrUserNameIllegal = errors.New("User name contains illegal characters")
	// ErrLoginSourceNotActived login source is not actived error
	ErrLoginSourceNotActived = errors.New("Login source is not actived")
	// ErrUnsupportedLoginType login source is unknown error
	ErrUnsupportedLoginType = errors.New("Login source is unknown")
	// Characters prohibited in a user name (anything except A-Za-z0-9_.-)
	alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`)
)
// User represents the object of individual and member of organization.
type User struct {
	ID        int64  `xorm:"pk autoincr"`
	LowerName string `xorm:"UNIQUE NOT NULL"` // lower-cased Name, used for case-insensitive lookup
	Name      string `xorm:"UNIQUE NOT NULL"`
	FullName  string
	// Email is the primary email address (to be used for communication)
	Email                        string `xorm:"NOT NULL"`
	KeepEmailPrivate             bool
	EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT NULL DEFAULT 'enabled'"`
	Passwd                       string `xorm:"NOT NULL"` // password hash, format depends on PasswdHashAlgo
	PasswdHashAlgo               string `xorm:"NOT NULL DEFAULT 'argon2'"`
	// MustChangePassword is an attribute that determines if a user
	// is to change his/her password after registration.
	MustChangePassword bool `xorm:"NOT NULL DEFAULT false"`
	LoginType          LoginType
	LoginSource        int64 `xorm:"NOT NULL DEFAULT 0"`
	LoginName          string
	Type               UserType
	OwnedOrgs          []*User       `xorm:"-"`
	Orgs               []*User       `xorm:"-"`
	Repos              []*Repository `xorm:"-"`
	Location           string
	Website            string
	Rands              string `xorm:"VARCHAR(10)"`
	Salt               string `xorm:"VARCHAR(10)"` // per-user password salt
	Language           string `xorm:"VARCHAR(5)"`
	Description        string
	CreatedUnix        timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix        timeutil.TimeStamp `xorm:"INDEX updated"`
	LastLoginUnix      timeutil.TimeStamp `xorm:"INDEX"`
	// Remember visibility choice for convenience, true for private
	LastRepoVisibility bool
	// Maximum repository creation limit, -1 means use global default
	MaxRepoCreation int `xorm:"NOT NULL DEFAULT -1"`
	// Permissions
	IsActive                bool `xorm:"INDEX"` // Activate primary email
	IsAdmin                 bool
	IsRestricted            bool `xorm:"NOT NULL DEFAULT false"`
	AllowGitHook            bool
	AllowImportLocal        bool // Allow migrate repository by local path
	AllowCreateOrganization bool `xorm:"DEFAULT true"`
	ProhibitLogin           bool `xorm:"NOT NULL DEFAULT false"`
	// Avatar
	Avatar          string `xorm:"VARCHAR(2048) NOT NULL"`
	AvatarEmail     string `xorm:"NOT NULL"`
	UseCustomAvatar bool
	// Counters
	NumFollowers int
	NumFollowing int `xorm:"NOT NULL DEFAULT 0"`
	NumStars     int
	NumRepos     int
	// For organization
	NumTeams                  int
	NumMembers                int
	Teams                     []*Team             `xorm:"-"`
	Members                   UserList            `xorm:"-"`
	MembersIsPublic           map[int64]bool      `xorm:"-"`
	Visibility                structs.VisibleType `xorm:"NOT NULL DEFAULT 0"`
	RepoAdminChangeTeamAccess bool                `xorm:"NOT NULL DEFAULT false"`
	// Preferences
	DiffViewStyle       string `xorm:"NOT NULL DEFAULT ''"`
	Theme               string `xorm:"NOT NULL DEFAULT ''"`
	KeepActivityPrivate bool   `xorm:"NOT NULL DEFAULT false"`
}
// SearchOrganizationsOptions options to filter organizations
type SearchOrganizationsOptions struct {
	ListOptions
	All bool // include organizations of all visibilities
}
// ColorFormat writes a colored string to identify this struct,
// rendered as "<id>:<name>".
func (u *User) ColorFormat(s fmt.State) {
	log.ColorFprintf(s, "%d:%s",
		log.NewColoredIDValue(u.ID),
		log.NewColoredValue(u.Name))
}
// BeforeUpdate is invoked from XORM before updating this object.
// It normalizes derived fields (lower-cased names and e-mail, avatar e-mail
// fallback) and truncates free-text fields to their column limits.
func (u *User) BeforeUpdate() {
	if u.MaxRepoCreation < -1 {
		u.MaxRepoCreation = -1
	}
	u.Email = strings.ToLower(u.Email)
	// Organization does not need email
	if !u.IsOrganization() && len(u.AvatarEmail) == 0 {
		u.AvatarEmail = u.Email
	}
	u.LowerName = strings.ToLower(u.Name)
	u.Location = base.TruncateString(u.Location, 255)
	u.Website = base.TruncateString(u.Website, 255)
	u.Description = base.TruncateString(u.Description, 255)
}
// AfterLoad is invoked from XORM after filling all the fields of this object.
// It applies the site-wide default theme when the user has not picked one.
func (u *User) AfterLoad() {
	if u.Theme == "" {
		u.Theme = setting.UI.DefaultTheme
	}
}
// SetLastLogin set time to last login.
// Only mutates the struct; persisting is the caller's responsibility.
func (u *User) SetLastLogin() {
	u.LastLoginUnix = timeutil.TimeStampNow()
}
// UpdateDiffViewStyle updates the users diff view style and persists
// only that column.
func (u *User) UpdateDiffViewStyle(style string) error {
	u.DiffViewStyle = style
	return UpdateUserCols(u, "diff_view_style")
}
// UpdateTheme updates a users' theme irrespective of the site wide theme,
// persisting only the theme column.
func (u *User) UpdateTheme(themeName string) error {
	u.Theme = themeName
	return UpdateUserCols(u, "theme")
}
// GetEmail returns an noreply email, if the user has set to keep his
// email address private, otherwise the primary email address.
func (u *User) GetEmail() string {
	if !u.KeepEmailPrivate {
		return u.Email
	}
	// Synthetic address under the configured no-reply domain.
	return fmt.Sprintf("%s@%s", u.LowerName, setting.Service.NoReplyAddress)
}
// GetAllUsers returns a slice of all users found in DB, ordered by ID.
func GetAllUsers() ([]*User, error) {
	users := make([]*User, 0)
	return users, x.OrderBy("id").Find(&users)
}
// IsLocal returns true if user login type is LoginPlain.
// NOTE(review): the comparison is <=, so any LoginType up to and including
// LoginPlain counts as local — confirm against the LoginType constants.
func (u *User) IsLocal() bool {
	return u.LoginType <= LoginPlain
}
// IsOAuth2 returns true if user login type is LoginOAuth2.
func (u *User) IsOAuth2() bool {
	return u.LoginType == LoginOAuth2
}
// HasForkedRepo checks if user has already forked a repository with given ID.
// The forked repository itself (first return of HasForkedRepo) is discarded.
func (u *User) HasForkedRepo(repoID int64) bool {
	_, has := HasForkedRepo(u.ID, repoID)
	return has
}
// MaxCreationLimit returns the number of repositories a user is allowed to create.
// A per-user value of -1 (or lower) means the global default applies.
func (u *User) MaxCreationLimit() int {
	if u.MaxRepoCreation > -1 {
		return u.MaxRepoCreation
	}
	return setting.Repository.MaxCreationLimit
}
// CanCreateRepo returns if user login can create a repository.
// Admins are never limited; otherwise the per-user limit applies, falling back
// to the global limit (-1 in either place means unlimited at that level).
// NOTE: functions calling this assume a failure due to repository count limit; if new checks are added, those functions should be revised
func (u *User) CanCreateRepo() bool {
	if u.IsAdmin {
		return true
	}
	limit := u.MaxRepoCreation
	if limit <= -1 {
		limit = setting.Repository.MaxCreationLimit
		if limit <= -1 {
			// No limit configured anywhere.
			return true
		}
	}
	return u.NumRepos < limit
}
// CanCreateOrganization returns true if user can create organisation.
// Admins always can; others need the per-user flag and the site-wide setting.
func (u *User) CanCreateOrganization() bool {
	return u.IsAdmin || (u.AllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation)
}
// CanEditGitHook returns true if user can edit Git hooks.
// Requires hooks to be enabled site-wide, plus admin status or the per-user flag.
func (u *User) CanEditGitHook() bool {
	return !setting.DisableGitHooks && (u.IsAdmin || u.AllowGitHook)
}
// CanImportLocal returns true if user can migrate repository by local path.
// The site-wide ImportLocalPaths switch gates the per-user/admin permission.
func (u *User) CanImportLocal() bool {
	return setting.ImportLocalPaths && (u.IsAdmin || u.AllowImportLocal)
}
// DashboardLink returns the user dashboard page link.
// Organizations get their dedicated dashboard; individual users get the root
// dashboard.
func (u *User) DashboardLink() string {
	if !u.IsOrganization() {
		return setting.AppSubURL + "/"
	}
	return setting.AppSubURL + "/org/" + u.Name + "/dashboard/"
}
// HomeLink returns the user or organization home page link,
// relative to the application sub-URL.
func (u *User) HomeLink() string {
	return setting.AppSubURL + "/" + u.Name
}
// HTMLURL returns the user or organization's full link
// (absolute, based on the configured application URL).
func (u *User) HTMLURL() string {
	return setting.AppURL + u.Name
}
// GenerateEmailActivateCode generates an activate code based on user information and given e-mail.
// The code is a time-limited token derived from ID, e-mail, name, password hash
// and Rands, followed by the hex-encoded lower-cased user name.
func (u *User) GenerateEmailActivateCode(email string) string {
	code := base.CreateTimeLimitCode(
		fmt.Sprintf("%d%s%s%s%s", u.ID, email, u.LowerName, u.Passwd, u.Rands),
		setting.Service.ActiveCodeLives, nil)
	// Add tail hex username
	code += hex.EncodeToString([]byte(u.LowerName))
	return code
}
// GenerateActivateCode generates an activate code based on user information,
// using the user's primary e-mail address.
func (u *User) GenerateActivateCode() string {
	return u.GenerateEmailActivateCode(u.Email)
}
// GetFollowers returns range of user's followers.
// Pagination applies when listOptions.Page is non-zero.
func (u *User) GetFollowers(listOptions ListOptions) ([]*User, error) {
	sess := x.
		Where("follow.follow_id=?", u.ID).
		Join("LEFT", "follow", "`user`.id=follow.user_id")
	// Default slice capacity when no pagination is requested.
	capacity := 8
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	users := make([]*User, 0, capacity)
	return users, sess.Find(&users)
}
// IsFollowing returns true if user is following followID.
// Thin wrapper around the package-level IsFollowing.
func (u *User) IsFollowing(followID int64) bool {
	return IsFollowing(u.ID, followID)
}
// GetFollowing returns range of user's following.
// Pagination applies when listOptions.Page is non-zero.
func (u *User) GetFollowing(listOptions ListOptions) ([]*User, error) {
	sess := x.
		Where("follow.user_id=?", u.ID).
		Join("LEFT", "follow", "`user`.id=follow.follow_id")
	// Default slice capacity when no pagination is requested.
	capacity := 8
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	users := make([]*User, 0, capacity)
	return users, sess.Find(&users)
}
// NewGitSig generates and returns the signature of given user,
// timestamped with the current time.
func (u *User) NewGitSig() *git.Signature {
	return &git.Signature{
		Name:  u.GitName(),
		Email: u.GetEmail(),
		When:  time.Now(),
	}
}
// hashPassword derives a password hash with the named algorithm.
// bcrypt ignores the salt (it embeds its own) and returns its native encoding;
// the other algorithms return the 50-byte derived key hex-encoded.
// Unknown algo values fall back to pbkdf2.
// WARNING: the work-factor parameters below are part of the stored-hash
// contract — changing them invalidates existing password hashes.
func hashPassword(passwd, salt, algo string) string {
	var tempPasswd []byte
	switch algo {
	case algoBcrypt:
		tempPasswd, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)
		return string(tempPasswd)
	case algoScrypt:
		tempPasswd, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50)
	case algoArgon2:
		tempPasswd = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50)
	case algoPbkdf2:
		fallthrough
	default:
		tempPasswd = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New)
	}
	return fmt.Sprintf("%x", tempPasswd)
}
// SetPassword hashes a password using the algorithm defined in the config value of PASSWORD_HASH_ALGO
// change passwd, salt and passwd_hash_algo fields.
// An empty password clears all three fields (used to mark "no password set").
func (u *User) SetPassword(passwd string) (err error) {
	if len(passwd) == 0 {
		u.Passwd = ""
		u.Salt = ""
		u.PasswdHashAlgo = ""
		return nil
	}
	// A fresh salt is generated on every password change.
	if u.Salt, err = GetUserSalt(); err != nil {
		return err
	}
	u.PasswdHashAlgo = setting.PasswordHashAlgo
	u.Passwd = hashPassword(passwd, u.Salt, setting.PasswordHashAlgo)
	return nil
}
// ValidatePassword checks if given password matches the one belongs to the user.
// Non-bcrypt hashes are compared in constant time; bcrypt uses its own
// comparison since the salt is embedded in the stored hash.
func (u *User) ValidatePassword(passwd string) bool {
	tempHash := hashPassword(passwd, u.Salt, u.PasswdHashAlgo)
	if u.PasswdHashAlgo != algoBcrypt && subtle.ConstantTimeCompare([]byte(u.Passwd), []byte(tempHash)) == 1 {
		return true
	}
	if u.PasswdHashAlgo == algoBcrypt && bcrypt.CompareHashAndPassword([]byte(u.Passwd), []byte(passwd)) == nil {
		return true
	}
	return false
}
// IsPasswordSet checks if the password is set or left empty
// (SetPassword("") clears it).
func (u *User) IsPasswordSet() bool {
	return len(u.Passwd) != 0
}
// IsOrganization returns true if user is actually a organization.
func (u *User) IsOrganization() bool {
	return u.Type == UserTypeOrganization
}
// IsUserOrgOwner returns true if user is in the owner team of given organization.
// Lookup errors are logged and treated as "not an owner".
func (u *User) IsUserOrgOwner(orgID int64) bool {
	isOwner, err := IsOrganizationOwner(orgID, u.ID)
	if err != nil {
		log.Error("IsOrganizationOwner: %v", err)
		return false
	}
	return isOwner
}
// HasMemberWithUserID returns true if user with userID is part of the u organisation.
// Thin wrapper around hasMemberWithUserID using the default engine.
func (u *User) HasMemberWithUserID(userID int64) bool {
	return u.hasMemberWithUserID(x, userID)
}
// hasMemberWithUserID reports whether userID is a member of organization u,
// using the supplied engine. Lookup errors are logged and treated as "not a member".
func (u *User) hasMemberWithUserID(e Engine, userID int64) bool {
	isMember, err := isOrganizationMember(e, u.ID, userID)
	if err != nil {
		log.Error("IsOrganizationMember: %v", err)
		return false
	}
	return isMember
}
// IsPublicMember returns true if user public his/her membership in given organization.
// Lookup errors are logged and treated as "not public".
func (u *User) IsPublicMember(orgID int64) bool {
	isMember, err := IsPublicMembership(orgID, u.ID)
	if err != nil {
		log.Error("IsPublicMembership: %v", err)
		return false
	}
	return isMember
}
// getOrganizationCount counts the org_user rows for this user via the
// supplied engine, i.e. how many organizations the user belongs to.
func (u *User) getOrganizationCount(e Engine) (int64, error) {
	return e.
		Where("uid=?", u.ID).
		Count(new(OrgUser))
}
// GetOrganizationCount returns count of membership of organization of user.
// Wrapper over getOrganizationCount using the default engine `x`.
func (u *User) GetOrganizationCount() (int64, error) {
	return u.getOrganizationCount(x)
}
// GetRepositories returns repositories that user owns, including private repositories.
// Results are stored on u.Repos; the total count from the search is discarded.
func (u *User) GetRepositories(listOpts ListOptions, names ...string) (err error) {
	u.Repos, _, err = GetUserRepositories(&SearchRepoOptions{Actor: u, Private: true, ListOptions: listOpts, LowerNames: names})
	return err
}
// GetRepositoryIDs returns repositories IDs where user owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64
	sess := x.Table("repository").Cols("repository.id")
	// Restrict to repositories exposing at least one of the requested units.
	if len(units) != 0 {
		sess = sess.
			Join("INNER", "repo_unit", "repository.id = repo_unit.repo_id").
			In("repo_unit.type", units)
	}
	return ids, sess.Where("owner_id = ?", u.ID).Find(&ids)
}
// GetActiveRepositoryIDs returns non-archived repositories IDs where user owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetActiveRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64
	sess := x.Table("repository").Cols("repository.id")
	if len(units) > 0 {
		sess = sess.Join("INNER", "repo_unit", "repository.id = repo_unit.repo_id")
		sess = sess.In("repo_unit.type", units)
	}
	// Exclude archived repositories; the join above can yield one row per unit,
	// hence the GroupBy below to deduplicate IDs.
	sess.Where(builder.Eq{"is_archived": false})
	return ids, sess.Where("owner_id = ?", u.ID).GroupBy("repository.id").Find(&ids)
}
// GetOrgRepositoryIDs returns repositories IDs where user's team owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetOrgRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64
	// The team_repo join condition is deliberately two-sided:
	// non-restricted users (`? != ?` evaluates true when u.IsRestricted is false)
	// see all non-private org repos, while everyone additionally sees repos
	// explicitly granted to a team they belong to.
	if err := x.Table("repository").
		Cols("repository.id").
		Join("INNER", "team_user", "repository.owner_id = team_user.org_id").
		Join("INNER", "team_repo", "(? != ? and repository.is_private != ?) OR (team_user.team_id = team_repo.team_id AND repository.id = team_repo.repo_id)", true, u.IsRestricted, true).
		Where("team_user.uid = ?", u.ID).
		GroupBy("repository.id").Find(&ids); err != nil {
		return nil, err
	}
	// Unit filtering is done in a second pass rather than in SQL.
	if len(units) > 0 {
		return FilterOutRepoIdsWithoutUnitAccess(u, ids, units...)
	}
	return ids, nil
}
// GetActiveOrgRepositoryIDs returns non-archived repositories IDs where user's team owned and has unittypes
// Caller shall check that units is not globally disabled
func (u *User) GetActiveOrgRepositoryIDs(units ...UnitType) ([]int64, error) {
	var ids []int64
	// Same access logic as GetOrgRepositoryIDs, with an extra filter that
	// excludes archived repositories.
	if err := x.Table("repository").
		Cols("repository.id").
		Join("INNER", "team_user", "repository.owner_id = team_user.org_id").
		Join("INNER", "team_repo", "(? != ? and repository.is_private != ?) OR (team_user.team_id = team_repo.team_id AND repository.id = team_repo.repo_id)", true, u.IsRestricted, true).
		Where("team_user.uid = ?", u.ID).
		Where(builder.Eq{"is_archived": false}).
		GroupBy("repository.id").Find(&ids); err != nil {
		return nil, err
	}
	// Unit filtering is done in a second pass rather than in SQL.
	if len(units) > 0 {
		return FilterOutRepoIdsWithoutUnitAccess(u, ids, units...)
	}
	return ids, nil
}
// GetAccessRepoIDs returns the IDs of all repositories the user can access:
// repositories the user owns plus repositories owned by organizations the
// user is a team member of.
// Caller shall check that units is not globally disabled.
func (u *User) GetAccessRepoIDs(units ...UnitType) ([]int64, error) {
	ownIDs, err := u.GetRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}
	orgIDs, err := u.GetOrgRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}
	return append(ownIDs, orgIDs...), nil
}
// GetActiveAccessRepoIDs returns the IDs of all non-archived repositories the
// user can access: owned repositories plus those of organizations the user is
// a team member of.
// Caller shall check that units is not globally disabled.
func (u *User) GetActiveAccessRepoIDs(units ...UnitType) ([]int64, error) {
	ownIDs, err := u.GetActiveRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}
	orgIDs, err := u.GetActiveOrgRepositoryIDs(units...)
	if err != nil {
		return nil, err
	}
	return append(ownIDs, orgIDs...), nil
}
// GetMirrorRepositories returns mirror repositories that user owns, including private repositories.
func (u *User) GetMirrorRepositories() ([]*Repository, error) {
	return GetUserMirrorRepositories(u.ID)
}
// GetOwnedOrganizations returns all organizations that user owns.
// Results are stored on u.OwnedOrgs.
func (u *User) GetOwnedOrganizations() (err error) {
	u.OwnedOrgs, err = GetOwnedOrgsByUserID(u.ID)
	return err
}
// GetOrganizations returns paginated organizations that user belongs to.
// For each organization it also computes the number of repositories visible to
// u (exposed via NumRepos). Results are stored on u.Orgs.
// TODO: does not respect All and show orgs you privately participate
func (u *User) GetOrganizations(opts *SearchOrganizationsOptions) error {
	sess := x.NewSession()
	defer sess.Close()
	schema, err := x.TableInfo(new(User))
	if err != nil {
		return err
	}
	// Build an explicit GROUP BY over every user column: some databases
	// (e.g. PostgreSQL) reject selecting `user`.* while grouping on a subset.
	groupByCols := &strings.Builder{}
	for _, col := range schema.Columns() {
		fmt.Fprintf(groupByCols, "`%s`.%s,", schema.Name, col.Name)
	}
	groupByStr := groupByCols.String()
	// Drop the trailing comma left by the loop above.
	groupByStr = groupByStr[0 : len(groupByStr)-1]
	// LEFT JOIN against the accessible-repository subquery so orgs with zero
	// visible repos still appear (with count 0).
	sess.Select("`user`.*, count(repo_id) as org_count").
		Table("user").
		Join("INNER", "org_user", "`org_user`.org_id=`user`.id").
		Join("LEFT", builder.
			Select("id as repo_id, owner_id as repo_owner_id").
			From("repository").
			Where(accessibleRepositoryCondition(u)), "`repository`.repo_owner_id = `org_user`.org_id").
		And("`org_user`.uid=?", u.ID).
		GroupBy(groupByStr)
	if opts.PageSize != 0 {
		sess = opts.setSessionPagination(sess)
	}
	// OrgCount embeds User so xorm can scan both the user columns and the
	// aggregated org_count into one row.
	type OrgCount struct {
		User     `xorm:"extends"`
		OrgCount int
	}
	orgCounts := make([]*OrgCount, 0, 10)
	if err := sess.
		Asc("`user`.name").
		Find(&orgCounts); err != nil {
		return err
	}
	orgs := make([]*User, len(orgCounts))
	for i, orgCount := range orgCounts {
		// Repurpose NumRepos to carry the visible-repository count.
		orgCount.User.NumRepos = orgCount.OrgCount
		orgs[i] = &orgCount.User
	}
	u.Orgs = orgs
	return nil
}
// DisplayName returns the trimmed full name when it is non-empty,
// and falls back to the username otherwise.
func (u *User) DisplayName() string {
	if name := strings.TrimSpace(u.FullName); name != "" {
		return name
	}
	return u.Name
}
// GetDisplayName returns the trimmed full name when it is non-empty and the
// DEFAULT_SHOW_FULL_NAME setting is enabled; the username otherwise.
func (u *User) GetDisplayName() string {
	if !setting.UI.DefaultShowFullName {
		return u.Name
	}
	if name := strings.TrimSpace(u.FullName); name != "" {
		return name
	}
	return u.Name
}
// gitSafeName strips characters that are unsafe in git author/committer
// identities (newlines and angle brackets) and trims surrounding whitespace.
func gitSafeName(name string) string {
	cleaned := strings.NewReplacer(
		"\n", "",
		"<", "",
		">", "",
	).Replace(name)
	return strings.TrimSpace(cleaned)
}
// GitName returns a git safe name for the user, preferring the full name,
// then the username, then a synthetic always-safe fallback.
func (u *User) GitName() string {
	if name := gitSafeName(u.FullName); name != "" {
		return name
	}
	// u.Name is normally safe if created in our system, but externally
	// sourced accounts (e.g. LDAP) may contain unsafe characters.
	if name := gitSafeName(u.Name); name != "" {
		return name
	}
	// Totally pathological name so it's got to be:
	return fmt.Sprintf("user-%d", u.ID)
}
// ShortName ellipses the username to at most `length` characters
// via base.EllipsisString.
func (u *User) ShortName(length int) string {
	return base.EllipsisString(u.Name, length)
}
// IsMailable checks if a user is eligible
// to receive emails (currently: the account is active).
func (u *User) IsMailable() bool {
	return u.IsActive
}
// EmailNotifications returns the User's email notification preference
// (one of the EmailNotifications* constants).
func (u *User) EmailNotifications() string {
	return u.EmailNotificationsPreference
}
// SetEmailNotifications sets the user's email notification preference and
// persists the single changed column.
func (u *User) SetEmailNotifications(set string) error {
	u.EmailNotificationsPreference = set
	err := UpdateUserCols(u, "email_notifications_preference")
	if err != nil {
		log.Error("SetEmailNotifications: %v", err)
	}
	return err
}
// isUserExist reports whether a user other than uid already holds the given
// name (compared case-insensitively via lower_name). An empty name never exists.
func isUserExist(e Engine, uid int64, name string) (bool, error) {
	if len(name) == 0 {
		return false, nil
	}
	return e.
		Where("id!=?", uid).
		Get(&User{LowerName: strings.ToLower(name)})
}
// IsUserExist checks if given user name exist,
// the user name should be noncased unique.
// If uid is presented, then check will rule out that one,
// it is used when update a user name in settings page.
func IsUserExist(uid int64, name string) (bool, error) {
	return isUserExist(x, uid, name)
}
// GetUserSalt returns a random user salt token (10 random characters).
func GetUserSalt() (string, error) {
	return generate.GetRandomString(10)
}
// NewGhostUser creates and returns a fake user for someone who has deleted
// their account. The sentinel ID -1 together with the name "Ghost" identifies
// it (see IsGhost).
func NewGhostUser() *User {
	return &User{
		ID:        -1,
		Name:      "Ghost",
		LowerName: "ghost",
	}
}
// NewReplaceUser creates and returns a fake user (ID -1) standing in for an
// external user with the given name.
func NewReplaceUser(name string) *User {
	return &User{
		ID:        -1,
		Name:      name,
		LowerName: strings.ToLower(name),
	}
}
// IsGhost checks if the user is the fake placeholder for a deleted account
// (ID -1 and name "Ghost"). Safe to call on a nil receiver.
func (u *User) IsGhost() bool {
	return u != nil && u.ID == -1 && u.Name == "Ghost"
}
var (
	// reservedUsernames lists names that can never be registered because they
	// collide with top-level routes, plus any well-known public entries
	// served from the site root.
	reservedUsernames = append([]string{
		".",
		"..",
		".well-known",
		"admin",
		"api",
		"assets",
		"attachments",
		"avatars",
		"captcha",
		"commits",
		"debug",
		"error",
		"explore",
		"ghost",
		"help",
		"install",
		"issues",
		"less",
		"login",
		"manifest.json",
		"metrics",
		"milestones",
		"new",
		"notifications",
		"org",
		"plugins",
		"pulls",
		"raw",
		"repo",
		"robots.txt",
		"search",
		"stars",
		"template",
		"user",
	}, public.KnownPublicEntries...)
	// reservedUserPatterns are wildcard patterns ('*' prefix or suffix) that
	// usernames must not match (they shadow per-user key/GPG endpoints).
	reservedUserPatterns = []string{"*.keys", "*.gpg"}
)
// isUsableName checks if name is reserved or pattern of name is not allowed
// based on given reserved names and patterns.
// Names are exact match, patterns can be prefix or suffix match with placeholder '*'.
// The name is lower-cased and trimmed before checking; an empty result yields
// ErrNameEmpty.
func isUsableName(names, patterns []string, name string) error {
	name = strings.TrimSpace(strings.ToLower(name))
	if utf8.RuneCountInString(name) == 0 {
		return ErrNameEmpty
	}
	for i := range names {
		if name == names[i] {
			return ErrNameReserved{name}
		}
	}
	for _, pat := range patterns {
		// Guard against an empty pattern: pat[0]/pat[len(pat)-1] below would
		// panic with an index out of range.
		if len(pat) == 0 {
			continue
		}
		if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) ||
			(pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) {
			return ErrNamePatternNotAllowed{pat}
		}
	}
	return nil
}
// IsUsableUsername returns an error when a username contains disallowed
// characters or is reserved.
func IsUsableUsername(name string) error {
	// Validate username make sure it satisfies requirement.
	if alphaDashDotPattern.MatchString(name) {
		// Note: usually this error is normally caught up earlier in the UI
		return ErrNameCharsNotAllowed{Name: name}
	}
	return isUsableName(reservedUsernames, reservedUserPatterns, name)
}
// CreateUser creates record of a new user.
// It validates the username, checks name/email uniqueness inside a single
// transaction, applies instance defaults (email privacy, org creation,
// notification preference, theme) and hashes the password before insert.
func CreateUser(u *User) (err error) {
	if err = IsUsableUsername(u.Name); err != nil {
		return err
	}
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	isExist, err := isUserExist(sess, 0, u.Name)
	if err != nil {
		return err
	} else if isExist {
		return ErrUserAlreadyExist{u.Name}
	}
	// A leftover redirect from a renamed/deleted user must not shadow the new name.
	if err = deleteUserRedirect(sess, u.Name); err != nil {
		return err
	}
	u.Email = strings.ToLower(u.Email)
	// Check the primary-email column first, then the alternate address table.
	isExist, err = sess.
		Where("email=?", u.Email).
		Get(new(User))
	if err != nil {
		return err
	} else if isExist {
		return ErrEmailAlreadyUsed{u.Email}
	}
	if err = ValidateEmail(u.Email); err != nil {
		return err
	}
	isExist, err = isEmailUsed(sess, u.Email)
	if err != nil {
		return err
	} else if isExist {
		return ErrEmailAlreadyUsed{u.Email}
	}
	// Apply instance-wide defaults.
	u.KeepEmailPrivate = setting.Service.DefaultKeepEmailPrivate
	u.LowerName = strings.ToLower(u.Name)
	u.AvatarEmail = u.Email
	if u.Rands, err = GetUserSalt(); err != nil {
		return err
	}
	// u.Passwd still holds the plaintext here; SetPassword replaces it with the hash.
	if err = u.SetPassword(u.Passwd); err != nil {
		return err
	}
	u.AllowCreateOrganization = setting.Service.DefaultAllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation
	u.EmailNotificationsPreference = setting.Admin.DefaultEmailNotification
	u.MaxRepoCreation = -1 // -1 means "use the global limit"
	u.Theme = setting.UI.DefaultTheme
	if _, err = sess.Insert(u); err != nil {
		return err
	}
	return sess.Commit()
}
// countUsers returns the number of individual users (type=0).
// A database error is logged and a zero count returned, preserving the
// previous best-effort behaviour for callers that only want a number.
func countUsers(e Engine) int64 {
	count, err := e.
		Where("type=0").
		Count(new(User))
	if err != nil {
		// Previously this error was silently discarded; surface it in the log.
		log.Error("countUsers: %v", err)
	}
	return count
}
// CountUsers returns number of (individual, non-organization) users.
func CountUsers() int64 {
	return countUsers(x)
}
// getVerifyUser resolves the user referenced by a verification code.
// The code layout is: time-limit prefix (base.TimeLimitCodeLength bytes)
// followed by the hex-encoded username.
func getVerifyUser(code string) (user *User) {
	if len(code) <= base.TimeLimitCodeLength {
		return nil
	}
	// use tail hex username query user
	hexStr := code[base.TimeLimitCodeLength:]
	if b, err := hex.DecodeString(hexStr); err == nil {
		if user, err = GetUserByName(string(b)); user != nil {
			return user
		}
		// Reached only when the lookup failed, so err is the lookup error.
		log.Error("user.getVerifyUser: %v", err)
	}
	return nil
}
// VerifyUserActiveCode verifies an account-activation code and returns the
// matching user, or nil when the code is invalid or expired.
func VerifyUserActiveCode(code string) (user *User) {
	minutes := setting.Service.ActiveCodeLives
	if user = getVerifyUser(code); user != nil {
		// time limit code
		prefix := code[:base.TimeLimitCodeLength]
		// The payload binds the code to the user's current credentials, so
		// changing the password/rands invalidates outstanding codes.
		data := fmt.Sprintf("%d%s%s%s%s", user.ID, user.Email, user.LowerName, user.Passwd, user.Rands)
		if base.VerifyTimeLimitCode(data, minutes, prefix) {
			return user
		}
	}
	return nil
}
// VerifyActiveEmailCode verifies an email-activation code and returns the
// matching EmailAddress record, or nil when the code is invalid or expired.
func VerifyActiveEmailCode(code, email string) *EmailAddress {
	minutes := setting.Service.ActiveCodeLives
	if user := getVerifyUser(code); user != nil {
		// time limit code
		prefix := code[:base.TimeLimitCodeLength]
		// Unlike VerifyUserActiveCode, the payload embeds the email being
		// activated rather than the user's primary email.
		data := fmt.Sprintf("%d%s%s%s%s", user.ID, email, user.LowerName, user.Passwd, user.Rands)
		if base.VerifyTimeLimitCode(data, minutes, prefix) {
			emailAddress := &EmailAddress{UID: user.ID, Email: email}
			if has, _ := x.Get(emailAddress); has {
				return emailAddress
			}
		}
	}
	return nil
}
// ChangeUserName changes all corresponding setting from old user name to new one.
// It renames the on-disk repository directory, updates owner_name on the user's
// repositories and records a redirect from the old name — all guarded by a
// transaction; the directory rename is rolled back by hand if the commit fails.
func ChangeUserName(u *User, newUserName string) (err error) {
	oldUserName := u.Name
	if err = IsUsableUsername(newUserName); err != nil {
		return err
	}
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	isExist, err := isUserExist(sess, 0, newUserName)
	if err != nil {
		return err
	} else if isExist {
		return ErrUserAlreadyExist{newUserName}
	}
	if _, err = sess.Exec("UPDATE `repository` SET owner_name=? WHERE owner_name=?", newUserName, oldUserName); err != nil {
		return fmt.Errorf("Change repo owner name: %v", err)
	}
	// Do not fail if directory does not exist
	if err = os.Rename(UserPath(oldUserName), UserPath(newUserName)); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("Rename user directory: %v", err)
	}
	if err = newUserRedirect(sess, u.ID, oldUserName, newUserName); err != nil {
		return err
	}
	if err = sess.Commit(); err != nil {
		// The DB changes rolled back automatically; undo the filesystem rename
		// manually so disk and database stay consistent.
		if err2 := os.Rename(UserPath(newUserName), UserPath(oldUserName)); err2 != nil && !os.IsNotExist(err2) {
			log.Critical("Unable to rollback directory change during failed username change from: %s to: %s. DB Error: %v. Filesystem Error: %v", oldUserName, newUserName, err, err2)
			return fmt.Errorf("failed to rollback directory change during failed username change from: %s to: %s. DB Error: %w. Filesystem Error: %v", oldUserName, newUserName, err, err2)
		}
		return err
	}
	return nil
}
// checkDupEmail checks whether another user of the same type already uses
// u's email address (normalized to lower case as a side effect).
func checkDupEmail(e Engine, u *User) error {
	u.Email = strings.ToLower(u.Email)
	has, err := e.
		Where("id!=?", u.ID).
		And("type=?", u.Type).
		And("email=?", u.Email).
		Get(new(User))
	switch {
	case err != nil:
		return err
	case has:
		return ErrEmailAlreadyUsed{u.Email}
	default:
		return nil
	}
}
// updateUser writes all columns of u back to the database after normalizing
// and validating the email address.
func updateUser(e Engine, u *User) (err error) {
	u.Email = strings.ToLower(u.Email)
	if err = ValidateEmail(u.Email); err != nil {
		return err
	}
	_, err = e.ID(u.ID).AllCols().Update(u)
	return err
}
// UpdateUser updates user's information (all columns) using the default engine.
func UpdateUser(u *User) error {
	return updateUser(x, u)
}
// UpdateUserCols updates only the named columns of the user record.
func UpdateUserCols(u *User, cols ...string) error {
	return updateUserCols(x, u, cols...)
}
// updateUserCols updates only the named columns of u using the given engine.
func updateUserCols(e Engine, u *User, cols ...string) error {
	_, err := e.ID(u.ID).Cols(cols...).Update(u)
	return err
}
// UpdateUserSetting updates user's settings inside a transaction, rejecting
// duplicate email addresses for individual (non-organization) accounts.
func UpdateUserSetting(u *User) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	// The duplicate-email check only applies to individual accounts.
	if !u.IsOrganization() {
		if err := checkDupEmail(sess, u); err != nil {
			return err
		}
	}
	if err := updateUser(sess, u); err != nil {
		return err
	}
	return sess.Commit()
}
// deleteBeans deletes all given beans, beans should contain delete conditions.
func deleteBeans(e Engine, beans ...interface{}) error {
	for _, bean := range beans {
		if _, err := e.Delete(bean); err != nil {
			return err
		}
	}
	return nil
}
// deleteUser removes a user and every record that references them, in a fixed
// order: counters on watched/starred/followed entities are decremented first,
// then dependent rows, comments/reactions (only for young accounts), SSH/GPG
// keys, external links, the user row itself, and finally on-disk data.
// Precondition (enforced below): the user owns no repositories and belongs to
// no organizations. Filesystem/avatar cleanup after the DB delete cannot be
// rolled back, so failures there are recorded as admin notices.
func deleteUser(e Engine, u *User) error {
	// Note: A user owns any repository or belongs to any organization
	// cannot perform delete operation.
	// Check ownership of repository.
	count, err := getRepositoryCount(e, u)
	if err != nil {
		return fmt.Errorf("GetRepositoryCount: %v", err)
	} else if count > 0 {
		return ErrUserOwnRepos{UID: u.ID}
	}
	// Check membership of organization.
	count, err = u.getOrganizationCount(e)
	if err != nil {
		return fmt.Errorf("GetOrganizationCount: %v", err)
	} else if count > 0 {
		return ErrUserHasOrgs{UID: u.ID}
	}
	// ***** START: Watch *****
	watchedRepoIDs := make([]int64, 0, 10)
	if err = e.Table("watch").Cols("watch.repo_id").
		Where("watch.user_id = ?", u.ID).And("watch.mode <>?", RepoWatchModeDont).Find(&watchedRepoIDs); err != nil {
		return fmt.Errorf("get all watches: %v", err)
	}
	if _, err = e.Decr("num_watches").In("id", watchedRepoIDs).NoAutoTime().Update(new(Repository)); err != nil {
		return fmt.Errorf("decrease repository num_watches: %v", err)
	}
	// ***** END: Watch *****
	// ***** START: Star *****
	starredRepoIDs := make([]int64, 0, 10)
	if err = e.Table("star").Cols("star.repo_id").
		Where("star.uid = ?", u.ID).Find(&starredRepoIDs); err != nil {
		return fmt.Errorf("get all stars: %v", err)
	} else if _, err = e.Decr("num_stars").In("id", starredRepoIDs).NoAutoTime().Update(new(Repository)); err != nil {
		return fmt.Errorf("decrease repository num_stars: %v", err)
	}
	// ***** END: Star *****
	// ***** START: Follow *****
	followeeIDs := make([]int64, 0, 10)
	if err = e.Table("follow").Cols("follow.follow_id").
		Where("follow.user_id = ?", u.ID).Find(&followeeIDs); err != nil {
		return fmt.Errorf("get all followees: %v", err)
	} else if _, err = e.Decr("num_followers").In("id", followeeIDs).Update(new(User)); err != nil {
		return fmt.Errorf("decrease user num_followers: %v", err)
	}
	followerIDs := make([]int64, 0, 10)
	if err = e.Table("follow").Cols("follow.user_id").
		Where("follow.follow_id = ?", u.ID).Find(&followerIDs); err != nil {
		return fmt.Errorf("get all followers: %v", err)
	} else if _, err = e.Decr("num_following").In("id", followerIDs).Update(new(User)); err != nil {
		return fmt.Errorf("decrease user num_following: %v", err)
	}
	// ***** END: Follow *****
	// Remove all rows that reference the user directly.
	if err = deleteBeans(e,
		&AccessToken{UID: u.ID},
		&Collaboration{UserID: u.ID},
		&Access{UserID: u.ID},
		&Watch{UserID: u.ID},
		&Star{UID: u.ID},
		&Follow{UserID: u.ID},
		&Follow{FollowID: u.ID},
		&Action{UserID: u.ID},
		&IssueUser{UID: u.ID},
		&EmailAddress{UID: u.ID},
		&UserOpenID{UID: u.ID},
		&Reaction{UserID: u.ID},
		&TeamUser{UID: u.ID},
		&Collaboration{UserID: u.ID},
		&Stopwatch{UserID: u.ID},
	); err != nil {
		return fmt.Errorf("deleteBeans: %v", err)
	}
	// Accounts younger than UserDeleteWithCommentsMaxTime also take their
	// comments and reactions with them (spam cleanup); older accounts keep
	// them attributed to the Ghost user.
	if setting.Service.UserDeleteWithCommentsMaxTime != 0 &&
		u.CreatedUnix.AsTime().Add(setting.Service.UserDeleteWithCommentsMaxTime).After(time.Now()) {
		// Delete Comments
		const batchSize = 50
		for start := 0; ; start += batchSize {
			comments := make([]*Comment, 0, batchSize)
			if err = e.Where("type=? AND poster_id=?", CommentTypeComment, u.ID).Limit(batchSize, start).Find(&comments); err != nil {
				return err
			}
			if len(comments) == 0 {
				break
			}
			for _, comment := range comments {
				if err = deleteComment(e, comment); err != nil {
					return err
				}
			}
		}
		// Delete Reactions
		if err = deleteReaction(e, &ReactionOptions{Doer: u}); err != nil {
			return err
		}
	}
	// ***** START: PublicKey *****
	if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil {
		return fmt.Errorf("deletePublicKeys: %v", err)
	}
	// Regenerate authorized_keys / authorized_principals now that keys are gone.
	err = rewriteAllPublicKeys(e)
	if err != nil {
		return err
	}
	err = rewriteAllPrincipalKeys(e)
	if err != nil {
		return err
	}
	// ***** END: PublicKey *****
	// ***** START: GPGPublicKey *****
	keys, err := listGPGKeys(e, u.ID, ListOptions{})
	if err != nil {
		return fmt.Errorf("ListGPGKeys: %v", err)
	}
	// Delete GPGKeyImport(s).
	for _, key := range keys {
		if _, err = e.Delete(&GPGKeyImport{KeyID: key.KeyID}); err != nil {
			return fmt.Errorf("deleteGPGKeyImports: %v", err)
		}
	}
	if _, err = e.Delete(&GPGKey{OwnerID: u.ID}); err != nil {
		return fmt.Errorf("deleteGPGKeys: %v", err)
	}
	// ***** END: GPGPublicKey *****
	// Clear assignee.
	if err = clearAssigneeByUserID(e, u.ID); err != nil {
		return fmt.Errorf("clear assignee: %v", err)
	}
	// ***** START: ExternalLoginUser *****
	if err = removeAllAccountLinks(e, u); err != nil {
		return fmt.Errorf("ExternalLoginUser: %v", err)
	}
	// ***** END: ExternalLoginUser *****
	if _, err = e.ID(u.ID).Delete(new(User)); err != nil {
		return fmt.Errorf("Delete: %v", err)
	}
	// Note: There are something just cannot be roll back,
	// so just keep error logs of those operations.
	path := UserPath(u.Name)
	if err = util.RemoveAll(path); err != nil {
		err = fmt.Errorf("Failed to RemoveAll %s: %v", path, err)
		_ = createNotice(e, NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
		return err
	}
	if len(u.Avatar) > 0 {
		avatarPath := u.CustomAvatarRelativePath()
		if err = storage.Avatars.Delete(avatarPath); err != nil {
			err = fmt.Errorf("Failed to remove %s: %v", avatarPath, err)
			_ = createNotice(e, NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
			return err
		}
	}
	return nil
}
// DeleteUser completely and permanently deletes everything of a user,
// but issues/comments/pulls will be kept and shown as someone has been deleted,
// unless the user is younger than USER_DELETE_WITH_COMMENTS_MAX_DAYS.
// Organizations must be deleted through their own path and are rejected here.
func DeleteUser(u *User) (err error) {
	if u.IsOrganization() {
		return fmt.Errorf("%s is an organization not a user", u.Name)
	}
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	if err = deleteUser(sess, u); err != nil {
		// Note: don't wrapper error here.
		return err
	}
	return sess.Commit()
}
// DeleteInactiveUsers deletes all inactive users and email addresses.
// When olderThan is positive, only accounts created before now-olderThan are
// considered; otherwise all inactive accounts. Users that own repositories or
// belong to organizations are skipped (they were likely deactivated by an
// admin on purpose). The ctx allows cancelling between deletions.
func DeleteInactiveUsers(ctx context.Context, olderThan time.Duration) (err error) {
	users := make([]*User, 0, 10)
	if olderThan > 0 {
		if err = x.
			Where("is_active = ? and created_unix < ?", false, time.Now().Add(-olderThan).Unix()).
			Find(&users); err != nil {
			return fmt.Errorf("get all inactive users: %v", err)
		}
	} else {
		if err = x.
			Where("is_active = ?", false).
			Find(&users); err != nil {
			return fmt.Errorf("get all inactive users: %v", err)
		}
	}
	// FIXME: should only update authorized_keys file once after all deletions.
	for _, u := range users {
		select {
		case <-ctx.Done():
			return ErrCancelledf("Before delete inactive user %s", u.Name)
		default:
		}
		if err = DeleteUser(u); err != nil {
			// Ignore users that were set inactive by admin.
			if IsErrUserOwnRepos(err) || IsErrUserHasOrgs(err) {
				continue
			}
			return err
		}
	}
	// Sweep all unactivated alternate email addresses in one statement.
	_, err = x.
		Where("is_activated = ?", false).
		Delete(new(EmailAddress))
	return err
}
// UserPath returns the absolute path of the user's repository directory
// under the configured repository root (lower-cased username).
func UserPath(userName string) string {
	return filepath.Join(setting.RepoRootPath, strings.ToLower(userName))
}
// getUserByID loads the user with the given ID via the supplied engine,
// returning ErrUserNotExist when no row matches.
func getUserByID(e Engine, id int64) (*User, error) {
	user := new(User)
	switch has, err := e.ID(id).Get(user); {
	case err != nil:
		return nil, err
	case !has:
		return nil, ErrUserNotExist{id, "", 0}
	}
	return user, nil
}
// GetUserByID returns the user object by given ID if exists.
func GetUserByID(id int64) (*User, error) {
	return getUserByID(x, id)
}
// GetUserByName returns user by given name (case-insensitive match).
func GetUserByName(name string) (*User, error) {
	return getUserByName(x, name)
}
// getUserByName loads a user by name (matched case-insensitively against
// lower_name), returning ErrUserNotExist for empty or unknown names.
func getUserByName(e Engine, name string) (*User, error) {
	if name == "" {
		return nil, ErrUserNotExist{0, name, 0}
	}
	user := &User{LowerName: strings.ToLower(name)}
	has, err := e.Get(user)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrUserNotExist{0, name, 0}
	}
	return user, nil
}
// GetUserEmailsByNames returns a list of e-mails corresponds to names of users
// that have their email notifications set to enabled or onmention.
func GetUserEmailsByNames(names []string) []string {
	return getUserEmailsByNames(x, names)
}
// getUserEmailsByNames collects the email addresses of the named users that
// are mailable and have not disabled email notifications. Unknown names are
// silently skipped.
func getUserEmailsByNames(e Engine, names []string) []string {
	mails := make([]string, 0, len(names))
	for _, name := range names {
		user, err := getUserByName(e, name)
		if err != nil {
			continue
		}
		if !user.IsMailable() || user.EmailNotifications() == EmailNotificationsDisabled {
			continue
		}
		mails = append(mails, user.Email)
	}
	return mails
}
// GetMaileableUsersByIDs gets users from ids, but only if they can receive
// mails: individual, active, not prohibited from logging in, and with an
// email notification preference that allows the message (isMention also
// accepts the "onmention" preference).
func GetMaileableUsersByIDs(ids []int64, isMention bool) ([]*User, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	ous := make([]*User, 0, len(ids))
	sess := x.In("id", ids).
		Where("`type` = ?", UserTypeIndividual).
		And("`prohibit_login` = ?", false).
		And("`is_active` = ?", true)
	if isMention {
		sess = sess.And("`email_notifications_preference` IN ( ?, ?)", EmailNotificationsEnabled, EmailNotificationsOnMention)
	} else {
		sess = sess.And("`email_notifications_preference` = ?", EmailNotificationsEnabled)
	}
	return ous, sess.Find(&ous)
}
// GetUserNamesByIDs returns the usernames of all resolved users from a list
// of IDs, sorted alphabetically.
func GetUserNamesByIDs(ids []int64) ([]string, error) {
	names := make([]string, 0, len(ids))
	err := x.Table("user").
		In("id", ids).
		Cols("name").
		Asc("name").
		Find(&names)
	return names, err
}
// GetUsersByIDs returns all resolved users from a list of IDs, sorted by name.
func GetUsersByIDs(ids []int64) (UserList, error) {
	users := make([]*User, 0, len(ids))
	if len(ids) == 0 {
		return users, nil
	}
	err := x.In("id", ids).
		Asc("name").
		Find(&users)
	return users, err
}
// GetUserIDsByNames returns a slice of IDs corresponding to names.
// With ignoreNonExistent, unknown names are skipped; otherwise the first
// lookup failure aborts the whole call.
func GetUserIDsByNames(names []string, ignoreNonExistent bool) ([]int64, error) {
	ids := make([]int64, 0, len(names))
	for _, name := range names {
		user, err := GetUserByName(name)
		if err != nil {
			if !ignoreNonExistent {
				return nil, err
			}
			continue
		}
		ids = append(ids, user.ID)
	}
	return ids, nil
}
// UserCommit represents a commit paired with the local user that matches the
// commit author's email (nil when no user matches).
type UserCommit struct {
	User *User
	*git.Commit
}
// ValidateCommitWithEmail checks if the author's e-mail of a commit
// corresponds to a user, returning that user or nil.
func ValidateCommitWithEmail(c *git.Commit) *User {
	if c.Author == nil {
		return nil
	}
	user, err := GetUserByEmail(c.Author.Email)
	if err != nil {
		return nil
	}
	return user
}
// ValidateCommitsWithEmails checks if authors' e-mails of commits are corresponding to users.
// Lookups are memoized per email so each address hits the database at most once;
// the result is a new list of UserCommit values (User is nil for unmatched emails).
func ValidateCommitsWithEmails(oldCommits *list.List) *list.List {
	var (
		u          *User
		emails     = map[string]*User{}
		newCommits = list.New()
		e          = oldCommits.Front()
	)
	for e != nil {
		c := e.Value.(*git.Commit)
		if c.Author != nil {
			if v, ok := emails[c.Author.Email]; !ok {
				// Cache even failed lookups (nil) to avoid repeat queries.
				u, _ = GetUserByEmail(c.Author.Email)
				emails[c.Author.Email] = u
			} else {
				u = v
			}
		} else {
			u = nil
		}
		newCommits.PushBack(UserCommit{
			User:   u,
			Commit: c,
		})
		e = e.Next()
	}
	return newCommits
}
// GetUserByEmail returns the user object by given e-mail if exists,
// using the default database context.
func GetUserByEmail(email string) (*User, error) {
	return GetUserByEmailContext(DefaultDBContext(), email)
}
// GetUserByEmailContext returns the user object by given e-mail if exists with db context.
// Resolution order: primary email column, then activated alternate addresses,
// then the synthetic no-reply address (<lowername>@NoReplyAddress).
func GetUserByEmailContext(ctx DBContext, email string) (*User, error) {
	if len(email) == 0 {
		return nil, ErrUserNotExist{0, email, 0}
	}
	email = strings.ToLower(email)
	// First try to find the user by primary email
	user := &User{Email: email}
	has, err := ctx.e.Get(user)
	if err != nil {
		return nil, err
	}
	if has {
		return user, nil
	}
	// Otherwise, check in alternative list for activated email addresses
	emailAddress := &EmailAddress{Email: email, IsActivated: true}
	has, err = ctx.e.Get(emailAddress)
	if err != nil {
		return nil, err
	}
	if has {
		return getUserByID(ctx.e, emailAddress.UID)
	}
	// Finally, if email address is the protected email address:
	if strings.HasSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) {
		// The local part of a no-reply address is the lower-cased username.
		username := strings.TrimSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress))
		user := &User{}
		has, err := ctx.e.Where("lower_name=?", username).Get(user)
		if err != nil {
			return nil, err
		}
		if has {
			return user, nil
		}
	}
	return nil, ErrUserNotExist{0, email, 0}
}
// GetUser checks if a user already exists, filling `user` in place from the
// first matching row and reporting whether one was found.
func GetUser(user *User) (bool, error) {
	return x.Get(user)
}
// SearchUserOptions contains the options for searching users or organizations.
type SearchUserOptions struct {
	ListOptions
	Keyword       string
	Type          UserType      // individual vs organization
	UID           int64         // restrict to a single user ID when > 0
	OrderBy       SearchOrderBy
	Visible       []structs.VisibleType
	Actor         *User // The user doing the search
	IsActive      util.OptionalBool
	SearchByEmail bool // Search by email as well as username/full name
}
// toConds translates the search options into a xorm builder condition:
// type filter, optional keyword match (name/full name, optionally email),
// visibility, actor-based access restrictions and the optional UID/IsActive
// filters.
func (opts *SearchUserOptions) toConds() builder.Cond {
	var cond builder.Cond = builder.Eq{"type": opts.Type}
	if len(opts.Keyword) > 0 {
		lowerKeyword := strings.ToLower(opts.Keyword)
		keywordCond := builder.Or(
			builder.Like{"lower_name", lowerKeyword},
			builder.Like{"LOWER(full_name)", lowerKeyword},
		)
		if opts.SearchByEmail {
			keywordCond = keywordCond.Or(builder.Like{"LOWER(email)", lowerKeyword})
		}
		cond = cond.And(keywordCond)
	}
	if len(opts.Visible) > 0 {
		cond = cond.And(builder.In("visibility", opts.Visible))
	} else {
		// Default to public-only when the caller gives no visibility filter.
		cond = cond.And(builder.In("visibility", structs.VisibleTypePublic))
	}
	if opts.Actor != nil {
		// Quoting of the reserved word "user" differs per database backend.
		var exprCond builder.Cond
		if setting.Database.UseMySQL {
			exprCond = builder.Expr("org_user.org_id = user.id")
		} else if setting.Database.UseMSSQL {
			exprCond = builder.Expr("org_user.org_id = [user].id")
		} else {
			exprCond = builder.Expr("org_user.org_id = \"user\".id")
		}
		var accessCond = builder.NewCond()
		if !opts.Actor.IsRestricted {
			// Regular actors see public/limited users plus private orgs they belong to.
			accessCond = builder.Or(
				builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),
				builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))
		} else {
			// restricted users only see orgs they are a member of
			accessCond = builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID})))
		}
		cond = cond.And(accessCond)
	}
	if opts.UID > 0 {
		cond = cond.And(builder.Eq{"id": opts.UID})
	}
	if !opts.IsActive.IsNone() {
		cond = cond.And(builder.Eq{"is_active": opts.IsActive.IsTrue()})
	}
	return cond
}
// SearchUsers takes options i.e. keyword and part of user name to search,
// it returns results in given range and number of total results.
// The count query and the page query share the same condition set.
func SearchUsers(opts *SearchUserOptions) (users []*User, _ int64, _ error) {
	cond := opts.toConds()
	count, err := x.Where(cond).Count(new(User))
	if err != nil {
		return nil, 0, fmt.Errorf("Count: %v", err)
	}
	if len(opts.OrderBy) == 0 {
		opts.OrderBy = SearchOrderByAlphabetically
	}
	sess := x.Where(cond).OrderBy(opts.OrderBy.String())
	if opts.Page != 0 {
		sess = opts.setSessionPagination(sess)
	}
	users = make([]*User, 0, opts.PageSize)
	return users, count, sess.Find(&users)
}
// GetStarredRepos returns the repos starred by a particular user,
// optionally excluding private repositories and applying pagination.
func GetStarredRepos(userID int64, private bool, listOptions ListOptions) ([]*Repository, error) {
	sess := x.Where("star.uid=?", userID).
		Join("LEFT", "star", "`repository`.id=`star`.repo_id")
	if !private {
		sess = sess.And("is_private=?", false)
	}
	capacity := 10
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	repos := make([]*Repository, 0, capacity)
	return repos, sess.Find(&repos)
}
// GetWatchedRepos returns the repos watched by a particular user (excluding
// "don't watch" mode), optionally excluding private repositories and applying
// pagination.
func GetWatchedRepos(userID int64, private bool, listOptions ListOptions) ([]*Repository, error) {
	sess := x.Where("watch.user_id=?", userID).
		And("`watch`.mode<>?", RepoWatchModeDont).
		Join("LEFT", "watch", "`repository`.id=`watch`.repo_id")
	if !private {
		sess = sess.And("is_private=?", false)
	}
	capacity := 10
	if listOptions.Page != 0 {
		sess = listOptions.setSessionPagination(sess)
		capacity = listOptions.PageSize
	}
	repos := make([]*Repository, 0, capacity)
	return repos, sess.Find(&repos)
}
// deleteKeysMarkedForDeletion deletes the given public keys (identified by
// their content) inside one transaction and returns true if any key was
// actually removed, i.e. the authorized_keys file needs regenerating.
// Per-key failures are logged and skipped rather than aborting the batch.
func deleteKeysMarkedForDeletion(keys []string) (bool, error) {
	// Start session
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return false, err
	}
	// Delete keys marked for deletion
	var sshKeysNeedUpdate bool
	for _, KeyToDelete := range keys {
		key, err := searchPublicKeyByContentWithEngine(sess, KeyToDelete)
		if err != nil {
			log.Error("SearchPublicKeyByContent: %v", err)
			continue
		}
		if err = deletePublicKeys(sess, key.ID); err != nil {
			log.Error("deletePublicKeys: %v", err)
			continue
		}
		sshKeysNeedUpdate = true
	}
	if err := sess.Commit(); err != nil {
		return false, err
	}
	return sshKeysNeedUpdate, nil
}
// addLdapSSHPublicKeys add a users public keys. Returns true if there are changes.
// Each element of sshPublicKeys may contain several keys in authorized_keys
// format; they are parsed and added one by one under a name derived from the
// login source and the key fingerprint.
func addLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
	var sshKeysNeedUpdate bool
	for _, sshKey := range sshPublicKeys {
		var err error
		found := false
		keys := []byte(sshKey)
	loop:
		// Parse keys from the attribute value until it is exhausted or a
		// parse error occurs.
		for len(keys) > 0 && err == nil {
			var out ssh.PublicKey
			// We ignore options as they are not relevant to Gitea
			out, _, _, keys, err = ssh.ParseAuthorizedKey(keys)
			if err != nil {
				break loop
			}
			found = true
			// Canonical marshalled form ends with a newline; strip it.
			marshalled := string(ssh.MarshalAuthorizedKey(out))
			marshalled = marshalled[:len(marshalled)-1]
			sshKeyName := fmt.Sprintf("%s-%s", s.Name, ssh.FingerprintSHA256(out))
			// Note: err here intentionally shadows the outer err, so an
			// AddPublicKey failure does not stop parsing the remaining keys.
			if _, err := AddPublicKey(usr.ID, sshKeyName, marshalled, s.ID); err != nil {
				if IsErrKeyAlreadyExist(err) {
					// Already-present keys are not an error: they are in sync.
					log.Trace("addLdapSSHPublicKeys[%s]: LDAP Public SSH Key %s already exists for user", sshKeyName, usr.Name)
				} else {
					log.Error("addLdapSSHPublicKeys[%s]: Error adding LDAP Public SSH Key for user %s: %v", sshKeyName, usr.Name, err)
				}
			} else {
				log.Trace("addLdapSSHPublicKeys[%s]: Added LDAP Public SSH Key for user %s", sshKeyName, usr.Name)
				sshKeysNeedUpdate = true
			}
		}
		// Warn only when the value yielded no valid key at all.
		if !found && err != nil {
			log.Warn("addLdapSSHPublicKeys[%s]: Skipping invalid LDAP Public SSH Key for user %s: %v", s.Name, usr.Name, sshKey)
		}
	}
	return sshKeysNeedUpdate
}
// synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes.
// It diffs the keys stored in the DB for this login source against the keys
// reported by LDAP, adds the missing ones and deletes the stale ones.
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
	var sshKeysNeedUpdate bool
	log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name)
	// Get Public Keys from DB with current LDAP source
	var giteaKeys []string
	keys, err := ListPublicLdapSSHKeys(usr.ID, s.ID)
	if err != nil {
		// Listing failure is logged but not fatal; the diff proceeds with
		// an empty DB key set.
		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error listing LDAP Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
	}
	for _, v := range keys {
		giteaKeys = append(giteaKeys, v.OmitEmail())
	}
	// Get Public Keys from LDAP and skip duplicate keys
	var ldapKeys []string
	for _, v := range sshPublicKeys {
		// Keep only "type base64data", dropping any trailing comment, so
		// the comparison matches the DB's OmitEmail form.
		sshKeySplit := strings.Split(v, " ")
		if len(sshKeySplit) > 1 {
			ldapKey := strings.Join(sshKeySplit[:2], " ")
			if !util.ExistsInSlice(ldapKey, ldapKeys) {
				ldapKeys = append(ldapKeys, ldapKey)
			}
		}
	}
	// Check if Public Key sync is needed
	if util.IsEqualSlice(giteaKeys, ldapKeys) {
		log.Trace("synchronizeLdapSSHPublicKeys[%s]: LDAP Public Keys are already in sync for %s (LDAP:%v/DB:%v)", s.Name, usr.Name, len(ldapKeys), len(giteaKeys))
		return false
	}
	log.Trace("synchronizeLdapSSHPublicKeys[%s]: LDAP Public Key needs update for user %s (LDAP:%v/DB:%v)", s.Name, usr.Name, len(ldapKeys), len(giteaKeys))
	// Add LDAP Public SSH Keys that doesn't already exist in DB
	var newLdapSSHKeys []string
	for _, LDAPPublicSSHKey := range ldapKeys {
		if !util.ExistsInSlice(LDAPPublicSSHKey, giteaKeys) {
			newLdapSSHKeys = append(newLdapSSHKeys, LDAPPublicSSHKey)
		}
	}
	if addLdapSSHPublicKeys(usr, s, newLdapSSHKeys) {
		sshKeysNeedUpdate = true
	}
	// Mark LDAP keys from DB that doesn't exist in LDAP for deletion
	var giteaKeysToDelete []string
	for _, giteaKey := range giteaKeys {
		if !util.ExistsInSlice(giteaKey, ldapKeys) {
			log.Trace("synchronizeLdapSSHPublicKeys[%s]: Marking LDAP Public SSH Key for deletion for user %s: %v", s.Name, usr.Name, giteaKey)
			giteaKeysToDelete = append(giteaKeysToDelete, giteaKey)
		}
	}
	// Delete LDAP keys from DB that doesn't exist in LDAP
	needUpd, err := deleteKeysMarkedForDeletion(giteaKeysToDelete)
	if err != nil {
		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error deleting LDAP Public SSH Keys marked for deletion for user %s: %v", s.Name, usr.Name, err)
	}
	if needUpd {
		sshKeysNeedUpdate = true
	}
	return sshKeysNeedUpdate
}
// SyncExternalUsers is used to synchronize users with external authorization source
// For every active, sync-enabled LDAP source it: creates users found in LDAP
// but not in the DB, optionally updates existing users (updateExisting), and
// deactivates DB users no longer present in LDAP. Cancellation via ctx is
// checked between major steps.
func SyncExternalUsers(ctx context.Context, updateExisting bool) error {
	log.Trace("Doing: SyncExternalUsers")
	ls, err := LoginSources()
	if err != nil {
		log.Error("SyncExternalUsers: %v", err)
		return err
	}
	for _, s := range ls {
		if !s.IsActived || !s.IsSyncEnabled {
			continue
		}
		// Honour cancellation between sources.
		select {
		case <-ctx.Done():
			log.Warn("SyncExternalUsers: Cancelled before update of %s", s.Name)
			return ErrCancelledf("Before update of %s", s.Name)
		default:
		}
		if s.IsLDAP() {
			log.Trace("Doing: SyncExternalUsers[%s]", s.Name)
			var existingUsers []int64
			var isAttributeSSHPublicKeySet = len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
			var sshKeysNeedUpdate bool
			// Find all users with this login type
			var users []*User
			err = x.Where("login_type = ?", LoginLDAP).
				And("login_source = ?", s.ID).
				Find(&users)
			if err != nil {
				log.Error("SyncExternalUsers: %v", err)
				return err
			}
			select {
			case <-ctx.Done():
				log.Warn("SyncExternalUsers: Cancelled before update of %s", s.Name)
				return ErrCancelledf("Before update of %s", s.Name)
			default:
			}
			sr, err := s.LDAP().SearchEntries()
			if err != nil {
				// A failing source is skipped, not fatal for the others.
				log.Error("SyncExternalUsers LDAP source failure [%s], skipped", s.Name)
				continue
			}
			if len(sr) == 0 {
				// Safety valve: an empty result would deactivate everyone
				// below, so require an explicit opt-in.
				if !s.LDAP().AllowDeactivateAll {
					log.Error("LDAP search found no entries but did not report an error. Refusing to deactivate all users")
					continue
				} else {
					log.Warn("LDAP search found no entries but did not report an error. All users will be deactivated as per settings")
				}
			}
			for _, su := range sr {
				select {
				case <-ctx.Done():
					log.Warn("SyncExternalUsers: Cancelled at update of %s before completed update of users", s.Name)
					// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
					if sshKeysNeedUpdate {
						err = RewriteAllPublicKeys()
						if err != nil {
							log.Error("RewriteAllPublicKeys: %v", err)
						}
					}
					return ErrCancelledf("During update of %s before completed update of users", s.Name)
				default:
				}
				if len(su.Username) == 0 {
					continue
				}
				// Synthesize a placeholder mail address when LDAP has none.
				if len(su.Mail) == 0 {
					su.Mail = fmt.Sprintf("%s@localhost", su.Username)
				}
				var usr *User
				// Search for existing user
				for _, du := range users {
					if du.LowerName == strings.ToLower(su.Username) {
						usr = du
						break
					}
				}
				fullName := composeFullName(su.Name, su.Surname, su.Username)
				// If no existing user found, create one
				if usr == nil {
					log.Trace("SyncExternalUsers[%s]: Creating user %s", s.Name, su.Username)
					usr = &User{
						LowerName: strings.ToLower(su.Username),
						Name: su.Username,
						FullName: fullName,
						LoginType: s.Type,
						LoginSource: s.ID,
						LoginName: su.Username,
						Email: su.Mail,
						IsAdmin: su.IsAdmin,
						IsRestricted: su.IsRestricted,
						IsActive: true,
					}
					err = CreateUser(usr)
					if err != nil {
						log.Error("SyncExternalUsers[%s]: Error creating user %s: %v", s.Name, su.Username, err)
					} else if isAttributeSSHPublicKeySet {
						log.Trace("SyncExternalUsers[%s]: Adding LDAP Public SSH Keys for user %s", s.Name, usr.Name)
						if addLdapSSHPublicKeys(usr, s, su.SSHPublicKey) {
							sshKeysNeedUpdate = true
						}
					}
				} else if updateExisting {
					// Remember seen users so the deactivation pass below
					// can skip them.
					existingUsers = append(existingUsers, usr.ID)
					// Synchronize SSH Public Key if that attribute is set
					if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(usr, s, su.SSHPublicKey) {
						sshKeysNeedUpdate = true
					}
					// Check if user data has changed
					if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) ||
						(len(s.LDAP().RestrictedFilter) > 0 && usr.IsRestricted != su.IsRestricted) ||
						!strings.EqualFold(usr.Email, su.Mail) ||
						usr.FullName != fullName ||
						!usr.IsActive {
						log.Trace("SyncExternalUsers[%s]: Updating user %s", s.Name, usr.Name)
						usr.FullName = fullName
						usr.Email = su.Mail
						// Change existing admin flag only if AdminFilter option is set
						if len(s.LDAP().AdminFilter) > 0 {
							usr.IsAdmin = su.IsAdmin
						}
						// Change existing restricted flag only if RestrictedFilter option is set
						if !usr.IsAdmin && len(s.LDAP().RestrictedFilter) > 0 {
							usr.IsRestricted = su.IsRestricted
						}
						usr.IsActive = true
						err = UpdateUserCols(usr, "full_name", "email", "is_admin", "is_restricted", "is_active")
						if err != nil {
							log.Error("SyncExternalUsers[%s]: Error updating user %s: %v", s.Name, usr.Name, err)
						}
					}
				}
			}
			// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
			if sshKeysNeedUpdate {
				err = RewriteAllPublicKeys()
				if err != nil {
					log.Error("RewriteAllPublicKeys: %v", err)
				}
			}
			select {
			case <-ctx.Done():
				log.Warn("SyncExternalUsers: Cancelled during update of %s before delete users", s.Name)
				return ErrCancelledf("During update of %s before delete users", s.Name)
			default:
			}
			// Deactivate users not present in LDAP
			if updateExisting {
				for _, usr := range users {
					found := false
					for _, uid := range existingUsers {
						if usr.ID == uid {
							found = true
							break
						}
					}
					if !found {
						log.Trace("SyncExternalUsers[%s]: Deactivating user %s", s.Name, usr.Name)
						usr.IsActive = false
						err = UpdateUserCols(usr, "is_active")
						if err != nil {
							log.Error("SyncExternalUsers[%s]: Error deactivating user %s: %v", s.Name, usr.Name, err)
						}
					}
				}
			}
		}
	}
	return nil
}
// IterateUser iterate users
// It pages through all users in batches of the configured buffer size and
// invokes f for each one; iteration stops at the first error from the
// database or from f.
func IterateUser(f func(user *User) error) error {
	batchSize := setting.Database.IterateBufferSize
	start := 0
	for {
		batch := make([]*User, 0, batchSize)
		if err := x.Limit(batchSize, start).Find(&batch); err != nil {
			return err
		}
		// An empty batch means we have consumed every row.
		if len(batch) == 0 {
			return nil
		}
		// Advance by what was actually returned, not by batchSize.
		start += len(batch)
		for _, user := range batch {
			if err := f(user); err != nil {
				return err
			}
		}
	}
}
|
package buf

import "github.com/bufbuild/buf/internal/pkg/cli/clicobra"

// version is the CLI version passed to clicobra.Main (development build).
const version = "0.9.0-dev"

// Main builds the root command for the given program name and options and
// runs it via clicobra.
func Main(use string, options ...RootCommandOption) {
	clicobra.Main(newRootCommand(use, options...), version)
}

// NewRootCommand creates a new root Command.
func NewRootCommand(use string, options ...RootCommandOption) *clicobra.Command {
	return newRootCommand(use, options...)
}

// RootCommandOption is an option for a root Command.
type RootCommandOption func(*clicobra.Command, *Flags)
Update to v0.9.0
package buf

import "github.com/bufbuild/buf/internal/pkg/cli/clicobra"

// version is the CLI version passed to clicobra.Main (release build).
const version = "0.9.0"

// Main builds the root command for the given program name and options and
// runs it via clicobra.
func Main(use string, options ...RootCommandOption) {
	clicobra.Main(newRootCommand(use, options...), version)
}

// NewRootCommand creates a new root Command.
func NewRootCommand(use string, options ...RootCommandOption) *clicobra.Command {
	return newRootCommand(use, options...)
}

// RootCommandOption is an option for a root Command.
type RootCommandOption func(*clicobra.Command, *Flags)
|
package console
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/cenkalti/rain/internal/jsonutil"
"github.com/cenkalti/rain/internal/rpctypes"
"github.com/cenkalti/rain/rainrpc"
"github.com/jroimartin/gocui"
)
const (
	// pages: top-level screens the console can display (Console.selectedPage).
	torrents int = iota
	help
)

const (
	// tabs: detail panes shown below the torrent list (Console.selectedTab).
	general int = iota
	stats
	trackers
	peers
	webseeds
)
// Console is a terminal UI client for a rain RPC server.
type Console struct {
	client *rainrpc.Client
	// Response of the last ListTorrents call, shown in the torrents view.
	torrents []rpctypes.Torrent
	// Error from the last ListTorrents call, if any.
	errTorrents error
	// ID of the currently selected torrent ("" when none is selected).
	selectedID string
	// Currently selected detail tab (general/stats/trackers/peers/webseeds).
	selectedTab int
	// Currently selected page (torrents/help).
	selectedPage int
	// Offset of the torrents/details split from the middle of the screen.
	tabAdjust int
	// Responses of the last per-torrent detail RPC calls.
	stats rpctypes.Stats
	trackers []rpctypes.Tracker
	peers []rpctypes.Peer
	webseeds []rpctypes.Webseed
	// Error from the last detail RPC call, if any.
	errDetails error
	// Set while a details refresh is pending; the view shows "refreshing...".
	updatingDetails bool
	// m guards the fields above that are shared between the update
	// goroutine and the draw callbacks.
	m sync.Mutex
	// Buffered (capacity 1) trigger channels for the update loop.
	updateTorrentsC chan struct{}
	updateDetailsC chan struct{}
	// Lifecycle of the updater goroutine; see startUpdating/stopUpdating.
	stopUpdatingC chan struct{}
	updating bool
}
// New returns a Console that talks to the given RPC client. The trigger
// channels are buffered with capacity 1 so triggers never block and
// coalesce when one is already pending.
func New(clt *rainrpc.Client) *Console {
	return &Console{
		client: clt,
		updateTorrentsC: make(chan struct{}, 1),
		updateDetailsC: make(chan struct{}, 1),
	}
}
// Run creates the gocui terminal UI, installs all keybindings and blocks in
// the main loop until the user quits or an error occurs.
func (c *Console) Run() error {
	g, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		return err
	}
	defer g.Close()
	g.SetManagerFunc(c.layout)
	// Global keys
	_ = g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, c.quit)
	_ = g.SetKeybinding("", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("", '?', gocui.ModNone, c.switchHelp)
	// Navigation
	_ = g.SetKeybinding("torrents", 'j', gocui.ModNone, c.cursorDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModNone, c.cursorUp)
	_ = g.SetKeybinding("torrents", 'j', gocui.ModAlt, c.tabAdjustDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModAlt, c.tabAdjustUp)
	_ = g.SetKeybinding("torrents", 'g', gocui.ModNone, c.goTop)
	_ = g.SetKeybinding("torrents", 'G', gocui.ModNone, c.goBottom)
	// Tabs
	_ = g.SetKeybinding("torrents", 'g', gocui.ModAlt, c.switchGeneral)
	_ = g.SetKeybinding("torrents", 's', gocui.ModAlt, c.switchStats)
	_ = g.SetKeybinding("torrents", 't', gocui.ModAlt, c.switchTrackers)
	_ = g.SetKeybinding("torrents", 'p', gocui.ModAlt, c.switchPeers)
	_ = g.SetKeybinding("torrents", 'w', gocui.ModAlt, c.switchWebseeds)
	// Torrent control
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModNone, c.startTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModAlt, c.stopTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlR, gocui.ModNone, c.removeTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModNone, c.announce)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlV, gocui.ModNone, c.verify)
	err = g.MainLoop()
	// ErrQuit is the normal exit path, not a failure.
	if err == gocui.ErrQuit {
		err = nil
	}
	return err
}
// startUpdating launches the background update goroutine if it is not
// already running. Called from layout, i.e. from the gocui main loop.
func (c *Console) startUpdating(g *gocui.Gui) {
	if c.updating {
		return
	}
	c.updating = true
	c.stopUpdatingC = make(chan struct{})
	go c.updateLoop(g, c.stopUpdatingC)
}

// stopUpdating signals the background update goroutine to exit. Safe to
// call when it is not running.
func (c *Console) stopUpdating() {
	if !c.updating {
		return
	}
	c.updating = false
	close(c.stopUpdatingC)
}
// layout is the gocui manager function: it draws the current page and
// starts/stops the background updater depending on which page is shown.
func (c *Console) layout(g *gocui.Gui) error {
	err := c.drawTitle(g)
	if err != nil {
		return err
	}
	// The updater only needs to run while the torrents page is visible.
	if c.selectedPage == torrents {
		c.startUpdating(g)
	} else {
		c.stopUpdating()
	}
	switch c.selectedPage {
	case torrents:
		err = c.drawTorrents(g)
		if err != nil {
			return err
		}
		err = c.drawDetails(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("torrents")
	case help:
		err = c.drawHelp(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("help")
	}
	return err
}
// drawTitle creates the full-screen background view carrying the title bar.
// gocui returns ErrUnknownView on the call that creates a view, so the
// title is only set once, on creation.
func (c *Console) drawTitle(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("title", -1, 0, maxX, maxY)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Title = "Rain [" + c.client.Addr() + "] (Press '?' for help)"
	}
	return nil
}
// drawHelp renders the help overlay listing the keybindings installed in
// Run. The view is created on first call (ErrUnknownView) and cleared and
// redrawn on subsequent calls.
func (c *Console) drawHelp(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("help", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Help"
	} else {
		v.Clear()
	}
	fmt.Fprintln(v, " q Quit")
	fmt.Fprintln(v, " j move down")
	fmt.Fprintln(v, " k move up")
	fmt.Fprintln(v, " alt+j move tab separator down")
	fmt.Fprintln(v, " alt+k move tab separator up")
	fmt.Fprintln(v, " g go to top")
	fmt.Fprintln(v, " G go to bottom")
	fmt.Fprintln(v, "")
	// Fixed: Run binds lowercase letters with alt for tab switching, and
	// these keys switch detail *tabs*, not pages; the help text previously
	// showed uppercase letters and said "page".
	fmt.Fprintln(v, " alt+g switch to General info tab")
	fmt.Fprintln(v, " alt+s switch to Stats tab")
	fmt.Fprintln(v, " alt+t switch to Trackers tab")
	fmt.Fprintln(v, " alt+p switch to Peers tab")
	fmt.Fprintln(v, " alt+w switch to Webseeds tab")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, " ctrl+s Start torrent")
	fmt.Fprintln(v, "ctrl+alt+s Stop torrent")
	fmt.Fprintln(v, " ctrl+r Remove torrent")
	fmt.Fprintln(v, " ctrl+a Announce torrent")
	fmt.Fprintln(v, " ctrl+v Verify torrent")
	return nil
}
// drawTorrents renders the torrent list in the upper half of the screen
// (split position adjusted by tabAdjust) and keeps the cursor anchored on
// the previously selected torrent when the list order changes.
func (c *Console) drawTorrents(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	// gocui returns ErrUnknownView when the view is first created.
	if v, err := g.SetView("torrents", -1, 0, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		v.Highlight = true
		v.SelBgColor = gocui.ColorGreen
		v.SelFgColor = gocui.ColorBlack
		v.Title = "Rain"
		fmt.Fprintln(v, "loading torrents...")
	} else {
		v.Clear()
		if c.errTorrents != nil {
			fmt.Fprintln(v, "error:", c.errTorrents)
			return nil
		}
		// Row index of the torrent that was selected before this redraw.
		selectedIDrow := -1
		for i, t := range c.torrents {
			fmt.Fprintf(v, "%3d %s %s\n", i+1, t.ID, t.Name)
			if t.ID == c.selectedID {
				selectedIDrow = i
			}
		}
		_, cy := v.Cursor()
		_, oy := v.Origin()
		selectedRow := cy + oy
		if selectedRow < len(c.torrents) {
			// If the torrent under the cursor is no longer the selected
			// one (list reordered), move the cursor back to the selection;
			// otherwise adopt the torrent under the cursor as selected.
			if c.torrents[selectedRow].ID != c.selectedID && selectedIDrow != -1 {
				_ = v.SetCursor(0, selectedIDrow)
			} else {
				c.setSelectedID(c.torrents[selectedRow].ID)
			}
		}
	}
	return nil
}
// drawDetails renders the lower pane showing the selected tab's data for
// the selected torrent: general summary, raw stats JSON, trackers, peers
// or webseeds.
func (c *Console) drawDetails(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	// gocui returns ErrUnknownView when the view is first created.
	if v, err := g.SetView("details", -1, split, maxX, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Wrap = true
		fmt.Fprintln(v, "loading details...")
	} else {
		v.Clear()
		switch c.selectedTab {
		case general:
			v.Title = "General Info"
		case stats:
			v.Title = "Stats"
		case trackers:
			v.Title = "Trackers"
		case peers:
			v.Title = "Peers"
		case webseeds:
			v.Title = "WebSeeds"
		}
		if c.selectedID == "" {
			return nil
		}
		// A refresh is in flight; don't show stale data.
		if c.updatingDetails {
			fmt.Fprintln(v, "refreshing...")
			return nil
		}
		if c.errDetails != nil {
			fmt.Fprintln(v, "error:", c.errDetails)
			return nil
		}
		switch c.selectedTab {
		case general:
			fmt.Fprintf(v, "Name: %s\n", c.stats.Name)
			fmt.Fprintf(v, "Private: %v\n", c.stats.Private)
			status := c.stats.Status
			if status == "Stopped" && c.stats.Error != nil {
				status = status + ": " + *c.stats.Error
			}
			fmt.Fprintf(v, "Status: %s\n", status)
			// Progress source depends on what the torrent is doing:
			// verification checks pieces, allocation writes bytes,
			// otherwise count pieces we have.
			var progress int
			if c.stats.Pieces.Total > 0 {
				switch c.stats.Status {
				case "Verifying":
					progress = int(c.stats.Pieces.Checked * 100 / c.stats.Pieces.Total)
				case "Allocating":
					progress = int(c.stats.Bytes.Allocated * 100 / c.stats.Bytes.Total)
				default:
					progress = int(c.stats.Pieces.Have * 100 / c.stats.Pieces.Total)
				}
			}
			fmt.Fprintf(v, "Progress: %d\n", progress)
			// Share ratio; guarded against division by zero.
			var ratio float64
			if c.stats.Bytes.Downloaded > 0 {
				ratio = float64(c.stats.Bytes.Uploaded) / float64(c.stats.Bytes.Downloaded)
			}
			fmt.Fprintf(v, "Ratio: %.2f\n", ratio)
			var size string
			switch {
			case c.stats.Bytes.Total < 1<<10:
				size = fmt.Sprintf("%d bytes", c.stats.Bytes.Total)
			case c.stats.Bytes.Total < 1<<20:
				size = fmt.Sprintf("%d KiB", c.stats.Bytes.Total/(1<<10))
			default:
				size = fmt.Sprintf("%d MiB", c.stats.Bytes.Total/(1<<20))
			}
			fmt.Fprintf(v, "Size: %s\n", size)
			fmt.Fprintf(v, "Peers: %d in %d out\n", c.stats.Peers.Incoming, c.stats.Peers.Outgoing)
			fmt.Fprintf(v, "Download speed: %5d KiB/s\n", c.stats.Speed.Download/1024)
			fmt.Fprintf(v, "Upload speed: %5d KiB/s\n", c.stats.Speed.Upload/1024)
			var eta string
			if c.stats.ETA != nil {
				eta = (time.Duration(*c.stats.ETA) * time.Second).String()
			}
			fmt.Fprintf(v, "ETA: %s\n", eta)
		case stats:
			// Raw stats are shown as pretty-printed JSON.
			b, err := jsonutil.MarshalCompactPretty(c.stats)
			if err != nil {
				fmt.Fprintln(v, "error:", err)
			} else {
				fmt.Fprintln(v, string(b))
			}
		case trackers:
			for i, t := range c.trackers {
				fmt.Fprintf(v, "#%d %s\n", i+1, t.URL)
				switch t.Status {
				case "Not working":
					errStr := *t.Error
					if t.ErrorUnknown {
						errStr = errStr + " (" + *t.ErrorInternal + ")"
					}
					fmt.Fprintf(v, " Status: %s, Error: %s\n", t.Status, errStr)
				default:
					if t.Warning != nil {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d Warning: %s\n", t.Status, t.Seeders, t.Leechers, *t.Warning)
					} else {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d\n", t.Status, t.Seeders, t.Leechers)
					}
				}
				var nextAnnounce string
				if t.NextAnnounce.IsZero() {
					nextAnnounce = "Unknown"
				} else {
					nextAnnounce = t.NextAnnounce.Time.Format(time.RFC3339)
				}
				fmt.Fprintf(v, " Last announce: %s, Next announce: %s\n", t.LastAnnounce.Time.Format(time.RFC3339), nextAnnounce)
			}
		case peers:
			format := "%2s %21s %7s %8s %6s %s\n"
			fmt.Fprintf(v, format, "#", "Addr", "Flags", "Download", "Upload", "Client")
			for i, p := range c.peers {
				num := fmt.Sprintf("%d", i+1)
				// Speeds are shown in KiB/s, blank when idle.
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var ul string
				if p.UploadSpeed > 0 {
					ul = fmt.Sprintf("%d", p.UploadSpeed/1024)
				}
				fmt.Fprintf(v, format, num, p.Addr, flags(p), dl, ul, p.Client)
			}
		case webseeds:
			format := "%2s %40s %8s %s\n"
			fmt.Fprintf(v, format, "#", "URL", "Speed", "Error")
			for i, p := range c.webseeds {
				num := fmt.Sprintf("%d", i+1)
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var errstr string
				if p.Error != nil {
					errstr = *p.Error
				}
				fmt.Fprintf(v, format, num, p.URL, dl, errstr)
			}
		}
	}
	return nil
}
// updateLoop is the background updater goroutine: it refreshes the torrent
// list and the details pane once per second and whenever a trigger channel
// fires, until stop is closed.
func (c *Console) updateLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	// Populate the list immediately instead of waiting a full tick.
	c.triggerUpdateTorrents()
	for {
		select {
		case <-ticker.C:
			c.triggerUpdateTorrents()
			c.triggerUpdateDetails(false)
		case <-c.updateTorrentsC:
			c.updateTorrents(g)
		case <-c.updateDetailsC:
			// Details RPCs run on their own goroutine so a slow call does
			// not stall the torrent-list refresh.
			go c.updateDetails(g)
		case <-stop:
			return
		}
	}
}
// updateTorrents fetches the torrent list, sorts it by add time (ID as a
// tiebreaker for a stable order), stores it and schedules a redraw.
func (c *Console) updateTorrents(g *gocui.Gui) {
	torrents, err := c.client.ListTorrents()
	sort.Slice(torrents, func(i, j int) bool {
		a, b := torrents[i], torrents[j]
		if a.AddedAt.Equal(b.AddedAt.Time) {
			return a.ID < b.ID
		}
		return a.AddedAt.Time.Before(b.AddedAt.Time)
	})
	c.m.Lock()
	c.torrents = torrents
	c.errTorrents = err
	// Keep the selection valid: clear it when the list is empty, select
	// the first torrent when nothing was selected yet.
	if len(c.torrents) == 0 {
		c.setSelectedID("")
	} else if c.selectedID == "" {
		c.setSelectedID(c.torrents[0].ID)
	}
	c.m.Unlock()
	g.Update(c.drawTorrents)
}
// updateDetails fetches the data for the currently selected tab of the
// currently selected torrent and schedules a redraw. Results are sorted
// for a stable display order; stale results (selection changed while the
// RPC was in flight) are dropped.
func (c *Console) updateDetails(g *gocui.Gui) {
	c.m.Lock()
	selectedID := c.selectedID
	c.m.Unlock()
	if selectedID == "" {
		return
	}
	switch c.selectedTab {
	case general, stats:
		stats, err := c.client.GetTorrentStats(selectedID)
		c.m.Lock()
		// Guard against a nil response when the RPC fails; dereferencing
		// it unconditionally would panic. errDetails is shown instead.
		if stats != nil {
			c.stats = *stats
		}
		c.errDetails = err
		c.m.Unlock()
	case trackers:
		trackers, err := c.client.GetTorrentTrackers(selectedID)
		sort.Slice(trackers, func(i, j int) bool { return trackers[i].URL < trackers[j].URL })
		c.m.Lock()
		c.trackers = trackers
		c.errDetails = err
		c.m.Unlock()
	case peers:
		peers, err := c.client.GetTorrentPeers(selectedID)
		// Order by connection time, address as a tiebreaker.
		sort.Slice(peers, func(i, j int) bool {
			a, b := peers[i], peers[j]
			if a.ConnectedAt.Equal(b.ConnectedAt.Time) {
				return a.Addr < b.Addr
			}
			return a.ConnectedAt.Time.Before(b.ConnectedAt.Time)
		})
		c.m.Lock()
		c.peers = peers
		c.errDetails = err
		c.m.Unlock()
	case webseeds:
		webseeds, err := c.client.GetTorrentWebseeds(selectedID)
		sort.Slice(webseeds, func(i, j int) bool {
			a, b := webseeds[i], webseeds[j]
			return a.URL < b.URL
		})
		c.m.Lock()
		c.webseeds = webseeds
		c.errDetails = err
		c.m.Unlock()
	}
	c.m.Lock()
	defer c.m.Unlock()
	c.updatingDetails = false
	// Selection changed while fetching: drop the stale result instead of
	// drawing it for the wrong torrent.
	if selectedID != c.selectedID {
		return
	}
	g.Update(c.drawDetails)
}
// quit handles the quit keys. On the help page it closes the overlay and
// returns to the torrents page; anywhere else it ends the main loop by
// returning gocui.ErrQuit.
func (c *Console) quit(g *gocui.Gui, v *gocui.View) error {
	if c.selectedPage != help {
		return gocui.ErrQuit
	}
	c.selectedPage = torrents
	return g.DeleteView("help")
}
// switchRow moves the selection to the given row, clamping it into range
// and scrolling the view's origin when the list is taller than the view.
// Caller must hold c.m.
func (c *Console) switchRow(v *gocui.View, row int) error {
	switch {
	case len(c.torrents) == 0:
		return nil
	case row < 0:
		row = 0
	case row >= len(c.torrents):
		row = len(c.torrents) - 1
	}
	_, cy := v.Cursor()
	_, oy := v.Origin()
	_, height := v.Size()
	currentRow := oy + cy
	if len(c.torrents) > height {
		if row > currentRow {
			// scroll down
			if row >= oy+height {
				// move origin so the target row becomes the last visible line
				_ = v.SetOrigin(0, row-height+1)
				_ = v.SetCursor(0, height-1)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		} else {
			// scroll up
			if row < oy {
				// move origin so the target row becomes the first visible line
				_ = v.SetOrigin(0, row)
				_ = v.SetCursor(0, 0)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		}
	} else {
		// Everything fits: no scrolling needed, just move the cursor.
		_ = v.SetOrigin(0, 0)
		_ = v.SetCursor(0, row)
	}
	c.setSelectedID(c.torrents[row].ID)
	return nil
}
// cursorDown moves the selection one row down, doing nothing at the end of
// the list.
func (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	next := oy + cy + 1
	if next == len(c.torrents) {
		return nil
	}
	return c.switchRow(v, next)
}
// cursorUp moves the selection one row up, doing nothing at the top of the
// list.
func (c *Console) cursorUp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	prev := oy + cy - 1
	if prev < 0 {
		return nil
	}
	return c.switchRow(v, prev)
}
// goTop selects the first torrent in the list.
func (c *Console) goTop(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) > 0 {
		return c.switchRow(v, 0)
	}
	return nil
}
// goBottom selects the last torrent in the list.
func (c *Console) goBottom(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if n := len(c.torrents); n > 0 {
		return c.switchRow(v, n-1)
	}
	return nil
}
// removeTorrent removes the selected torrent and refreshes the list.
func (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.RemoveTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateTorrents()
	return nil
}
// setSelectedID records the selected torrent ID and, when it actually
// changed, triggers a details refresh. All call sites hold c.m, so this
// must be called with the lock held.
func (c *Console) setSelectedID(id string) {
	changed := id != c.selectedID
	c.selectedID = id
	if changed {
		c.triggerUpdateDetails(true)
	}
}
// startTorrent starts the selected torrent and refreshes the details pane.
func (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.StartTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// stopTorrent stops the selected torrent and refreshes the details pane.
func (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.StopTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// announce forces a tracker announce for the selected torrent and
// refreshes the details pane.
func (c *Console) announce(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.AnnounceTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// verify starts data verification for the selected torrent and refreshes
// the details pane.
func (c *Console) verify(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.VerifyTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// tabAdjustDown moves the torrents/details split one row down, limited to
// the bottom of the screen.
func (c *Console) tabAdjustDown(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	limit := maxY / 2
	if c.tabAdjust < limit {
		c.tabAdjust++
	}
	return nil
}

// tabAdjustUp moves the torrents/details split one row up, limited to the
// top of the screen.
func (c *Console) tabAdjustUp(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	limit := maxY / 2
	if c.tabAdjust > -limit+1 {
		c.tabAdjust--
	}
	return nil
}
// switchGeneral selects the General info tab and refreshes the details pane.
func (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = general
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchStats selects the Stats tab and refreshes the details pane.
func (c *Console) switchStats(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = stats
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchTrackers selects the Trackers tab and refreshes the details pane.
func (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = trackers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchPeers selects the Peers tab and refreshes the details pane.
func (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = peers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchWebseeds selects the Webseeds tab and refreshes the details pane.
func (c *Console) switchWebseeds(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = webseeds
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchHelp shows the help page; layout draws it on the next cycle.
func (c *Console) switchHelp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedPage = help
	c.m.Unlock()
	return nil
}
// triggerUpdateDetails asks the update loop to refresh the details pane.
// When clear is true the pane shows "refreshing..." until new data arrives.
// The send is non-blocking: if a trigger is already pending it coalesces.
func (c *Console) triggerUpdateDetails(clear bool) {
	if clear {
		c.updatingDetails = true
	}
	select {
	case c.updateDetailsC <- struct{}{}:
	default:
	}
}

// triggerUpdateTorrents asks the update loop to refresh the torrent list.
// Non-blocking; pending triggers coalesce.
func (c *Console) triggerUpdateTorrents() {
	select {
	case c.updateTorrentsC <- struct{}{}:
	default:
	}
}
// flags renders a fixed-width 6-character status string for a peer:
// download state, upload state, optimistic-unchoke, snubbed, discovery
// source and encryption, in that order. A space means "not set".
func flags(p rpctypes.Peer) string {
	// pick returns yes when cond holds, otherwise no.
	pick := func(cond bool, yes, no string) string {
		if cond {
			return yes
		}
		return no
	}
	var sb strings.Builder
	sb.Grow(6)
	// 1: download state — D/d while interested (unchoked/choked by peer),
	// K when the peer unchoked us without our interest.
	if p.ClientInterested {
		sb.WriteString(pick(p.PeerChoking, "d", "D"))
	} else {
		sb.WriteString(pick(p.PeerChoking, " ", "K"))
	}
	// 2: upload state — U/u while the peer is interested (unchoked/choked
	// by us), ? when we unchoked an uninterested peer.
	if p.PeerInterested {
		sb.WriteString(pick(p.ClientChoking, "u", "U"))
	} else {
		sb.WriteString(pick(p.ClientChoking, " ", "?"))
	}
	// 3: optimistic unchoke, 4: snubbed.
	sb.WriteString(pick(p.OptimisticUnchoked, "O", " "))
	sb.WriteString(pick(p.Snubbed, "S", " "))
	// 5: how the peer was discovered.
	switch p.Source {
	case "DHT":
		sb.WriteString("H")
	case "PEX":
		sb.WriteString("X")
	case "INCOMING":
		sb.WriteString("I")
	case "MANUAL":
		sb.WriteString("M")
	default:
		sb.WriteString(" ")
	}
	// 6: encryption — full stream beats handshake-only.
	switch {
	case p.EncryptedStream:
		sb.WriteString("E")
	case p.EncryptedHandshake:
		sb.WriteString("e")
	default:
		sb.WriteString(" ")
	}
	return sb.String()
}
show session-stats in console
package console
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/cenkalti/rain/internal/jsonutil"
"github.com/cenkalti/rain/internal/rpctypes"
"github.com/cenkalti/rain/rainrpc"
"github.com/jroimartin/gocui"
)
const (
	// pages: top-level screens the console can display (Console.selectedPage).
	torrents int = iota
	sessionStats
	help
)

const (
	// tabs: detail panes shown below the torrent list (Console.selectedTab).
	general int = iota
	stats
	trackers
	peers
	webseeds
)
// Console is a terminal UI client for a rain RPC server.
type Console struct {
	client *rainrpc.Client
	// protects the shared state below against concurrent access from the
	// updater goroutines and the draw callbacks
	m sync.Mutex
	// error from listing torrents rpc call
	errTorrents error
	// error from getting stats/trackers/peers/etc...
	errDetails error
	// error from getting session stats
	errSessionStats error
	// id of currently selected torrent ("" when none)
	selectedID string
	// selected detail tab
	selectedTab int
	// selected global page
	selectedPage int
	// vertical offset of the torrents/details split from the middle row
	tabAdjust int
	// fields to hold responses from rpc requests
	torrents []rpctypes.Torrent
	stats rpctypes.Stats
	sessionStats rpctypes.SessionStats
	trackers []rpctypes.Tracker
	peers []rpctypes.Peer
	webseeds []rpctypes.Webseed
	// whether details tab is currently updating state
	updatingDetails bool
	// buffered (cap 1) channels for triggering refresh after view update / key events
	updateTorrentsC chan struct{}
	updateDetailsC chan struct{}
	// state for updater goroutine for updating torrents list and details tab
	stopUpdatingTorrentsC chan struct{}
	updatingTorrents bool
	// state for updater goroutine for updating session-stats page
	stopUpdatingSessionStatsC chan struct{}
	updatingSessionStats bool
}
// New returns a Console that talks to the given RPC client. The trigger
// channels are buffered with capacity 1 so triggers never block and
// coalesce when one is already pending.
func New(clt *rainrpc.Client) *Console {
	return &Console{
		client: clt,
		updateTorrentsC: make(chan struct{}, 1),
		updateDetailsC: make(chan struct{}, 1),
	}
}
// Run creates the gocui terminal UI, installs all keybindings and blocks in
// the main loop until the user quits or an error occurs.
func (c *Console) Run() error {
	g, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		return err
	}
	defer g.Close()
	g.SetManagerFunc(c.layout)
	// Global keys
	_ = g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, c.quit)
	_ = g.SetKeybinding("", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("", '?', gocui.ModNone, c.switchHelp)
	_ = g.SetKeybinding("", 'A', gocui.ModNone, c.switchSessionStats)
	// Navigation
	_ = g.SetKeybinding("torrents", 'j', gocui.ModNone, c.cursorDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModNone, c.cursorUp)
	_ = g.SetKeybinding("torrents", 'j', gocui.ModAlt, c.tabAdjustDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModAlt, c.tabAdjustUp)
	_ = g.SetKeybinding("torrents", 'g', gocui.ModNone, c.goTop)
	_ = g.SetKeybinding("torrents", 'G', gocui.ModNone, c.goBottom)
	// Tabs
	_ = g.SetKeybinding("torrents", 'g', gocui.ModAlt, c.switchGeneral)
	_ = g.SetKeybinding("torrents", 's', gocui.ModAlt, c.switchStats)
	_ = g.SetKeybinding("torrents", 't', gocui.ModAlt, c.switchTrackers)
	_ = g.SetKeybinding("torrents", 'p', gocui.ModAlt, c.switchPeers)
	_ = g.SetKeybinding("torrents", 'w', gocui.ModAlt, c.switchWebseeds)
	// Torrent control
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModNone, c.startTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModAlt, c.stopTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlR, gocui.ModNone, c.removeTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModNone, c.announce)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlV, gocui.ModNone, c.verify)
	err = g.MainLoop()
	// ErrQuit is the normal exit path, not a failure.
	if err == gocui.ErrQuit {
		err = nil
	}
	return err
}
// startUpdatingTorrents launches the torrents/details updater goroutine if
// it is not already running. Called from layout (gocui main loop).
func (c *Console) startUpdatingTorrents(g *gocui.Gui) {
	if c.updatingTorrents {
		return
	}
	c.updatingTorrents = true
	c.stopUpdatingTorrentsC = make(chan struct{})
	go c.updateTorrentsAndDetailsLoop(g, c.stopUpdatingTorrentsC)
}

// stopUpdatingTorrents signals the torrents/details updater goroutine to
// exit. Safe to call when it is not running.
func (c *Console) stopUpdatingTorrents() {
	if !c.updatingTorrents {
		return
	}
	c.updatingTorrents = false
	close(c.stopUpdatingTorrentsC)
}

// startUpdatingSessionStats launches the session-stats updater goroutine
// if it is not already running.
func (c *Console) startUpdatingSessionStats(g *gocui.Gui) {
	if c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = true
	c.stopUpdatingSessionStatsC = make(chan struct{})
	go c.updateSessionStatsLoop(g, c.stopUpdatingSessionStatsC)
}

// stopUpdatingSessionStats signals the session-stats updater goroutine to
// exit. Safe to call when it is not running.
func (c *Console) stopUpdatingSessionStats() {
	if !c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = false
	close(c.stopUpdatingSessionStatsC)
}
// layout is the gocui manager function: it draws the current page, starts
// or stops the matching background updater and tears down views belonging
// to other pages.
func (c *Console) layout(g *gocui.Gui) error {
	err := c.drawTitle(g)
	if err != nil {
		return err
	}
	// Each page's updater runs only while that page is visible.
	if c.selectedPage == torrents {
		c.startUpdatingTorrents(g)
	} else {
		c.stopUpdatingTorrents()
	}
	if c.selectedPage == sessionStats {
		c.startUpdatingSessionStats(g)
	} else {
		c.stopUpdatingSessionStats()
		_ = g.DeleteView("session-stats")
	}
	if c.selectedPage != help {
		_ = g.DeleteView("help")
	}
	switch c.selectedPage {
	case torrents:
		err = c.drawTorrents(g)
		if err != nil {
			return err
		}
		err = c.drawDetails(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("torrents")
	case sessionStats:
		err = c.drawSessionStats(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("session-stats")
	case help:
		err = c.drawHelp(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("help")
	}
	return err
}
// drawTitle renders the one-line title bar. gocui.ErrUnknownView means the
// view was just created, in which case the static title text is set once;
// any other error is propagated.
func (c *Console) drawTitle(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("title", -1, 0, maxX, maxY)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Title = "Rain [" + c.client.Addr() + "] (Press '?' for help)"
	}
	return nil
}
// drawHelp renders the help page listing all key bindings. On first creation
// (ErrUnknownView) the frame and title are set; on later calls the view is
// cleared and the text rewritten.
func (c *Console) drawHelp(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("help", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Help"
	} else {
		v.Clear()
	}
	fmt.Fprintln(v, " q Quit")
	fmt.Fprintln(v, " j move down")
	fmt.Fprintln(v, " k move up")
	fmt.Fprintln(v, " alt+j move tab separator down")
	fmt.Fprintln(v, " alt+k move tab separator up")
	fmt.Fprintln(v, " g go to top")
	fmt.Fprintln(v, " G go to bottom")
	fmt.Fprintln(v, " A show session stats page")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, " alt+g switch to General info tab")
	fmt.Fprintln(v, " alt+s switch to Stats tab")
	fmt.Fprintln(v, " alt+t switch to Trackers tab")
	fmt.Fprintln(v, " alt+p switch to Peers tab")
	fmt.Fprintln(v, " alt+w switch to Webseeds tab")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, " ctrl+s Start torrent")
	fmt.Fprintln(v, "ctrl+alt+s Stop torrent")
	fmt.Fprintln(v, " ctrl+R Remove torrent")
	fmt.Fprintln(v, " ctrl+a Announce torrent")
	fmt.Fprintln(v, " ctrl+v Verify torrent")
	return nil
}
// drawSessionStats renders the session-stats page. On first creation a
// "loading..." placeholder is shown; afterwards the cached stats (or the
// cached RPC error) are rendered as pretty-printed JSON.
func (c *Console) drawSessionStats(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("session-stats", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Session Stats"
		fmt.Fprintln(v, "loading...")
	} else {
		v.Clear()
		if c.errSessionStats != nil {
			fmt.Fprintln(v, "error:", c.errSessionStats)
			return nil
		}
		b, err := jsonutil.MarshalCompactPretty(c.sessionStats)
		if err != nil {
			fmt.Fprintln(v, "error:", err)
		} else {
			fmt.Fprintln(v, string(b))
		}
	}
	return nil
}
// drawTorrents renders the torrents list in the top half of the screen
// (adjusted by tabAdjust) and keeps the cursor in sync with selectedID:
// if the row under the cursor no longer matches the selected torrent (the
// list changed), the cursor is moved back to the selected torrent's row;
// otherwise the selection follows the cursor.
func (c *Console) drawTorrents(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	if v, err := g.SetView("torrents", -1, 0, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		v.Highlight = true
		v.SelBgColor = gocui.ColorGreen
		v.SelFgColor = gocui.ColorBlack
		v.Title = "Rain"
		fmt.Fprintln(v, "loading torrents...")
	} else {
		v.Clear()
		if c.errTorrents != nil {
			fmt.Fprintln(v, "error:", c.errTorrents)
			return nil
		}
		// Remember which row holds the currently selected torrent.
		selectedIDrow := -1
		for i, t := range c.torrents {
			fmt.Fprintf(v, "%3d %s %s\n", i+1, t.ID, t.Name)
			if t.ID == c.selectedID {
				selectedIDrow = i
			}
		}
		_, cy := v.Cursor()
		_, oy := v.Origin()
		selectedRow := cy + oy // absolute row = cursor offset + scroll origin
		if selectedRow < len(c.torrents) {
			if c.torrents[selectedRow].ID != c.selectedID && selectedIDrow != -1 {
				// List shifted under the cursor: snap cursor back to selection.
				_ = v.SetCursor(0, selectedIDrow)
			} else {
				c.setSelectedID(c.torrents[selectedRow].ID)
			}
		}
	}
	return nil
}
// drawDetails renders the bottom pane showing the selected torrent's details
// for the currently selected tab (general / stats / trackers / peers /
// webseeds). It only formats the state cached by updateDetails; it performs
// no RPC calls itself.
func (c *Console) drawDetails(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	if v, err := g.SetView("details", -1, split, maxX, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Wrap = true
		fmt.Fprintln(v, "loading details...")
	} else {
		v.Clear()
		switch c.selectedTab {
		case general:
			v.Title = "General Info"
		case stats:
			v.Title = "Stats"
		case trackers:
			v.Title = "Trackers"
		case peers:
			v.Title = "Peers"
		case webseeds:
			v.Title = "WebSeeds"
		}
		if c.selectedID == "" {
			return nil
		}
		if c.updatingDetails {
			fmt.Fprintln(v, "refreshing...")
			return nil
		}
		if c.errDetails != nil {
			fmt.Fprintln(v, "error:", c.errDetails)
			return nil
		}
		switch c.selectedTab {
		case general:
			fmt.Fprintf(v, "Name: %s\n", c.stats.Name)
			fmt.Fprintf(v, "Private: %v\n", c.stats.Private)
			status := c.stats.Status
			if status == "Stopped" && c.stats.Error != nil {
				status = status + ": " + *c.stats.Error
			}
			fmt.Fprintf(v, "Status: %s\n", status)
			// Progress source depends on what the torrent is doing:
			// pieces checked while verifying, bytes allocated while
			// allocating, pieces downloaded otherwise.
			var progress int
			if c.stats.Pieces.Total > 0 {
				switch c.stats.Status {
				case "Verifying":
					progress = int(c.stats.Pieces.Checked * 100 / c.stats.Pieces.Total)
				case "Allocating":
					progress = int(c.stats.Bytes.Allocated * 100 / c.stats.Bytes.Total)
				default:
					progress = int(c.stats.Pieces.Have * 100 / c.stats.Pieces.Total)
				}
			}
			fmt.Fprintf(v, "Progress: %d\n", progress)
			// Share ratio; guarded against division by zero.
			var ratio float64
			if c.stats.Bytes.Downloaded > 0 {
				ratio = float64(c.stats.Bytes.Uploaded) / float64(c.stats.Bytes.Downloaded)
			}
			fmt.Fprintf(v, "Ratio: %.2f\n", ratio)
			// Human-readable size with bytes/KiB/MiB thresholds.
			var size string
			switch {
			case c.stats.Bytes.Total < 1<<10:
				size = fmt.Sprintf("%d bytes", c.stats.Bytes.Total)
			case c.stats.Bytes.Total < 1<<20:
				size = fmt.Sprintf("%d KiB", c.stats.Bytes.Total/(1<<10))
			default:
				size = fmt.Sprintf("%d MiB", c.stats.Bytes.Total/(1<<20))
			}
			fmt.Fprintf(v, "Size: %s\n", size)
			fmt.Fprintf(v, "Peers: %d in %d out\n", c.stats.Peers.Incoming, c.stats.Peers.Outgoing)
			fmt.Fprintf(v, "Download speed: %5d KiB/s\n", c.stats.Speed.Download/1024)
			fmt.Fprintf(v, "Upload speed: %5d KiB/s\n", c.stats.Speed.Upload/1024)
			var eta string
			if c.stats.ETA != nil {
				eta = (time.Duration(*c.stats.ETA) * time.Second).String()
			}
			fmt.Fprintf(v, "ETA: %s\n", eta)
		case stats:
			b, err := jsonutil.MarshalCompactPretty(c.stats)
			if err != nil {
				fmt.Fprintln(v, "error:", err)
			} else {
				fmt.Fprintln(v, string(b))
			}
		case trackers:
			for i, t := range c.trackers {
				fmt.Fprintf(v, "#%d %s\n", i+1, t.URL)
				switch t.Status {
				case "Not working":
					// NOTE(review): assumes t.Error is non-nil whenever the
					// status is "Not working" — confirm against rpctypes.
					errStr := *t.Error
					if t.ErrorUnknown {
						errStr = errStr + " (" + *t.ErrorInternal + ")"
					}
					fmt.Fprintf(v, " Status: %s, Error: %s\n", t.Status, errStr)
				default:
					if t.Warning != nil {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d Warning: %s\n", t.Status, t.Seeders, t.Leechers, *t.Warning)
					} else {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d\n", t.Status, t.Seeders, t.Leechers)
					}
				}
				var nextAnnounce string
				if t.NextAnnounce.IsZero() {
					nextAnnounce = "Unknown"
				} else {
					nextAnnounce = t.NextAnnounce.Time.Format(time.RFC3339)
				}
				fmt.Fprintf(v, " Last announce: %s, Next announce: %s\n", t.LastAnnounce.Time.Format(time.RFC3339), nextAnnounce)
			}
		case peers:
			format := "%2s %21s %7s %8s %6s %s\n"
			fmt.Fprintf(v, format, "#", "Addr", "Flags", "Download", "Upload", "Client")
			for i, p := range c.peers {
				num := fmt.Sprintf("%d", i+1)
				// Speeds are shown in KiB/s; blank when zero.
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var ul string
				if p.UploadSpeed > 0 {
					ul = fmt.Sprintf("%d", p.UploadSpeed/1024)
				}
				fmt.Fprintf(v, format, num, p.Addr, flags(p), dl, ul, p.Client)
			}
		case webseeds:
			format := "%2s %40s %8s %s\n"
			fmt.Fprintf(v, format, "#", "URL", "Speed", "Error")
			for i, p := range c.webseeds {
				num := fmt.Sprintf("%d", i+1)
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var errstr string
				if p.Error != nil {
					errstr = *p.Error
				}
				fmt.Fprintf(v, format, num, p.URL, dl, errstr)
			}
		}
	}
	return nil
}
// updateTorrentsAndDetailsLoop is the torrents-page updater goroutine. Every
// second it triggers a refresh of both the torrents list and the details
// pane; explicit trigger channels allow immediate refresh after key events.
// It exits when stop is closed.
func (c *Console) updateTorrentsAndDetailsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.triggerUpdateTorrents() // populate the list immediately on page entry
	for {
		select {
		case <-ticker.C:
			c.triggerUpdateTorrents()
			c.triggerUpdateDetails(false)
		case <-c.updateTorrentsC:
			c.updateTorrents(g)
		case <-c.updateDetailsC:
			// Run async so a slow details RPC does not block list updates.
			go c.updateDetails(g)
		case <-stop:
			return
		}
	}
}

// updateSessionStatsLoop is the session-stats-page updater goroutine: it
// refreshes once immediately, then every second until stop is closed.
func (c *Console) updateSessionStatsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.updateSessionStats(g)
	for {
		select {
		case <-ticker.C:
			c.updateSessionStats(g)
		case <-stop:
			return
		}
	}
}
// updateTorrents fetches the torrent list over RPC, sorts it by add time
// (ID as tiebreaker), stores it in shared state, fixes up the selection when
// the list became empty or nothing was selected, and schedules a redraw.
func (c *Console) updateTorrents(g *gocui.Gui) {
	torrents, err := c.client.ListTorrents()
	sort.Slice(torrents, func(i, j int) bool {
		a, b := torrents[i], torrents[j]
		if a.AddedAt.Equal(b.AddedAt.Time) {
			return a.ID < b.ID
		}
		return a.AddedAt.Time.Before(b.AddedAt.Time)
	})
	c.m.Lock()
	c.torrents = torrents
	c.errTorrents = err
	if len(c.torrents) == 0 {
		c.setSelectedID("")
	} else if c.selectedID == "" {
		c.setSelectedID(c.torrents[0].ID)
	}
	c.m.Unlock()
	g.Update(c.drawTorrents)
}
// updateDetails fetches the data for the currently selected details tab over
// RPC and stores it in shared state, then schedules a redraw unless the
// selection changed while the RPC was in flight.
//
// Fix: GetTorrentStats returns a nil pointer on error; the previous code
// unconditionally dereferenced it (`c.stats = *stats`), panicking whenever
// the RPC failed. The result is now only copied when non-nil; the error is
// still recorded and shown by drawDetails.
//
// NOTE(review): c.selectedTab is read here without holding c.m while it is
// written under the lock elsewhere — looks like a benign UI race, but
// confirm with -race.
func (c *Console) updateDetails(g *gocui.Gui) {
	c.m.Lock()
	selectedID := c.selectedID
	c.m.Unlock()
	if selectedID == "" {
		return
	}
	switch c.selectedTab {
	case general, stats:
		stats, err := c.client.GetTorrentStats(selectedID)
		c.m.Lock()
		if stats != nil {
			c.stats = *stats
		}
		c.errDetails = err
		c.m.Unlock()
	case trackers:
		trackers, err := c.client.GetTorrentTrackers(selectedID)
		sort.Slice(trackers, func(i, j int) bool { return trackers[i].URL < trackers[j].URL })
		c.m.Lock()
		c.trackers = trackers
		c.errDetails = err
		c.m.Unlock()
	case peers:
		peers, err := c.client.GetTorrentPeers(selectedID)
		// Stable display order: connection time, then address.
		sort.Slice(peers, func(i, j int) bool {
			a, b := peers[i], peers[j]
			if a.ConnectedAt.Equal(b.ConnectedAt.Time) {
				return a.Addr < b.Addr
			}
			return a.ConnectedAt.Time.Before(b.ConnectedAt.Time)
		})
		c.m.Lock()
		c.peers = peers
		c.errDetails = err
		c.m.Unlock()
	case webseeds:
		webseeds, err := c.client.GetTorrentWebseeds(selectedID)
		sort.Slice(webseeds, func(i, j int) bool {
			a, b := webseeds[i], webseeds[j]
			return a.URL < b.URL
		})
		c.m.Lock()
		c.webseeds = webseeds
		c.errDetails = err
		c.m.Unlock()
	}
	c.m.Lock()
	defer c.m.Unlock()
	c.updatingDetails = false
	// Selection moved while we were fetching; the data is stale, skip redraw.
	if selectedID != c.selectedID {
		return
	}
	g.Update(c.drawDetails)
}
// updateSessionStats fetches session-wide stats over RPC, stores them in
// shared state and schedules a redraw of the session-stats page.
//
// Fix: GetSessionStats returns a nil pointer on error; the previous code
// unconditionally dereferenced it (`c.sessionStats = *stats`), panicking
// whenever the RPC failed. The result is now only copied when non-nil; the
// error is recorded and shown by drawSessionStats.
func (c *Console) updateSessionStats(g *gocui.Gui) {
	stats, err := c.client.GetSessionStats()
	c.m.Lock()
	defer c.m.Unlock()
	if stats != nil {
		c.sessionStats = *stats
	}
	c.errSessionStats = err
	g.Update(c.drawSessionStats)
}

// quit returns from the help or session-stats page to the torrents page; when
// already on the torrents page it exits the UI loop via gocui.ErrQuit.
func (c *Console) quit(g *gocui.Gui, v *gocui.View) error {
	if c.selectedPage == help {
		c.selectedPage = torrents
		return nil
	}
	if c.selectedPage == sessionStats {
		c.selectedPage = torrents
		return nil
	}
	return gocui.ErrQuit
}
// switchRow moves the selection to the given row of the torrents list,
// clamping the row into range and scrolling the view (adjusting origin and
// cursor) when the list is taller than the view.
// Caller must hold c.m.
func (c *Console) switchRow(v *gocui.View, row int) error {
	switch {
	case len(c.torrents) == 0:
		return nil
	case row < 0:
		row = 0
	case row >= len(c.torrents):
		row = len(c.torrents) - 1
	}
	_, cy := v.Cursor()
	_, oy := v.Origin()
	_, height := v.Size()
	currentRow := oy + cy
	if len(c.torrents) > height {
		if row > currentRow {
			// scroll down
			if row >= oy+height {
				// target below the visible window: move origin so the
				// target is the last visible line
				_ = v.SetOrigin(0, row-height+1)
				_ = v.SetCursor(0, height-1)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		} else {
			// scroll up
			if row < oy {
				// target above the visible window: move origin to it
				_ = v.SetOrigin(0, row)
				_ = v.SetCursor(0, 0)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		}
	} else {
		// Whole list fits: no scrolling needed, just move the cursor.
		_ = v.SetOrigin(0, 0)
		_ = v.SetCursor(0, row)
	}
	c.setSelectedID(c.torrents[row].ID)
	return nil
}
// cursorDown moves the selection one row down in the torrents list.
func (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	next := oy + cy + 1
	if next == len(c.torrents) {
		// Already on the last row; nothing to do.
		return nil
	}
	return c.switchRow(v, next)
}

// cursorUp moves the selection one row up in the torrents list.
func (c *Console) cursorUp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	prev := oy + cy - 1
	if prev == -1 {
		// Already on the first row; nothing to do.
		return nil
	}
	return c.switchRow(v, prev)
}

// goTop jumps the selection to the first torrent in the list.
func (c *Console) goTop(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) > 0 {
		return c.switchRow(v, 0)
	}
	return nil
}

// goBottom jumps the selection to the last torrent in the list.
func (c *Console) goBottom(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if n := len(c.torrents); n > 0 {
		return c.switchRow(v, n-1)
	}
	return nil
}
// removeTorrent removes the currently selected torrent over RPC and triggers
// a refresh of the torrents list on success.
func (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := c.client.RemoveTorrent(selected); err != nil {
		return err
	}
	c.triggerUpdateTorrents()
	return nil
}

// setSelectedID records the selected torrent ID and, when it actually
// changed, triggers a refresh of the details pane.
// Caller must hold c.m.
func (c *Console) setSelectedID(id string) {
	if id == c.selectedID {
		return
	}
	c.selectedID = id
	c.triggerUpdateDetails(true)
}
// torrentAction applies f to the currently selected torrent's ID and, on
// success, triggers a refresh of the details pane.
func (c *Console) torrentAction(f func(id string) error) error {
	c.m.Lock()
	selected := c.selectedID
	c.m.Unlock()
	if err := f(selected); err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}

// startTorrent starts the selected torrent.
func (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {
	return c.torrentAction(c.client.StartTorrent)
}

// stopTorrent stops the selected torrent.
func (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {
	return c.torrentAction(c.client.StopTorrent)
}

// announce manually announces the selected torrent to its trackers.
func (c *Console) announce(g *gocui.Gui, v *gocui.View) error {
	return c.torrentAction(c.client.AnnounceTorrent)
}

// verify re-verifies the selected torrent's data on disk.
func (c *Console) verify(g *gocui.Gui, v *gocui.View) error {
	return c.torrentAction(c.client.VerifyTorrent)
}
// tabAdjustDown moves the torrents/details separator one line down, capped at
// half the screen height.
func (c *Console) tabAdjustDown(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	limit := maxY / 2
	if c.tabAdjust < limit {
		c.tabAdjust++
	}
	return nil
}

// tabAdjustUp moves the torrents/details separator one line up, capped so the
// torrents view keeps at least one line.
func (c *Console) tabAdjustUp(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	limit := maxY / 2
	if c.tabAdjust > 1-limit {
		c.tabAdjust--
	}
	return nil
}
// setTab records the selected details tab and refreshes the details pane.
func (c *Console) setTab(tab int) error {
	c.m.Lock()
	c.selectedTab = tab
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}

// switchGeneral selects the General info tab.
func (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error { return c.setTab(general) }

// switchStats selects the Stats tab.
func (c *Console) switchStats(g *gocui.Gui, v *gocui.View) error { return c.setTab(stats) }

// switchTrackers selects the Trackers tab.
func (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error { return c.setTab(trackers) }

// switchPeers selects the Peers tab.
func (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error { return c.setTab(peers) }

// switchWebseeds selects the Webseeds tab.
func (c *Console) switchWebseeds(g *gocui.Gui, v *gocui.View) error { return c.setTab(webseeds) }

// switchHelp switches the UI to the help page.
func (c *Console) switchHelp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedPage = help
	c.m.Unlock()
	return nil
}

// switchSessionStats switches the UI to the session-stats page.
func (c *Console) switchSessionStats(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedPage = sessionStats
	c.m.Unlock()
	return nil
}
// triggerUpdateDetails requests a refresh of the details pane via a
// non-blocking send on a 1-buffered channel (coalesces repeated triggers).
// clear marks the cached details stale so drawDetails shows "refreshing...".
// NOTE(review): updatingDetails is written here without holding c.m while
// other code reads it under the lock — looks like a benign UI race, but
// confirm with -race.
func (c *Console) triggerUpdateDetails(clear bool) {
	if clear {
		c.updatingDetails = true
	}
	select {
	case c.updateDetailsC <- struct{}{}:
	default:
	}
}

// triggerUpdateTorrents requests a refresh of the torrents list via a
// non-blocking send (coalesces repeated triggers).
func (c *Console) triggerUpdateTorrents() {
	select {
	case c.updateTorrentsC <- struct{}{}:
	default:
	}
}
// flags renders a peer as a fixed 6-character flag string, one position per
// attribute:
//
//	1: d/D  we are interested, peer is choking / not choking us
//	   K    not interested but peer unchoked us
//	2: u/U  peer is interested, we are choking / not choking it
//	   ?    peer not interested but we unchoked it
//	3: O    peer is optimistically unchoked
//	4: S    peer is snubbed
//	5: H/X/I/M  discovered via DHT / PEX / incoming / manual
//	6: E/e  encrypted stream / encrypted handshake only
//
// Unset positions are spaces.
func flags(p rpctypes.Peer) string {
	var b strings.Builder
	b.Grow(6)
	switch {
	case p.ClientInterested && p.PeerChoking:
		b.WriteByte('d')
	case p.ClientInterested:
		b.WriteByte('D')
	case !p.PeerChoking:
		b.WriteByte('K')
	default:
		b.WriteByte(' ')
	}
	switch {
	case p.PeerInterested && p.ClientChoking:
		b.WriteByte('u')
	case p.PeerInterested:
		b.WriteByte('U')
	case !p.ClientChoking:
		b.WriteByte('?')
	default:
		b.WriteByte(' ')
	}
	if p.OptimisticUnchoked {
		b.WriteByte('O')
	} else {
		b.WriteByte(' ')
	}
	if p.Snubbed {
		b.WriteByte('S')
	} else {
		b.WriteByte(' ')
	}
	switch p.Source {
	case "DHT":
		b.WriteByte('H')
	case "PEX":
		b.WriteByte('X')
	case "INCOMING":
		b.WriteByte('I')
	case "MANUAL":
		b.WriteByte('M')
	default:
		b.WriteByte(' ')
	}
	switch {
	case p.EncryptedStream:
		b.WriteByte('E')
	case p.EncryptedHandshake:
		b.WriteByte('e')
	default:
		b.WriteByte(' ')
	}
	return b.String()
}
|
package console
import (
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/cenkalti/rain/internal/jsonutil"
"github.com/cenkalti/rain/internal/rpctypes"
"github.com/cenkalti/rain/rainrpc"
"github.com/jroimartin/gocui"
)
// Top-level UI pages. Exactly one is active at a time (Console.selectedPage).
const (
	// pages
	torrents int = iota
	sessionStats
	addTorrent
	help
)

// Tabs of the details pane on the torrents page (Console.selectedTab).
const (
	// tabs
	general int = iota
	stats
	trackers
	peers
	webseeds
)
// Console is for drawing a text user interface for a remote Session.
type Console struct {
	client *rainrpc.Client
	// columns selected for the torrents list; may require per-torrent stats
	columns   []string
	needStats bool
	// protects global state in client
	m sync.Mutex
	// error from listing torrents rpc call
	errTorrents error
	// error from getting stats/trackers/peers/etc...
	errDetails error
	// error from getting session stats
	errSessionStats error
	// id of currently selected torrent
	selectedID string
	// selected detail tab
	selectedTab int
	// selected global page
	selectedPage int
	// distance Y from 0,0
	tabAdjust int
	// fields to hold responses from rpc requests
	torrents     []Torrent
	stats        rpctypes.Stats
	sessionStats rpctypes.SessionStats
	trackers     []rpctypes.Tracker
	peers        []rpctypes.Peer
	webseeds     []rpctypes.Webseed
	// whether details tab is currently updating state
	updatingDetails bool
	// channels for triggering refresh after view update / key events
	updateTorrentsC chan struct{}
	updateDetailsC  chan struct{}
	// state for updater goroutine for updating torrents list and details tab
	stopUpdatingTorrentsC chan struct{}
	updatingTorrents      bool
	// state for updater goroutine for updating session-stats page
	stopUpdatingSessionStatsC chan struct{}
	updatingSessionStats      bool
}

// Torrent pairs a listed torrent with its optionally-fetched stats
// (Stats is nil when the selected columns do not need it).
type Torrent struct {
	rpctypes.Torrent
	Stats *rpctypes.Stats
}
// New returns a new Console object that uses a RPC client to get information from a torrent.Session.
// columns selects what the torrents list displays; needStats is precomputed
// so updateTorrents knows whether to fetch per-torrent stats.
// The trigger channels are 1-buffered so repeated triggers coalesce.
func New(clt *rainrpc.Client, columns []string) *Console {
	return &Console{
		client:          clt,
		columns:         columns,
		needStats:       columnsNeedStats(columns),
		updateTorrentsC: make(chan struct{}, 1),
		updateDetailsC:  make(chan struct{}, 1),
	}
}
// columnsNeedStats reports whether any requested column requires a
// per-torrent stats RPC call. Columns in the basic list come directly from
// the torrent list response and need no extra call.
//
// Fix: the previous implementation returned true whenever a column differed
// from ANY entry of the list — true for every non-empty column set, so stats
// were always fetched. It now performs a proper membership test and returns
// true only when a column is absent from the basic list.
func columnsNeedStats(columns []string) bool {
	basic := []string{"ID", "Name", "InfoHash", "Port"}
	for _, c := range columns {
		found := false
		for _, d := range basic {
			if c == d {
				found = true
				break
			}
		}
		if !found {
			return true
		}
	}
	return false
}
// Run the UI loop.
// Creates the gocui screen, installs the layout manager and key bindings,
// and blocks until the user quits; gocui.ErrQuit is the normal exit and is
// translated to nil.
func (c *Console) Run() error {
	g, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		return err
	}
	defer g.Close()
	g.SetManagerFunc(c.layout)
	c.keybindings(g)
	err = g.MainLoop()
	if err == gocui.ErrQuit {
		err = nil
	}
	return err
}
// keybindings installs all key handlers. Errors from SetKeybinding are
// deliberately ignored: the names and keys are fixed at compile time, so a
// failure would be a programming error, not a runtime condition.
func (c *Console) keybindings(g *gocui.Gui) {
	// Global keys
	_ = g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, c.forceQuit)
	// Quit keys
	_ = g.SetKeybinding("torrents", 'q', gocui.ModNone, c.forceQuit)
	_ = g.SetKeybinding("help", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("session-stats", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("add-torrent", gocui.KeyCtrlQ, gocui.ModNone, c.quit)
	// Navigation
	_ = g.SetKeybinding("torrents", 'j', gocui.ModNone, c.cursorDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModNone, c.cursorUp)
	_ = g.SetKeybinding("torrents", 'j', gocui.ModAlt, c.tabAdjustDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModAlt, c.tabAdjustUp)
	_ = g.SetKeybinding("torrents", 'g', gocui.ModNone, c.goTop)
	_ = g.SetKeybinding("torrents", 'G', gocui.ModNone, c.goBottom)
	_ = g.SetKeybinding("torrents", 'a', gocui.ModAlt, c.switchSessionStats)
	_ = g.SetKeybinding("torrents", '?', gocui.ModNone, c.switchHelp)
	// Tabs
	_ = g.SetKeybinding("torrents", 'g', gocui.ModAlt, c.switchGeneral)
	_ = g.SetKeybinding("torrents", 's', gocui.ModAlt, c.switchStats)
	_ = g.SetKeybinding("torrents", 't', gocui.ModAlt, c.switchTrackers)
	_ = g.SetKeybinding("torrents", 'p', gocui.ModAlt, c.switchPeers)
	_ = g.SetKeybinding("torrents", 'w', gocui.ModAlt, c.switchWebseeds)
	// Torrent control
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModNone, c.startTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModAlt, c.stopTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlR, gocui.ModNone, c.removeTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModAlt, c.announce)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlV, gocui.ModNone, c.verify)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModNone, c.switchAddTorrent)
	_ = g.SetKeybinding("add-torrent", gocui.KeyEnter, gocui.ModNone, c.addTorrentHandleEnter)
}
// startUpdatingTorrents launches the background goroutine that periodically
// refreshes the torrents list and details pane. No-op when already running.
// Called from layout only, i.e. from the gocui main loop goroutine.
func (c *Console) startUpdatingTorrents(g *gocui.Gui) {
	if c.updatingTorrents {
		return
	}
	c.updatingTorrents = true
	// Fresh stop channel per start/stop cycle; the old one was closed.
	c.stopUpdatingTorrentsC = make(chan struct{})
	go c.updateTorrentsAndDetailsLoop(g, c.stopUpdatingTorrentsC)
}

// stopUpdatingTorrents signals the torrents updater goroutine to exit.
// No-op when it is not running.
func (c *Console) stopUpdatingTorrents() {
	if !c.updatingTorrents {
		return
	}
	c.updatingTorrents = false
	close(c.stopUpdatingTorrentsC)
}

// startUpdatingSessionStats launches the background goroutine that refreshes
// the session-stats page. No-op when already running.
func (c *Console) startUpdatingSessionStats(g *gocui.Gui) {
	if c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = true
	c.stopUpdatingSessionStatsC = make(chan struct{})
	go c.updateSessionStatsLoop(g, c.stopUpdatingSessionStatsC)
}

// stopUpdatingSessionStats signals the session-stats updater goroutine to
// exit. No-op when it is not running.
func (c *Console) stopUpdatingSessionStats() {
	if !c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = false
	close(c.stopUpdatingSessionStatsC)
}
// layout is the gocui manager function: it (re)draws all views for the
// currently selected page, starts/stops the background updaters so only the
// visible page refreshes, and manages the terminal cursor for the
// add-torrent text input.
func (c *Console) layout(g *gocui.Gui) error {
	err := c.drawTitle(g)
	if err != nil {
		return err
	}
	// Run the torrents updater only while the torrents page is visible.
	if c.selectedPage == torrents {
		c.startUpdatingTorrents(g)
	} else {
		c.stopUpdatingTorrents()
	}
	// Same for the session-stats updater; also drop its view when hidden.
	if c.selectedPage == sessionStats {
		c.startUpdatingSessionStats(g)
	} else {
		c.stopUpdatingSessionStats()
		_ = g.DeleteView("session-stats")
	}
	if c.selectedPage != help {
		_ = g.DeleteView("help")
	}
	if c.selectedPage != addTorrent {
		_ = g.DeleteView("add-torrent")
		// Terminal cursor is only needed while typing into add-torrent.
		g.Cursor = false
	}
	// Draw the selected page and give it input focus.
	switch c.selectedPage {
	case torrents:
		err = c.drawTorrents(g)
		if err != nil {
			return err
		}
		err = c.drawDetails(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("torrents")
	case sessionStats:
		err = c.drawSessionStats(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("session-stats")
	case help:
		err = c.drawHelp(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("help")
	case addTorrent:
		err = c.drawAddTorrent(g)
		if err != nil {
			return err
		}
		g.Cursor = true
		_, err = g.SetCurrentView("add-torrent")
	}
	return err
}
// drawTitle renders the one-line title bar. gocui.ErrUnknownView means the
// view was just created, in which case the static title text is set once;
// any other error is propagated.
func (c *Console) drawTitle(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("title", -1, 0, maxX, maxY)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Title = "Rain by put.io [" + c.client.Addr() + "] (Press '?' for help)"
	}
	return nil
}
// drawHelp renders the help page listing all key bindings. On first creation
// (ErrUnknownView) the frame and title are set; on later calls the view is
// cleared and the text rewritten.
func (c *Console) drawHelp(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("help", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Help"
	} else {
		v.Clear()
	}
	fmt.Fprintln(v, " q Quit")
	fmt.Fprintln(v, " j move down")
	fmt.Fprintln(v, " k move up")
	fmt.Fprintln(v, " alt+j move tab separator down")
	fmt.Fprintln(v, " alt+k move tab separator up")
	fmt.Fprintln(v, " g Go to top")
	fmt.Fprintln(v, " G Go to bottom")
	fmt.Fprintln(v, " alt+a show session stats page")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, " alt+g switch to General info tab")
	fmt.Fprintln(v, " alt+s switch to Stats tab")
	fmt.Fprintln(v, " alt+t switch to Trackers tab")
	fmt.Fprintln(v, " alt+p switch to Peers tab")
	fmt.Fprintln(v, " alt+w switch to Webseeds tab")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, " ctrl+s Start torrent")
	fmt.Fprintln(v, "ctrl+alt+s Stop torrent")
	fmt.Fprintln(v, " ctrl+R Remove torrent")
	fmt.Fprintln(v, "ctrl+alt+a Announce torrent")
	fmt.Fprintln(v, " ctrl+v Verify torrent")
	fmt.Fprintln(v, " ctrl+a Add new torrent")
	return nil
}
// drawAddTorrent renders the add-torrent input view: an editable, wrapping
// text box where each line is a torrent file path or a URI. Setup runs only
// once on creation (ErrUnknownView); the buffer is preserved across redraws.
func (c *Console) drawAddTorrent(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("add-torrent", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Add Torrent (Press ctrl-q to close window)"
		v.Editable = true
		v.Wrap = true
	}
	return nil
}
// drawSessionStats renders the session-stats page. On first creation a
// "loading..." placeholder is shown; afterwards the cached stats (or the
// cached RPC error) are rendered via FormatSessionStats.
func (c *Console) drawSessionStats(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("session-stats", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Session Stats"
		fmt.Fprintln(v, "loading...")
	} else {
		v.Clear()
		if c.errSessionStats != nil {
			fmt.Fprintln(v, "error:", c.errSessionStats)
			return nil
		}
		FormatSessionStats(&c.sessionStats, v)
	}
	return nil
}
// getHeader builds the torrents-list header line for the configured columns,
// space-separated, each padded to its column's fixed width. Panics on an
// unknown column name (a configuration programming error).
func getHeader(columns []string) string {
	var b strings.Builder
	for i, column := range columns {
		if i != 0 {
			b.WriteString(" ")
		}
		b.WriteString(headerCell(column))
	}
	return b.String()
}

// headerCell formats a single header cell to its column's fixed width.
func headerCell(column string) string {
	switch column {
	case "#":
		return fmt.Sprintf("%3s", column)
	case "ID":
		return fmt.Sprintf("%-22s", column)
	case "Name":
		return column
	case "InfoHash":
		return fmt.Sprintf("%-40s", column)
	case "Port":
		return fmt.Sprintf("%5s", column)
	case "Status":
		return fmt.Sprintf("%-11s", column)
	case "Speed":
		return fmt.Sprintf("%8s", column)
	case "ETA":
		return fmt.Sprintf("%8s", column)
	case "Progress":
		return fmt.Sprintf("%8s", column)
	case "Ratio":
		return fmt.Sprintf("%5s", column)
	case "Size":
		return fmt.Sprintf("%8s", column)
	default:
		panic(fmt.Sprintf("unsupported column %s", column))
	}
}
// getRow formats one torrents-list row (newline-terminated) for the
// configured columns. Stats-derived cells render as blanks when t.Stats is
// nil (stats were not fetched for this torrent). Widths must match
// getHeader. Panics on an unknown column name.
func getRow(columns []string, t Torrent, index int) string {
	row := ""
	for i, column := range columns {
		if i != 0 {
			row += " "
		}
		stats := t.Stats
		switch column {
		case "#":
			// 1-based display index.
			row += fmt.Sprintf("%3d", index+1)
		case "ID":
			row += t.ID
		case "Name":
			row += t.Name
		case "InfoHash":
			row += t.InfoHash
		case "Port":
			row += fmt.Sprintf("%5d", t.Port)
		case "Status":
			if stats == nil {
				row += fmt.Sprintf("%-11s", "")
			} else {
				status := stats.Status
				// Shortened to fit the 11-char column.
				if status == "Downloading Metadata" {
					status = "Downloading"
				}
				row += fmt.Sprintf("%-11s", status)
			}
		case "Speed":
			// Upload speed while seeding, download speed otherwise (KiB/s).
			switch {
			case stats == nil:
				row += fmt.Sprintf("%8s", "")
			case stats.Status == "Seeding":
				row += fmt.Sprintf("%6d K", stats.Speed.Upload/1024)
			default:
				row += fmt.Sprintf("%6d K", stats.Speed.Download/1024)
			}
		case "ETA":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%8s", getETA(stats))
			}
		case "Progress":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%8d", getProgress(stats))
			}
		case "Ratio":
			if stats == nil {
				row += fmt.Sprintf("%5s", "")
			} else {
				row += fmt.Sprintf("%5.2f", getRatio(stats))
			}
		case "Size":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%6d M", stats.Bytes.Total/(1<<20))
			}
		default:
			panic(fmt.Sprintf("unsupported column %s", column))
		}
	}
	return row + "\n"
}
// drawTorrents renders the column header and the torrents list in the top
// part of the screen (split point adjusted by tabAdjust) and keeps the
// cursor in sync with selectedID: if the row under the cursor no longer
// matches the selected torrent, the cursor is moved back to it; otherwise
// the selection follows the cursor.
func (c *Console) drawTorrents(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	// Separator pushed to the very top: nothing to draw.
	if split <= 0 {
		return nil
	}
	if v, err := g.SetView("torrents-header", -1, 0, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		fmt.Fprint(v, getHeader(c.columns))
	}
	// Need at least one line below the header for the list itself.
	if split <= 1 {
		return nil
	}
	if v, err := g.SetView("torrents", -1, 1, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		v.Highlight = true
		v.SelBgColor = gocui.ColorGreen
		v.SelFgColor = gocui.ColorBlack
		v.Title = "Rain"
		fmt.Fprintln(v, "loading torrents...")
	} else {
		v.Clear()
		if c.errTorrents != nil {
			fmt.Fprintln(v, "error:", c.errTorrents)
			return nil
		}
		// Remember which row holds the currently selected torrent.
		selectedIDrow := -1
		for i, t := range c.torrents {
			fmt.Fprint(v, getRow(c.columns, t, i))
			if t.ID == c.selectedID {
				selectedIDrow = i
			}
		}
		_, cy := v.Cursor()
		_, oy := v.Origin()
		selectedRow := cy + oy // absolute row = cursor offset + scroll origin
		if selectedRow < len(c.torrents) {
			if c.torrents[selectedRow].ID != c.selectedID && selectedIDrow != -1 {
				// List shifted under the cursor: snap cursor back to selection.
				_ = v.SetCursor(0, selectedIDrow)
			} else {
				c.setSelectedID(c.torrents[selectedRow].ID)
			}
		}
	}
	return nil
}
// drawDetails renders the bottom pane showing the selected torrent's details
// for the currently selected tab (general / stats / trackers / peers /
// webseeds). It only formats the state cached by updateDetails; it performs
// no RPC calls itself.
func (c *Console) drawDetails(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	if v, err := g.SetView("details", -1, split, maxX, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Wrap = true
		fmt.Fprintln(v, "loading details...")
	} else {
		v.Clear()
		switch c.selectedTab {
		case general:
			v.Title = "General Info"
		case stats:
			v.Title = "Stats"
		case trackers:
			v.Title = "Trackers"
		case peers:
			v.Title = "Peers"
		case webseeds:
			v.Title = "WebSeeds"
		}
		if c.selectedID == "" {
			return nil
		}
		if c.updatingDetails {
			fmt.Fprintln(v, "refreshing...")
			return nil
		}
		if c.errDetails != nil {
			fmt.Fprintln(v, "error:", c.errDetails)
			return nil
		}
		switch c.selectedTab {
		case general:
			FormatStats(&c.stats, v)
		case stats:
			b, err := jsonutil.MarshalCompactPretty(c.stats)
			if err != nil {
				fmt.Fprintln(v, "error:", err)
			} else {
				fmt.Fprintln(v, string(b))
			}
		case trackers:
			for i, t := range c.trackers {
				fmt.Fprintf(v, "#%d %s\n", i+1, t.URL)
				switch t.Status {
				case "Not working":
					errStr := t.Error
					if t.ErrorUnknown {
						errStr = errStr + " (" + t.ErrorInternal + ")"
					}
					fmt.Fprintf(v, " Status: %s, Error: %s\n", t.Status, errStr)
				default:
					if t.Warning != "" {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d Warning: %s\n", t.Status, t.Seeders, t.Leechers, t.Warning)
					} else {
						fmt.Fprintf(v, " Status: %s, Seeders: %d, Leechers: %d\n", t.Status, t.Seeders, t.Leechers)
					}
				}
				var nextAnnounce string
				if t.NextAnnounce.IsZero() {
					nextAnnounce = "Unknown"
				} else {
					nextAnnounce = t.NextAnnounce.Time.Format(time.RFC3339)
				}
				fmt.Fprintf(v, " Last announce: %s, Next announce: %s\n", t.LastAnnounce.Time.Format(time.RFC3339), nextAnnounce)
			}
		case peers:
			format := "%2s %21s %7s %8s %6s %s\n"
			fmt.Fprintf(v, format, "#", "Addr", "Flags", "Download", "Upload", "Client")
			for i, p := range c.peers {
				num := fmt.Sprintf("%d", i+1)
				// Speeds are shown in KiB/s; blank when zero.
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var ul string
				if p.UploadSpeed > 0 {
					ul = fmt.Sprintf("%d", p.UploadSpeed/1024)
				}
				fmt.Fprintf(v, format, num, p.Addr, flags(p), dl, ul, p.Client)
			}
		case webseeds:
			format := "%2s %40s %8s %s\n"
			fmt.Fprintf(v, format, "#", "URL", "Speed", "Error")
			for i, p := range c.webseeds {
				num := fmt.Sprintf("%d", i+1)
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var errstr string
				if p.Error != "" {
					errstr = p.Error
				}
				fmt.Fprintf(v, format, num, p.URL, dl, errstr)
			}
		}
	}
	return nil
}
// updateTorrentsAndDetailsLoop is the torrents-page updater goroutine. Every
// second it triggers a refresh of both the torrents list and the details
// pane; explicit trigger channels allow immediate refresh after key events.
// It exits when stop is closed.
func (c *Console) updateTorrentsAndDetailsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.triggerUpdateTorrents() // populate the list immediately on page entry
	for {
		select {
		case <-ticker.C:
			c.triggerUpdateTorrents()
			c.triggerUpdateDetails(false)
		case <-c.updateTorrentsC:
			c.updateTorrents(g)
		case <-c.updateDetailsC:
			// Run async so a slow details RPC does not block list updates.
			go c.updateDetails(g)
		case <-stop:
			return
		}
	}
}

// updateSessionStatsLoop is the session-stats-page updater goroutine: it
// refreshes once immediately, then every second until stop is closed.
func (c *Console) updateSessionStatsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.updateSessionStats(g)
	for {
		select {
		case <-ticker.C:
			c.updateSessionStats(g)
		case <-stop:
			return
		}
	}
}
// updateTorrents fetches the torrent list over RPC, sorts it by add time (ID
// as tiebreaker), optionally fetches per-torrent stats in parallel for the
// rows currently visible on screen, stores everything in shared state, fixes
// up the selection, and schedules a redraw.
func (c *Console) updateTorrents(g *gocui.Gui) {
	rpcTorrents, err := c.client.ListTorrents()
	sort.Slice(rpcTorrents, func(i, j int) bool {
		a, b := rpcTorrents[i], rpcTorrents[j]
		if a.AddedAt.Equal(b.AddedAt.Time) {
			return a.ID < b.ID
		}
		return a.AddedAt.Time.Before(b.AddedAt.Time)
	})
	torrents := make([]Torrent, 0, len(rpcTorrents))
	for _, t := range rpcTorrents {
		torrents = append(torrents, Torrent{Torrent: t})
	}
	// Get torrent stats in parallel
	// (only for visible rows, and only when the columns need stats; a failed
	// stats call leaves t.Stats nil and the row renders blanks).
	if c.needStats {
		inside := c.rowsInsideView(g)
		var wg sync.WaitGroup
		for _, i := range inside {
			if i < len(torrents) {
				t := &torrents[i]
				wg.Add(1)
				go func(t *Torrent) {
					t.Stats, _ = c.client.GetTorrentStats(t.ID)
					wg.Done()
				}(t)
			}
		}
		wg.Wait()
	}
	c.m.Lock()
	c.torrents = torrents
	c.errTorrents = err
	if len(c.torrents) == 0 {
		c.setSelectedID("")
	} else if c.selectedID == "" {
		c.setSelectedID(c.torrents[0].ID)
	}
	c.m.Unlock()
	g.Update(c.drawTorrents)
}
// rowsInsideView returns the absolute indexes of the torrent rows currently
// visible in the torrents view, computed from the view's scroll origin and
// the separator position. Returns nil when the view does not exist yet or
// the last list fetch errored.
func (c *Console) rowsInsideView(g *gocui.Gui) []int {
	c.m.Lock()
	defer c.m.Unlock()
	v, err := g.View("torrents")
	if err != nil {
		return nil
	}
	if c.errTorrents != nil {
		return nil
	}
	_, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	_, oy := v.Origin()
	var ret []int
	// split-2 accounts for the header line and the view border offset.
	for i := oy; i < oy+split-2; i++ {
		ret = append(ret, i)
	}
	return ret
}
// updateDetails fetches the data for the currently selected details tab over
// RPC and stores it in shared state, then schedules a redraw unless the
// selection changed while the RPC was in flight.
//
// Fix: GetTorrentStats returns a nil pointer on error; the previous code
// unconditionally dereferenced it (`c.stats = *stats`), panicking whenever
// the RPC failed. The result is now only copied when non-nil; the error is
// still recorded and shown by drawDetails.
//
// NOTE(review): c.selectedTab is read here without holding c.m while it is
// written under the lock elsewhere — looks like a benign UI race, but
// confirm with -race.
func (c *Console) updateDetails(g *gocui.Gui) {
	c.m.Lock()
	selectedID := c.selectedID
	c.m.Unlock()
	if selectedID == "" {
		return
	}
	switch c.selectedTab {
	case general, stats:
		stats, err := c.client.GetTorrentStats(selectedID)
		c.m.Lock()
		if stats != nil {
			c.stats = *stats
		}
		c.errDetails = err
		c.m.Unlock()
	case trackers:
		trackers, err := c.client.GetTorrentTrackers(selectedID)
		sort.Slice(trackers, func(i, j int) bool { return trackers[i].URL < trackers[j].URL })
		c.m.Lock()
		c.trackers = trackers
		c.errDetails = err
		c.m.Unlock()
	case peers:
		peers, err := c.client.GetTorrentPeers(selectedID)
		// Stable display order: connection time, then address.
		sort.Slice(peers, func(i, j int) bool {
			a, b := peers[i], peers[j]
			if a.ConnectedAt.Equal(b.ConnectedAt.Time) {
				return a.Addr < b.Addr
			}
			return a.ConnectedAt.Time.Before(b.ConnectedAt.Time)
		})
		c.m.Lock()
		c.peers = peers
		c.errDetails = err
		c.m.Unlock()
	case webseeds:
		webseeds, err := c.client.GetTorrentWebseeds(selectedID)
		sort.Slice(webseeds, func(i, j int) bool {
			a, b := webseeds[i], webseeds[j]
			return a.URL < b.URL
		})
		c.m.Lock()
		c.webseeds = webseeds
		c.errDetails = err
		c.m.Unlock()
	}
	c.m.Lock()
	defer c.m.Unlock()
	c.updatingDetails = false
	// Selection moved while we were fetching; the data is stale, skip redraw.
	if selectedID != c.selectedID {
		return
	}
	g.Update(c.drawDetails)
}
// updateSessionStats fetches session-wide stats over RPC, stores them in
// shared state and schedules a redraw of the session-stats page.
//
// Fix: GetSessionStats returns a nil pointer on error; the previous code
// unconditionally dereferenced it (`c.sessionStats = *stats`), panicking
// whenever the RPC failed. The result is now only copied when non-nil; the
// error is recorded and shown by drawSessionStats.
func (c *Console) updateSessionStats(g *gocui.Gui) {
	stats, err := c.client.GetSessionStats()
	c.m.Lock()
	defer c.m.Unlock()
	if stats != nil {
		c.sessionStats = *stats
	}
	c.errSessionStats = err
	g.Update(c.drawSessionStats)
}

// quit returns from a secondary page back to the torrents page.
func (c *Console) quit(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = torrents
	return nil
}

// forceQuit exits the UI loop immediately from any page.
func (c *Console) forceQuit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrQuit
}
// isURI reports whether arg is a magnet link or an HTTP(S) URL, as opposed to
// a local file path.
func isURI(arg string) bool {
	for _, prefix := range []string{"magnet:", "http://", "https://"} {
		if strings.HasPrefix(arg, prefix) {
			return true
		}
	}
	return false
}
// addTorrentHandleEnter is the Enter handler for the add-torrent view. Each
// non-empty line of the buffer is added as a torrent: magnet/http/https lines
// via AddURI, anything else treated as a local torrent file path. On error the
// view is reset to show the error; on success we return to the torrents page.
func (c *Console) addTorrentHandleEnter(g *gocui.Gui, v *gocui.View) error {
	handleError := func(err error) error {
		v.Clear()
		_ = v.SetCursor(0, 0)
		fmt.Fprintln(v, "error:", err)
		return nil
	}
	for _, line := range v.BufferLines() {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		var err error
		if isURI(line) {
			_, err = c.client.AddURI(line, nil)
		} else {
			var f *os.File
			f, err = os.Open(line)
			if err != nil {
				return handleError(err)
			}
			_, err = c.client.AddTorrent(f, nil)
			// Best-effort close; the add result is what is reported.
			_ = f.Close()
		}
		if err != nil {
			return handleError(err)
		}
	}
	v.Clear()
	c.selectedPage = torrents
	return nil
}
// switchRow moves the cursor/selection of the torrents view to the given row,
// clamping it into [0, len(c.torrents)-1] and scrolling the view origin when
// the target row is outside the visible window. Callers hold c.m.
func (c *Console) switchRow(v *gocui.View, row int) error {
	switch {
	case len(c.torrents) == 0:
		return nil
	case row < 0:
		row = 0
	case row >= len(c.torrents):
		row = len(c.torrents) - 1
	}
	_, cy := v.Cursor()
	_, oy := v.Origin()
	_, height := v.Size()
	currentRow := oy + cy
	if len(c.torrents) > height {
		if row > currentRow {
			// scroll down
			if row >= oy+height {
				// move origin
				_ = v.SetOrigin(0, row-height+1)
				_ = v.SetCursor(0, height-1)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		} else {
			// scroll up
			if row < oy {
				// move origin
				_ = v.SetOrigin(0, row)
				_ = v.SetCursor(0, 0)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		}
	} else {
		// Everything fits in the view; only the cursor moves.
		_ = v.SetOrigin(0, 0)
		_ = v.SetCursor(0, row)
	}
	c.setSelectedID(c.torrents[row].ID)
	return nil
}
// cursorDown moves the selection one row down, stopping at the last torrent.
func (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	row := cy + oy + 1
	if row == len(c.torrents) {
		return nil
	}
	return c.switchRow(v, row)
}
// cursorUp moves the selection one row up, stopping at the first torrent.
func (c *Console) cursorUp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	row := cy + oy - 1
	if row == -1 {
		return nil
	}
	return c.switchRow(v, row)
}
// goTop selects the first torrent in the list.
func (c *Console) goTop(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) == 0 {
		return nil
	}
	return c.switchRow(v, 0)
}
// goBottom selects the last torrent in the list.
func (c *Console) goBottom(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) == 0 {
		return nil
	}
	return c.switchRow(v, len(c.torrents)-1)
}
// removeTorrent removes the currently selected torrent via RPC and triggers a
// refresh of the torrents list.
func (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.RemoveTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateTorrents()
	return nil
}
// setSelectedID records the current selection and, when it actually changed,
// triggers a refresh of the details pane. Callers hold c.m.
func (c *Console) setSelectedID(id string) {
	changed := id != c.selectedID
	c.selectedID = id
	if changed {
		c.triggerUpdateDetails(true)
	}
}
// startTorrent starts the currently selected torrent via RPC and refreshes
// the details pane.
func (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.StartTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// stopTorrent stops the currently selected torrent via RPC and refreshes the
// details pane.
func (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.StopTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// announce forces a tracker announce for the currently selected torrent and
// refreshes the details pane.
func (c *Console) announce(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.AnnounceTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// verify starts data verification for the currently selected torrent and
// refreshes the details pane.
func (c *Console) verify(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.VerifyTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// tabAdjustDown moves the torrents/details split one row down, bounded by the
// window height.
// NOTE(review): c.tabAdjust is modified here without holding c.m while other
// methods read it under the lock — confirm whether gocui serializes handlers.
func (c *Console) tabAdjustDown(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	halfY := maxY / 2
	if c.tabAdjust < halfY-1 {
		c.tabAdjust++
	}
	return nil
}
// tabAdjustUp moves the torrents/details split one row up, bounded by the
// window height.
func (c *Console) tabAdjustUp(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	halfY := maxY / 2
	if c.tabAdjust > -halfY+1 {
		c.tabAdjust--
	}
	return nil
}
// switchGeneral selects the General info detail tab and refreshes it.
func (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = general
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchStats selects the Stats detail tab and refreshes it.
func (c *Console) switchStats(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = stats
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchTrackers selects the Trackers detail tab and refreshes it.
func (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = trackers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchPeers selects the Peers detail tab and refreshes it.
func (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = peers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchWebseeds selects the Webseeds detail tab and refreshes it.
func (c *Console) switchWebseeds(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = webseeds
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchHelp shows the help page.
func (c *Console) switchHelp(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = help
	return nil
}
// switchSessionStats shows the session-stats page.
func (c *Console) switchSessionStats(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = sessionStats
	return nil
}
// switchAddTorrent shows the add-torrent input page.
func (c *Console) switchAddTorrent(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = addTorrent
	return nil
}
// triggerUpdateDetails requests an asynchronous refresh of the details pane.
// The send is non-blocking: if a refresh is already pending the signal is
// dropped. When clear is true the pane is marked as refreshing so drawDetails
// shows "refreshing..." until new data arrives.
func (c *Console) triggerUpdateDetails(clear bool) {
	if clear {
		c.updatingDetails = true
	}
	select {
	case c.updateDetailsC <- struct{}{}:
	default:
	}
}
// triggerUpdateTorrents requests an asynchronous refresh of the torrents
// list. Non-blocking; a pending request makes this a no-op.
func (c *Console) triggerUpdateTorrents() {
	select {
	case c.updateTorrentsC <- struct{}{}:
	default:
	}
}
// flags renders the six-character peer flag string shown in the Peers tab:
// download state, upload state, optimistic unchoke, snubbed, peer source,
// and encryption.
func flags(p rpctypes.Peer) string {
	buf := make([]byte, 0, 6)
	// Column 1: our interest vs. the peer choking us.
	switch {
	case p.ClientInterested && p.PeerChoking:
		buf = append(buf, 'd')
	case p.ClientInterested:
		buf = append(buf, 'D')
	case !p.PeerChoking:
		buf = append(buf, 'K')
	default:
		buf = append(buf, ' ')
	}
	// Column 2: the peer's interest vs. us choking the peer.
	switch {
	case p.PeerInterested && p.ClientChoking:
		buf = append(buf, 'u')
	case p.PeerInterested:
		buf = append(buf, 'U')
	case !p.ClientChoking:
		buf = append(buf, '?')
	default:
		buf = append(buf, ' ')
	}
	if p.OptimisticUnchoked {
		buf = append(buf, 'O')
	} else {
		buf = append(buf, ' ')
	}
	if p.Snubbed {
		buf = append(buf, 'S')
	} else {
		buf = append(buf, ' ')
	}
	// Column 5: where the peer address came from.
	switch p.Source {
	case "DHT":
		buf = append(buf, 'H')
	case "PEX":
		buf = append(buf, 'X')
	case "INCOMING":
		buf = append(buf, 'I')
	case "MANUAL":
		buf = append(buf, 'M')
	default:
		buf = append(buf, ' ')
	}
	// Column 6: stream encryption takes precedence over handshake-only.
	switch {
	case p.EncryptedStream:
		buf = append(buf, 'E')
	case p.EncryptedHandshake:
		buf = append(buf, 'e')
	default:
		buf = append(buf, ' ')
	}
	return string(buf)
}
// getProgress returns the torrent's completion percentage (0-100). The metric
// depends on status: checked pieces while verifying, allocated bytes while
// allocating, owned pieces otherwise. Returns 0 when no pieces are known yet.
func getProgress(stats *rpctypes.Stats) int {
	if stats.Pieces.Total == 0 {
		return 0
	}
	switch stats.Status {
	case "Verifying":
		return int(stats.Pieces.Checked * 100 / stats.Pieces.Total)
	case "Allocating":
		return int(stats.Bytes.Allocated * 100 / stats.Bytes.Total)
	}
	return int(stats.Pieces.Have * 100 / stats.Pieces.Total)
}
// getRatio returns the upload/download ratio, or 0 when nothing has been
// downloaded yet (avoids division by zero).
func getRatio(stats *rpctypes.Stats) float64 {
	if stats.Bytes.Downloaded <= 0 {
		return 0
	}
	return float64(stats.Bytes.Uploaded) / float64(stats.Bytes.Downloaded)
}
// getSize formats the torrent's total size in bytes, KiB or MiB depending on
// magnitude.
func getSize(stats *rpctypes.Stats) string {
	total := stats.Bytes.Total
	if total < 1<<10 {
		return fmt.Sprintf("%d bytes", total)
	}
	if total < 1<<20 {
		return fmt.Sprintf("%d KiB", total/(1<<10))
	}
	return fmt.Sprintf("%d MiB", total/(1<<20))
}
// getDownloadSpeed formats the download speed in KiB/s.
func getDownloadSpeed(stats *rpctypes.Stats) string {
	return fmt.Sprintf("%d KiB/s", stats.Speed.Download/1024)
}
// getUploadSpeed formats the upload speed in KiB/s.
func getUploadSpeed(stats *rpctypes.Stats) string {
	return fmt.Sprintf("%d KiB/s", stats.Speed.Upload/1024)
}
// getETA formats the estimated time to completion; -1 means unknown and
// renders as an empty string.
func getETA(stats *rpctypes.Stats) string {
	if stats.ETA == -1 {
		return ""
	}
	return (time.Duration(stats.ETA) * time.Second).String()
}
// FormatStats writes the human readable representation of a torrent stats
// object to v, one "Label: value" line per field.
func FormatStats(stats *rpctypes.Stats, v io.Writer) {
	fmt.Fprintf(v, "Name: %s\n", stats.Name)
	fmt.Fprintf(v, "Private: %v\n", stats.Private)
	// Append the error text to a stopped status so the cause is visible.
	status := stats.Status
	if status == "Stopped" && stats.Error != "" {
		status = status + ": " + stats.Error
	}
	fmt.Fprintf(v, "Status: %s\n", status)
	fmt.Fprintf(v, "Progress: %d\n", getProgress(stats))
	fmt.Fprintf(v, "Ratio: %.2f\n", getRatio(stats))
	fmt.Fprintf(v, "Size: %s\n", getSize(stats))
	fmt.Fprintf(v, "Peers: %d in %d out\n", stats.Peers.Incoming, stats.Peers.Outgoing)
	fmt.Fprintf(v, "Download speed: %11s\n", getDownloadSpeed(stats))
	fmt.Fprintf(v, "Upload speed: %11s\n", getUploadSpeed(stats))
	fmt.Fprintf(v, "ETA: %s\n", getETA(stats))
}
// FormatSessionStats writes the human readable representation of a session
// stats object to v: torrent/peer counts, disk I/O, caches and speeds.
func FormatSessionStats(s *rpctypes.SessionStats, v io.Writer) {
	fmt.Fprintf(v, "Torrents: %d, Peers: %d, Uptime: %s\n", s.Torrents, s.Peers, time.Duration(s.Uptime)*time.Second)
	fmt.Fprintf(v, "BlocklistRules: %d, Updated: %s ago\n", s.BlockListRules, time.Duration(s.BlockListRecency)*time.Second)
	fmt.Fprintf(v, "Reads: %d/s, %dKB/s, Active: %d, Pending: %d\n", s.ReadsPerSecond, s.SpeedRead/1024, s.ReadsActive, s.ReadsPending)
	fmt.Fprintf(v, "Writes: %d/s, %dKB/s, Active: %d, Pending: %d\n", s.WritesPerSecond, s.SpeedWrite/1024, s.WritesActive, s.WritesPending)
	fmt.Fprintf(v, "ReadCache Objects: %d, Size: %dMB, Utilization: %d%%\n", s.ReadCacheObjects, s.ReadCacheSize/(1<<20), s.ReadCacheUtilization)
	fmt.Fprintf(v, "WriteCache Objects: %d, Size: %dMB, PendingKeys: %d\n", s.WriteCacheObjects, s.WriteCacheSize/(1<<20), s.WriteCachePendingKeys)
	fmt.Fprintf(v, "DownloadSpeed: %dKB/s, UploadSpeed: %dKB/s\n", s.SpeedDownload/1024, s.SpeedUpload/1024)
}
Small console improvements

This change brings several small usability improvements to the console UI.
package console
import (
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/cenkalti/rain/internal/jsonutil"
"github.com/cenkalti/rain/internal/rpctypes"
"github.com/cenkalti/rain/rainrpc"
"github.com/jroimartin/gocui"
)
const (
	// Pages of the UI; Console.selectedPage holds one of these.
	torrents int = iota
	sessionStats
	addTorrent
	help
)
const (
	// Detail tabs of the torrents page; Console.selectedTab holds one of these.
	general int = iota
	stats
	trackers
	peers
	webseeds
)
// Console is for drawing a text user interface for a remote Session.
type Console struct {
	client  *rainrpc.Client
	columns []string
	// true when any column requires a per-torrent stats RPC call
	needStats bool
	// protects the mutable fields below against the background updater goroutines
	m sync.Mutex
	// error from listing torrents rpc call
	errTorrents error
	// error from getting stats/trackers/peers/etc...
	errDetails error
	// error from getting session stats
	errSessionStats error
	// id of currently selected torrent
	selectedID string
	// selected detail tab
	selectedTab int
	// selected global page
	selectedPage int
	// offset of the torrents/details split from the middle of the screen
	tabAdjust int
	// fields to hold responses from rpc requests
	torrents     []Torrent
	stats        rpctypes.Stats
	sessionStats rpctypes.SessionStats
	trackers     []rpctypes.Tracker
	peers        []rpctypes.Peer
	webseeds     []rpctypes.Webseed
	// whether details tab is currently updating state
	updatingDetails bool
	// channels for triggering refresh after view update / key events
	updateTorrentsC chan struct{}
	updateDetailsC  chan struct{}
	// state for updater goroutine for updating torrents list and details tab
	stopUpdatingTorrentsC chan struct{}
	updatingTorrents      bool
	// state for updater goroutine for updating session-stats page
	stopUpdatingSessionStatsC chan struct{}
	updatingSessionStats      bool
}
// Torrent pairs a torrent list entry with its optionally-fetched stats.
// Stats is nil when stats were not requested (see Console.needStats) or the
// row was not visible during the last refresh.
type Torrent struct {
	rpctypes.Torrent
	Stats *rpctypes.Stats
}
// New returns a new Console object that uses a RPC client to get information
// from a torrent.Session. columns selects which fields the torrents list
// shows; see getHeader for the supported names.
func New(clt *rainrpc.Client, columns []string) *Console {
	return &Console{
		client:          clt,
		columns:         columns,
		needStats:       columnsNeedStats(columns),
		updateTorrentsC: make(chan struct{}, 1),
		updateDetailsC:  make(chan struct{}, 1),
	}
}
// columnsNeedStats reports whether any of the requested columns requires a
// per-torrent stats RPC call. Columns available directly from the torrent
// list response (ID, Name, InfoHash, Port) do not need stats.
//
// The previous implementation returned true whenever a column differed from
// ANY entry of the static list — which is true for every column (e.g. "ID"
// differs from "Name") — so stats were always fetched. The intended check is
// membership: true iff some column is outside the static set.
// NOTE(review): "#" also needs no stats (getRow renders it from the index);
// kept out of the set to match the original list — confirm.
func columnsNeedStats(columns []string) bool {
	static := map[string]struct{}{
		"ID":       {},
		"Name":     {},
		"InfoHash": {},
		"Port":     {},
	}
	for _, c := range columns {
		if _, ok := static[c]; !ok {
			return true
		}
	}
	return false
}
// Run the UI loop. Blocks until the UI quits; a normal quit (gocui.ErrQuit)
// is returned as nil.
func (c *Console) Run() error {
	g, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		return err
	}
	defer g.Close()
	g.SetManagerFunc(c.layout)
	c.keybindings(g)
	err = g.MainLoop()
	if err == gocui.ErrQuit {
		err = nil
	}
	return err
}
// keybindings registers all key handlers. SetKeybinding errors are ignored
// deliberately: the bindings use fixed keys and view names.
func (c *Console) keybindings(g *gocui.Gui) {
	// Global keys
	_ = g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, c.forceQuit)
	// Quit keys
	_ = g.SetKeybinding("torrents", 'q', gocui.ModNone, c.forceQuit)
	_ = g.SetKeybinding("help", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("session-stats", 'q', gocui.ModNone, c.quit)
	_ = g.SetKeybinding("add-torrent", gocui.KeyCtrlQ, gocui.ModNone, c.quit)
	// Navigation
	_ = g.SetKeybinding("torrents", 'j', gocui.ModNone, c.cursorDown)
	_ = g.SetKeybinding("torrents", gocui.KeyArrowDown, gocui.ModNone, c.cursorDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModNone, c.cursorUp)
	_ = g.SetKeybinding("torrents", gocui.KeyArrowUp, gocui.ModNone, c.cursorUp)
	_ = g.SetKeybinding("torrents", 'j', gocui.ModAlt, c.tabAdjustDown)
	_ = g.SetKeybinding("torrents", 'k', gocui.ModAlt, c.tabAdjustUp)
	_ = g.SetKeybinding("torrents", 'g', gocui.ModNone, c.goTop)
	_ = g.SetKeybinding("torrents", gocui.KeyHome, gocui.ModNone, c.goTop)
	_ = g.SetKeybinding("torrents", 'G', gocui.ModNone, c.goBottom)
	_ = g.SetKeybinding("torrents", gocui.KeyEnd, gocui.ModNone, c.goBottom)
	_ = g.SetKeybinding("torrents", 'a', gocui.ModAlt, c.switchSessionStats)
	_ = g.SetKeybinding("torrents", '?', gocui.ModNone, c.switchHelp)
	// Tabs
	_ = g.SetKeybinding("torrents", 'g', gocui.ModAlt, c.switchGeneral)
	_ = g.SetKeybinding("torrents", 's', gocui.ModAlt, c.switchStats)
	_ = g.SetKeybinding("torrents", 't', gocui.ModAlt, c.switchTrackers)
	_ = g.SetKeybinding("torrents", 'p', gocui.ModAlt, c.switchPeers)
	_ = g.SetKeybinding("torrents", 'w', gocui.ModAlt, c.switchWebseeds)
	// Torrent control
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModNone, c.startTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlS, gocui.ModAlt, c.stopTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlR, gocui.ModNone, c.removeTorrent)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModAlt, c.announce)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlV, gocui.ModNone, c.verify)
	_ = g.SetKeybinding("torrents", gocui.KeyCtrlA, gocui.ModNone, c.switchAddTorrent)
	_ = g.SetKeybinding("add-torrent", gocui.KeyEnter, gocui.ModNone, c.addTorrentHandleEnter)
}
// startUpdatingTorrents launches the background loop that refreshes the
// torrents list and details pane. No-op if it is already running.
func (c *Console) startUpdatingTorrents(g *gocui.Gui) {
	if c.updatingTorrents {
		return
	}
	c.updatingTorrents = true
	c.stopUpdatingTorrentsC = make(chan struct{})
	go c.updateTorrentsAndDetailsLoop(g, c.stopUpdatingTorrentsC)
}
// stopUpdatingTorrents stops the torrents updater loop by closing its stop
// channel. No-op if it is not running.
func (c *Console) stopUpdatingTorrents() {
	if !c.updatingTorrents {
		return
	}
	c.updatingTorrents = false
	close(c.stopUpdatingTorrentsC)
}
// startUpdatingSessionStats launches the background loop that refreshes the
// session-stats page. No-op if it is already running.
func (c *Console) startUpdatingSessionStats(g *gocui.Gui) {
	if c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = true
	c.stopUpdatingSessionStatsC = make(chan struct{})
	go c.updateSessionStatsLoop(g, c.stopUpdatingSessionStatsC)
}
// stopUpdatingSessionStats stops the session-stats updater loop by closing
// its stop channel. No-op if it is not running.
func (c *Console) stopUpdatingSessionStats() {
	if !c.updatingSessionStats {
		return
	}
	c.updatingSessionStats = false
	close(c.stopUpdatingSessionStatsC)
}
// layout is the gocui manager function. It starts/stops the background
// updaters to match the selected page, deletes views belonging to other
// pages, then draws the current page and focuses its view.
func (c *Console) layout(g *gocui.Gui) error {
	err := c.drawTitle(g)
	if err != nil {
		return err
	}
	if c.selectedPage == torrents {
		c.startUpdatingTorrents(g)
	} else {
		c.stopUpdatingTorrents()
	}
	if c.selectedPage == sessionStats {
		c.startUpdatingSessionStats(g)
	} else {
		c.stopUpdatingSessionStats()
		_ = g.DeleteView("session-stats")
	}
	if c.selectedPage != help {
		_ = g.DeleteView("help")
	}
	if c.selectedPage != addTorrent {
		_ = g.DeleteView("add-torrent")
		// Terminal cursor is only shown while editing in the add-torrent view.
		g.Cursor = false
	}
	switch c.selectedPage {
	case torrents:
		err = c.drawTorrents(g)
		if err != nil {
			return err
		}
		err = c.drawDetails(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("torrents")
	case sessionStats:
		err = c.drawSessionStats(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("session-stats")
	case help:
		err = c.drawHelp(g)
		if err != nil {
			return err
		}
		_, err = g.SetCurrentView("help")
	case addTorrent:
		err = c.drawAddTorrent(g)
		if err != nil {
			return err
		}
		g.Cursor = true
		_, err = g.SetCurrentView("add-torrent")
	}
	return err
}
// drawTitle creates the full-screen title view on the first call; the title
// text is set only once (gocui.ErrUnknownView means the view is new).
func (c *Console) drawTitle(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("title", -1, 0, maxX, maxY)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Title = "Rain by put.io [" + c.client.Addr() + "] (Press '?' for help)"
	}
	return nil
}
// drawHelp draws the help page listing all key bindings. The view is created
// on first call and cleared before being rewritten on later calls.
func (c *Console) drawHelp(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("help", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Help"
	} else {
		v.Clear()
	}
	fmt.Fprintln(v, "         q Quit")
	fmt.Fprintln(v, "    j|down move down")
	fmt.Fprintln(v, "      k|up move up")
	fmt.Fprintln(v, "     alt+j move tab separator down")
	fmt.Fprintln(v, "     alt+k move tab separator up")
	fmt.Fprintln(v, "    g|home Go to top")
	fmt.Fprintln(v, "     G|end Go to bottom")
	fmt.Fprintln(v, "     alt+a show session stats page")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, "     alt+g switch to General info tab")
	fmt.Fprintln(v, "     alt+s switch to Stats tab")
	fmt.Fprintln(v, "     alt+t switch to Trackers tab")
	fmt.Fprintln(v, "     alt+p switch to Peers tab")
	fmt.Fprintln(v, "     alt+w switch to Webseeds tab")
	fmt.Fprintln(v, "")
	fmt.Fprintln(v, "    ctrl+s Start torrent")
	fmt.Fprintln(v, "ctrl+alt+s Stop torrent")
	fmt.Fprintln(v, "    ctrl+R Remove torrent")
	fmt.Fprintln(v, "ctrl+alt+a Announce torrent")
	fmt.Fprintln(v, "    ctrl+v Verify torrent")
	fmt.Fprintln(v, "    ctrl+a Add new torrent")
	return nil
}
// drawAddTorrent draws the editable add-torrent input view; its content is
// handled by addTorrentHandleEnter.
func (c *Console) drawAddTorrent(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("add-torrent", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Add Torrent (Press ctrl-q to close window)"
		v.Editable = true
		v.Wrap = true
	}
	return nil
}
// drawSessionStats draws the session-stats page: a placeholder on first call,
// then either the last fetch error or the formatted stats.
func (c *Console) drawSessionStats(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	v, err := g.SetView("session-stats", 5, 2, maxX-6, maxY-3)
	if err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = true
		v.Title = "Session Stats"
		fmt.Fprintln(v, "loading...")
	} else {
		v.Clear()
		if c.errSessionStats != nil {
			fmt.Fprintln(v, "error:", c.errSessionStats)
			return nil
		}
		FormatSessionStats(&c.sessionStats, v)
	}
	return nil
}
// getHeader renders the column-titles row for the torrents list, using the
// same per-column widths as getRow. Panics on an unknown column name.
func getHeader(columns []string) string {
	widths := map[string]string{
		"#":        "%3s",
		"ID":       "%-22s",
		"InfoHash": "%-40s",
		"Port":     "%5s",
		"Status":   "%-11s",
		"Speed":    "%8s",
		"ETA":      "%8s",
		"Progress": "%8s",
		"Ratio":    "%5s",
		"Size":     "%8s",
	}
	var b strings.Builder
	for i, column := range columns {
		if i > 0 {
			b.WriteString("  ")
		}
		// "Name" is the only unpadded (variable width) column.
		if column == "Name" {
			b.WriteString(column)
			continue
		}
		format, ok := widths[column]
		if !ok {
			panic(fmt.Sprintf("unsupported column %s", column))
		}
		fmt.Fprintf(&b, format, column)
	}
	return b.String()
}
// getRow renders one torrents-list row (newline-terminated) for the given
// torrent. Column widths match getHeader. Stats may be nil (not fetched for
// this row); stat-derived columns then render as blanks. Panics on an unknown
// column name.
func getRow(columns []string, t Torrent, index int) string {
	row := ""
	for i, column := range columns {
		if i != 0 {
			row += "  "
		}
		stats := t.Stats
		switch column {
		case "#":
			// 1-based position in the list, not the torrent ID.
			row += fmt.Sprintf("%3d", index+1)
		case "ID":
			row += t.ID
		case "Name":
			row += t.Name
		case "InfoHash":
			row += t.InfoHash
		case "Port":
			row += fmt.Sprintf("%5d", t.Port)
		case "Status":
			if stats == nil {
				row += fmt.Sprintf("%-11s", "")
			} else {
				status := stats.Status
				// Shorten to fit the 11-character column.
				if status == "Downloading Metadata" {
					status = "Downloading"
				}
				row += fmt.Sprintf("%-11s", status)
			}
		case "Speed":
			// Show upload speed while seeding, download speed otherwise.
			switch {
			case stats == nil:
				row += fmt.Sprintf("%8s", "")
			case stats.Status == "Seeding":
				row += fmt.Sprintf("%6d K", stats.Speed.Upload/1024)
			default:
				row += fmt.Sprintf("%6d K", stats.Speed.Download/1024)
			}
		case "ETA":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%8s", getETA(stats))
			}
		case "Progress":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%8d", getProgress(stats))
			}
		case "Ratio":
			if stats == nil {
				row += fmt.Sprintf("%5s", "")
			} else {
				row += fmt.Sprintf("%5.2f", getRatio(stats))
			}
		case "Size":
			if stats == nil {
				row += fmt.Sprintf("%8s", "")
			} else {
				row += fmt.Sprintf("%6d M", stats.Bytes.Total/(1<<20))
			}
		default:
			panic(fmt.Sprintf("unsupported column %s", column))
		}
	}
	return row + "\n"
}
// drawTorrents renders the header and list views of the torrents page and
// keeps the cursor in sync with the selected torrent: if the list changed
// under the cursor, the cursor follows the selected ID; otherwise the
// selection follows the cursor.
func (c *Console) drawTorrents(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	// The split can be pushed to the top edge; nothing to draw then.
	if split <= 0 {
		return nil
	}
	if v, err := g.SetView("torrents-header", -1, 0, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		fmt.Fprint(v, getHeader(c.columns))
	}
	if split <= 1 {
		return nil
	}
	if v, err := g.SetView("torrents", -1, 1, maxX, split); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Frame = false
		v.Highlight = true
		v.SelBgColor = gocui.ColorGreen
		v.SelFgColor = gocui.ColorBlack
		v.Title = "Rain"
		fmt.Fprintln(v, "loading torrents...")
	} else {
		v.Clear()
		if c.errTorrents != nil {
			fmt.Fprintln(v, "error:", c.errTorrents)
			return nil
		}
		// Render all rows and remember where the selected torrent ended up.
		selectedIDrow := -1
		for i, t := range c.torrents {
			fmt.Fprint(v, getRow(c.columns, t, i))
			if t.ID == c.selectedID {
				selectedIDrow = i
			}
		}
		_, cy := v.Cursor()
		_, oy := v.Origin()
		selectedRow := cy + oy
		if selectedRow < len(c.torrents) {
			if c.torrents[selectedRow].ID != c.selectedID && selectedIDrow != -1 {
				// List order changed: move the cursor to the selected torrent.
				_ = v.SetCursor(0, selectedIDrow)
			} else {
				c.setSelectedID(c.torrents[selectedRow].ID)
			}
		}
	}
	return nil
}
// drawDetails renders the bottom pane of the torrents page according to the
// selected tab. Shows placeholders while data is loading/refreshing and the
// last fetch error when there is one.
func (c *Console) drawDetails(g *gocui.Gui) error {
	c.m.Lock()
	defer c.m.Unlock()
	maxX, maxY := g.Size()
	halfY := maxY / 2
	split := halfY + c.tabAdjust
	if v, err := g.SetView("details", -1, split, maxX, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Wrap = true
		fmt.Fprintln(v, "loading details...")
	} else {
		v.Clear()
		switch c.selectedTab {
		case general:
			v.Title = "General Info"
		case stats:
			v.Title = "Stats"
		case trackers:
			v.Title = "Trackers"
		case peers:
			v.Title = "Peers"
		case webseeds:
			v.Title = "WebSeeds"
		}
		if c.selectedID == "" {
			return nil
		}
		if c.updatingDetails {
			fmt.Fprintln(v, "refreshing...")
			return nil
		}
		if c.errDetails != nil {
			fmt.Fprintln(v, "error:", c.errDetails)
			return nil
		}
		switch c.selectedTab {
		case general:
			FormatStats(&c.stats, v)
		case stats:
			// Raw stats object as pretty-printed JSON.
			b, err := jsonutil.MarshalCompactPretty(c.stats)
			if err != nil {
				fmt.Fprintln(v, "error:", err)
			} else {
				fmt.Fprintln(v, string(b))
			}
		case trackers:
			for i, t := range c.trackers {
				fmt.Fprintf(v, "#%d %s\n", i+1, t.URL)
				switch t.Status {
				case "Not working":
					errStr := t.Error
					if t.ErrorUnknown {
						errStr = errStr + " (" + t.ErrorInternal + ")"
					}
					fmt.Fprintf(v, "  Status: %s, Error: %s\n", t.Status, errStr)
				default:
					if t.Warning != "" {
						fmt.Fprintf(v, "  Status: %s, Seeders: %d, Leechers: %d Warning: %s\n", t.Status, t.Seeders, t.Leechers, t.Warning)
					} else {
						fmt.Fprintf(v, "  Status: %s, Seeders: %d, Leechers: %d\n", t.Status, t.Seeders, t.Leechers)
					}
				}
				var nextAnnounce string
				if t.NextAnnounce.IsZero() {
					nextAnnounce = "Unknown"
				} else {
					nextAnnounce = t.NextAnnounce.Time.Format(time.RFC3339)
				}
				fmt.Fprintf(v, "  Last announce: %s, Next announce: %s\n", t.LastAnnounce.Time.Format(time.RFC3339), nextAnnounce)
			}
		case peers:
			format := "%2s %21s %7s %8s %6s %s\n"
			fmt.Fprintf(v, format, "#", "Addr", "Flags", "Download", "Upload", "Client")
			for i, p := range c.peers {
				num := fmt.Sprintf("%d", i+1)
				// Speeds shown in KiB/s; blank when idle.
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var ul string
				if p.UploadSpeed > 0 {
					ul = fmt.Sprintf("%d", p.UploadSpeed/1024)
				}
				fmt.Fprintf(v, format, num, p.Addr, flags(p), dl, ul, p.Client)
			}
		case webseeds:
			format := "%2s %40s %8s %s\n"
			fmt.Fprintf(v, format, "#", "URL", "Speed", "Error")
			for i, p := range c.webseeds {
				num := fmt.Sprintf("%d", i+1)
				var dl string
				if p.DownloadSpeed > 0 {
					dl = fmt.Sprintf("%d", p.DownloadSpeed/1024)
				}
				var errstr string
				if p.Error != "" {
					errstr = p.Error
				}
				fmt.Fprintf(v, format, num, p.URL, dl, errstr)
			}
		}
	}
	return nil
}
// updateTorrentsAndDetailsLoop refreshes the torrents list and details pane
// once per second and also services on-demand refresh requests, until stop is
// closed. Details updates run in their own goroutine so a slow RPC does not
// block the list refresh.
func (c *Console) updateTorrentsAndDetailsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.triggerUpdateTorrents()
	for {
		select {
		case <-ticker.C:
			c.triggerUpdateTorrents()
			c.triggerUpdateDetails(false)
		case <-c.updateTorrentsC:
			c.updateTorrents(g)
		case <-c.updateDetailsC:
			go c.updateDetails(g)
		case <-stop:
			return
		}
	}
}
// updateSessionStatsLoop refreshes the session-stats page immediately and
// then once per second, until stop is closed.
func (c *Console) updateSessionStatsLoop(g *gocui.Gui, stop chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	c.updateSessionStats(g)
	for {
		select {
		case <-ticker.C:
			c.updateSessionStats(g)
		case <-stop:
			return
		}
	}
}
// updateTorrents fetches the torrent list via RPC, sorted by add time (ID as
// tie-breaker), fetches per-torrent stats in parallel for the rows currently
// visible, then publishes the result and schedules a redraw.
func (c *Console) updateTorrents(g *gocui.Gui) {
	rpcTorrents, err := c.client.ListTorrents()
	sort.Slice(rpcTorrents, func(i, j int) bool {
		a, b := rpcTorrents[i], rpcTorrents[j]
		if a.AddedAt.Equal(b.AddedAt.Time) {
			return a.ID < b.ID
		}
		return a.AddedAt.Time.Before(b.AddedAt.Time)
	})
	torrents := make([]Torrent, 0, len(rpcTorrents))
	for _, t := range rpcTorrents {
		torrents = append(torrents, Torrent{Torrent: t})
	}
	// Get torrent stats in parallel
	if c.needStats {
		inside := c.rowsInsideView(g)
		var wg sync.WaitGroup
		for _, i := range inside {
			if i < len(torrents) {
				t := &torrents[i]
				wg.Add(1)
				go func(t *Torrent) {
					// Best-effort: on error Stats stays nil and getRow
					// renders blanks for the stat-derived columns.
					t.Stats, _ = c.client.GetTorrentStats(t.ID)
					wg.Done()
				}(t)
			}
		}
		wg.Wait()
	}
	c.m.Lock()
	c.torrents = torrents
	c.errTorrents = err
	// Keep the selection valid: clear it when the list is empty, default to
	// the first torrent when nothing is selected yet.
	if len(c.torrents) == 0 {
		c.setSelectedID("")
	} else if c.selectedID == "" {
		c.setSelectedID(c.torrents[0].ID)
	}
	c.m.Unlock()
	g.Update(c.drawTorrents)
}
// rowsInsideView reports the torrent-list row indexes currently visible in
// the "torrents" view, derived from its scroll origin and the position of
// the torrents/details split.
func (c *Console) rowsInsideView(g *gocui.Gui) []int {
	c.m.Lock()
	defer c.m.Unlock()
	v, err := g.View("torrents")
	if err != nil {
		return nil
	}
	if c.errTorrents != nil {
		return nil
	}
	_, maxY := g.Size()
	split := maxY/2 + c.tabAdjust
	_, oy := v.Origin()
	var rows []int
	for row := oy; row < oy+split-2; row++ {
		rows = append(rows, row)
	}
	return rows
}
// updateDetails fetches the data for the currently selected detail tab over
// RPC and schedules a redraw of the details pane. It runs outside the UI
// goroutine; shared Console state is only touched while holding c.m.
func (c *Console) updateDetails(g *gocui.Gui) {
	// Snapshot the selection under the lock; the RPC calls below must not
	// hold it. (Previously selectedTab was read without the mutex.)
	c.m.Lock()
	selectedID := c.selectedID
	selectedTab := c.selectedTab
	c.m.Unlock()
	if selectedID == "" {
		return
	}
	switch selectedTab {
	case general, stats:
		stats, err := c.client.GetTorrentStats(selectedID)
		c.m.Lock()
		if err == nil {
			// stats is nil when the RPC fails; only dereference on success.
			c.stats = *stats
		}
		c.errDetails = err
		c.m.Unlock()
	case trackers:
		trackers, err := c.client.GetTorrentTrackers(selectedID)
		sort.Slice(trackers, func(i, j int) bool { return trackers[i].URL < trackers[j].URL })
		c.m.Lock()
		c.trackers = trackers
		c.errDetails = err
		c.m.Unlock()
	case peers:
		peers, err := c.client.GetTorrentPeers(selectedID)
		// Sort by connection time, address as tie-breaker, for a stable listing.
		sort.Slice(peers, func(i, j int) bool {
			a, b := peers[i], peers[j]
			if a.ConnectedAt.Equal(b.ConnectedAt.Time) {
				return a.Addr < b.Addr
			}
			return a.ConnectedAt.Time.Before(b.ConnectedAt.Time)
		})
		c.m.Lock()
		c.peers = peers
		c.errDetails = err
		c.m.Unlock()
	case webseeds:
		webseeds, err := c.client.GetTorrentWebseeds(selectedID)
		sort.Slice(webseeds, func(i, j int) bool {
			return webseeds[i].URL < webseeds[j].URL
		})
		c.m.Lock()
		c.webseeds = webseeds
		c.errDetails = err
		c.m.Unlock()
	}
	c.m.Lock()
	defer c.m.Unlock()
	c.updatingDetails = false
	// The selection may have changed while we were fetching; skip the redraw
	// so stale data is not shown for the new selection.
	if selectedID != c.selectedID {
		return
	}
	g.Update(c.drawDetails)
}
// updateSessionStats fetches session-wide stats over RPC and schedules a
// redraw of the session-stats view.
func (c *Console) updateSessionStats(g *gocui.Gui) {
	stats, err := c.client.GetSessionStats()
	c.m.Lock()
	defer c.m.Unlock()
	if err == nil {
		// stats is nil when the RPC fails; only dereference on success.
		c.sessionStats = *stats
	}
	c.errSessionStats = err
	g.Update(c.drawSessionStats)
}
// quit leaves the current sub-page (help, session stats, add-torrent) and
// returns to the torrents page. It does not exit the program; see forceQuit.
func (c *Console) quit(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = torrents
	return nil
}
// forceQuit terminates the gocui main loop by returning gocui.ErrQuit.
func (c *Console) forceQuit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrQuit
}
// isURI reports whether arg is a magnet link or an HTTP(S) URL, as opposed to
// a local file path.
func isURI(arg string) bool {
	for _, prefix := range []string{"magnet:", "http://", "https://"} {
		if strings.HasPrefix(arg, prefix) {
			return true
		}
	}
	return false
}
// addTorrentHandleEnter is the Enter handler for the add-torrent view. Each
// non-empty line of the buffer is added as a torrent: magnet/http/https lines
// via AddURI, anything else treated as a local torrent file path. On error the
// view is reset to show the error; on success we return to the torrents page.
func (c *Console) addTorrentHandleEnter(g *gocui.Gui, v *gocui.View) error {
	handleError := func(err error) error {
		v.Clear()
		_ = v.SetCursor(0, 0)
		fmt.Fprintln(v, "error:", err)
		return nil
	}
	for _, line := range v.BufferLines() {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		var err error
		if isURI(line) {
			_, err = c.client.AddURI(line, nil)
		} else {
			var f *os.File
			f, err = os.Open(line)
			if err != nil {
				return handleError(err)
			}
			_, err = c.client.AddTorrent(f, nil)
			// Best-effort close; the add result is what is reported.
			_ = f.Close()
		}
		if err != nil {
			return handleError(err)
		}
	}
	v.Clear()
	c.selectedPage = torrents
	return nil
}
// switchRow moves the cursor/selection of the torrents view to the given row,
// clamping it into [0, len(c.torrents)-1] and scrolling the view origin when
// the target row is outside the visible window. Callers hold c.m.
func (c *Console) switchRow(v *gocui.View, row int) error {
	switch {
	case len(c.torrents) == 0:
		return nil
	case row < 0:
		row = 0
	case row >= len(c.torrents):
		row = len(c.torrents) - 1
	}
	_, cy := v.Cursor()
	_, oy := v.Origin()
	_, height := v.Size()
	currentRow := oy + cy
	if len(c.torrents) > height {
		if row > currentRow {
			// scroll down
			if row >= oy+height {
				// move origin
				_ = v.SetOrigin(0, row-height+1)
				_ = v.SetCursor(0, height-1)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		} else {
			// scroll up
			if row < oy {
				// move origin
				_ = v.SetOrigin(0, row)
				_ = v.SetCursor(0, 0)
			} else {
				_ = v.SetCursor(0, row-oy)
			}
		}
	} else {
		// Everything fits in the view; only the cursor moves.
		_ = v.SetOrigin(0, 0)
		_ = v.SetCursor(0, row)
	}
	c.setSelectedID(c.torrents[row].ID)
	return nil
}
// cursorDown moves the selection one row down, stopping at the last torrent.
func (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	row := cy + oy + 1
	if row == len(c.torrents) {
		return nil
	}
	return c.switchRow(v, row)
}
// cursorUp moves the selection one row up, stopping at the first torrent.
func (c *Console) cursorUp(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	_, cy := v.Cursor()
	_, oy := v.Origin()
	row := cy + oy - 1
	if row == -1 {
		return nil
	}
	return c.switchRow(v, row)
}
// goTop selects the first torrent in the list.
func (c *Console) goTop(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) == 0 {
		return nil
	}
	return c.switchRow(v, 0)
}
// goBottom selects the last torrent in the list.
func (c *Console) goBottom(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	defer c.m.Unlock()
	if len(c.torrents) == 0 {
		return nil
	}
	return c.switchRow(v, len(c.torrents)-1)
}
// removeTorrent removes the currently selected torrent via RPC and triggers a
// refresh of the torrents list.
func (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.RemoveTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateTorrents()
	return nil
}
// setSelectedID records the current selection and, when it actually changed,
// triggers a refresh of the details pane. Callers hold c.m.
func (c *Console) setSelectedID(id string) {
	changed := id != c.selectedID
	c.selectedID = id
	if changed {
		c.triggerUpdateDetails(true)
	}
}
// startTorrent starts the currently selected torrent via RPC and refreshes
// the details pane.
func (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.StartTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// stopTorrent stops the currently selected torrent via RPC and refreshes the
// details pane.
func (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.StopTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// announce forces a tracker announce for the currently selected torrent and
// refreshes the details pane.
func (c *Console) announce(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.AnnounceTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// verify starts data verification for the currently selected torrent and
// refreshes the details pane.
func (c *Console) verify(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	id := c.selectedID
	c.m.Unlock()
	err := c.client.VerifyTorrent(id)
	if err != nil {
		return err
	}
	c.triggerUpdateDetails(true)
	return nil
}
// tabAdjustDown moves the torrents/details split one row down, bounded by the
// window height.
// NOTE(review): c.tabAdjust is modified here without holding c.m while other
// methods read it under the lock — confirm whether gocui serializes handlers.
func (c *Console) tabAdjustDown(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	halfY := maxY / 2
	if c.tabAdjust < halfY-1 {
		c.tabAdjust++
	}
	return nil
}
// tabAdjustUp moves the torrents/details split one row up, bounded by the
// window height.
func (c *Console) tabAdjustUp(g *gocui.Gui, v *gocui.View) error {
	_, maxY := g.Size()
	halfY := maxY / 2
	if c.tabAdjust > -halfY+1 {
		c.tabAdjust--
	}
	return nil
}
// switchGeneral selects the General info detail tab and refreshes it.
func (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = general
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchStats selects the Stats detail tab and refreshes it.
func (c *Console) switchStats(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = stats
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchTrackers selects the Trackers detail tab and refreshes it.
func (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = trackers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchPeers selects the Peers detail tab and refreshes it.
func (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = peers
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchWebseeds selects the Webseeds detail tab and refreshes it.
func (c *Console) switchWebseeds(g *gocui.Gui, v *gocui.View) error {
	c.m.Lock()
	c.selectedTab = webseeds
	c.m.Unlock()
	c.triggerUpdateDetails(true)
	return nil
}
// switchHelp shows the help page.
func (c *Console) switchHelp(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = help
	return nil
}
// switchSessionStats shows the session-stats page.
func (c *Console) switchSessionStats(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = sessionStats
	return nil
}
// switchAddTorrent shows the add-torrent input page.
func (c *Console) switchAddTorrent(g *gocui.Gui, v *gocui.View) error {
	c.selectedPage = addTorrent
	return nil
}
// triggerUpdateDetails requests an asynchronous refresh of the details pane.
// The send is non-blocking: if a refresh is already pending the signal is
// dropped. When clear is true the pane is marked as refreshing so drawDetails
// shows "refreshing..." until new data arrives.
func (c *Console) triggerUpdateDetails(clear bool) {
	if clear {
		c.updatingDetails = true
	}
	select {
	case c.updateDetailsC <- struct{}{}:
	default:
	}
}
// triggerUpdateTorrents requests an asynchronous refresh of the torrents
// list. Non-blocking; a pending request makes this a no-op.
func (c *Console) triggerUpdateTorrents() {
	select {
	case c.updateTorrentsC <- struct{}{}:
	default:
	}
}
// flags renders the peer's state as a fixed-width 6-character flag string
// in the style of classic torrent clients:
//
//	1: D/d/K — download state (downloading / interested but choked /
//	           unchoked by peer but not interested)
//	2: U/u/? — upload state (uploading / peer interested but choked /
//	           peer unchoked but not interested)
//	3: O     — peer is optimistically unchoked
//	4: S     — peer is snubbed
//	5: H/X/I/M — peer source (DHT / PEX / incoming / manual)
//	6: E/e   — encrypted stream / encrypted handshake only
func flags(p rpctypes.Peer) string {
	var b strings.Builder
	b.Grow(6)

	// Download direction.
	switch {
	case p.ClientInterested && !p.PeerChoking:
		b.WriteByte('D')
	case p.ClientInterested:
		b.WriteByte('d')
	case !p.PeerChoking:
		b.WriteByte('K')
	default:
		b.WriteByte(' ')
	}

	// Upload direction.
	switch {
	case p.PeerInterested && !p.ClientChoking:
		b.WriteByte('U')
	case p.PeerInterested:
		b.WriteByte('u')
	case !p.ClientChoking:
		b.WriteByte('?')
	default:
		b.WriteByte(' ')
	}

	if p.OptimisticUnchoked {
		b.WriteByte('O')
	} else {
		b.WriteByte(' ')
	}

	if p.Snubbed {
		b.WriteByte('S')
	} else {
		b.WriteByte(' ')
	}

	// How we learned about this peer.
	switch p.Source {
	case "DHT":
		b.WriteByte('H')
	case "PEX":
		b.WriteByte('X')
	case "INCOMING":
		b.WriteByte('I')
	case "MANUAL":
		b.WriteByte('M')
	default:
		b.WriteByte(' ')
	}

	// Encryption level; full-stream encryption implies at least 'E'.
	switch {
	case p.EncryptedStream:
		b.WriteByte('E')
	case p.EncryptedHandshake:
		b.WriteByte('e')
	default:
		b.WriteByte(' ')
	}
	return b.String()
}
// getProgress returns the torrent's completion percentage (0-100) for the
// dimension relevant to its current state: pieces checked while verifying,
// bytes allocated while allocating, pieces downloaded otherwise.
// Returns 0 when no piece information is available yet.
func getProgress(stats *rpctypes.Stats) int {
	if stats.Pieces.Total > 0 {
		switch stats.Status {
		case "Verifying":
			return int(stats.Pieces.Checked * 100 / stats.Pieces.Total)
		case "Allocating":
			return int(stats.Bytes.Allocated * 100 / stats.Bytes.Total)
		default:
			return int(stats.Pieces.Have * 100 / stats.Pieces.Total)
		}
	}
	return 0
}
// getRatio returns the share ratio (uploaded/downloaded), or 0 when nothing
// has been downloaded yet to avoid dividing by zero.
func getRatio(stats *rpctypes.Stats) float64 {
	if stats.Bytes.Downloaded > 0 {
		return float64(stats.Bytes.Uploaded) / float64(stats.Bytes.Downloaded)
	}
	return 0
}
// getSize returns a human readable total size of the torrent, using the
// largest binary unit (bytes, KiB, MiB, GiB) that keeps the number short.
//
// Fix: the previous version topped out at MiB, so multi-gigabyte torrents
// were displayed as unwieldy values like "716800 MiB"; a GiB tier is added.
func getSize(stats *rpctypes.Stats) string {
	total := stats.Bytes.Total
	switch {
	case total < 1<<10:
		return fmt.Sprintf("%d bytes", total)
	case total < 1<<20:
		return fmt.Sprintf("%d KiB", total/(1<<10))
	case total < 1<<30:
		return fmt.Sprintf("%d MiB", total/(1<<20))
	default:
		return fmt.Sprintf("%d GiB", total/(1<<30))
	}
}
// getDownloadSpeed formats the current download speed in KiB/s.
func getDownloadSpeed(stats *rpctypes.Stats) string {
	return fmt.Sprintf("%d KiB/s", stats.Speed.Download/(1<<10))
}
// getUploadSpeed formats the current upload speed in KiB/s.
func getUploadSpeed(stats *rpctypes.Stats) string {
	return fmt.Sprintf("%d KiB/s", stats.Speed.Upload/(1<<10))
}
// getETA returns the estimated remaining time as a duration string, or the
// empty string when the ETA is unknown (signalled by -1).
func getETA(stats *rpctypes.Stats) string {
	if stats.ETA == -1 {
		return ""
	}
	return (time.Duration(stats.ETA) * time.Second).String()
}
// FormatStats writes a human readable, multi-line summary of a torrent's
// stats to v. When the torrent is stopped because of an error, the error
// text is appended to the status line.
func FormatStats(stats *rpctypes.Stats, v io.Writer) {
	status := stats.Status
	if status == "Stopped" && stats.Error != "" {
		status += ": " + stats.Error
	}
	fmt.Fprintf(v, "Name: %s\n", stats.Name)
	fmt.Fprintf(v, "Private: %v\n", stats.Private)
	fmt.Fprintf(v, "Status: %s\n", status)
	fmt.Fprintf(v, "Progress: %d%%\n", getProgress(stats))
	fmt.Fprintf(v, "Ratio: %.2f\n", getRatio(stats))
	fmt.Fprintf(v, "Size: %s\n", getSize(stats))
	fmt.Fprintf(v, "Peers: %d in / %d out\n", stats.Peers.Incoming, stats.Peers.Outgoing)
	fmt.Fprintf(v, "Download speed: %11s\n", getDownloadSpeed(stats))
	fmt.Fprintf(v, "Upload speed: %11s\n", getUploadSpeed(stats))
	fmt.Fprintf(v, "ETA: %s\n", getETA(stats))
}
// FormatSessionStats writes a human readable, multi-line summary of
// session-wide stats (torrent/peer counts, disk I/O, caches, speeds) to v.
// NOTE(review): the labels say "KB"/"MB" but the values are divided by
// 1024 / 2^20 (i.e. KiB/MiB) — confirm which unit the UI intends to show.
func FormatSessionStats(s *rpctypes.SessionStats, v io.Writer) {
	fmt.Fprintf(v, "Torrents: %d, Peers: %d, Uptime: %s\n", s.Torrents, s.Peers, time.Duration(s.Uptime)*time.Second)
	fmt.Fprintf(v, "BlocklistRules: %d, Updated: %s ago\n", s.BlockListRules, time.Duration(s.BlockListRecency)*time.Second)
	fmt.Fprintf(v, "Reads: %d/s, %dKB/s, Active: %d, Pending: %d\n", s.ReadsPerSecond, s.SpeedRead/1024, s.ReadsActive, s.ReadsPending)
	fmt.Fprintf(v, "Writes: %d/s, %dKB/s, Active: %d, Pending: %d\n", s.WritesPerSecond, s.SpeedWrite/1024, s.WritesActive, s.WritesPending)
	fmt.Fprintf(v, "ReadCache Objects: %d, Size: %dMB, Utilization: %d%%\n", s.ReadCacheObjects, s.ReadCacheSize/(1<<20), s.ReadCacheUtilization)
	fmt.Fprintf(v, "WriteCache Objects: %d, Size: %dMB, PendingKeys: %d\n", s.WriteCacheObjects, s.WriteCacheSize/(1<<20), s.WriteCachePendingKeys)
	fmt.Fprintf(v, "DownloadSpeed: %dKB/s, UploadSpeed: %dKB/s\n", s.SpeedDownload/1024, s.SpeedUpload/1024)
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package derrors defines internal error values to categorize the different
// types of error semantics we support.
package derrors
import (
"errors"
"fmt"
"net/http"
)
//lint:file-ignore ST1012 prefixing error values with Err would stutter
var (
	// HasIncompletePackages indicates a module containing packages that
	// were processed with a 60x error code.
	HasIncompletePackages = errors.New("has incomplete packages")
	// NotFound indicates that a requested entity was not found (HTTP 404).
	NotFound = errors.New("not found")
	// InvalidArgument indicates that the input into the request is invalid in
	// some way (HTTP 400).
	InvalidArgument = errors.New("invalid argument")
	// BadModule indicates a problem with a module.
	BadModule = errors.New("bad module")
	// Excluded indicates that the module is excluded. (See internal/postgres/excluded.go.)
	Excluded = errors.New("excluded")
	// AlternativeModule indicates that the path of the module zip file differs
	// from the path specified in the go.mod file.
	AlternativeModule = errors.New("alternative module")
	// Unknown indicates that the error has unknown semantics.
	Unknown = errors.New("unknown")
	// ProxyTimedOut indicates that a request timed out when fetching from the Module Mirror.
	ProxyTimedOut = errors.New("proxy timed out")
	// PackageBuildContextNotSupported indicates that the build context for the
	// package is not supported.
	PackageBuildContextNotSupported = errors.New("package build context not supported")
	// PackageMaxImportsLimitExceeded indicates that the package has too many
	// imports.
	PackageMaxImportsLimitExceeded = errors.New("package max imports limit exceeded")
	// PackageMaxFileSizeLimitExceeded indicates that the package contains a file
	// that exceeds fetch.MaxFileSize.
	PackageMaxFileSizeLimitExceeded = errors.New("package max file size limit exceeded")
	// PackageDocumentationHTMLTooLarge indicates that the rendered documentation
	// HTML size exceeded the specified limit for dochtml.RenderOptions.
	PackageDocumentationHTMLTooLarge = errors.New("package documentation HTML is too large")
	// PackageBadImportPath indicates that Go files were found in a
	// directory, but the resulting import path is invalid.
	PackageBadImportPath = errors.New("package bad import path")
	// PackageInvalidContents represents an error loading a package because
	// its contents do not make up a valid package. This can happen, for
	// example, if the .go files fail to parse or declare different package
	// names.
	PackageInvalidContents = errors.New("package invalid contents")
	// DBModuleInsertInvalid represents a module that was successfully
	// fetched but could not be inserted due to invalid arguments to
	// postgres.InsertModule.
	DBModuleInsertInvalid = errors.New("db module insert invalid")
	// ReprocessStatusOK indicates that the module to be reprocessed
	// previously had a status of http.StatusOK.
	ReprocessStatusOK = errors.New("reprocess status ok")
	// ReprocessHasIncompletePackages indicates that the module to be reprocessed
	// previously had a status of 290.
	ReprocessHasIncompletePackages = errors.New("reprocess has incomplete packages")
	// ReprocessBadModule indicates that the module to be reprocessed
	// previously had a status of derrors.BadModule.
	ReprocessBadModule = errors.New("reprocess bad module")
	// ReprocessAlternative indicates that the module to be reprocessed
	// previously had a status of derrors.AlternativeModule.
	ReprocessAlternative = errors.New("reprocess alternative module")
	// ReprocessDBModuleInsertInvalid represents a module to be reprocessed
	// that was successfully fetched but could not be inserted due to invalid
	// arguments to postgres.InsertModule.
	ReprocessDBModuleInsertInvalid = errors.New("reprocess db module insert invalid")
)
// codes maps each sentinel error to its numeric status code. HTTP status
// codes are used where one fits; the remainder use unused code ranges.
var codes = []struct {
	err  error
	code int
}{
	{NotFound, http.StatusNotFound},
	{InvalidArgument, http.StatusBadRequest},
	{Excluded, http.StatusForbidden},
	// Since the following aren't HTTP statuses, pick unused codes.
	{HasIncompletePackages, 290},
	{DBModuleInsertInvalid, 480},
	{BadModule, 490},
	{AlternativeModule, 491},
	{ProxyTimedOut, http.StatusGatewayTimeout},
	// 52x and 54x errors represent modules that need to be reprocessed, and the
	// previous status code the module had. Note that the status code
	// matters for determining reprocessing order.
	{ReprocessStatusOK, 520},
	{ReprocessHasIncompletePackages, 521},
	{ReprocessBadModule, 540},
	{ReprocessAlternative, 541},
	{ReprocessDBModuleInsertInvalid, 542},
	// 60x errors represent errors that occurred when processing a
	// package.
	{PackageBuildContextNotSupported, 600},
	{PackageMaxImportsLimitExceeded, 601},
	{PackageMaxFileSizeLimitExceeded, 602},
	{PackageDocumentationHTMLTooLarge, 603},
	{PackageInvalidContents, 604},
	{PackageBadImportPath, 605},
}
// FromStatus generates an error corresponding to the given status code. It
// uses the given format string and arguments to create the error string
// according to the fmt package. If format is the empty string, then the
// error corresponding to the code is returned unwrapped; otherwise the
// sentinel error is wrapped beneath the formatted message. Codes without a
// table entry map to Unknown.
//
// If code is http.StatusOK, it returns nil.
func FromStatus(code int, format string, args ...interface{}) error {
	if code == http.StatusOK {
		return nil
	}
	innerErr := Unknown
	for _, entry := range codes {
		if entry.code == code {
			innerErr = entry.err
			break
		}
	}
	if format == "" {
		return innerErr
	}
	return fmt.Errorf(format+": %w", append(args, innerErr)...)
}
// ToStatus returns the status code corresponding to err: http.StatusOK for
// nil, the table entry whose sentinel matches via errors.Is, or
// http.StatusInternalServerError when nothing matches.
func ToStatus(err error) int {
	if err == nil {
		return http.StatusOK
	}
	for _, entry := range codes {
		if errors.Is(err, entry.err) {
			return entry.code
		}
	}
	return http.StatusInternalServerError
}
// ToReprocessStatus returns the reprocess status code corresponding to the
// provided status. Statuses without a reprocess counterpart are returned
// unchanged.
func ToReprocessStatus(status int) int {
	switch {
	case status == http.StatusOK:
		return ToStatus(ReprocessStatusOK)
	case status == ToStatus(HasIncompletePackages):
		return ToStatus(ReprocessHasIncompletePackages)
	case status == ToStatus(BadModule):
		return ToStatus(ReprocessBadModule)
	case status == ToStatus(AlternativeModule):
		return ToStatus(ReprocessAlternative)
	case status == ToStatus(DBModuleInsertInvalid):
		return ToStatus(ReprocessDBModuleInsertInvalid)
	default:
		return status
	}
}
// Add adds context to the error.
// The result cannot be unwrapped to recover the original error.
// It does nothing when *errp == nil.
//
// Example:
//
//	defer derrors.Add(&err, "copy(%s, %s)", src, dst)
//
// See Wrap for an equivalent function that allows
// the result to be unwrapped.
func Add(errp *error, format string, args ...interface{}) {
	if *errp == nil {
		return
	}
	msg := fmt.Sprintf(format, args...)
	*errp = fmt.Errorf("%s: %v", msg, *errp)
}
// Wrap adds context to the error and allows
// unwrapping the result to recover the original error.
// It does nothing when *errp == nil.
//
// Example:
//
//	defer derrors.Wrap(&err, "copy(%s, %s)", src, dst)
//
// See Add for an equivalent function that does not allow
// the result to be unwrapped.
func Wrap(errp *error, format string, args ...interface{}) {
	if *errp == nil {
		return
	}
	msg := fmt.Sprintf(format, args...)
	*errp = fmt.Errorf("%s: %w", msg, *errp)
}
internal/derrors: add ModuleTooLarge code
Add a status code that we'll use to mark modules as being
currently too large to process.
For b/168493193
Change-Id: I96f5931f68ce5569a22b282c7f038c6997256871
Reviewed-on: https://go-review.googlesource.com/c/pkgsite/+/254678
Trust: Jonathan Amsterdam <e3d3698b2ccd5955e4adf250d0785062d0f9018b@google.com>
Run-TryBot: Jonathan Amsterdam <e3d3698b2ccd5955e4adf250d0785062d0f9018b@google.com>
Reviewed-by: Julie Qiu <8d32267b6b4884cf35adeaccde2b6857ae11aace@golang.org>
TryBot-Result: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package derrors defines internal error values to categorize the different
// types of error semantics we support.
package derrors
import (
"errors"
"fmt"
"net/http"
)
//lint:file-ignore ST1012 prefixing error values with Err would stutter
var (
	// HasIncompletePackages indicates a module containing packages that
	// were processed with a 60x error code.
	HasIncompletePackages = errors.New("has incomplete packages")
	// NotFound indicates that a requested entity was not found (HTTP 404).
	NotFound = errors.New("not found")
	// InvalidArgument indicates that the input into the request is invalid in
	// some way (HTTP 400).
	InvalidArgument = errors.New("invalid argument")
	// BadModule indicates a problem with a module.
	BadModule = errors.New("bad module")
	// Excluded indicates that the module is excluded. (See internal/postgres/excluded.go.)
	Excluded = errors.New("excluded")
	// AlternativeModule indicates that the path of the module zip file differs
	// from the path specified in the go.mod file.
	AlternativeModule = errors.New("alternative module")
	// ModuleTooLarge indicates that the module is too large for us to process.
	// This should be temporary: we should obtain sufficient resources to process
	// any module, up to the max size allowed by the proxy.
	ModuleTooLarge = errors.New("module too large")
	// Unknown indicates that the error has unknown semantics.
	Unknown = errors.New("unknown")
	// ProxyTimedOut indicates that a request timed out when fetching from the Module Mirror.
	ProxyTimedOut = errors.New("proxy timed out")
	// PackageBuildContextNotSupported indicates that the build context for the
	// package is not supported.
	PackageBuildContextNotSupported = errors.New("package build context not supported")
	// PackageMaxImportsLimitExceeded indicates that the package has too many
	// imports.
	PackageMaxImportsLimitExceeded = errors.New("package max imports limit exceeded")
	// PackageMaxFileSizeLimitExceeded indicates that the package contains a file
	// that exceeds fetch.MaxFileSize.
	PackageMaxFileSizeLimitExceeded = errors.New("package max file size limit exceeded")
	// PackageDocumentationHTMLTooLarge indicates that the rendered documentation
	// HTML size exceeded the specified limit for dochtml.RenderOptions.
	PackageDocumentationHTMLTooLarge = errors.New("package documentation HTML is too large")
	// PackageBadImportPath indicates that Go files were found in a
	// directory, but the resulting import path is invalid.
	PackageBadImportPath = errors.New("package bad import path")
	// PackageInvalidContents represents an error loading a package because
	// its contents do not make up a valid package. This can happen, for
	// example, if the .go files fail to parse or declare different package
	// names.
	PackageInvalidContents = errors.New("package invalid contents")
	// DBModuleInsertInvalid represents a module that was successfully
	// fetched but could not be inserted due to invalid arguments to
	// postgres.InsertModule.
	DBModuleInsertInvalid = errors.New("db module insert invalid")
	// ReprocessStatusOK indicates that the module to be reprocessed
	// previously had a status of http.StatusOK.
	ReprocessStatusOK = errors.New("reprocess status ok")
	// ReprocessHasIncompletePackages indicates that the module to be reprocessed
	// previously had a status of 290.
	ReprocessHasIncompletePackages = errors.New("reprocess has incomplete packages")
	// ReprocessBadModule indicates that the module to be reprocessed
	// previously had a status of derrors.BadModule.
	ReprocessBadModule = errors.New("reprocess bad module")
	// ReprocessAlternative indicates that the module to be reprocessed
	// previously had a status of derrors.AlternativeModule.
	ReprocessAlternative = errors.New("reprocess alternative module")
	// ReprocessDBModuleInsertInvalid represents a module to be reprocessed
	// that was successfully fetched but could not be inserted due to invalid
	// arguments to postgres.InsertModule.
	ReprocessDBModuleInsertInvalid = errors.New("reprocess db module insert invalid")
)
// codes maps each sentinel error to its numeric status code. HTTP status
// codes are used where one fits; the remainder use unused code ranges.
var codes = []struct {
	err  error
	code int
}{
	{NotFound, http.StatusNotFound},
	{InvalidArgument, http.StatusBadRequest},
	{Excluded, http.StatusForbidden},
	// Since the following aren't HTTP statuses, pick unused codes.
	{HasIncompletePackages, 290},
	{DBModuleInsertInvalid, 480},
	{BadModule, 490},
	{AlternativeModule, 491},
	{ModuleTooLarge, 492},
	{ProxyTimedOut, http.StatusGatewayTimeout},
	// 52x and 54x errors represent modules that need to be reprocessed, and the
	// previous status code the module had. Note that the status code
	// matters for determining reprocessing order.
	{ReprocessStatusOK, 520},
	{ReprocessHasIncompletePackages, 521},
	{ReprocessBadModule, 540},
	{ReprocessAlternative, 541},
	{ReprocessDBModuleInsertInvalid, 542},
	// 60x errors represent errors that occurred when processing a
	// package.
	{PackageBuildContextNotSupported, 600},
	{PackageMaxImportsLimitExceeded, 601},
	{PackageMaxFileSizeLimitExceeded, 602},
	{PackageDocumentationHTMLTooLarge, 603},
	{PackageInvalidContents, 604},
	{PackageBadImportPath, 605},
}
// FromStatus generates an error corresponding to the given status code. It
// uses the given format string and arguments to create the error string
// according to the fmt package. If format is the empty string, then the
// error corresponding to the code is returned unwrapped; otherwise the
// sentinel error is wrapped beneath the formatted message. Codes without a
// table entry map to Unknown.
//
// If code is http.StatusOK, it returns nil.
func FromStatus(code int, format string, args ...interface{}) error {
	if code == http.StatusOK {
		return nil
	}
	innerErr := Unknown
	for _, entry := range codes {
		if entry.code == code {
			innerErr = entry.err
			break
		}
	}
	if format == "" {
		return innerErr
	}
	return fmt.Errorf(format+": %w", append(args, innerErr)...)
}
// ToStatus returns the status code corresponding to err: http.StatusOK for
// nil, the table entry whose sentinel matches via errors.Is, or
// http.StatusInternalServerError when nothing matches.
func ToStatus(err error) int {
	if err == nil {
		return http.StatusOK
	}
	for _, entry := range codes {
		if errors.Is(err, entry.err) {
			return entry.code
		}
	}
	return http.StatusInternalServerError
}
// ToReprocessStatus returns the reprocess status code corresponding to the
// provided status. Statuses without a reprocess counterpart are returned
// unchanged.
func ToReprocessStatus(status int) int {
	switch {
	case status == http.StatusOK:
		return ToStatus(ReprocessStatusOK)
	case status == ToStatus(HasIncompletePackages):
		return ToStatus(ReprocessHasIncompletePackages)
	case status == ToStatus(BadModule):
		return ToStatus(ReprocessBadModule)
	case status == ToStatus(AlternativeModule):
		return ToStatus(ReprocessAlternative)
	case status == ToStatus(DBModuleInsertInvalid):
		return ToStatus(ReprocessDBModuleInsertInvalid)
	default:
		return status
	}
}
// Add adds context to the error.
// The result cannot be unwrapped to recover the original error.
// It does nothing when *errp == nil.
//
// Example:
//
//	defer derrors.Add(&err, "copy(%s, %s)", src, dst)
//
// See Wrap for an equivalent function that allows
// the result to be unwrapped.
func Add(errp *error, format string, args ...interface{}) {
	if *errp == nil {
		return
	}
	msg := fmt.Sprintf(format, args...)
	*errp = fmt.Errorf("%s: %v", msg, *errp)
}
// Wrap adds context to the error and allows
// unwrapping the result to recover the original error.
// It does nothing when *errp == nil.
//
// Example:
//
//	defer derrors.Wrap(&err, "copy(%s, %s)", src, dst)
//
// See Add for an equivalent function that does not allow
// the result to be unwrapped.
func Wrap(errp *error, format string, args ...interface{}) {
	if *errp == nil {
		return
	}
	msg := fmt.Sprintf(format, args...)
	*errp = fmt.Errorf("%s: %w", msg, *errp)
}
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/google/pprof/internal/plugin"
"github.com/google/pprof/internal/report"
)
// commands describes the commands accepted by pprof, keyed by command name.
type commands map[string]*command

// command describes the actions for a pprof command. Includes a
// function for command-line completion, the report format to use
// during report generation, any postprocessing functions, and whether
// the command expects a regexp parameter (typically a function name).
// The zero values of visualizer and hasParam mean "no visualization step"
// and "no CLI parameter", respectively.
type command struct {
	format      int           // report format to generate
	postProcess PostProcessor // postprocessing to run on report
	visualizer  PostProcessor // display output using some callback
	hasParam    bool          // collect a parameter from the CLI
	description string        // single-line description text saying what the command does
	usage       string        // multi-line help text saying how the command is used
}
// help returns the formatted help text for a command: its description
// followed by an indented usage section when one is defined.
func (c *command) help(name string) string {
	var b strings.Builder
	b.WriteString(c.description)
	b.WriteByte('\n')
	if c.usage != "" {
		b.WriteString(" Usage:\n")
		for _, line := range strings.Split(c.usage, "\n") {
			fmt.Fprintf(&b, " %s\n", line)
		}
	}
	b.WriteByte('\n')
	return b.String()
}
// AddCommand adds an additional command to the set of commands
// accepted by pprof. This enables extensions to add new commands for
// specialized visualization formats. If the command specified already
// exists, it is overwritten.
func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) {
	// visualizer and hasParam take their zero values: no visualization
	// callback and no CLI parameter.
	pprofCommands[cmd] = &command{
		format:      format,
		postProcess: post,
		description: desc,
		usage:       usage,
	}
}
// SetVariableDefault sets the default value for a pprof
// variable. This enables extensions to set their own defaults.
// Unknown variable names are ignored.
func SetVariableDefault(variable, value string) {
	v, ok := pprofVariables[variable]
	if !ok || v == nil {
		return
	}
	v.value = value
}
// PostProcessor is a function that applies post-processing to the report
// output: it reads the generated report from input, writes the transformed
// result to output, and reports user-facing messages through ui.
type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error

// interactiveMode is true if pprof is running on interactive mode, reading
// commands from its shell.
var interactiveMode = false
// pprofCommands are the report generation commands recognized by pprof.
// Each entry's fields are: report format, post-processor, visualizer,
// hasParam (regexp argument expected), description, usage text.
var pprofCommands = commands{
	// Commands that require no post-processing.
	"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
	"disasm":   {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
	"dot":      {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
	"list":     {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
	"peek":     {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
	"raw":      {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
	"tags":     {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
	"text":     {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
	"top":      {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
	"traces":   {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
	"tree":     {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},
	// Save binary formats to a file
	"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
	"proto":     {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
	"topproto":  {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},
	// Generate report in DOT format and postprocess with dot
	"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
	"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
	"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
	"ps":  {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},
	// Save SVG output into a file
	"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},
	// Visualize postprocessed dot output
	"eog":    {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
	"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
	"gv":     {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
	"web":    {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},
	// Visualize callgrind output
	"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},
	// Visualize HTML directly generated by report.
	"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
}
// pprofVariables are the configuration parameters that affect the
// reports generated by pprof. Each entry holds: value kind, default
// value, radio-group name (empty for plain options), and help text.
var pprofVariables = variables{
	// Filename for file-based output formats, stdout by default.
	"output": &variable{stringKind, "", "", helpText("Output filename for file-based outputs")},
	// Comparisons.
	"drop_negative": &variable{boolKind, "f", "", helpText(
		"Ignore negative differences",
		"Do not show any locations with values <0.")},
	// Graph handling options.
	"call_tree": &variable{boolKind, "f", "", helpText(
		"Create a context-sensitive call tree",
		"Treat locations reached through different paths as separate.")},
	// Display options.
	"relative_percentages": &variable{boolKind, "f", "", helpText(
		"Show percentages relative to focused subgraph",
		"If unset, percentages are relative to full graph before focusing",
		"to facilitate comparison with original graph.")},
	"unit": &variable{stringKind, "minimum", "", helpText(
		"Measurement units to display",
		"Scale the sample values to this unit.",
		"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
		"For memory profiles, use megabytes, kilobytes, bytes, etc.",
		"Using auto will scale each value independently to the most natural unit.")},
	"compact_labels": &variable{boolKind, "f", "", "Show minimal headers"},
	"source_path":    &variable{stringKind, "", "", "Search path for source files"},
	"trim_path":      &variable{stringKind, "", "", "Path to trim from source paths before search"},
	// Filtering options
	"nodecount": &variable{intKind, "-1", "", helpText(
		"Max number of nodes to show",
		"Uses heuristics to limit the number of locations to be displayed.",
		"On graphs, dotted edges represent paths through nodes that have been removed.")},
	"nodefraction": &variable{floatKind, "0.005", "", "Hide nodes below <f>*total"},
	"edgefraction": &variable{floatKind, "0.001", "", "Hide edges below <f>*total"},
	"trim": &variable{boolKind, "t", "", helpText(
		"Honor nodefraction/edgefraction/nodecount defaults",
		"Set to false to get the full profile, without any trimming.")},
	"focus": &variable{stringKind, "", "", helpText(
		"Restricts to samples going through a node matching regexp",
		"Discard samples that do not include a node matching this regexp.",
		"Matching includes the function name, filename or object name.")},
	"ignore": &variable{stringKind, "", "", helpText(
		"Skips paths going through any nodes matching regexp",
		"If set, discard samples that include a node matching this regexp.",
		"Matching includes the function name, filename or object name.")},
	"prune_from": &variable{stringKind, "", "", helpText(
		"Drops any functions below the matched frame.",
		"If set, any frames matching the specified regexp and any frames",
		"below it will be dropped from each sample.")},
	"hide": &variable{stringKind, "", "", helpText(
		"Skips nodes matching regexp",
		"Discard nodes that match this location.",
		"Other nodes from samples that include this location will be shown.",
		"Matching includes the function name, filename or object name.")},
	"show": &variable{stringKind, "", "", helpText(
		"Only show nodes matching regexp",
		"If set, only show nodes that match this location.",
		"Matching includes the function name, filename or object name.")},
	"show_from": &variable{stringKind, "", "", helpText(
		"Drops functions above the highest matched frame.",
		"If set, all frames above the highest match are dropped from every sample.",
		"Matching includes the function name, filename or object name.")},
	"tagfocus": &variable{stringKind, "", "", helpText(
		"Restricts to samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
	"tagignore": &variable{stringKind, "", "", helpText(
		"Discard samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
	"tagshow": &variable{stringKind, "", "", helpText(
		"Only consider tags matching this regexp",
		"Discard tags that do not match this regexp")},
	"taghide": &variable{stringKind, "", "", helpText(
		"Skip tags matching this regexp",
		"Discard tags that match this regexp")},
	// Heap profile options
	"divide_by": &variable{floatKind, "1", "", helpText(
		"Ratio to divide all samples before visualization",
		"Divide all samples values by a constant, eg the number of processors or jobs.")},
	"mean": &variable{boolKind, "f", "", helpText(
		"Average sample value over first value (count)",
		"For memory profiles, report average memory per allocation.",
		"For time-based profiles, report average time per event.")},
	"sample_index": &variable{stringKind, "", "", helpText(
		"Sample value to report (0-based index or name)",
		"Profiles contain multiple values per sample.",
		"Use sample_index=i to select the ith value (starting at 0).")},
	"normalize": &variable{boolKind, "f", "", helpText(
		"Scales profile based on the base profile.")},
	// Data sorting criteria
	"flat": &variable{boolKind, "t", "cumulative", helpText("Sort entries based on own weight")},
	"cum":  &variable{boolKind, "f", "cumulative", helpText("Sort entries based on cumulative weight")},
	// Output granularity
	"functions": &variable{boolKind, "t", "granularity", helpText(
		"Aggregate at the function level.",
		"Takes into account the filename/lineno where the function was defined.")},
	"files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."},
	"lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."},
	// NOTE(review): the help text below says "function level" for
	// "addresses" — presumably this should describe address-level
	// aggregation; confirm against upstream before changing the string.
	"addresses": &variable{boolKind, "f", "granularity", helpText(
		"Aggregate at the function level.",
		"Includes functions' addresses in the output.")},
	"noinlines": &variable{boolKind, "f", "granularity", helpText(
		"Aggregate at the function level.",
		"Attributes inlined functions to their first out-of-line caller.")},
	"addressnoinlines": &variable{boolKind, "f", "granularity", helpText(
		"Aggregate at the function level, including functions' addresses in the output.",
		"Attributes inlined functions to their first out-of-line caller.")},
}
// helpText joins the given lines with newlines and guarantees a
// trailing newline, producing a multi-line help string.
func helpText(s ...string) string {
	var b strings.Builder
	for i, line := range s {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteString(line)
	}
	b.WriteByte('\n')
	return b.String()
}
// usage returns a string describing the pprof commands and variables.
// if commandLine is set, the output reflect cli usage.
func usage(commandLine bool) string {
	// Command-line mode spells options with a leading "-"; interactive
	// mode uses bare names.
	var prefix string
	if commandLine {
		prefix = "-"
	}
	// fmtHelp renders one name/description pair, keeping only the first
	// line of a multi-line description.
	fmtHelp := func(c, d string) string {
		return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
	}
	var commands []string
	for name, cmd := range pprofCommands {
		commands = append(commands, fmtHelp(prefix+name, cmd.description))
	}
	sort.Strings(commands)
	var help string
	if commandLine {
		help = " Output formats (select at most one):\n"
	} else {
		help = " Commands:\n"
		// Pseudo-commands that only exist in the interactive shell.
		commands = append(commands, fmtHelp("o/options", "List options and their current values"))
		commands = append(commands, fmtHelp("quit/exit/^D", "Exit pprof"))
	}
	help = help + strings.Join(commands, "\n") + "\n\n" +
		" Options:\n"
	// Print help for variables after sorting them.
	// Collect radio variables by their group name to print them together.
	radioOptions := make(map[string][]string)
	var variables []string
	for name, vr := range pprofVariables {
		if vr.group != "" {
			radioOptions[vr.group] = append(radioOptions[vr.group], name)
			continue
		}
		variables = append(variables, fmtHelp(prefix+name, vr.help))
	}
	sort.Strings(variables)
	help = help + strings.Join(variables, "\n") + "\n\n" +
		" Option groups (only set one per group):\n"
	var radioStrings []string
	for radio, ops := range radioOptions {
		sort.Strings(ops)
		s := []string{fmtHelp(radio, "")}
		for _, op := range ops {
			s = append(s, " "+fmtHelp(prefix+op, pprofVariables[op].help))
		}
		radioStrings = append(radioStrings, strings.Join(s, "\n"))
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Strings(radioStrings)
	return help + strings.Join(radioStrings, "\n")
}
// reportHelp builds the usage text for a report-generating command c.
// cum documents the -cum sorting flag; redirect documents ">f" output
// redirection.
func reportHelp(c string, cum, redirect bool) string {
	usage := c + " [n] [focus_regex]* [-ignore_regex]*"
	body := []string{
		"Include up to n samples",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if cum {
		usage += " [-cum]"
		body = append(body, "-cum sorts the output by cumulative weight")
	}
	if redirect {
		usage += " >f"
		body = append(body, "Optionally save the report on the file f")
	}
	return usage + "\n" + strings.Join(body, "\n")
}
// listHelp builds the usage text for a source-listing command c;
// redirect documents ">f" output redirection.
func listHelp(c string, redirect bool) string {
	usage := c + "<func_regex|address> [-focus_regex]* [-ignore_regex]*"
	body := []string{
		"Include functions matching func_regex, or including the address specified.",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if redirect {
		usage += " >f"
		body = append(body, "Optionally save the report on the file f")
	}
	return usage + "\n" + strings.Join(body, "\n")
}
// browsers returns a list of commands to attempt for web visualization.
// Commands which definitely will open a browser are prioritized over other
// commands like xdg-open, which may not open the javascript embedded SVG
// files produced by the -web command in a browser.
func browsers() []string {
	var cmds []string
	// An explicit user preference always goes first.
	if userBrowser := os.Getenv("BROWSER"); userBrowser != "" {
		cmds = append(cmds, userBrowser)
	}
	// Known browsers come before OS-generic openers (consistent with the
	// later revision of this function in this file).
	cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox"}...)
	switch runtime.GOOS {
	case "darwin":
		cmds = append(cmds, "/usr/bin/open")
	case "windows":
		cmds = append(cmds, "cmd /c start")
	default:
		if os.Getenv("DISPLAY") != "" {
			// xdg-open is only for use in a desktop environment.
			cmds = append(cmds, "xdg-open")
		}
		cmds = append(cmds, "sensible-browser")
	}
	return cmds
}
var kcachegrind = []string{"kcachegrind"}
// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		// Only divert when we would otherwise write binary data to an
		// interactive terminal (stdout + TTY, or interactive pprof shell).
		if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
			tempFile, err := newTempFile("", "profile", "."+format)
			if err != nil {
				return err
			}
			// Tell the user where the report went, on stderr so it does not
			// mix with any report output.
			ui.PrintErr("Generating report in ", tempFile.Name())
			output = tempFile
		}
		_, err := io.Copy(output, input)
		return err
	}
}
// invokeDot returns a PostProcessor that pipes a DOT-format report
// through the Graphviz "dot" tool, rendering it in the given output
// format (e.g. "svg", "pdf", "png").
func invokeDot(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		cmd := exec.Command("dot", "-T"+format)
		cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
		if err := cmd.Run(); err != nil {
			// Lowercase error string per Go convention; wrap with %w so
			// callers can inspect the underlying exec error.
			return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %w", err)
		}
		return nil
	}
}
// massageDotSVG invokes the dot tool to generate an SVG image and alters
// the image to have panning capabilities when viewed in a browser.
func massageDotSVG() PostProcessor {
	// Reuse the plain dot post-processor to do the actual rendering.
	generateSVG := invokeDot("svg")
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		// Render the DOT input to SVG in memory first, then rewrite the
		// SVG text (via massageSVG) before emitting it to output.
		baseSVG := new(bytes.Buffer)
		if err := generateSVG(input, baseSVG, ui); err != nil {
			return err
		}
		_, err := output.Write([]byte(massageSVG(baseSVG.String())))
		return err
	}
}
func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		// Persist the report to a temp file so an external viewer can open
		// it; deletion is deferred to pprof shutdown (deferDeleteTempFile).
		tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
		if err != nil {
			return err
		}
		deferDeleteTempFile(tempFile.Name())
		if _, err := io.Copy(tempFile, input); err != nil {
			return err
		}
		tempFile.Close()
		// Try visualizers until one is successful
		for _, v := range visualizers {
			// Separate command and arguments for exec.Command.
			args := strings.Split(v, " ")
			if len(args) == 0 {
				continue
			}
			viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
			viewer.Stderr = os.Stderr
			if err = viewer.Start(); err == nil {
				// Wait for a second so that the visualizer has a chance to
				// open the input file. This needs to be done even if we're
				// waiting for the visualizer as it can be just a wrapper that
				// spawns a browser tab and returns right away.
				defer func(t <-chan time.Time) {
					<-t
				}(time.After(time.Second))
				// On interactive mode, let the visualizer run in the background
				// so other commands can be issued.
				if !interactiveMode {
					return viewer.Wait()
				}
				return nil
			}
		}
		// err holds the Start failure of the last attempted visualizer
		// (nil only if the visualizer list was empty).
		return err
	}
}
// variables describe the configuration parameters recognized by pprof.
type variables map[string]*variable
// variable is a single configuration parameter.
type variable struct {
kind int // How to interpret the value, must be one of the enums below.
value string // Effective value. Only values appropriate for the Kind should be set.
group string // boolKind variables with the same Group != "" cannot be set simultaneously.
help string // Text describing the variable, in multiple lines separated by newline.
}
const (
// variable.kind must be one of these variables.
boolKind = iota
intKind
floatKind
stringKind
)
// set updates the value of a variable, checking that the value is
// suitable for the variable Kind.
func (vars variables) set(name, value string) error {
	v := vars[name]
	if v == nil {
		return fmt.Errorf("no variable %s", name)
	}
	// Validate the value for the variable's kind before storing it.
	var err error
	switch v.kind {
	case boolKind:
		var b bool
		if b, err = stringToBool(value); err == nil {
			// Radio-group booleans may only be switched on; the group reset
			// below turns the other members off.
			if v.group != "" && !b {
				err = fmt.Errorf("%q can only be set to true", name)
			}
		}
	case intKind:
		_, err = strconv.Atoi(value)
	case floatKind:
		_, err = strconv.ParseFloat(value, 64)
	case stringKind:
		// Remove quotes, particularly useful for empty values.
		if len(value) > 1 && strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) {
			value = value[1 : len(value)-1]
		}
	}
	if err != nil {
		return err
	}
	vars[name].value = value
	// Setting one member of a radio group clears all of its siblings.
	if group := vars[name].group; group != "" {
		for vname, vvar := range vars {
			if vvar.group == group && vname != name {
				vvar.value = "f"
			}
		}
	}
	return err
}
// boolValue returns the value of a boolean variable.
// boolValue parses and returns the stored value as a boolean, panicking
// on values that stringToBool does not recognize.
func (v *variable) boolValue() bool {
	if b, err := stringToBool(v.value); err == nil {
		return b
	}
	panic("unexpected value " + v.value + " for bool ")
}
// intValue returns the value of an intKind variable.
// intValue parses and returns the stored value as an int, panicking on
// non-numeric values.
func (v *variable) intValue() int {
	if i, err := strconv.Atoi(v.value); err == nil {
		return i
	}
	panic("unexpected value " + v.value + " for int ")
}
// floatValue returns the value of a Float variable.
// floatValue parses and returns the stored value as a float64,
// panicking on values that do not parse.
func (v *variable) floatValue() float64 {
	if f, err := strconv.ParseFloat(v.value, 64); err == nil {
		return f
	}
	panic("unexpected value " + v.value + " for float ")
}
// stringValue returns a canonical representation for a variable.
// stringValue returns a canonical spelling of the variable's value,
// normalizing bool/int/float values through their typed accessors
// (e.g. "t" renders as "true").
func (v *variable) stringValue() string {
	switch v.kind {
	case boolKind:
		return strconv.FormatBool(v.boolValue())
	case intKind:
		return strconv.Itoa(v.intValue())
	case floatKind:
		return strconv.FormatFloat(v.floatValue(), 'g', -1, 64)
	default:
		return v.value
	}
}
// stringToBool converts common boolean spellings (case-insensitive) to
// a bool; the empty string counts as true so bare flags enable options.
func stringToBool(s string) (bool, error) {
	v := strings.ToLower(s)
	for _, yes := range []string{"true", "t", "yes", "y", "1", ""} {
		if v == yes {
			return true, nil
		}
	}
	for _, no := range []string{"false", "f", "no", "n", "0"} {
		if v == no {
			return false, nil
		}
	}
	return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
}
// makeCopy returns a duplicate of a set of shell variables.
// makeCopy returns a deep copy of vars: both the map and every
// *variable it points to are duplicated, so mutations of the copy do
// not affect the original.
func (vars variables) makeCopy() variables {
	dup := make(variables, len(vars))
	for name, v := range vars {
		cloned := *v
		dup[name] = &cloned
	}
	return dup
}
Change order of commands returned by browsers() (#418)
* Change order of commands returned by browsers()
* Add comment to explain browser command order
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/google/pprof/internal/plugin"
"github.com/google/pprof/internal/report"
)
// commands describes the commands accepted by pprof.
type commands map[string]*command
// command describes the actions for a pprof command. Includes a
// function for command-line completion, the report format to use
// during report generation, any postprocessing functions, and whether
// the command expects a regexp parameter (typically a function name).
type command struct {
format int // report format to generate
postProcess PostProcessor // postprocessing to run on report
visualizer PostProcessor // display output using some callback
hasParam bool // collect a parameter from the CLI
description string // single-line description text saying what the command does
usage string // multi-line help text saying how the command is used
}
// help returns a help string for a command.
func (c *command) help(name string) string {
message := c.description + "\n"
if c.usage != "" {
message += " Usage:\n"
lines := strings.Split(c.usage, "\n")
for _, line := range lines {
message += fmt.Sprintf(" %s\n", line)
}
}
return message + "\n"
}
// AddCommand adds an additional command to the set of commands
// accepted by pprof. This enables extensions to add new commands for
// specialized visualization formats. If the command specified already
// exists, it is overwritten.
// AddCommand adds an additional command to the set of commands
// accepted by pprof, overwriting any existing command of the same
// name. Extensions use this to register new visualization formats.
func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) {
	pprofCommands[cmd] = &command{
		format:      format,
		postProcess: post,
		visualizer:  nil,
		hasParam:    false,
		description: desc,
		usage:       usage,
	}
}
// SetVariableDefault sets the default value for a pprof
// variable. This enables extensions to set their own defaults.
// SetVariableDefault sets the default value for a pprof variable,
// silently ignoring unknown variable names. This enables extensions to
// set their own defaults.
func SetVariableDefault(variable, value string) {
	v, ok := pprofVariables[variable]
	if !ok || v == nil {
		return
	}
	v.value = value
}
// PostProcessor is a function that applies post-processing to the report output
type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error
// interactiveMode is true if pprof is running on interactive mode, reading
// commands from its shell.
var interactiveMode = false
// pprofCommands are the report generation commands recognized by pprof.
var pprofCommands = commands{
// Commands that require no post-processing.
"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
"disasm": {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
"dot": {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
"list": {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
"peek": {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
"raw": {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
"tags": {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
"text": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
"top": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
"traces": {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
"tree": {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},
// Save binary formats to a file
"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
"proto": {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
"topproto": {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},
// Generate report in DOT format and postprocess with dot
"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
"ps": {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},
// Save SVG output into a file
"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},
// Visualize postprocessed dot output
"eog": {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
"gv": {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
"web": {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},
// Visualize callgrind output
"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},
// Visualize HTML directly generated by report.
"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
}
// pprofVariables are the configuration parameters that affect the
// reported generated by pprof.
var pprofVariables = variables{
// Filename for file-based output formats, stdout by default.
"output": &variable{stringKind, "", "", helpText("Output filename for file-based outputs")},
// Comparisons.
"drop_negative": &variable{boolKind, "f", "", helpText(
"Ignore negative differences",
"Do not show any locations with values <0.")},
// Graph handling options.
"call_tree": &variable{boolKind, "f", "", helpText(
"Create a context-sensitive call tree",
"Treat locations reached through different paths as separate.")},
// Display options.
"relative_percentages": &variable{boolKind, "f", "", helpText(
"Show percentages relative to focused subgraph",
"If unset, percentages are relative to full graph before focusing",
"to facilitate comparison with original graph.")},
"unit": &variable{stringKind, "minimum", "", helpText(
"Measurement units to display",
"Scale the sample values to this unit.",
"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
"For memory profiles, use megabytes, kilobytes, bytes, etc.",
"Using auto will scale each value independently to the most natural unit.")},
"compact_labels": &variable{boolKind, "f", "", "Show minimal headers"},
"source_path": &variable{stringKind, "", "", "Search path for source files"},
"trim_path": &variable{stringKind, "", "", "Path to trim from source paths before search"},
// Filtering options
"nodecount": &variable{intKind, "-1", "", helpText(
"Max number of nodes to show",
"Uses heuristics to limit the number of locations to be displayed.",
"On graphs, dotted edges represent paths through nodes that have been removed.")},
"nodefraction": &variable{floatKind, "0.005", "", "Hide nodes below <f>*total"},
"edgefraction": &variable{floatKind, "0.001", "", "Hide edges below <f>*total"},
"trim": &variable{boolKind, "t", "", helpText(
"Honor nodefraction/edgefraction/nodecount defaults",
"Set to false to get the full profile, without any trimming.")},
"focus": &variable{stringKind, "", "", helpText(
"Restricts to samples going through a node matching regexp",
"Discard samples that do not include a node matching this regexp.",
"Matching includes the function name, filename or object name.")},
"ignore": &variable{stringKind, "", "", helpText(
"Skips paths going through any nodes matching regexp",
"If set, discard samples that include a node matching this regexp.",
"Matching includes the function name, filename or object name.")},
"prune_from": &variable{stringKind, "", "", helpText(
"Drops any functions below the matched frame.",
"If set, any frames matching the specified regexp and any frames",
"below it will be dropped from each sample.")},
"hide": &variable{stringKind, "", "", helpText(
"Skips nodes matching regexp",
"Discard nodes that match this location.",
"Other nodes from samples that include this location will be shown.",
"Matching includes the function name, filename or object name.")},
"show": &variable{stringKind, "", "", helpText(
"Only show nodes matching regexp",
"If set, only show nodes that match this location.",
"Matching includes the function name, filename or object name.")},
"show_from": &variable{stringKind, "", "", helpText(
"Drops functions above the highest matched frame.",
"If set, all frames above the highest match are dropped from every sample.",
"Matching includes the function name, filename or object name.")},
"tagfocus": &variable{stringKind, "", "", helpText(
"Restricts to samples with tags in range or matched by regexp",
"Use name=value syntax to limit the matching to a specific tag.",
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
"tagignore": &variable{stringKind, "", "", helpText(
"Discard samples with tags in range or matched by regexp",
"Use name=value syntax to limit the matching to a specific tag.",
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
"tagshow": &variable{stringKind, "", "", helpText(
"Only consider tags matching this regexp",
"Discard tags that do not match this regexp")},
"taghide": &variable{stringKind, "", "", helpText(
"Skip tags matching this regexp",
"Discard tags that match this regexp")},
// Heap profile options
"divide_by": &variable{floatKind, "1", "", helpText(
"Ratio to divide all samples before visualization",
"Divide all samples values by a constant, eg the number of processors or jobs.")},
"mean": &variable{boolKind, "f", "", helpText(
"Average sample value over first value (count)",
"For memory profiles, report average memory per allocation.",
"For time-based profiles, report average time per event.")},
"sample_index": &variable{stringKind, "", "", helpText(
"Sample value to report (0-based index or name)",
"Profiles contain multiple values per sample.",
"Use sample_index=i to select the ith value (starting at 0).")},
"normalize": &variable{boolKind, "f", "", helpText(
"Scales profile based on the base profile.")},
// Data sorting criteria
"flat": &variable{boolKind, "t", "cumulative", helpText("Sort entries based on own weight")},
"cum": &variable{boolKind, "f", "cumulative", helpText("Sort entries based on cumulative weight")},
// Output granularity
"functions": &variable{boolKind, "t", "granularity", helpText(
"Aggregate at the function level.",
"Takes into account the filename/lineno where the function was defined.")},
"files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."},
"lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."},
"addresses": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level.",
"Includes functions' addresses in the output.")},
"noinlines": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level.",
"Attributes inlined functions to their first out-of-line caller.")},
"addressnoinlines": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level, including functions' addresses in the output.",
"Attributes inlined functions to their first out-of-line caller.")},
}
// helpText joins the given lines with newlines and guarantees a
// trailing newline, producing a multi-line help string.
func helpText(s ...string) string {
	var b strings.Builder
	for i, line := range s {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteString(line)
	}
	b.WriteByte('\n')
	return b.String()
}
// usage returns a string describing the pprof commands and variables.
// if commandLine is set, the output reflect cli usage.
func usage(commandLine bool) string {
var prefix string
if commandLine {
prefix = "-"
}
fmtHelp := func(c, d string) string {
return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
}
var commands []string
for name, cmd := range pprofCommands {
commands = append(commands, fmtHelp(prefix+name, cmd.description))
}
sort.Strings(commands)
var help string
if commandLine {
help = " Output formats (select at most one):\n"
} else {
help = " Commands:\n"
commands = append(commands, fmtHelp("o/options", "List options and their current values"))
commands = append(commands, fmtHelp("quit/exit/^D", "Exit pprof"))
}
help = help + strings.Join(commands, "\n") + "\n\n" +
" Options:\n"
// Print help for variables after sorting them.
// Collect radio variables by their group name to print them together.
radioOptions := make(map[string][]string)
var variables []string
for name, vr := range pprofVariables {
if vr.group != "" {
radioOptions[vr.group] = append(radioOptions[vr.group], name)
continue
}
variables = append(variables, fmtHelp(prefix+name, vr.help))
}
sort.Strings(variables)
help = help + strings.Join(variables, "\n") + "\n\n" +
" Option groups (only set one per group):\n"
var radioStrings []string
for radio, ops := range radioOptions {
sort.Strings(ops)
s := []string{fmtHelp(radio, "")}
for _, op := range ops {
s = append(s, " "+fmtHelp(prefix+op, pprofVariables[op].help))
}
radioStrings = append(radioStrings, strings.Join(s, "\n"))
}
sort.Strings(radioStrings)
return help + strings.Join(radioStrings, "\n")
}
// reportHelp builds the usage text for a report-generating command c.
// cum documents the -cum sorting flag; redirect documents ">f" output
// redirection.
func reportHelp(c string, cum, redirect bool) string {
	usage := c + " [n] [focus_regex]* [-ignore_regex]*"
	body := []string{
		"Include up to n samples",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if cum {
		usage += " [-cum]"
		body = append(body, "-cum sorts the output by cumulative weight")
	}
	if redirect {
		usage += " >f"
		body = append(body, "Optionally save the report on the file f")
	}
	return usage + "\n" + strings.Join(body, "\n")
}
// listHelp builds the usage text for a source-listing command c;
// redirect documents ">f" output redirection.
func listHelp(c string, redirect bool) string {
	usage := c + "<func_regex|address> [-focus_regex]* [-ignore_regex]*"
	body := []string{
		"Include functions matching func_regex, or including the address specified.",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if redirect {
		usage += " >f"
		body = append(body, "Optionally save the report on the file f")
	}
	return usage + "\n" + strings.Join(body, "\n")
}
// browsers returns a list of commands to attempt for web visualization.
// Commands which definitely will open a browser are prioritized over other
// commands like xdg-open, which may not open the javascript embedded SVG
// files produced by the -web command in a browser.
func browsers() []string {
	var cmds []string
	// An explicit user preference always goes first.
	if b := os.Getenv("BROWSER"); b != "" {
		cmds = append(cmds, b)
	}
	// Known browsers come before OS-generic openers.
	cmds = append(cmds, "chrome", "google-chrome", "chromium", "firefox")
	switch runtime.GOOS {
	case "darwin":
		return append(cmds, "/usr/bin/open")
	case "windows":
		return append(cmds, "cmd /c start")
	default:
		if os.Getenv("DISPLAY") != "" {
			// xdg-open is only for use in a desktop environment.
			cmds = append(cmds, "xdg-open")
		}
		return append(cmds, "sensible-browser")
	}
}
var kcachegrind = []string{"kcachegrind"}
// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
tempFile, err := newTempFile("", "profile", "."+format)
if err != nil {
return err
}
ui.PrintErr("Generating report in ", tempFile.Name())
output = tempFile
}
_, err := io.Copy(output, input)
return err
}
}
func invokeDot(format string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
cmd := exec.Command("dot", "-T"+format)
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("Failed to execute dot. Is Graphviz installed? Error: %v", err)
}
return nil
}
}
// massageDotSVG invokes the dot tool to generate an SVG image and alters
// the image to have panning capabilities when viewed in a browser.
func massageDotSVG() PostProcessor {
generateSVG := invokeDot("svg")
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
baseSVG := new(bytes.Buffer)
if err := generateSVG(input, baseSVG, ui); err != nil {
return err
}
_, err := output.Write([]byte(massageSVG(baseSVG.String())))
return err
}
}
func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
if err != nil {
return err
}
deferDeleteTempFile(tempFile.Name())
if _, err := io.Copy(tempFile, input); err != nil {
return err
}
tempFile.Close()
// Try visualizers until one is successful
for _, v := range visualizers {
// Separate command and arguments for exec.Command.
args := strings.Split(v, " ")
if len(args) == 0 {
continue
}
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
viewer.Stderr = os.Stderr
if err = viewer.Start(); err == nil {
// Wait for a second so that the visualizer has a chance to
// open the input file. This needs to be done even if we're
// waiting for the visualizer as it can be just a wrapper that
// spawns a browser tab and returns right away.
defer func(t <-chan time.Time) {
<-t
}(time.After(time.Second))
// On interactive mode, let the visualizer run in the background
// so other commands can be issued.
if !interactiveMode {
return viewer.Wait()
}
return nil
}
}
return err
}
}
// variables describe the configuration parameters recognized by pprof.
type variables map[string]*variable
// variable is a single configuration parameter.
type variable struct {
kind int // How to interpret the value, must be one of the enums below.
value string // Effective value. Only values appropriate for the Kind should be set.
group string // boolKind variables with the same Group != "" cannot be set simultaneously.
help string // Text describing the variable, in multiple lines separated by newline.
}
const (
// variable.kind must be one of these variables.
boolKind = iota
intKind
floatKind
stringKind
)
// set updates the value of a variable, checking that the value is
// suitable for the variable Kind.
func (vars variables) set(name, value string) error {
v := vars[name]
if v == nil {
return fmt.Errorf("no variable %s", name)
}
var err error
switch v.kind {
case boolKind:
var b bool
if b, err = stringToBool(value); err == nil {
if v.group != "" && !b {
err = fmt.Errorf("%q can only be set to true", name)
}
}
case intKind:
_, err = strconv.Atoi(value)
case floatKind:
_, err = strconv.ParseFloat(value, 64)
case stringKind:
// Remove quotes, particularly useful for empty values.
if len(value) > 1 && strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) {
value = value[1 : len(value)-1]
}
}
if err != nil {
return err
}
vars[name].value = value
if group := vars[name].group; group != "" {
for vname, vvar := range vars {
if vvar.group == group && vname != name {
vvar.value = "f"
}
}
}
return err
}
// boolValue returns the value of a boolean variable.
func (v *variable) boolValue() bool {
b, err := stringToBool(v.value)
if err != nil {
panic("unexpected value " + v.value + " for bool ")
}
return b
}
// intValue returns the value of an intKind variable.
func (v *variable) intValue() int {
i, err := strconv.Atoi(v.value)
if err != nil {
panic("unexpected value " + v.value + " for int ")
}
return i
}
// floatValue returns the value of a Float variable.
func (v *variable) floatValue() float64 {
f, err := strconv.ParseFloat(v.value, 64)
if err != nil {
panic("unexpected value " + v.value + " for float ")
}
return f
}
// stringValue returns a canonical representation for a variable.
func (v *variable) stringValue() string {
switch v.kind {
case boolKind:
return fmt.Sprint(v.boolValue())
case intKind:
return fmt.Sprint(v.intValue())
case floatKind:
return fmt.Sprint(v.floatValue())
}
return v.value
}
// stringToBool converts common boolean spellings (case-insensitive) to
// a bool; the empty string counts as true so bare flags enable options.
func stringToBool(s string) (bool, error) {
	v := strings.ToLower(s)
	for _, yes := range []string{"true", "t", "yes", "y", "1", ""} {
		if v == yes {
			return true, nil
		}
	}
	for _, no := range []string{"false", "f", "no", "n", "0"} {
		if v == no {
			return false, nil
		}
	}
	return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
}
// makeCopy returns a duplicate of a set of shell variables.
// makeCopy returns a deep copy of vars: both the map and every
// *variable it points to are duplicated, so mutations of the copy do
// not affect the original.
func (vars variables) makeCopy() variables {
	dup := make(variables, len(vars))
	for name, v := range vars {
		cloned := *v
		dup[name] = &cloned
	}
	return dup
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
)
// parseKey uniquely identifies a memoized parse: the file's identity
// plus the parse mode used.
type parseKey struct {
	file source.FileIdentity
	mode source.ParseMode
}

// parseGoHandle wraps a memoized parse operation for a single Go file.
type parseGoHandle struct {
	handle *memoize.Handle // the cached computation producing *parseGoData
	file   source.FileHandle
	mode   source.ParseMode
}

// parseGoData is the value stored in the memoize handle: the parsed
// AST (possibly partial) and any parse error.
type parseGoData struct {
	memoize.NoCopy
	ast *ast.File
	err error
}
// We use a counting semaphore to limit
// the number of parallel I/O calls per process.
var ioLimit = make(chan bool, 20)
// ParseGo returns a handle for the parse of fh in the given mode. The
// underlying parse runs at most once per (file identity, mode) key via
// the cache's memoization store.
func (c *cache) ParseGo(fh source.FileHandle, mode source.ParseMode) source.ParseGoHandle {
	key := parseKey{
		file: fh.Identity(),
		mode: mode,
	}
	h := c.store.Bind(key, func(ctx context.Context) interface{} {
		data := &parseGoData{}
		data.ast, data.err = parseGo(ctx, c, fh, mode)
		return data
	})
	// Populate file and mode: the File() and Mode() accessors return
	// these fields, and leaving them zero-valued made those accessors
	// return useless zero values.
	return &parseGoHandle{
		handle: h,
		file:   fh,
		mode:   mode,
	}
}
// File returns the file handle this parse handle was created for.
// NOTE(review): ParseGo constructs parseGoHandle without setting the
// file field, so this currently returns the zero value — confirm and
// populate the field at construction.
func (h *parseGoHandle) File() source.FileHandle {
	return h.file
}
// Mode returns the parse mode this handle was created with.
// NOTE(review): ParseGo constructs parseGoHandle without setting the
// mode field, so this currently returns the zero value — confirm and
// populate the field at construction.
func (h *parseGoHandle) Mode() source.ParseMode {
	return h.mode
}
// Parse waits for the memoized parse to complete and returns its AST
// (possibly partial) and error.
func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, error) {
	v := h.handle.Get(ctx)
	if v == nil {
		// Get yields nil when ctx is cancelled before the computation
		// completes; surface the context's error.
		return nil, ctx.Err()
	}
	data := v.(*parseGoData)
	return data.ast, data.err
}
// parseGo reads fh and parses it according to mode: ParseHeader parses
// imports only, ParseExported additionally trims the AST (trimAST)
// after a full parse, and other modes parse the whole file with all
// errors and comments retained. A partial AST may be returned together
// with a parse error.
func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (*ast.File, error) {
	buf, _, err := fh.Read(ctx)
	if err != nil {
		return nil, err
	}
	parserMode := parser.AllErrors | parser.ParseComments
	if mode == source.ParseHeader {
		parserMode = parser.ImportsOnly
	}
	// Named "file" rather than "ast" to avoid shadowing the go/ast
	// package import.
	file, err := parser.ParseFile(c.fset, fh.Identity().URI.Filename(), buf, parserMode)
	if err != nil {
		return file, err
	}
	if mode == source.ParseExported {
		trimAST(file)
	}
	//TODO: move the ast fixup code into here
	return file, nil
}
// parseFiles reads and parses the Go source files and returns the ASTs
// of the ones that could be at least partially parsed, along with a list
// of parse errors encountered, and a fatal error that prevented parsing.
//
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
//
func (imp *importer) parseFiles(filenames []string, ignoreFuncBodies bool) ([]*astFile, []error, error) {
	var (
		wg sync.WaitGroup
		n = len(filenames)
		parsed = make([]*astFile, n)
		errors = make([]error, n)
	)
	// TODO: change this function to return the handles
	// TODO: eliminate the wait group at this layer, it should be done in the parser
	for i, filename := range filenames {
		// Abort the whole batch if the import context was cancelled.
		if err := imp.ctx.Err(); err != nil {
			return nil, nil, err
		}
		// get a file handle
		fh := imp.view.session.GetFile(span.FileURI(filename))
		// now get a parser
		mode := source.ParseFull
		if ignoreFuncBodies {
			mode = source.ParseExported
		}
		ph := imp.view.session.cache.ParseGo(fh, mode)
		// now read and parse in parallel
		wg.Add(1)
		go func(i int, filename string) {
			// ioLimit is a counting semaphore bounding parallel I/O.
			ioLimit <- true // wait
			defer func() {
				<-ioLimit // signal done
				wg.Done()
			}()
			// ParseFile may return a partial AST and an error.
			f, err := ph.Parse(imp.ctx)
			parsed[i], errors[i] = &astFile{
				file: f,
				err: err,
				isTrimmed: ignoreFuncBodies,
			}, err
			// TODO: move fixup into the parse function
			// Fix any badly parsed parts of the AST.
			if f != nil {
				tok := imp.fset.File(f.Pos())
				src, _, err := fh.Read(imp.ctx)
				if err == nil {
					imp.view.fix(imp.ctx, f, tok, src)
				}
			}
		}(i, filename)
	}
	wg.Wait()
	// Eliminate nils, preserving order.
	var o int
	for _, f := range parsed {
		if f != nil {
			parsed[o] = f
			o++
		}
	}
	parsed = parsed[:o]
	// Compact errors the same way, independently of parsed.
	o = 0
	for _, err := range errors {
		if err != nil {
			errors[o] = err
			o++
		}
	}
	errors = errors[:o]
	return parsed, errors, nil
}
// sameFile returns true if x and y have the same basename and denote
// the same file.
//
func sameFile(x, y string) bool {
	if x == y {
		// y may not exist on disk (e.g. an unwritten overlay file),
		// but identical path strings always denote the same file.
		return true
	}
	// Cheap pre-filter: distinct basenames cannot denote the same file.
	if !strings.EqualFold(filepath.Base(x), filepath.Base(y)) {
		return false
	}
	xi, err := os.Stat(x)
	if err != nil {
		return false
	}
	yi, err := os.Stat(y)
	if err != nil {
		return false
	}
	return os.SameFile(xi, yi)
}
// trimAST clears any part of the AST not relevant to type checking
// expressions at pos.
func trimAST(file *ast.File) {
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			return false
		}
		switch n := n.(type) {
		case *ast.FuncDecl:
			// Function bodies are not part of the exported API.
			n.Body = nil
		case *ast.BlockStmt:
			n.List = nil
		case *ast.CaseClause:
			n.Body = nil
		case *ast.CommClause:
			n.Body = nil
		case *ast.CompositeLit:
			// Leave elts in place for [...]T
			// array literals, because they can
			// affect the expression's type.
			if !isEllipsisArray(n.Type) {
				n.Elts = nil
			}
		}
		return true
	})
}
// isEllipsisArray reports whether n is a [...]T array type literal.
func isEllipsisArray(n ast.Expr) bool {
	arr, ok := n.(*ast.ArrayType)
	if !ok {
		return false
	}
	_, isEllipsis := arr.Len.(*ast.Ellipsis)
	return isEllipsis
}
// fix inspects and potentially modifies any *ast.BadStmts or *ast.BadExprs in the AST.
// We attempt to modify the AST such that we can type-check it more effectively.
func (v *view) fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) {
	// parent tracks the most recently visited non-bad node; a BadStmt's
	// enclosing node is needed to splice in a replacement statement.
	var parent ast.Node
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			return false
		}
		switch n := n.(type) {
		case *ast.BadStmt:
			// Repair is best-effort: failures are only logged at debug level.
			if err := v.parseDeferOrGoStmt(n, parent, tok, src); err != nil {
				v.Session().Logger().Debugf(ctx, "unable to parse defer or go from *ast.BadStmt: %v", err)
			}
			return false
		default:
			parent = n
			return true
		}
	})
}
// parseDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement.
//
// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because
// it does not include a call expression. This means that go/types skips type-checking
// this statement entirely, and we can't use the type information when completing.
// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST,
// instead of the *ast.BadStmt.
func (v *view) parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) error {
	// Check if we have a bad statement containing either a "go" or "defer".
	s := &scanner.Scanner{}
	s.Init(tok, src, nil, 0)
	// Scan forward from the start of the file until we reach the bad
	// statement's position; pos/tkn/lit then describe the first token
	// at or past bad.From.
	var pos token.Pos
	var tkn token.Token
	var lit string
	for {
		if tkn == token.EOF {
			return fmt.Errorf("reached the end of the file")
		}
		if pos >= bad.From {
			break
		}
		pos, tkn, lit = s.Scan()
	}
	// Build the statement skeleton matching the keyword we landed on.
	var stmt ast.Stmt
	switch lit {
	case "defer":
		stmt = &ast.DeferStmt{
			Defer: pos,
		}
	case "go":
		stmt = &ast.GoStmt{
			Go: pos,
		}
	default:
		return fmt.Errorf("no defer or go statement found")
	}
	// The expression after the "defer" or "go" starts at this position.
	from, _, _ := s.Scan()
	var to, curr token.Pos
FindTo:
	for {
		curr, tkn, lit = s.Scan()
		// TODO(rstambler): This still needs more handling to work correctly.
		// We encounter a specific issue with code that looks like this:
		//
		// defer x.<>
		// y := 1
		//
		// In this scenario, we parse it as "defer x.y", which then fails to
		// type-check, and we don't get completions as expected.
		switch tkn {
		case token.COMMENT, token.EOF, token.SEMICOLON, token.DEFINE:
			break FindTo
		}
		// to is the end of expression that should become the Fun part of the call.
		to = curr
	}
	// Guard against positions outside the source buffer before slicing.
	if !from.IsValid() || tok.Offset(from) >= len(src) {
		return fmt.Errorf("invalid from position")
	}
	if !to.IsValid() || tok.Offset(to)+1 >= len(src) {
		return fmt.Errorf("invalid to position")
	}
	// Re-parse the captured text as a standalone expression.
	exprstr := string(src[tok.Offset(from) : tok.Offset(to)+1])
	expr, err := parser.ParseExpr(exprstr)
	if expr == nil {
		return fmt.Errorf("no expr in %s: %v", exprstr, err)
	}
	// parser.ParseExpr returns undefined positions.
	// Adjust them for the current file.
	v.offsetPositions(expr, from-1)
	// Package the expression into a fake *ast.CallExpr and re-insert into the function.
	call := &ast.CallExpr{
		Fun: expr,
		Lparen: to,
		Rparen: to,
	}
	switch stmt := stmt.(type) {
	case *ast.DeferStmt:
		stmt.Call = call
	case *ast.GoStmt:
		stmt.Call = call
	}
	// Splice the synthesized statement over the BadStmt in its
	// enclosing block, when we found one.
	switch parent := parent.(type) {
	case *ast.BlockStmt:
		for i, s := range parent.List {
			if s == bad {
				parent.List[i] = stmt
				break
			}
		}
	}
	return nil
}
// offsetPositions applies an offset to the positions in an ast.Node.
// TODO(rstambler): Add more cases here as they become necessary.
func (v *view) offsetPositions(expr ast.Expr, offset token.Pos) {
	ast.Inspect(expr, func(node ast.Node) bool {
		ident, ok := node.(*ast.Ident)
		if !ok {
			// Keep descending until we reach identifiers.
			return true
		}
		ident.NamePos += offset
		return false
	})
}
internal/lsp: move the fixup and parallel limits into the main parse function
Previously these were only applied from inside parseFiles, which also made it
harder to refactor the remaining parse logic.
This theoretically means fixup is now called in more places than it was before,
but should cause no change in behaviour.
Change-Id: Ic6d006c1d36daca7514626653aaedf90d76e1d0f
Reviewed-on: https://go-review.googlesource.com/c/tools/+/181544
Run-TryBot: Ian Cottrell <52376736e5e84615ec49f9c1843b8acc178b1d8d@google.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Rebecca Stambler <d7cf11e5f299d88ea8348366a9fc4f7c335c3afa@golang.org>
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
)
// Limits the number of parallel parser calls per process.
// (Counting semaphore: send to acquire, receive to release.)
var parseLimit = make(chan bool, 20)

// parseKey uniquely identifies a parse request: a specific file
// version parsed in a specific mode. It is the memoization key.
type parseKey struct {
	file source.FileIdentity
	mode source.ParseMode
}

// parseGoHandle is the concrete source.ParseGoHandle: a memoized
// handle to the parse result, plus the file and mode it was built for.
type parseGoHandle struct {
	handle *memoize.Handle
	file source.FileHandle
	mode source.ParseMode
}

// parseGoData is the value stored under the memoize handle; NoCopy
// guards against accidental copying of the cached result.
type parseGoData struct {
	memoize.NoCopy
	ast *ast.File
	err error
}
// ParseGo returns a handle for the AST of the given file parsed in the
// given mode. The parse is memoized in the cache's store keyed by
// (file identity, mode), so repeated requests for the same file
// version share one computation.
func (c *cache) ParseGo(fh source.FileHandle, mode source.ParseMode) source.ParseGoHandle {
	key := parseKey{
		file: fh.Identity(),
		mode: mode,
	}
	h := c.store.Bind(key, func(ctx context.Context) interface{} {
		data := &parseGoData{}
		data.ast, data.err = parseGo(ctx, c, fh, mode)
		return data
	})
	// Populate file and mode: previously they were left zero, so the
	// handle's File() and Mode() accessors returned nothing useful.
	return &parseGoHandle{
		handle: h,
		file:   fh,
		mode:   mode,
	}
}
// File returns the file handle this parse handle was created for.
func (h *parseGoHandle) File() source.FileHandle {
	return h.file
}

// Mode returns the parse mode this handle was created with.
func (h *parseGoHandle) Mode() source.ParseMode {
	return h.mode
}

// Parse waits for and returns the memoized parse result. A nil value
// from Get is treated as cancellation, so ctx.Err() is returned.
func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, error) {
	v := h.handle.Get(ctx)
	if v == nil {
		return nil, ctx.Err()
	}
	data := v.(*parseGoData)
	return data.ast, data.err
}
// parseGo reads fh and parses it with the options implied by mode:
// header-only parses use ImportsOnly, and exported-only parses trim
// function bodies and other non-API details. Any partially parsed AST
// is repaired via fix before being returned (possibly alongside the
// parse error).
func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (*ast.File, error) {
	buf, _, err := fh.Read(ctx)
	if err != nil {
		return nil, err
	}
	// Bound the number of concurrent parser calls across the process.
	parseLimit <- true
	defer func() { <-parseLimit }()
	parserMode := parser.AllErrors | parser.ParseComments
	if mode == source.ParseHeader {
		parserMode = parser.ImportsOnly
	}
	// Name the result "file" rather than "ast" to avoid shadowing the
	// go/ast package.
	file, parseErr := parser.ParseFile(c.fset, fh.Identity().URI.Filename(), buf, parserMode)
	if file != nil {
		if mode == source.ParseExported {
			trimAST(file)
		}
		// Fix any badly parsed parts of the AST.
		tok := c.fset.File(file.Pos())
		if fixErr := fix(ctx, file, tok, buf); fixErr != nil {
			//TODO: we should do something with the error, but we have no access to a logger in here
			_ = fixErr
		}
	}
	return file, parseErr
}
// parseFiles reads and parses the Go source files and returns the ASTs
// of the ones that could be at least partially parsed, along with a list
// of parse errors encountered, and a fatal error that prevented parsing.
//
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
//
func (imp *importer) parseFiles(filenames []string, ignoreFuncBodies bool) ([]*astFile, []error, error) {
	var (
		wg sync.WaitGroup
		n = len(filenames)
		parsed = make([]*astFile, n)
		errors = make([]error, n)
	)
	// TODO: change this function to return the handles
	for i, filename := range filenames {
		// Abort the whole batch if the import context was cancelled.
		if err := imp.ctx.Err(); err != nil {
			return nil, nil, err
		}
		// get a file handle
		fh := imp.view.session.GetFile(span.FileURI(filename))
		// now get a parser
		mode := source.ParseFull
		if ignoreFuncBodies {
			mode = source.ParseExported
		}
		ph := imp.view.session.cache.ParseGo(fh, mode)
		// now read and parse in parallel
		wg.Add(1)
		go func(i int, filename string) {
			defer wg.Done()
			// ParseFile may return a partial AST and an error.
			f, err := ph.Parse(imp.ctx)
			parsed[i], errors[i] = &astFile{
				file: f,
				err: err,
				isTrimmed: ignoreFuncBodies,
			}, err
		}(i, filename)
	}
	wg.Wait()
	// Eliminate nils, preserving order.
	var o int
	for _, f := range parsed {
		if f != nil {
			parsed[o] = f
			o++
		}
	}
	parsed = parsed[:o]
	// Compact errors the same way, independently of parsed.
	o = 0
	for _, err := range errors {
		if err != nil {
			errors[o] = err
			o++
		}
	}
	errors = errors[:o]
	return parsed, errors, nil
}
// sameFile returns true if x and y have the same basename and denote
// the same file.
//
func sameFile(x, y string) bool {
	if x == y {
		// y may not exist on disk (e.g. an unwritten overlay file),
		// but identical path strings always denote the same file.
		return true
	}
	// Cheap pre-filter: distinct basenames cannot denote the same file.
	if !strings.EqualFold(filepath.Base(x), filepath.Base(y)) {
		return false
	}
	xi, err := os.Stat(x)
	if err != nil {
		return false
	}
	yi, err := os.Stat(y)
	if err != nil {
		return false
	}
	return os.SameFile(xi, yi)
}
// trimAST clears any part of the AST not relevant to type checking
// expressions at pos.
func trimAST(file *ast.File) {
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			return false
		}
		switch n := n.(type) {
		case *ast.FuncDecl:
			// Function bodies are not part of the exported API.
			n.Body = nil
		case *ast.BlockStmt:
			n.List = nil
		case *ast.CaseClause:
			n.Body = nil
		case *ast.CommClause:
			n.Body = nil
		case *ast.CompositeLit:
			// Leave elts in place for [...]T
			// array literals, because they can
			// affect the expression's type.
			if !isEllipsisArray(n.Type) {
				n.Elts = nil
			}
		}
		return true
	})
}
// isEllipsisArray reports whether n is a [...]T array type literal.
func isEllipsisArray(n ast.Expr) bool {
	arr, ok := n.(*ast.ArrayType)
	if !ok {
		return false
	}
	_, isEllipsis := arr.Len.(*ast.Ellipsis)
	return isEllipsis
}
// fix inspects and potentially modifies any *ast.BadStmts or *ast.BadExprs in the AST.
// We attempt to modify the AST such that we can type-check it more effectively.
// It returns the last error encountered while repairing a bad statement,
// or nil if every repair succeeded.
func fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) error {
	// parent tracks the most recently visited non-bad node; a BadStmt's
	// enclosing node is needed to splice in a replacement statement.
	var parent ast.Node
	var err error
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			return false
		}
		switch n := n.(type) {
		case *ast.BadStmt:
			// Assign to the captured err — the original used ":=",
			// which shadowed it and made fix always return nil.
			if parseErr := parseDeferOrGoStmt(n, parent, tok, src); parseErr != nil {
				err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", parseErr)
			}
			return false
		default:
			parent = n
			return true
		}
	})
	return err
}
// parseDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement.
//
// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because
// it does not include a call expression. This means that go/types skips type-checking
// this statement entirely, and we can't use the type information when completing.
// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST,
// instead of the *ast.BadStmt.
func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) error {
	// Check if we have a bad statement containing either a "go" or "defer".
	s := &scanner.Scanner{}
	s.Init(tok, src, nil, 0)
	// Scan forward from the start of the file until we reach the bad
	// statement's position; pos/tkn/lit then describe the first token
	// at or past bad.From.
	var pos token.Pos
	var tkn token.Token
	var lit string
	for {
		if tkn == token.EOF {
			return fmt.Errorf("reached the end of the file")
		}
		if pos >= bad.From {
			break
		}
		pos, tkn, lit = s.Scan()
	}
	// Build the statement skeleton matching the keyword we landed on.
	var stmt ast.Stmt
	switch lit {
	case "defer":
		stmt = &ast.DeferStmt{
			Defer: pos,
		}
	case "go":
		stmt = &ast.GoStmt{
			Go: pos,
		}
	default:
		return fmt.Errorf("no defer or go statement found")
	}
	// The expression after the "defer" or "go" starts at this position.
	from, _, _ := s.Scan()
	var to, curr token.Pos
FindTo:
	for {
		curr, tkn, lit = s.Scan()
		// TODO(rstambler): This still needs more handling to work correctly.
		// We encounter a specific issue with code that looks like this:
		//
		// defer x.<>
		// y := 1
		//
		// In this scenario, we parse it as "defer x.y", which then fails to
		// type-check, and we don't get completions as expected.
		switch tkn {
		case token.COMMENT, token.EOF, token.SEMICOLON, token.DEFINE:
			break FindTo
		}
		// to is the end of expression that should become the Fun part of the call.
		to = curr
	}
	// Guard against positions outside the source buffer before slicing.
	if !from.IsValid() || tok.Offset(from) >= len(src) {
		return fmt.Errorf("invalid from position")
	}
	if !to.IsValid() || tok.Offset(to)+1 >= len(src) {
		return fmt.Errorf("invalid to position")
	}
	// Re-parse the captured text as a standalone expression.
	exprstr := string(src[tok.Offset(from) : tok.Offset(to)+1])
	expr, err := parser.ParseExpr(exprstr)
	if expr == nil {
		return fmt.Errorf("no expr in %s: %v", exprstr, err)
	}
	// parser.ParseExpr returns undefined positions.
	// Adjust them for the current file.
	offsetPositions(expr, from-1)
	// Package the expression into a fake *ast.CallExpr and re-insert into the function.
	call := &ast.CallExpr{
		Fun: expr,
		Lparen: to,
		Rparen: to,
	}
	switch stmt := stmt.(type) {
	case *ast.DeferStmt:
		stmt.Call = call
	case *ast.GoStmt:
		stmt.Call = call
	}
	// Splice the synthesized statement over the BadStmt in its
	// enclosing block, when we found one.
	switch parent := parent.(type) {
	case *ast.BlockStmt:
		for i, s := range parent.List {
			if s == bad {
				parent.List[i] = stmt
				break
			}
		}
	}
	return nil
}
// offsetPositions applies an offset to the positions in an ast.Node.
// TODO(rstambler): Add more cases here as they become necessary.
func offsetPositions(expr ast.Expr, offset token.Pos) {
	ast.Inspect(expr, func(node ast.Node) bool {
		ident, ok := node.(*ast.Ident)
		if !ok {
			// Keep descending until we reach identifiers.
			return true
		}
		ident.NamePos += offset
		return false
	})
}
|
package app
import (
"fmt"
"github.com/mohae/contour"
jww "github.com/spf13/jwalterweatherman"
)
// BuildDistro creates a build based on the target distro's defaults. The
// ArgsFilter contains information on the target distro and any overrides that
// are to be applied to the build. Returns either a processing message or an
// error.
func BuildDistro() (string, error) {
	// Lazily load distro defaults on first use.
	if !DistroDefaults.IsSet {
		if serr := DistroDefaults.Set(); serr != nil {
			err := fmt.Errorf("BuildDistro failed: %s", serr)
			jww.ERROR.Println(err)
			return "", err
		}
	}
	message, err := buildPackerTemplateFromDistro()
	if err != nil {
		err = fmt.Errorf("BuildDistro failed: %s", err)
		jww.ERROR.Println(err)
	}
	return message, err
}
// Create Packer templates from specified build templates.
// TODO: refactor to match updated handling
func buildPackerTemplateFromDistro() (string, error) {
	var rTpl *rawTemplate
	jww.INFO.Println("creating template using distro defaults for " + contour.GetString("distro"))
	// Get the default for this distro, if one isn't found then it isn't
	// Supported.
	rTpl, err := DistroDefaults.GetTemplate(contour.GetString("distro"))
	if err != nil {
		err = Error{slug: "get template", err: err}
		jww.ERROR.Println(err)
		return "", err
	}
	// If there were any overrides, set them.
	if contour.GetString("arch") != "" {
		rTpl.Arch = contour.GetString("arch")
	}
	if contour.GetString("image") != "" {
		rTpl.Image = contour.GetString("image")
	}
	if contour.GetString("release") != "" {
		rTpl.Release = contour.GetString("release")
	}
	// Since distro builds don't actually have a build name, we create one out
	// of the args used to create it.
	rTpl.BuildName = fmt.Sprintf("%s-%s-%s-%s", rTpl.Distro, rTpl.Release, rTpl.Arch, rTpl.Image)
	// Now that the raw template has been made, create a Packer template out of it
	pTpl, err := rTpl.createPackerTemplate()
	if err != nil {
		// NOTE(review): this reuses the "get template" slug from the
		// earlier failure path, which mislabels errors from
		// createPackerTemplate — confirm and use a distinct slug.
		err = Error{slug: "get template", err: err}
		jww.ERROR.Println(err)
		return "", err
	}
	// Create the JSON version of the Packer template. This also handles
	// creation of the build directory and copying all files that the Packer
	// template needs to the build directory.
	err = pTpl.create(rTpl.IODirInf, rTpl.BuildInf, rTpl.dirs, rTpl.files)
	if err != nil {
		jww.ERROR.Println(err)
		return "", err
	}
	msg := fmt.Sprintf("build for %q complete: Packer template name is %q", rTpl.Distro, rTpl.BuildName)
	jww.INFO.Println(msg)
	return msg, nil
}
// BuildBuilds manages the process of creating Packer Build templates out of
// the passed build names. All builds are done concurrently. Returns either a
// message providing information about the processing of the requested builds
// or an error.
func BuildBuilds(buildNames ...string) (string, error) {
	if buildNames[0] == "" {
		err := fmt.Errorf("builds failed: no build name was received")
		jww.ERROR.Println(err)
		return "", err
	}
	// Only load supported if it hasn't been loaded.
	if !DistroDefaults.IsSet {
		jww.DEBUG.Println("loading distro defaults")
		err := DistroDefaults.Set()
		if err != nil {
			err = fmt.Errorf("builds failed: %s", err)
			jww.ERROR.Println(err)
			return "", err
		}
	}
	// First load the build information
	err := loadBuilds()
	if err != nil {
		err = fmt.Errorf("builds failed: %s", err)
		jww.ERROR.Println(err)
		return "", err
	}
	// A buffered channel sized to the number of builds collects each
	// build's result. A channel per build is fine for now; a queue and
	// worker pool would be a better choice for large build counts.
	var errorCount, builtCount int
	nBuilds := len(buildNames)
	doneCh := make(chan error, nBuilds)
	// Process each build request concurrently.
	for i := 0; i < nBuilds; i++ {
		go buildPackerTemplateFromNamedBuild(buildNames[i], doneCh)
	}
	// Wait for every build to report back, tallying results.
	for i := 0; i < nBuilds; i++ {
		if err := <-doneCh; err != nil {
			jww.ERROR.Println(err)
			errorCount++
		} else {
			// (typo fix: "as" -> "was")
			jww.TRACE.Println("a template was successfully created")
			builtCount++
		}
	}
	// Summarize; single-build requests get a specific message.
	// (Structured switch replaces the original goto-based flow; the
	// multi-build message also had "unsucessfully process" and a
	// doubled period fixed.)
	var msg string
	switch {
	case nBuilds == 1 && builtCount > 0:
		msg = fmt.Sprintf("%s was successfully processed and its Packer template was created", buildNames[0])
	case nBuilds == 1:
		msg = fmt.Sprintf("Processing of the %s build failed with an error.", buildNames[0])
	default:
		msg = fmt.Sprintf("BuildBuilds: %v Builds were successfully processed and their Packer templates were created, %v Builds were unsuccessfully processed and resulted in errors.", builtCount, errorCount)
	}
	jww.INFO.Println(msg)
	return msg, nil
}
// buildPackerTemplateFromNamedBuild creates a Packer template and associated
// artifacts for the passed build. Exactly one value (nil on success,
// otherwise an error) is sent on doneCh.
func buildPackerTemplateFromNamedBuild(name string, doneCh chan error) {
	if name == "" {
		err := fmt.Errorf("unable to build Packer template: no build name was received")
		doneCh <- err
		return
	}
	jww.INFO.Printf("Start creation of Packer template %s\n", name)
	defer jww.INFO.Printf("End creation of Packer template %s\n", name)
	var ok bool
	// Check the type and create the defaults for that type, if it doesn't already exist.
	bTpl, err := getBuildTemplate(name)
	if err != nil {
		doneCh <- fmt.Errorf("processing of build template %q failed: %s", name, err)
		return
	}
	// See if the distro default exists.
	rTpl := rawTemplate{}
	rTpl, ok = DistroDefaults.Templates[ParseDistro(bTpl.Distro)]
	if !ok {
		doneCh <- fmt.Errorf("creation of Packer template for %s failed: %s not supported", name, bTpl.Distro)
		return
	}
	// TODO: this is probably where the merging of parent build would occur
	rTpl.Name = name
	err = rTpl.updateBuildSettings(bTpl)
	if err != nil {
		doneCh <- Error{name, err}
		// Return here: the original fell through, continuing with a bad
		// template and sending a second value on doneCh.
		return
	}
	if contour.GetBool(Example) {
		rTpl.IsExample = true
		rTpl.ExampleDir = contour.GetString(ExampleDir)
		rTpl.setExampleDirs()
	}
	pTpl, err := rTpl.createPackerTemplate()
	if err != nil {
		doneCh <- err
		return
	}
	err = pTpl.create(rTpl.IODirInf, rTpl.BuildInf, rTpl.dirs, rTpl.files)
	if err != nil {
		doneCh <- err
		return
	}
	doneCh <- nil
}
make build error messages clearer
package app
import (
"fmt"
"github.com/mohae/contour"
jww "github.com/spf13/jwalterweatherman"
)
// BuildDistro creates a build based on the target distro's defaults. The
// ArgsFilter contains information on the target distro and any overrides that
// are to be applied to the build. Returns either a processing message or an
// error.
func BuildDistro() (string, error) {
	// Lazily load distro defaults on first use.
	if !DistroDefaults.IsSet {
		if serr := DistroDefaults.Set(); serr != nil {
			err := fmt.Errorf("build packer template from distro failed: %s", serr)
			jww.ERROR.Println(err)
			return "", err
		}
	}
	message, err := buildPackerTemplateFromDistro()
	if err != nil {
		err = fmt.Errorf("build packer template from distro failed: %s", err)
		jww.ERROR.Println(err)
	}
	return message, err
}
// Create Packer templates from specified build templates.
// TODO: refactor to match updated handling
func buildPackerTemplateFromDistro() (string, error) {
	var rTpl *rawTemplate
	jww.INFO.Println("creating template using distro defaults for " + contour.GetString("distro"))
	// Get the default for this distro, if one isn't found then it isn't
	// Supported.
	rTpl, err := DistroDefaults.GetTemplate(contour.GetString("distro"))
	if err != nil {
		err = Error{slug: "get template", err: err}
		jww.ERROR.Println(err)
		return "", err
	}
	// If there were any overrides, set them.
	if contour.GetString("arch") != "" {
		rTpl.Arch = contour.GetString("arch")
	}
	if contour.GetString("image") != "" {
		rTpl.Image = contour.GetString("image")
	}
	if contour.GetString("release") != "" {
		rTpl.Release = contour.GetString("release")
	}
	// Since distro builds don't actually have a build name, we create one out
	// of the args used to create it.
	rTpl.BuildName = fmt.Sprintf("%s-%s-%s-%s", rTpl.Distro, rTpl.Release, rTpl.Arch, rTpl.Image)
	// Now that the raw template has been made, create a Packer template out of it
	pTpl, err := rTpl.createPackerTemplate()
	if err != nil {
		// Use a slug naming this step; it previously reused
		// "get template", mislabeling failures here.
		err = Error{slug: "create packer template", err: err}
		jww.ERROR.Println(err)
		return "", err
	}
	// Create the JSON version of the Packer template. This also handles
	// creation of the build directory and copying all files that the Packer
	// template needs to the build directory.
	err = pTpl.create(rTpl.IODirInf, rTpl.BuildInf, rTpl.dirs, rTpl.files)
	if err != nil {
		jww.ERROR.Println(err)
		return "", err
	}
	msg := fmt.Sprintf("build for %q complete: Packer template name is %q", rTpl.Distro, rTpl.BuildName)
	jww.INFO.Println(msg)
	return msg, nil
}
// BuildBuilds manages the process of creating Packer Build templates out of
// the passed build names. All builds are done concurrently. Returns either a
// message providing information about the processing of the requested builds
// or an error.
func BuildBuilds(buildNames ...string) (string, error) {
	if buildNames[0] == "" {
		err := fmt.Errorf("builds failed: no build name was received")
		jww.ERROR.Println(err)
		return "", err
	}
	// Only load supported if it hasn't been loaded.
	if !DistroDefaults.IsSet {
		jww.DEBUG.Println("loading distro defaults")
		err := DistroDefaults.Set()
		if err != nil {
			err = fmt.Errorf("builds failed: %s", err)
			jww.ERROR.Println(err)
			return "", err
		}
	}
	// First load the build information
	err := loadBuilds()
	if err != nil {
		err = fmt.Errorf("builds failed: %s", err)
		jww.ERROR.Println(err)
		return "", err
	}
	// A buffered channel sized to the number of builds collects each
	// build's result. A channel per build is fine for now; a queue and
	// worker pool would be a better choice for large build counts.
	var errorCount, builtCount int
	nBuilds := len(buildNames)
	doneCh := make(chan error, nBuilds)
	// Process each build request concurrently.
	for i := 0; i < nBuilds; i++ {
		go buildPackerTemplateFromNamedBuild(buildNames[i], doneCh)
	}
	// Wait for every build to report back, tallying results.
	for i := 0; i < nBuilds; i++ {
		if err := <-doneCh; err != nil {
			jww.ERROR.Println(err)
			errorCount++
		} else {
			// (typo fix: "as" -> "was")
			jww.TRACE.Println("a template was successfully created")
			builtCount++
		}
	}
	// Summarize; single-build requests get a specific message.
	// (Structured switch replaces the original goto-based flow; the
	// multi-build message also had "unsucessfully process" and a
	// doubled period fixed.)
	var msg string
	switch {
	case nBuilds == 1 && builtCount > 0:
		msg = fmt.Sprintf("%s was successfully processed and its Packer template was created", buildNames[0])
	case nBuilds == 1:
		msg = fmt.Sprintf("Processing of the %s build failed with an error.", buildNames[0])
	default:
		msg = fmt.Sprintf("BuildBuilds: %v Builds were successfully processed and their Packer templates were created, %v Builds were unsuccessfully processed and resulted in errors.", builtCount, errorCount)
	}
	jww.INFO.Println(msg)
	return msg, nil
}
// buildPackerTemplateFromNamedBuild creates a Packer template and associated
// artifacts for the passed build. Exactly one value (nil on success,
// otherwise an error) is sent on doneCh.
func buildPackerTemplateFromNamedBuild(name string, doneCh chan error) {
	if name == "" {
		err := fmt.Errorf("build packer template failed: no build name was received")
		doneCh <- err
		return
	}
	jww.INFO.Printf("Start creation of Packer template %s\n", name)
	defer jww.INFO.Printf("End creation of Packer template %s\n", name)
	var ok bool
	// Check the type and create the defaults for that type, if it doesn't already exist.
	bTpl, err := getBuildTemplate(name)
	if err != nil {
		doneCh <- fmt.Errorf("processing of build template %q failed: %s", name, err)
		return
	}
	// See if the distro default exists.
	rTpl := rawTemplate{}
	rTpl, ok = DistroDefaults.Templates[ParseDistro(bTpl.Distro)]
	if !ok {
		doneCh <- fmt.Errorf("creation of Packer template for %s failed: %s not supported", name, bTpl.Distro)
		return
	}
	// TODO: this is probably where the merging of parent build would occur
	rTpl.Name = name
	err = rTpl.updateBuildSettings(bTpl)
	if err != nil {
		doneCh <- Error{name, err}
		// Return here: the original fell through, continuing with a bad
		// template and sending a second value on doneCh.
		return
	}
	if contour.GetBool(Example) {
		rTpl.IsExample = true
		rTpl.ExampleDir = contour.GetString(ExampleDir)
		rTpl.setExampleDirs()
	}
	pTpl, err := rTpl.createPackerTemplate()
	if err != nil {
		doneCh <- err
		return
	}
	err = pTpl.create(rTpl.IODirInf, rTpl.BuildInf, rTpl.dirs, rTpl.files)
	if err != nil {
		doneCh <- err
		return
	}
	doneCh <- nil
}
|
package emailnotifier
import (
"errors"
"fmt"
"koding/db/mongodb/modelhelper"
socialmodels "socialapi/models"
"socialapi/workers/notification/models"
"github.com/koding/logging"
"github.com/koding/rabbitmq"
"github.com/koding/worker"
"github.com/robfig/cron"
"github.com/sendgrid/sendgrid-go"
"github.com/streadway/amqp"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
// SCHEDULE is the cron spec for the daily mail job; the six fields
// appear to be sec/min/hour/dom/month/dow, i.e. once a day at
// midnight — TODO confirm against the cron library's format.
const SCHEDULE = "0 0 0 * * *"

// cronJob holds the process-wide cron runner started by initDailyEmailCron.
var cronJob *cron.Cron

// emailConfig maps a notification content type to the name of the
// user email-settings flag that controls it.
var emailConfig = map[string]string{
	models.NotificationContent_TYPE_COMMENT: "comment",
	models.NotificationContent_TYPE_LIKE: "likeActivities",
	models.NotificationContent_TYPE_FOLLOW: "followActions",
	models.NotificationContent_TYPE_JOIN: "groupJoined",
	models.NotificationContent_TYPE_LEAVE: "groupLeft",
	models.NotificationContent_TYPE_MENTION: "mention",
}

// Action is an event handler invoked with the Controller and the raw
// message payload.
type Action func(*Controller, []byte) error

// Controller consumes notification events and sends notification emails.
type Controller struct {
	routes map[string]Action // event name -> handler
	log logging.Logger
	rmqConn *amqp.Connection
	settings *EmailSettings
}

// EmailSettings carries the mail-account credentials and sender
// identity used when sending notification email.
type EmailSettings struct {
	Username string
	Password string
	FromName string
	FromMail string
}
// DefaultErrHandler logs the error and acks the delivery so the
// message is not redelivered. The returned value is handed back to the
// worker framework (always false here).
func (n *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
	// (typo fix in log message: "occured" -> "occurred")
	n.log.Error("an error occurred: %s", err)
	delivery.Ack(false)
	return false
}
// HandleEvent dispatches data to the handler registered for event,
// returning worker.HandlerNotFoundErr for unknown events.
func (n *Controller) HandleEvent(event string, data []byte) error {
	n.log.Debug("New Event Received %s", event)
	action, found := n.routes[event]
	if !found {
		return worker.HandlerNotFoundErr
	}
	return action(n, data)
}
// New connects to RabbitMQ and returns a Controller wired with its
// event routes and the daily email cron job started.
func New(rmq *rabbitmq.RabbitMQ, log logging.Logger, es *EmailSettings) (*Controller, error) {
	conn, err := rmq.Connect("NewEmailNotifierWorkerController")
	if err != nil {
		return nil, err
	}
	nwc := &Controller{
		log:      log,
		rmqConn:  conn.Conn(),
		settings: es,
	}
	// Both create and update events trigger an instant email.
	nwc.routes = map[string]Action{
		"notification.notification_created": (*Controller).SendInstantEmail,
		"notification.notification_updated": (*Controller).SendInstantEmail,
	}
	nwc.initDailyEmailCron()
	return nwc, nil
}
func (n *EmailNotifierWorkerController) initDailyEmailCron() {
cronJob = cron.New()
cronJob.AddFunc(SCHEDULE, n.sendDailyMails)
cronJob.Start()
}
// SendInstantEmail builds and sends a single notification email for
// the notification event encoded in data. It returns nil without
// sending when the notification should not be mailed: the actor is the
// recipient, the notification is not yet activated, or the user's mail
// settings exclude it.
func (n *Controller) SendInstantEmail(data []byte) error {
	// Open an AMQP channel for the lifetime of this call.
	channel, err := n.rmqConn.Channel()
	if err != nil {
		return errors.New("channel connection error")
	}
	defer channel.Close()
	// Decode the raw payload into a Notification.
	notification := models.NewNotification()
	if err := notification.MapMessage(data); err != nil {
		return err
	}
	// fetch latest activity for checking actor
	activity, nc, err := notification.FetchLastActivity()
	if err != nil {
		return err
	}
	if !validNotification(activity, notification) {
		return nil
	}
	uc, err := fetchUserContact(notification.AccountId)
	if err != nil {
		return fmt.Errorf("an error occurred while fetching user contact: %s", err)
	}
	if !checkMailSettings(uc, nc) {
		return nil
	}
	// Assemble all data the email template needs.
	container, err := buildContainer(activity, nc, notification)
	if err != nil {
		return err
	}
	body, err := renderTemplate(uc, container)
	if err != nil {
		return fmt.Errorf("an error occurred while preparing notification email: %s", err)
	}
	subject := prepareSubject(container)
	// Persist the container's token before sending the mail.
	if err := createToken(uc, nc, container.Token); err != nil {
		return err
	}
	return n.SendMail(uc, body, subject)
}
// UserContact aggregates the account and user details needed to
// address and personalize a notification email.
type UserContact struct {
	UserOldId bson.ObjectId
	Email string
	FirstName string
	LastName string
	Username string
	Hash string
	EmailSettings map[string]bool // per-category opt-in flags, consulted by checkMailSettings
}
// validNotification reports whether notification n should be delivered
// for activity a.
func validNotification(a *models.NotificationActivity, n *models.Notification) bool {
	// do not notify actor for her own action
	if a.ActorId == n.AccountId {
		return false
	}
	// do not notify user when notification is not yet activated
	if n.ActivatedAt.IsZero() {
		return false
	}
	return true
}
// checkMailSettings reports whether the user's email settings allow an
// instant mail for this notification content.
func checkMailSettings(uc *UserContact, nc *models.NotificationContent) bool {
	settings := uc.EmailSettings
	// All notifications disabled: never send.
	if !settings["global"] {
		return false
	}
	// Daily digest selected: skip instant mail.
	if settings["daily"] {
		return false
	}
	// Per-type flag, keyed via the notification's content type.
	return settings[emailConfig[nc.TypeConstant]]
}
// buildContainer assembles the NotificationContainer consumed by the
// email templates: the activity, its content, the notification itself,
// a fresh token, and — for object-related notifications — the target
// message's group, slug, object type, and body.
func buildContainer(a *models.NotificationActivity, nc *models.NotificationContent,
	n *models.Notification) (*NotificationContainer, error) {
	// if content type not valid return
	contentType, err := nc.GetContentType()
	if err != nil {
		return nil, err
	}
	container := &NotificationContainer{
		Activity: a,
		Content: nc,
		Notification: n,
	}
	container.Token, err = generateToken()
	if err != nil {
		return nil, err
	}
	// if notification target is related with an object (comment/status update)
	if containsObject(nc) {
		target := socialmodels.NewChannelMessage()
		if err := target.ById(nc.TargetId); err != nil {
			return nil, fmt.Errorf("target message not found")
		}
		prepareGroup(container, target)
		prepareSlug(container, target)
		prepareObjectType(container, target)
		container.Message = fetchContentBody(nc, target)
		contentType.SetActorId(target.AccountId)
		contentType.SetListerId(n.AccountId)
	}
	container.ActivityMessage = contentType.GetActivity()
	return container, nil
}
// prepareGroup fills the container's group content from the channel the
// message was posted in; lookup failures leave the group untouched.
func prepareGroup(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	channel := socialmodels.NewChannel()
	if err := channel.ById(cm.InitialChannelId); err != nil {
		return
	}
	// TODO fix these Slug and Name
	gc := GroupContent{}
	gc.Slug = channel.GroupName
	gc.Name = channel.GroupName
	container.Group = gc
}
// prepareSlug sets the container slug: the message's own slug for posts,
// the parent message's slug for replies.
func prepareSlug(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_POST {
		container.Slug = cm.Slug
		return
	}
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_REPLY {
		// TODO we need append something like comment id to parent message slug
		parent := fetchRepliedMessage(cm.Id)
		container.Slug = parent.Slug
	}
}
// prepareObjectType sets the human-readable object name used in the email
// ("status update" for posts, "comment" for replies).
func prepareObjectType(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_POST {
		container.ObjectType = "status update"
		return
	}
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_REPLY {
		container.ObjectType = "comment"
	}
}
// fetchUserContact gets user and account details with given account id.
// It resolves the social account to the legacy (mongo) account, then loads
// the user document by nickname.
func fetchUserContact(accountId int64) (*UserContact, error) {
	a := socialmodels.NewAccount()
	if err := a.ById(accountId); err != nil {
		return nil, err
	}
	account, err := modelhelper.GetAccountById(a.OldId)
	if err != nil {
		// distinguish "missing" from other storage errors
		if err == mgo.ErrNotFound {
			return nil, errors.New("old account not found")
		}
		return nil, err
	}
	user, err := modelhelper.GetUser(account.Profile.Nickname)
	if err != nil {
		if err == mgo.ErrNotFound {
			return nil, errors.New("user not found")
		}
		return nil, err
	}
	uc := &UserContact{
		UserOldId: user.ObjectId,
		Email: user.Email,
		FirstName: account.Profile.FirstName,
		LastName: account.Profile.LastName,
		Username: account.Profile.Nickname,
		Hash: account.Profile.Hash,
		EmailSettings: user.EmailFrequency,
	}
	return uc, nil
}
// containsObject reports whether the notification targets a channel message
// (like, mention, and comment notifications do).
func containsObject(nc *models.NotificationContent) bool {
	switch nc.TypeConstant {
	case models.NotificationContent_TYPE_LIKE,
		models.NotificationContent_TYPE_MENTION,
		models.NotificationContent_TYPE_COMMENT:
		return true
	}
	return false
}
// fetchContentBody returns the message text to embed in the email: the
// message body itself for likes/mentions, the latest reply for comments,
// and an empty string for every other type.
func fetchContentBody(nc *models.NotificationContent, cm *socialmodels.ChannelMessage) string {
	if nc.TypeConstant == models.NotificationContent_TYPE_COMMENT {
		return fetchLastReplyBody(cm.Id)
	}
	if nc.TypeConstant == models.NotificationContent_TYPE_LIKE ||
		nc.TypeConstant == models.NotificationContent_TYPE_MENTION {
		return cm.Body
	}
	return ""
}
// fetchLastReplyBody returns the body of the most recent reply to the given
// message, or "" when there is none (best-effort: errors are swallowed).
func fetchLastReplyBody(targetId int64) string {
	reply := socialmodels.NewMessageReply()
	reply.MessageId = targetId
	q := socialmodels.NewQuery()
	q.Limit = 1
	replies, err := reply.List(q)
	if err != nil || len(replies) == 0 {
		return ""
	}
	return replies[0].Body
}
// fetchRepliedMessage returns the parent message of the given reply, or an
// empty ChannelMessage when the lookup fails (never nil).
func fetchRepliedMessage(replyId int64) *socialmodels.ChannelMessage {
	reply := socialmodels.NewMessageReply()
	reply.ReplyId = replyId
	if parent, err := reply.FetchRepliedMessage(); err == nil {
		return parent
	}
	return socialmodels.NewChannelMessage()
}
// SendMail delivers the rendered notification email to the user through
// sendgrid, using the controller's configured credentials and sender.
// Returns an error on send failure, including the underlying sendgrid error
// (the previous message dropped it, which made failures undiagnosable).
func (n *Controller) SendMail(uc *UserContact, body, subject string) error {
	es := n.settings
	sg := sendgrid.NewSendGridClient(es.Username, es.Password)
	fullname := fmt.Sprintf("%s %s", uc.FirstName, uc.LastName)
	message := sendgrid.NewMail()
	message.AddTo(uc.Email)
	message.AddToName(fullname)
	message.SetSubject(subject)
	message.SetHTML(body)
	message.SetFrom(es.FromMail)
	message.SetFromName(es.FromName)
	if err := sg.Send(message); err != nil {
		return fmt.Errorf("an error occurred while sending notification email to %s: %s", uc.Username, err)
	}
	n.log.Info("%s notified by email", uc.Username)
	return nil
}
Notification: token is moved to UserContact
package emailnotifier
import (
"errors"
"fmt"
"koding/db/mongodb/modelhelper"
socialmodels "socialapi/models"
"socialapi/workers/notification/models"
"github.com/koding/logging"
"github.com/koding/rabbitmq"
"github.com/koding/worker"
"github.com/robfig/cron"
"github.com/sendgrid/sendgrid-go"
"github.com/streadway/amqp"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
// SCHEDULE is the daily-digest cron spec (sec min hour dom month dow):
// fires at midnight every day.
const SCHEDULE = "0 0 0 * * *"

// cronJob drives the daily digest; created in initDailyEmailCron.
var cronJob *cron.Cron

// emailConfig maps notification content types to the user email-setting
// keys consulted by checkMailSettings.
var emailConfig = map[string]string{
	models.NotificationContent_TYPE_COMMENT: "comment",
	models.NotificationContent_TYPE_LIKE: "likeActivities",
	models.NotificationContent_TYPE_FOLLOW: "followActions",
	models.NotificationContent_TYPE_JOIN: "groupJoined",
	models.NotificationContent_TYPE_LEAVE: "groupLeft",
	models.NotificationContent_TYPE_MENTION: "mention",
}
// Action is the handler signature for routed notification events.
type Action func(*Controller, []byte) error

// Controller consumes notification events from rabbitmq and sends
// instant notification emails.
type Controller struct {
	routes map[string]Action // event name -> handler (see New)
	log logging.Logger
	rmqConn *amqp.Connection
	settings *EmailSettings // sendgrid credentials and sender identity
}
// EmailSettings holds the sendgrid credentials and the From identity used
// for outgoing notification emails.
type EmailSettings struct {
	Username string
	Password string
	FromName string
	FromMail string
}
// DefaultErrHandler logs the error and acks the delivery so the message is
// not redelivered; always returns false. Fixes the "occured" typo in the
// log message.
func (n *Controller) DefaultErrHandler(delivery amqp.Delivery, err error) bool {
	n.log.Error("an error occurred: %s", err)
	delivery.Ack(false)
	return false
}
// HandleEvent dispatches an incoming event to its registered route handler,
// returning worker.HandlerNotFoundErr for unknown events.
func (n *Controller) HandleEvent(event string, data []byte) error {
	n.log.Debug("New Event Received %s", event)
	if handler, ok := n.routes[event]; ok {
		return handler(n, data)
	}
	return worker.HandlerNotFoundErr
}
// New creates an email notifier Controller wired to the given rabbitmq
// connection and sendgrid settings, registers the notification event
// routes, and starts the daily email cron job.
func New(rmq *rabbitmq.RabbitMQ, log logging.Logger, es *EmailSettings) (*Controller, error) {
	rmqConn, err := rmq.Connect("NewEmailNotifierWorkerController")
	if err != nil {
		return nil, err
	}
	nwc := &Controller{
		log: log,
		rmqConn: rmqConn.Conn(),
		settings: es,
	}
	// both create and update events trigger an instant email
	routes := map[string]Action{
		"notification.notification_created": (*Controller).SendInstantEmail,
		"notification.notification_updated": (*Controller).SendInstantEmail,
	}
	nwc.routes = routes
	nwc.initDailyEmailCron()
	return nwc, nil
}
func (n *EmailNotifierWorkerController) initDailyEmailCron() {
cronJob = cron.New()
cronJob.AddFunc(SCHEDULE, n.sendDailyMails)
cronJob.Start()
}
// SendInstantEmail handles a notification_created/updated event: it decodes
// the notification, applies actor and mail-settings filters, renders the
// email, stores the token, and sends the mail through sendgrid.
func (n *Controller) SendInstantEmail(data []byte) error {
	// NOTE(review): the channel is opened but never used below — it appears
	// to only verify the rabbitmq connection; confirm whether it is needed.
	channel, err := n.rmqConn.Channel()
	if err != nil {
		return errors.New("channel connection error")
	}
	defer channel.Close()
	notification := models.NewNotification()
	if err := notification.MapMessage(data); err != nil {
		return err
	}
	// fetch latest activity for checking actor
	activity, nc, err := notification.FetchLastActivity()
	if err != nil {
		return err
	}
	// skip self-notifications and not-yet-activated notifications
	if !validNotification(activity, notification) {
		return nil
	}
	uc, err := fetchUserContact(notification.AccountId)
	if err != nil {
		return fmt.Errorf("an error occurred while fetching user contact: %s", err)
	}
	// honor the user's email preferences (global / daily / per-type)
	if !checkMailSettings(uc, nc) {
		return nil
	}
	container, err := buildContainer(activity, nc, notification)
	if err != nil {
		return err
	}
	body, err := renderTemplate(uc, container)
	if err != nil {
		return fmt.Errorf("an error occurred while preparing notification email: %s", err)
	}
	subject := prepareSubject(container)
	// persist the container token before mailing (createToken defined elsewhere)
	if err := createToken(uc, nc, container.Token); err != nil {
		return err
	}
	return n.SendMail(uc, body, subject)
}
// UserContact carries the user and account fields needed to address and
// render a notification email (assembled by fetchUserContact).
type UserContact struct {
	UserOldId bson.ObjectId // mongo ObjectId of the legacy user document
	Email string
	FirstName string
	LastName string
	Username string
	Hash string // account profile hash (from account.Profile.Hash)
	Token string // per-contact token generated in fetchUserContact
	EmailSettings map[string]bool // per-type email opt-ins, from user.EmailFrequency
}
// validNotification reports whether the notification should be delivered:
// self-generated activity and not-yet-activated notifications are filtered out.
func validNotification(a *models.NotificationActivity, n *models.Notification) bool {
	switch {
	case a.ActorId == n.AccountId:
		// users are not notified about their own actions
		return false
	case n.ActivatedAt.IsZero():
		// notification has not been activated yet
		return false
	default:
		return true
	}
}
// checkMailSettings reports whether the user accepts an instant email for
// the given notification content type.
func checkMailSettings(uc *UserContact, nc *models.NotificationContent) bool {
	settings := uc.EmailSettings
	// all email notifications are turned off
	if !settings["global"] {
		return false
	}
	// user prefers the daily digest over instant mails
	if settings["daily"] {
		return false
	}
	// per-type opt-in, keyed through the emailConfig lookup table
	return settings[emailConfig[nc.TypeConstant]]
}
// buildContainer assembles the NotificationContainer used to render the
// email: activity, content, notification, a fresh token, and — for
// object-related notifications (like/mention/comment) — target message data.
func buildContainer(a *models.NotificationActivity, nc *models.NotificationContent,
	n *models.Notification) (*NotificationContainer, error) {
	// if content type not valid return
	contentType, err := nc.GetContentType()
	if err != nil {
		return nil, err
	}
	container := &NotificationContainer{
		Activity: a,
		Content: nc,
		Notification: n,
	}
	// token ties this mail to the notification (later stored by the caller)
	container.Token, err = generateToken()
	if err != nil {
		return nil, err
	}
	// if notification target is related with an object (comment/status update)
	if containsObject(nc) {
		target := socialmodels.NewChannelMessage()
		if err := target.ById(nc.TargetId); err != nil {
			return nil, fmt.Errorf("target message not found")
		}
		prepareGroup(container, target)
		prepareSlug(container, target)
		prepareObjectType(container, target)
		container.Message = fetchContentBody(nc, target)
		contentType.SetActorId(target.AccountId)
		contentType.SetListerId(n.AccountId)
	}
	container.ActivityMessage = contentType.GetActivity()
	return container, nil
}
// prepareGroup fills the container's group content from the channel the
// message was posted in; lookup failures leave the group untouched.
func prepareGroup(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	channel := socialmodels.NewChannel()
	if err := channel.ById(cm.InitialChannelId); err != nil {
		return
	}
	// TODO fix these Slug and Name
	gc := GroupContent{}
	gc.Slug = channel.GroupName
	gc.Name = channel.GroupName
	container.Group = gc
}
// prepareSlug sets the container slug: the message's own slug for posts,
// the parent message's slug for replies.
func prepareSlug(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_POST {
		container.Slug = cm.Slug
		return
	}
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_REPLY {
		// TODO we need append something like comment id to parent message slug
		parent := fetchRepliedMessage(cm.Id)
		container.Slug = parent.Slug
	}
}
// prepareObjectType sets the human-readable object name used in the email
// ("status update" for posts, "comment" for replies).
func prepareObjectType(container *NotificationContainer, cm *socialmodels.ChannelMessage) {
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_POST {
		container.ObjectType = "status update"
		return
	}
	if cm.TypeConstant == socialmodels.ChannelMessage_TYPE_REPLY {
		container.ObjectType = "comment"
	}
}
// fetchUserContact gets user and account details with given account id.
// It resolves the social account to the legacy (mongo) account, loads the
// user document by nickname, and generates a fresh contact token.
func fetchUserContact(accountId int64) (*UserContact, error) {
	a := socialmodels.NewAccount()
	if err := a.ById(accountId); err != nil {
		return nil, err
	}
	account, err := modelhelper.GetAccountById(a.OldId)
	if err != nil {
		// distinguish "missing" from other storage errors
		if err == mgo.ErrNotFound {
			return nil, errors.New("old account not found")
		}
		return nil, err
	}
	user, err := modelhelper.GetUser(account.Profile.Nickname)
	if err != nil {
		if err == mgo.ErrNotFound {
			return nil, errors.New("user not found")
		}
		return nil, err
	}
	token, err := generateToken()
	if err != nil {
		return nil, err
	}
	uc := &UserContact{
		UserOldId: user.ObjectId,
		Email: user.Email,
		FirstName: account.Profile.FirstName,
		LastName: account.Profile.LastName,
		Username: account.Profile.Nickname,
		Hash: account.Profile.Hash,
		EmailSettings: user.EmailFrequency,
		Token: token,
	}
	return uc, nil
}
// containsObject reports whether the notification targets a channel message
// (like, mention, and comment notifications do).
func containsObject(nc *models.NotificationContent) bool {
	switch nc.TypeConstant {
	case models.NotificationContent_TYPE_LIKE,
		models.NotificationContent_TYPE_MENTION,
		models.NotificationContent_TYPE_COMMENT:
		return true
	}
	return false
}
// fetchContentBody returns the message text to embed in the email: the
// message body itself for likes/mentions, the latest reply for comments,
// and an empty string for every other type.
func fetchContentBody(nc *models.NotificationContent, cm *socialmodels.ChannelMessage) string {
	if nc.TypeConstant == models.NotificationContent_TYPE_COMMENT {
		return fetchLastReplyBody(cm.Id)
	}
	if nc.TypeConstant == models.NotificationContent_TYPE_LIKE ||
		nc.TypeConstant == models.NotificationContent_TYPE_MENTION {
		return cm.Body
	}
	return ""
}
// fetchLastReplyBody returns the body of the most recent reply to the given
// message, or "" when there is none (best-effort: errors are swallowed).
func fetchLastReplyBody(targetId int64) string {
	reply := socialmodels.NewMessageReply()
	reply.MessageId = targetId
	q := socialmodels.NewQuery()
	q.Limit = 1
	replies, err := reply.List(q)
	if err != nil || len(replies) == 0 {
		return ""
	}
	return replies[0].Body
}
// fetchRepliedMessage returns the parent message of the given reply, or an
// empty ChannelMessage when the lookup fails (never nil).
func fetchRepliedMessage(replyId int64) *socialmodels.ChannelMessage {
	reply := socialmodels.NewMessageReply()
	reply.ReplyId = replyId
	if parent, err := reply.FetchRepliedMessage(); err == nil {
		return parent
	}
	return socialmodels.NewChannelMessage()
}
// SendMail delivers the rendered notification email to the user through
// sendgrid, using the controller's configured credentials and sender.
// Returns an error on send failure, including the underlying sendgrid error
// (the previous message dropped it, which made failures undiagnosable).
func (n *Controller) SendMail(uc *UserContact, body, subject string) error {
	es := n.settings
	sg := sendgrid.NewSendGridClient(es.Username, es.Password)
	fullname := fmt.Sprintf("%s %s", uc.FirstName, uc.LastName)
	message := sendgrid.NewMail()
	message.AddTo(uc.Email)
	message.AddToName(fullname)
	message.SetSubject(subject)
	message.SetHTML(body)
	message.SetFrom(es.FromMail)
	message.SetFromName(es.FromName)
	if err := sg.Send(message); err != nil {
		return fmt.Errorf("an error occurred while sending notification email to %s: %s", uc.Username, err)
	}
	n.log.Info("%s notified by email", uc.Username)
	return nil
}
|
package models
import (
"errors"
"time"
"github.com/guregu/null"
)
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetActiveWarsByID returns CREST references for entities currently at war
// with the given entity id, whether it is aggressor, defender, or involved
// through war allies. "Active" means started and not yet finished.
func GetActiveWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetPendingWarsByID returns CREST references for entities with declared
// but not yet started wars involving the given entity id (as aggressor,
// defender, or ally).
func GetPendingWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetFinishedWarsByID returns CREST references for entities whose wars with
// the given entity id have already finished.
func GetFinishedWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE timeFinished < UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE timeFinished < UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeFinished < UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeFinished < UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// ActiveWarList is one war row plus aggregate kill statistics, as produced
// by GetActiveWarList and GetWarsForEntityByID.
type ActiveWarList struct {
	WarID int64 `db:"warID" json:"warID"`
	TimeStarted time.Time `db:"timeStarted" json:"timeStarted"`
	TimeFinished time.Time `db:"timeFinished" json:"timeFinished"`
	OpenForAllies bool `db:"openForAllies" json:"openForAllies"`
	AggressorID int64 `db:"aggressorID" json:"aggressorID"`
	AggressorType null.String `db:"aggressorType" json:"aggressorType"`
	AggressorName null.String `db:"aggressorName" json:"aggressorName"`
	DefenderID int64 `db:"defenderID" json:"defenderID"`
	DefenderType null.String `db:"defenderType" json:"defenderType"`
	DefenderName null.String `db:"defenderName" json:"defenderName"`
	Mutual bool `db:"mutual" json:"mutual"`
	WarKills int64 `db:"warKills" json:"warKills"` // kills by the aggressor (last 31 days)
	WarLosses int64 `db:"warLosses" json:"warLosses"` // losses of the aggressor side (last 31 days)
	Efficiency float64 `db:"efficiency" json:"efficiency"`
	Kills int64 `db:"kills" json:"kills"`
	Losses int64 `db:"losses" json:"losses"`
}
// [BENCHMARK] 1.469 sec / 0.094 sec
// GetActiveWarList returns all non-mutual, unfinished wars together with
// 31-day war kill/loss counts and the aggressor's overall kill statistics.
func GetActiveWarList() ([]ActiveWarList, error) {
	wars := []ActiveWarList{}
	if err := database.Select(&wars, `
SELECT
W.id AS warID,
timeStarted,
timeFinished,
openForAllies,
aggressorID,
Ag.Type AS aggressorType,
defenderID,
Df.type AS defenderType,
mutual,
IFNULL(K.kills,0) as warKills,
IFNULL(L.losses,0) as warLosses,
IF(AA.allianceID > 0, AA.name, AC.name) AS aggressorName,
IF(DA.allianceID > 0, DA.name, DC.name) AS defenderName,
IFNULL(S.efficiency,1) AS efficiency,
IFNULL(S.kills,0) AS kills,
IFNULL(S.losses,0) AS losses
FROM evedata.wars W
INNER JOIN evedata.crestID Ag ON Ag.id = aggressorID
INNER JOIN evedata.crestID Df ON Df.id = defenderID
LEFT OUTER JOIN evedata.alliances AA on AA.allianceID = aggressorID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = defenderID
LEFT OUTER JOIN evedata.corporations AC on AC.corporationID = aggressorID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = defenderID
LEFT OUTER JOIN evedata.entityKillStats S ON S.id = aggressorID
LEFT OUTER JOIN
( -- Kills by the Aggressor
SELECT
W.id,
count(*) AS kills
FROM evedata.wars W
INNER JOIN evedata.killmails K ON K.warID = W.id AND
(
K.victimAllianceID != W.aggressorID AND
K.victimCorporationID != W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS K ON W.id = K.id
LEFT OUTER JOIN
( -- Kills by the Defenders
SELECT
W.id,
count(*) AS losses
FROM evedata.wars W
INNER JOIN evedata.killmails L ON L.warID = W.id AND
(
L.victimAllianceID = W.aggressorID OR
L.victimCorporationID = W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS L ON W.id = L.id
WHERE mutual = 0 AND
(timeFinished > UTC_TIMESTAMP() OR
timeFinished = "0001-01-01 00:00:00")`); err != nil {
		return nil, err
	}
	return wars, nil
}
// GetWarsForEntityByID returns unfinished wars involving the given entity
// as aggressor, defender, or war ally, with 31-day kill/loss counts.
// Fix: SELECT DISTINCT — the LEFT JOIN on evedata.warAllies produces one
// row per ally record, so a war with allies was listed multiple times.
func GetWarsForEntityByID(id int64) ([]ActiveWarList, error) {
	wars := []ActiveWarList{}
	if err := database.Select(&wars, `
SELECT DISTINCT
W.id AS warID,
timeStarted,
timeFinished,
openForAllies,
aggressorID,
Ag.Type AS aggressorType,
defenderID,
Df.type AS defenderType,
mutual,
IFNULL(kills,0) as kills,
IFNULL(losses,0) as losses,
IF(AA.allianceID > 0, AA.name, AC.name) AS aggressorName,
IF(DA.allianceID > 0, DA.name, DC.name) AS defenderName
FROM evedata.wars W
INNER JOIN evedata.crestID Ag ON Ag.id = aggressorID
INNER JOIN evedata.crestID Df ON Df.id = defenderID
LEFT OUTER JOIN evedata.warAllies A ON A.id = W.id
LEFT OUTER JOIN evedata.alliances AA on AA.allianceID = aggressorID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = defenderID
LEFT OUTER JOIN evedata.corporations AC on AC.corporationID = aggressorID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = defenderID
LEFT OUTER JOIN
( -- Kills by the Aggressor
SELECT
W.id,
count(*) AS kills
FROM evedata.wars W
INNER JOIN evedata.killmails K ON K.warID = W.id AND
(
K.victimAllianceID != W.aggressorID AND
K.victimCorporationID != W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS K ON W.id = K.id
LEFT OUTER JOIN
( -- Kills by the Defenders
SELECT
W.id,
count(*) AS losses
FROM evedata.wars W
INNER JOIN evedata.killmails L ON L.warID = W.id AND
(
L.victimAllianceID = W.aggressorID OR
L.victimCorporationID = W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS L ON W.id = L.id
WHERE (aggressorID = ? OR defenderID = ? OR allyID = ?) AND
(timeFinished > UTC_TIMESTAMP() OR
timeFinished = "0001-01-01 00:00:00")`, id, id, id); err != nil {
		return nil, err
	}
	return wars, nil
}
// KnownAllies is one ally row from GetKnownAlliesByID: how many wars the
// ally joined on the given defender's side.
type KnownAllies struct {
	Number int64 `db:"number" json:"number"` // distinct wars joined
	AllyID int64 `db:"allyID" json:"id"`
	Name string `db:"name" json:"name"`
	Type string `db:"type" json:"type"` // crestID type (alliance/corporation per crestID table)
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetKnownAlliesByID counts, per ally, how many wars in the last 12 months
// the ally joined in defense of the given entity id.
func GetKnownAlliesByID(id int64) ([]KnownAllies, error) {
	w := []KnownAllies{}
	if err := database.Select(&w, `
SELECT
COUNT(DISTINCT W.id) AS number,
allyID,
CREST.type,
IFNULL(DA.name, DC.name) AS name
FROM evedata.wars W
INNER JOIN evedata.warAllies A ON W.id = A.id
INNER JOIN evedata.crestID CREST ON CREST.id = A.allyID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = A.allyID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = A.allyID
WHERE defenderID = ? AND W.timeStarted > DATE_SUB(UTC_TIMESTAMP(), INTERVAL 12 MONTH)
GROUP BY allyID
`, id); err != nil {
		return nil, err
	}
	return w, nil
}
// FactionsByName resolves faction name to ID.
var FactionsByName = map[string]int32{"Caldari": 500001, "Minmatar": 500002, "Amarr": 500003, "Gallente": 500004}

// FactionsByID resolves faction ID to name.
var FactionsByID = map[int32]string{500001: "Caldari", 500002: "Minmatar", 500003: "Amarr", 500004: "Gallente"}

// FactionsAtWar resolves two enemy parties for each factionID.
var FactionsAtWar = map[int32][]int32{
	500001: {500002, 500004}, // Caldari : Minmatar, Gallente
	500003: {500002, 500004}, // Amarr : Minmatar, Gallente
	500002: {500001, 500003}, // Minmatar : Caldari, Amarr
	500004: {500001, 500003}, // Gallente : Caldari, Amarr
}
// FactionWarEntities is one alliance or corporation row returned by
// GetFactionWarEntitiesForID.
type FactionWarEntities struct {
	ID int64 `db:"id" json:"id"` // allianceID when in an alliance, else corporationID
	Name string `db:"name" json:"name"`
	Type string `db:"type" json:"type"` // "alliance" or "corporation"
}
// [BENCHMARK] 0.031 sec / 0.000 sec
// GetFactionWarEntitiesForID lists the alliances and corporations of the
// two enemy factions of the given faction, most active entities first.
func GetFactionWarEntitiesForID(factionID int32) ([]FactionWarEntities, error) {
	if FactionsByID[factionID] == "" {
		return nil, errors.New("Unknown FactionID")
	}
	// Due to CCP limitation, make sure count is under 1024, cut stuff off until it is.
	wars := FactionsAtWar[factionID]
	w := []FactionWarEntities{}
	if err := database.Select(&w, `
SELECT
DISTINCT IF(C.allianceID > 0, C.allianceID, corporationID) AS id,
IF(C.allianceID > 0, A.name, C.name) AS name,
IF(C.allianceID > 0, "alliance", "corporation") AS type
FROM evedata.corporations C
LEFT OUTER JOIN evedata.alliances A ON C.allianceID = A.allianceID
INNER JOIN evedata.entityKillStats K ON K.id = IF(C.allianceID > 0, C.allianceID, C.corporationID)
WHERE factionID IN (?, ?) AND C.memberCount > 0
ORDER BY K.kills + K.losses + C.memberCount DESC, name ASC;
`, wars[0], wars[1]); err != nil {
		return nil, err
	}
	return w, nil
}
Prevent listing duplicate wars when allied.
package models
import (
"errors"
"time"
"github.com/guregu/null"
)
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetActiveWarsByID returns CREST references for entities currently at war
// with the given entity id, whether it is aggressor, defender, or involved
// through war allies. "Active" means started and not yet finished.
func GetActiveWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE (timeFinished = "0001-01-01 00:00:00" OR timeFinished IS NULL OR timeFinished >= UTC_TIMESTAMP()) AND timeStarted <= UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetPendingWarsByID returns CREST references for entities with declared
// but not yet started wars involving the given entity id (as aggressor,
// defender, or ally).
func GetPendingWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeStarted > timeDeclared AND timeStarted > UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetFinishedWarsByID returns CREST references for entities whose wars with
// the given entity id have already finished.
func GetFinishedWarsByID(id int64) ([]CRESTRef, error) {
	w := []CRESTRef{}
	if err := database.Select(&w, `
SELECT K.id, crestRef, type FROM
(SELECT defenderID AS id FROM evedata.wars WHERE timeFinished < UTC_TIMESTAMP() AND aggressorID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars WHERE timeFinished < UTC_TIMESTAMP() AND defenderID = ?
UNION
SELECT aggressorID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeFinished < UTC_TIMESTAMP() AND allyID = ?
UNION
SELECT allyID AS id FROM evedata.wars W INNER JOIN evedata.warAllies A on A.id = W.id WHERE timeFinished < UTC_TIMESTAMP() AND aggressorID = ?) AS K
INNER JOIN evedata.crestID C ON C.id = K.id
`, id, id, id, id); err != nil {
		return nil, err
	}
	return w, nil
}
// ActiveWarList is one war row plus aggregate kill statistics, as produced
// by GetActiveWarList and GetWarsForEntityByID.
type ActiveWarList struct {
	WarID int64 `db:"warID" json:"warID"`
	TimeStarted time.Time `db:"timeStarted" json:"timeStarted"`
	TimeFinished time.Time `db:"timeFinished" json:"timeFinished"`
	OpenForAllies bool `db:"openForAllies" json:"openForAllies"`
	AggressorID int64 `db:"aggressorID" json:"aggressorID"`
	AggressorType null.String `db:"aggressorType" json:"aggressorType"`
	AggressorName null.String `db:"aggressorName" json:"aggressorName"`
	DefenderID int64 `db:"defenderID" json:"defenderID"`
	DefenderType null.String `db:"defenderType" json:"defenderType"`
	DefenderName null.String `db:"defenderName" json:"defenderName"`
	Mutual bool `db:"mutual" json:"mutual"`
	WarKills int64 `db:"warKills" json:"warKills"` // kills by the aggressor (last 31 days)
	WarLosses int64 `db:"warLosses" json:"warLosses"` // losses of the aggressor side (last 31 days)
	Efficiency float64 `db:"efficiency" json:"efficiency"`
	Kills int64 `db:"kills" json:"kills"`
	Losses int64 `db:"losses" json:"losses"`
}
// [BENCHMARK] 1.469 sec / 0.094 sec
// GetActiveWarList returns all non-mutual, unfinished wars together with
// 31-day war kill/loss counts and the aggressor's overall kill statistics.
func GetActiveWarList() ([]ActiveWarList, error) {
	wars := []ActiveWarList{}
	if err := database.Select(&wars, `
SELECT
W.id AS warID,
timeStarted,
timeFinished,
openForAllies,
aggressorID,
Ag.Type AS aggressorType,
defenderID,
Df.type AS defenderType,
mutual,
IFNULL(K.kills,0) as warKills,
IFNULL(L.losses,0) as warLosses,
IF(AA.allianceID > 0, AA.name, AC.name) AS aggressorName,
IF(DA.allianceID > 0, DA.name, DC.name) AS defenderName,
IFNULL(S.efficiency,1) AS efficiency,
IFNULL(S.kills,0) AS kills,
IFNULL(S.losses,0) AS losses
FROM evedata.wars W
INNER JOIN evedata.crestID Ag ON Ag.id = aggressorID
INNER JOIN evedata.crestID Df ON Df.id = defenderID
LEFT OUTER JOIN evedata.alliances AA on AA.allianceID = aggressorID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = defenderID
LEFT OUTER JOIN evedata.corporations AC on AC.corporationID = aggressorID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = defenderID
LEFT OUTER JOIN evedata.entityKillStats S ON S.id = aggressorID
LEFT OUTER JOIN
( -- Kills by the Aggressor
SELECT
W.id,
count(*) AS kills
FROM evedata.wars W
INNER JOIN evedata.killmails K ON K.warID = W.id AND
(
K.victimAllianceID != W.aggressorID AND
K.victimCorporationID != W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS K ON W.id = K.id
LEFT OUTER JOIN
( -- Kills by the Defenders
SELECT
W.id,
count(*) AS losses
FROM evedata.wars W
INNER JOIN evedata.killmails L ON L.warID = W.id AND
(
L.victimAllianceID = W.aggressorID OR
L.victimCorporationID = W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS L ON W.id = L.id
WHERE mutual = 0 AND
(timeFinished > UTC_TIMESTAMP() OR
timeFinished = "0001-01-01 00:00:00")`); err != nil {
		return nil, err
	}
	return wars, nil
}
// GetWarsForEntityByID returns unfinished wars involving the given entity
// as aggressor, defender, or war ally, with 31-day kill/loss counts.
// SELECT DISTINCT prevents one row per matching warAllies record.
func GetWarsForEntityByID(id int64) ([]ActiveWarList, error) {
	wars := []ActiveWarList{}
	if err := database.Select(&wars, `
SELECT DISTINCT
W.id AS warID,
timeStarted,
timeFinished,
openForAllies,
aggressorID,
Ag.Type AS aggressorType,
defenderID,
Df.type AS defenderType,
mutual,
IFNULL(kills,0) as kills,
IFNULL(losses,0) as losses,
IF(AA.allianceID > 0, AA.name, AC.name) AS aggressorName,
IF(DA.allianceID > 0, DA.name, DC.name) AS defenderName
FROM evedata.wars W
INNER JOIN evedata.crestID Ag ON Ag.id = aggressorID
INNER JOIN evedata.crestID Df ON Df.id = defenderID
LEFT OUTER JOIN evedata.warAllies A ON A.id = W.id
LEFT OUTER JOIN evedata.alliances AA on AA.allianceID = aggressorID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = defenderID
LEFT OUTER JOIN evedata.corporations AC on AC.corporationID = aggressorID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = defenderID
LEFT OUTER JOIN
( -- Kills by the Aggressor
SELECT
W.id,
count(*) AS kills
FROM evedata.wars W
INNER JOIN evedata.killmails K ON K.warID = W.id AND
(
K.victimAllianceID != W.aggressorID AND
K.victimCorporationID != W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS K ON W.id = K.id
LEFT OUTER JOIN
( -- Kills by the Defenders
SELECT
W.id,
count(*) AS losses
FROM evedata.wars W
INNER JOIN evedata.killmails L ON L.warID = W.id AND
(
L.victimAllianceID = W.aggressorID OR
L.victimCorporationID = W.aggressorID
)
WHERE killTime > DATE_SUB(UTC_TIMESTAMP, INTERVAL 31 DAY)
GROUP BY W.id
) AS L ON W.id = L.id
WHERE (aggressorID = ? OR defenderID = ? OR allyID = ?) AND
(timeFinished > UTC_TIMESTAMP() OR
timeFinished = "0001-01-01 00:00:00")`, id, id, id); err != nil {
		return nil, err
	}
	return wars, nil
}
// KnownAllies is one ally row from GetKnownAlliesByID: how many wars the
// ally joined on the given defender's side.
type KnownAllies struct {
	Number int64 `db:"number" json:"number"` // distinct wars joined
	AllyID int64 `db:"allyID" json:"id"`
	Name string `db:"name" json:"name"`
	Type string `db:"type" json:"type"` // crestID type (alliance/corporation per crestID table)
}
// [BENCHMARK] 0.000 sec / 0.000 sec
// GetKnownAlliesByID counts, per ally, how many wars in the last 12 months
// the ally joined in defense of the given entity id.
func GetKnownAlliesByID(id int64) ([]KnownAllies, error) {
	w := []KnownAllies{}
	if err := database.Select(&w, `
SELECT
COUNT(DISTINCT W.id) AS number,
allyID,
CREST.type,
IFNULL(DA.name, DC.name) AS name
FROM evedata.wars W
INNER JOIN evedata.warAllies A ON W.id = A.id
INNER JOIN evedata.crestID CREST ON CREST.id = A.allyID
LEFT OUTER JOIN evedata.alliances DA on DA.allianceID = A.allyID
LEFT OUTER JOIN evedata.corporations DC on DC.corporationID = A.allyID
WHERE defenderID = ? AND W.timeStarted > DATE_SUB(UTC_TIMESTAMP(), INTERVAL 12 MONTH)
GROUP BY allyID
`, id); err != nil {
		return nil, err
	}
	return w, nil
}
// FactionsByName resolves faction name to ID.
var FactionsByName = map[string]int32{"Caldari": 500001, "Minmatar": 500002, "Amarr": 500003, "Gallente": 500004}

// FactionsByID resolves faction ID to name.
var FactionsByID = map[int32]string{500001: "Caldari", 500002: "Minmatar", 500003: "Amarr", 500004: "Gallente"}

// FactionsAtWar resolves two enemy parties for each factionID.
var FactionsAtWar = map[int32][]int32{
	500001: {500002, 500004}, // Caldari : Minmatar, Gallente
	500003: {500002, 500004}, // Amarr : Minmatar, Gallente
	500002: {500001, 500003}, // Minmatar : Caldari, Amarr
	500004: {500001, 500003}, // Gallente : Caldari, Amarr
}
// FactionWarEntities is one alliance or corporation row returned by
// GetFactionWarEntitiesForID.
type FactionWarEntities struct {
	ID int64 `db:"id" json:"id"` // allianceID when in an alliance, else corporationID
	Name string `db:"name" json:"name"`
	Type string `db:"type" json:"type"` // "alliance" or "corporation"
}
// [BENCHMARK] 0.031 sec / 0.000 sec
// GetFactionWarEntitiesForID lists the alliances and corporations of the
// two enemy factions of the given faction, most active entities first.
func GetFactionWarEntitiesForID(factionID int32) ([]FactionWarEntities, error) {
	if FactionsByID[factionID] == "" {
		return nil, errors.New("Unknown FactionID")
	}
	// Due to CCP limitation, make sure count is under 1024, cut stuff off until it is.
	wars := FactionsAtWar[factionID]
	w := []FactionWarEntities{}
	if err := database.Select(&w, `
SELECT
DISTINCT IF(C.allianceID > 0, C.allianceID, corporationID) AS id,
IF(C.allianceID > 0, A.name, C.name) AS name,
IF(C.allianceID > 0, "alliance", "corporation") AS type
FROM evedata.corporations C
LEFT OUTER JOIN evedata.alliances A ON C.allianceID = A.allianceID
INNER JOIN evedata.entityKillStats K ON K.id = IF(C.allianceID > 0, C.allianceID, C.corporationID)
WHERE factionID IN (?, ?) AND C.memberCount > 0
ORDER BY K.kills + K.losses + C.memberCount DESC, name ASC;
`, wars[0], wars[1]); err != nil {
		return nil, err
	}
	return w, nil
}
|
// +build go1.3
/*
* Licensed to Qualys, Inc. (QUALYS) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* QUALYS licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "crypto/tls"
import "encoding/json"
import "flag"
import "fmt"
import "io/ioutil"
import "bufio"
import "os"
import "log"
import "math/rand"
import "net"
import "net/http"
import "net/url"
import "strconv"
import "strings"
import "sync/atomic"
import "time"
import "sort"
// Syslog-style log levels. A message is emitted when its level is <= the
// current logLevel (checks read `logLevel >= LOG_X`).
const (
	LOG_NONE = -1
	LOG_EMERG = 0
	LOG_ALERT = 1
	LOG_CRITICAL = 2
	LOG_ERROR = 3
	LOG_WARNING = 4
	LOG_NOTICE = 5
	LOG_INFO = 6
	LOG_DEBUG = 7
	LOG_TRACE = 8
)

// USER_AGENT identifies this client in every API request.
var USER_AGENT = "ssllabs-scan v0.1"

// logLevel gates log output; set from the -verbosity / -quiet flags.
var logLevel = LOG_INFO

// activeAssessments counts assessments currently in flight (maintained by
// Manager.run's goroutine).
var activeAssessments = 0

// maxAssessments is the concurrent-assessment limit; updated from the
// X-ClientMaxAssessments response header.
var maxAssessments = 1

// requestCounter numbers API requests for log correlation (atomic).
var requestCounter uint64 = 0

// apiLocation is the API entry point; overridable with the -api flag.
var apiLocation = "https://api.dev.ssllabs.com/api/fa78d5a4/"

// clearCache forces a fresh scan on the first analyze call; fromCache
// accepts cached reports instead (-usecache flag).
var clearCache = true
var fromCache = false

// httpClient is the shared HTTP client, configured in Manager.run.
var httpClient *http.Client
// LabsError is a single field-level error returned by the SSL Labs API.
type LabsError struct {
	Field   string
	Message string
}

// LabsErrorResponse is the API's error envelope; it implements error so it
// can be returned directly from invokeAnalyze.
type LabsErrorResponse struct {
	ResponseErrors []LabsError `json:"errors"`
}

// Error renders the response in its JSON form, falling back to the
// marshalling error's own message if encoding fails.
func (e LabsErrorResponse) Error() string {
	encoded, err := json.Marshal(e)
	if err != nil {
		return err.Error()
	}
	return string(encoded)
}
// LabsKey describes the server's public key.
type LabsKey struct {
	Size int
	Strength int
	Alg string
	DebianFlaw bool
	Q int
}

// LabsCert describes the server's leaf certificate.
type LabsCert struct {
	Subject string
	CommonNames []string
	AltNames []string
	NotBefore uint64
	NotAfter uint64
	IssuerSubject string
	SigAlg string
	IssuerLabel string
	RevocationInfo int
	CrlURIs []string
	OcspURIs []string
	RevocationStatus int
	Sgc bool
	ValidationType string
	Issues int
}

// LabsChainCert is one certificate in the served chain.
type LabsChainCert struct {
	Subject string
	Label string
	IssuerSubject string
	IssuerLabel string
	Issues int
	Raw string
}

// LabsChain is the certificate chain plus its aggregate issue flags.
type LabsChain struct {
	Certs []LabsChainCert
	Issues int
}

// LabsProtocol is one protocol version reported by the assessment.
type LabsProtocol struct {
	Id int
	Name string
	Version string
	V2SuitesDisabled bool
	ErrorMessage bool
	Q int
}

// LabsSimClient identifies one simulated client program.
type LabsSimClient struct {
	Id int
	Name string
	Platform string
	Version string
	IsReference bool
}

// LabsSimulation is the outcome of one client handshake simulation.
type LabsSimulation struct {
	Client LabsSimClient
	ErrorCode int
	Attempts int
	ProtocolId int
	SuiteId int
}

// LabsSimDetails collects all handshake simulations for an endpoint.
type LabsSimDetails struct {
	Results []LabsSimulation
}

// LabsSuite is one cipher suite with its key-exchange parameters.
type LabsSuite struct {
	Id int
	Name string
	CipherStrength int
	DhStrength int
	DhP int
	DhG int
	DhYs int
	EcdhBits int
	EcdhStrength int
	Q int
}

// LabsSuites lists the supported suites and whether the server enforces
// its own preference order.
type LabsSuites struct {
	List []LabsSuite
	Preference bool
}

// LabsEndpointDetails is the full per-endpoint assessment detail block.
type LabsEndpointDetails struct {
	HostStartTime uint64
	Key LabsKey
	Cert LabsCert
	Chain LabsChain
	Protocols LabsProtocol
	Suites LabsSuites
	ServerSignature string
	PrefixDelegation bool
	NonPrefixDelegation bool
	VulnBeast bool
	RenegSupport int
	StsResponseHeader string
	StsMaxAge uint64
	StsSubdomains bool
	PkpResponseHeader string
	SessionResumption int
	CompressionMethods int
	SupportsNpn bool
	NpnProtocols string
	SessionTickets int
	OcspStapling bool
	SniRequired bool
	HttpStatusCode int
	HttpForwarding string
	SupportsRc4 bool
	ForwardSecrecy int
	Rc4WithModern bool
	Sims LabsSimDetails
	Heartbleed bool
	Heartbeat bool
	OpenSslCcs int
}

// LabsEndpoint is one assessed IP address of the target host.
type LabsEndpoint struct {
	IpAddress string
	ServerName string
	StatusMessage string
	StatusDetailsMessage string
	Grade string
	HasWarnings bool
	IsExceptional bool
	Progress int
	Duration int
	Eta int
	Delegation int
	Details LabsEndpointDetails
}

// LabsReport is the top-level assessment result for one hostname.
type LabsReport struct {
	Host string
	Port int
	Protocol string
	IsPublic bool
	Status string
	StatusMessage string
	StartTime uint64
	TestTime uint64
	EngineVersion string
	CriteriaVersion string
	CacheExpiryTime int64
	Endpoints []LabsEndpoint
	CertHostnames []string
	rawJSON string // raw response body, kept for -rawoutput
}

// LabsResults accumulates finished reports and their raw JSON bodies.
type LabsResults struct {
	reports []LabsReport
	responses []string
}

// LabsInfo is the API's version and client-limit information ("info" call).
type LabsInfo struct {
	EngineVersion string
	CriteriaVersion string
	ClientMaxAssessments int
}
// invokeGetRepeatedly performs a GET against url, retrying transport
// failures: an "EOF" error (server dropped a persistent connection) is
// retried immediately up to two extra times; any other transport error is
// retried up to five extra times after a 30-second pause. On success it
// also honors the X-ClientMaxAssessments response header, adjusting the
// global concurrency limit. Returns the response and its fully-read body.
func invokeGetRepeatedly(url string) (*http.Response, []byte, error) {
	retryCount := 0
	for {
		var reqId = atomic.AddUint64(&requestCounter, 1)
		if logLevel >= LOG_DEBUG {
			log.Printf("[DEBUG] Request (%v): %v", reqId, url)
		}
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return nil, nil, err
		}
		req.Header.Add("User-Agent", USER_AGENT)
		resp, err := httpClient.Do(req)
		if err == nil {
			if logLevel >= LOG_DEBUG {
				// BUG FIX: reqId and resp.StatusCode were swapped, logging
				// "(status): id" while the format — matching the Request
				// line above — means "(id): status".
				log.Printf("[DEBUG] Response status code (%v): %v", reqId, resp.StatusCode)
			}
			// Adjust maximum concurrent requests.
			// NOTE(review): maxAssessments is written here (assessment
			// goroutine) and read in Manager.run — looks like a data race;
			// confirm and guard if so.
			headerValue := resp.Header.Get("X-ClientMaxAssessments")
			if headerValue != "" {
				i, err := strconv.Atoi(headerValue)
				if err == nil {
					if maxAssessments != i {
						maxAssessments = i
						if logLevel >= LOG_INFO {
							log.Printf("[INFO] Server set max concurrent assessments to %v", headerValue)
						}
					}
				} else {
					if logLevel >= LOG_WARNING {
						log.Printf("[WARNING] Ignoring invalid X-ClientMaxAssessments value (%v): %v", headerValue, err)
					}
				}
			}
			// Retrieve the response body. Both paths below return, so the
			// defer fires promptly despite sitting inside a loop.
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, nil, err
			}
			if logLevel >= LOG_TRACE {
				log.Printf("[TRACE] Response (%v):\n%v", reqId, string(body))
			}
			return resp, body, nil
		} else {
			if err.Error() == "EOF" {
				// Server closed a persistent connection on us, which
				// Go doesn't seem to be handling well. So we'll try one
				// more time.
				if retryCount > 1 {
					log.Fatalf("[ERROR] Too many HTTP requests failed with EOF")
				}
			} else {
				if retryCount > 5 {
					log.Fatalf("[ERROR] Too many failed HTTP requests")
				}
				time.Sleep(30 * time.Second)
			}
			retryCount++
		}
	}
}
// invokeApi issues one API command, transparently retrying "try later"
// responses: 429 and 503 sleep five minutes; 529 sleeps a randomized
// 15-30 minutes so clients don't all reconnect at once. Any status other
// than 200 or 400 aborts the program.
func invokeApi(command string) (*http.Response, []byte, error) {
	var url = apiLocation + "/" + command
	for {
		resp, body, err := invokeGetRepeatedly(url)
		if err != nil {
			return nil, nil, err
		}
		// Status codes 429, 503, and 529 essentially mean try later.
		switch {
		case resp.StatusCode == 429 || resp.StatusCode == 503:
			if logLevel >= LOG_NOTICE {
				log.Printf("[NOTICE] Sleeping for 5 minutes after a %v response", resp.StatusCode)
			}
			time.Sleep(5 * time.Minute)
		case resp.StatusCode == 529:
			// Randomize the sleep so some clients reconnect earlier and
			// some later when the server is overloaded.
			sleepTime := 15 + rand.Int31n(15)
			if logLevel >= LOG_NOTICE {
				log.Printf("[NOTICE] Sleeping for %v minutes after a 529 response", sleepTime)
			}
			time.Sleep(time.Duration(sleepTime) * time.Minute)
		case resp.StatusCode != 200 && resp.StatusCode != 400:
			log.Fatalf("[ERROR] Unexpected response status code %v", resp.StatusCode)
		default:
			return resp, body, nil
		}
	}
}
// invokeInfo fetches and decodes the API's "info" document (engine
// version, criteria version, and this client's assessment limit).
func invokeInfo() (*LabsInfo, error) {
	_, body, err := invokeApi("info")
	if err != nil {
		return nil, err
	}
	labsInfo := LabsInfo{}
	if err := json.Unmarshal(body, &labsInfo); err != nil {
		return nil, err
	}
	return &labsInfo, nil
}
// invokeAnalyze starts or polls the assessment of host. fromCache asks for
// a cached report if one exists; otherwise clearCache can force a fresh
// scan. A 400 response is decoded into a LabsErrorResponse and returned as
// the error; any other accepted response is decoded into a LabsReport with
// its raw JSON body attached.
func invokeAnalyze(host string, clearCache bool, fromCache bool) (*LabsReport, error) {
	command := "analyze?host=" + host + "&all=done"
	if fromCache {
		command += "&fromCache=on"
	} else if clearCache {
		command += "&clearCache=on"
	}
	resp, body, err := invokeApi(command)
	if err != nil {
		return nil, err
	}
	// A 400 means parameter validation failed; decode the error envelope.
	if resp.StatusCode == 400 {
		var apiError LabsErrorResponse
		if err := json.Unmarshal(body, &apiError); err != nil {
			return nil, err
		}
		return nil, apiError
	}
	// We should have a proper response.
	var analyzeResponse LabsReport
	if err := json.Unmarshal(body, &analyzeResponse); err != nil {
		return nil, err
	}
	// Keep the raw JSON body alongside the decoded report.
	analyzeResponse.rawJSON = string(body)
	return &analyzeResponse, nil
}
// Event is a message from an assessment goroutine to the manager.
type Event struct {
	host string
	eventType int // ASSESSMENT_STARTING or ASSESSMENT_COMPLETE
	report *LabsReport // non-nil only for ASSESSMENT_COMPLETE
}

// Event types carried in Event.eventType.
const (
	ASSESSMENT_STARTING = 0
	ASSESSMENT_COMPLETE = 1
)
// NewAssessment drives a complete assessment of host: it announces itself
// on eventChannel, polls invokeAnalyze every five seconds until the report
// status is READY or ERROR, then delivers the final report on eventChannel.
// Intended to run in its own goroutine (see Manager.startAssessment).
func NewAssessment(host string, eventChannel chan Event) {
	eventChannel <- Event{host, ASSESSMENT_STARTING, nil}
	var report *LabsReport
	// BUG FIX: StartTime is uint64 here, so the original int sentinel of -1
	// did not compile (mismatched types in comparison and assignment) and
	// could not represent "not yet seen"; track the first iteration with an
	// explicit flag instead.
	var startTime uint64
	haveStartTime := false
	for {
		myResponse, err := invokeAnalyze(host, clearCache, fromCache)
		if err != nil {
			log.Fatalf("[ERROR] Assessment failed: %v", err)
		}
		if !haveStartTime {
			startTime = myResponse.StartTime
			haveStartTime = true
			// NOTE(review): writing the global clearCache from this
			// goroutine races with other assessments reading it — confirm
			// this is intended best-effort behavior.
			clearCache = false
		} else if myResponse.StartTime != startTime {
			// A changed StartTime means our polls are observing a
			// different assessment run than the one we started.
			log.Fatalf("[ERROR] Inconsistent startTime. Expected %v, got %v.", startTime, myResponse.StartTime)
		}
		if (myResponse.Status == "READY") || (myResponse.Status == "ERROR") {
			report = myResponse
			break
		}
		time.Sleep(5 * time.Second)
	}
	eventChannel <- Event{host, ASSESSMENT_COMPLETE, report}
}
// HostProvider hands out the configured hostnames one at a time.
type HostProvider struct {
	hostnames []string
	i         int
}

// NewHostProvider wraps hs in a HostProvider positioned at the first entry.
func NewHostProvider(hs []string) *HostProvider {
	return &HostProvider{hostnames: hs, i: 0}
}

// next returns the next hostname and true, or ("", false) when exhausted.
func (hp *HostProvider) next() (string, bool) {
	if hp.i >= len(hp.hostnames) {
		return "", false
	}
	host := hp.hostnames[hp.i]
	hp.i++
	return host, true
}
// Manager coordinates assessments: it pulls hostnames from hostProvider,
// launches goroutines that report on BackendEventChannel, and accumulates
// results. FrontendEventChannel is closed by run when all work is done (or
// when startup fails), which is what main waits on.
type Manager struct {
	hostProvider *HostProvider
	FrontendEventChannel chan Event
	BackendEventChannel chan Event
	results *LabsResults
}

// NewManager creates a Manager over the given host provider and starts its
// event loop (run) in a new goroutine.
func NewManager(hostProvider *HostProvider) *Manager {
	manager := Manager{
		hostProvider: hostProvider,
		FrontendEventChannel: make(chan Event),
		BackendEventChannel: make(chan Event),
		results: &LabsResults{reports: make([]LabsReport, 0)},
	}
	go manager.run()
	return &manager
}
// startAssessment launches the assessment of h in its own goroutine and
// bumps the in-flight counter. NOTE(review): activeAssessments appears to
// be read and written only from run's goroutine, making the unsynchronized
// update safe — confirm there are no other callers.
func (manager *Manager) startAssessment(h string) {
	go NewAssessment(h, manager.BackendEventChannel)
	activeAssessments++
}
// run is the manager's event loop: it configures the shared HTTP client,
// queries the API for version and concurrency limits, then starts at most
// maxAssessments assessments (checking roughly once a second) and collects
// their results. It closes FrontendEventChannel when all work is done.
func (manager *Manager) run() {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
	}
	httpClient = &http.Client{Transport: transport}
	// Ping SSL Labs to determine how many concurrent
	// assessments we're allowed to use. Print the API version
	// information and the limits.
	labsInfo, err := invokeInfo()
	if err != nil {
		// TODO Signal error so that we return the correct exit code
		close(manager.FrontendEventChannel)
		// BUG FIX: the original fell through and dereferenced the nil
		// labsInfo below; bail out instead.
		return
	}
	if logLevel >= LOG_INFO {
		log.Printf("[INFO] SSL Labs v%v (criteria version %v)", labsInfo.EngineVersion, labsInfo.CriteriaVersion)
	}
	moreAssessments := true
	for {
		select {
		// Handle assessment events (e.g., starting and finishing).
		case e := <-manager.BackendEventChannel:
			if e.eventType == ASSESSMENT_STARTING {
				if logLevel >= LOG_INFO {
					log.Printf("[INFO] Assessment starting: %v", e.host)
				}
			}
			if e.eventType == ASSESSMENT_COMPLETE {
				if logLevel >= LOG_INFO {
					msg := ""
					// Missing C's ternary operator here.
					if len(e.report.Endpoints) == 0 {
						msg = fmt.Sprintf("[INFO] Assessment failed: %v (%v)", e.host, e.report.StatusMessage)
					} else if len(e.report.Endpoints) > 1 {
						msg = fmt.Sprintf("[INFO] Assessment complete: %v (%v hosts in %v seconds)",
							e.host, len(e.report.Endpoints), (e.report.TestTime-e.report.StartTime)/1000)
					} else {
						msg = fmt.Sprintf("[INFO] Assessment complete: %v (%v host in %v seconds)",
							e.host, len(e.report.Endpoints), (e.report.TestTime-e.report.StartTime)/1000)
					}
					for _, endpoint := range e.report.Endpoints {
						if endpoint.Grade != "" {
							msg = msg + "\n " + endpoint.IpAddress + ": " + endpoint.Grade
						} else {
							msg = msg + "\n " + endpoint.IpAddress + ": Err: " + endpoint.StatusMessage
						}
					}
					log.Println(msg)
				}
				activeAssessments--
				manager.results.reports = append(manager.results.reports, *e.report)
				manager.results.responses = append(manager.results.responses, e.report.rawJSON)
				// Are we done?
				if (activeAssessments == 0) && (moreAssessments == false) {
					close(manager.FrontendEventChannel)
					return
				}
			}
		// Once a second, start a new assessment, provided there are
		// hostnames left and we're not over the concurrent assessment limit.
		default:
			<-time.NewTimer(time.Second).C
			if moreAssessments {
				if activeAssessments < maxAssessments {
					host, hasNext := manager.hostProvider.next()
					if hasNext {
						manager.startAssessment(host)
					} else {
						// We've run out of hostnames and now just need
						// to wait for all the assessments to complete.
						moreAssessments = false
					}
				}
			}
		}
	}
}
// parseLogLevel maps a level name to its LOG_* constant; unrecognized
// names abort the program.
func parseLogLevel(level string) int {
	switch level {
	case "error":
		return LOG_ERROR
	case "info":
		return LOG_INFO
	case "debug":
		return LOG_DEBUG
	case "trace":
		return LOG_TRACE
	}
	log.Fatalf("[ERROR] Unrecognized log level: %v", level)
	return -1
}
// flattenJSON recursively flattens a decoded JSON object into *flattened,
// joining nested keys with "." (array elements use their index as a key
// segment) and wrapping string values in double quotes.
func flattenJSON(inputJSON map[string]interface{}, rootKey string, flattened *map[string]interface{}) {
	const keysep = "."  // Char to separate keys
	const quote = "\"" // Char to envelope strings
	for rkey, value := range inputJSON {
		key := rootKey + rkey
		switch v := value.(type) {
		case string:
			(*flattened)[key] = quote + v + quote
		case float64:
			(*flattened)[key] = v
		case bool:
			(*flattened)[key] = v
		case []interface{}:
			for i, elem := range v {
				aKey := key + keysep + strconv.Itoa(i)
				switch e := elem.(type) {
				case string:
					(*flattened)[aKey] = quote + e + quote
				case float64:
					(*flattened)[aKey] = e
				case bool:
					(*flattened)[aKey] = e
				default:
					// Nested object inside an array: recurse with the
					// element index folded into the key prefix.
					flattenJSON(elem.(map[string]interface{}), aKey+keysep, flattened)
				}
			}
		case nil:
			(*flattened)[key] = nil
		default:
			// Nested object: recurse with this key as the new prefix.
			flattenJSON(value.(map[string]interface{}), key+keysep, flattened)
		}
	}
}

// flattenAndFormatJSON decodes raw JSON, flattens it, and returns the
// entries as `"key": value` lines sorted by key.
func flattenAndFormatJSON(inputJSON []byte) *[]string {
	flattened := make(map[string]interface{})
	mappedJSON := map[string]interface{}{}
	if err := json.Unmarshal(inputJSON, &mappedJSON); err != nil {
		log.Fatalf("[ERROR] Reconsitution of JSON failed: %v", err)
	}
	// Flatten the JSON structure, recursively
	flattenJSON(mappedJSON, "", &flattened)
	// Sort the keys so output order is deterministic.
	kIndex := make([]string, 0, len(flattened))
	for key := range flattened {
		kIndex = append(kIndex, key)
	}
	sort.Strings(kIndex)
	// Ordered flattened data
	var flatStrings []string
	for _, key := range kIndex {
		flatStrings = append(flatStrings, fmt.Sprintf("\"%v\": %v\n", key, flattened[key]))
	}
	return &flatStrings
}
// readLines reads the file at *path and returns its lines, newline
// characters stripped. A scanner error is reported alongside whatever
// lines were read before it.
func readLines(path *string) ([]string, error) {
	f, err := os.Open(*path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	for scanner := bufio.NewScanner(f); ; {
		if !scanner.Scan() {
			return lines, scanner.Err()
		}
		lines = append(lines, scanner.Text())
	}
}
// validateURL reports whether URL parses as a URL.
func validateURL(URL string) bool {
	_, err := url.Parse(URL)
	return err == nil
}
// validateHostname reports whether hostname resolves to at least one
// address. In some cases lookup returns no error but also no addresses,
// so both conditions are checked.
func validateHostname(hostname string) bool {
	addrs, err := net.LookupHost(hostname)
	return err == nil && len(addrs) >= 1
}
// main parses flags, validates the API location and all hostnames, starts
// the assessment manager, then blocks until the manager closes its
// frontend channel and finally emits results in the selected format.
func main() {
	var conf_api = flag.String("api", "BUILTIN", "API entry point, for example https://www.example.com/api/")
	var conf_verbosity = flag.String("verbosity", "info", "Configure log verbosity: error, info, debug, or trace.")
	var conf_json_pretty = flag.Bool("json-pretty", false, "Enable pretty JSON output")
	var conf_quiet = flag.Bool("quiet", false, "Disable status messages (logging)")
	var conf_json_flat = flag.Bool("json-flat", false, "Output results in flattened JSON format")
	var conf_rawoutput = flag.Bool("rawoutput", false, "Print RAW JSON response")
	var conf_hostfile = flag.String("hostfile", "", "File containing hosts to scan (one per line)")
	var conf_usecache = flag.Bool("usecache", false, "If true, accept cached results (if available), else force live scan.")
	flag.Parse()
	logLevel = parseLogLevel(strings.ToLower(*conf_verbosity))
	// -quiet overrides whatever -verbosity selected.
	if *conf_quiet {
		logLevel = LOG_NONE
	}
	// We prefer cached results
	if *conf_usecache {
		fromCache = true
		clearCache = false
	}
	// Verify that the API entry point is a URL.
	if *conf_api != "BUILTIN" {
		apiLocation = *conf_api
	}
	if validateURL(apiLocation) == false {
		log.Fatalf("[ERROR] Invalid API URL: %v", apiLocation)
	}
	var hostnames []string
	if *conf_hostfile != "" {
		// Open file, and read it
		var err error
		hostnames, err = readLines(conf_hostfile)
		if err != nil {
			log.Fatalf("[ERROR] Reading from specified hostfile failed: %v", err)
		}
	} else {
		// Read hostnames from the rest of the args
		hostnames = flag.Args()
	}
	// Validate all hostnames before we attempt to test them. At least
	// one hostname is required.
	for _, host := range hostnames {
		if validateHostname(host) == false {
			log.Fatalf("[ERROR] Invalid hostname: %v", host)
		}
	}
	hp := NewHostProvider(hostnames)
	manager := NewManager(hp)
	// Respond to events until all the work is done.
	for {
		_, running := <-manager.FrontendEventChannel
		if running == false {
			var results []byte
			var err error
			if *conf_json_pretty {
				// Pretty JSON output
				results, err = json.MarshalIndent(manager.results.reports, "", " ")
			} else if *conf_json_flat && !*conf_rawoutput {
				// Flat JSON, but not RAW.
				// NOTE(review): this `results, err :=` shadows the outer
				// variables, so in this mode the outer `results` stays nil
				// and the final Println below prints a blank line — confirm
				// whether that is intended.
				for i := range manager.results.reports {
					results, err := json.Marshal(manager.results.reports[i])
					if err != nil {
						log.Fatalf("[ERROR] Output to JSON failed: %v", err)
					}
					flattened := flattenAndFormatJSON(results)
					// Print the flattened data
					fmt.Println(*flattened)
				}
			} else if *conf_json_flat && *conf_rawoutput {
				// Flat JSON and RAW
				for i := range manager.results.responses {
					results := []byte(manager.results.responses[i])
					flattened := flattenAndFormatJSON(results)
					// Print the flattened data
					fmt.Println(*flattened)
				}
			} else if *conf_rawoutput {
				// Raw (non-Go-mangled) JSON output
				fmt.Println(manager.results.responses)
			} else {
				// Regular JSON output
				results, err = json.Marshal(manager.results.reports)
			}
			if err != nil {
				log.Fatalf("[ERROR] Output to JSON failed: %v", err)
			}
			fmt.Println(string(results))
			if logLevel >= LOG_INFO {
				log.Println("[INFO] All assessments complete; shutting down")
			}
			return
		}
	}
}
Use int64 instead of uint64 for the timestamp fields (StartTime, TestTime, NotBefore, NotAfter, HostStartTime, StsMaxAge); the uint64 variant breaks the code because NewAssessment's -1 startTime sentinel cannot be assigned from or compared with an unsigned StartTime.
// +build go1.3
/*
* Licensed to Qualys, Inc. (QUALYS) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* QUALYS licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "crypto/tls"
import "encoding/json"
import "flag"
import "fmt"
import "io/ioutil"
import "bufio"
import "os"
import "log"
import "math/rand"
import "net"
import "net/http"
import "net/url"
import "strconv"
import "strings"
import "sync/atomic"
import "time"
import "sort"
// Syslog-style log levels. A message is emitted when its level is <= the
// current logLevel (checks read `logLevel >= LOG_X`).
const (
	LOG_NONE = -1
	LOG_EMERG = 0
	LOG_ALERT = 1
	LOG_CRITICAL = 2
	LOG_ERROR = 3
	LOG_WARNING = 4
	LOG_NOTICE = 5
	LOG_INFO = 6
	LOG_DEBUG = 7
	LOG_TRACE = 8
)

// USER_AGENT identifies this client in every API request.
var USER_AGENT = "ssllabs-scan v0.1"

// logLevel gates log output; set from the -verbosity / -quiet flags.
var logLevel = LOG_INFO

// activeAssessments counts assessments currently in flight (maintained by
// Manager.run's goroutine).
var activeAssessments = 0

// maxAssessments is the concurrent-assessment limit; updated from the
// X-ClientMaxAssessments response header.
var maxAssessments = 1

// requestCounter numbers API requests for log correlation (atomic).
var requestCounter uint64 = 0

// apiLocation is the API entry point; overridable with the -api flag.
var apiLocation = "https://api.dev.ssllabs.com/api/fa78d5a4/"

// clearCache forces a fresh scan on the first analyze call; fromCache
// accepts cached reports instead (-usecache flag).
var clearCache = true
var fromCache = false

// httpClient is the shared HTTP client, configured in Manager.run.
var httpClient *http.Client
// LabsError is a single field-level error returned by the SSL Labs API.
type LabsError struct {
	Field   string
	Message string
}

// LabsErrorResponse is the API's error envelope; it implements error so it
// can be returned directly from invokeAnalyze.
type LabsErrorResponse struct {
	ResponseErrors []LabsError `json:"errors"`
}

// Error renders the response in its JSON form, falling back to the
// marshalling error's own message if encoding fails.
func (e LabsErrorResponse) Error() string {
	encoded, err := json.Marshal(e)
	if err != nil {
		return err.Error()
	}
	return string(encoded)
}
// LabsKey describes the server's public key.
type LabsKey struct {
	Size int
	Strength int
	Alg string
	DebianFlaw bool
	Q int
}

// LabsCert describes the server's leaf certificate.
type LabsCert struct {
	Subject string
	CommonNames []string
	AltNames []string
	NotBefore int64
	NotAfter int64
	IssuerSubject string
	SigAlg string
	IssuerLabel string
	RevocationInfo int
	CrlURIs []string
	OcspURIs []string
	RevocationStatus int
	Sgc bool
	ValidationType string
	Issues int
}

// LabsChainCert is one certificate in the served chain.
type LabsChainCert struct {
	Subject string
	Label string
	IssuerSubject string
	IssuerLabel string
	Issues int
	Raw string
}

// LabsChain is the certificate chain plus its aggregate issue flags.
type LabsChain struct {
	Certs []LabsChainCert
	Issues int
}

// LabsProtocol is one protocol version reported by the assessment.
type LabsProtocol struct {
	Id int
	Name string
	Version string
	V2SuitesDisabled bool
	ErrorMessage bool
	Q int
}

// LabsSimClient identifies one simulated client program.
type LabsSimClient struct {
	Id int
	Name string
	Platform string
	Version string
	IsReference bool
}

// LabsSimulation is the outcome of one client handshake simulation.
type LabsSimulation struct {
	Client LabsSimClient
	ErrorCode int
	Attempts int
	ProtocolId int
	SuiteId int
}

// LabsSimDetails collects all handshake simulations for an endpoint.
type LabsSimDetails struct {
	Results []LabsSimulation
}

// LabsSuite is one cipher suite with its key-exchange parameters.
type LabsSuite struct {
	Id int
	Name string
	CipherStrength int
	DhStrength int
	DhP int
	DhG int
	DhYs int
	EcdhBits int
	EcdhStrength int
	Q int
}

// LabsSuites lists the supported suites and whether the server enforces
// its own preference order.
type LabsSuites struct {
	List []LabsSuite
	Preference bool
}

// LabsEndpointDetails is the full per-endpoint assessment detail block.
type LabsEndpointDetails struct {
	HostStartTime int64
	Key LabsKey
	Cert LabsCert
	Chain LabsChain
	Protocols LabsProtocol
	Suites LabsSuites
	ServerSignature string
	PrefixDelegation bool
	NonPrefixDelegation bool
	VulnBeast bool
	RenegSupport int
	StsResponseHeader string
	StsMaxAge int64
	StsSubdomains bool
	PkpResponseHeader string
	SessionResumption int
	CompressionMethods int
	SupportsNpn bool
	NpnProtocols string
	SessionTickets int
	OcspStapling bool
	SniRequired bool
	HttpStatusCode int
	HttpForwarding string
	SupportsRc4 bool
	ForwardSecrecy int
	Rc4WithModern bool
	Sims LabsSimDetails
	Heartbleed bool
	Heartbeat bool
	OpenSslCcs int
}

// LabsEndpoint is one assessed IP address of the target host.
type LabsEndpoint struct {
	IpAddress string
	ServerName string
	StatusMessage string
	StatusDetailsMessage string
	Grade string
	HasWarnings bool
	IsExceptional bool
	Progress int
	Duration int
	Eta int
	Delegation int
	Details LabsEndpointDetails
}

// LabsReport is the top-level assessment result for one hostname.
type LabsReport struct {
	Host string
	Port int
	Protocol string
	IsPublic bool
	Status string
	StatusMessage string
	StartTime int64
	TestTime int64
	EngineVersion string
	CriteriaVersion string
	CacheExpiryTime int64
	Endpoints []LabsEndpoint
	CertHostnames []string
	rawJSON string // raw response body, kept for -rawoutput
}

// LabsResults accumulates finished reports and their raw JSON bodies.
type LabsResults struct {
	reports []LabsReport
	responses []string
}

// LabsInfo is the API's version and client-limit information ("info" call).
type LabsInfo struct {
	EngineVersion string
	CriteriaVersion string
	ClientMaxAssessments int
}
// invokeGetRepeatedly performs a GET against url, retrying transport
// failures: an "EOF" error (server dropped a persistent connection) is
// retried immediately up to two extra times; any other transport error is
// retried up to five extra times after a 30-second pause. On success it
// also honors the X-ClientMaxAssessments response header, adjusting the
// global concurrency limit. Returns the response and its fully-read body.
func invokeGetRepeatedly(url string) (*http.Response, []byte, error) {
	retryCount := 0
	for {
		var reqId = atomic.AddUint64(&requestCounter, 1)
		if logLevel >= LOG_DEBUG {
			log.Printf("[DEBUG] Request (%v): %v", reqId, url)
		}
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return nil, nil, err
		}
		req.Header.Add("User-Agent", USER_AGENT)
		resp, err := httpClient.Do(req)
		if err == nil {
			if logLevel >= LOG_DEBUG {
				// BUG FIX: reqId and resp.StatusCode were swapped, logging
				// "(status): id" while the format — matching the Request
				// line above — means "(id): status".
				log.Printf("[DEBUG] Response status code (%v): %v", reqId, resp.StatusCode)
			}
			// Adjust maximum concurrent requests.
			// NOTE(review): maxAssessments is written here (assessment
			// goroutine) and read in Manager.run — looks like a data race;
			// confirm and guard if so.
			headerValue := resp.Header.Get("X-ClientMaxAssessments")
			if headerValue != "" {
				i, err := strconv.Atoi(headerValue)
				if err == nil {
					if maxAssessments != i {
						maxAssessments = i
						if logLevel >= LOG_INFO {
							log.Printf("[INFO] Server set max concurrent assessments to %v", headerValue)
						}
					}
				} else {
					if logLevel >= LOG_WARNING {
						log.Printf("[WARNING] Ignoring invalid X-ClientMaxAssessments value (%v): %v", headerValue, err)
					}
				}
			}
			// Retrieve the response body. Both paths below return, so the
			// defer fires promptly despite sitting inside a loop.
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, nil, err
			}
			if logLevel >= LOG_TRACE {
				log.Printf("[TRACE] Response (%v):\n%v", reqId, string(body))
			}
			return resp, body, nil
		} else {
			if err.Error() == "EOF" {
				// Server closed a persistent connection on us, which
				// Go doesn't seem to be handling well. So we'll try one
				// more time.
				if retryCount > 1 {
					log.Fatalf("[ERROR] Too many HTTP requests failed with EOF")
				}
			} else {
				if retryCount > 5 {
					log.Fatalf("[ERROR] Too many failed HTTP requests")
				}
				time.Sleep(30 * time.Second)
			}
			retryCount++
		}
	}
}
// invokeApi issues one API command, transparently retrying "try later"
// responses: 429 and 503 sleep five minutes; 529 sleeps a randomized
// 15-30 minutes so clients don't all reconnect at once. Any status other
// than 200 or 400 aborts the program.
func invokeApi(command string) (*http.Response, []byte, error) {
	var url = apiLocation + "/" + command
	for {
		resp, body, err := invokeGetRepeatedly(url)
		if err != nil {
			return nil, nil, err
		}
		// Status codes 429, 503, and 529 essentially mean try later.
		switch {
		case resp.StatusCode == 429 || resp.StatusCode == 503:
			if logLevel >= LOG_NOTICE {
				log.Printf("[NOTICE] Sleeping for 5 minutes after a %v response", resp.StatusCode)
			}
			time.Sleep(5 * time.Minute)
		case resp.StatusCode == 529:
			// Randomize the sleep so some clients reconnect earlier and
			// some later when the server is overloaded.
			sleepTime := 15 + rand.Int31n(15)
			if logLevel >= LOG_NOTICE {
				log.Printf("[NOTICE] Sleeping for %v minutes after a 529 response", sleepTime)
			}
			time.Sleep(time.Duration(sleepTime) * time.Minute)
		case resp.StatusCode != 200 && resp.StatusCode != 400:
			log.Fatalf("[ERROR] Unexpected response status code %v", resp.StatusCode)
		default:
			return resp, body, nil
		}
	}
}
// invokeInfo fetches and decodes the API's "info" document (engine
// version, criteria version, and this client's assessment limit).
func invokeInfo() (*LabsInfo, error) {
	_, body, err := invokeApi("info")
	if err != nil {
		return nil, err
	}
	labsInfo := LabsInfo{}
	if err := json.Unmarshal(body, &labsInfo); err != nil {
		return nil, err
	}
	return &labsInfo, nil
}
// invokeAnalyze starts or polls the assessment of host. fromCache asks for
// a cached report if one exists; otherwise clearCache can force a fresh
// scan. A 400 response is decoded into a LabsErrorResponse and returned as
// the error; any other accepted response is decoded into a LabsReport with
// its raw JSON body attached.
func invokeAnalyze(host string, clearCache bool, fromCache bool) (*LabsReport, error) {
	command := "analyze?host=" + host + "&all=done"
	if fromCache {
		command += "&fromCache=on"
	} else if clearCache {
		command += "&clearCache=on"
	}
	resp, body, err := invokeApi(command)
	if err != nil {
		return nil, err
	}
	// A 400 means parameter validation failed; decode the error envelope.
	if resp.StatusCode == 400 {
		var apiError LabsErrorResponse
		if err := json.Unmarshal(body, &apiError); err != nil {
			return nil, err
		}
		return nil, apiError
	}
	// We should have a proper response.
	var analyzeResponse LabsReport
	if err := json.Unmarshal(body, &analyzeResponse); err != nil {
		return nil, err
	}
	// Keep the raw JSON body alongside the decoded report.
	analyzeResponse.rawJSON = string(body)
	return &analyzeResponse, nil
}
// Event is a message from an assessment goroutine to the manager.
type Event struct {
	host string
	eventType int // ASSESSMENT_STARTING or ASSESSMENT_COMPLETE
	report *LabsReport // non-nil only for ASSESSMENT_COMPLETE
}

// Event types carried in Event.eventType.
const (
	ASSESSMENT_STARTING = 0
	ASSESSMENT_COMPLETE = 1
)
// NewAssessment drives a complete assessment of host: it announces itself
// on eventChannel, polls invokeAnalyze every five seconds until the report
// status is READY or ERROR, then delivers the final report on eventChannel.
// Intended to run in its own goroutine (see Manager.startAssessment).
func NewAssessment(host string, eventChannel chan Event) {
	eventChannel <- Event{host, ASSESSMENT_STARTING, nil}
	var report *LabsReport
	// -1 marks "no StartTime observed yet"; StartTime values returned by
	// the API are presumably positive epoch timestamps, so they never
	// collide with the sentinel — TODO confirm.
	var startTime int64 = -1
	for {
		myResponse, err := invokeAnalyze(host, clearCache, fromCache)
		if err != nil {
			log.Fatalf("[ERROR] Assessment failed: %v", err)
		}
		if startTime == -1 {
			startTime = myResponse.StartTime
			// NOTE(review): writing the global clearCache from this
			// goroutine races with other assessments reading it — confirm
			// this is intended best-effort behavior.
			clearCache = false
		} else {
			// A changed StartTime would mean our polls are observing a
			// different assessment run than the one we started.
			if myResponse.StartTime != startTime {
				log.Fatalf("[ERROR] Inconsistent startTime. Expected %v, got %v.", startTime, myResponse.StartTime)
			}
		}
		if (myResponse.Status == "READY") || (myResponse.Status == "ERROR") {
			report = myResponse
			break
		}
		time.Sleep(5 * time.Second)
	}
	eventChannel <- Event{host, ASSESSMENT_COMPLETE, report}
}
// HostProvider hands out the configured hostnames one at a time.
type HostProvider struct {
	hostnames []string
	i         int
}

// NewHostProvider wraps hs in a HostProvider positioned at the first entry.
func NewHostProvider(hs []string) *HostProvider {
	return &HostProvider{hostnames: hs, i: 0}
}

// next returns the next hostname and true, or ("", false) when exhausted.
func (hp *HostProvider) next() (string, bool) {
	if hp.i >= len(hp.hostnames) {
		return "", false
	}
	host := hp.hostnames[hp.i]
	hp.i++
	return host, true
}
// Manager coordinates assessments: it pulls hostnames from hostProvider,
// launches goroutines that report on BackendEventChannel, and accumulates
// results. FrontendEventChannel is closed by run when all work is done (or
// when startup fails), which is what main waits on.
type Manager struct {
	hostProvider *HostProvider
	FrontendEventChannel chan Event
	BackendEventChannel chan Event
	results *LabsResults
}

// NewManager creates a Manager over the given host provider and starts its
// event loop (run) in a new goroutine.
func NewManager(hostProvider *HostProvider) *Manager {
	manager := Manager{
		hostProvider: hostProvider,
		FrontendEventChannel: make(chan Event),
		BackendEventChannel: make(chan Event),
		results: &LabsResults{reports: make([]LabsReport, 0)},
	}
	go manager.run()
	return &manager
}
// startAssessment launches the assessment of h in its own goroutine and
// bumps the in-flight counter. NOTE(review): activeAssessments appears to
// be read and written only from run's goroutine, making the unsynchronized
// update safe — confirm there are no other callers.
func (manager *Manager) startAssessment(h string) {
	go NewAssessment(h, manager.BackendEventChannel)
	activeAssessments++
}
// run is the manager's event loop: it configures the shared HTTP client,
// queries the API for version and concurrency limits, then starts at most
// maxAssessments assessments (checking roughly once a second) and collects
// their results. It closes FrontendEventChannel when all work is done.
func (manager *Manager) run() {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
	}
	httpClient = &http.Client{Transport: transport}
	// Ping SSL Labs to determine how many concurrent
	// assessments we're allowed to use. Print the API version
	// information and the limits.
	labsInfo, err := invokeInfo()
	if err != nil {
		// TODO Signal error so that we return the correct exit code
		close(manager.FrontendEventChannel)
		// BUG FIX: the original fell through and dereferenced the nil
		// labsInfo below; bail out instead.
		return
	}
	if logLevel >= LOG_INFO {
		log.Printf("[INFO] SSL Labs v%v (criteria version %v)", labsInfo.EngineVersion, labsInfo.CriteriaVersion)
	}
	moreAssessments := true
	for {
		select {
		// Handle assessment events (e.g., starting and finishing).
		case e := <-manager.BackendEventChannel:
			if e.eventType == ASSESSMENT_STARTING {
				if logLevel >= LOG_INFO {
					log.Printf("[INFO] Assessment starting: %v", e.host)
				}
			}
			if e.eventType == ASSESSMENT_COMPLETE {
				if logLevel >= LOG_INFO {
					msg := ""
					// Missing C's ternary operator here.
					if len(e.report.Endpoints) == 0 {
						msg = fmt.Sprintf("[INFO] Assessment failed: %v (%v)", e.host, e.report.StatusMessage)
					} else if len(e.report.Endpoints) > 1 {
						msg = fmt.Sprintf("[INFO] Assessment complete: %v (%v hosts in %v seconds)",
							e.host, len(e.report.Endpoints), (e.report.TestTime-e.report.StartTime)/1000)
					} else {
						msg = fmt.Sprintf("[INFO] Assessment complete: %v (%v host in %v seconds)",
							e.host, len(e.report.Endpoints), (e.report.TestTime-e.report.StartTime)/1000)
					}
					for _, endpoint := range e.report.Endpoints {
						if endpoint.Grade != "" {
							msg = msg + "\n " + endpoint.IpAddress + ": " + endpoint.Grade
						} else {
							msg = msg + "\n " + endpoint.IpAddress + ": Err: " + endpoint.StatusMessage
						}
					}
					log.Println(msg)
				}
				activeAssessments--
				manager.results.reports = append(manager.results.reports, *e.report)
				manager.results.responses = append(manager.results.responses, e.report.rawJSON)
				// Are we done?
				if (activeAssessments == 0) && (moreAssessments == false) {
					close(manager.FrontendEventChannel)
					return
				}
			}
		// Once a second, start a new assessment, provided there are
		// hostnames left and we're not over the concurrent assessment limit.
		default:
			<-time.NewTimer(time.Second).C
			if moreAssessments {
				if activeAssessments < maxAssessments {
					host, hasNext := manager.hostProvider.next()
					if hasNext {
						manager.startAssessment(host)
					} else {
						// We've run out of hostnames and now just need
						// to wait for all the assessments to complete.
						moreAssessments = false
					}
				}
			}
		}
	}
}
// parseLogLevel maps a level name to its LOG_* constant; unrecognized
// names abort the program.
func parseLogLevel(level string) int {
	switch level {
	case "error":
		return LOG_ERROR
	case "info":
		return LOG_INFO
	case "debug":
		return LOG_DEBUG
	case "trace":
		return LOG_TRACE
	}
	log.Fatalf("[ERROR] Unrecognized log level: %v", level)
	return -1
}
func flattenJSON(inputJSON map[string]interface{}, rootKey string, flattened *map[string]interface{}) {
var keysep = "." // Char to separate keys
var Q = "\"" // Char to envelope strings
for rkey, value := range inputJSON {
key := rootKey + rkey
if _, ok := value.(string); ok {
(*flattened)[key] = Q+value.(string)+Q
} else if _, ok := value.(float64); ok {
(*flattened)[key] = value.(float64)
} else if _, ok := value.(bool); ok {
(*flattened)[key] = value.(bool)
} else if _, ok := value.([]interface{}); ok {
for i := 0; i < len(value.([]interface{})); i++ {
aKey := key+keysep+strconv.Itoa(i)
if _, ok := value.([]interface{})[i].(string); ok {
(*flattened)[aKey] = Q+value.([]interface{})[i].(string)+Q
} else if _, ok := value.([]interface{})[i].(float64); ok {
(*flattened)[aKey] = value.([]interface{})[i].(float64)
} else if _, ok := value.([]interface{})[i].(bool); ok {
(*flattened)[aKey] = value.([]interface{})[i].(bool)
} else {
flattenJSON(value.([]interface{})[i].(map[string]interface{}), key+keysep+strconv.Itoa(i)+keysep, flattened)
}
}
} else if value == nil {
(*flattened)[key] = nil
} else {
flattenJSON(value.(map[string]interface{}), key+keysep, flattened)
}
}
}
// flattenAndFormatJSON unmarshals raw JSON bytes, flattens the structure
// into dotted keys via flattenJSON, and returns one formatted "key: value"
// line per entry, sorted by key for deterministic output. Invalid JSON is
// fatal (this is a CLI tool; log.Fatalf exits the process).
func flattenAndFormatJSON(inputJSON []byte) *[]string {
	var flattened = make(map[string]interface{})
	mappedJSON := map[string]interface{}{}
	err := json.Unmarshal(inputJSON, &mappedJSON)
	if err != nil {
		log.Fatalf("[ERROR] Reconsitution of JSON failed: %v", err)
	}
	// Flatten the JSON structure, recursively.
	flattenJSON(mappedJSON, "", &flattened)
	// Build a sorted key index so output order is deterministic.
	// (Preallocated; the original used `for key, _ := range`, which is
	// redundant — the value is never used.)
	kIndex := make([]string, 0, len(flattened))
	for key := range flattened {
		kIndex = append(kIndex, key)
	}
	sort.Strings(kIndex)
	// Format the flattened data in key order.
	flatStrings := make([]string, 0, len(kIndex))
	for _, key := range kIndex {
		flatStrings = append(flatStrings, fmt.Sprintf("\"%v\": %v\n", key, flattened[key]))
	}
	return &flatStrings
}
func readLines(path *string) ([]string, error) {
file, err := os.Open(*path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
// validateURL reports whether the supplied string parses as a URL.
// NOTE(review): url.Parse is very lenient, so this only rejects grossly
// malformed input (bad percent-escapes, control characters, "://"-style
// missing schemes) — it does not guarantee a usable HTTP endpoint.
func validateURL(URL string) bool {
	_, err := url.Parse(URL)
	return err == nil
}
// validateHostname reports whether hostname resolves to at least one
// address. Resolution can "succeed" with an empty answer, so both the
// error and the address count are checked.
func validateHostname(hostname string) bool {
	addrs, err := net.LookupHost(hostname)
	return err == nil && len(addrs) >= 1
}
// main parses command-line flags, validates the API endpoint and target
// hostnames, then drives assessments through the Manager and prints the
// collected results in the requested output format once all work is done.
func main() {
	var conf_api = flag.String("api", "BUILTIN", "API entry point, for example https://www.example.com/api/")
	var conf_verbosity = flag.String("verbosity", "info", "Configure log verbosity: error, info, debug, or trace.")
	var conf_json_pretty = flag.Bool("json-pretty", false, "Enable pretty JSON output")
	var conf_quiet = flag.Bool("quiet", false, "Disable status messages (logging)")
	var conf_json_flat = flag.Bool("json-flat", false, "Output results in flattened JSON format")
	var conf_rawoutput = flag.Bool("rawoutput", false, "Print RAW JSON response")
	var conf_hostfile = flag.String("hostfile", "", "File containing hosts to scan (one per line)")
	var conf_usecache = flag.Bool("usecache", false, "If true, accept cached results (if available), else force live scan.")
	flag.Parse()
	logLevel = parseLogLevel(strings.ToLower(*conf_verbosity))
	// -quiet overrides whatever -verbosity asked for.
	if *conf_quiet {
		logLevel = LOG_NONE
	}
	// We prefer cached results
	if *conf_usecache {
		fromCache = true
		clearCache = false
	}
	// Verify that the API entry point is a URL.
	if *conf_api != "BUILTIN" {
		apiLocation = *conf_api
	}
	if !validateURL(apiLocation) {
		log.Fatalf("[ERROR] Invalid API URL: %v", apiLocation)
	}
	var hostnames []string
	if *conf_hostfile != "" {
		// Open file, and read it
		var err error
		hostnames, err = readLines(conf_hostfile)
		if err != nil {
			log.Fatalf("[ERROR] Reading from specified hostfile failed: %v", err)
		}
	} else {
		// Read hostnames from the rest of the args
		hostnames = flag.Args()
	}
	// Validate all hostnames before we attempt to test them. At least
	// one hostname is required.
	for _, host := range hostnames {
		if !validateHostname(host) {
			log.Fatalf("[ERROR] Invalid hostname: %v", host)
		}
	}
	hp := NewHostProvider(hostnames)
	manager := NewManager(hp)
	// Respond to events until all the work is done.
	for {
		_, running := <-manager.FrontendEventChannel
		if !running {
			var results []byte
			var err error
			if *conf_json_pretty {
				// Pretty JSON output
				results, err = json.MarshalIndent(manager.results.reports, "", " ")
			} else if *conf_json_flat && !*conf_rawoutput {
				// Flat JSON, but not RAW: each report is printed as it is
				// flattened. BUG FIX: the original `results, err :=` here
				// shadowed the outer pair; distinct names avoid confusion.
				for i := range manager.results.reports {
					report, merr := json.Marshal(manager.results.reports[i])
					if merr != nil {
						log.Fatalf("[ERROR] Output to JSON failed: %v", merr)
					}
					flattened := flattenAndFormatJSON(report)
					// Print the flattened data
					fmt.Println(*flattened)
				}
			} else if *conf_json_flat && *conf_rawoutput {
				// Flat JSON and RAW
				for i := range manager.results.responses {
					flattened := flattenAndFormatJSON([]byte(manager.results.responses[i]))
					// Print the flattened data
					fmt.Println(*flattened)
				}
			} else if *conf_rawoutput {
				// Raw (non-Go-mangled) JSON output
				fmt.Println(manager.results.responses)
			} else {
				// Regular JSON output
				results, err = json.Marshal(manager.results.reports)
			}
			if err != nil {
				log.Fatalf("[ERROR] Output to JSON failed: %v", err)
			}
			// BUG FIX: only the pretty/regular branches populate results;
			// the flat/raw branches printed already. The original printed
			// string(nil) here, emitting a spurious blank line.
			if results != nil {
				fmt.Println(string(results))
			}
			if logLevel >= LOG_INFO {
				log.Println("[INFO] All assessments complete; shutting down")
			}
			return
		}
	}
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"context"
"fmt"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/perf-tests/clusterloader2/pkg/execservice"
"k8s.io/perf-tests/clusterloader2/pkg/measurement"
measurementutil "k8s.io/perf-tests/clusterloader2/pkg/measurement/util"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/checker"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/informer"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/workerqueue"
"k8s.io/perf-tests/clusterloader2/pkg/util"
)
const (
	serviceCreationLatencyName = "ServiceCreationLatency"
	// Number of workers draining the informer event queue.
	serviceCreationLatencyWorkers = 10
	// Default for the "waitTimeout" measurement parameter.
	defaultServiceCreationLatencyTimeout = 10 * time.Minute
	// Polling interval used by waitForReady.
	defaultCheckInterval = 10 * time.Second
	// Delay before retrying after a failed reachability probe.
	pingBackoff = 1 * time.Second
	// Consecutive successful probes required to declare a service reachable.
	pingChecks = 10
	// Phase names recorded in the ObjectTransitionTimes store.
	creatingPhase = "creating"
	ipAssigningPhase = "ipAssigning"
	reachabilityPhase = "reachability"
)
// init registers the ServiceCreationLatency measurement with the
// measurement framework; a registration failure is fatal because the
// measurement would otherwise be silently unavailable.
func init() {
	err := measurement.Register(serviceCreationLatencyName, createServiceCreationLatencyMeasurement)
	if err != nil {
		klog.Fatalf("cant register service %v", err)
	}
}
// createServiceCreationLatencyMeasurement builds a fresh measurement
// instance with its selector, worker queue, transition-time store and
// ping-checker map initialized.
func createServiceCreationLatencyMeasurement() measurement.Measurement {
	m := &serviceCreationLatencyMeasurement{
		selector:      util.NewObjectSelector(),
		queue:         workerqueue.NewWorkerQueue(serviceCreationLatencyWorkers),
		creationTimes: measurementutil.NewObjectTransitionTimes(serviceCreationLatencyName),
		pingCheckers:  checker.NewMap(),
	}
	return m
}
// serviceCreationLatencyMeasurement observes Service objects and records
// how long each takes to move from creation to reachability.
type serviceCreationLatencyMeasurement struct {
	selector *util.ObjectSelector // which services to observe
	waitTimeout time.Duration // time budget for waitForReady
	stopCh chan struct{} // closed to stop the informer
	isRunning bool // set by start(), cleared by Dispose()
	queue workerqueue.Interface // decouples informer events from handling
	client clientset.Interface // Kubernetes API client
	creationTimes *measurementutil.ObjectTransitionTimes // per-service phase timestamps
	pingCheckers checker.Map // active reachability probers; guarded by lock
	lock sync.Mutex // protects pingCheckers
}
// Execute executes service startup latency measurement actions.
// Services can be specified by field and/or label selectors.
// If namespace is not passed by parameter, all-namespace scope is assumed.
// "start" action starts observation of the services.
// "waitForReady" waits until all services are reachable.
// "gather" returns service created latency summary.
// This measurement only works for services with ClusterIP, NodePort and LoadBalancer type.
func (s *serviceCreationLatencyMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
	s.client = config.ClusterFramework.GetClientSets().GetClient()
	action, err := util.GetString(config.Params, "action")
	if err != nil {
		return nil, err
	}
	// Reachability probes run curl from an exec pod, so the exec service
	// must be enabled regardless of the requested action.
	if !config.ClusterLoaderConfig.ExecServiceConfig.Enable {
		return nil, fmt.Errorf("enable-exec-service flag not enabled")
	}
	switch action {
	case "start":
		if err := s.selector.Parse(config.Params); err != nil {
			return nil, err
		}
		// waitTimeout is optional; fall back to the package default.
		s.waitTimeout, err = util.GetDurationOrDefault(config.Params, "waitTimeout", defaultServiceCreationLatencyTimeout)
		if err != nil {
			return nil, err
		}
		return nil, s.start()
	case "waitForReady":
		return nil, s.waitForReady()
	case "gather":
		return s.gather(config.Identifier)
	default:
		return nil, fmt.Errorf("unknown action %v", action)
	}
}
// Dispose cleans up after the measurement.
func (s *serviceCreationLatencyMeasurement) Dispose() {
	// Stop the informer first so no new events are produced.
	if s.isRunning {
		s.isRunning = false
		close(s.stopCh)
	}
	s.queue.Stop()
	// Stop all in-flight reachability checkers under the lock that
	// guards the pingCheckers map.
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.Dispose()
}
// String returns a string representation of the metric.
func (s *serviceCreationLatencyMeasurement) String() string {
	sel := s.selector.String()
	return serviceCreationLatencyName + ": " + sel
}
// start begins watching services matching the selector. Every informer
// event is pushed onto the worker queue so handleObject runs off the
// informer thread. Idempotent: a second call while running is a no-op.
func (s *serviceCreationLatencyMeasurement) start() error {
	if s.isRunning {
		klog.V(2).Infof("%s: service creation latency measurement already running", s)
		return nil
	}
	klog.V(2).Infof("%s: starting service creation latency measurement...", s)
	s.isRunning = true
	s.stopCh = make(chan struct{})
	i := informer.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				s.selector.ApplySelectors(&options)
				return s.client.CoreV1().Services(s.selector.Namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				s.selector.ApplySelectors(&options)
				return s.client.CoreV1().Services(s.selector.Namespace).Watch(context.TODO(), options)
			},
		},
		func(oldObj, newObj interface{}) {
			// Capture the event pair in a closure; the queue takes a *func().
			f := func() {
				s.handleObject(oldObj, newObj)
			}
			s.queue.Add(&f)
		},
	)
	// Blocks until the informer has synced or informerSyncTimeout elapses.
	return informer.StartAndSync(i, s.stopCh, informerSyncTimeout)
}
// waitForReady blocks until, for every supported service type, the number
// of services observed as reachable equals the number created, polling at
// defaultCheckInterval up to s.waitTimeout.
func (s *serviceCreationLatencyMeasurement) waitForReady() error {
	serviceTypes := []corev1.ServiceType{
		corev1.ServiceTypeClusterIP,
		corev1.ServiceTypeNodePort,
		corev1.ServiceTypeLoadBalancer,
	}
	return wait.Poll(defaultCheckInterval, s.waitTimeout, func() (bool, error) {
		for _, svcType := range serviceTypes {
			reachable := s.creationTimes.Count(phaseName(reachabilityPhase, svcType))
			created := s.creationTimes.Count(phaseName(creatingPhase, svcType))
			klog.V(2).Infof("%s type %s: %d created, %d reachable", s, svcType, created, reachable)
			if created != reachable {
				return false, nil
			}
		}
		return true, nil
	})
}
// serviceCreationTransitions defines the phase pairs whose latency is
// reported. LoadBalancer services additionally record the interval until
// an ingress IP is assigned (ipAssigningPhase); ClusterIP/NodePort get
// their address at creation time, so only create→reachable is tracked.
var serviceCreationTransitions = map[string]measurementutil.Transition{
	"create_to_available_clusterip": {
		From: phaseName(creatingPhase, corev1.ServiceTypeClusterIP),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeClusterIP),
	},
	"create_to_available_nodeport": {
		From: phaseName(creatingPhase, corev1.ServiceTypeNodePort),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeNodePort),
	},
	"create_to_assigned_loadbalancer": {
		From: phaseName(creatingPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(ipAssigningPhase, corev1.ServiceTypeLoadBalancer),
	},
	"assigned_to_available_loadbalancer": {
		From: phaseName(ipAssigningPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeLoadBalancer),
	},
	"create_to_available_loadbalancer": {
		From: phaseName(creatingPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeLoadBalancer),
	},
}
// gather computes the latency summary for all recorded phase transitions
// and returns it as a single JSON summary named after the identifier.
func (s *serviceCreationLatencyMeasurement) gather(identifier string) ([]measurement.Summary, error) {
	klog.V(2).Infof("%s: gathering service created latency measurement...", s)
	if !s.isRunning {
		return nil, fmt.Errorf("metric %s has not been started", s)
	}
	// NOTE: For ClusterIP or NodePort type of service, the cluster ip or node port is assigned as part of service creation API call, so the ipAssigning phase is no sense.
	latency := s.creationTimes.CalculateTransitionsLatency(serviceCreationTransitions, measurementutil.MatchAll)
	content, err := util.PrettyPrintJSON(measurementutil.LatencyMapToPerfData(latency))
	if err != nil {
		return nil, err
	}
	summaryName := fmt.Sprintf("%s_%s", serviceCreationLatencyName, identifier)
	return []measurement.Summary{measurement.CreateSummary(summaryName, "json", content)}, nil
}
// handleObject dispatches a single informer event. oldObj/newObj may each
// be nil: nil oldObj is an add, nil newObj is a delete.
func (s *serviceCreationLatencyMeasurement) handleObject(oldObj, newObj interface{}) {
	var oldService *corev1.Service
	var newService *corev1.Service
	var ok bool
	// A non-nil object that is not a *corev1.Service indicates a broken
	// watch stream; log and drop the event.
	oldService, ok = oldObj.(*corev1.Service)
	if oldObj != nil && !ok {
		klog.Errorf("%s: uncastable old object: %v", s, oldObj)
		return
	}
	newService, ok = newObj.(*corev1.Service)
	if newObj != nil && !ok {
		klog.Errorf("%s: uncastable new object: %v", s, newObj)
		return
	}
	// Ignore resync updates where neither spec nor status changed.
	if isEqual := oldService != nil &&
		newService != nil &&
		equality.Semantic.DeepEqual(oldService.Spec, newService.Spec) &&
		equality.Semantic.DeepEqual(oldService.Status, newService.Status); isEqual {
		return
	}
	// TODO(#680): Make it thread-safe.
	if !s.isRunning {
		return
	}
	if newObj == nil {
		// Deletion: tear down the associated ping checker.
		if err := s.deleteObject(oldService); err != nil {
			klog.Errorf("%s: delete checker error: %v", s, err)
		}
		return
	}
	if err := s.updateObject(newService); err != nil {
		klog.Errorf("%s: create checker error: %v", s, err)
	}
}
// deleteObject stops and removes the ping checker associated with svc.
func (s *serviceCreationLatencyMeasurement) deleteObject(svc *corev1.Service) error {
	svcKey, keyErr := cache.DeletionHandlingMetaNamespaceKeyFunc(svc)
	if keyErr != nil {
		return fmt.Errorf("meta key created error: %v", keyErr)
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.DeleteAndStop(svcKey)
	return nil
}
// updateObject records the creation (and, for LoadBalancers, the
// IP-assignment) phase for svc, then launches a background reachability
// checker once the service has an address worth probing.
func (s *serviceCreationLatencyMeasurement) updateObject(svc *corev1.Service) error {
	// This measurement only works for services with ClusterIP, NodePort and LoadBalancer type.
	if svc.Spec.Type != corev1.ServiceTypeClusterIP && svc.Spec.Type != corev1.ServiceTypeNodePort && svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
		return nil
	}
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(svc)
	if err != nil {
		return fmt.Errorf("meta key created error: %v", err)
	}
	if _, exists := s.creationTimes.Get(key, phaseName(creatingPhase, svc.Spec.Type)); !exists {
		s.creationTimes.Set(key, phaseName(creatingPhase, svc.Spec.Type), svc.CreationTimestamp.Time)
	}
	// LoadBalancer without an ingress address yet: wait for a later update.
	if svc.Spec.Type == corev1.ServiceTypeLoadBalancer && len(svc.Status.LoadBalancer.Ingress) < 1 {
		return nil
	}
	// NOTE: For ClusterIP or NodePort type of service, the cluster ip or node port is assigned as part of service creation API call, so the ipAssigning phase is no sense.
	if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
		if _, exists := s.creationTimes.Get(key, phaseName(ipAssigningPhase, svc.Spec.Type)); exists {
			return nil
		}
		s.creationTimes.Set(key, phaseName(ipAssigningPhase, svc.Spec.Type), time.Now())
	}
	pc := &pingChecker{
		callerName:    s.String(),
		svc:           svc,
		creationTimes: s.creationTimes,
		stopCh:        make(chan struct{}),
	}
	// BUG FIX: run the ping loop asynchronously. pc.run() loops until the
	// service becomes reachable (or Stop closes stopCh), so the original
	// synchronous call blocked this informer worker indefinitely.
	go pc.run()
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.Add(key, pc)
	return nil
}
// phaseName composes a phase identifier that is unique per service type,
// e.g. "creating_LoadBalancer".
func phaseName(phase string, serviceType corev1.ServiceType) string {
	return phase + "_" + string(serviceType)
}
// pingChecker repeatedly probes a single service from an exec pod until it
// answers pingChecks consecutive times, then records the reachability
// timestamp in creationTimes.
type pingChecker struct {
	callerName string // log prefix identifying the owning measurement
	svc *corev1.Service // service under test
	creationTimes *measurementutil.ObjectTransitionTimes // shared phase-timestamp store
	stopCh chan struct{} // closed by Stop() to terminate run()
}
// run probes the service until it has answered pingChecks times in a row,
// records the reachability timestamp, and exits. It also exits when Stop()
// closes stopCh or when reachability was already recorded elsewhere.
func (p *pingChecker) run() {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(p.svc)
	if err != nil {
		klog.Errorf("%s: meta key created error: %v", p.callerName, err)
		return
	}
	success := 0
	for {
		select {
		case <-p.stopCh:
			return
		default:
			// A previous iteration (after the pingChecks-th success) has
			// already set the timestamp; nothing more to do.
			if _, exists := p.creationTimes.Get(key, phaseName(reachabilityPhase, p.svc.Spec.Type)); exists {
				return
			}
			// TODO(#685): Make ping checks less communication heavy.
			pod, err := execservice.GetPod()
			if err != nil {
				klog.Warningf("call to execservice.GetPod() ended with error: %v", err)
				// Any failure resets the consecutive-success counter.
				success = 0
				time.Sleep(pingBackoff)
				continue
			}
			// Curl the address appropriate to the service type from the
			// exec pod. NOTE(review): only Ports[0] is probed, and
			// successful probes loop again immediately without backoff.
			switch p.svc.Spec.Type {
			case corev1.ServiceTypeClusterIP:
				cmd := fmt.Sprintf("curl %s:%d", p.svc.Spec.ClusterIP, p.svc.Spec.Ports[0].Port)
				_, err = execservice.RunCommand(pod, cmd)
			case corev1.ServiceTypeNodePort:
				cmd := fmt.Sprintf("curl %s:%d", pod.Status.HostIP, p.svc.Spec.Ports[0].NodePort)
				_, err = execservice.RunCommand(pod, cmd)
			case corev1.ServiceTypeLoadBalancer:
				cmd := fmt.Sprintf("curl %s:%d", p.svc.Status.LoadBalancer.Ingress[0].IP, p.svc.Spec.Ports[0].Port)
				_, err = execservice.RunCommand(pod, cmd)
			}
			if err != nil {
				success = 0
				time.Sleep(pingBackoff)
				continue
			}
			success++
			if success == pingChecks {
				p.creationTimes.Set(key, phaseName(reachabilityPhase, p.svc.Spec.Type), time.Now())
			}
		}
	}
}
// Stop terminates the run loop by closing stopCh. Must be called at most once.
func (p *pingChecker) Stop() {
	close(p.stopCh)
}
Add checking all IPs in Service reachibility verification
This is adding support for Dual Stack Kubernetes services
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"context"
"fmt"
"net"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/perf-tests/clusterloader2/pkg/execservice"
"k8s.io/perf-tests/clusterloader2/pkg/measurement"
measurementutil "k8s.io/perf-tests/clusterloader2/pkg/measurement/util"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/checker"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/informer"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/workerqueue"
"k8s.io/perf-tests/clusterloader2/pkg/util"
)
const (
	serviceCreationLatencyName = "ServiceCreationLatency"
	// Number of workers draining the informer event queue.
	serviceCreationLatencyWorkers = 10
	// Default for the "waitTimeout" measurement parameter.
	defaultServiceCreationLatencyTimeout = 10 * time.Minute
	// Polling interval used by waitForReady.
	defaultCheckInterval = 10 * time.Second
	// Delay before retrying after a failed reachability probe.
	pingBackoff = 1 * time.Second
	// Consecutive successful probes required to declare a service reachable.
	pingChecks = 10
	// Phase names recorded in the ObjectTransitionTimes store.
	creatingPhase = "creating"
	ipAssigningPhase = "ipAssigning"
	reachabilityPhase = "reachability"
)
// init registers the ServiceCreationLatency measurement with the
// measurement framework; a registration failure is fatal because the
// measurement would otherwise be silently unavailable.
func init() {
	err := measurement.Register(serviceCreationLatencyName, createServiceCreationLatencyMeasurement)
	if err != nil {
		klog.Fatalf("cant register service %v", err)
	}
}
// createServiceCreationLatencyMeasurement builds a fresh measurement
// instance with its selector, worker queue, transition-time store and
// ping-checker map initialized.
func createServiceCreationLatencyMeasurement() measurement.Measurement {
	m := &serviceCreationLatencyMeasurement{
		selector:      util.NewObjectSelector(),
		queue:         workerqueue.NewWorkerQueue(serviceCreationLatencyWorkers),
		creationTimes: measurementutil.NewObjectTransitionTimes(serviceCreationLatencyName),
		pingCheckers:  checker.NewMap(),
	}
	return m
}
// serviceCreationLatencyMeasurement observes Service objects and records
// how long each takes to move from creation to reachability.
type serviceCreationLatencyMeasurement struct {
	selector *util.ObjectSelector // which services to observe
	waitTimeout time.Duration // time budget for waitForReady
	stopCh chan struct{} // closed to stop the informer
	isRunning bool // set by start(), cleared by Dispose()
	queue workerqueue.Interface // decouples informer events from handling
	client clientset.Interface // Kubernetes API client
	creationTimes *measurementutil.ObjectTransitionTimes // per-service phase timestamps
	pingCheckers checker.Map // active reachability probers; guarded by lock
	lock sync.Mutex // protects pingCheckers
}
// Execute executes service startup latency measurement actions.
// Services can be specified by field and/or label selectors.
// If namespace is not passed by parameter, all-namespace scope is assumed.
// "start" action starts observation of the services.
// "waitForReady" waits until all services are reachable.
// "gather" returns service created latency summary.
// This measurement only works for services with ClusterIP, NodePort and LoadBalancer type.
func (s *serviceCreationLatencyMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
	s.client = config.ClusterFramework.GetClientSets().GetClient()
	action, err := util.GetString(config.Params, "action")
	if err != nil {
		return nil, err
	}
	// Reachability probes run curl from an exec pod, so the exec service
	// must be enabled regardless of the requested action.
	if !config.ClusterLoaderConfig.ExecServiceConfig.Enable {
		return nil, fmt.Errorf("enable-exec-service flag not enabled")
	}
	switch action {
	case "start":
		if err := s.selector.Parse(config.Params); err != nil {
			return nil, err
		}
		// waitTimeout is optional; fall back to the package default.
		s.waitTimeout, err = util.GetDurationOrDefault(config.Params, "waitTimeout", defaultServiceCreationLatencyTimeout)
		if err != nil {
			return nil, err
		}
		return nil, s.start()
	case "waitForReady":
		return nil, s.waitForReady()
	case "gather":
		return s.gather(config.Identifier)
	default:
		return nil, fmt.Errorf("unknown action %v", action)
	}
}
// Dispose cleans up after the measurement.
func (s *serviceCreationLatencyMeasurement) Dispose() {
	// Stop the informer first so no new events are produced.
	if s.isRunning {
		s.isRunning = false
		close(s.stopCh)
	}
	s.queue.Stop()
	// Stop all in-flight reachability checkers under the lock that
	// guards the pingCheckers map.
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.Dispose()
}
// String returns a string representation of the metric.
func (s *serviceCreationLatencyMeasurement) String() string {
	sel := s.selector.String()
	return serviceCreationLatencyName + ": " + sel
}
// start begins watching services matching the selector. Every informer
// event is pushed onto the worker queue so handleObject runs off the
// informer thread. Idempotent: a second call while running is a no-op.
func (s *serviceCreationLatencyMeasurement) start() error {
	if s.isRunning {
		klog.V(2).Infof("%s: service creation latency measurement already running", s)
		return nil
	}
	klog.V(2).Infof("%s: starting service creation latency measurement...", s)
	s.isRunning = true
	s.stopCh = make(chan struct{})
	i := informer.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				s.selector.ApplySelectors(&options)
				return s.client.CoreV1().Services(s.selector.Namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				s.selector.ApplySelectors(&options)
				return s.client.CoreV1().Services(s.selector.Namespace).Watch(context.TODO(), options)
			},
		},
		func(oldObj, newObj interface{}) {
			// Capture the event pair in a closure; the queue takes a *func().
			f := func() {
				s.handleObject(oldObj, newObj)
			}
			s.queue.Add(&f)
		},
	)
	// Blocks until the informer has synced or informerSyncTimeout elapses.
	return informer.StartAndSync(i, s.stopCh, informerSyncTimeout)
}
// waitForReady blocks until, for every supported service type, the number
// of services observed as reachable equals the number created, polling at
// defaultCheckInterval up to s.waitTimeout.
func (s *serviceCreationLatencyMeasurement) waitForReady() error {
	serviceTypes := []corev1.ServiceType{
		corev1.ServiceTypeClusterIP,
		corev1.ServiceTypeNodePort,
		corev1.ServiceTypeLoadBalancer,
	}
	return wait.Poll(defaultCheckInterval, s.waitTimeout, func() (bool, error) {
		for _, svcType := range serviceTypes {
			reachable := s.creationTimes.Count(phaseName(reachabilityPhase, svcType))
			created := s.creationTimes.Count(phaseName(creatingPhase, svcType))
			klog.V(2).Infof("%s type %s: %d created, %d reachable", s, svcType, created, reachable)
			if created != reachable {
				return false, nil
			}
		}
		return true, nil
	})
}
// serviceCreationTransitions defines the phase pairs whose latency is
// reported. LoadBalancer services additionally record the interval until
// an ingress IP is assigned (ipAssigningPhase); ClusterIP/NodePort get
// their address at creation time, so only create→reachable is tracked.
var serviceCreationTransitions = map[string]measurementutil.Transition{
	"create_to_available_clusterip": {
		From: phaseName(creatingPhase, corev1.ServiceTypeClusterIP),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeClusterIP),
	},
	"create_to_available_nodeport": {
		From: phaseName(creatingPhase, corev1.ServiceTypeNodePort),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeNodePort),
	},
	"create_to_assigned_loadbalancer": {
		From: phaseName(creatingPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(ipAssigningPhase, corev1.ServiceTypeLoadBalancer),
	},
	"assigned_to_available_loadbalancer": {
		From: phaseName(ipAssigningPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeLoadBalancer),
	},
	"create_to_available_loadbalancer": {
		From: phaseName(creatingPhase, corev1.ServiceTypeLoadBalancer),
		To:   phaseName(reachabilityPhase, corev1.ServiceTypeLoadBalancer),
	},
}
// gather computes the latency summary for all recorded phase transitions
// and returns it as a single JSON summary named after the identifier.
func (s *serviceCreationLatencyMeasurement) gather(identifier string) ([]measurement.Summary, error) {
	klog.V(2).Infof("%s: gathering service created latency measurement...", s)
	if !s.isRunning {
		return nil, fmt.Errorf("metric %s has not been started", s)
	}
	// NOTE: For ClusterIP or NodePort type of service, the cluster ip or node port is assigned as part of service creation API call, so the ipAssigning phase is no sense.
	latency := s.creationTimes.CalculateTransitionsLatency(serviceCreationTransitions, measurementutil.MatchAll)
	content, err := util.PrettyPrintJSON(measurementutil.LatencyMapToPerfData(latency))
	if err != nil {
		return nil, err
	}
	summaryName := fmt.Sprintf("%s_%s", serviceCreationLatencyName, identifier)
	return []measurement.Summary{measurement.CreateSummary(summaryName, "json", content)}, nil
}
// handleObject dispatches a single informer event. oldObj/newObj may each
// be nil: nil oldObj is an add, nil newObj is a delete.
func (s *serviceCreationLatencyMeasurement) handleObject(oldObj, newObj interface{}) {
	var oldService *corev1.Service
	var newService *corev1.Service
	var ok bool
	// A non-nil object that is not a *corev1.Service indicates a broken
	// watch stream; log and drop the event.
	oldService, ok = oldObj.(*corev1.Service)
	if oldObj != nil && !ok {
		klog.Errorf("%s: uncastable old object: %v", s, oldObj)
		return
	}
	newService, ok = newObj.(*corev1.Service)
	if newObj != nil && !ok {
		klog.Errorf("%s: uncastable new object: %v", s, newObj)
		return
	}
	// Ignore resync updates where neither spec nor status changed.
	if isEqual := oldService != nil &&
		newService != nil &&
		equality.Semantic.DeepEqual(oldService.Spec, newService.Spec) &&
		equality.Semantic.DeepEqual(oldService.Status, newService.Status); isEqual {
		return
	}
	// TODO(#680): Make it thread-safe.
	if !s.isRunning {
		return
	}
	if newObj == nil {
		// Deletion: tear down the associated ping checker.
		if err := s.deleteObject(oldService); err != nil {
			klog.Errorf("%s: delete checker error: %v", s, err)
		}
		return
	}
	if err := s.updateObject(newService); err != nil {
		klog.Errorf("%s: create checker error: %v", s, err)
	}
}
// deleteObject stops and removes the ping checker associated with svc.
func (s *serviceCreationLatencyMeasurement) deleteObject(svc *corev1.Service) error {
	svcKey, keyErr := cache.DeletionHandlingMetaNamespaceKeyFunc(svc)
	if keyErr != nil {
		return fmt.Errorf("meta key created error: %v", keyErr)
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.DeleteAndStop(svcKey)
	return nil
}
// updateObject records the creation (and, for LoadBalancers, the
// IP-assignment) phase for svc, then launches a background reachability
// checker once the service has an address worth probing.
func (s *serviceCreationLatencyMeasurement) updateObject(svc *corev1.Service) error {
	// This measurement only works for services with ClusterIP, NodePort and LoadBalancer type.
	if svc.Spec.Type != corev1.ServiceTypeClusterIP && svc.Spec.Type != corev1.ServiceTypeNodePort && svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
		return nil
	}
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(svc)
	if err != nil {
		return fmt.Errorf("meta key created error: %v", err)
	}
	if _, exists := s.creationTimes.Get(key, phaseName(creatingPhase, svc.Spec.Type)); !exists {
		s.creationTimes.Set(key, phaseName(creatingPhase, svc.Spec.Type), svc.CreationTimestamp.Time)
	}
	// LoadBalancer without an ingress address yet: wait for a later update.
	if svc.Spec.Type == corev1.ServiceTypeLoadBalancer && len(svc.Status.LoadBalancer.Ingress) < 1 {
		return nil
	}
	// NOTE: For ClusterIP or NodePort type of service, the cluster ip or node port is assigned as part of service creation API call, so the ipAssigning phase is no sense.
	if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
		if _, exists := s.creationTimes.Get(key, phaseName(ipAssigningPhase, svc.Spec.Type)); exists {
			return nil
		}
		s.creationTimes.Set(key, phaseName(ipAssigningPhase, svc.Spec.Type), time.Now())
	}
	pc := &pingChecker{
		callerName:    s.String(),
		svc:           svc,
		creationTimes: s.creationTimes,
		stopCh:        make(chan struct{}),
	}
	// BUG FIX: run the ping loop asynchronously. pc.run() loops until the
	// service becomes reachable (or Stop closes stopCh), so the original
	// synchronous call blocked this informer worker indefinitely.
	go pc.run()
	s.lock.Lock()
	defer s.lock.Unlock()
	s.pingCheckers.Add(key, pc)
	return nil
}
// phaseName composes a phase identifier that is unique per service type,
// e.g. "creating_LoadBalancer".
func phaseName(phase string, serviceType corev1.ServiceType) string {
	return phase + "_" + string(serviceType)
}
// pingChecker repeatedly probes a single service from an exec pod until it
// answers pingChecks consecutive times (all of its IPs, for dual-stack),
// then records the reachability timestamp in creationTimes.
type pingChecker struct {
	callerName string // log prefix identifying the owning measurement
	svc *corev1.Service // service under test
	creationTimes *measurementutil.ObjectTransitionTimes // shared phase-timestamp store
	stopCh chan struct{} // closed by Stop() to terminate run()
}
// run probes every IP of the service (dual-stack aware) until all answer
// pingChecks times in a row, records the reachability timestamp, and
// exits. It also exits when Stop() closes stopCh or when reachability was
// already recorded elsewhere.
func (p *pingChecker) run() {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(p.svc)
	if err != nil {
		klog.Errorf("%s: meta key created error: %v", p.callerName, err)
		return
	}
	success := 0
	for {
		select {
		case <-p.stopCh:
			return
		default:
			// A previous iteration (after the pingChecks-th success) has
			// already set the timestamp; nothing more to do.
			if _, exists := p.creationTimes.Get(key, phaseName(reachabilityPhase, p.svc.Spec.Type)); exists {
				return
			}
			// TODO(#685): Make ping checks less communication heavy.
			pod, err := execservice.GetPod()
			if err != nil {
				klog.Warningf("call to execservice.GetPod() ended with error: %v", err)
				// Any failure resets the consecutive-success counter.
				success = 0
				time.Sleep(pingBackoff)
				continue
			}
			// Collect all IPs to probe for this service type. For dual-stack
			// ClusterIP services Spec.ClusterIPs may hold both an IPv4 and an
			// IPv6 address; LoadBalancers may expose several ingress IPs.
			var ips []string
			var port int32
			switch p.svc.Spec.Type {
			case corev1.ServiceTypeClusterIP:
				ips = p.svc.Spec.ClusterIPs
				port = p.svc.Spec.Ports[0].Port
			case corev1.ServiceTypeNodePort:
				ips = []string{pod.Status.HostIP}
				port = p.svc.Spec.Ports[0].NodePort
			case corev1.ServiceTypeLoadBalancer:
				for _, ingress := range p.svc.Status.LoadBalancer.Ingress {
					ips = append(ips, ingress.IP)
				}
				port = p.svc.Spec.Ports[0].Port
			}
			// Every IP must answer; the first failure aborts the pass.
			// JoinHostPort brackets IPv6 addresses correctly for curl.
			for _, ip := range ips {
				address := net.JoinHostPort(ip, fmt.Sprint(port))
				command := fmt.Sprintf("curl %s", address)
				_, err = execservice.RunCommand(pod, command)
				if err != nil {
					break
				}
			}
			if err != nil {
				success = 0
				time.Sleep(pingBackoff)
				continue
			}
			// NOTE(review): if ips is empty (e.g. no ingress IPs), this
			// counts as a success without probing anything — confirm intent.
			success++
			if success == pingChecks {
				p.creationTimes.Set(key, phaseName(reachabilityPhase, p.svc.Spec.Type), time.Now())
			}
		}
	}
}
// Stop terminates the run loop by closing stopCh. Must be called at most once.
func (p *pingChecker) Stop() {
	close(p.stopCh)
}
|
package modules
import (
"compress/gzip"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"os"
"time"
"code.cloudfoundry.org/bytefmt"
)
// init registers the CPU benchmark under the "cpu" module name.
func init() {
	Modules["cpu"] = &CPU{}
}
// CPU holds benchmark results; in this version each field is the elapsed
// time in seconds for the corresponding workload.
type CPU struct {
	Sha256 float64 // seconds to SHA-256 hash 1GB of zeros
	Gzip float64 // seconds to gzip 1GB of zeros
	Aes float64 // seconds to AES-256-GCM seal 5GB (see aes())
}
// Run benchmarks SHA-256 hashing and gzip compression over 1GB of zeros
// from /dev/zero, then AES-256-GCM encryption, storing elapsed seconds in
// the corresponding fields.
//
// Fixes: error checks now precede the defers (the original deferred
// Close on a possibly-nil *os.File), the gzip Close error is checked
// (Close flushes, so its failure invalidates the timing), and the aes()
// error is propagated instead of being silently dropped.
func (stat *CPU) Run() error {
	zero, err := os.Open("/dev/zero")
	if err != nil {
		return err
	}
	defer zero.Close()
	null, err := os.Create("/dev/null")
	if err != nil {
		return err
	}
	defer null.Close()
	hashStart := time.Now()
	hash := sha256.New()
	if _, err := io.CopyN(hash, zero, bytefmt.GIGABYTE); err != nil {
		return err
	}
	hash.Sum(nil)
	stat.Sha256 = time.Since(hashStart).Seconds()
	gzipStart := time.Now()
	gz := gzip.NewWriter(null)
	if _, err := io.CopyN(gz, zero, bytefmt.GIGABYTE); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	stat.Gzip = time.Since(gzipStart).Seconds()
	return stat.aes()
}
// Print writes the three benchmark timings (seconds) to stdout.
func (stat *CPU) Print() {
	fmt.Printf("Sha256 : %.2f seconds\n", stat.Sha256)
	fmt.Printf("Gzip : %.2f seconds\n", stat.Gzip)
	fmt.Printf("AES : %.2f seconds\n", stat.Aes)
}
// aes measures AES-256-GCM encryption speed by sealing a 1MB random
// buffer repeatedly until 5GB have been processed. (The previous comment
// said 100MB, which did not match the 5*GIGABYTE loop bound.)
// crypto sourced from https://github.com/gtank/cryptopasta
//
// NOTE(review): the cipher and GCM wrapper are rebuilt, and a fresh nonce
// is drawn from crypto/rand, on every 1MB iteration — all of that setup
// is included in the measured time. Confirm that is the intended
// workload before comparing results across versions.
func (stat *CPU) aes() error {
	data := [bytefmt.MEGABYTE]byte{}
	if _, err := io.ReadFull(rand.Reader, data[:]); err != nil {
		return err
	}
	key := [32]byte{}
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		return err
	}
	aesStart := time.Now()
	for i := 0; i < bytefmt.GIGABYTE*5; i += bytefmt.MEGABYTE {
		block, err := aes.NewCipher(key[:])
		if err != nil {
			return err
		}
		gcm, err := cipher.NewGCM(block)
		if err != nil {
			return err
		}
		nonce := make([]byte, gcm.NonceSize())
		if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
			return err
		}
		gcm.Seal(nonce, nonce, data[:], nil)
	}
	stat.Aes = time.Since(aesStart).Seconds()
	return nil
}
cpu: display results in MB/s rather than seconds
package modules
import (
"compress/gzip"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"os"
"time"
"code.cloudfoundry.org/bytefmt"
)
// init registers the CPU benchmark under the "cpu" module name.
func init() {
	Modules["cpu"] = &CPU{}
}
// CPU holds benchmark results; in this version each field is a throughput
// in MB/s for the corresponding workload.
type CPU struct {
	Sha256 float64 // MB/s while SHA-256 hashing 1GB of zeros
	Gzip float64 // MB/s while gzipping 1GB of zeros
	Aes float64 // MB/s while AES-256-GCM sealing 5GB
}
// Run benchmarks SHA-256 hashing and gzip compression over 1GB of zeros
// from /dev/zero, then AES-256-GCM encryption, storing throughput in MB/s
// in the corresponding fields.
//
// Fixes: error checks now precede the defers (the original deferred
// Close on a possibly-nil *os.File), the gzip Close error is checked
// (Close flushes, so its failure invalidates the timing), and the aes()
// error is propagated instead of being silently dropped.
func (stat *CPU) Run() error {
	zero, err := os.Open("/dev/zero")
	if err != nil {
		return err
	}
	defer zero.Close()
	null, err := os.Create("/dev/null")
	if err != nil {
		return err
	}
	defer null.Close()
	hashStart := time.Now()
	hash := sha256.New()
	if _, err := io.CopyN(hash, zero, bytefmt.GIGABYTE); err != nil {
		return err
	}
	hash.Sum(nil)
	stat.Sha256 = (bytefmt.GIGABYTE / time.Since(hashStart).Seconds()) / bytefmt.MEGABYTE
	gzipStart := time.Now()
	gz := gzip.NewWriter(null)
	if _, err := io.CopyN(gz, zero, bytefmt.GIGABYTE); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	stat.Gzip = (bytefmt.GIGABYTE / time.Since(gzipStart).Seconds()) / bytefmt.MEGABYTE
	return stat.aes()
}
// Print writes the three benchmark throughputs (MB/s) to stdout.
func (stat *CPU) Print() {
	fmt.Printf("Sha256 : %7.2f MB/s\n", stat.Sha256)
	fmt.Printf("Gzip : %7.2f MB/s\n", stat.Gzip)
	fmt.Printf("AES : %7.2f MB/s\n", stat.Aes)
}
// aes encrypts 5GB of random data using AES-256-GCM, sealing a 1MB
// random buffer repeatedly, and records the throughput in MB/s.
// crypto sourced from https://github.com/gtank/cryptopasta
//
// NOTE(review): the cipher and GCM wrapper are rebuilt, and a fresh nonce
// is drawn from crypto/rand, on every 1MB iteration — all of that setup
// is included in the measured time. Confirm that is the intended
// workload before comparing results across versions.
func (stat *CPU) aes() error {
	data := [bytefmt.MEGABYTE]byte{}
	if _, err := io.ReadFull(rand.Reader, data[:]); err != nil {
		return err
	}
	key := [32]byte{}
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		return err
	}
	aesStart := time.Now()
	for i := 0; i < bytefmt.GIGABYTE*5; i += bytefmt.MEGABYTE {
		block, err := aes.NewCipher(key[:])
		if err != nil {
			return err
		}
		gcm, err := cipher.NewGCM(block)
		if err != nil {
			return err
		}
		nonce := make([]byte, gcm.NonceSize())
		if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
			return err
		}
		gcm.Seal(nonce, nonce, data[:], nil)
	}
	stat.Aes = (bytefmt.GIGABYTE * 5 / time.Since(aesStart).Seconds()) / bytefmt.MEGABYTE
	return nil
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package unixchild
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
)
// UcAcceptError represents a failure to accept the child's connection
// on the unix socket.
type UcAcceptError struct {
	Text string
}

// Error implements the error interface.
func (err *UcAcceptError) Error() string {
	return err.Text
}

// NewUcAcceptError wraps text in a new *UcAcceptError.
func NewUcAcceptError(text string) *UcAcceptError {
	return &UcAcceptError{Text: text}
}

// IsUcAcceptError reports whether err is a *UcAcceptError.
func IsUcAcceptError(err error) bool {
	_, ok := err.(*UcAcceptError)
	return ok
}
// Config holds the settings used to construct a Client via New.
type Config struct {
	SockPath      string        // filesystem path of the unix domain socket
	ChildPath     string        // path to the child executable
	ChildArgs     []string      // command-line arguments for the child
	Depth         int           // buffer depth of the FromChild/toChild channels
	MaxMsgSz      int           // stored as the client's max message size; 0 means the default (1024)
	AcceptTimeout time.Duration // how long Start waits for the child to connect; 0 means no deadline
}
// clientState tracks the lifecycle of a Client.
type clientState uint32

// Client lifecycle states.
const (
	CLIENT_STATE_STOPPED clientState = iota // not running; Start may be called
	CLIENT_STATE_STARTED                    // child connected, pumps running
	CLIENT_STATE_STOPPING                   // Stop in progress
)
// Client supervises a child process that exchanges length-prefixed
// messages with the parent over a unix domain socket.
//
// NOTE(review): state is read and written from multiple goroutines
// without synchronization — confirm callers serialize Start/Stop.
type Client struct {
	FromChild     chan []byte   // messages read from the child's connection
	ErrChild      chan error    // delivers the "child process terminated" error
	toChild       chan []byte   // messages queued for writing to the child
	childPath     string        // executable path of the child
	sockPath      string        // unix socket path the child connects back to
	childArgs     []string      // arguments passed to the child
	maxMsgSz      int           // maximum message size; defaulted to 1024 in New
	acceptTimeout time.Duration // optional deadline for the child's connect
	stop          chan bool     // Stop -> shutdown-goroutine signal
	stopped       chan bool     // shutdown-goroutine -> Stop acknowledgement
	state         clientState   // current lifecycle state
}
// New builds a Client from conf, applying a default maximum message
// size of 1024 when none is given.
//
// ErrChild is buffered (depth 1) so the goroutine that reports child
// termination never blocks when the caller is concurrently stopping
// the client; at most one error is ever sent, so depth 1 suffices.
// (An unbuffered channel here creates a race/deadlock between child
// termination and Stop.)
func New(conf Config) *Client {
	c := &Client{
		childPath:     conf.ChildPath,
		sockPath:      conf.SockPath,
		childArgs:     conf.ChildArgs,
		maxMsgSz:      conf.MaxMsgSz,
		FromChild:     make(chan []byte, conf.Depth),
		ErrChild:      make(chan error, 1),
		toChild:       make(chan []byte, conf.Depth),
		acceptTimeout: conf.AcceptTimeout,
		stop:          make(chan bool),
		stopped:       make(chan bool),
	}
	// Apply the documented default when no maximum message size was set.
	if c.maxMsgSz == 0 {
		c.maxMsgSz = 1024
	}
	return c
}
// startChild launches the configured child process, mirrors its
// stdout/stderr to the debug log, and returns the running command.
func (c *Client) startChild() (*exec.Cmd, error) {
	subProcess := exec.Command(c.childPath, c.childArgs...)
	// Place the child in its own process group (platform-specific helper).
	subProcess.SysProcAttr = SetSysProcAttrSetPGID()
	stdin, err := subProcess.StdinPipe()
	if err != nil {
		return nil, err
	}
	// stdin is closed immediately — presumably so the child sees EOF on
	// stdin and all real traffic flows over the unix socket; confirm.
	stdin.Close()
	stdout, _ := subProcess.StdoutPipe()
	stderr, _ := subProcess.StderrPipe()
	if err = subProcess.Start(); err != nil {
		return nil, err
	}
	// Forward the child's stdout lines to the debug log until EOF.
	go func() {
		br := bufio.NewReader(stdout)
		for {
			s, err := br.ReadString('\n')
			if err != nil {
				return
			}
			log.Debugf("child stdout: %s", strings.TrimSuffix(s, "\n"))
		}
	}()
	// Forward the child's stderr lines likewise.
	go func() {
		br := bufio.NewReader(stderr)
		for {
			s, err := br.ReadString('\n')
			if err != nil {
				return
			}
			log.Debugf("child stderr: %s", strings.TrimSuffix(s, "\n"))
		}
	}()
	go subProcess.Wait() // reap dead children
	return subProcess, nil
}
// handleChild pumps messages between the child's socket connection and
// the FromChild/toChild channels, returning once both pump goroutines
// have exited.
//
// Wire format: each message is a big-endian uint16 length prefix
// followed by that many bytes.
func (c *Client) handleChild(con net.Conn) {
	var wg sync.WaitGroup
	// bail lets the reader wake the writer when the connection dies.
	bail := make(chan bool)
	// fromDataPump reads length-prefixed frames from the connection and
	// delivers them to FromChild; on any read error it signals bail.
	fromDataPump := func() {
		defer wg.Done()
		for {
			var mlen uint16
			err := binary.Read(con, binary.BigEndian, &mlen)
			if err != nil {
				log.Debugln("fromDataPump error: ", err)
				bail <- true
				return
			}
			buf := make([]byte, mlen)
			_, err = io.ReadFull(con, buf)
			if err != nil {
				log.Debugln("fromDataPump error: ", err)
				bail <- true
				return
			}
			c.FromChild <- buf
		}
	}
	// toDataPump writes queued toChild messages to the connection until a
	// write fails or the reader signals bail.
	toDataPump := func() {
		defer wg.Done()
		for {
			select {
			case buf := <-c.toChild:
				mlen := uint16(len(buf))
				err := binary.Write(con, binary.BigEndian, mlen)
				if err != nil {
					log.Debugln("toDataPump error: ", err)
					return
				}
				_, err = con.Write(buf)
				if err != nil {
					log.Debugln("toDataPump error: ", err)
					return
				}
			case <-bail:
				log.Debugln("toDataPump bail")
				return
			}
		}
	}
	wg.Add(1)
	go fromDataPump()
	wg.Add(1)
	go toDataPump()
	wg.Wait()
}
// Stop shuts down a started client: it signals the shutdown goroutine
// installed by Start, waits for its acknowledgement, removes the
// socket and marks the client stopped. Calling Stop on a client that
// is not started is a no-op.
func (c *Client) Stop() {
	if c.state != CLIENT_STATE_STARTED {
		return
	}
	c.state = CLIENT_STATE_STOPPING
	log.Debugf("Stopping client")

	// Wake the shutdown goroutine, then wait for its acknowledgement.
	// A plain receive replaces the original single-case select, which is
	// equivalent (staticcheck S1000).
	c.stop <- true
	<-c.stopped

	c.deleteSocket()
	c.state = CLIENT_STATE_STOPPED
	log.Debugf("Stopped client")
}
// acceptDeadline converts the configured accept timeout into an
// absolute deadline. A nil result means no deadline is configured.
func (c *Client) acceptDeadline() *time.Time {
	if c.acceptTimeout == 0 {
		return nil
	}

	deadline := time.Now().Add(c.acceptTimeout)
	return &deadline
}
// deleteSocket removes the unix socket file, ignoring any error (the
// file may already be gone).
func (c *Client) deleteSocket() {
	log.Debugf("deleting socket")
	os.Remove(c.sockPath)
}
func (c *Client) Start() error {
if c.state != CLIENT_STATE_STOPPED {
return fmt.Errorf("Attempt to start unixchild twice")
}
l, err := net.Listen("unix", c.sockPath)
if err != nil {
c.deleteSocket()
return err
}
cmd, err := c.startChild()
if err != nil {
err = fmt.Errorf("unixchild start error: %s", err.Error())
log.Debugf("%s", err.Error())
c.deleteSocket()
return err
}
if t := c.acceptDeadline(); t != nil {
l.(*net.UnixListener).SetDeadline(*t)
}
fd, err := l.Accept()
if err != nil {
err = NewUcAcceptError(fmt.Sprintf("unixchild accept error: %s",
err.Error()))
c.deleteSocket()
return err
}
c.state = CLIENT_STATE_STARTED
go func() {
c.handleChild(fd)
c.Stop()
c.ErrChild <- fmt.Errorf("child process terminated")
}()
go func() {
<-c.stop
l.Close()
if cmd != nil {
cmd.Process.Kill()
}
c.deleteSocket()
c.stopped <- true
}()
return nil
}
// TxToChild queues data for transmission to the child process. It
// fails unless the client is fully started.
func (c *Client) TxToChild(data []byte) error {
	if c.state == CLIENT_STATE_STARTED {
		c.toChild <- data
		return nil
	}

	return fmt.Errorf("transmit over unixchild before it is fully started")
}
unixchild - fix race condition
This commit gives the unixchild ErrChild channel a depth of 1 (was 0). Prior to
this change, there was a race condition when the child process
terminated at the same time that the calling code stopped the unixchild
library.
A depth of 1 is sufficient because there will never be more than one
error. When the first error is encountered, the library terminates.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package unixchild
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
)
// UcAcceptError is the error returned when accepting the child's
// connection on the unix socket fails.
type UcAcceptError struct {
	Text string
}

// Error implements the error interface.
func (err *UcAcceptError) Error() string {
	return err.Text
}

// NewUcAcceptError builds a *UcAcceptError carrying text.
func NewUcAcceptError(text string) *UcAcceptError {
	return &UcAcceptError{Text: text}
}

// IsUcAcceptError reports whether err is a *UcAcceptError.
func IsUcAcceptError(err error) bool {
	_, ok := err.(*UcAcceptError)
	return ok
}
// Config holds the settings used to construct a Client via New.
type Config struct {
	SockPath      string        // filesystem path of the unix domain socket
	ChildPath     string        // path to the child executable
	ChildArgs     []string      // command-line arguments for the child
	Depth         int           // buffer depth of the FromChild/toChild channels
	MaxMsgSz      int           // stored as the client's max message size; 0 means the default (1024)
	AcceptTimeout time.Duration // how long Start waits for the child to connect; 0 means no deadline
}
// clientState tracks the lifecycle of a Client.
type clientState uint32

// Client lifecycle states.
const (
	CLIENT_STATE_STOPPED clientState = iota // not running; Start may be called
	CLIENT_STATE_STARTED                    // child connected, pumps running
	CLIENT_STATE_STOPPING                   // Stop in progress
)
// Client supervises a child process that exchanges length-prefixed
// messages with the parent over a unix domain socket.
//
// NOTE(review): state is read and written from multiple goroutines
// without synchronization — confirm callers serialize Start/Stop.
type Client struct {
	FromChild     chan []byte   // messages read from the child's connection
	ErrChild      chan error    // delivers the "child process terminated" error (buffered, depth 1)
	toChild       chan []byte   // messages queued for writing to the child
	childPath     string        // executable path of the child
	sockPath      string        // unix socket path the child connects back to
	childArgs     []string      // arguments passed to the child
	maxMsgSz      int           // maximum message size; defaulted to 1024 in New
	acceptTimeout time.Duration // optional deadline for the child's connect
	stop          chan bool     // Stop -> shutdown-goroutine signal
	stopped       chan bool     // shutdown-goroutine -> Stop acknowledgement
	state         clientState   // current lifecycle state
}
// New builds a Client from conf, applying a default maximum message
// size of 1024 when none is given.
func New(conf Config) *Client {
	c := &Client{
		childPath: conf.ChildPath,
		sockPath:  conf.SockPath,
		childArgs: conf.ChildArgs,
		maxMsgSz:  conf.MaxMsgSz,
		FromChild: make(chan []byte, conf.Depth),
		// Depth 1 so the goroutine reporting child termination never
		// blocks when the caller stops the client at the same time; at
		// most one error is ever sent, so depth 1 suffices.
		ErrChild:      make(chan error, 1),
		toChild:       make(chan []byte, conf.Depth),
		acceptTimeout: conf.AcceptTimeout,
		stop:          make(chan bool),
		stopped:       make(chan bool),
	}
	// Apply the documented default when no maximum message size was set.
	if c.maxMsgSz == 0 {
		c.maxMsgSz = 1024
	}
	return c
}
// startChild launches the configured child process, mirrors its
// stdout/stderr to the debug log, and returns the running command.
func (c *Client) startChild() (*exec.Cmd, error) {
	subProcess := exec.Command(c.childPath, c.childArgs...)
	// Place the child in its own process group (platform-specific helper).
	subProcess.SysProcAttr = SetSysProcAttrSetPGID()
	stdin, err := subProcess.StdinPipe()
	if err != nil {
		return nil, err
	}
	// stdin is closed immediately — presumably so the child sees EOF on
	// stdin and all real traffic flows over the unix socket; confirm.
	stdin.Close()
	stdout, _ := subProcess.StdoutPipe()
	stderr, _ := subProcess.StderrPipe()
	if err = subProcess.Start(); err != nil {
		return nil, err
	}
	// Forward the child's stdout lines to the debug log until EOF.
	go func() {
		br := bufio.NewReader(stdout)
		for {
			s, err := br.ReadString('\n')
			if err != nil {
				return
			}
			log.Debugf("child stdout: %s", strings.TrimSuffix(s, "\n"))
		}
	}()
	// Forward the child's stderr lines likewise.
	go func() {
		br := bufio.NewReader(stderr)
		for {
			s, err := br.ReadString('\n')
			if err != nil {
				return
			}
			log.Debugf("child stderr: %s", strings.TrimSuffix(s, "\n"))
		}
	}()
	go subProcess.Wait() // reap dead children
	return subProcess, nil
}
// handleChild pumps messages between the child's socket connection and
// the FromChild/toChild channels, returning once both pump goroutines
// have exited.
//
// Wire format: each message is a big-endian uint16 length prefix
// followed by that many bytes.
func (c *Client) handleChild(con net.Conn) {
	var wg sync.WaitGroup
	// bail lets the reader wake the writer when the connection dies.
	bail := make(chan bool)
	// fromDataPump reads length-prefixed frames from the connection and
	// delivers them to FromChild; on any read error it signals bail.
	fromDataPump := func() {
		defer wg.Done()
		for {
			var mlen uint16
			err := binary.Read(con, binary.BigEndian, &mlen)
			if err != nil {
				log.Debugln("fromDataPump error: ", err)
				bail <- true
				return
			}
			buf := make([]byte, mlen)
			_, err = io.ReadFull(con, buf)
			if err != nil {
				log.Debugln("fromDataPump error: ", err)
				bail <- true
				return
			}
			c.FromChild <- buf
		}
	}
	// toDataPump writes queued toChild messages to the connection until a
	// write fails or the reader signals bail.
	toDataPump := func() {
		defer wg.Done()
		for {
			select {
			case buf := <-c.toChild:
				mlen := uint16(len(buf))
				err := binary.Write(con, binary.BigEndian, mlen)
				if err != nil {
					log.Debugln("toDataPump error: ", err)
					return
				}
				_, err = con.Write(buf)
				if err != nil {
					log.Debugln("toDataPump error: ", err)
					return
				}
			case <-bail:
				log.Debugln("toDataPump bail")
				return
			}
		}
	}
	wg.Add(1)
	go fromDataPump()
	wg.Add(1)
	go toDataPump()
	wg.Wait()
}
// Stop shuts down a started client: it signals the shutdown goroutine
// installed by Start, waits for its acknowledgement, removes the
// socket and marks the client stopped. Calling Stop on a client that
// is not started is a no-op.
func (c *Client) Stop() {
	if c.state != CLIENT_STATE_STARTED {
		return
	}
	c.state = CLIENT_STATE_STOPPING
	log.Debugf("Stopping client")

	// Wake the shutdown goroutine, then wait for its acknowledgement.
	// A plain receive replaces the original single-case select, which is
	// equivalent (staticcheck S1000).
	c.stop <- true
	<-c.stopped

	c.deleteSocket()
	c.state = CLIENT_STATE_STOPPED
	log.Debugf("Stopped client")
}
// acceptDeadline converts the configured accept timeout into an
// absolute deadline. A nil result means no deadline is configured.
func (c *Client) acceptDeadline() *time.Time {
	if c.acceptTimeout == 0 {
		return nil
	}

	deadline := time.Now().Add(c.acceptTimeout)
	return &deadline
}
// deleteSocket removes the unix socket file, ignoring any error (the
// file may already be gone).
func (c *Client) deleteSocket() {
	log.Debugf("deleting socket")
	os.Remove(c.sockPath)
}
func (c *Client) Start() error {
if c.state != CLIENT_STATE_STOPPED {
return fmt.Errorf("Attempt to start unixchild twice")
}
l, err := net.Listen("unix", c.sockPath)
if err != nil {
c.deleteSocket()
return err
}
cmd, err := c.startChild()
if err != nil {
err = fmt.Errorf("unixchild start error: %s", err.Error())
log.Debugf("%s", err.Error())
c.deleteSocket()
return err
}
if t := c.acceptDeadline(); t != nil {
l.(*net.UnixListener).SetDeadline(*t)
}
fd, err := l.Accept()
if err != nil {
err = NewUcAcceptError(fmt.Sprintf("unixchild accept error: %s",
err.Error()))
c.deleteSocket()
return err
}
c.state = CLIENT_STATE_STARTED
go func() {
c.handleChild(fd)
c.Stop()
c.ErrChild <- fmt.Errorf("child process terminated")
}()
go func() {
<-c.stop
l.Close()
if cmd != nil {
cmd.Process.Kill()
}
c.deleteSocket()
c.stopped <- true
}()
return nil
}
// TxToChild queues data for transmission to the child process. It
// fails unless the client is fully started.
func (c *Client) TxToChild(data []byte) error {
	if c.state == CLIENT_STATE_STARTED {
		c.toChild <- data
		return nil
	}

	return fmt.Errorf("transmit over unixchild before it is fully started")
}
|
package main
import (
"encoding/pem"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sys/unix"
"gopkg.in/yaml.v2"
lxd "github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/network"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
cli "github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// RunInteractive walks the user through the interactive "lxd init"
// questionnaire (clustering, storage, MAAS, networking, daemon config)
// and returns the resulting preseed data. Optionally it prints the
// equivalent YAML preseed at the end.
func (c *cmdInit) RunInteractive(cmd *cobra.Command, args []string, d lxd.InstanceServer) (*cmdInitData, error) {
	// Initialize config
	config := cmdInitData{}
	config.Node.Config = map[string]interface{}{}
	config.Node.Networks = []internalClusterPostNetwork{}
	config.Node.StoragePools = []api.StoragePoolsPost{}
	config.Node.Profiles = []api.ProfilesPost{
		{
			Name: "default",
			ProfilePut: api.ProfilePut{
				Config:  map[string]string{},
				Devices: map[string]map[string]string{},
			},
		},
	}

	// Clustering
	err := c.askClustering(&config, d)
	if err != nil {
		return nil, err
	}

	// Ask all the other questions, unless we are joining an existing
	// cluster (in which case only the cluster section applies).
	if config.Cluster == nil || config.Cluster.ClusterAddress == "" {
		// Storage
		err = c.askStorage(&config, d)
		if err != nil {
			return nil, err
		}

		// MAAS
		err = c.askMAAS(&config, d)
		if err != nil {
			return nil, err
		}

		// Networking
		err = c.askNetworking(&config, d)
		if err != nil {
			return nil, err
		}

		// Daemon config
		err = c.askDaemon(&config, d)
		if err != nil {
			return nil, err
		}
	}

	// Print the YAML
	if cli.AskBool("Would you like a YAML \"lxd init\" preseed to be printed? (yes/no) [default=no]: ", "no") {
		var object cmdInitData

		// If the user has chosen to join an existing cluster, print
		// only YAML for the cluster section, which is the only
		// relevant one. Otherwise print the regular config.
		if config.Cluster != nil && config.Cluster.ClusterAddress != "" {
			object = cmdInitData{}
			object.Cluster = config.Cluster
		} else {
			object = config
		}

		out, err := yaml.Marshal(object)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to render the config")
		}

		fmt.Printf("%s\n", out)
	}

	return &config, nil
}
// askClustering interactively fills in the clustering section of the
// preseed: whether to enable clustering, this node's name and address,
// and — when joining — the address, certificate and credentials of an
// existing cluster member (via join token or manual entry).
func (c *cmdInit) askClustering(config *cmdInitData, d lxd.InstanceServer) error {
	if cli.AskBool("Would you like to use LXD clustering? (yes/no) [default=no]: ", "no") {
		config.Cluster = &initDataCluster{}
		config.Cluster.Enabled = true

		// Cluster server name
		serverName, err := os.Hostname()
		if err != nil {
			serverName = "lxd"
		}

		config.Cluster.ServerName = cli.AskString(
			fmt.Sprintf("What name should be used to identify this node in the cluster? [default=%s]: ", serverName), serverName, nil)

		// Cluster server address: must be a bindable, non-wildcard address
		// (or one the server already owns).
		address := util.NetworkInterfaceAddress()
		validateServerAddress := func(value string) error {
			address := util.CanonicalNetworkAddress(value)
			host, _, _ := net.SplitHostPort(address)
			if shared.StringInSlice(host, []string{"", "[::]", "0.0.0.0"}) {
				return fmt.Errorf("Invalid IP address or DNS name")
			}

			s, _, err := d.GetServer()
			if err == nil {
				if s.Config["cluster.https_address"] == address || s.Config["core.https_address"] == address {
					// We already own the address, just move on.
					return nil
				}
			}

			listener, err := net.Listen("tcp", address)
			if err != nil {
				return errors.Wrapf(err, "Can't bind address %q", address)
			}

			listener.Close()
			return nil
		}

		serverAddress := util.CanonicalNetworkAddress(cli.AskString(
			fmt.Sprintf("What IP address or DNS name should be used to reach this node? [default=%s]: ", address), address, validateServerAddress))
		config.Node.Config["core.https_address"] = serverAddress

		if cli.AskBool("Are you joining an existing cluster? (yes/no) [default=no]: ", "no") {
			// Existing cluster
			config.Cluster.ServerAddress = serverAddress

			// Root is required to access the certificate files
			if os.Geteuid() != 0 {
				return fmt.Errorf("Joining an existing cluster requires root privileges")
			}

			if cli.AskBool("Do you have a join token? (yes/no) [default=no]: ", "no") {
				var joinToken *api.ClusterMemberJoinToken

				validJoinToken := func(input string) error {
					j, err := clusterMemberJoinTokenDecode(input)
					if err != nil {
						return errors.Wrapf(err, "Invalid join token")
					}

					joinToken = j // Store valid decoded join token
					return nil
				}

				rawJoinToken := cli.AskString("Please provide join token: ", "", validJoinToken)

				if joinToken.ServerName != config.Cluster.ServerName {
					return fmt.Errorf("Server name does not match the one specified in join token")
				}

				// Try the token's candidate addresses in order, stopping at
				// the first member we can successfully connect to.
				for _, clusterAddress := range joinToken.Addresses {
					// Cluster URL
					_, _, err := net.SplitHostPort(clusterAddress)
					if err != nil {
						clusterAddress = fmt.Sprintf("%s:%d", clusterAddress, shared.DefaultPort)
					}

					config.Cluster.ClusterAddress = clusterAddress

					// Cluster certificate
					cert, err := shared.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent)
					if err != nil {
						fmt.Printf("Error connecting to existing cluster node %q: %v\n", clusterAddress, err)
						continue
					}

					certDigest := shared.CertFingerprint(cert)
					if joinToken.Fingerprint != certDigest {
						return fmt.Errorf("Certificate fingerprint mismatch between join token and cluster member %q", clusterAddress)
					}

					config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))

					// Stop at the first successful candidate; no need to
					// contact the remaining members.
					break
				}

				if config.Cluster.ClusterCertificate == "" {
					return fmt.Errorf("Unable to connect to any of the cluster members specified in join token")
				}

				// Raw join token used as cluster password so it can be validated.
				config.Cluster.ClusterPassword = rawJoinToken
			} else {
				for {
					// Cluster URL
					clusterAddress := cli.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
					_, _, err := net.SplitHostPort(clusterAddress)
					if err != nil {
						clusterAddress = fmt.Sprintf("%s:%d", clusterAddress, shared.DefaultPort)
					}

					config.Cluster.ClusterAddress = clusterAddress

					// Cluster certificate
					cert, err := shared.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent)
					if err != nil {
						fmt.Printf("Error connecting to existing cluster node: %v\n", err)
						continue
					}

					certDigest := shared.CertFingerprint(cert)
					fmt.Printf("Cluster fingerprint: %s\n", certDigest)
					fmt.Printf("You can validate this fingerprint by running \"lxc info\" locally on an existing node.\n")
					if !cli.AskBool("Is this the correct fingerprint? (yes/no) [default=no]: ", "no") {
						return fmt.Errorf("User aborted configuration")
					}

					config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))

					// Cluster password
					config.Cluster.ClusterPassword = cli.AskPasswordOnce("Cluster trust password: ")
					break
				}
			}

			// Confirm wiping
			if !cli.AskBool("All existing data is lost when joining a cluster, continue? (yes/no) [default=no] ", "no") {
				return fmt.Errorf("User aborted configuration")
			}

			// Connect to existing cluster
			serverCert, err := util.LoadServerCert(shared.VarPath(""))
			if err != nil {
				return err
			}

			err = cluster.SetupTrust(serverCert, serverName, config.Cluster.ClusterAddress, config.Cluster.ClusterCertificate, config.Cluster.ClusterPassword)
			if err != nil {
				return errors.Wrap(err, "Failed to setup trust relationship with cluster")
			}

			// Now we have setup trust, don't send to server, otherwise it will try and setup trust
			// again and if using a one-time join token, will fail.
			config.Cluster.ClusterPassword = ""

			// Client parameters to connect to the target cluster node.
			args := &lxd.ConnectionArgs{
				TLSClientCert: string(serverCert.PublicKey()),
				TLSClientKey:  string(serverCert.PrivateKey()),
				TLSServerCert: string(config.Cluster.ClusterCertificate),
				UserAgent:     version.UserAgent,
			}

			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), args)
			if err != nil {
				return err
			}

			// Get the list of required member config keys.
			cluster, _, err := client.GetCluster()
			if err != nil {
				return errors.Wrap(err, "Failed to retrieve cluster information")
			}

			validator := func(string) error { return nil }
			for i, config := range cluster.MemberConfig {
				question := fmt.Sprintf("Choose %s: ", config.Description)
				cluster.MemberConfig[i].Value = cli.AskString(question, "", validator)
			}

			config.Cluster.MemberConfig = cluster.MemberConfig
		} else {
			// Password authentication
			if cli.AskBool("Setup password authentication on the cluster? (yes/no) [default=yes]: ", "yes") {
				config.Node.Config["core.trust_password"] = cli.AskPassword("Trust password for new clients: ")
			}
		}
	}

	return nil
}
// askMAAS interactively configures the optional MAAS server
// connection (machine name, API URL and API key).
func (c *cmdInit) askMAAS(config *cmdInitData, d lxd.InstanceServer) error {
	if !cli.AskBool("Would you like to connect to a MAAS server? (yes/no) [default=no]: ", "no") {
		return nil
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "lxd"
	}

	// Only record the machine name when it differs from the hostname.
	prompt := fmt.Sprintf("What's the name of this host in MAAS? [default=%s]: ", hostname)
	machine := cli.AskString(prompt, hostname, nil)
	if machine != hostname {
		config.Node.Config["maas.machine"] = machine
	}

	config.Node.Config["maas.api.url"] = cli.AskString("URL of your MAAS server (e.g. http://1.2.3.4:5240/MAAS): ", "", nil)
	config.Node.Config["maas.api.key"] = cli.AskString("API key for your MAAS server: ", "", nil)

	return nil
}
// askNetworking interactively configures networking: reuse an existing
// bridge/host interface, create a Fan overlay network (clustered Ubuntu
// kernels only), or create a new local bridge with IPv4/IPv6 settings.
func (c *cmdInit) askNetworking(config *cmdInitData, d lxd.InstanceServer) error {
	if config.Cluster != nil || !cli.AskBool("Would you like to create a new local network bridge? (yes/no) [default=yes]: ", "yes") {
		// At this time, only the Ubuntu kernel supports the Fan, detect it
		fanKernel := false
		if shared.PathExists("/proc/sys/kernel/version") {
			content, _ := ioutil.ReadFile("/proc/sys/kernel/version")
			if content != nil && strings.Contains(string(content), "Ubuntu") {
				fanKernel = true
			}
		}

		if cli.AskBool("Would you like to configure LXD to use an existing bridge or host interface? (yes/no) [default=no]: ", "no") {
			// Loop until the user names an interface that actually exists.
			for {
				name := cli.AskString("Name of the existing bridge or host interface: ", "", nil)
				if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", name)) {
					fmt.Println("The requested interface doesn't exist. Please choose another one.")
					continue
				}

				// Add to the default profile
				config.Node.Profiles[0].Devices["eth0"] = map[string]string{
					"type":    "nic",
					"nictype": "macvlan",
					"name":    "eth0",
					"parent":  name,
				}

				// Bridges use nictype "bridged" instead of "macvlan".
				if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", name)) {
					config.Node.Profiles[0].Devices["eth0"]["nictype"] = "bridged"
				}

				// Optional MAAS subnet bindings for the interface.
				if config.Node.Config["maas.api.url"] != nil && cli.AskBool("Is this interface connected to your MAAS server? (yes/no) [default=yes]: ", "yes") {
					maasSubnetV4 := cli.AskString("MAAS IPv4 subnet name for this interface (empty for no subnet): ", "",
						func(input string) error { return nil })
					if maasSubnetV4 != "" {
						config.Node.Profiles[0].Devices["eth0"]["maas.subnet.ipv4"] = maasSubnetV4
					}

					maasSubnetV6 := cli.AskString("MAAS IPv6 subnet name for this interface (empty for no subnet): ", "",
						func(input string) error { return nil })
					if maasSubnetV6 != "" {
						config.Node.Profiles[0].Devices["eth0"]["maas.subnet.ipv6"] = maasSubnetV6
					}
				}

				break
			}
		} else if config.Cluster != nil && fanKernel && cli.AskBool("Would you like to create a new Fan overlay network? (yes/no) [default=yes]: ", "yes") {
			// Define the network
			networkPost := internalClusterPostNetwork{}
			networkPost.Name = "lxdfan0"
			networkPost.Project = project.Default
			networkPost.Config = map[string]string{
				"bridge.mode": "fan",
			}

			// Select the underlay: the Fan requires a /16 or /24 subnet.
			networkPost.Config["fan.underlay_subnet"] = cli.AskString("What subnet should be used as the Fan underlay? [default=auto]: ", "auto", func(value string) error {
				var err error
				var subnet *net.IPNet

				// Handle auto
				if value == "auto" {
					subnet, _, err = network.DefaultGatewaySubnetV4()
					if err != nil {
						return err
					}
				} else {
					_, subnet, err = net.ParseCIDR(value)
					if err != nil {
						return err
					}
				}

				size, _ := subnet.Mask.Size()
				if size != 16 && size != 24 {
					if value == "auto" {
						return fmt.Errorf("The auto-detected underlay (%s) isn't a /16 or /24, please specify manually", subnet.String())
					} else {
						return fmt.Errorf("The underlay subnet must be a /16 or a /24")
					}
				}

				return nil
			})

			// Add the new network
			config.Node.Networks = append(config.Node.Networks, networkPost)

			// Add to the default profile
			config.Node.Profiles[0].Devices["eth0"] = map[string]string{
				"type":    "nic",
				"name":    "eth0",
				"network": "lxdfan0",
			}
		}

		return nil
	}

	// New local bridge: loop until the chosen name doesn't clash with an
	// existing network. Note: the local variable "net" shadows the net
	// package inside this loop.
	for {
		// Define the network
		net := internalClusterPostNetwork{}
		net.Config = map[string]string{}
		net.Project = project.Default

		// Network name
		net.Name = cli.AskString("What should the new bridge be called? [default=lxdbr0]: ", "lxdbr0", func(netName string) error {
			netType, err := network.LoadByType("bridge")
			if err != nil {
				return err
			}

			return netType.ValidateName(netName)
		})

		_, _, err := d.GetNetwork(net.Name)
		if err == nil {
			fmt.Printf("The requested network bridge \"%s\" already exists. Please choose another name.\n", net.Name)
			continue
		}

		// Add to the default profile
		config.Node.Profiles[0].Devices["eth0"] = map[string]string{
			"type":    "nic",
			"name":    "eth0",
			"network": net.Name,
		}

		// IPv4
		net.Config["ipv4.address"] = cli.AskString("What IPv4 address should be used? (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		})

		// NAT only makes sense for an explicit subnet.
		if !shared.StringInSlice(net.Config["ipv4.address"], []string{"auto", "none"}) {
			net.Config["ipv4.nat"] = fmt.Sprintf("%v",
				cli.AskBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]: ", "yes"))
		}

		// IPv6
		net.Config["ipv6.address"] = cli.AskString("What IPv6 address should be used? (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		})

		if !shared.StringInSlice(net.Config["ipv6.address"], []string{"auto", "none"}) {
			net.Config["ipv6.nat"] = fmt.Sprintf("%v",
				cli.AskBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]: ", "yes"))
		}

		// Add the new network
		config.Node.Networks = append(config.Node.Networks, net)
		break
	}

	return nil
}
// askStorage interactively configures storage pools. In clustering
// mode it offers separate "local" and "remote" pools; otherwise a
// single pool covering everything.
func (c *cmdInit) askStorage(config *cmdInitData, d lxd.InstanceServer) error {
	if config.Cluster == nil {
		if !cli.AskBool("Do you want to configure a new storage pool? (yes/no) [default=yes]: ", "yes") {
			return nil
		}

		return c.askStoragePool(config, d, "all")
	}

	if cli.AskBool("Do you want to configure a new local storage pool? (yes/no) [default=yes]: ", "yes") {
		if err := c.askStoragePool(config, d, "local"); err != nil {
			return err
		}
	}

	if cli.AskBool("Do you want to configure a new remote storage pool? (yes/no) [default=no]: ", "no") {
		if err := c.askStoragePool(config, d, "remote"); err != nil {
			return err
		}
	}

	return nil
}
// askStoragePool interactively defines one storage pool of the given
// poolType ("all", "local" or "remote") and appends it to the preseed,
// also wiring it as the root disk of the default profile.
func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, poolType string) error {
	// Figure out the preferred storage driver
	availableBackends := c.availableStorageDrivers(poolType)
	if len(availableBackends) == 0 {
		return fmt.Errorf("No %s storage backends available", poolType)
	}

	backingFs, err := util.FilesystemDetect(shared.VarPath())
	if err != nil {
		backingFs = "dir"
	}

	// Preference order: btrfs when already on btrfs, then zfs, then
	// btrfs, falling back to dir.
	defaultStorage := "dir"
	if backingFs == "btrfs" && shared.StringInSlice("btrfs", availableBackends) {
		defaultStorage = "btrfs"
	} else if shared.StringInSlice("zfs", availableBackends) {
		defaultStorage = "zfs"
	} else if shared.StringInSlice("btrfs", availableBackends) {
		defaultStorage = "btrfs"
	}

	// Loop until a non-conflicting pool name is chosen.
	for {
		// Define the pool
		pool := api.StoragePoolsPost{}
		pool.Config = map[string]string{}

		if poolType == "all" {
			pool.Name = cli.AskString("Name of the new storage pool [default=default]: ", "default", nil)
		} else {
			pool.Name = poolType
		}

		_, _, err := d.GetStoragePool(pool.Name)
		if err == nil {
			if poolType == "all" {
				fmt.Printf("The requested storage pool \"%s\" already exists. Please choose another name.\n", pool.Name)
				continue
			}

			return fmt.Errorf("The %s storage pool already exists", poolType)
		}

		// Add to the default profile
		if config.Node.Profiles[0].Devices["root"] == nil {
			config.Node.Profiles[0].Devices["root"] = map[string]string{
				"type": "disk",
				"path": "/",
				"pool": pool.Name,
			}
		}

		// Storage backend
		if len(availableBackends) > 1 {
			defaultBackend := defaultStorage
			if poolType == "remote" {
				defaultBackend = "ceph"
			}

			pool.Driver = cli.AskChoice(
				fmt.Sprintf("Name of the storage backend to use (%s) [default=%s]: ", strings.Join(availableBackends, ", "), defaultBackend), availableBackends, defaultBackend)
		} else {
			pool.Driver = availableBackends[0]
		}

		// Optimization for dir
		if pool.Driver == "dir" {
			config.Node.StoragePools = append(config.Node.StoragePools, pool)
			break
		}

		// Optimization for btrfs on btrfs
		if pool.Driver == "btrfs" && backingFs == "btrfs" {
			if cli.AskBool(fmt.Sprintf("Would you like to create a new btrfs subvolume under %s? (yes/no) [default=yes]: ", shared.VarPath("")), "yes") {
				pool.Config["source"] = shared.VarPath("storage-pools", pool.Name)
				config.Node.StoragePools = append(config.Node.StoragePools, pool)
				break
			}
		}

		// Optimization for zfs on zfs (when using Ubuntu's bpool/rpool)
		if pool.Driver == "zfs" && backingFs == "zfs" {
			poolName, _ := shared.RunCommand("zpool", "get", "-H", "-o", "value", "name", "rpool")
			if strings.TrimSpace(poolName) == "rpool" && cli.AskBool("Would you like to create a new zfs dataset under rpool/lxd? (yes/no) [default=yes]: ", "yes") {
				pool.Config["source"] = "rpool/lxd"
				config.Node.StoragePools = append(config.Node.StoragePools, pool)
				break
			}
		}

		if cli.AskBool(fmt.Sprintf("Create a new %s pool? (yes/no) [default=yes]: ", strings.ToUpper(pool.Driver)), "yes") {
			if pool.Driver == "ceph" {
				// Ask for the name of the cluster
				pool.Config["ceph.cluster_name"] = cli.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// Ask for the name of the osd pool
				pool.Config["ceph.osd.pool_name"] = cli.AskString("Name of the OSD storage pool [default=lxd]: ", "lxd", nil)

				// Ask for the number of placement groups
				pool.Config["ceph.osd.pg_num"] = cli.AskString("Number of placement groups [default=32]: ", "32", nil)
			} else if pool.Driver == "cephfs" {
				// Ask for the name of the cluster
				pool.Config["cephfs.cluster_name"] = cli.AskString("Name of the existing CEPHfs cluster [default=ceph]: ", "ceph", nil)

				// Ask for the name of the cluster
				pool.Config["source"] = cli.AskString("Name of the CEPHfs volume: ", "", nil)
			} else if cli.AskBool("Would you like to use an existing empty block device (e.g. a disk or partition)? (yes/no) [default=no]: ", "no") {
				deviceExists := func(path string) error {
					if !shared.IsBlockdevPath(path) {
						return fmt.Errorf("'%s' is not a block device", path)
					}

					return nil
				}

				pool.Config["source"] = cli.AskString("Path to the existing block device: ", "", deviceExists)
			} else {
				// Loop-device backed pool: size defaults to 20% of the
				// disk, clamped to the 5-30GB range.
				st := unix.Statfs_t{}
				err := unix.Statfs(shared.VarPath(), &st)
				if err != nil {
					return errors.Wrapf(err, "Couldn't statfs %s", shared.VarPath())
				}

				/* choose 5 GB < x < 30GB, where x is 20% of the disk size */
				defaultSize := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5
				if defaultSize > 30 {
					defaultSize = 30
				}
				if defaultSize < 5 {
					defaultSize = 5
				}

				pool.Config["size"] = cli.AskString(
					fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%dGB]: ", defaultSize),
					fmt.Sprintf("%dGB", defaultSize),
					func(input string) error {
						input = strings.Split(input, "GB")[0]

						result, err := strconv.ParseInt(input, 10, 64)
						if err != nil {
							return err
						}

						if result < 1 {
							return fmt.Errorf("Minimum size is 1GB")
						}

						return nil
					})

				// Normalize the answer so it always carries the GB suffix.
				if !strings.HasSuffix(pool.Config["size"], "GB") {
					pool.Config["size"] = fmt.Sprintf("%sGB", pool.Config["size"])
				}
			}
		} else {
			if pool.Driver == "ceph" {
				// ask for the name of the cluster
				pool.Config["ceph.cluster_name"] = cli.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the existing pool
				pool.Config["source"] = cli.AskString("Name of the existing OSD storage pool [default=lxd]: ", "lxd", nil)
				pool.Config["ceph.osd.pool_name"] = pool.Config["source"]
			} else {
				question := fmt.Sprintf("Name of the existing %s pool or dataset: ", strings.ToUpper(pool.Driver))
				pool.Config["source"] = cli.AskString(question, "", nil)
			}
		}

		// Warn when the LVM thin provisioning tools are missing.
		if pool.Driver == "lvm" {
			_, err := exec.LookPath("thin_check")
			if err != nil {
				fmt.Printf(`
The LVM thin provisioning tools couldn't be found. LVM can still be used
without thin provisioning but this will disable over-provisioning,
increase the space requirements and creation time of images, containers
and snapshots.
If you wish to use thin provisioning, abort now, install the tools from
your Linux distribution and run "lxd init" again afterwards.
`)
				if !cli.AskBool("Do you want to continue without thin provisioning? (yes/no) [default=yes]: ", "yes") {
					return fmt.Errorf("The LVM thin provisioning tools couldn't be found on the system")
				}

				pool.Config["lvm.use_thinpool"] = "false"
			}
		}

		config.Node.StoragePools = append(config.Node.StoragePools, pool)
		break
	}

	return nil
}
// askDaemon interactively configures daemon-level settings: uid/gid
// sharing inside unprivileged containers, the optional network
// listener (address, port, trust password), and automatic image
// refresh.
func (c *cmdInit) askDaemon(config *cmdInitData, d lxd.InstanceServer) error {
	// Detect lack of uid/gid
	idmapset, err := idmap.DefaultIdmapSet("", "")
	if (err != nil || len(idmapset.Idmap) == 0 || idmapset.Usable() != nil) && shared.RunningInUserNS() {
		fmt.Printf(`
We detected that you are running inside an unprivileged container.
This means that unless you manually configured your host otherwise,
you will not have enough uids and gids to allocate to your containers.
LXD can re-use your container's own allocation to avoid the problem.
Doing so makes your nested containers slightly less safe as they could
in theory attack their parent container and gain more privileges than
they otherwise would.
`)
		if cli.AskBool("Would you like to have your containers share their parent's allocation? (yes/no) [default=yes]: ", "yes") {
			config.Node.Profiles[0].Config["security.privileged"] = "true"
		}
	}

	// Network listener
	if config.Cluster == nil && cli.AskBool("Would you like the LXD server to be available over the network? (yes/no) [default=no]: ", "no") {
		isIPAddress := func(s string) error {
			if s != "all" && net.ParseIP(s) == nil {
				return fmt.Errorf("'%s' is not an IP address", s)
			}

			return nil
		}

		netAddr := cli.AskString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress)
		if netAddr == "all" {
			netAddr = "::"
		}

		// IPv6 literals need brackets when joined with a port.
		if net.ParseIP(netAddr).To4() == nil {
			netAddr = fmt.Sprintf("[%s]", netAddr)
		}

		netPort := cli.AskInt(fmt.Sprintf("Port to bind LXD to [default=%d]: ", shared.DefaultPort), 1, 65535, fmt.Sprintf("%d", shared.DefaultPort), func(netPort int64) error {
			address := util.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort))

			s, _, err := d.GetServer()
			if err == nil {
				if s.Config["cluster.https_address"] == address || s.Config["core.https_address"] == address {
					// We already own the address, just move on.
					return nil
				}
			}

			// Verify the port is actually bindable before accepting it.
			listener, err := net.Listen("tcp", address)
			if err != nil {
				return fmt.Errorf("Can't bind address %q: %v", address, err)
			}

			listener.Close()
			return nil
		})

		config.Node.Config["core.https_address"] = util.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort))
		config.Node.Config["core.trust_password"] = cli.AskPassword("Trust password for new clients: ")
		if config.Node.Config["core.trust_password"] == "" {
			fmt.Printf("No password set, client certificates will have to be manually trusted.")
		}
	}

	// Ask if the user wants images to be automatically refreshed
	if !cli.AskBool("Would you like stale cached images to be updated automatically? (yes/no) [default=yes] ", "yes") {
		config.Node.Config["images.auto_update_interval"] = "0"
	}

	return nil
}
lxd/main/init/interactive: Don't attempt to connect to all join token candidates
Stop at the first successful one.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package main
import (
"encoding/pem"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sys/unix"
"gopkg.in/yaml.v2"
lxd "github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/network"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
cli "github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// RunInteractive drives the interactive "lxd init" flow and returns the
// resulting preseed data.
func (c *cmdInit) RunInteractive(cmd *cobra.Command, args []string, d lxd.InstanceServer) (*cmdInitData, error) {
	// Start from an empty configuration containing a single default profile.
	config := cmdInitData{}
	config.Node.Config = map[string]interface{}{}
	config.Node.Networks = []internalClusterPostNetwork{}
	config.Node.StoragePools = []api.StoragePoolsPost{}
	config.Node.Profiles = []api.ProfilesPost{
		{
			Name: "default",
			ProfilePut: api.ProfilePut{
				Config:  map[string]string{},
				Devices: map[string]map[string]string{},
			},
		},
	}

	// Clustering questions come first since joining a cluster skips the rest.
	if err := c.askClustering(&config, d); err != nil {
		return nil, err
	}

	// When not joining an existing cluster, walk through the remaining
	// question groups in order: storage, MAAS, networking, daemon config.
	if config.Cluster == nil || config.Cluster.ClusterAddress == "" {
		askers := []func(*cmdInitData, lxd.InstanceServer) error{
			c.askStorage,
			c.askMAAS,
			c.askNetworking,
			c.askDaemon,
		}

		for _, ask := range askers {
			if err := ask(&config, d); err != nil {
				return nil, err
			}
		}
	}

	// Optionally print the resulting preseed YAML.
	if cli.AskBool("Would you like a YAML \"lxd init\" preseed to be printed? (yes/no) [default=no]: ", "no") {
		// When joining an existing cluster only the cluster section is
		// relevant, so print just that; otherwise print the full config.
		object := config
		if config.Cluster != nil && config.Cluster.ClusterAddress != "" {
			object = cmdInitData{}
			object.Cluster = config.Cluster
		}

		out, err := yaml.Marshal(object)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to render the config")
		}

		fmt.Printf("%s\n", out)
	}

	return &config, nil
}
// askClustering asks whether to enable clustering and, if so, gathers the
// node name/address and (when joining an existing cluster) the information
// and trust needed to join: either via a join token or via manual
// fingerprint verification plus the cluster trust password.
func (c *cmdInit) askClustering(config *cmdInitData, d lxd.InstanceServer) error {
	if cli.AskBool("Would you like to use LXD clustering? (yes/no) [default=no]: ", "no") {
		config.Cluster = &initDataCluster{}
		config.Cluster.Enabled = true

		// Cluster server name (defaults to the hostname).
		serverName, err := os.Hostname()
		if err != nil {
			serverName = "lxd"
		}

		config.Cluster.ServerName = cli.AskString(
			fmt.Sprintf("What name should be used to identify this node in the cluster? [default=%s]: ", serverName), serverName, nil)

		// Cluster server address: must be a specific, bindable address.
		address := util.NetworkInterfaceAddress()
		validateServerAddress := func(value string) error {
			address := util.CanonicalNetworkAddress(value)

			host, _, _ := net.SplitHostPort(address)
			if shared.StringInSlice(host, []string{"", "[::]", "0.0.0.0"}) {
				return fmt.Errorf("Invalid IP address or DNS name")
			}

			s, _, err := d.GetServer()
			if err == nil {
				if s.Config["cluster.https_address"] == address || s.Config["core.https_address"] == address {
					// We already own the address, just move on.
					return nil
				}
			}

			listener, err := net.Listen("tcp", address)
			if err != nil {
				return errors.Wrapf(err, "Can't bind address %q", address)
			}

			listener.Close()
			return nil
		}

		serverAddress := util.CanonicalNetworkAddress(cli.AskString(
			fmt.Sprintf("What IP address or DNS name should be used to reach this node? [default=%s]: ", address), address, validateServerAddress))
		config.Node.Config["core.https_address"] = serverAddress

		if cli.AskBool("Are you joining an existing cluster? (yes/no) [default=no]: ", "no") {
			// Existing cluster
			config.Cluster.ServerAddress = serverAddress

			// Root is required to access the certificate files
			if os.Geteuid() != 0 {
				return fmt.Errorf("Joining an existing cluster requires root privileges")
			}

			if cli.AskBool("Do you have a join token? (yes/no) [default=no]: ", "no") {
				var joinToken *api.ClusterMemberJoinToken

				validJoinToken := func(input string) error {
					j, err := clusterMemberJoinTokenDecode(input)
					if err != nil {
						return errors.Wrapf(err, "Invalid join token")
					}

					joinToken = j // Store valid decoded join token
					return nil
				}

				rawJoinToken := cli.AskString("Please provide join token: ", "", validJoinToken)

				// Defensive nil check: the validator populates joinToken on
				// success, but guard against dereferencing a nil pointer if an
				// unvalidated (e.g. empty default) answer ever slips through.
				if joinToken == nil {
					return fmt.Errorf("Invalid join token")
				}

				if joinToken.ServerName != config.Cluster.ServerName {
					return fmt.Errorf("Server name does not match the one specified in join token")
				}

				// Attempt to find a working cluster member to use for joining by retrieving the
				// cluster certificate from each address in the join token until we succeed.
				for _, clusterAddress := range joinToken.Addresses {
					// Cluster URL
					_, _, err := net.SplitHostPort(clusterAddress)
					if err != nil {
						clusterAddress = fmt.Sprintf("%s:%d", clusterAddress, shared.DefaultPort)
					}

					config.Cluster.ClusterAddress = clusterAddress

					// Cluster certificate
					cert, err := shared.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent)
					if err != nil {
						fmt.Printf("Error connecting to existing cluster node %q: %v\n", clusterAddress, err)
						continue
					}

					// The reachable member's certificate must match the
					// fingerprint embedded in the join token.
					certDigest := shared.CertFingerprint(cert)
					if joinToken.Fingerprint != certDigest {
						return fmt.Errorf("Certificate fingerprint mismatch between join token and cluster member %q", clusterAddress)
					}

					config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))
					break // We've found a working cluster member.
				}

				if config.Cluster.ClusterCertificate == "" {
					return fmt.Errorf("Unable to connect to any of the cluster members specified in join token")
				}

				// Raw join token used as cluster password so it can be validated.
				config.Cluster.ClusterPassword = rawJoinToken
			} else {
				for {
					// Cluster URL
					clusterAddress := cli.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
					_, _, err := net.SplitHostPort(clusterAddress)
					if err != nil {
						clusterAddress = fmt.Sprintf("%s:%d", clusterAddress, shared.DefaultPort)
					}

					config.Cluster.ClusterAddress = clusterAddress

					// Cluster certificate
					cert, err := shared.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent)
					if err != nil {
						fmt.Printf("Error connecting to existing cluster node: %v\n", err)
						continue
					}

					// Without a join token the user must manually confirm the
					// remote certificate fingerprint.
					certDigest := shared.CertFingerprint(cert)
					fmt.Printf("Cluster fingerprint: %s\n", certDigest)
					fmt.Printf("You can validate this fingerprint by running \"lxc info\" locally on an existing node.\n")
					if !cli.AskBool("Is this the correct fingerprint? (yes/no) [default=no]: ", "no") {
						return fmt.Errorf("User aborted configuration")
					}

					config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))

					// Cluster password
					config.Cluster.ClusterPassword = cli.AskPasswordOnce("Cluster trust password: ")
					break
				}
			}

			// Confirm wiping
			if !cli.AskBool("All existing data is lost when joining a cluster, continue? (yes/no) [default=no] ", "no") {
				return fmt.Errorf("User aborted configuration")
			}

			// Connect to existing cluster
			serverCert, err := util.LoadServerCert(shared.VarPath(""))
			if err != nil {
				return err
			}

			err = cluster.SetupTrust(serverCert, serverName, config.Cluster.ClusterAddress, config.Cluster.ClusterCertificate, config.Cluster.ClusterPassword)
			if err != nil {
				return errors.Wrap(err, "Failed to setup trust relationship with cluster")
			}

			// Now we have setup trust, don't send to server, otherwise it will try and setup trust
			// again and if using a one-time join token, will fail.
			config.Cluster.ClusterPassword = ""

			// Client parameters to connect to the target cluster node.
			args := &lxd.ConnectionArgs{
				TLSClientCert: string(serverCert.PublicKey()),
				TLSClientKey:  string(serverCert.PrivateKey()),
				TLSServerCert: string(config.Cluster.ClusterCertificate),
				UserAgent:     version.UserAgent,
			}

			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), args)
			if err != nil {
				return err
			}

			// Get the list of required member config keys. Named clusterInfo
			// (not "cluster") to avoid shadowing the imported cluster package.
			clusterInfo, _, err := client.GetCluster()
			if err != nil {
				return errors.Wrap(err, "Failed to retrieve cluster information")
			}

			// Ask for a value for each member-specific config key the cluster
			// requires. The loop variable no longer shadows the config parameter.
			validator := func(string) error { return nil }
			for i, memberConfig := range clusterInfo.MemberConfig {
				question := fmt.Sprintf("Choose %s: ", memberConfig.Description)
				clusterInfo.MemberConfig[i].Value = cli.AskString(question, "", validator)
			}

			config.Cluster.MemberConfig = clusterInfo.MemberConfig
		} else {
			// Password authentication
			if cli.AskBool("Setup password authentication on the cluster? (yes/no) [default=yes]: ", "yes") {
				config.Node.Config["core.trust_password"] = cli.AskPassword("Trust password for new clients: ")
			}
		}
	}

	return nil
}
// askMAAS optionally wires the daemon up to a MAAS server by asking for the
// machine name, API URL and API key.
func (c *cmdInit) askMAAS(config *cmdInitData, d lxd.InstanceServer) error {
	if !cli.AskBool("Would you like to connect to a MAAS server? (yes/no) [default=no]: ", "no") {
		return nil
	}

	// The MAAS machine name defaults to the local hostname.
	defaultName, err := os.Hostname()
	if err != nil {
		defaultName = "lxd"
	}

	hostname := cli.AskString(fmt.Sprintf("What's the name of this host in MAAS? [default=%s]: ", defaultName), defaultName, nil)

	// Only record the name when it differs from the default.
	if hostname != defaultName {
		config.Node.Config["maas.machine"] = hostname
	}

	config.Node.Config["maas.api.url"] = cli.AskString("URL of your MAAS server (e.g. http://1.2.3.4:5240/MAAS): ", "", nil)
	config.Node.Config["maas.api.key"] = cli.AskString("API key for your MAAS server: ", "", nil)

	return nil
}
// askNetworking asks the networking questions: re-use an existing bridge or
// host interface, create a Fan overlay network (clustered Ubuntu kernels
// only), or create a new local network bridge. The chosen network is wired
// into the default profile as the "eth0" NIC device.
func (c *cmdInit) askNetworking(config *cmdInitData, d lxd.InstanceServer) error {
	// Clustered setups and users who decline a new bridge take this path.
	if config.Cluster != nil || !cli.AskBool("Would you like to create a new local network bridge? (yes/no) [default=yes]: ", "yes") {
		// At this time, only the Ubuntu kernel supports the Fan, detect it
		fanKernel := false
		if shared.PathExists("/proc/sys/kernel/version") {
			content, _ := ioutil.ReadFile("/proc/sys/kernel/version")
			if content != nil && strings.Contains(string(content), "Ubuntu") {
				fanKernel = true
			}
		}

		if cli.AskBool("Would you like to configure LXD to use an existing bridge or host interface? (yes/no) [default=no]: ", "no") {
			// Keep asking until the user names an interface that exists.
			for {
				name := cli.AskString("Name of the existing bridge or host interface: ", "", nil)
				if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", name)) {
					fmt.Println("The requested interface doesn't exist. Please choose another one.")
					continue
				}

				// Add to the default profile
				config.Node.Profiles[0].Devices["eth0"] = map[string]string{
					"type":    "nic",
					"nictype": "macvlan",
					"name":    "eth0",
					"parent":  name,
				}

				// Prefer bridged over macvlan when the interface is itself a bridge.
				if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", name)) {
					config.Node.Profiles[0].Devices["eth0"]["nictype"] = "bridged"
				}

				// Optionally associate the interface with MAAS subnets (only
				// offered when a MAAS URL was configured earlier).
				if config.Node.Config["maas.api.url"] != nil && cli.AskBool("Is this interface connected to your MAAS server? (yes/no) [default=yes]: ", "yes") {
					maasSubnetV4 := cli.AskString("MAAS IPv4 subnet name for this interface (empty for no subnet): ", "",
						func(input string) error { return nil })
					if maasSubnetV4 != "" {
						config.Node.Profiles[0].Devices["eth0"]["maas.subnet.ipv4"] = maasSubnetV4
					}

					maasSubnetV6 := cli.AskString("MAAS IPv6 subnet name for this interface (empty for no subnet): ", "",
						func(input string) error { return nil })
					if maasSubnetV6 != "" {
						config.Node.Profiles[0].Devices["eth0"]["maas.subnet.ipv6"] = maasSubnetV6
					}
				}

				break
			}
		} else if config.Cluster != nil && fanKernel && cli.AskBool("Would you like to create a new Fan overlay network? (yes/no) [default=yes]: ", "yes") {
			// Define the network
			networkPost := internalClusterPostNetwork{}
			networkPost.Name = "lxdfan0"
			networkPost.Project = project.Default
			networkPost.Config = map[string]string{
				"bridge.mode": "fan",
			}

			// Select the underlay
			networkPost.Config["fan.underlay_subnet"] = cli.AskString("What subnet should be used as the Fan underlay? [default=auto]: ", "auto", func(value string) error {
				var err error
				var subnet *net.IPNet

				// Handle auto
				if value == "auto" {
					subnet, _, err = network.DefaultGatewaySubnetV4()
					if err != nil {
						return err
					}
				} else {
					_, subnet, err = net.ParseCIDR(value)
					if err != nil {
						return err
					}
				}

				// The Fan requires a /16 or /24 underlay subnet.
				size, _ := subnet.Mask.Size()
				if size != 16 && size != 24 {
					if value == "auto" {
						return fmt.Errorf("The auto-detected underlay (%s) isn't a /16 or /24, please specify manually", subnet.String())
					} else {
						return fmt.Errorf("The underlay subnet must be a /16 or a /24")
					}
				}

				return nil
			})

			// Add the new network
			config.Node.Networks = append(config.Node.Networks, networkPost)

			// Add to the default profile
			config.Node.Profiles[0].Devices["eth0"] = map[string]string{
				"type":    "nic",
				"name":    "eth0",
				"network": "lxdfan0",
			}
		}

		return nil
	}

	// Create a new local bridge; loop until an unused name is chosen.
	// NOTE(review): the local variable "net" shadows the net package inside
	// this loop; safe here since the package isn't referenced below, but
	// consider renaming it.
	for {
		// Define the network
		net := internalClusterPostNetwork{}
		net.Config = map[string]string{}
		net.Project = project.Default

		// Network name
		net.Name = cli.AskString("What should the new bridge be called? [default=lxdbr0]: ", "lxdbr0", func(netName string) error {
			netType, err := network.LoadByType("bridge")
			if err != nil {
				return err
			}

			return netType.ValidateName(netName)
		})

		// GetNetwork succeeding means the name is already taken; re-ask.
		_, _, err := d.GetNetwork(net.Name)
		if err == nil {
			fmt.Printf("The requested network bridge \"%s\" already exists. Please choose another name.\n", net.Name)
			continue
		}

		// Add to the default profile
		config.Node.Profiles[0].Devices["eth0"] = map[string]string{
			"type":    "nic",
			"name":    "eth0",
			"network": net.Name,
		}

		// IPv4
		net.Config["ipv4.address"] = cli.AskString("What IPv4 address should be used? (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		})

		// NAT only makes sense for a manually specified subnet.
		if !shared.StringInSlice(net.Config["ipv4.address"], []string{"auto", "none"}) {
			net.Config["ipv4.nat"] = fmt.Sprintf("%v",
				cli.AskBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]: ", "yes"))
		}

		// IPv6
		net.Config["ipv6.address"] = cli.AskString("What IPv6 address should be used? (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		})

		// NAT only makes sense for a manually specified subnet.
		if !shared.StringInSlice(net.Config["ipv6.address"], []string{"auto", "none"}) {
			net.Config["ipv6.nat"] = fmt.Sprintf("%v",
				cli.AskBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]: ", "yes"))
		}

		// Add the new network
		config.Node.Networks = append(config.Node.Networks, net)
		break
	}

	return nil
}
// askStorage handles the top-level storage questions and delegates the
// actual pool definition to askStoragePool.
func (c *cmdInit) askStorage(config *cmdInitData, d lxd.InstanceServer) error {
	// Clustered setups distinguish between a local and a remote pool.
	if config.Cluster != nil {
		questions := []struct {
			poolType string
			prompt   string
			def      string
		}{
			{"local", "Do you want to configure a new local storage pool? (yes/no) [default=yes]: ", "yes"},
			{"remote", "Do you want to configure a new remote storage pool? (yes/no) [default=no]: ", "no"},
		}

		for _, q := range questions {
			if !cli.AskBool(q.prompt, q.def) {
				continue
			}

			if err := c.askStoragePool(config, d, q.poolType); err != nil {
				return err
			}
		}

		return nil
	}

	// Standalone setups get a single pool of any available type.
	if !cli.AskBool("Do you want to configure a new storage pool? (yes/no) [default=yes]: ", "yes") {
		return nil
	}

	return c.askStoragePool(config, d, "all")
}
// askStoragePool asks the questions needed to define a single storage pool of
// the given poolType ("local", "remote" or "all") and appends the result to
// config.Node.StoragePools. When no root disk device exists yet, the pool is
// also wired into the default profile as the root device.
func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, poolType string) error {
	// Figure out the preferred storage driver
	availableBackends := c.availableStorageDrivers(poolType)
	if len(availableBackends) == 0 {
		return fmt.Errorf("No %s storage backends available", poolType)
	}

	backingFs, err := util.FilesystemDetect(shared.VarPath())
	if err != nil {
		backingFs = "dir"
	}

	// Default driver preference: btrfs when already on btrfs, then zfs,
	// then btrfs, falling back to dir.
	defaultStorage := "dir"
	if backingFs == "btrfs" && shared.StringInSlice("btrfs", availableBackends) {
		defaultStorage = "btrfs"
	} else if shared.StringInSlice("zfs", availableBackends) {
		defaultStorage = "zfs"
	} else if shared.StringInSlice("btrfs", availableBackends) {
		defaultStorage = "btrfs"
	}

	for {
		// Define the pool
		pool := api.StoragePoolsPost{}
		pool.Config = map[string]string{}

		// Only "all" pools get a user-chosen name; local/remote pools are
		// named after their type.
		if poolType == "all" {
			pool.Name = cli.AskString("Name of the new storage pool [default=default]: ", "default", nil)
		} else {
			pool.Name = poolType
		}

		// GetStoragePool succeeding means the name is taken: re-ask when the
		// user picked the name, otherwise fail outright.
		_, _, err := d.GetStoragePool(pool.Name)
		if err == nil {
			if poolType == "all" {
				fmt.Printf("The requested storage pool \"%s\" already exists. Please choose another name.\n", pool.Name)
				continue
			}

			return fmt.Errorf("The %s storage pool already exists", poolType)
		}

		// Add to the default profile
		if config.Node.Profiles[0].Devices["root"] == nil {
			config.Node.Profiles[0].Devices["root"] = map[string]string{
				"type": "disk",
				"path": "/",
				"pool": pool.Name,
			}
		}

		// Storage backend
		if len(availableBackends) > 1 {
			defaultBackend := defaultStorage
			if poolType == "remote" {
				defaultBackend = "ceph"
			}

			pool.Driver = cli.AskChoice(
				fmt.Sprintf("Name of the storage backend to use (%s) [default=%s]: ", strings.Join(availableBackends, ", "), defaultBackend), availableBackends, defaultBackend)
		} else {
			pool.Driver = availableBackends[0]
		}

		// Optimization for dir
		if pool.Driver == "dir" {
			config.Node.StoragePools = append(config.Node.StoragePools, pool)
			break
		}

		// Optimization for btrfs on btrfs
		if pool.Driver == "btrfs" && backingFs == "btrfs" {
			if cli.AskBool(fmt.Sprintf("Would you like to create a new btrfs subvolume under %s? (yes/no) [default=yes]: ", shared.VarPath("")), "yes") {
				pool.Config["source"] = shared.VarPath("storage-pools", pool.Name)
				config.Node.StoragePools = append(config.Node.StoragePools, pool)
				break
			}
		}

		// Optimization for zfs on zfs (when using Ubuntu's bpool/rpool)
		if pool.Driver == "zfs" && backingFs == "zfs" {
			poolName, _ := shared.RunCommand("zpool", "get", "-H", "-o", "value", "name", "rpool")
			if strings.TrimSpace(poolName) == "rpool" && cli.AskBool("Would you like to create a new zfs dataset under rpool/lxd? (yes/no) [default=yes]: ", "yes") {
				pool.Config["source"] = "rpool/lxd"
				config.Node.StoragePools = append(config.Node.StoragePools, pool)
				break
			}
		}

		// Either create a brand new pool or (else branch) re-use an existing
		// pool/dataset/block device as the source.
		if cli.AskBool(fmt.Sprintf("Create a new %s pool? (yes/no) [default=yes]: ", strings.ToUpper(pool.Driver)), "yes") {
			if pool.Driver == "ceph" {
				// Ask for the name of the cluster
				pool.Config["ceph.cluster_name"] = cli.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// Ask for the name of the osd pool
				pool.Config["ceph.osd.pool_name"] = cli.AskString("Name of the OSD storage pool [default=lxd]: ", "lxd", nil)

				// Ask for the number of placement groups
				pool.Config["ceph.osd.pg_num"] = cli.AskString("Number of placement groups [default=32]: ", "32", nil)
			} else if pool.Driver == "cephfs" {
				// Ask for the name of the cluster
				pool.Config["cephfs.cluster_name"] = cli.AskString("Name of the existing CEPHfs cluster [default=ceph]: ", "ceph", nil)

				// Ask for the name of the cluster
				pool.Config["source"] = cli.AskString("Name of the CEPHfs volume: ", "", nil)
			} else if cli.AskBool("Would you like to use an existing empty block device (e.g. a disk or partition)? (yes/no) [default=no]: ", "no") {
				// The given path must point at an actual block device.
				deviceExists := func(path string) error {
					if !shared.IsBlockdevPath(path) {
						return fmt.Errorf("'%s' is not a block device", path)
					}

					return nil
				}

				pool.Config["source"] = cli.AskString("Path to the existing block device: ", "", deviceExists)
			} else {
				// Loop-device backed pool: derive a sensible default size
				// from the free space of the LXD var directory.
				st := unix.Statfs_t{}
				err := unix.Statfs(shared.VarPath(), &st)
				if err != nil {
					return errors.Wrapf(err, "Couldn't statfs %s", shared.VarPath())
				}

				/* choose 5 GB < x < 30GB, where x is 20% of the disk size */
				defaultSize := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5
				if defaultSize > 30 {
					defaultSize = 30
				}

				if defaultSize < 5 {
					defaultSize = 5
				}

				pool.Config["size"] = cli.AskString(
					fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%dGB]: ", defaultSize),
					fmt.Sprintf("%dGB", defaultSize),
					func(input string) error {
						input = strings.Split(input, "GB")[0]

						result, err := strconv.ParseInt(input, 10, 64)
						if err != nil {
							return err
						}

						if result < 1 {
							return fmt.Errorf("Minimum size is 1GB")
						}

						return nil
					})

				// Normalise the size so it always carries a GB suffix.
				if !strings.HasSuffix(pool.Config["size"], "GB") {
					pool.Config["size"] = fmt.Sprintf("%sGB", pool.Config["size"])
				}
			}
		} else {
			if pool.Driver == "ceph" {
				// ask for the name of the cluster
				pool.Config["ceph.cluster_name"] = cli.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the existing pool
				pool.Config["source"] = cli.AskString("Name of the existing OSD storage pool [default=lxd]: ", "lxd", nil)
				pool.Config["ceph.osd.pool_name"] = pool.Config["source"]
			} else {
				question := fmt.Sprintf("Name of the existing %s pool or dataset: ", strings.ToUpper(pool.Driver))
				pool.Config["source"] = cli.AskString(question, "", nil)
			}
		}

		// Warn when LVM thin provisioning tools are missing and let the user
		// opt out of thin provisioning instead of failing later.
		if pool.Driver == "lvm" {
			_, err := exec.LookPath("thin_check")
			if err != nil {
				fmt.Printf(`
The LVM thin provisioning tools couldn't be found. LVM can still be used
without thin provisioning but this will disable over-provisioning,
increase the space requirements and creation time of images, containers
and snapshots.
If you wish to use thin provisioning, abort now, install the tools from
your Linux distribution and run "lxd init" again afterwards.
`)
				if !cli.AskBool("Do you want to continue without thin provisioning? (yes/no) [default=yes]: ", "yes") {
					return fmt.Errorf("The LVM thin provisioning tools couldn't be found on the system")
				}

				pool.Config["lvm.use_thinpool"] = "false"
			}
		}

		config.Node.StoragePools = append(config.Node.StoragePools, pool)
		break
	}

	return nil
}
// askDaemon asks the remaining daemon-level questions: whether to share the
// parent container's uid/gid allocation when running inside a user namespace,
// whether to expose LXD over the network (bind address, port and trust
// password) and whether cached images should be refreshed automatically.
func (c *cmdInit) askDaemon(config *cmdInitData, d lxd.InstanceServer) error {
	// Detect lack of uid/gid
	idmapset, err := idmap.DefaultIdmapSet("", "")
	if (err != nil || len(idmapset.Idmap) == 0 || idmapset.Usable() != nil) && shared.RunningInUserNS() {
		fmt.Printf(`
We detected that you are running inside an unprivileged container.
This means that unless you manually configured your host otherwise,
you will not have enough uids and gids to allocate to your containers.
LXD can re-use your container's own allocation to avoid the problem.
Doing so makes your nested containers slightly less safe as they could
in theory attack their parent container and gain more privileges than
they otherwise would.
`)

		if cli.AskBool("Would you like to have your containers share their parent's allocation? (yes/no) [default=yes]: ", "yes") {
			config.Node.Profiles[0].Config["security.privileged"] = "true"
		}
	}

	// Network listener
	if config.Cluster == nil && cli.AskBool("Would you like the LXD server to be available over the network? (yes/no) [default=no]: ", "no") {
		isIPAddress := func(s string) error {
			if s != "all" && net.ParseIP(s) == nil {
				return fmt.Errorf("'%s' is not an IP address", s)
			}

			return nil
		}

		netAddr := cli.AskString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress)
		if netAddr == "all" {
			netAddr = "::"
		}

		// Bracket IPv6 literals so they can be joined with a port.
		if net.ParseIP(netAddr).To4() == nil {
			netAddr = fmt.Sprintf("[%s]", netAddr)
		}

		netPort := cli.AskInt(fmt.Sprintf("Port to bind LXD to [default=%d]: ", shared.DefaultPort), 1, 65535, fmt.Sprintf("%d", shared.DefaultPort), func(netPort int64) error {
			address := util.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort))

			// If this server already listens on the address, accept it as-is.
			s, _, err := d.GetServer()
			if err == nil {
				if s.Config["cluster.https_address"] == address || s.Config["core.https_address"] == address {
					// We already own the address, just move on.
					return nil
				}
			}

			// Otherwise verify the address is actually bindable.
			listener, err := net.Listen("tcp", address)
			if err != nil {
				return fmt.Errorf("Can't bind address %q: %v", address, err)
			}

			listener.Close()
			return nil
		})

		config.Node.Config["core.https_address"] = util.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort))
		config.Node.Config["core.trust_password"] = cli.AskPassword("Trust password for new clients: ")
		if config.Node.Config["core.trust_password"] == "" {
			// Fix: terminate the message with a newline so the next prompt
			// doesn't run onto the same line.
			fmt.Printf("No password set, client certificates will have to be manually trusted.\n")
		}
	}

	// Ask if the user wants images to be automatically refreshed
	if !cli.AskBool("Would you like stale cached images to be updated automatically? (yes/no) [default=yes] ", "yes") {
		config.Node.Config["images.auto_update_interval"] = "0"
	}

	return nil
}
|
package network
import (
"bufio"
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises updates to the forkdns servers list file.
var forkdnsServersLock sync.Mutex

// bridge represents a LXD bridge network.
type bridge struct {
	common
}
// Type returns the network type ("bridge").
func (n *bridge) Type() string {
	return "bridge"
}
// DBType returns the network type DB ID (db.NetworkTypeBridge).
func (n *bridge) DBType() db.NetworkType {
	return db.NetworkTypeBridge
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge interface on all
// cluster nodes. It is not suitable to use a static MAC address when "bridge.external_interfaces" is non-empty and
// the bridge interface has no IPv4 or IPv6 address set. This is because in a clustered environment the same bridge
// config is applied to all nodes, and if the bridge is being used to connect multiple nodes to the same network
// segment it would cause MAC conflicts to use the same MAC on all nodes. If an IP address is specified then
// connecting multiple nodes to the same network segment would also cause IP conflicts, so if an IP is defined
// then we assume this is not being done. However if IP addresses are explicitly set to "none" and
// "bridge.external_interfaces" is set then it may not be safe to use the same MAC address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	// Fan mode breaks if using the same MAC address on each node.
	if config["bridge.mode"] == "fan" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)
	}

	// We can't be sure that multiple clustered nodes aren't connected to the same network segment so don't
	// use a static MAC address for the bridge interface to avoid introducing a MAC conflict.
	if config["bridge.external_interfaces"] != "" && config["ipv4.address"] == "none" && config["ipv6.address"] == "none" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}

	return nil
}
// FillConfig fills requested config with any default values.
func (n *bridge) FillConfig(config map[string]string) error {
	// setIfEmpty applies a default only when the key is currently unset.
	setIfEmpty := func(key, value string) {
		if config[key] == "" {
			config[key] = value
		}
	}

	if config["bridge.mode"] == "fan" {
		setIfEmpty("fan.underlay_subnet", "auto")

		// We enable NAT by default even if address is manually specified.
		setIfEmpty("ipv4.nat", "true")
	} else {
		setIfEmpty("ipv4.address", "auto")
		if config["ipv4.address"] == "auto" {
			setIfEmpty("ipv4.nat", "true")
		}

		if config["ipv6.address"] == "" {
			// Only default IPv6 to auto when the kernel has IPv6 enabled.
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.address"] == "auto" {
			setIfEmpty("ipv6.nat", "true")
		}
	}

	// Now replace any "auto" keys with generated values.
	if err := n.populateAutoConfig(config); err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	return nil
}
// populateAutoConfig replaces "auto" in config with generated values.
func (n *bridge) populateAutoConfig(config map[string]string) error {
	dirty := false

	// Generate a random IPv4 subnet if requested.
	if config["ipv4.address"] == "auto" {
		subnet, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = subnet
		dirty = true
	}

	// Generate a random IPv6 subnet if requested.
	if config["ipv6.address"] == "auto" {
		subnet, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = subnet
		dirty = true
	}

	// Detect the fan underlay subnet from the default gateway if requested.
	if config["fan.underlay_subnet"] == "auto" {
		subnet, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = subnet.String()
		dirty = true
	}

	// Nothing was generated, or there is no state to validate against.
	if !dirty || n.state == nil {
		return nil
	}

	// Re-validate the now-populated config.
	return n.Validate(config)
}
// ValidateName validates network name.
func (n *bridge) ValidateName(name string) error {
	// Bridge networks must be usable as interface names.
	if err := validInterfaceName(name); err != nil {
		return err
	}

	// Then apply the name validation shared by all network types.
	return n.common.ValidateName(name)
}
// Validate validates the network's config map.
// It first builds a per-key rule set (including dynamically generated rules
// for tunnel.<name>.<key> entries, whose middle component is the remote name),
// runs the generic per-key validation, and then performs composite checks
// that span multiple keys: bridge-mode restrictions, MTU bounds, cluster-wide
// MAC safety and OVN/DHCP range overlap detection.
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return validate.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		}),
		"bridge.hwaddr": validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":    validate.Optional(validate.IsNetworkMTU),
		"bridge.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"standard", "fan"})
		},

		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": func(value string) error {
			// "auto" is resolved later by populateAutoConfig.
			if value == "auto" {
				return nil
			}

			return validate.Optional(validate.IsNetworkV4)(value)
		},
		"fan.type": func(value string) error {
			return validate.IsOneOf(value, []string{"vxlan", "ipip"})
		},

		"ipv4.address": func(value string) error {
			// "none" and "auto" are accepted as-is; anything else must be a
			// valid CIDR address.
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		},
		"ipv4.firewall": validate.Optional(validate.IsBool),
		"ipv4.nat":      validate.Optional(validate.IsBool),
		"ipv4.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),

		"ipv6.address": func(value string) error {
			// "none" and "auto" are accepted as-is; anything else must be a
			// valid CIDR address.
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		},
		"ipv6.firewall": validate.Optional(validate.IsBool),
		"ipv6.nat":      validate.Optional(validate.IsBool),
		"ipv6.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address":   validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":          validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":   validate.IsAny,
		"ipv6.dhcp.stateful": validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":   validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":        validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":       validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":    validate.Optional(validate.IsNetworkRangeV6List),

		"dns.domain": validate.IsAny,
		"dns.search": validate.IsAny,
		"dns.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},

		"raw.dnsmasq": validate.IsAny,

		"maas.subnet.ipv4": validate.IsAny,
		"maas.subnet.ipv6": validate.IsAny,
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			// Tunnel interface names are "<network>-<remote>" and must fit the
			// kernel's 15-char interface name limit (14 + separator).
			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return validate.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface": // Fixed typo ("inteface") that caused tunnel.<name>.interface to fail validation.
				rules[k] = validInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	err := n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		// Bridge mode checks: fan mode manages addressing itself, so most
		// ipv4.*/ipv6.* keys are disallowed, and fan.* keys only make sense
		// in fan mode.
		if bridgeMode == "fan" && strings.HasPrefix(k, "ipv4.") && !shared.StringInSlice(k, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(k, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(k, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks: enforce protocol minimums and fan encapsulation maximums.
		if k == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if bridgeMode == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.ovn.ranges")
		}

		dhcpRanges, err := parseIPRanges(config["ipv4.dhcp.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.dhcp.ranges")
		}

		for _, ovnRange := range ovnRanges {
			for _, dhcpRange := range dhcpRanges {
				if IPRangesOverlap(ovnRange, dhcpRange) {
					return fmt.Errorf(`The range specified in "ipv4.ovn.ranges" (%q) cannot overlap with "ipv4.dhcp.ranges"`, ovnRange)
				}
			}
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv6.ovn.ranges")
		}

		// If stateful DHCPv6 is enabled, check OVN ranges don't overlap with DHCPv6 stateful ranges.
		// Otherwise SLAAC will be being used to generate client IPs and predefined ranges aren't used.
		if dhcpSubnet != nil && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
			dhcpRanges, err := parseIPRanges(config["ipv6.dhcp.ranges"], allowedNets...)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing ipv6.dhcp.ranges")
			}

			for _, ovnRange := range ovnRanges {
				for _, dhcpRange := range dhcpRanges {
					if IPRangesOverlap(ovnRange, dhcpRange) {
						return fmt.Errorf(`The range specified in "ipv6.ovn.ranges" (%q) cannot overlap with "ipv6.dhcp.ranges"`, ovnRange)
					}
				}
			}
		}
	}

	return nil
}
// Create refuses to create the network when an interface with its name is
// already present on the host, then delegates to the common creation logic.
func (n *bridge) Create(clientType request.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// The bridge interface name must be free before we can create it.
	if InterfaceExists(n.name) {
		return fmt.Errorf("Network interface %q already exists", n.name)
	}

	return n.common.create(clientType)
}
// isRunning returns whether the network is up, i.e. whether a host interface
// with the network's name currently exists.
func (n *bridge) isRunning() bool {
	return InterfaceExists(n.name)
}
// Delete removes the network: it is stopped first if running, its apparmor
// profiles are deleted, and then the common deletion logic runs.
func (n *bridge) Delete(clientType request.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Bring the network down before removing it.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clientType)
}
// Rename renames a network.
// The network is stopped, its forkdns log file is moved to match the new
// name, the common rename steps run, and the network is started again.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if InterfaceExists(newName) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// Fix: the rename source must be the full log path. Previously the bare
		// file name was passed, which is resolved relative to the daemon's
		// working directory and doesn't match the file checked above.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
// A nil oldConfig tells setup this is a fresh start rather than a
// reconfiguration of an already-running network.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	return n.setup(nil)
}
// setup restarts the network.
func (n *bridge) setup(oldConfig map[string]string) error {
// If we are in mock mode, just no-op.
if n.state.OS.MockMode {
return nil
}
n.logger.Debug("Setting up network")
revert := revert.New()
defer revert.Fail()
// Create directory.
if !shared.PathExists(shared.VarPath("networks", n.name)) {
err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
if err != nil {
return err
}
}
// Create the bridge interface if doesn't exist.
if !n.isRunning() {
if n.config["bridge.driver"] == "openvswitch" {
ovs := openvswitch.NewOVS()
if !ovs.Installed() {
return fmt.Errorf("Open vSwitch isn't installed on this system")
}
err := ovs.BridgeAdd(n.name, false)
if err != nil {
return err
}
revert.Add(func() { ovs.BridgeDelete(n.name) })
} else {
_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
if err != nil {
return err
}
revert.Add(func() { shared.RunCommand("ip", "link", "delete", "dev", n.name) })
}
}
// Get a list of tunnels.
tunnels := n.getTunnels()
// IPv6 bridge configuration.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
if !shared.PathExists("/proc/sys/net/ipv6") {
return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
}
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
if err != nil {
return err
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
if err != nil {
return err
}
}
// Get a list of interfaces.
ifaces, err := net.Interfaces()
if err != nil {
return err
}
// Cleanup any existing tunnel device.
for _, iface := range ifaces {
if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
if err != nil {
return err
}
}
}
// Set the MTU.
mtu := ""
if n.config["bridge.mtu"] != "" {
mtu = n.config["bridge.mtu"]
} else if len(tunnels) > 0 {
mtu = "1400"
} else if n.config["bridge.mode"] == "fan" {
if n.config["fan.type"] == "ipip" {
mtu = "1480"
} else {
mtu = "1450"
}
}
// Attempt to add a dummy device to the bridge to force the MTU.
if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
if err == nil {
revert.Add(func() { shared.RunCommand("ip", "link", "delete", "dev", fmt.Sprintf("%s-mtu", n.name)) })
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
if err == nil {
AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
}
}
}
// Now, set a default MTU.
if mtu == "" {
mtu = "1500"
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
// Always prefer static MAC address if set.
hwAddr := n.config["bridge.hwaddr"]
// If no cluster wide static MAC address set, then generate one.
if hwAddr == "" {
var seedNodeID int64
if n.checkClusterWideMACSafe(n.config) != nil {
// If not safe to use a cluster wide MAC or in in fan mode, then use cluster node's ID to
// generate a stable per-node & network derived random MAC.
seedNodeID = n.state.Cluster.GetNodeID()
} else {
// If safe to use a cluster wide MAC, then use a static cluster node of 0 to generate a
// stable per-network derived random MAC.
seedNodeID = 0
}
// Load server certificate. This is needs to be the same certificate for all nodes in a cluster.
cert, err := util.LoadCert(n.state.OS.VarDir)
if err != nil {
return err
}
// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
// standalone nodes with the same network ID connected to the same external network don't generate
// the same MAC for their networks). It relies on the certificate being the same for all nodes in a
// cluster to allow the same MAC to be generated on each bridge interface in the network when
// seedNodeID is 0 (when safe to do so).
seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())
// Generate a hash from the randSourceNodeID and network ID to use as seed for random MAC.
// Use the FNV-1a hash algorithm to convert our seed string into an int64 for use as seed.
hash := fnv.New64a()
_, err = io.WriteString(hash, seed)
if err != nil {
return err
}
// Initialise a non-cryptographic random number generator using the stable seed.
r := rand.New(rand.NewSource(int64(hash.Sum64())))
hwAddr = randomHwaddr(r)
n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
}
// Set the MAC address on the bridge interface if specified.
if hwAddr != "" {
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", hwAddr)
if err != nil {
return err
}
}
// Enable VLAN filtering for Linux bridges.
if n.config["bridge.driver"] != "openvswitch" {
err = BridgeVLANFilterSetStatus(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
// Set the default PVID for new ports to 1.
err = BridgeVLANSetDefaultPVID(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
}
// Bring it up.
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
// Add any listed existing external interface.
if n.config["bridge.external_interfaces"] != "" {
for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
entry = strings.TrimSpace(entry)
iface, err := net.InterfaceByName(entry)
if err != nil {
n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
continue
}
unused := true
addrs, err := iface.Addrs()
if err == nil {
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if ip != nil && err == nil && ip.IsGlobalUnicast() {
unused = false
break
}
}
}
if !unused {
return fmt.Errorf("Only unconfigured network interfaces can be bridged")
}
err = AttachInterface(n.name, entry)
if err != nil {
return err
}
}
}
// Remove any existing IPv4 firewall rules.
if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 4)
if err != nil {
return err
}
}
// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err := n.bootRoutesV4()
if err != nil {
return err
}
// Flush all IPv4 addresses and routes.
_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv4 firewall (includes fan).
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
if n.DHCPv4Subnet() != nil && n.hasIPv4Firewall() {
// Setup basic iptables overrides for DHCP/DNS.
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
if err != nil {
return err
}
}
// Attempt a workaround for broken DHCP clients.
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
if err != nil {
return err
}
}
// Allow forwarding.
if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
err = util.SysctlSet("net/ipv4/ip_forward", "1")
if err != nil {
return err
}
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
if err != nil {
return err
}
}
} else {
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
if err != nil {
return err
}
}
}
}
// Start building process using subprocess package.
command := "dnsmasq"
dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
"--except-interface=lo",
"--pid-file=", // Disable attempt at writing a PID file.
"--no-ping", // --no-ping is very important to prevent delays to lease file updates.
fmt.Sprintf("--interface=%s", n.name)}
dnsmasqVersion, err := dnsmasq.GetVersion()
if err != nil {
return err
}
// --dhcp-rapid-commit option is only supported on >2.79.
minVer, _ := version.NewDottedVersion("2.79")
if dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
}
if !daemon.Debug {
// --quiet options are only supported on >2.67.
minVer, _ := version.NewDottedVersion("2.67")
if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
}
}
// Configure IPv4.
if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
// Parse the subnet.
ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv4.address")
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
if n.DHCPv4Subnet() != nil {
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
if n.config["ipv4.dhcp.gateway"] != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
}
if mtu != "1500" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
}
dnsSearch := n.config["dns.search"]
if dnsSearch != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
}
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
if n.config["ipv4.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
}
}
// Add the address.
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
if err != nil {
return err
}
// Configure NAT
if shared.IsTrue(n.config["ipv4.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv4.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv4.nat.address"])
}
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes.
if n.config["ipv4.routes"] != "" {
for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv4 routes to interface.
n.applyBootRoutesV4(ctRoutes)
}
// Remove any existing IPv6 firewall rules.
if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 6)
if err != nil {
return err
}
}
// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err = n.bootRoutesV6()
if err != nil {
return err
}
// Flush all IPv6 addresses and routes.
_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv6.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Enable IPv6 for the subnet.
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
if err != nil {
return err
}
// Parse the subnet.
ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv6.address")
}
subnetSize, _ := subnet.Mask.Size()
if subnetSize > 64 {
n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
if n.DHCPv6Subnet() != nil {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
// Setup basic iptables overrides for DHCP/DNS.
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
if err != nil {
return err
}
}
// Build DHCP configuration.
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
expiry := "1h"
if n.config["ipv6.dhcp.expiry"] != "" {
expiry = n.config["ipv6.dhcp.expiry"]
}
if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
if n.config["ipv6.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
}
// Allow forwarding.
if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
// Get a list of proc entries.
entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
if err != nil {
return err
}
// First set accept_ra to 2 for everything.
for _, entry := range entries {
content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
if err == nil && string(content) != "1\n" {
continue
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
if err != nil && !os.IsNotExist(err) {
return err
}
}
// Then set forwarding for all of them.
for _, entry := range entries {
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
if err != nil && !os.IsNotExist(err) {
return err
}
}
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
if err != nil {
return err
}
}
} else {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
if err != nil {
return err
}
}
}
// Add the address.
_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv6.nat"]) {
var srcIP net.IP
if n.config["ipv6.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv6.nat.address"])
}
if n.config["ipv6.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes.
if n.config["ipv6.routes"] != "" {
for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv6 routes to interface.
n.applyBootRoutesV6(ctRoutes)
}
// Configure the fan.
dnsClustered := false
dnsClusteredAddress := ""
var overlaySubnet *net.IPNet
if n.config["bridge.mode"] == "fan" {
tunName := fmt.Sprintf("%s-fan", n.name)
// Parse the underlay.
underlay := n.config["fan.underlay_subnet"]
_, underlaySubnet, err := net.ParseCIDR(underlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.underlay_subnet")
}
// Parse the overlay.
overlay := n.config["fan.overlay_subnet"]
if overlay == "" {
overlay = "240.0.0.0/8"
}
_, overlaySubnet, err = net.ParseCIDR(overlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.overlay_subnet")
}
// Get the address.
fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
if err != nil {
return err
}
addr := strings.Split(fanAddress, "/")
if n.config["fan.type"] == "ipip" {
fanAddress = fmt.Sprintf("%s/24", addr[0])
}
// Update the MTU based on overlay device (if available).
fanMtuInt, err := GetDevMTU(devName)
if err == nil {
// Apply overhead.
if n.config["fan.type"] == "ipip" {
fanMtuInt = fanMtuInt - 20
} else {
fanMtuInt = fanMtuInt - 50
}
// Apply changes.
fanMtu := fmt.Sprintf("%d", fanMtuInt)
if fanMtu != mtu {
mtu = fanMtu
if n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
if err != nil {
return err
}
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
}
}
// Parse the host subnet.
_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
if err != nil {
return errors.Wrapf(err, "Failed parsing fan address")
}
// Add the address.
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
if err != nil {
return err
}
// Update the dnsmasq config.
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
dnsmasqCmd = append(dnsmasqCmd, []string{
fmt.Sprintf("--listen-address=%s", addr[0]),
"--dhcp-no-override", "--dhcp-authoritative",
fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)
// Setup the tunnel.
if n.config["fan.type"] == "ipip" {
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
if err != nil {
return err
}
// Fails if the map is already set.
shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
if err != nil {
return err
}
} else {
vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
if err != nil {
return err
}
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Configure NAT.
if shared.IsTrue(n.config["ipv4.nat"]) {
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
if err != nil {
return err
}
}
}
// Setup clustered DNS.
clusterAddress, err := node.ClusterAddress(n.state.Node)
if err != nil {
return err
}
// If clusterAddress is non-empty, this indicates the intention for this node to be
// part of a cluster and so we should ensure that dnsmasq and forkdns are started
// in cluster mode. Note: During LXD initialisation the cluster may not actually be
// setup yet, but we want the DNS processes to be ready for when it is.
if clusterAddress != "" {
dnsClustered = true
}
dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
}
// Configure tunnels.
for _, tunnel := range tunnels {
getConfig := func(key string) string {
return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
}
tunProtocol := getConfig("protocol")
tunLocal := getConfig("local")
tunRemote := getConfig("remote")
tunName := fmt.Sprintf("%s-%s", n.name, tunnel)
// Configure the tunnel.
cmd := []string{"ip", "link", "add", "dev", tunName}
if tunProtocol == "gre" {
// Skip partial configs.
if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
continue
}
cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
} else if tunProtocol == "vxlan" {
tunGroup := getConfig("group")
tunInterface := getConfig("interface")
// Skip partial configs.
if tunProtocol == "" {
continue
}
cmd = append(cmd, []string{"type", "vxlan"}...)
if tunLocal != "" && tunRemote != "" {
cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
} else {
if tunGroup == "" {
tunGroup = "239.0.0.1"
}
devName := tunInterface
if devName == "" {
_, devName, err = DefaultGatewaySubnetV4()
if err != nil {
return err
}
}
cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
}
tunPort := getConfig("port")
if tunPort == "" {
tunPort = "0"
}
cmd = append(cmd, []string{"dstport", tunPort}...)
tunID := getConfig("id")
if tunID == "" {
tunID = "1"
}
cmd = append(cmd, []string{"id", tunID}...)
tunTTL := getConfig("ttl")
if tunTTL == "" {
tunTTL = "1"
}
cmd = append(cmd, []string{"ttl", tunTTL}...)
}
// Create the interface.
_, err = shared.RunCommand(cmd[0], cmd[1:]...)
if err != nil {
return err
}
// Bridge it and bring up.
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Generate and load apparmor profiles.
err = apparmor.NetworkLoad(n.state, n)
if err != nil {
return err
}
// Kill any existing dnsmasq and forkdns daemon for this network.
err = dnsmasq.Kill(n.name, false)
if err != nil {
return err
}
err = n.killForkDNS()
if err != nil {
return err
}
// Configure dnsmasq.
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Setup the dnsmasq domain.
dnsDomain := n.config["dns.domain"]
if dnsDomain == "" {
dnsDomain = "lxd"
}
if n.config["dns.mode"] != "none" {
if dnsClustered {
dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
}
}
// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
if err != nil {
return err
}
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))
// Attempt to drop privileges.
if n.state.OS.UnprivUser != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
}
if n.state.OS.UnprivGroup != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
}
// Create DHCP hosts directory.
if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
if err != nil {
return err
}
}
// Check for dnsmasq.
_, err := exec.LookPath("dnsmasq")
if err != nil {
return fmt.Errorf("dnsmasq is required for LXD managed bridges")
}
// Update the static leases.
err = UpdateDNSMasqStatic(n.state, n.name)
if err != nil {
return err
}
// Create subprocess object dnsmasq.
p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
if err != nil {
return fmt.Errorf("Failed to create subprocess: %s", err)
}
// Apply AppArmor confinement.
if n.config["raw.dnsmasq"] == "" {
p.SetApparmor(apparmor.DnsmasqProfileName(n))
} else {
n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
}
// Start dnsmasq.
err = p.Start()
if err != nil {
return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
}
err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
if err != nil {
// Kill Process if started, but could not save the file.
err2 := p.Stop()
if err != nil {
return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
}
return fmt.Errorf("Failed to save subprocess details: %s", err)
}
// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
if dnsClustered {
// Create forkdns servers directory.
if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
if err != nil {
return err
}
}
// Create forkdns servers.conf file if doesn't exist.
f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
err = n.spawnForkDNS(dnsClusteredAddress)
if err != nil {
return err
}
}
} else {
// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
if shared.PathExists(leasesPath) {
err := os.Remove(leasesPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
}
}
// And same for our PID file.
pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
if shared.PathExists(pidPath) {
err := os.Remove(pidPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
}
}
}
revert.Success()
return nil
}
// Stop stops the network.
// It tears down the bridge interface, firewall rules, DNS daemons and tunnel devices.
// Safe to call when the network is not running (returns nil immediately).
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")

	// Nothing to do if the bridge interface doesn't exist.
	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface using whichever driver created it.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		err := ovs.BridgeDelete(n.name)
		if err != nil {
			return err
		}
	} else {
		_, err := shared.RunCommand("ip", "link", "del", "dev", n.name)
		if err != nil {
			return err
		}
	}

	// Cleanup firewall rules.
	if usesIPv4Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	if usesIPv6Firewall(n.config) {
		err := n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	err := dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (tunnel interfaces are named "<network>-<tunnel>").
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Unload apparmor profiles.
	err = apparmor.NetworkUnload(n.state, n)
	if err != nil {
		return err
	}

	return nil
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
// Changes are made revertible: on failure the old config is re-applied to all nodes and the local bridge.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType request.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})

	// Replace any "auto" placeholder values in the requested config with generated values.
	err := n.populateAutoConfig(newNetwork.Config)
	if err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	dbUpdateNeeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeeded {
		return nil // Nothing changed.
	}

	// If the network as a whole has not had any previous creation attempts, or the node itself is still
	// pending, then don't apply the new settings to the node, just to the database record (ready for the
	// actual global create request to be initiated).
	if n.Status() == api.NetworkStatusPending || n.LocalStatus() == api.NetworkStatusPending {
		return n.common.update(newNetwork, targetNode, clientType)
	}

	revert := revert.New()
	defer revert.Fail()

	// Perform any pre-update cleanup needed if local node network was already created.
	if len(changedKeys) > 0 {
		// Define a function which reverts everything.
		revert.Add(func() {
			// Reset changes to all nodes and database.
			n.common.update(oldNetwork, targetNode, clientType)

			// Reset any change that was made to local bridge.
			n.setup(newNetwork.Config)
		})

		// Bring the bridge down entirely if the driver has changed.
		if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
			err = n.Stop()
			if err != nil {
				return err
			}
		}

		// Detach any external interfaces that should no longer be attached.
		if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
			// Build the list of interfaces requested in the new config.
			devices := []string{}
			for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				devices = append(devices, dev)
			}

			// Detach interfaces present in the old config but absent from the new one.
			for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				if dev == "" {
					continue
				}

				if !shared.StringInSlice(dev, devices) && InterfaceExists(dev) {
					err = DetachInterface(n.name, dev)
					if err != nil {
						return err
					}
				}
			}
		}
	}

	// Apply changes to all nodes and database.
	err = n.common.update(newNetwork, targetNode, clientType)
	if err != nil {
		return err
	}

	// Restart the network if needed.
	// NOTE(review): oldNetwork.Config is passed here, presumably so setup can diff against the
	// previous state — confirm against setup()'s parameter semantics.
	if len(changedKeys) > 0 {
		err = n.setup(oldNetwork.Config)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS spawns a forkdns daemon (as a managed subprocess of the LXD binary) listening
// on listenAddress:1053 for this network, with privileges dropped and the forkdns AppArmor
// profile applied. The subprocess PID is saved so it can be stopped later.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain (defaults to "lxd" when unset).
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)

	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill the started process, as we could not save its PID file.
		// BUGFIX: previously this checked err (always non-nil in this branch) instead of err2,
		// so the "could not kill" error was returned even when Stop() succeeded.
		err2 := p.Stop()
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	cert := n.state.Endpoints.NetworkCert()
	// NOTE(review): the loop variable "node" shadows the imported "node" package inside this loop.
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		client, err := cluster.Connect(node.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		// Record at most one address per node: the first global IPv4 address on the network.
		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network.
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the unique tunnel names defined by "tunnel.<name>.*" config keys,
// in order of first occurrence while iterating the config map.
func (n *bridge) getTunnels() []string {
	seen := map[string]struct{}{}
	tunnels := []string{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		name := strings.Split(key, ".")[1]
		if _, found := seen[name]; !found {
			seen[name] = struct{}{}
			tunnels = append(tunnels, name)
		}
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
// Each returned entry is the route line as printed by "ip route show" with any
// "linkdown" flag stripped (so it can be fed back to "ip route replace").
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// BUGFIX: Start/Scan/Wait errors were previously ignored, silently returning an
	// empty or partial route list on failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
// Each returned entry is the route line as printed by "ip route show" with any
// "linkdown" flag stripped (so it can be fed back to "ip route replace").
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// BUGFIX: Start/Scan/Wait errors were previously ignored, silently returning an
	// empty or partial route list on failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Failures are logged rather than returned, since the original routes are already gone.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, route := range routes {
		args := append([]string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
// Failures are logged rather than returned, since the original routes are already gone.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		args := append([]string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress computes this node's fan bridge address in CIDR form, by embedding the host part
// of the node's underlay address into the overlay subnet.
// Returns: the fan address ("a.b.c.1/size"), the underlay device name, and the underlay IP string.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	// Overlay prefix + underlay host bits + 8 bits per-node slice must fit within 32 bits.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get the IP
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	// Capture the string form now: ipBytes below aliases ip's backing storage, so the in-place
	// writes further down would otherwise change what ip.String() returns.
	ipStr := ip.String()

	// Force into IPv4 format
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}

	// Compute the IP: copy the overlay prefix octets, then shift the underlay host octets up.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}
	// This node always takes .1 within its slice of the overlay.
	ipBytes[3] = 1

	// err is nil at this point.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err
}
// addressForSubnet returns the first local non-loopback IP (and its interface name) that falls
// inside the given subnet, or an error if no interface carries such an address.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		// Skip addresses on the lo interface in case VIPs that are part of the underlay subnet
		// are configured there, as lo is unlikely to be the intended underlay interface.
		if iface.Name == "lo" {
			continue
		}

		addrs, addrErr := iface.Addrs()
		if addrErr != nil {
			continue
		}

		for _, addr := range addrs {
			ip, _, parseErr := net.ParseCIDR(addr.String())
			if parseErr != nil || !subnet.Contains(ip) {
				continue
			}

			return ip, iface.Name, nil
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops any running forkdns daemon for this network, using its saved PID file.
// It is a no-op if no PID file exists, and tolerates the process already having exited.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	// ErrNotRunning is fine: the daemon already exited on its own.
	if err != nil && err != subprocess.ErrNotRunning {
		// BUGFIX: error message previously said "dnsmasq" but this function kills forkdns.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
// One address is written per line; the file is built in a ".tmp" sibling and renamed into place.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Safety net for the early-return paths below; harmless double close otherwise.
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// BUGFIX: check the Close error so a file whose contents failed to flush is not
	// atomically renamed into the permanent location.
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// An unset "ipv4.firewall" key counts as enabled.
func (n *bridge) hasIPv4Firewall() bool {
	value := n.config["ipv4.firewall"]
	return value == "" || shared.IsTrue(value)
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// An unset "ipv6.firewall" key counts as enabled.
func (n *bridge) hasIPv6Firewall() bool {
	value := n.config["ipv6.firewall"]
	return value == "" || shared.IsTrue(value)
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
// Returns nil when DHCP is explicitly disabled, or when no subnet can be determined.
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// DHCP is disabled on this network (an empty ipv4.dhcp setting indicates enabled by default).
	if n.config["ipv4.dhcp"] != "" && !shared.IsTrue(n.config["ipv4.dhcp"]) {
		return nil
	}

	// Fan mode. Extract DHCP subnet from fan bridge address. Only detectable once network has started.
	// But if there is no address on the fan bridge then DHCP won't work anyway.
	if n.config["bridge.mode"] == "fan" {
		iface, err := net.InterfaceByName(n.name)
		if err != nil {
			return nil
		}

		addrs, err := iface.Addrs()
		if err != nil {
			return nil
		}

		for _, addr := range addrs {
			ip, subnet, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}

			// Note: err == nil is always true here given the continue above.
			if ip != nil && err == nil && ip.To4() != nil && ip.IsGlobalUnicast() {
				return subnet // Use first IPv4 unicast address on host for DHCP subnet.
			}
		}

		return nil // No addresses found, means DHCP must be disabled.
	}

	// Non-fan mode. Return configured bridge subnet directly.
	_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
	if err != nil {
		return nil
	}

	return subnet
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
// Returns nil when DHCPv6 is explicitly disabled or "ipv6.address" isn't a valid CIDR.
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	// An empty ipv6.dhcp setting means enabled by default.
	dhcp := n.config["ipv6.dhcp"]
	if dhcp != "" && !shared.IsTrue(dhcp) {
		return nil
	}

	if _, subnet, err := net.ParseCIDR(n.config["ipv6.address"]); err == nil {
		return subnet
	}

	return nil
}
lxd/network/driver/bridge: Ensure that DHCP firewall rules are added in fan mode
Recent changes in 8075f1d8235ba49d390e3fc1fcdfbb4800ab6c3a and 12eff7c16266bae721324ec001b288896ea5db59 to allow static NIC IPs with fan mode have broken the fan firewall setup that allows DHCP.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package network
import (
"bufio"
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises writes to the forkdns servers list file.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network.
type bridge struct {
	common // Embedded shared network driver implementation (config, state, logger, etc).
}
// Type returns the network type ("bridge").
func (n *bridge) Type() string {
	return "bridge"
}
// DBType returns the network type DB ID for the bridge driver.
func (n *bridge) DBType() db.NetworkType {
	return db.NetworkTypeBridge
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge
// interface on all cluster nodes. It is not suitable to use a static MAC address when
// "bridge.external_interfaces" is non-empty and the bridge interface has no IPv4 or IPv6 address
// set. This is because in a clustered environment the same bridge config is applied to all nodes,
// and if the bridge is being used to connect multiple nodes to the same network segment it would
// cause MAC conflicts to use the same MAC on all nodes. If an IP address is specified then
// connecting multiple nodes to the same network segment would also cause IP conflicts, so if an
// IP is defined we assume this is not being done. However if IP addresses are explicitly set to
// "none" and "bridge.external_interfaces" is set then it may not be safe to use the same MAC
// address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	// Fan mode breaks if using the same MAC address on each node.
	if config["bridge.mode"] == "fan" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)
	}

	// We can't be sure that multiple clustered nodes aren't connected to the same network
	// segment, so don't allow a static MAC for the bridge when it has external interfaces
	// attached but carries no IP addresses of its own.
	hasExternal := config["bridge.external_interfaces"] != ""
	noAddresses := config["ipv4.address"] == "none" && config["ipv6.address"] == "none"
	if hasExternal && noAddresses {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}

	return nil
}
// FillConfig fills requested config with any default values, then resolves any "auto"
// placeholders into generated values via populateAutoConfig.
func (n *bridge) FillConfig(config map[string]string) error {
	if config["bridge.mode"] == "fan" {
		// Fan mode defaults.
		if config["fan.underlay_subnet"] == "" {
			config["fan.underlay_subnet"] = "auto"
		}

		// We enable NAT by default even if address is manually specified.
		if config["ipv4.nat"] == "" {
			config["ipv4.nat"] = "true"
		}
	} else {
		// Standard mode defaults.
		if config["ipv4.address"] == "" {
			config["ipv4.address"] = "auto"
		}

		if config["ipv4.nat"] == "" && config["ipv4.address"] == "auto" {
			config["ipv4.nat"] = "true"
		}

		if config["ipv6.address"] == "" {
			// Only default to IPv6 when the kernel hasn't disabled it globally.
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.nat"] == "" && config["ipv6.address"] == "auto" {
			config["ipv6.nat"] = "true"
		}
	}

	// Now replace any "auto" keys with generated values.
	err := n.populateAutoConfig(config)
	if err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	return nil
}
// populateAutoConfig replaces "auto" in config with generated values, and re-validates
// the config if anything was generated.
func (n *bridge) populateAutoConfig(config map[string]string) error {
	changed := false

	// Generate a random IPv4 subnet if requested.
	if config["ipv4.address"] == "auto" {
		subnet, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = subnet
		changed = true
	}

	// Generate a random IPv6 subnet if requested.
	if config["ipv6.address"] == "auto" {
		subnet, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = subnet
		changed = true
	}

	// Derive the fan underlay from the default gateway's subnet if requested.
	if config["fan.underlay_subnet"] == "auto" {
		subnet, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = subnet.String()
		changed = true
	}

	// Re-validate config if changed.
	if changed && n.state != nil {
		return n.Validate(config)
	}

	return nil
}
// ValidateName validates network name: it must be a valid interface name and pass the
// common validation shared by all network types.
func (n *bridge) ValidateName(name string) error {
	if err := validInterfaceName(name); err != nil {
		return err
	}

	// Apply common name validation that applies to all network types.
	return n.common.ValidateName(name)
}
// Validate network config. Builds the per-key validation rules (including dynamic rules for
// "tunnel.<name>.<key>" entries), runs them, then performs composite cross-key checks
// (fan-mode restrictions, MTU bounds, cluster-wide MAC safety, OVN/DHCP range overlaps).
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return validate.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		}),
		"bridge.hwaddr": validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":    validate.Optional(validate.IsNetworkMTU),
		"bridge.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"standard", "fan"})
		},

		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": func(value string) error {
			if value == "auto" {
				return nil
			}

			return validate.Optional(validate.IsNetworkV4)(value)
		},
		"fan.type": func(value string) error {
			return validate.IsOneOf(value, []string{"vxlan", "ipip"})
		},

		"ipv4.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		},
		"ipv4.firewall": validate.Optional(validate.IsBool),
		"ipv4.nat":      validate.Optional(validate.IsBool),
		"ipv4.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),

		"ipv6.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		},
		"ipv6.firewall": validate.Optional(validate.IsBool),
		"ipv6.nat":      validate.Optional(validate.IsBool),
		"ipv6.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address":   validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":          validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":   validate.IsAny,
		"ipv6.dhcp.stateful": validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":   validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":        validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":       validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":    validate.Optional(validate.IsNetworkRangeV6List),

		"dns.domain": validate.IsAny,
		"dns.search": validate.IsAny,
		"dns.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},

		"raw.dnsmasq": validate.IsAny,

		"maas.subnet.ipv4": validate.IsAny,
		"maas.subnet.ipv6": validate.IsAny,
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			// Tunnel interfaces are named "<network>-<tunnel>" and must fit the kernel's
			// 15-char interface name limit (14 + 1 for the dash separator).
			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return validate.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface":
				// BUGFIX: was misspelled "inteface", so "tunnel.<name>.interface" keys
				// (which the startup code reads as "interface") never got a rule.
				rules[k] = validInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	err := n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		key := k

		// Bridge mode checks.
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks.
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if config["bridge.mode"] == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.ovn.ranges")
		}

		dhcpRanges, err := parseIPRanges(config["ipv4.dhcp.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.dhcp.ranges")
		}

		for _, ovnRange := range ovnRanges {
			for _, dhcpRange := range dhcpRanges {
				if IPRangesOverlap(ovnRange, dhcpRange) {
					return fmt.Errorf(`The range specified in "ipv4.ovn.ranges" (%q) cannot overlap with "ipv4.dhcp.ranges"`, ovnRange)
				}
			}
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv6.ovn.ranges")
		}

		// If stateful DHCPv6 is enabled, check OVN ranges don't overlap with DHCPv6 stateful ranges.
		// Otherwise SLAAC will be being used to generate client IPs and predefined ranges aren't used.
		if dhcpSubnet != nil && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
			dhcpRanges, err := parseIPRanges(config["ipv6.dhcp.ranges"], allowedNets...)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing ipv6.dhcp.ranges")
			}

			for _, ovnRange := range ovnRanges {
				for _, dhcpRange := range dhcpRanges {
					if IPRangesOverlap(ovnRange, dhcpRange) {
						return fmt.Errorf(`The range specified in "ipv6.ovn.ranges" (%q) cannot overlap with "ipv6.dhcp.ranges"`, ovnRange)
					}
				}
			}
		}
	}

	return nil
}
// Create checks whether the bridge interface name is used already.
func (n *bridge) Create(clientType request.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// Refuse creation when a host interface of the same name is already present.
	if InterfaceExists(n.name) {
		return fmt.Errorf("Network interface %q already exists", n.name)
	}

	return n.common.create(clientType)
}
// isRunning returns whether the network is up.
// The bridge is considered running when its host interface exists.
func (n *bridge) isRunning() bool {
	return InterfaceExists(n.name)
}
// Delete deletes a network.
func (n *bridge) Delete(clientType request.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Take the network down first if it is currently up.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clientType)
}
// Rename renames a network. It refuses to rename onto an existing interface
// name, stops the network if running, moves the forkdns log file, performs
// the common rename steps, then brings the network back up.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if InterfaceExists(newName) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// The source path must be resolved via shared.LogPath too; the bare
		// file name is relative to the process working directory, so the
		// previous os.Rename(forkDNSLogPath, ...) call could never move the
		// log file that the PathExists check above found.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
// Starting is a full application of the network setup with no previous
// config to clean up against.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	return n.setup(nil)
}
// setup restarts the network. oldConfig, when non-nil, holds the previous
// network config and is used to clear firewall rules that the new config no
// longer requires. It (re)creates the bridge, applies MTU/MAC/address
// settings, configures NAT, fan and tunnels, and (re)spawns dnsmasq/forkdns.
func (n *bridge) setup(oldConfig map[string]string) error {
	// If we are in mock mode, just no-op.
	if n.state.OS.MockMode {
		return nil
	}

	n.logger.Debug("Setting up network")

	revert := revert.New()
	defer revert.Fail()

	// Create directory.
	if !shared.PathExists(shared.VarPath("networks", n.name)) {
		err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
		if err != nil {
			return err
		}
	}

	// Create the bridge interface if doesn't exist.
	if !n.isRunning() {
		if n.config["bridge.driver"] == "openvswitch" {
			ovs := openvswitch.NewOVS()
			if !ovs.Installed() {
				return fmt.Errorf("Open vSwitch isn't installed on this system")
			}

			err := ovs.BridgeAdd(n.name, false)
			if err != nil {
				return err
			}

			revert.Add(func() { ovs.BridgeDelete(n.name) })
		} else {
			_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
			if err != nil {
				return err
			}

			revert.Add(func() { shared.RunCommand("ip", "link", "delete", "dev", n.name) })
		}
	}

	// Get a list of tunnels.
	tunnels := n.getTunnels()

	// IPv6 bridge configuration.
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		if !shared.PathExists("/proc/sys/net/ipv6") {
			return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
		}

		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
		if err != nil {
			return err
		}

		err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
		if err != nil {
			return err
		}
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device.
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Set the MTU.
	mtu := ""
	if n.config["bridge.mtu"] != "" {
		mtu = n.config["bridge.mtu"]
	} else if len(tunnels) > 0 {
		mtu = "1400"
	} else if n.config["bridge.mode"] == "fan" {
		if n.config["fan.type"] == "ipip" {
			mtu = "1480"
		} else {
			mtu = "1450"
		}
	}

	// Attempt to add a dummy device to the bridge to force the MTU.
	if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
		_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
		if err == nil {
			revert.Add(func() { shared.RunCommand("ip", "link", "delete", "dev", fmt.Sprintf("%s-mtu", n.name)) })

			_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
			if err == nil {
				// Best-effort attach; failures here are non-fatal.
				AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
			}
		}
	}

	// Now, set a default MTU.
	if mtu == "" {
		mtu = "1500"
	}

	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
	if err != nil {
		return err
	}

	// Always prefer static MAC address if set.
	hwAddr := n.config["bridge.hwaddr"]

	// If no cluster wide static MAC address set, then generate one.
	if hwAddr == "" {
		var seedNodeID int64

		if n.checkClusterWideMACSafe(n.config) != nil {
			// If not safe to use a cluster wide MAC or in fan mode, then use cluster node's ID to
			// generate a stable per-node & network derived random MAC.
			seedNodeID = n.state.Cluster.GetNodeID()
		} else {
			// If safe to use a cluster wide MAC, then use a static cluster node of 0 to generate a
			// stable per-network derived random MAC.
			seedNodeID = 0
		}

		// Load server certificate. This needs to be the same certificate for all nodes in a cluster.
		cert, err := util.LoadCert(n.state.OS.VarDir)
		if err != nil {
			return err
		}

		// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
		// standalone nodes with the same network ID connected to the same external network don't generate
		// the same MAC for their networks). It relies on the certificate being the same for all nodes in a
		// cluster to allow the same MAC to be generated on each bridge interface in the network when
		// seedNodeID is 0 (when safe to do so).
		seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())

		// Generate a hash from the randSourceNodeID and network ID to use as seed for random MAC.
		// Use the FNV-1a hash algorithm to convert our seed string into an int64 for use as seed.
		hash := fnv.New64a()
		_, err = io.WriteString(hash, seed)
		if err != nil {
			return err
		}

		// Initialise a non-cryptographic random number generator using the stable seed.
		r := rand.New(rand.NewSource(int64(hash.Sum64())))
		hwAddr = randomHwaddr(r)
		n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
	}

	// Set the MAC address on the bridge interface if specified.
	if hwAddr != "" {
		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", hwAddr)
		if err != nil {
			return err
		}
	}

	// Enable VLAN filtering for Linux bridges.
	if n.config["bridge.driver"] != "openvswitch" {
		// VLAN filtering failures are logged but non-fatal.
		err = BridgeVLANFilterSetStatus(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}

		// Set the default PVID for new ports to 1.
		err = BridgeVLANSetDefaultPVID(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}
	}

	// Bring it up.
	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
	if err != nil {
		return err
	}

	// Add any listed existing external interface.
	if n.config["bridge.external_interfaces"] != "" {
		for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
			entry = strings.TrimSpace(entry)
			iface, err := net.InterfaceByName(entry)
			if err != nil {
				n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
				continue
			}

			// Only attach interfaces that carry no global unicast address.
			unused := true
			addrs, err := iface.Addrs()
			if err == nil {
				for _, addr := range addrs {
					ip, _, err := net.ParseCIDR(addr.String())
					if ip != nil && err == nil && ip.IsGlobalUnicast() {
						unused = false
						break
					}
				}
			}

			if !unused {
				return fmt.Errorf("Only unconfigured network interfaces can be bridged")
			}

			err = AttachInterface(n.name, entry)
			if err != nil {
				return err
			}
		}
	}

	// Remove any existing IPv4 firewall rules.
	if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err := n.bootRoutesV4()
	if err != nil {
		return err
	}

	// Flush all IPv4 addresses and routes.
	_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv4 firewall (includes fan).
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		if n.hasDHCPv4() && n.hasIPv4Firewall() {
			// Setup basic iptables overrides for DHCP/DNS.
			err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
			if err != nil {
				return err
			}

			// Attempt a workaround for broken DHCP clients.
			err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
			if err != nil {
				return err
			}
		}

		// Allow forwarding.
		if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
			err = util.SysctlSet("net/ipv4/ip_forward", "1")
			if err != nil {
				return err
			}

			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
				if err != nil {
					return err
				}
			}
		}
	}

	// Start building process using subprocess package.
	command := "dnsmasq"
	dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
		"--except-interface=lo",
		"--pid-file=", // Disable attempt at writing a PID file.
		"--no-ping",   // --no-ping is very important to prevent delays to lease file updates.
		fmt.Sprintf("--interface=%s", n.name)}

	dnsmasqVersion, err := dnsmasq.GetVersion()
	if err != nil {
		return err
	}

	// --dhcp-rapid-commit option is only supported on >2.79.
	minVer, _ := version.NewDottedVersion("2.79")
	if dnsmasqVersion.Compare(minVer) > 0 {
		dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
	}

	if !daemon.Debug {
		// --quiet options are only supported on >2.67.
		minVer, _ := version.NewDottedVersion("2.67")

		if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
			dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
		}
	}

	// Configure IPv4.
	if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		// Parse the subnet.
		ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.address")
		}

		// Update the dnsmasq config.
		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
		if n.DHCPv4Subnet() != nil {
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			if n.config["ipv4.dhcp.gateway"] != "" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
			}

			if mtu != "1500" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
			}

			dnsSearch := n.config["dns.search"]
			if dnsSearch != "" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
			}

			expiry := "1h"
			if n.config["ipv4.dhcp.expiry"] != "" {
				expiry = n.config["ipv4.dhcp.expiry"]
			}

			if n.config["ipv4.dhcp.ranges"] != "" {
				for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
					dhcpRange = strings.TrimSpace(dhcpRange)
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
				}
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
			}
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
		if err != nil {
			return err
		}

		// Configure NAT
		if shared.IsTrue(n.config["ipv4.nat"]) {
			//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
			var srcIP net.IP
			if n.config["ipv4.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv4.nat.address"])
			}

			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes.
		if n.config["ipv4.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv4 routes to interface.
		n.applyBootRoutesV4(ctRoutes)
	}

	// Remove any existing IPv6 firewall rules.
	if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err = n.bootRoutesV6()
	if err != nil {
		return err
	}

	// Flush all IPv6 addresses and routes.
	_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv6.
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Enable IPv6 for the subnet.
		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
		if err != nil {
			return err
		}

		// Parse the subnet.
		ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv6.address")
		}

		subnetSize, _ := subnet.Mask.Size()
		if subnetSize > 64 {
			n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
		}

		// Update the dnsmasq config.
		dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
		if n.DHCPv6Subnet() != nil {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				// Setup basic iptables overrides for DHCP/DNS.
				err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
				if err != nil {
					return err
				}
			}

			// Build DHCP configuration.
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			expiry := "1h"
			if n.config["ipv6.dhcp.expiry"] != "" {
				expiry = n.config["ipv6.dhcp.expiry"]
			}

			if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
				if n.config["ipv6.dhcp.ranges"] != "" {
					for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
						dhcpRange = strings.TrimSpace(dhcpRange)
						dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
					}
				} else {
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
				}
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
			}
		} else {
			dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
		}

		// Allow forwarding.
		if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
			// Get a list of proc entries.
			entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
			if err != nil {
				return err
			}

			// First set accept_ra to 2 for everything.
			for _, entry := range entries {
				content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
				if err == nil && string(content) != "1\n" {
					continue
				}

				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			// Then set forwarding for all of them.
			for _, entry := range entries {
				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
				if err != nil {
					return err
				}
			}
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
		if err != nil {
			return err
		}

		// Configure NAT.
		if shared.IsTrue(n.config["ipv6.nat"]) {
			var srcIP net.IP
			if n.config["ipv6.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv6.nat.address"])
			}

			if n.config["ipv6.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes.
		if n.config["ipv6.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv6 routes to interface.
		n.applyBootRoutesV6(ctRoutes)
	}

	// Configure the fan.
	dnsClustered := false
	dnsClusteredAddress := ""
	var overlaySubnet *net.IPNet
	if n.config["bridge.mode"] == "fan" {
		tunName := fmt.Sprintf("%s-fan", n.name)

		// Parse the underlay.
		underlay := n.config["fan.underlay_subnet"]
		_, underlaySubnet, err := net.ParseCIDR(underlay)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing fan.underlay_subnet")
		}

		// Parse the overlay.
		overlay := n.config["fan.overlay_subnet"]
		if overlay == "" {
			overlay = "240.0.0.0/8"
		}

		_, overlaySubnet, err = net.ParseCIDR(overlay)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing fan.overlay_subnet")
		}

		// Get the address.
		fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
		if err != nil {
			return err
		}

		addr := strings.Split(fanAddress, "/")
		if n.config["fan.type"] == "ipip" {
			fanAddress = fmt.Sprintf("%s/24", addr[0])
		}

		// Update the MTU based on overlay device (if available).
		fanMtuInt, err := GetDevMTU(devName)
		if err == nil {
			// Apply overhead.
			if n.config["fan.type"] == "ipip" {
				fanMtuInt = fanMtuInt - 20
			} else {
				fanMtuInt = fanMtuInt - 50
			}

			// Apply changes.
			fanMtu := fmt.Sprintf("%d", fanMtuInt)
			if fanMtu != mtu {
				mtu = fanMtu
				if n.config["bridge.driver"] != "openvswitch" {
					_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
					if err != nil {
						return err
					}
				}

				_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
				if err != nil {
					return err
				}
			}
		}

		// Parse the host subnet.
		_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
		if err != nil {
			return errors.Wrapf(err, "Failed parsing fan address")
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
		if err != nil {
			return err
		}

		// Update the dnsmasq config.
		expiry := "1h"
		if n.config["ipv4.dhcp.expiry"] != "" {
			expiry = n.config["ipv4.dhcp.expiry"]
		}

		dnsmasqCmd = append(dnsmasqCmd, []string{
			fmt.Sprintf("--listen-address=%s", addr[0]),
			"--dhcp-no-override", "--dhcp-authoritative",
			fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
			fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
			"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)

		// Setup the tunnel.
		if n.config["fan.type"] == "ipip" {
			_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
			if err != nil {
				return err
			}

			// Fails if the map is already set.
			shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))

			_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
			if err != nil {
				return err
			}
		} else {
			vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)

			_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
			if err != nil {
				return err
			}

			err = AttachInterface(n.name, tunName)
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
			if err != nil {
				return err
			}
		}

		// Configure NAT.
		if shared.IsTrue(n.config["ipv4.nat"]) {
			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
				if err != nil {
					return err
				}
			}
		}

		// Setup clustered DNS.
		clusterAddress, err := node.ClusterAddress(n.state.Node)
		if err != nil {
			return err
		}

		// If clusterAddress is non-empty, this indicates the intention for this node to be
		// part of a cluster and so we should ensure that dnsmasq and forkdns are started
		// in cluster mode. Note: During LXD initialisation the cluster may not actually be
		// setup yet, but we want the DNS processes to be ready for when it is.
		if clusterAddress != "" {
			dnsClustered = true
		}

		dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
	}

	// Configure tunnels.
	for _, tunnel := range tunnels {
		getConfig := func(key string) string {
			return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
		}

		tunProtocol := getConfig("protocol")
		tunLocal := getConfig("local")
		tunRemote := getConfig("remote")
		tunName := fmt.Sprintf("%s-%s", n.name, tunnel)

		// Configure the tunnel.
		cmd := []string{"ip", "link", "add", "dev", tunName}
		if tunProtocol == "gre" {
			// Skip partial configs.
			if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
		} else if tunProtocol == "vxlan" {
			tunGroup := getConfig("group")
			tunInterface := getConfig("interface")

			// Skip partial configs.
			if tunProtocol == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "vxlan"}...)

			if tunLocal != "" && tunRemote != "" {
				cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
			} else {
				if tunGroup == "" {
					tunGroup = "239.0.0.1"
				}

				devName := tunInterface
				if devName == "" {
					_, devName, err = DefaultGatewaySubnetV4()
					if err != nil {
						return err
					}
				}

				cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
			}

			tunPort := getConfig("port")
			if tunPort == "" {
				tunPort = "0"
			}

			cmd = append(cmd, []string{"dstport", tunPort}...)

			tunID := getConfig("id")
			if tunID == "" {
				tunID = "1"
			}

			cmd = append(cmd, []string{"id", tunID}...)

			tunTTL := getConfig("ttl")
			if tunTTL == "" {
				tunTTL = "1"
			}

			cmd = append(cmd, []string{"ttl", tunTTL}...)
		}

		// Create the interface.
		_, err = shared.RunCommand(cmd[0], cmd[1:]...)
		if err != nil {
			return err
		}

		// Bridge it and bring up.
		err = AttachInterface(n.name, tunName)
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
		if err != nil {
			return err
		}
	}

	// Generate and load apparmor profiles.
	err = apparmor.NetworkLoad(n.state, n)
	if err != nil {
		return err
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	err = dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Configure dnsmasq.
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Setup the dnsmasq domain.
		dnsDomain := n.config["dns.domain"]
		if dnsDomain == "" {
			dnsDomain = "lxd"
		}

		if n.config["dns.mode"] != "none" {
			if dnsClustered {
				dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
				dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
			}
		}

		// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
		err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
		if err != nil {
			return err
		}

		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))

		// Attempt to drop privileges.
		if n.state.OS.UnprivUser != "" {
			dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
		}

		if n.state.OS.UnprivGroup != "" {
			dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
		}

		// Create DHCP hosts directory.
		if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
			err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
			if err != nil {
				return err
			}
		}

		// Check for dnsmasq.
		_, err := exec.LookPath("dnsmasq")
		if err != nil {
			return fmt.Errorf("dnsmasq is required for LXD managed bridges")
		}

		// Update the static leases.
		err = UpdateDNSMasqStatic(n.state, n.name)
		if err != nil {
			return err
		}

		// Create subprocess object dnsmasq.
		p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
		if err != nil {
			return fmt.Errorf("Failed to create subprocess: %s", err)
		}

		// Apply AppArmor confinement.
		if n.config["raw.dnsmasq"] == "" {
			p.SetApparmor(apparmor.DnsmasqProfileName(n))
		} else {
			n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
		}

		// Start dnsmasq.
		err = p.Start()
		if err != nil {
			return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
		}

		err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
		if err != nil {
			// Kill Process if started, but could not save the file.
			err2 := p.Stop()
			// Check the stop error (err2), not err: err is always non-nil in
			// this branch, so checking err here would always return the
			// combined error even when the kill itself succeeded.
			if err2 != nil {
				return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
			}

			return fmt.Errorf("Failed to save subprocess details: %s", err)
		}

		// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
		if dnsClustered {
			// Create forkdns servers directory.
			if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
				err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
				if err != nil {
					return err
				}
			}

			// Create forkdns servers.conf file if doesn't exist.
			f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
			if err != nil {
				return err
			}
			f.Close()

			err = n.spawnForkDNS(dnsClusteredAddress)
			if err != nil {
				return err
			}
		}
	} else {
		// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
		leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
		if shared.PathExists(leasesPath) {
			err := os.Remove(leasesPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
			}
		}

		// And same for our PID file.
		pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
		if shared.PathExists(pidPath) {
			err := os.Remove(pidPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
			}
		}
	}

	revert.Success()
	return nil
}
// Stop stops the network.
// It removes the bridge interface, clears firewall rules, kills the DNS
// daemons and deletes any tunnel devices belonging to this network.
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")

	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		if err := ovs.BridgeDelete(n.name); err != nil {
			return err
		}
	} else {
		if _, err := shared.RunCommand("ip", "link", "del", "dev", n.name); err != nil {
			return err
		}
	}

	// Cleanup firewall rules.
	if usesIPv4Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 4); err != nil {
			return err
		}
	}

	if usesIPv6Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 6); err != nil {
			return err
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	if err := dnsmasq.Kill(n.name, false); err != nil {
		return err
	}

	if err := n.killForkDNS(); err != nil {
		return err
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device.
	tunnelPrefix := fmt.Sprintf("%s-", n.name)
	for _, iface := range ifaces {
		if !strings.HasPrefix(iface.Name, tunnelPrefix) {
			continue
		}

		if _, err := shared.RunCommand("ip", "link", "del", "dev", iface.Name); err != nil {
			return err
		}
	}

	// Unload apparmor profiles.
	return apparmor.NetworkUnload(n.state, n)
}
// Update updates the network. The clientType indicates whether this update request is coming from a
// cluster notification, in which case the database is not updated and only the needed local changes
// are applied.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType request.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})

	if err := n.populateAutoConfig(newNetwork.Config); err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	dbUpdateNeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeded {
		return nil // Nothing changed.
	}

	// If the network as a whole has not had any previous creation attempts, or the node itself is still
	// pending, then don't apply the new settings to the node, just to the database record (ready for the
	// actual global create request to be initiated).
	if n.Status() == api.NetworkStatusPending || n.LocalStatus() == api.NetworkStatusPending {
		return n.common.update(newNetwork, targetNode, clientType)
	}

	revert := revert.New()
	defer revert.Fail()

	// Perform any pre-update cleanup needed if local node network was already created.
	if len(changedKeys) > 0 {
		// Register a revert hook that undoes everything.
		revert.Add(func() {
			// Reset changes to all nodes and database.
			n.common.update(oldNetwork, targetNode, clientType)

			// Reset any change that was made to local bridge.
			n.setup(newNetwork.Config)
		})

		// Bring the bridge down entirely if the driver has changed.
		if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
			if err := n.Stop(); err != nil {
				return err
			}
		}

		// Detach any external interfaces that should no longer be attached.
		if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
			// Collect the set of interfaces that remain attached under the new config.
			keep := []string{}
			for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
				keep = append(keep, strings.TrimSpace(dev))
			}

			for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				if dev == "" || shared.StringInSlice(dev, keep) || !InterfaceExists(dev) {
					continue
				}

				if err := DetachInterface(n.name, dev); err != nil {
					return err
				}
			}
		}
	}

	// Apply changes to all nodes and database.
	if err := n.common.update(newNetwork, targetNode, clientType); err != nil {
		return err
	}

	// Restart the network if needed.
	if len(changedKeys) > 0 {
		if err := n.setup(oldNetwork.Config); err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS starts a forkdns subprocess listening on the supplied address (port 1053) for this
// network, dropping privileges, applying the AppArmor profile and recording its PID file.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain.
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)

	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill Process if started, but could not save the file.
		// Bug fix: previously this checked err (always non-nil here) instead of err2,
		// so the combined error was returned even when the kill succeeded.
		err2 := p.Stop()
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	addresses := []string{}
	cert := n.state.Endpoints.NetworkCert()
	for _, member := range heartbeatData.Members {
		// No need to query ourselves.
		if member.Address == localAddress {
			continue
		}

		client, err := cluster.Connect(member.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		// Record the first global IPv4 address found on the network for this member.
		for _, addr := range state.Addresses {
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	} else if reflect.DeepEqual(curList, addresses) {
		// Current list matches the cluster list, nothing to do.
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the deduplicated list of tunnel names extracted from "tunnel.<name>.*" config keys.
func (n *bridge) getTunnels() []string {
	tunnels := []string{}
	seen := map[string]struct{}{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		// The tunnel name is the second dot-separated component of the key.
		name := strings.Split(key, ".")[1]
		if _, ok := seen[name]; !ok {
			seen[name] = struct{}{}
			tunnels = append(tunnels, name)
		}
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
// Any "linkdown" markers are stripped from the route lines so they can be re-applied later.
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: previously the errors from Start/Wait (and the scanner) were silently
	// discarded, which could return an empty route list with no indication of failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
// Any "linkdown" markers are stripped from the route lines so they can be re-applied later.
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: previously the errors from Start/Wait (and the scanner) were silently
	// discarded, which could return an empty route list with no indication of failure.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Failures are logged rather than returned, as a lost route cannot be recovered by aborting.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, route := range routes {
		args := append([]string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)
		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
// Failures are logged rather than returned, as a lost route cannot be recovered by aborting.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		args := append([]string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)
		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress derives this host's fan bridge address from the underlay/overlay subnet pair.
// Returns the fan address in CIDR form, the underlay interface name, and the plain underlay
// IP address string found on that interface.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	// The overlay prefix, the underlay host bits and 8 bits of per-host address space must
	// all fit within a 32-bit IPv4 address.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get the IP
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ip.String()

	// Force into IPv4 format
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}

	// Compute the IP: splice the host bits of the underlay address into the overlay prefix.
	// Which underlay octets carry host bits depends on the underlay/overlay sizes.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		// /16 overlay: keep two overlay octets, carry one underlay host octet.
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		// /8 overlay with /24 underlay: one host octet, middle octet zeroed.
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		// /8 overlay with /16 underlay: two host octets shifted up.
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}
	// The bridge always takes address .1 within its mapped fan subnet.
	ipBytes[3] = 1

	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err
}
// addressForSubnet scans the host's interfaces for an address inside the given subnet and
// returns the first match along with the name of the interface carrying it.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		// Skip addresses on lo interface in case VIPs are being used on that interface that are part of
		// the underlay subnet as is unlikely to be the actual intended underlay subnet interface.
		if iface.Name == "lo" {
			continue
		}

		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}

		for _, addr := range addrs {
			ip, _, err := net.ParseCIDR(addr.String())
			if err == nil && subnet.Contains(ip) {
				return ip, iface.Name, nil
			}
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops any running forkdns process for this network using its saved PID file.
// It is a no-op if no PID file exists or the process is not running.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Bug fix: the message previously said "dnsmasq" but this function kills forkdns.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Ensure the file is closed on early-return error paths; harmless no-op after the
	// explicit Close below succeeds.
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Close explicitly and check the error (previously ignored) so that a flush failure
	// is detected before the possibly-truncated file is promoted into place.
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// An unset ipv4.firewall setting counts as enabled.
func (n *bridge) hasIPv4Firewall() bool {
	v := n.config["ipv4.firewall"]
	return v == "" || shared.IsTrue(v)
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// An unset ipv6.firewall setting counts as enabled.
func (n *bridge) hasIPv6Firewall() bool {
	v := n.config["ipv6.firewall"]
	return v == "" || shared.IsTrue(v)
}
// hasDHCPv4 indicates whether the network has DHCPv4 enabled.
// An empty ipv4.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv4() bool {
	v := n.config["ipv4.dhcp"]
	return v == "" || shared.IsTrue(v)
}
// hasDHCPv6 indicates whether the network has DHCPv6 enabled.
// An empty ipv6.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv6() bool {
	v := n.config["ipv6.dhcp"]
	return v == "" || shared.IsTrue(v)
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// DHCP is disabled on this network.
	if !n.hasDHCPv4() {
		return nil
	}

	// Non-fan mode. Return configured bridge subnet directly.
	if n.config["bridge.mode"] != "fan" {
		_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
		if err != nil {
			return nil
		}

		return subnet
	}

	// Fan mode. Extract DHCP subnet from fan bridge address. Only detectable once network has started.
	// But if there is no address on the fan bridge then DHCP won't work anyway.
	iface, err := net.InterfaceByName(n.name)
	if err != nil {
		return nil
	}

	addrs, err := iface.Addrs()
	if err != nil {
		return nil
	}

	// Use first IPv4 unicast address on host for DHCP subnet.
	for _, addr := range addrs {
		ip, subnet, err := net.ParseCIDR(addr.String())
		if err != nil || ip == nil {
			continue
		}

		if ip.To4() != nil && ip.IsGlobalUnicast() {
			return subnet
		}
	}

	return nil // No addresses found, means DHCP must be disabled.
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	if !n.hasDHCPv6() {
		return nil // DHCP is disabled on this network.
	}

	// Derive the subnet from the configured bridge address.
	if _, subnet, err := net.ParseCIDR(n.config["ipv6.address"]); err == nil {
		return subnet
	}

	return nil
}
|
package network
import (
"context"
"encoding/binary"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/mdlayher/netx/eui64"
"github.com/pkg/errors"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
dbCluster "github.com/lxc/lxd/lxd/db/cluster"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/device/nictype"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
firewallDrivers "github.com/lxc/lxd/lxd/firewall/drivers"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/ip"
"github.com/lxc/lxd/lxd/network/acl"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/lxd/warnings"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises writes to the forkdns servers list file.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network.
type bridge struct {
	common // Embedded shared network driver implementation (config, state, logger, etc.).
}
// Type returns the network type ("bridge").
func (n *bridge) Type() string {
	return "bridge"
}
// DBType returns the network type DB ID used to store this network in the database.
func (n *bridge) DBType() db.NetworkType {
	return db.NetworkTypeBridge
}
// Info returns the network driver info. (The previous comment misnamed this method "Config".)
// Extends the common driver info by advertising address forward support.
func (n *bridge) Info() Info {
	info := n.common.Info()
	info.AddressForwards = true
	return info
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge interface on all
// cluster nodes. It is not suitable to use a static MAC address when "bridge.external_interfaces" is non-empty and
// the bridge interface has no IPv4 or IPv6 address set. This is because in a clustered environment the same bridge
// config is applied to all nodes, and if the bridge is being used to connect multiple nodes to the same network
// segment it would cause MAC conflicts to use the same MAC on all nodes. If an IP address is specified then
// connecting multiple nodes to the same network segment would also cause IP conflicts, so if an IP is defined
// then we assume this is not being done. However if IP addresses are explicitly set to "none" and
// "bridge.external_interfaces" is set then it may not be safe to use the same MAC address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	switch {
	// Fan mode breaks if using the same MAC address on each node.
	case config["bridge.mode"] == "fan":
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)

	// We can't be sure that multiple clustered nodes aren't connected to the same network segment so don't
	// use a static MAC address for the bridge interface to avoid introducing a MAC conflict.
	case config["bridge.external_interfaces"] != "" && config["ipv4.address"] == "none" && config["ipv6.address"] == "none":
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}

	return nil
}
// FillConfig fills requested config with any default values.
func (n *bridge) FillConfig(config map[string]string) error {
	// Apply mode-dependent defaults.
	if config["bridge.mode"] == "fan" {
		if config["fan.underlay_subnet"] == "" {
			config["fan.underlay_subnet"] = "auto"
		}

		// We enable NAT by default even if address is manually specified.
		if config["ipv4.nat"] == "" {
			config["ipv4.nat"] = "true"
		}
	} else {
		if config["ipv4.address"] == "" {
			config["ipv4.address"] = "auto"
		}

		if config["ipv4.nat"] == "" && config["ipv4.address"] == "auto" {
			config["ipv4.nat"] = "true"
		}

		if config["ipv6.address"] == "" {
			// Only default IPv6 to auto when the kernel has IPv6 enabled.
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.nat"] == "" && config["ipv6.address"] == "auto" {
			config["ipv6.nat"] = "true"
		}
	}

	// Now replace any "auto" keys with generated values.
	if err := n.populateAutoConfig(config); err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	return nil
}
// populateAutoConfig replaces "auto" in config with generated values.
func (n *bridge) populateAutoConfig(config map[string]string) error {
	changed := false

	// Generate a random IPv4 subnet when requested.
	if config["ipv4.address"] == "auto" {
		subnet, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = subnet
		changed = true
	}

	// Generate a random IPv6 subnet when requested.
	if config["ipv6.address"] == "auto" {
		subnet, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = subnet
		changed = true
	}

	// Derive the fan underlay from the default gateway's subnet when requested.
	if config["fan.underlay_subnet"] == "auto" {
		subnet, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = subnet.String()
		changed = true
	}

	// Re-validate config if changed.
	if changed && n.state != nil {
		return n.Validate(config)
	}

	return nil
}
// ValidateName validates network name.
func (n *bridge) ValidateName(name string) error {
	// Bridge networks are backed by a kernel interface, so the name must be interface-safe.
	if err := validate.IsInterfaceName(name); err != nil {
		return err
	}

	// Apply common name validation that applies to all network types.
	return n.common.ValidateName(name)
}
// Validate network config. Builds the per-key rule set (including dynamic tunnel and BGP rules),
// runs per-key validation, then performs composite checks (fan constraints, MTU limits, MAC
// safety, OVN/DHCP range overlaps and ACL existence).
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bgp.ipv4.nexthop": validate.Optional(validate.IsNetworkAddressV4),
		"bgp.ipv6.nexthop": validate.Optional(validate.IsNetworkAddressV6),
		"bridge.driver":    validate.Optional(validate.IsOneOf("native", "openvswitch")),
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validate.IsInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		}),
		"bridge.hwaddr":      validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":         validate.Optional(validate.IsNetworkMTU),
		"bridge.mode":        validate.Optional(validate.IsOneOf("standard", "fan")),
		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": validate.Optional(func(value string) error {
			if value == "auto" {
				return nil
			}

			return validate.IsNetworkV4(value)
		}),
		"fan.type": validate.Optional(validate.IsOneOf("vxlan", "ipip")),
		"ipv4.address": validate.Optional(func(value string) error {
			if validate.IsOneOf("none", "auto")(value) == nil {
				return nil
			}

			return validate.IsNetworkAddressCIDRV4(value)
		}),
		"ipv4.firewall":     validate.Optional(validate.IsBool),
		"ipv4.nat":          validate.Optional(validate.IsBool),
		"ipv4.nat.order":    validate.Optional(validate.IsOneOf("before", "after")),
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),
		"ipv6.address": validate.Optional(func(value string) error {
			if validate.IsOneOf("none", "auto")(value) == nil {
				return nil
			}

			return validate.IsNetworkAddressCIDRV6(value)
		}),
		"ipv6.firewall":         validate.Optional(validate.IsBool),
		"ipv6.nat":              validate.Optional(validate.IsBool),
		"ipv6.nat.order":        validate.Optional(validate.IsOneOf("before", "after")),
		"ipv6.nat.address":      validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":             validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":      validate.IsAny,
		"ipv6.dhcp.stateful":    validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":      validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":           validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":          validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":       validate.Optional(validate.IsNetworkRangeV6List),
		"dns.domain":            validate.IsAny,
		"dns.mode":              validate.Optional(validate.IsOneOf("dynamic", "managed", "none")),
		"dns.search":            validate.IsAny,
		"dns.zone.forward":      validate.Optional(n.validateZoneName),
		"dns.zone.reverse.ipv4": validate.Optional(n.validateZoneName),
		"dns.zone.reverse.ipv6": validate.Optional(n.validateZoneName),
		"raw.dnsmasq":           validate.IsAny,
		"maas.subnet.ipv4":      validate.IsAny,
		"maas.subnet.ipv6":      validate.IsAny,

		"security.acls":                        validate.IsAny,
		"security.acls.default.ingress.action": validate.Optional(validate.IsOneOf(acl.ValidActions...)),
		"security.acls.default.egress.action":  validate.Optional(validate.IsOneOf(acl.ValidActions...)),
		"security.acls.default.ingress.logged": validate.Optional(validate.IsBool),
		"security.acls.default.egress.logged":  validate.Optional(validate.IsBool),
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, extract the suffix.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = validate.Optional(validate.IsOneOf("gre", "vxlan"))
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface": // Bug fix: was misspelled "inteface", so tunnel.NAME.interface keys got no rule.
				rules[k] = validate.IsInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	// Add the BGP validation rules.
	bgpRules, err := n.bgpValidationRules(config)
	if err != nil {
		return err
	}

	for k, v := range bgpRules {
		rules[k] = v
	}

	// Validate the configuration.
	err = n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		key := k
		// Bridge mode checks
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if config["bridge.mode"] == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.ovn.ranges")
		}

		dhcpRanges, err := parseIPRanges(config["ipv4.dhcp.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.dhcp.ranges")
		}

		for _, ovnRange := range ovnRanges {
			for _, dhcpRange := range dhcpRanges {
				if IPRangesOverlap(ovnRange, dhcpRange) {
					return fmt.Errorf(`The range specified in "ipv4.ovn.ranges" (%q) cannot overlap with "ipv4.dhcp.ranges"`, ovnRange)
				}
			}
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv6.ovn.ranges")
		}

		// If stateful DHCPv6 is enabled, check OVN ranges don't overlap with DHCPv6 stateful ranges.
		// Otherwise SLAAC will be being used to generate client IPs and predefined ranges aren't used.
		if dhcpSubnet != nil && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
			dhcpRanges, err := parseIPRanges(config["ipv6.dhcp.ranges"], allowedNets...)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing ipv6.dhcp.ranges")
			}

			for _, ovnRange := range ovnRanges {
				for _, dhcpRange := range dhcpRanges {
					if IPRangesOverlap(ovnRange, dhcpRange) {
						return fmt.Errorf(`The range specified in "ipv6.ovn.ranges" (%q) cannot overlap with "ipv6.dhcp.ranges"`, ovnRange)
					}
				}
			}
		}
	}

	// Check Security ACLs are supported and exist.
	if config["security.acls"] != "" {
		err = acl.Exists(n.state, n.Project(), util.SplitNTrimSpace(config["security.acls"], ",", -1, true)...)
		if err != nil {
			return err
		}
	}

	return nil
}
// Create checks whether the bridge interface name is used already.
func (n *bridge) Create(clientType request.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// Refuse creation when a host interface with this name already exists.
	if !InterfaceExists(n.name) {
		return nil
	}

	return fmt.Errorf("Network interface %q already exists", n.name)
}
// isRunning returns whether the network is up.
// The bridge network is considered running when its bridge interface exists on the host.
func (n *bridge) isRunning() bool {
	return InterfaceExists(n.name)
}
// Delete deletes a network.
func (n *bridge) Delete(clientType request.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Delete all warnings regarding this network; failure is non-fatal.
	if err := warnings.DeleteWarningsByLocalNodeAndProjectAndEntity(n.state.Cluster, n.project, dbCluster.TypeNetwork, int(n.id)); err != nil {
		n.logger.Warn("Failed to delete warnings", log.Ctx{"err": err})
	}

	// Bring the network down if currently running.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clientType)
}
// Rename renames a network: stops it if running, moves the forkdns log file, applies the common
// rename steps and brings the network back up under the new name.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if InterfaceExists(newName) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// Bug fix: the rename source must be the full log path. Previously only the bare
		// file name was passed, which resolved relative to the working directory and did
		// not match the path checked for existence above.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
// On setup failure a startup-failure warning is recorded against this node; on success any such
// warning is resolved. Either way the original setup error (nil on success) is returned.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	err := n.setup(nil)
	if err != nil {
		// NOTE: the inner err deliberately shadows the outer setup error, which is still
		// returned at the end of the function.
		err := n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningNetworkStartupFailure, err.Error())
		if err != nil {
			n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
		}
	} else {
		// Startup succeeded, so clear any previous startup failure warnings.
		err := warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningNetworkStartupFailure, dbCluster.TypeNetwork, int(n.id))
		if err != nil {
			n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
		}
	}

	// Return the outer setup error (nil on success).
	return err
}
// setup restarts the network.
func (n *bridge) setup(oldConfig map[string]string) error {
// If we are in mock mode, just no-op.
if n.state.OS.MockMode {
return nil
}
n.logger.Debug("Setting up network")
revert := revert.New()
defer revert.Fail()
// Create directory.
if !shared.PathExists(shared.VarPath("networks", n.name)) {
err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
if err != nil {
return err
}
}
bridgeLink := &ip.Link{Name: n.name}
// Create the bridge interface if doesn't exist.
if !n.isRunning() {
if n.config["bridge.driver"] == "openvswitch" {
ovs := openvswitch.NewOVS()
if !ovs.Installed() {
return fmt.Errorf("Open vSwitch isn't installed on this system")
}
err := ovs.BridgeAdd(n.name, false)
if err != nil {
return err
}
revert.Add(func() { ovs.BridgeDelete(n.name) })
} else {
bridge := &ip.Bridge{
Link: *bridgeLink,
}
err := bridge.Add()
if err != nil {
return err
}
revert.Add(func() { bridge.Delete() })
}
}
// Get a list of tunnels.
tunnels := n.getTunnels()
// IPv6 bridge configuration.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
if !shared.PathExists("/proc/sys/net/ipv6") {
return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
}
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
if err != nil {
return err
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
if err != nil {
return err
}
}
// Get a list of interfaces.
ifaces, err := net.Interfaces()
if err != nil {
return err
}
// Cleanup any existing tunnel device.
for _, iface := range ifaces {
if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
tunLink := &ip.Link{Name: iface.Name}
err = tunLink.Delete()
if err != nil {
return err
}
}
}
// Set the MTU.
mtu := ""
if n.config["bridge.mtu"] != "" {
mtu = n.config["bridge.mtu"]
} else if len(tunnels) > 0 {
mtu = "1400"
} else if n.config["bridge.mode"] == "fan" {
if n.config["fan.type"] == "ipip" {
mtu = "1480"
} else {
mtu = "1450"
}
}
// Attempt to add a dummy device to the bridge to force the MTU.
if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
dummy := &ip.Dummy{
Link: ip.Link{Name: fmt.Sprintf("%s-mtu", n.name), MTU: mtu},
}
err = dummy.Add()
if err == nil {
revert.Add(func() { dummy.Delete() })
err = dummy.SetUp()
if err == nil {
AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
}
}
}
// Now, set a default MTU.
if mtu == "" {
mtu = "1500"
}
err = bridgeLink.SetMTU(mtu)
if err != nil {
return err
}
// Always prefer static MAC address if set.
hwAddr := n.config["bridge.hwaddr"]
// If no cluster wide static MAC address set, then generate one.
if hwAddr == "" {
var seedNodeID int64
if n.checkClusterWideMACSafe(n.config) != nil {
// If not safe to use a cluster wide MAC or in in fan mode, then use cluster node's ID to
// generate a stable per-node & network derived random MAC.
seedNodeID = n.state.Cluster.GetNodeID()
} else {
// If safe to use a cluster wide MAC, then use a static cluster node of 0 to generate a
// stable per-network derived random MAC.
seedNodeID = 0
}
// Load server certificate. This is needs to be the same certificate for all nodes in a cluster.
cert, err := util.LoadCert(n.state.OS.VarDir)
if err != nil {
return err
}
// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
// standalone nodes with the same network ID connected to the same external network don't generate
// the same MAC for their networks). It relies on the certificate being the same for all nodes in a
// cluster to allow the same MAC to be generated on each bridge interface in the network when
// seedNodeID is 0 (when safe to do so).
seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())
r, err := util.GetStableRandomGenerator(seed)
if err != nil {
return errors.Wrapf(err, "Failed generating stable random bridge MAC")
}
hwAddr = randomHwaddr(r)
n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
}
// Set the MAC address on the bridge interface if specified.
if hwAddr != "" {
err = bridgeLink.SetAddress(hwAddr)
if err != nil {
return err
}
}
// Enable VLAN filtering for Linux bridges.
if n.config["bridge.driver"] != "openvswitch" {
err = BridgeVLANFilterSetStatus(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
// Set the default PVID for new ports to 1.
err = BridgeVLANSetDefaultPVID(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
}
// Bring it up.
err = bridgeLink.SetUp()
if err != nil {
return err
}
// Add any listed existing external interface.
if n.config["bridge.external_interfaces"] != "" {
for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
entry = strings.TrimSpace(entry)
iface, err := net.InterfaceByName(entry)
if err != nil {
n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
continue
}
unused := true
addrs, err := iface.Addrs()
if err == nil {
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if ip != nil && err == nil && ip.IsGlobalUnicast() {
unused = false
break
}
}
}
if !unused {
return fmt.Errorf("Only unconfigured network interfaces can be bridged")
}
err = AttachInterface(n.name, entry)
if err != nil {
return err
}
}
}
// Remove any existing firewall rules.
fwClearIPVersions := []uint{}
if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
fwClearIPVersions = append(fwClearIPVersions, 4)
}
if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
fwClearIPVersions = append(fwClearIPVersions, 6)
}
if len(fwClearIPVersions) > 0 {
n.logger.Debug("Clearing firewall")
err = n.state.Firewall.NetworkClear(n.name, false, fwClearIPVersions)
if err != nil {
return errors.Wrapf(err, "Failed clearing firewall")
}
}
// Initialise a new firewall option set.
fwOpts := firewallDrivers.Opts{}
if n.hasIPv4Firewall() {
fwOpts.FeaturesV4 = &firewallDrivers.FeatureOpts{}
}
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6 = &firewallDrivers.FeatureOpts{}
}
if n.config["security.acls"] != "" {
fwOpts.ACL = true
}
// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err := n.bootRoutesV4()
if err != nil {
return err
}
// Flush all IPv4 addresses and routes.
addr := &ip.Addr{
DevName: n.name,
Scope: "global",
Family: ip.FamilyV4,
}
err = addr.Flush()
if err != nil {
return err
}
r := &ip.Route{
DevName: n.name,
Proto: "static",
Family: ip.FamilyV4,
}
err = r.Flush()
if err != nil {
return err
}
// Configure IPv4 firewall (includes fan).
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
if n.hasDHCPv4() && n.hasIPv4Firewall() {
fwOpts.FeaturesV4.ICMPDHCPDNSAccess = true
}
// Allow forwarding.
if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
err = util.SysctlSet("net/ipv4/ip_forward", "1")
if err != nil {
return err
}
if n.hasIPv4Firewall() {
fwOpts.FeaturesV4.ForwardingAllow = true
}
}
}
// Start building process using subprocess package.
command := "dnsmasq"
dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
"--except-interface=lo",
"--pid-file=", // Disable attempt at writing a PID file.
"--no-ping", // --no-ping is very important to prevent delays to lease file updates.
fmt.Sprintf("--interface=%s", n.name)}
dnsmasqVersion, err := dnsmasq.GetVersion()
if err != nil {
return err
}
// --dhcp-rapid-commit option is only supported on >2.79.
minVer, _ := version.NewDottedVersion("2.79")
if dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
}
if !daemon.Debug {
// --quiet options are only supported on >2.67.
minVer, _ := version.NewDottedVersion("2.67")
if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
}
}
// Configure IPv4.
if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
// Parse the subnet.
ipAddress, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv4.address")
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ipAddress.String()))
if n.DHCPv4Subnet() != nil {
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
if n.config["ipv4.dhcp.gateway"] != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
}
if mtu != "1500" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
}
dnsSearch := n.config["dns.search"]
if dnsSearch != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
}
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
if n.config["ipv4.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
}
}
// Add the address.
addr := &ip.Addr{
DevName: n.name,
Address: n.config["ipv4.address"],
Family: ip.FamilyV4,
}
err = addr.Add()
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv4.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv4.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv4.nat.address"])
}
fwOpts.SNATV4 = &firewallDrivers.SNATOpts{
SNATAddress: srcIP,
Subnet: subnet,
}
if n.config["ipv4.nat.order"] == "after" {
fwOpts.SNATV4.Append = true
}
}
// Add additional routes.
if n.config["ipv4.routes"] != "" {
for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
route = strings.TrimSpace(route)
r := &ip.Route{
DevName: n.name,
Route: route,
Proto: "static",
Family: ip.FamilyV4,
}
err = r.Add()
if err != nil {
return err
}
}
}
// Restore container specific IPv4 routes to interface.
n.applyBootRoutesV4(ctRoutes)
}
// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err = n.bootRoutesV6()
if err != nil {
return err
}
// Flush all IPv6 addresses and routes.
addr = &ip.Addr{
DevName: n.name,
Scope: "global",
Family: ip.FamilyV6,
}
err = addr.Flush()
if err != nil {
return err
}
r = &ip.Route{
DevName: n.name,
Proto: "static",
Family: ip.FamilyV6,
}
err = r.Flush()
if err != nil {
return err
}
// Configure IPv6.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Enable IPv6 for the subnet.
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
if err != nil {
return err
}
// Parse the subnet.
ipAddress, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv6.address")
}
subnetSize, _ := subnet.Mask.Size()
if subnetSize > 64 {
n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningLargerIPv6PrefixThanSupported, "")
if err != nil {
n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
}
} else {
err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningLargerIPv6PrefixThanSupported, dbCluster.TypeNetwork, int(n.id))
if err != nil {
n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
}
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ipAddress.String()), "--enable-ra"}...)
if n.DHCPv6Subnet() != nil {
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6.ICMPDHCPDNSAccess = true
}
// Build DHCP configuration.
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
expiry := "1h"
if n.config["ipv6.dhcp.expiry"] != "" {
expiry = n.config["ipv6.dhcp.expiry"]
}
if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
if n.config["ipv6.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
}
// Allow forwarding.
if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
// Get a list of proc entries.
entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
if err != nil {
return err
}
// First set accept_ra to 2 for everything.
for _, entry := range entries {
content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
if err == nil && string(content) != "1\n" {
continue
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
if err != nil && !os.IsNotExist(err) {
return err
}
}
// Then set forwarding for all of them.
for _, entry := range entries {
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
if err != nil && !os.IsNotExist(err) {
return err
}
}
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6.ForwardingAllow = true
}
}
// Add the address.
addr := &ip.Addr{
DevName: n.name,
Address: n.config["ipv6.address"],
Family: ip.FamilyV6,
}
err = addr.Add()
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv6.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv6.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv6.nat.address"])
}
fwOpts.SNATV6 = &firewallDrivers.SNATOpts{
SNATAddress: srcIP,
Subnet: subnet,
}
if n.config["ipv6.nat.order"] == "after" {
fwOpts.SNATV6.Append = true
}
}
// Add additional routes.
if n.config["ipv6.routes"] != "" {
for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
route = strings.TrimSpace(route)
r := &ip.Route{
DevName: n.name,
Route: route,
Proto: "static",
Family: ip.FamilyV6,
}
err = r.Add()
if err != nil {
return err
}
}
}
// Restore container specific IPv6 routes to interface.
n.applyBootRoutesV6(ctRoutes)
}
// Configure the fan.
dnsClustered := false
dnsClusteredAddress := ""
var overlaySubnet *net.IPNet
if n.config["bridge.mode"] == "fan" {
tunName := fmt.Sprintf("%s-fan", n.name)
// Parse the underlay.
underlay := n.config["fan.underlay_subnet"]
_, underlaySubnet, err := net.ParseCIDR(underlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.underlay_subnet")
}
// Parse the overlay.
overlay := n.config["fan.overlay_subnet"]
if overlay == "" {
overlay = "240.0.0.0/8"
}
_, overlaySubnet, err = net.ParseCIDR(overlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.overlay_subnet")
}
// Get the address.
fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
if err != nil {
return err
}
addr := strings.Split(fanAddress, "/")
if n.config["fan.type"] == "ipip" {
fanAddress = fmt.Sprintf("%s/24", addr[0])
}
// Update the MTU based on overlay device (if available).
fanMtuInt, err := GetDevMTU(devName)
if err == nil {
// Apply overhead.
if n.config["fan.type"] == "ipip" {
fanMtuInt = fanMtuInt - 20
} else {
fanMtuInt = fanMtuInt - 50
}
// Apply changes.
fanMtu := fmt.Sprintf("%d", fanMtuInt)
if fanMtu != mtu {
mtu = fanMtu
if n.config["bridge.driver"] != "openvswitch" {
mtuLink := &ip.Link{Name: fmt.Sprintf("%s-mtu", n.name)}
err = mtuLink.SetMTU(mtu)
if err != nil {
return err
}
}
err = bridgeLink.SetMTU(mtu)
if err != nil {
return err
}
}
}
// Parse the host subnet.
_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
if err != nil {
return errors.Wrapf(err, "Failed parsing fan address")
}
// Add the address.
ipAddr := &ip.Addr{
DevName: n.name,
Address: fanAddress,
Family: ip.FamilyV4,
}
err = ipAddr.Add()
if err != nil {
return err
}
// Update the dnsmasq config.
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
dnsmasqCmd = append(dnsmasqCmd, []string{
fmt.Sprintf("--listen-address=%s", addr[0]),
"--dhcp-no-override", "--dhcp-authoritative",
fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)
// Setup the tunnel.
if n.config["fan.type"] == "ipip" {
r := &ip.Route{
DevName: "tunl0",
Family: ip.FamilyV4,
}
err = r.Flush()
if err != nil {
return err
}
tunLink := &ip.Link{Name: "tunl0"}
err = tunLink.SetUp()
if err != nil {
return err
}
// Fails if the map is already set.
tunLink.Change("ipip", fmt.Sprintf("%s:%s", overlay, underlay))
r = &ip.Route{
DevName: "tunl0",
Route: overlay,
Src: addr[0],
Proto: "static",
}
err = r.Add()
if err != nil {
return err
}
} else {
vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
vxlan := &ip.Vxlan{
Link: ip.Link{Name: tunName},
VxlanID: vxlanID,
DevName: devName,
DstPort: "0",
Local: devAddr,
FanMap: fmt.Sprintf("%s:%s", overlay, underlay),
}
err = vxlan.Add()
if err != nil {
return err
}
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
err = vxlan.SetMTU(mtu)
if err != nil {
return err
}
err = vxlan.SetUp()
if err != nil {
return err
}
err = bridgeLink.SetUp()
if err != nil {
return err
}
}
// Configure NAT.
if shared.IsTrue(n.config["ipv4.nat"]) {
fwOpts.SNATV4 = &firewallDrivers.SNATOpts{
SNATAddress: nil, // Use MASQUERADE mode.
Subnet: overlaySubnet,
}
if n.config["ipv4.nat.order"] == "after" {
fwOpts.SNATV4.Append = true
}
}
// Setup clustered DNS.
clusterAddress, err := node.ClusterAddress(n.state.Node)
if err != nil {
return err
}
// If clusterAddress is non-empty, this indicates the intention for this node to be
// part of a cluster and so we should ensure that dnsmasq and forkdns are started
// in cluster mode. Note: During LXD initialisation the cluster may not actually be
// setup yet, but we want the DNS processes to be ready for when it is.
if clusterAddress != "" {
dnsClustered = true
}
dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
}
// Configure tunnels.
for _, tunnel := range tunnels {
getConfig := func(key string) string {
return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
}
tunProtocol := getConfig("protocol")
tunLocal := getConfig("local")
tunRemote := getConfig("remote")
tunName := fmt.Sprintf("%s-%s", n.name, tunnel)
// Configure the tunnel.
if tunProtocol == "gre" {
// Skip partial configs.
if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
continue
}
gretap := &ip.Gretap{
Link: ip.Link{Name: tunName},
Local: tunLocal,
Remote: tunRemote,
}
err := gretap.Add()
if err != nil {
return err
}
} else if tunProtocol == "vxlan" {
tunGroup := getConfig("group")
tunInterface := getConfig("interface")
// Skip partial configs.
if tunProtocol == "" {
continue
}
vxlan := &ip.Vxlan{
Link: ip.Link{Name: tunName},
}
if tunLocal != "" && tunRemote != "" {
vxlan.Local = tunLocal
vxlan.Remote = tunRemote
} else {
if tunGroup == "" {
tunGroup = "239.0.0.1"
}
devName := tunInterface
if devName == "" {
_, devName, err = DefaultGatewaySubnetV4()
if err != nil {
return err
}
}
vxlan.Group = tunGroup
vxlan.DevName = devName
}
tunPort := getConfig("port")
if tunPort == "" {
tunPort = "0"
}
vxlan.DstPort = tunPort
tunID := getConfig("id")
if tunID == "" {
tunID = "1"
}
vxlan.VxlanID = tunID
tunTTL := getConfig("ttl")
if tunTTL == "" {
tunTTL = "1"
}
vxlan.TTL = tunTTL
err := vxlan.Add()
if err != nil {
return err
}
}
// Bridge it and bring up.
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
tunLink := &ip.Link{Name: tunName}
err = tunLink.SetMTU(mtu)
if err != nil {
return err
}
// Bring up tunnel interface.
err = tunLink.SetUp()
if err != nil {
return err
}
// Bring up network interface.
err = bridgeLink.SetUp()
if err != nil {
return err
}
}
// Generate and load apparmor profiles.
err = apparmor.NetworkLoad(n.state, n)
if err != nil {
return err
}
// Kill any existing dnsmasq and forkdns daemon for this network.
err = dnsmasq.Kill(n.name, false)
if err != nil {
return err
}
err = n.killForkDNS()
if err != nil {
return err
}
// Configure dnsmasq.
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Setup the dnsmasq domain.
dnsDomain := n.config["dns.domain"]
if dnsDomain == "" {
dnsDomain = "lxd"
}
if n.config["dns.mode"] != "none" {
dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
dnsmasqCmd = append(dnsmasqCmd, "--interface-name", fmt.Sprintf("_gateway.%s,%s", dnsDomain, n.name))
if dnsClustered {
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
} else {
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/", dnsDomain))
}
}
// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
if err != nil {
return err
}
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))
// Attempt to drop privileges.
if n.state.OS.UnprivUser != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
}
if n.state.OS.UnprivGroup != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
}
// Create DHCP hosts directory.
if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
if err != nil {
return err
}
}
// Check for dnsmasq.
_, err := exec.LookPath("dnsmasq")
if err != nil {
return fmt.Errorf("dnsmasq is required for LXD managed bridges")
}
// Update the static leases.
err = UpdateDNSMasqStatic(n.state, n.name)
if err != nil {
return err
}
// Create subprocess object dnsmasq.
dnsmasqLogPath := shared.LogPath(fmt.Sprintf("dnsmasq.%s.log", n.name))
p, err := subprocess.NewProcess(command, dnsmasqCmd, "", dnsmasqLogPath)
if err != nil {
return fmt.Errorf("Failed to create subprocess: %s", err)
}
// Apply AppArmor confinement.
if n.config["raw.dnsmasq"] == "" {
p.SetApparmor(apparmor.DnsmasqProfileName(n))
err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningAppArmorDisabledDueToRawDnsmasq, dbCluster.TypeNetwork, int(n.id))
if err != nil {
n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
}
} else {
n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningAppArmorDisabledDueToRawDnsmasq, "")
if err != nil {
n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
}
}
// Start dnsmasq.
err = p.Start()
if err != nil {
return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
}
// Check dnsmasq started OK.
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond*time.Duration(500)))
_, err = p.Wait(ctx)
if errors.Cause(err) != context.DeadlineExceeded {
stderr, _ := ioutil.ReadFile(dnsmasqLogPath)
// Just log an error if dnsmasq has exited, and still proceed with normal setup so we
// don't leave the firewall in an inconsistent state.
n.logger.Error("The dnsmasq process exited prematurely", log.Ctx{"err": err, "stderr": strings.TrimSpace(string(stderr))})
}
cancel()
err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
if err != nil {
// Kill Process if started, but could not save the file.
err2 := p.Stop()
if err != nil {
return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
}
return fmt.Errorf("Failed to save subprocess details: %s", err)
}
// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
if dnsClustered {
// Create forkdns servers directory.
if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
if err != nil {
return err
}
}
// Create forkdns servers.conf file if doesn't exist.
f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
err = n.spawnForkDNS(dnsClusteredAddress)
if err != nil {
return err
}
}
} else {
// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
if shared.PathExists(leasesPath) {
err := os.Remove(leasesPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq leases file %q", leasesPath)
}
}
// And same for our PID file.
pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
if shared.PathExists(pidPath) {
err := os.Remove(pidPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq pid file %q", pidPath)
}
}
}
// Setup firewall.
n.logger.Debug("Setting up firewall")
err = n.state.Firewall.NetworkSetup(n.name, fwOpts)
if err != nil {
return errors.Wrapf(err, "Failed to setup firewall")
}
if fwOpts.ACL {
aclNet := acl.NetworkACLUsage{
Name: n.Name(),
Type: n.Type(),
ID: n.ID(),
Config: n.Config(),
}
n.logger.Debug("Applying up firewall ACLs")
err = acl.FirewallApplyACLRules(n.state, n.logger, n.Project(), aclNet)
if err != nil {
return err
}
}
// Setup network address forwards.
err = n.forwardSetupFirewall()
if err != nil {
return err
}
// Setup BGP.
err = n.bgpSetup(oldConfig)
if err != nil {
return err
}
revert.Success()
return nil
}
// Stop brings the network down: clears BGP state, deletes the bridge device,
// removes firewall rules, kills the DNS daemons and deletes any tunnel devices
// belonging to this network, then unloads the apparmor profiles.
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")
	// Nothing to do if the bridge isn't currently running.
	if !n.isRunning() {
		return nil
	}
	// Clear BGP.
	err := n.bgpClear(n.config)
	if err != nil {
		return err
	}
	// Destroy the bridge interface using whichever driver created it.
	switch n.config["bridge.driver"] {
	case "openvswitch":
		ovs := openvswitch.NewOVS()
		if err := ovs.BridgeDelete(n.name); err != nil {
			return err
		}
	default:
		bridgeLink := &ip.Link{Name: n.name}
		if err := bridgeLink.Delete(); err != nil {
			return err
		}
	}
	// Fully clear firewall setup.
	fwClearIPVersions := []uint{}
	if usesIPv4Firewall(n.config) {
		fwClearIPVersions = append(fwClearIPVersions, 4)
	}
	if usesIPv6Firewall(n.config) {
		fwClearIPVersions = append(fwClearIPVersions, 6)
	}
	if len(fwClearIPVersions) > 0 {
		n.logger.Debug("Deleting firewall")
		if err := n.state.Firewall.NetworkClear(n.name, true, fwClearIPVersions); err != nil {
			return errors.Wrapf(err, "Failed deleting firewall")
		}
	}
	// Kill any existing dnsmasq and forkdns daemon for this network.
	err = dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}
	err = n.killForkDNS()
	if err != nil {
		return err
	}
	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}
	// Cleanup any existing tunnel device (anything prefixed "<network>-").
	tunPrefix := fmt.Sprintf("%s-", n.name)
	for _, iface := range ifaces {
		if !strings.HasPrefix(iface.Name, tunPrefix) {
			continue
		}
		tunLink := &ip.Link{Name: iface.Name}
		err = tunLink.Delete()
		if err != nil {
			return err
		}
	}
	// Unload apparmor profiles.
	return apparmor.NetworkUnload(n.state, n)
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType request.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})
	if err := n.populateAutoConfig(newNetwork.Config); err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}
	dbUpdateNeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}
	if !dbUpdateNeeded {
		// Nothing changed.
		return nil
	}
	// If the network as a whole has not had any previous creation attempts, or the node itself is still
	// pending, then don't apply the new settings to the node, just to the database record (ready for the
	// actual global create request to be initiated).
	if n.Status() == api.NetworkStatusPending || n.LocalStatus() == api.NetworkStatusPending {
		return n.common.update(newNetwork, targetNode, clientType)
	}
	revert := revert.New()
	defer revert.Fail()
	// Perform any pre-update cleanup needed if local node network was already created.
	if len(changedKeys) > 0 {
		// Define a function which reverts everything.
		revert.Add(func() {
			// Reset changes to all nodes and database.
			n.common.update(oldNetwork, targetNode, clientType)
			// Reset any change that was made to local bridge.
			n.setup(newNetwork.Config)
		})
		// Bring the bridge down entirely if the driver has changed.
		if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
			if err := n.Stop(); err != nil {
				return err
			}
		}
		// Detach any external interfaces that should no longer be attached.
		if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
			// Build the list of interfaces that should remain attached.
			keep := []string{}
			for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
				keep = append(keep, strings.TrimSpace(dev))
			}
			for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				if dev == "" || shared.StringInSlice(dev, keep) || !InterfaceExists(dev) {
					continue
				}
				if err := DetachInterface(n.name, dev); err != nil {
					return err
				}
			}
		}
	}
	// Apply changes to all nodes and database.
	err = n.common.update(newNetwork, targetNode, clientType)
	if err != nil {
		return err
	}
	// Restart the network if needed.
	if len(changedKeys) > 0 {
		if err := n.setup(oldNetwork.Config); err != nil {
			return err
		}
	}
	revert.Success()
	return nil
}
// spawnForkDNS starts the forkdns DNS proxy for this network, listening on
// listenAddress:1053 for the network's DNS domain, dropping privileges and
// applying the forkdns AppArmor profile before saving its PID file.
// If the PID file cannot be saved, the started process is stopped again.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain.
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}
	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}
	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))
	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}
	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)
	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))
	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}
	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill process if started, but could not save the file.
		err2 := p.Stop()
		// Bug fix: check the Stop error (err2), not err (which is always
		// non-nil in this branch), so the plain save-failure path is reachable.
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}
		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}
	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}
	n.logger.Info("Refreshing forkdns peers")
	networkCert := n.state.Endpoints.NetworkCert()
	// Note: the loop variable is named "member" rather than "node" to avoid
	// shadowing the node package used above.
	for _, member := range heartbeatData.Members {
		// No need to query ourselves.
		if member.Address == localAddress {
			continue
		}
		client, err := cluster.Connect(member.Address, networkCert, n.state.ServerCert(), nil, true)
		if err != nil {
			return err
		}
		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}
		// Record the first global IPv4 address found on the member's bridge.
		for _, addr := range state.Addresses {
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}
			addresses = append(addresses, addr.Address)
			break
		}
	}
	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}
	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}
	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}
	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the distinct tunnel names declared via "tunnel.<name>.*" config keys.
func (n *bridge) getTunnels() []string {
	tunnels := []string{}
	for key := range n.config {
		rest := strings.TrimPrefix(key, "tunnel.")
		if rest == key {
			// Not a tunnel config key.
			continue
		}
		name := strings.Split(rest, ".")[0]
		if !shared.StringInSlice(name, tunnels) {
			tunnels = append(tunnels, name)
		}
	}
	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
func (n *bridge) bootRoutesV4() ([]string, error) {
	route := &ip.Route{
		DevName: n.name,
		Proto:   "boot",
		Family:  ip.FamilyV4,
	}
	// Show already returns (routes, error) in the shape we need.
	return route.Show()
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
func (n *bridge) bootRoutesV6() ([]string, error) {
	route := &ip.Route{
		DevName: n.name,
		Proto:   "boot",
		Family:  ip.FamilyV6,
	}
	// Show already returns (routes, error) in the shape we need.
	return route.Show()
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Failures are logged rather than returned, as the original routes are already gone.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, routeSpec := range routes {
		route := &ip.Route{
			DevName: n.name,
			Proto:   "boot",
			Family:  ip.FamilyV4,
		}
		if err := route.Replace(strings.Fields(routeSpec)); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		replacement := &ip.Route{
			DevName: n.name,
			Proto:   "boot",
			Family:  ip.FamilyV6,
		}

		// A failure here is non-fatal: the original route is already gone,
		// so log the problem and continue restoring the remaining routes.
		if err := replacement.Replace(strings.Fields(route)); err != nil {
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress derives this member's fan bridge address from the underlay and overlay subnets.
// It returns the computed address in CIDR notation ("a.b.c.d/size"), the underlay interface
// name, and the underlay IP string the address was derived from.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Quick checks.
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}
	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}
	// The underlay host bits (32-underlaySize) plus 8 bits for hosts within the
	// per-member subnet must fit after the overlay prefix.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}
	// Get the IP of the local interface that sits in the underlay subnet.
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ip.String()
	// Force into IPv4 format (4-byte representation) so bytes can be remapped below.
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}
	// Compute the IP: map the underlay host bytes into the overlay prefix.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}
	// The bridge always takes host address .1 within the per-member subnet.
	ipBytes[3] = 1
	// err is nil at this point; returned for symmetry with the error paths above.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err
}
// addressForSubnet returns the first non-loopback local IP address (and the name of the
// interface it is on) that falls within the given subnet.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		// Skip addresses on lo interface in case VIPs are being used on that interface that are part of
		// the underlay subnet as is unlikely to be the actual intended underlay subnet interface.
		if iface.Name == "lo" {
			continue
		}

		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}

		for _, addr := range addrs {
			ifaceIP, _, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}

			if subnet.Contains(ifaceIP) {
				return ifaceIP, iface.Name, nil
			}
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops the forkdns process for this network if one is running.
// It is a no-op if no pid file exists or the process is already stopped.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	// ErrNotRunning is acceptable: the process died on its own.
	if err != nil && err != subprocess.ErrNotRunning {
		// Fixed copy-paste error in message: this function kills forkdns, not dnsmasq.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Deferred close handles early-return paths; the explicit close below handles
	// the success path (double-close is harmless, its error is ignored).
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Close explicitly and check the error so that any buffered write failure is
	// detected before we atomically rename a potentially truncated file into place.
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
func (n *bridge) hasIPv4Firewall() bool {
	// IPv4 firewall is only enabled if there is a bridge ipv4.address or fan mode, and ipv4.firewall enabled.
	// When using fan bridge.mode, there can be an empty ipv4.address, so we assume it is active.
	hasAddress := n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"})
	firewallEnabled := n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])

	return hasAddress && firewallEnabled
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
func (n *bridge) hasIPv6Firewall() bool {
	// IPv6 firewall is only enabled if there is a bridge ipv6.address and ipv6.firewall enabled.
	hasAddress := !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"})
	firewallEnabled := n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])

	return hasAddress && firewallEnabled
}
// hasDHCPv4 indicates whether the network has DHCPv4 enabled.
// An empty ipv4.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv4() bool {
	dhcpSetting := n.config["ipv4.dhcp"]
	return dhcpSetting == "" || shared.IsTrue(dhcpSetting)
}
// hasDHCPv6 indicates whether the network has DHCPv6 enabled.
// An empty ipv6.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv6() bool {
	dhcpSetting := n.config["ipv6.dhcp"]
	return dhcpSetting == "" || shared.IsTrue(dhcpSetting)
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
// Returns nil when DHCP is disabled or no usable subnet can be determined.
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// DHCP is disabled on this network.
	if !n.hasDHCPv4() {
		return nil
	}
	// Fan mode. Extract DHCP subnet from fan bridge address. Only detectable once network has started.
	// But if there is no address on the fan bridge then DHCP won't work anyway.
	if n.config["bridge.mode"] == "fan" {
		iface, err := net.InterfaceByName(n.name)
		if err != nil {
			return nil // Bridge interface doesn't exist yet (network not started).
		}
		addrs, err := iface.Addrs()
		if err != nil {
			return nil
		}
		for _, addr := range addrs {
			ip, subnet, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}
			// Require an IPv4 global unicast address (skips link-local and IPv6).
			if ip != nil && err == nil && ip.To4() != nil && ip.IsGlobalUnicast() {
				return subnet // Use first IPv4 unicast address on host for DHCP subnet.
			}
		}
		return nil // No addresses found, means DHCP must be disabled.
	}
	// Non-fan mode. Return configured bridge subnet directly.
	_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
	if err != nil {
		return nil // Covers empty or "none" ipv4.address values too.
	}
	return subnet
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
// Returns nil when DHCPv6 is disabled or ipv6.address is not a valid CIDR.
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	if n.hasDHCPv6() {
		if _, subnet, err := net.ParseCIDR(n.config["ipv6.address"]); err == nil {
			return subnet
		}
	}

	return nil
}
// forwardConvertToFirewallForwards converts forwards into format compatible with the firewall package.
// If defaultTargetAddress is non-nil, a catch-all (portless) entry is emitted first, followed by one
// entry per port map.
func (n *bridge) forwardConvertToFirewallForwards(listenAddress net.IP, defaultTargetAddress net.IP, portMaps []*forwardPortMap) []firewallDrivers.AddressForward {
	var vips []firewallDrivers.AddressForward
	if defaultTargetAddress != nil {
		vips = append(vips, firewallDrivers.AddressForward{
			ListenAddress: listenAddress,
			TargetAddress: defaultTargetAddress,
		})
	}
	for _, portMap := range portMaps {
		vips = append(vips, firewallDrivers.AddressForward{
			ListenAddress: listenAddress,
			Protocol:      portMap.protocol,
			TargetAddress: portMap.targetAddress,
			ListenPorts:   portMap.listenPorts,
			TargetPorts:   portMap.targetPorts,
		})
	}
	return vips
}
// bridgeProjectNetworks takes a map of all networks in all projects and returns a filtered map of bridge networks.
func (n *bridge) bridgeProjectNetworks(projectNetworks map[string]map[int64]api.Network) map[string][]*api.Network {
	filtered := make(map[string][]*api.Network)

	for netProject, networks := range projectNetworks {
		for _, ni := range networks {
			// Take a copy so the pointer below doesn't alias the loop variable.
			network := ni

			// Skip non-bridge networks.
			if network.Type != "bridge" {
				continue
			}

			// Appending to a nil slice allocates it, so the first entry per
			// project needs no special casing.
			filtered[netProject] = append(filtered[netProject], &network)
		}
	}

	return filtered
}
// bridgeNetworkExternalSubnets returns a list of external subnets used by bridge networks. Networks are considered
// to be using external subnets for their ipv4.address and/or ipv6.address if they have NAT disabled, and/or if
// they have external NAT addresses specified.
func (n *bridge) bridgeNetworkExternalSubnets(bridgeProjectNetworks map[string][]*api.Network) ([]externalSubnetUsage, error) {
	externalSubnets := make([]externalSubnetUsage, 0)
	for netProject, networks := range bridgeProjectNetworks {
		for _, netInfo := range networks {
			// Check both address families for each network.
			for _, keyPrefix := range []string{"ipv4", "ipv6"} {
				// If NAT is disabled, then network subnet is an external subnet.
				if !shared.IsTrue(netInfo.Config[fmt.Sprintf("%s.nat", keyPrefix)]) {
					key := fmt.Sprintf("%s.address", keyPrefix)
					_, ipNet, err := net.ParseCIDR(netInfo.Config[key])
					if err != nil {
						continue // Skip invalid/unspecified network addresses.
					}
					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
					})
				}
				// Find any external subnets used for network SNAT.
				if netInfo.Config[fmt.Sprintf("%s.nat.address", keyPrefix)] != "" {
					key := fmt.Sprintf("%s.nat.address", keyPrefix)
					// SNAT addresses are single IPs, so use the full host prefix length.
					subnetSize := 128
					if keyPrefix == "ipv4" {
						subnetSize = 32
					}
					_, ipNet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", netInfo.Config[key], subnetSize))
					if err != nil {
						return nil, errors.Wrapf(err, "Failed parsing %q of %q in project %q", key, netInfo.Name, netProject)
					}
					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
						networkSNAT:    true,
					})
				}
				// Find any routes being used by the network.
				for _, cidr := range util.SplitNTrimSpace(netInfo.Config[fmt.Sprintf("%s.routes", keyPrefix)], ",", -1, true) {
					_, ipNet, err := net.ParseCIDR(cidr)
					if err != nil {
						continue // Skip invalid/unspecified network addresses.
					}
					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
					})
				}
			}
		}
	}
	return externalSubnets, nil
}
// bridgedNICExternalRoutes returns a list of external routes currently used by bridged NICs that are connected to
// networks specified.
func (n *bridge) bridgedNICExternalRoutes(bridgeProjectNetworks map[string][]*api.Network) ([]externalSubnetUsage, error) {
	externalRoutes := make([]externalSubnetUsage, 0)
	// Scan every instance in the cluster (nil filter) for bridged NIC routes.
	err := n.state.Cluster.InstanceList(nil, func(inst db.Instance, p db.Project, profiles []api.Profile) error {
		// Get the instance's effective network project name.
		instNetworkProject := project.NetworkProjectFromRecord(&p)
		if instNetworkProject != project.Default {
			return nil // Managed bridge networks can only exist in default project.
		}
		devices := db.ExpandInstanceDevices(deviceConfig.NewDevices(db.DevicesToAPI(inst.Devices)), profiles)
		// Iterate through each of the instance's devices, looking for bridged NICs that are linked to
		// networks specified.
		for devName, devConfig := range devices {
			if devConfig["type"] != "nic" {
				continue
			}
			// Check whether the NIC device references one of the networks supplied.
			if !NICUsesNetwork(devConfig, bridgeProjectNetworks[instNetworkProject]...) {
				continue
			}
			// For bridged NICs that are connected to networks specified, check if they have any
			// routes or external routes configured, and if so add them to the list to return.
			for _, key := range []string{"ipv4.routes", "ipv6.routes", "ipv4.routes.external", "ipv6.routes.external"} {
				for _, cidr := range util.SplitNTrimSpace(devConfig[key], ",", -1, true) {
					_, ipNet, _ := net.ParseCIDR(cidr)
					if ipNet == nil {
						// Skip if NIC device doesn't have a valid route.
						continue
					}
					externalRoutes = append(externalRoutes, externalSubnetUsage{
						subnet:          *ipNet,
						networkProject:  instNetworkProject,
						networkName:     devConfig["network"],
						instanceProject: inst.Project,
						instanceName:    inst.Name,
						instanceDevice:  devName,
					})
				}
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return externalRoutes, nil
}
// getExternalSubnetInUse returns information about usage of external subnets by bridge networks (and NICs
// connected to them) on this member. The result combines network subnets, NIC routes, and network forward
// listen addresses.
func (n *bridge) getExternalSubnetInUse() ([]externalSubnetUsage, error) {
	var err error
	var projectNetworks map[string]map[int64]api.Network
	var projectNetworksForwardsOnUplink map[string]map[int64][]string
	err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		// Get all managed networks across all projects.
		projectNetworks, err = tx.GetCreatedNetworks()
		if err != nil {
			return errors.Wrapf(err, "Failed to load all networks")
		}
		// Get all network forward listen addresses for forwards assigned to this specific cluster member.
		projectNetworksForwardsOnUplink, err = tx.GetProjectNetworkForwardListenAddressesOnMember()
		if err != nil {
			return errors.Wrapf(err, "Failed loading network forward listen addresses")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// Get managed bridge networks.
	bridgeProjectNetworks := n.bridgeProjectNetworks(projectNetworks)
	// Get external subnets used by other managed bridge networks.
	bridgeNetworkExternalSubnets, err := n.bridgeNetworkExternalSubnets(bridgeProjectNetworks)
	if err != nil {
		return nil, err
	}
	// Get external routes configured on bridged NICs.
	bridgedNICExternalRoutes, err := n.bridgedNICExternalRoutes(bridgeProjectNetworks)
	if err != nil {
		return nil, err
	}
	// Pre-size to at least the two known slices; forward addresses are appended after.
	externalSubnets := make([]externalSubnetUsage, 0, len(bridgeNetworkExternalSubnets)+len(bridgedNICExternalRoutes))
	externalSubnets = append(externalSubnets, bridgeNetworkExternalSubnets...)
	externalSubnets = append(externalSubnets, bridgedNICExternalRoutes...)
	// Add forward listen addresses to this list.
	for projectName, networks := range projectNetworksForwardsOnUplink {
		for networkID, listenAddresses := range networks {
			for _, listenAddress := range listenAddresses {
				// Convert listen address to subnet.
				listenAddressNet, err := ParseIPToNet(listenAddress)
				if err != nil {
					return nil, fmt.Errorf("Invalid existing forward listen address %q", listenAddress)
				}
				// Create an externalSubnetUsage for the listen address by using the network ID
				// of the listen address to retrieve the already loaded network name from the
				// projectNetworks map.
				externalSubnets = append(externalSubnets, externalSubnetUsage{
					subnet:         *listenAddressNet,
					networkProject: projectName,
					networkName:    projectNetworks[projectName][networkID].Name,
				})
			}
		}
	}
	return externalSubnets, nil
}
// ForwardCreate creates a network forward.
// It validates the listen address against other external subnet users, creates the DB
// record, applies the firewall rules, enables hairpin mode on NIC bridge ports where
// needed, and refreshes BGP prefixes. All steps are reverted on failure.
func (n *bridge) ForwardCreate(forward api.NetworkForwardsPost, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.
	// Check if there is an existing forward using the same listen address.
	_, _, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, forward.ListenAddress)
	if err == nil {
		return api.StatusErrorf(http.StatusConflict, "A forward for that listen address already exists")
	}
	// Convert listen address to subnet so we can check it's valid and can be used.
	listenAddressNet, err := ParseIPToNet(forward.ListenAddress)
	if err != nil {
		return errors.Wrapf(err, "Failed parsing address forward listen address %q", forward.ListenAddress)
	}
	_, err = n.forwardValidate(listenAddressNet.IP, &forward.NetworkForwardPut)
	if err != nil {
		return err
	}
	externalSubnetsInUse, err := n.getExternalSubnetInUse()
	if err != nil {
		return err
	}
	// Check the listen address subnet doesn't fall within any existing network external subnets.
	for _, externalSubnetUser := range externalSubnetsInUse {
		// Skip our own network's SNAT address (as it can be used for NICs in the network).
		if externalSubnetUser.networkSNAT && externalSubnetUser.networkProject == n.project && externalSubnetUser.networkName == n.name {
			continue
		}
		// Skip our own network (but not NIC devices on our own network).
		if externalSubnetUser.networkProject == n.project && externalSubnetUser.networkName == n.name && externalSubnetUser.instanceDevice == "" {
			continue
		}
		if SubnetContains(&externalSubnetUser.subnet, listenAddressNet) || SubnetContains(listenAddressNet, &externalSubnetUser.subnet) {
			// This error is purposefully vague so that it doesn't reveal any names of
			// resources potentially outside of the network.
			return fmt.Errorf("Forward listen address %q overlaps with another network or NIC", listenAddressNet.String())
		}
	}
	revert := revert.New()
	defer revert.Fail()
	// Create forward DB record.
	forwardID, err := n.state.Cluster.CreateNetworkForward(n.ID(), memberSpecific, &forward)
	if err != nil {
		return err
	}
	// On failure: remove the DB record and re-apply firewall/BGP state without it.
	revert.Add(func() {
		n.state.Cluster.DeleteNetworkForward(n.ID(), forwardID)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})
	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}
	// Check if hairpin mode needs to be enabled on active NIC bridge ports.
	if n.config["bridge.driver"] != "openvswitch" {
		brNetfilterEnabled := false
		for _, ipVersion := range []uint{4, 6} {
			if BridgeNetfilterEnabled(ipVersion) == nil {
				brNetfilterEnabled = true
				break
			}
		}
		// If br_netfilter is enabled and bridge has forwards, we enable hairpin mode on each NIC's bridge
		// port in case any of the forwards target the NIC and the instance attempts to connect to the
		// forward's listener. Without hairpin mode the target of the forward will not be able to
		// connect to the listener.
		if brNetfilterEnabled {
			listenAddresses, err := n.state.Cluster.GetNetworkForwardListenAddresses(n.ID(), true)
			if err != nil {
				return fmt.Errorf("Failed loading network forwards: %w", err)
			}
			// If we are the first forward on this bridge, enable hairpin mode on active NIC ports.
			if len(listenAddresses) <= 1 {
				var localNode string
				err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
					localNode, err = tx.GetLocalNodeName()
					if err != nil {
						return errors.Wrapf(err, "Failed to get local member name")
					}
					return err
				})
				if err != nil {
					return err
				}
				// Only look at instances placed on this cluster member.
				filter := db.InstanceFilter{
					Node: &localNode,
				}
				err = n.state.Cluster.InstanceList(&filter, func(inst db.Instance, p db.Project, profiles []api.Profile) error {
					// Get the instance's effective network project name.
					instNetworkProject := project.NetworkProjectFromRecord(&p)
					if instNetworkProject != project.Default {
						return nil // Managed bridge networks can only exist in default project.
					}
					devices := db.ExpandInstanceDevices(deviceConfig.NewDevices(db.DevicesToAPI(inst.Devices)), profiles)
					// Iterate through each of the instance's devices, looking for bridged NICs
					// that are linked to this network.
					for devName, devConfig := range devices {
						if devConfig["type"] != "nic" {
							continue
						}
						// Check whether the NIC device references our network.
						if !NICUsesNetwork(devConfig, &api.Network{Name: n.Name()}) {
							continue
						}
						// Only active NICs have a volatile host_name and an existing interface.
						hostName := inst.Config[fmt.Sprintf("volatile.%s.host_name", devName)]
						if InterfaceExists(hostName) {
							link := &ip.Link{Name: hostName}
							err = link.BridgeLinkSetHairpin(true)
							if err != nil {
								return errors.Wrapf(err, "Error enabling hairpin mode on bridge port %q", link.Name)
							}
							n.logger.Debug("Enabled hairpin mode on NIC bridge port", log.Ctx{"inst": inst.Name, "project": inst.Project, "device": devName, "dev": link.Name})
						}
					}
					return nil
				})
				if err != nil {
					return err
				}
			}
		}
	}
	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}
	revert.Success()
	return nil
}
// ForwardUpdate updates a network forward.
// It is a no-op when the requested config is identical (compared via ETag hash) to the
// current one. On failure the previous DB record, firewall rules and BGP prefixes are restored.
func (n *bridge) ForwardUpdate(listenAddress string, req api.NetworkForwardPut, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.
	curForwardID, curForward, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, listenAddress)
	if err != nil {
		return err
	}
	_, err = n.forwardValidate(net.ParseIP(curForward.ListenAddress), &req)
	if err != nil {
		return err
	}
	// Compare current and requested config via ETag hashes to detect no-op updates.
	curForwardEtagHash, err := util.EtagHash(curForward.Etag())
	if err != nil {
		return err
	}
	newForward := api.NetworkForward{
		ListenAddress:     curForward.ListenAddress,
		NetworkForwardPut: req,
	}
	newForwardEtagHash, err := util.EtagHash(newForward.Etag())
	if err != nil {
		return err
	}
	if curForwardEtagHash == newForwardEtagHash {
		return nil // Nothing has changed.
	}
	revert := revert.New()
	defer revert.Fail()
	err = n.state.Cluster.UpdateNetworkForward(n.ID(), curForwardID, &newForward.NetworkForwardPut)
	if err != nil {
		return err
	}
	// On failure: restore previous forward config and re-apply firewall/BGP state.
	revert.Add(func() {
		n.state.Cluster.UpdateNetworkForward(n.ID(), curForwardID, &curForward.NetworkForwardPut)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})
	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}
	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}
	revert.Success()
	return nil
}
// ForwardDelete deletes a network forward.
// On failure the DB record is recreated and firewall rules plus BGP prefixes re-applied.
func (n *bridge) ForwardDelete(listenAddress string, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.
	forwardID, forward, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, listenAddress)
	if err != nil {
		return err
	}
	revert := revert.New()
	defer revert.Fail()
	err = n.state.Cluster.DeleteNetworkForward(n.ID(), forwardID)
	if err != nil {
		return err
	}
	// On failure: recreate the forward from the loaded copy and re-apply firewall/BGP state.
	revert.Add(func() {
		newForward := api.NetworkForwardsPost{
			NetworkForwardPut: forward.NetworkForwardPut,
			ListenAddress:     forward.ListenAddress,
		}
		n.state.Cluster.CreateNetworkForward(n.ID(), memberSpecific, &newForward)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})
	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}
	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}
	revert.Success()
	return nil
}
// forwardSetupFirewall applies all network address forwards defined for this network and this member.
// It also raises (or resolves) a warning when br_netfilter is disabled for an IP version in use.
func (n *bridge) forwardSetupFirewall() error {
	memberSpecific := true // Get all forwards for this cluster member.
	forwards, err := n.state.Cluster.GetNetworkForwards(n.ID(), memberSpecific)
	if err != nil {
		return fmt.Errorf("Failed loading network forwards: %w", err)
	}
	var fwForwards []firewallDrivers.AddressForward
	ipVersions := make(map[uint]struct{})
	for _, forward := range forwards {
		// Convert listen address to subnet so we can check it's valid and can be used.
		listenAddressNet, err := ParseIPToNet(forward.ListenAddress)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing address forward listen address %q", forward.ListenAddress)
		}
		// Track which IP versions we are using.
		if listenAddressNet.IP.To4() == nil {
			ipVersions[6] = struct{}{}
		} else {
			ipVersions[4] = struct{}{}
		}
		portMaps, err := n.forwardValidate(listenAddressNet.IP, &forward.NetworkForwardPut)
		if err != nil {
			return fmt.Errorf("Failed validating firewall address forward for listen address %q: %w", forward.ListenAddress, err)
		}
		fwForwards = append(fwForwards, n.forwardConvertToFirewallForwards(listenAddressNet.IP, net.ParseIP(forward.Config["target_address"]), portMaps)...)
	}
	if len(forwards) > 0 {
		// Check if br_netfilter is enabled too, and warn if not.
		brNetfilterWarning := false
		for ipVersion := range ipVersions {
			err = BridgeNetfilterEnabled(ipVersion)
			if err != nil {
				brNetfilterWarning = true
				msg := fmt.Sprintf("IPv%d bridge netfilter not enabled. Instances using the bridge will not be able to connect to the forward listen IPs", ipVersion)
				n.logger.Warn(msg, log.Ctx{"err": err})
				err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningProxyBridgeNetfilterNotEnabled, fmt.Sprintf("%s: %v", msg, err))
				if err != nil {
					// Warning creation is best-effort; don't fail the firewall setup over it.
					n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
				}
			}
		}
		if !brNetfilterWarning {
			err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningProxyBridgeNetfilterNotEnabled, dbCluster.TypeNetwork, int(n.id))
			if err != nil {
				n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
			}
		}
	}
	err = n.state.Firewall.NetworkApplyForwards(n.name, fwForwards)
	if err != nil {
		return fmt.Errorf("Failed applying firewall address forwards: %w", err)
	}
	return nil
}
// Leases returns a list of leases for the bridged network. It will reach out to other cluster members as needed.
// The projectName passed here refers to the initial project from the API request which may differ from the network's project.
// Lease types returned: "uplink" (downstream networks using this network as uplink), "static"
// (configured NIC addresses), and "dynamic" (dnsmasq leases and EUI64-derived addresses).
func (n *bridge) Leases(projectName string, clientType request.ClientType) ([]api.NetworkLease, error) {
	leases := []api.NetworkLease{}
	projectMacs := []string{}
	// Get all static leases.
	if clientType == request.ClientTypeNormal {
		// Get the downstream networks.
		if n.project == project.Default {
			var err error
			// Load all the networks.
			var projectNetworks map[string]map[int64]api.Network
			err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
				projectNetworks, err = tx.GetCreatedNetworks()
				return err
			})
			if err != nil {
				return nil, err
			}
			// Look for networks using the current network as an uplink.
			for projectName, networks := range projectNetworks {
				for _, network := range networks {
					if network.Config["network"] != n.name {
						continue
					}
					// Found a network, add leases.
					for _, k := range []string{"volatile.network.ipv4.address", "volatile.network.ipv6.address"} {
						v := network.Config[k]
						if v != "" {
							leases = append(leases, api.NetworkLease{
								Hostname: fmt.Sprintf("%s-%s.uplink", projectName, network.Name),
								Address:  v,
								Type:     "uplink",
							})
						}
					}
				}
			}
		}
		// Get all the instances.
		instances, err := instance.LoadByProject(n.state, projectName)
		if err != nil {
			return nil, err
		}
		for _, inst := range instances {
			// Go through all its devices (including profiles).
			for k, dev := range inst.ExpandedDevices() {
				// Skip uninteresting entries.
				if dev["type"] != "nic" {
					continue
				}
				nicType, err := nictype.NICType(n.state, inst.Project(), dev)
				if err != nil || nicType != "bridged" {
					continue
				}
				// Temporarily populate parent from network setting if used.
				if dev["network"] != "" {
					dev["parent"] = dev["network"]
				}
				if dev["parent"] != n.name {
					continue
				}
				// Fill in the hwaddr from volatile.
				if dev["hwaddr"] == "" {
					dev["hwaddr"] = inst.LocalConfig()[fmt.Sprintf("volatile.%s.hwaddr", k)]
				}
				// Record the MAC (used below to filter dynamic leases to this project).
				if dev["hwaddr"] != "" {
					projectMacs = append(projectMacs, dev["hwaddr"])
				}
				// Add the lease.
				if dev["ipv4.address"] != "" {
					leases = append(leases, api.NetworkLease{
						Hostname: inst.Name(),
						Address:  dev["ipv4.address"],
						Hwaddr:   dev["hwaddr"],
						Type:     "static",
						Location: inst.Location(),
					})
				}
				if dev["ipv6.address"] != "" {
					leases = append(leases, api.NetworkLease{
						Hostname: inst.Name(),
						Address:  dev["ipv6.address"],
						Hwaddr:   dev["hwaddr"],
						Type:     "static",
						Location: inst.Location(),
					})
				}
				// Add EUI64 records (SLAAC addresses derived from the MAC when stateful DHCPv6 is off).
				ipv6Address := n.config["ipv6.address"]
				if ipv6Address != "" && ipv6Address != "none" && !shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
					_, netAddress, _ := net.ParseCIDR(ipv6Address)
					hwAddr, _ := net.ParseMAC(dev["hwaddr"])
					if netAddress != nil && hwAddr != nil {
						ipv6, err := eui64.ParseMAC(netAddress.IP, hwAddr)
						if err == nil {
							leases = append(leases, api.NetworkLease{
								Hostname: inst.Name(),
								Address:  ipv6.String(),
								Hwaddr:   dev["hwaddr"],
								Type:     "dynamic",
								Location: inst.Location(),
							})
						}
					}
				}
			}
		}
	}
	// Local server name.
	var err error
	var serverName string
	err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		serverName, err = tx.GetLocalNodeName()
		return err
	})
	if err != nil {
		return nil, err
	}
	// Get dynamic leases from the local dnsmasq lease file.
	leaseFile := shared.VarPath("networks", n.name, "dnsmasq.leases")
	if !shared.PathExists(leaseFile) {
		return leases, nil
	}
	content, err := ioutil.ReadFile(leaseFile)
	if err != nil {
		return nil, err
	}
	for _, lease := range strings.Split(string(content), "\n") {
		fields := strings.Fields(lease)
		if len(fields) >= 5 {
			// Parse the MAC.
			mac := GetMACSlice(fields[1])
			macStr := strings.Join(mac, ":")
			// Fall back to the last field (client-id) when the MAC column isn't a full MAC.
			if len(macStr) < 17 && fields[4] != "" {
				macStr = fields[4][len(fields[4])-17:]
			}
			// Look for an existing static entry.
			found := false
			for _, entry := range leases {
				if entry.Hwaddr == macStr && entry.Address == fields[2] {
					found = true
					break
				}
			}
			if found {
				continue
			}
			// DHCPv6 leases can't be tracked down to a MAC so clear the field.
			// This means that instance project filtering will not work on IPv6 leases.
			if strings.Contains(fields[2], ":") {
				macStr = ""
			}
			// Skip leases that don't match any of the instance MACs from the project (only when we
			// have populated the projectMacs list in ClientTypeNormal mode). Otherwise get all local
			// leases and they will be filtered on the server handling the end user request.
			if clientType == request.ClientTypeNormal && macStr != "" && !shared.StringInSlice(macStr, projectMacs) {
				continue
			}
			// Add the lease to the list.
			leases = append(leases, api.NetworkLease{
				Hostname: fields[3],
				Address:  fields[2],
				Hwaddr:   macStr,
				Type:     "dynamic",
				Location: serverName,
			})
		}
	}
	// Collect leases from other servers.
	if clientType == request.ClientTypeNormal {
		notifier, err := cluster.NewNotifier(n.state, n.state.Endpoints.NetworkCert(), n.state.ServerCert(), cluster.NotifyAll)
		if err != nil {
			return nil, err
		}
		err = notifier(func(client lxd.InstanceServer) error {
			memberLeases, err := client.GetNetworkLeases(n.name)
			if err != nil {
				return err
			}
			// Add local leases from other members, filtering them for MACs that belong to the project.
			for _, lease := range memberLeases {
				if lease.Hwaddr != "" && shared.StringInSlice(lease.Hwaddr, projectMacs) {
					leases = append(leases, lease)
				}
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
	}
	return leases, nil
}
lxd/network/driver/bridge: Exclude offline peers in HandleHeartbeat
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package network
import (
"context"
"encoding/binary"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/mdlayher/netx/eui64"
"github.com/pkg/errors"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
dbCluster "github.com/lxc/lxd/lxd/db/cluster"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/device/nictype"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
firewallDrivers "github.com/lxc/lxd/lxd/firewall/drivers"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/ip"
"github.com/lxc/lxd/lxd/network/acl"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/lxd/warnings"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"
// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"
// forkdnsServersLock serialises updates to the forkdns servers list file.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network.
// It embeds common, which supplies the shared network driver state and behaviour
// (e.g. Info(), config and name fields used throughout this driver).
type bridge struct {
	common
}
// Type returns the network type string ("bridge").
func (n *bridge) Type() string {
	return "bridge"
}
// DBType returns the network type DB ID used when storing this network in the database.
func (n *bridge) DBType() db.NetworkType {
	return db.NetworkTypeBridge
}
// Info returns the network driver info, extending the common driver info with
// address forward support.
func (n *bridge) Info() Info {
	info := n.common.Info()
	info.AddressForwards = true
	return info
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge interface on all
// cluster nodes. It is not suitable to use a static MAC address when "bridge.external_interfaces" is non-empty and
// the bridge interface has no IPv4 or IPv6 address set. This is because in a clustered environment the same bridge
// config is applied to all nodes, and if the bridge is being used to connect multiple nodes to the same network
// segment it would cause MAC conflicts to use the same MAC on all nodes. If an IP address is specified then
// connecting multiple nodes to the same network segment would also cause IP conflicts, so if an IP is defined
// then we assume this is not being done. However if IP addresses are explicitly set to "none" and
// "bridge.external_interfaces" is set then it may not be safe to use the same MAC address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	// Fan mode breaks if using the same MAC address on each node.
	if config["bridge.mode"] == "fan" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)
	}
	// We can't be sure that multiple clustered nodes aren't connected to the same network segment so don't
	// use a static MAC address for the bridge interface to avoid introducing a MAC conflict.
	if config["bridge.external_interfaces"] != "" && config["ipv4.address"] == "none" && config["ipv6.address"] == "none" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}
	return nil
}
// FillConfig fills requested config with any default values.
func (n *bridge) FillConfig(config map[string]string) error {
	if config["bridge.mode"] == "fan" {
		// Fan mode: default to an automatic underlay subnet.
		if config["fan.underlay_subnet"] == "" {
			config["fan.underlay_subnet"] = "auto"
		}

		// We enable NAT by default even if address is manually specified.
		if config["ipv4.nat"] == "" {
			config["ipv4.nat"] = "true"
		}
	} else {
		// Standard mode: default to an automatic IPv4 subnet, NATed.
		if config["ipv4.address"] == "" {
			config["ipv4.address"] = "auto"
		}

		if config["ipv4.nat"] == "" && config["ipv4.address"] == "auto" {
			config["ipv4.nat"] = "true"
		}

		// Only default to an automatic IPv6 subnet if the kernel has IPv6 enabled.
		if config["ipv6.address"] == "" {
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.nat"] == "" && config["ipv6.address"] == "auto" {
			config["ipv6.nat"] = "true"
		}
	}

	// Now replace any "auto" keys with generated values.
	err := n.populateAutoConfig(config)
	if err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	return nil
}
// populateAutoConfig replaces "auto" in config with generated values.
func (n *bridge) populateAutoConfig(config map[string]string) error {
	changed := false

	// Generate a random IPv4 subnet if requested.
	if config["ipv4.address"] == "auto" {
		net4, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = net4
		changed = true
	}

	// Generate a random IPv6 subnet if requested.
	if config["ipv6.address"] == "auto" {
		net6, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = net6
		changed = true
	}

	// Derive the fan underlay from the default gateway's subnet if requested.
	if config["fan.underlay_subnet"] == "auto" {
		underlay, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = underlay.String()
		changed = true
	}

	// Re-validate config if changed.
	if changed && n.state != nil {
		return n.Validate(config)
	}

	return nil
}
// ValidateName validates network name.
func (n *bridge) ValidateName(name string) error {
	// Bridge networks create a real kernel device, so the name must be a valid interface name.
	if err := validate.IsInterfaceName(name); err != nil {
		return err
	}

	// Apply common name validation that applies to all network types.
	return n.common.ValidateName(name)
}
// Validate network config.
// Builds the per-key validation rule set (including dynamic tunnel.* and BGP rules), runs per-key
// validation, then performs composite checks (fan mode restrictions, MTU bounds, cluster-wide MAC
// safety, OVN/DHCP range overlaps and ACL existence).
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bgp.ipv4.nexthop": validate.Optional(validate.IsNetworkAddressV4),
		"bgp.ipv6.nexthop": validate.Optional(validate.IsNetworkAddressV6),
		"bridge.driver":    validate.Optional(validate.IsOneOf("native", "openvswitch")),
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validate.IsInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		}),
		"bridge.hwaddr":      validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":         validate.Optional(validate.IsNetworkMTU),
		"bridge.mode":        validate.Optional(validate.IsOneOf("standard", "fan")),
		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": validate.Optional(func(value string) error {
			if value == "auto" {
				return nil
			}

			return validate.IsNetworkV4(value)
		}),
		"fan.type": validate.Optional(validate.IsOneOf("vxlan", "ipip")),
		"ipv4.address": validate.Optional(func(value string) error {
			if validate.IsOneOf("none", "auto")(value) == nil {
				return nil
			}

			return validate.IsNetworkAddressCIDRV4(value)
		}),
		"ipv4.firewall":     validate.Optional(validate.IsBool),
		"ipv4.nat":          validate.Optional(validate.IsBool),
		"ipv4.nat.order":    validate.Optional(validate.IsOneOf("before", "after")),
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),
		"ipv6.address": validate.Optional(func(value string) error {
			if validate.IsOneOf("none", "auto")(value) == nil {
				return nil
			}

			return validate.IsNetworkAddressCIDRV6(value)
		}),
		"ipv6.firewall":                        validate.Optional(validate.IsBool),
		"ipv6.nat":                             validate.Optional(validate.IsBool),
		"ipv6.nat.order":                       validate.Optional(validate.IsOneOf("before", "after")),
		"ipv6.nat.address":                     validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":                            validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":                     validate.IsAny,
		"ipv6.dhcp.stateful":                   validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":                     validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":                          validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":                         validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":                      validate.Optional(validate.IsNetworkRangeV6List),
		"dns.domain":                           validate.IsAny,
		"dns.mode":                             validate.Optional(validate.IsOneOf("dynamic", "managed", "none")),
		"dns.search":                           validate.IsAny,
		"dns.zone.forward":                     validate.Optional(n.validateZoneName),
		"dns.zone.reverse.ipv4":                validate.Optional(n.validateZoneName),
		"dns.zone.reverse.ipv6":                validate.Optional(n.validateZoneName),
		"raw.dnsmasq":                          validate.IsAny,
		"maas.subnet.ipv4":                     validate.IsAny,
		"maas.subnet.ipv6":                     validate.IsAny,
		"security.acls":                        validate.IsAny,
		"security.acls.default.ingress.action": validate.Optional(validate.IsOneOf(acl.ValidActions...)),
		"security.acls.default.egress.action":  validate.Optional(validate.IsOneOf(acl.ValidActions...)),
		"security.acls.default.ingress.logged": validate.Optional(validate.IsBool),
		"security.acls.default.egress.logged":  validate.Optional(validate.IsBool),
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, extract the suffix.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = validate.Optional(validate.IsOneOf("gre", "vxlan"))
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface": // Fixed typo: previously "inteface", which left the documented "tunnel.NAME.interface" key without a rule (so it was rejected as unknown).
				rules[k] = validate.IsInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	// Add the BGP validation rules.
	bgpRules, err := n.bgpValidationRules(config)
	if err != nil {
		return err
	}

	for k, v := range bgpRules {
		rules[k] = v
	}

	// Validate the configuration.
	err = n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		key := k
		// Bridge mode checks
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if config["bridge.mode"] == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.ovn.ranges")
		}

		dhcpRanges, err := parseIPRanges(config["ipv4.dhcp.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv4.dhcp.ranges")
		}

		for _, ovnRange := range ovnRanges {
			for _, dhcpRange := range dhcpRanges {
				if IPRangesOverlap(ovnRange, dhcpRange) {
					return fmt.Errorf(`The range specified in "ipv4.ovn.ranges" (%q) cannot overlap with "ipv4.dhcp.ranges"`, ovnRange)
				}
			}
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		ovnRanges, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing ipv6.ovn.ranges")
		}

		// If stateful DHCPv6 is enabled, check OVN ranges don't overlap with DHCPv6 stateful ranges.
		// Otherwise SLAAC will be being used to generate client IPs and predefined ranges aren't used.
		if dhcpSubnet != nil && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
			dhcpRanges, err := parseIPRanges(config["ipv6.dhcp.ranges"], allowedNets...)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing ipv6.dhcp.ranges")
			}

			for _, ovnRange := range ovnRanges {
				for _, dhcpRange := range dhcpRanges {
					if IPRangesOverlap(ovnRange, dhcpRange) {
						return fmt.Errorf(`The range specified in "ipv6.ovn.ranges" (%q) cannot overlap with "ipv6.dhcp.ranges"`, ovnRange)
					}
				}
			}
		}
	}

	// Check Security ACLs are supported and exist.
	if config["security.acls"] != "" {
		err = acl.Exists(n.state, n.Project(), util.SplitNTrimSpace(config["security.acls"], ",", -1, true)...)
		if err != nil {
			return err
		}
	}

	return nil
}
// Create checks whether the bridge interface name is used already.
func (n *bridge) Create(clientType request.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// Refuse creation when a host interface with this name is already present.
	if InterfaceExists(n.name) {
		return fmt.Errorf("Network interface %q already exists", n.name)
	}

	return nil
}
// isRunning returns whether the network is up.
// The network is considered running when its bridge interface exists on the host.
func (n *bridge) isRunning() bool {
	return InterfaceExists(n.name)
}
// Delete deletes a network.
func (n *bridge) Delete(clientType request.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Delete all warnings regarding this network. Best effort: a failure is only logged.
	if err := warnings.DeleteWarningsByLocalNodeAndProjectAndEntity(n.state.Cluster, n.project, dbCluster.TypeNetwork, int(n.id)); err != nil {
		n.logger.Warn("Failed to delete warnings", log.Ctx{"err": err})
	}

	// Bring the network down before removal.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clientType)
}
// Rename renames a network.
// The network is stopped, its forkdns log file is renamed to match the new name, the common
// rename steps (DB/config) are applied, and then the network is started again under the new name.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if InterfaceExists(newName) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	forkDNSLogPath := fmt.Sprintf("forkdns.%s.log", n.name)
	if shared.PathExists(shared.LogPath(forkDNSLogPath)) {
		// Fix: use the full log path as the rename source. Previously the bare file name was
		// passed, which is relative to the daemon's working directory and so the rename failed.
		err := os.Rename(shared.LogPath(forkDNSLogPath), shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	err := n.setup(nil)
	if err != nil {
		// Record a startup failure warning; the original setup error is still returned below.
		werr := n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningNetworkStartupFailure, err.Error())
		if werr != nil {
			n.logger.Warn("Failed to create warning", log.Ctx{"err": werr})
		}
	} else {
		// Startup succeeded, so clear any previous startup failure warning.
		werr := warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningNetworkStartupFailure, dbCluster.TypeNetwork, int(n.id))
		if werr != nil {
			n.logger.Warn("Failed to resolve warning", log.Ctx{"err": werr})
		}
	}

	return err
}
// setup restarts the network.
func (n *bridge) setup(oldConfig map[string]string) error {
// If we are in mock mode, just no-op.
if n.state.OS.MockMode {
return nil
}
n.logger.Debug("Setting up network")
revert := revert.New()
defer revert.Fail()
// Create directory.
if !shared.PathExists(shared.VarPath("networks", n.name)) {
err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
if err != nil {
return err
}
}
bridgeLink := &ip.Link{Name: n.name}
// Create the bridge interface if doesn't exist.
if !n.isRunning() {
if n.config["bridge.driver"] == "openvswitch" {
ovs := openvswitch.NewOVS()
if !ovs.Installed() {
return fmt.Errorf("Open vSwitch isn't installed on this system")
}
err := ovs.BridgeAdd(n.name, false)
if err != nil {
return err
}
revert.Add(func() { ovs.BridgeDelete(n.name) })
} else {
bridge := &ip.Bridge{
Link: *bridgeLink,
}
err := bridge.Add()
if err != nil {
return err
}
revert.Add(func() { bridge.Delete() })
}
}
// Get a list of tunnels.
tunnels := n.getTunnels()
// IPv6 bridge configuration.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
if !shared.PathExists("/proc/sys/net/ipv6") {
return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
}
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
if err != nil {
return err
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
if err != nil {
return err
}
}
// Get a list of interfaces.
ifaces, err := net.Interfaces()
if err != nil {
return err
}
// Cleanup any existing tunnel device.
for _, iface := range ifaces {
if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
tunLink := &ip.Link{Name: iface.Name}
err = tunLink.Delete()
if err != nil {
return err
}
}
}
// Set the MTU.
mtu := ""
if n.config["bridge.mtu"] != "" {
mtu = n.config["bridge.mtu"]
} else if len(tunnels) > 0 {
mtu = "1400"
} else if n.config["bridge.mode"] == "fan" {
if n.config["fan.type"] == "ipip" {
mtu = "1480"
} else {
mtu = "1450"
}
}
// Attempt to add a dummy device to the bridge to force the MTU.
if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
dummy := &ip.Dummy{
Link: ip.Link{Name: fmt.Sprintf("%s-mtu", n.name), MTU: mtu},
}
err = dummy.Add()
if err == nil {
revert.Add(func() { dummy.Delete() })
err = dummy.SetUp()
if err == nil {
AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
}
}
}
// Now, set a default MTU.
if mtu == "" {
mtu = "1500"
}
err = bridgeLink.SetMTU(mtu)
if err != nil {
return err
}
// Always prefer static MAC address if set.
hwAddr := n.config["bridge.hwaddr"]
// If no cluster wide static MAC address set, then generate one.
if hwAddr == "" {
var seedNodeID int64
if n.checkClusterWideMACSafe(n.config) != nil {
// If not safe to use a cluster wide MAC or in in fan mode, then use cluster node's ID to
// generate a stable per-node & network derived random MAC.
seedNodeID = n.state.Cluster.GetNodeID()
} else {
// If safe to use a cluster wide MAC, then use a static cluster node of 0 to generate a
// stable per-network derived random MAC.
seedNodeID = 0
}
// Load server certificate. This is needs to be the same certificate for all nodes in a cluster.
cert, err := util.LoadCert(n.state.OS.VarDir)
if err != nil {
return err
}
// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
// standalone nodes with the same network ID connected to the same external network don't generate
// the same MAC for their networks). It relies on the certificate being the same for all nodes in a
// cluster to allow the same MAC to be generated on each bridge interface in the network when
// seedNodeID is 0 (when safe to do so).
seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())
r, err := util.GetStableRandomGenerator(seed)
if err != nil {
return errors.Wrapf(err, "Failed generating stable random bridge MAC")
}
hwAddr = randomHwaddr(r)
n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
}
// Set the MAC address on the bridge interface if specified.
if hwAddr != "" {
err = bridgeLink.SetAddress(hwAddr)
if err != nil {
return err
}
}
// Enable VLAN filtering for Linux bridges.
if n.config["bridge.driver"] != "openvswitch" {
err = BridgeVLANFilterSetStatus(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
// Set the default PVID for new ports to 1.
err = BridgeVLANSetDefaultPVID(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
}
// Bring it up.
err = bridgeLink.SetUp()
if err != nil {
return err
}
// Add any listed existing external interface.
if n.config["bridge.external_interfaces"] != "" {
for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
entry = strings.TrimSpace(entry)
iface, err := net.InterfaceByName(entry)
if err != nil {
n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
continue
}
unused := true
addrs, err := iface.Addrs()
if err == nil {
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if ip != nil && err == nil && ip.IsGlobalUnicast() {
unused = false
break
}
}
}
if !unused {
return fmt.Errorf("Only unconfigured network interfaces can be bridged")
}
err = AttachInterface(n.name, entry)
if err != nil {
return err
}
}
}
// Remove any existing firewall rules.
fwClearIPVersions := []uint{}
if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
fwClearIPVersions = append(fwClearIPVersions, 4)
}
if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
fwClearIPVersions = append(fwClearIPVersions, 6)
}
if len(fwClearIPVersions) > 0 {
n.logger.Debug("Clearing firewall")
err = n.state.Firewall.NetworkClear(n.name, false, fwClearIPVersions)
if err != nil {
return errors.Wrapf(err, "Failed clearing firewall")
}
}
// Initialise a new firewall option set.
fwOpts := firewallDrivers.Opts{}
if n.hasIPv4Firewall() {
fwOpts.FeaturesV4 = &firewallDrivers.FeatureOpts{}
}
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6 = &firewallDrivers.FeatureOpts{}
}
if n.config["security.acls"] != "" {
fwOpts.ACL = true
}
// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err := n.bootRoutesV4()
if err != nil {
return err
}
// Flush all IPv4 addresses and routes.
addr := &ip.Addr{
DevName: n.name,
Scope: "global",
Family: ip.FamilyV4,
}
err = addr.Flush()
if err != nil {
return err
}
r := &ip.Route{
DevName: n.name,
Proto: "static",
Family: ip.FamilyV4,
}
err = r.Flush()
if err != nil {
return err
}
// Configure IPv4 firewall (includes fan).
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
if n.hasDHCPv4() && n.hasIPv4Firewall() {
fwOpts.FeaturesV4.ICMPDHCPDNSAccess = true
}
// Allow forwarding.
if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
err = util.SysctlSet("net/ipv4/ip_forward", "1")
if err != nil {
return err
}
if n.hasIPv4Firewall() {
fwOpts.FeaturesV4.ForwardingAllow = true
}
}
}
// Start building process using subprocess package.
command := "dnsmasq"
dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
"--except-interface=lo",
"--pid-file=", // Disable attempt at writing a PID file.
"--no-ping", // --no-ping is very important to prevent delays to lease file updates.
fmt.Sprintf("--interface=%s", n.name)}
dnsmasqVersion, err := dnsmasq.GetVersion()
if err != nil {
return err
}
// --dhcp-rapid-commit option is only supported on >2.79.
minVer, _ := version.NewDottedVersion("2.79")
if dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
}
if !daemon.Debug {
// --quiet options are only supported on >2.67.
minVer, _ := version.NewDottedVersion("2.67")
if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
}
}
// Configure IPv4.
if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
// Parse the subnet.
ipAddress, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv4.address")
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ipAddress.String()))
if n.DHCPv4Subnet() != nil {
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
if n.config["ipv4.dhcp.gateway"] != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
}
if mtu != "1500" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
}
dnsSearch := n.config["dns.search"]
if dnsSearch != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
}
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
if n.config["ipv4.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
}
}
// Add the address.
addr := &ip.Addr{
DevName: n.name,
Address: n.config["ipv4.address"],
Family: ip.FamilyV4,
}
err = addr.Add()
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv4.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv4.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv4.nat.address"])
}
fwOpts.SNATV4 = &firewallDrivers.SNATOpts{
SNATAddress: srcIP,
Subnet: subnet,
}
if n.config["ipv4.nat.order"] == "after" {
fwOpts.SNATV4.Append = true
}
}
// Add additional routes.
if n.config["ipv4.routes"] != "" {
for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
route = strings.TrimSpace(route)
r := &ip.Route{
DevName: n.name,
Route: route,
Proto: "static",
Family: ip.FamilyV4,
}
err = r.Add()
if err != nil {
return err
}
}
}
// Restore container specific IPv4 routes to interface.
n.applyBootRoutesV4(ctRoutes)
}
// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err = n.bootRoutesV6()
if err != nil {
return err
}
// Flush all IPv6 addresses and routes.
addr = &ip.Addr{
DevName: n.name,
Scope: "global",
Family: ip.FamilyV6,
}
err = addr.Flush()
if err != nil {
return err
}
r = &ip.Route{
DevName: n.name,
Proto: "static",
Family: ip.FamilyV6,
}
err = r.Flush()
if err != nil {
return err
}
// Configure IPv6.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Enable IPv6 for the subnet.
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
if err != nil {
return err
}
// Parse the subnet.
ipAddress, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
if err != nil {
return errors.Wrapf(err, "Failed parsing ipv6.address")
}
subnetSize, _ := subnet.Mask.Size()
if subnetSize > 64 {
n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningLargerIPv6PrefixThanSupported, "")
if err != nil {
n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
}
} else {
err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningLargerIPv6PrefixThanSupported, dbCluster.TypeNetwork, int(n.id))
if err != nil {
n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
}
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ipAddress.String()), "--enable-ra"}...)
if n.DHCPv6Subnet() != nil {
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6.ICMPDHCPDNSAccess = true
}
// Build DHCP configuration.
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
expiry := "1h"
if n.config["ipv6.dhcp.expiry"] != "" {
expiry = n.config["ipv6.dhcp.expiry"]
}
if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
if n.config["ipv6.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
}
// Allow forwarding.
if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
// Get a list of proc entries.
entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
if err != nil {
return err
}
// First set accept_ra to 2 for everything.
for _, entry := range entries {
content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
if err == nil && string(content) != "1\n" {
continue
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
if err != nil && !os.IsNotExist(err) {
return err
}
}
// Then set forwarding for all of them.
for _, entry := range entries {
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
if err != nil && !os.IsNotExist(err) {
return err
}
}
if n.hasIPv6Firewall() {
fwOpts.FeaturesV6.ForwardingAllow = true
}
}
// Add the address.
addr := &ip.Addr{
DevName: n.name,
Address: n.config["ipv6.address"],
Family: ip.FamilyV6,
}
err = addr.Add()
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv6.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv6.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv6.nat.address"])
}
fwOpts.SNATV6 = &firewallDrivers.SNATOpts{
SNATAddress: srcIP,
Subnet: subnet,
}
if n.config["ipv6.nat.order"] == "after" {
fwOpts.SNATV6.Append = true
}
}
// Add additional routes.
if n.config["ipv6.routes"] != "" {
for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
route = strings.TrimSpace(route)
r := &ip.Route{
DevName: n.name,
Route: route,
Proto: "static",
Family: ip.FamilyV6,
}
err = r.Add()
if err != nil {
return err
}
}
}
// Restore container specific IPv6 routes to interface.
n.applyBootRoutesV6(ctRoutes)
}
// Configure the fan.
dnsClustered := false
dnsClusteredAddress := ""
var overlaySubnet *net.IPNet
if n.config["bridge.mode"] == "fan" {
tunName := fmt.Sprintf("%s-fan", n.name)
// Parse the underlay.
underlay := n.config["fan.underlay_subnet"]
_, underlaySubnet, err := net.ParseCIDR(underlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.underlay_subnet")
}
// Parse the overlay.
overlay := n.config["fan.overlay_subnet"]
if overlay == "" {
overlay = "240.0.0.0/8"
}
_, overlaySubnet, err = net.ParseCIDR(overlay)
if err != nil {
return errors.Wrapf(err, "Failed parsing fan.overlay_subnet")
}
// Get the address.
fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
if err != nil {
return err
}
addr := strings.Split(fanAddress, "/")
if n.config["fan.type"] == "ipip" {
fanAddress = fmt.Sprintf("%s/24", addr[0])
}
// Update the MTU based on overlay device (if available).
fanMtuInt, err := GetDevMTU(devName)
if err == nil {
// Apply overhead.
if n.config["fan.type"] == "ipip" {
fanMtuInt = fanMtuInt - 20
} else {
fanMtuInt = fanMtuInt - 50
}
// Apply changes.
fanMtu := fmt.Sprintf("%d", fanMtuInt)
if fanMtu != mtu {
mtu = fanMtu
if n.config["bridge.driver"] != "openvswitch" {
mtuLink := &ip.Link{Name: fmt.Sprintf("%s-mtu", n.name)}
err = mtuLink.SetMTU(mtu)
if err != nil {
return err
}
}
err = bridgeLink.SetMTU(mtu)
if err != nil {
return err
}
}
}
// Parse the host subnet.
_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
if err != nil {
return errors.Wrapf(err, "Failed parsing fan address")
}
// Add the address.
ipAddr := &ip.Addr{
DevName: n.name,
Address: fanAddress,
Family: ip.FamilyV4,
}
err = ipAddr.Add()
if err != nil {
return err
}
// Update the dnsmasq config.
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
dnsmasqCmd = append(dnsmasqCmd, []string{
fmt.Sprintf("--listen-address=%s", addr[0]),
"--dhcp-no-override", "--dhcp-authoritative",
fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)
// Setup the tunnel.
if n.config["fan.type"] == "ipip" {
r := &ip.Route{
DevName: "tunl0",
Family: ip.FamilyV4,
}
err = r.Flush()
if err != nil {
return err
}
tunLink := &ip.Link{Name: "tunl0"}
err = tunLink.SetUp()
if err != nil {
return err
}
// Fails if the map is already set.
tunLink.Change("ipip", fmt.Sprintf("%s:%s", overlay, underlay))
r = &ip.Route{
DevName: "tunl0",
Route: overlay,
Src: addr[0],
Proto: "static",
}
err = r.Add()
if err != nil {
return err
}
} else {
vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
vxlan := &ip.Vxlan{
Link: ip.Link{Name: tunName},
VxlanID: vxlanID,
DevName: devName,
DstPort: "0",
Local: devAddr,
FanMap: fmt.Sprintf("%s:%s", overlay, underlay),
}
err = vxlan.Add()
if err != nil {
return err
}
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
err = vxlan.SetMTU(mtu)
if err != nil {
return err
}
err = vxlan.SetUp()
if err != nil {
return err
}
err = bridgeLink.SetUp()
if err != nil {
return err
}
}
// Configure NAT.
if shared.IsTrue(n.config["ipv4.nat"]) {
fwOpts.SNATV4 = &firewallDrivers.SNATOpts{
SNATAddress: nil, // Use MASQUERADE mode.
Subnet: overlaySubnet,
}
if n.config["ipv4.nat.order"] == "after" {
fwOpts.SNATV4.Append = true
}
}
// Setup clustered DNS.
clusterAddress, err := node.ClusterAddress(n.state.Node)
if err != nil {
return err
}
// If clusterAddress is non-empty, this indicates the intention for this node to be
// part of a cluster and so we should ensure that dnsmasq and forkdns are started
// in cluster mode. Note: During LXD initialisation the cluster may not actually be
// setup yet, but we want the DNS processes to be ready for when it is.
if clusterAddress != "" {
dnsClustered = true
}
dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
}
// Configure tunnels.
for _, tunnel := range tunnels {
getConfig := func(key string) string {
return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
}
tunProtocol := getConfig("protocol")
tunLocal := getConfig("local")
tunRemote := getConfig("remote")
tunName := fmt.Sprintf("%s-%s", n.name, tunnel)
// Configure the tunnel.
if tunProtocol == "gre" {
// Skip partial configs.
if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
continue
}
gretap := &ip.Gretap{
Link: ip.Link{Name: tunName},
Local: tunLocal,
Remote: tunRemote,
}
err := gretap.Add()
if err != nil {
return err
}
} else if tunProtocol == "vxlan" {
tunGroup := getConfig("group")
tunInterface := getConfig("interface")
// Skip partial configs.
if tunProtocol == "" {
continue
}
vxlan := &ip.Vxlan{
Link: ip.Link{Name: tunName},
}
if tunLocal != "" && tunRemote != "" {
vxlan.Local = tunLocal
vxlan.Remote = tunRemote
} else {
if tunGroup == "" {
tunGroup = "239.0.0.1"
}
devName := tunInterface
if devName == "" {
_, devName, err = DefaultGatewaySubnetV4()
if err != nil {
return err
}
}
vxlan.Group = tunGroup
vxlan.DevName = devName
}
tunPort := getConfig("port")
if tunPort == "" {
tunPort = "0"
}
vxlan.DstPort = tunPort
tunID := getConfig("id")
if tunID == "" {
tunID = "1"
}
vxlan.VxlanID = tunID
tunTTL := getConfig("ttl")
if tunTTL == "" {
tunTTL = "1"
}
vxlan.TTL = tunTTL
err := vxlan.Add()
if err != nil {
return err
}
}
// Bridge it and bring up.
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
tunLink := &ip.Link{Name: tunName}
err = tunLink.SetMTU(mtu)
if err != nil {
return err
}
// Bring up tunnel interface.
err = tunLink.SetUp()
if err != nil {
return err
}
// Bring up network interface.
err = bridgeLink.SetUp()
if err != nil {
return err
}
}
// Generate and load apparmor profiles.
err = apparmor.NetworkLoad(n.state, n)
if err != nil {
return err
}
// Kill any existing dnsmasq and forkdns daemon for this network.
err = dnsmasq.Kill(n.name, false)
if err != nil {
return err
}
err = n.killForkDNS()
if err != nil {
return err
}
// Configure dnsmasq.
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Setup the dnsmasq domain.
dnsDomain := n.config["dns.domain"]
if dnsDomain == "" {
dnsDomain = "lxd"
}
if n.config["dns.mode"] != "none" {
dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
dnsmasqCmd = append(dnsmasqCmd, "--interface-name", fmt.Sprintf("_gateway.%s,%s", dnsDomain, n.name))
if dnsClustered {
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
} else {
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/", dnsDomain))
}
}
// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
if err != nil {
return err
}
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))
// Attempt to drop privileges.
if n.state.OS.UnprivUser != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
}
if n.state.OS.UnprivGroup != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
}
// Create DHCP hosts directory.
if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
if err != nil {
return err
}
}
// Check for dnsmasq.
_, err := exec.LookPath("dnsmasq")
if err != nil {
return fmt.Errorf("dnsmasq is required for LXD managed bridges")
}
// Update the static leases.
err = UpdateDNSMasqStatic(n.state, n.name)
if err != nil {
return err
}
// Create subprocess object dnsmasq.
dnsmasqLogPath := shared.LogPath(fmt.Sprintf("dnsmasq.%s.log", n.name))
p, err := subprocess.NewProcess(command, dnsmasqCmd, "", dnsmasqLogPath)
if err != nil {
return fmt.Errorf("Failed to create subprocess: %s", err)
}
// Apply AppArmor confinement.
if n.config["raw.dnsmasq"] == "" {
p.SetApparmor(apparmor.DnsmasqProfileName(n))
err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningAppArmorDisabledDueToRawDnsmasq, dbCluster.TypeNetwork, int(n.id))
if err != nil {
n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
}
} else {
n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningAppArmorDisabledDueToRawDnsmasq, "")
if err != nil {
n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
}
}
// Start dnsmasq.
err = p.Start()
if err != nil {
return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
}
// Check dnsmasq started OK.
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond*time.Duration(500)))
_, err = p.Wait(ctx)
if errors.Cause(err) != context.DeadlineExceeded {
stderr, _ := ioutil.ReadFile(dnsmasqLogPath)
// Just log an error if dnsmasq has exited, and still proceed with normal setup so we
// don't leave the firewall in an inconsistent state.
n.logger.Error("The dnsmasq process exited prematurely", log.Ctx{"err": err, "stderr": strings.TrimSpace(string(stderr))})
}
cancel()
err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
if err != nil {
// Kill Process if started, but could not save the file.
err2 := p.Stop()
if err != nil {
return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
}
return fmt.Errorf("Failed to save subprocess details: %s", err)
}
// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
if dnsClustered {
// Create forkdns servers directory.
if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
if err != nil {
return err
}
}
// Create forkdns servers.conf file if doesn't exist.
f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
err = n.spawnForkDNS(dnsClusteredAddress)
if err != nil {
return err
}
}
} else {
// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
if shared.PathExists(leasesPath) {
err := os.Remove(leasesPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq leases file %q", leasesPath)
}
}
// And same for our PID file.
pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
if shared.PathExists(pidPath) {
err := os.Remove(pidPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq pid file %q", pidPath)
}
}
}
// Setup firewall.
n.logger.Debug("Setting up firewall")
err = n.state.Firewall.NetworkSetup(n.name, fwOpts)
if err != nil {
return errors.Wrapf(err, "Failed to setup firewall")
}
if fwOpts.ACL {
aclNet := acl.NetworkACLUsage{
Name: n.Name(),
Type: n.Type(),
ID: n.ID(),
Config: n.Config(),
}
n.logger.Debug("Applying up firewall ACLs")
err = acl.FirewallApplyACLRules(n.state, n.logger, n.Project(), aclNet)
if err != nil {
return err
}
}
// Setup network address forwards.
err = n.forwardSetupFirewall()
if err != nil {
return err
}
// Setup BGP.
err = n.bgpSetup(oldConfig)
if err != nil {
return err
}
revert.Success()
return nil
}
// Stop stops the network.
// It clears BGP state, deletes the bridge interface (via OVS or netlink
// depending on bridge.driver), clears firewall rules for the IP families
// that were firewalled, kills the dnsmasq and forkdns daemons, deletes any
// tunnel interfaces belonging to this network and unloads the network's
// apparmor profiles. Returns the first error encountered.
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")

	// Nothing to do if the bridge interface doesn't exist.
	if !n.isRunning() {
		return nil
	}

	// Clear BGP.
	err := n.bgpClear(n.config)
	if err != nil {
		return err
	}

	// Destroy the bridge interface.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		err := ovs.BridgeDelete(n.name)
		if err != nil {
			return err
		}
	} else {
		bridgeLink := &ip.Link{Name: n.name}
		err := bridgeLink.Delete()
		if err != nil {
			return err
		}
	}

	// Fully clear firewall setup, but only for the IP families that the
	// config indicates were firewalled (avoids touching unrelated rules).
	fwClearIPVersions := []uint{}

	if usesIPv4Firewall(n.config) {
		fwClearIPVersions = append(fwClearIPVersions, 4)
	}

	if usesIPv6Firewall(n.config) {
		fwClearIPVersions = append(fwClearIPVersions, 6)
	}

	if len(fwClearIPVersions) > 0 {
		n.logger.Debug("Deleting firewall")
		err := n.state.Firewall.NetworkClear(n.name, true, fwClearIPVersions)
		if err != nil {
			return errors.Wrapf(err, "Failed deleting firewall")
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	err = dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (interfaces prefixed "<network>-").
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			tunLink := &ip.Link{Name: iface.Name}
			err = tunLink.Delete()
			if err != nil {
				return err
			}
		}
	}

	// Unload apparmor profiles.
	err = apparmor.NetworkUnload(n.state, n)
	if err != nil {
		return err
	}

	return nil
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType request.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})

	// Populate auto-generated config keys before comparing with the stored config.
	err := n.populateAutoConfig(newNetwork.Config)
	if err != nil {
		return errors.Wrapf(err, "Failed generating auto config")
	}

	dbUpdateNeeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeeded {
		return nil // Nothing changed.
	}

	// If the network as a whole has not had any previous creation attempts, or the node itself is still
	// pending, then don't apply the new settings to the node, just to the database record (ready for the
	// actual global create request to be initiated).
	if n.Status() == api.NetworkStatusPending || n.LocalStatus() == api.NetworkStatusPending {
		return n.common.update(newNetwork, targetNode, clientType)
	}

	revert := revert.New()
	defer revert.Fail()

	// Perform any pre-update cleanup needed if local node network was already created.
	if len(changedKeys) > 0 {
		// Define a function which reverts everything.
		revert.Add(func() {
			// Reset changes to all nodes and database.
			n.common.update(oldNetwork, targetNode, clientType)

			// Reset any change that was made to local bridge.
			// NOTE(review): setup appears to take the config being replaced (mirroring the
			// n.setup(oldNetwork.Config) call at the end of this function) — confirm against setup.
			n.setup(newNetwork.Config)
		})

		// Bring the bridge down entirely if the driver has changed.
		if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
			err = n.Stop()
			if err != nil {
				return err
			}
		}

		// Detach any external interfaces should no longer be attached.
		if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
			// Collect the set of interfaces named in the new config.
			devices := []string{}
			for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				devices = append(devices, dev)
			}

			// Detach interfaces present in the old config but absent from the new one.
			for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
				dev = strings.TrimSpace(dev)
				if dev == "" {
					continue
				}

				if !shared.StringInSlice(dev, devices) && InterfaceExists(dev) {
					err = DetachInterface(n.name, dev)
					if err != nil {
						return err
					}
				}
			}
		}
	}

	// Apply changes to all nodes and database.
	err = n.common.update(newNetwork, targetNode, clientType)
	if err != nil {
		return err
	}

	// Restart the network if needed.
	if len(changedKeys) > 0 {
		err = n.setup(oldNetwork.Config)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS starts the forkdns DNS proxy daemon for this network as a
// subprocess listening on port 1053 of the supplied address, dropping
// privileges, applying the network's AppArmor profile and recording its PID
// file under the network's state directory.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain (defaults to "lxd" when unset).
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)

	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill process if started, but the PID file could not be saved.
		err2 := p.Stop()
		// Bug fix: previously this checked err (always non-nil here), so the
		// "Failed to save" return below was unreachable and a successful kill
		// was misreported as a kill failure.
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}

	// Our own HTTPS address, used below to skip querying ourselves.
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	networkCert := n.state.Endpoints.NetworkCert()
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		// Offline members can't be queried; warn and move on.
		if !node.Online {
			n.logger.Warn("Excluding offline member from DNS peers refresh", log.Ctx{"address": node.Address, "ID": node.ID, "raftID": node.RaftID, "lastHeartbeat": node.LastHeartbeat})
			continue
		}

		client, err := cluster.Connect(node.Address, networkCert, n.state.ServerCert(), nil, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		// Record at most one address per member (break after the first match).
		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network.
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the distinct tunnel names referenced by "tunnel.<name>.*"
// keys in the network config. Order is unspecified (map iteration order).
func (n *bridge) getTunnels() []string {
	names := []string{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		// Second dot-separated field is the tunnel name; dedupe as we go.
		name := strings.Split(key, ".")[1]
		if !shared.StringInSlice(name, names) {
			names = append(names, name)
		}
	}

	return names
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
func (n *bridge) bootRoutesV4() ([]string, error) {
	query := &ip.Route{
		DevName: n.name,
		Proto:   "boot",
		Family:  ip.FamilyV4,
	}

	// Pass the Show result (routes and error) straight through.
	return query.Show()
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
func (n *bridge) bootRoutesV6() ([]string, error) {
	query := &ip.Route{
		DevName: n.name,
		Proto:   "boot",
		Family:  ip.FamilyV6,
	}

	// Pass the Show result (routes and error) straight through.
	return query.Show()
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Restoration is best-effort: failures are logged rather than returned, since
// the original routes are already gone and stopping here would not help.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, route := range routes {
		restore := &ip.Route{
			DevName: n.name,
			Proto:   "boot",
			Family:  ip.FamilyV4,
		}

		if err := restore.Replace(strings.Fields(route)); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
// Restoration is best-effort: failures are logged rather than returned, since
// the original routes are already gone and stopping here would not help.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		restore := &ip.Route{
			DevName: n.name,
			Proto:   "boot",
			Family:  ip.FamilyV6,
		}

		if err := restore.Replace(strings.Fields(route)); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress computes this host's fan bridge address from the underlay and
// overlay subnets. It returns the fan address in CIDR form ("a.b.c.d/size"),
// the name of the interface carrying the underlay address, and that underlay
// address as a string. Only /16 or /24 underlays and /8 or /16 overlays are
// supported, and the combination must fit within 32 bits.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Quick checks.
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	// Overlay prefix + underlay host bits + 8 bits per container must fit in 32 bits.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get the local address that falls within the underlay subnet.
	// (Renamed from "ip" to avoid shadowing the ip package.)
	ipAddr, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ipAddr.String()

	// Force into IPv4 format.
	ipBytes := ipAddr.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ipAddr)
	}

	// Compute the fan address by folding the underlay host bits into the
	// overlay prefix, with the last octet fixed to 1 for this host's bridge.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}

	ipBytes[3] = 1

	// Bug fix (idiom): return an explicit nil instead of the stale err
	// variable, which is always nil at this point.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, nil
}
// addressForSubnet returns the first non-loopback interface address that falls
// within the given subnet, together with the name of the interface carrying it.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		// Skip addresses on lo interface in case VIPs are being used on that interface that are part of
		// the underlay subnet as is unlikely to be the actual intended underlay subnet interface.
		if iface.Name == "lo" {
			continue
		}

		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}

		for _, addr := range addrs {
			// Renamed from "ip" to avoid shadowing the ip package.
			ipAddr, _, err := net.ParseCIDR(addr.String())
			if err != nil || !subnet.Contains(ipAddr) {
				continue
			}

			return ipAddr, iface.Name, nil
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops the forkdns daemon for this network (if running) using its
// PID file. A missing PID file means there is no process to kill, which is not
// an error; an already-stopped process is also tolerated.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Bug fix: the message previously said "dnsmasq" (copy-paste error),
		// but this function kills forkdns.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
// The write goes to a temp file that is renamed into place, so forkdns never
// observes a partially-written list.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Bug fix: clean up the temp file if we fail before the rename (after a
	// successful rename the path no longer exists and Remove is a no-op failure
	// we deliberately ignore).
	defer os.Remove(tmpName)
	defer tmpFile.Close()

	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Bug fix: check the Close error explicitly so buffered write failures
	// aren't silently lost before the atomic rename (previously ignored).
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// IPv4 firewall is only enabled if there is a bridge ipv4.address or fan mode, and ipv4.firewall enabled.
// When using fan bridge.mode, there can be an empty ipv4.address, so we assume it is active.
func (n *bridge) hasIPv4Firewall() bool {
	// An unset ipv4.firewall key means enabled by default.
	firewallEnabled := n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])
	hasAddress := n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"})

	return hasAddress && firewallEnabled
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// IPv6 firewall is only enabled if there is a bridge ipv6.address and ipv6.firewall enabled.
func (n *bridge) hasIPv6Firewall() bool {
	// An unset ipv6.firewall key means enabled by default.
	firewallEnabled := n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])
	hasAddress := !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"})

	return hasAddress && firewallEnabled
}
// hasDHCPv4 indicates whether the network has DHCPv4 enabled.
// An empty ipv4.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv4() bool {
	return n.config["ipv4.dhcp"] == "" || shared.IsTrue(n.config["ipv4.dhcp"])
}
// hasDHCPv6 indicates whether the network has DHCPv6 enabled.
// An empty ipv6.dhcp setting indicates enabled by default.
func (n *bridge) hasDHCPv6() bool {
	return n.config["ipv6.dhcp"] == "" || shared.IsTrue(n.config["ipv6.dhcp"])
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// DHCP is disabled on this network.
	if !n.hasDHCPv4() {
		return nil
	}

	// Fan mode: derive the DHCP subnet from the first global unicast IPv4
	// address on the fan bridge itself. Only detectable once the network has
	// started, but with no address on the bridge DHCP wouldn't work anyway.
	if n.config["bridge.mode"] == "fan" {
		iface, err := net.InterfaceByName(n.name)
		if err != nil {
			return nil
		}

		addrs, err := iface.Addrs()
		if err != nil {
			return nil
		}

		for _, addr := range addrs {
			// Renamed from "ip" to avoid shadowing the ip package; ParseCIDR
			// guarantees a non-nil address when err is nil.
			ipAddr, subnet, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}

			if ipAddr.To4() != nil && ipAddr.IsGlobalUnicast() {
				return subnet // Use first IPv4 unicast address on host for DHCP subnet.
			}
		}

		return nil // No addresses found, means DHCP must be disabled.
	}

	// Non-fan mode. Return configured bridge subnet directly.
	_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
	if err != nil {
		return nil
	}

	return subnet
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	// Nothing to report when DHCPv6 is disabled.
	if !n.hasDHCPv6() {
		return nil
	}

	// An invalid/unset ipv6.address yields no subnet.
	if _, subnet, err := net.ParseCIDR(n.config["ipv6.address"]); err == nil {
		return subnet
	}

	return nil
}
// forwardConvertToFirewallForwards converts forwards into format compatible with the firewall package.
func (n *bridge) forwardConvertToFirewallForwards(listenAddress net.IP, defaultTargetAddress net.IP, portMaps []*forwardPortMap) []firewallDrivers.AddressForward {
	var vips []firewallDrivers.AddressForward

	// An explicit default target captures traffic not matched by any port map.
	if defaultTargetAddress != nil {
		vips = append(vips, firewallDrivers.AddressForward{
			ListenAddress: listenAddress,
			TargetAddress: defaultTargetAddress,
		})
	}

	// Convert each port map into an address forward entry.
	for _, pm := range portMaps {
		vips = append(vips, firewallDrivers.AddressForward{
			ListenAddress: listenAddress,
			Protocol:      pm.protocol,
			TargetAddress: pm.targetAddress,
			ListenPorts:   pm.listenPorts,
			TargetPorts:   pm.targetPorts,
		})
	}

	return vips
}
// bridgeProjectNetworks takes a map of all networks in all projects and returns a filtered map of bridge networks.
func (n *bridge) bridgeProjectNetworks(projectNetworks map[string]map[int64]api.Network) map[string][]*api.Network {
	filtered := make(map[string][]*api.Network)

	for netProject, networks := range projectNetworks {
		for _, ni := range networks {
			network := ni // Local var creating pointer to rather than iterator.

			// Only keep bridge networks.
			if network.Type != "bridge" {
				continue
			}

			// Appending to a nil map value allocates the slice on first use.
			filtered[netProject] = append(filtered[netProject], &network)
		}
	}

	return filtered
}
// bridgeNetworkExternalSubnets returns a list of external subnets used by bridge networks. Networks are considered
// to be using external subnets for their ipv4.address and/or ipv6.address if they have NAT disabled, and/or if
// they have external NAT addresses specified.
func (n *bridge) bridgeNetworkExternalSubnets(bridgeProjectNetworks map[string][]*api.Network) ([]externalSubnetUsage, error) {
	externalSubnets := make([]externalSubnetUsage, 0)

	for netProject, networks := range bridgeProjectNetworks {
		for _, netInfo := range networks {
			// Check both IP families; their config keys share a common prefix.
			for _, keyPrefix := range []string{"ipv4", "ipv6"} {
				// If NAT is disabled, then network subnet is an external subnet.
				if !shared.IsTrue(netInfo.Config[fmt.Sprintf("%s.nat", keyPrefix)]) {
					key := fmt.Sprintf("%s.address", keyPrefix)

					_, ipNet, err := net.ParseCIDR(netInfo.Config[key])
					if err != nil {
						continue // Skip invalid/unspecified network addresses.
					}

					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
					})
				}

				// Find any external subnets used for network SNAT.
				if netInfo.Config[fmt.Sprintf("%s.nat.address", keyPrefix)] != "" {
					key := fmt.Sprintf("%s.nat.address", keyPrefix)

					// SNAT addresses are single hosts, so use a full-length mask (/32 or /128).
					subnetSize := 128
					if keyPrefix == "ipv4" {
						subnetSize = 32
					}

					_, ipNet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", netInfo.Config[key], subnetSize))
					if err != nil {
						return nil, errors.Wrapf(err, "Failed parsing %q of %q in project %q", key, netInfo.Name, netProject)
					}

					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
						networkSNAT:    true,
					})
				}

				// Find any routes being used by the network.
				for _, cidr := range util.SplitNTrimSpace(netInfo.Config[fmt.Sprintf("%s.routes", keyPrefix)], ",", -1, true) {
					_, ipNet, err := net.ParseCIDR(cidr)
					if err != nil {
						continue // Skip invalid/unspecified network addresses.
					}

					externalSubnets = append(externalSubnets, externalSubnetUsage{
						subnet:         *ipNet,
						networkProject: netProject,
						networkName:    netInfo.Name,
					})
				}
			}
		}
	}

	return externalSubnets, nil
}
// bridgedNICExternalRoutes returns a list of external routes currently used by bridged NICs that are connected to
// networks specified.
func (n *bridge) bridgedNICExternalRoutes(bridgeProjectNetworks map[string][]*api.Network) ([]externalSubnetUsage, error) {
	externalRoutes := make([]externalSubnetUsage, 0)

	// Walk every instance in the cluster, inspecting its effective devices.
	err := n.state.Cluster.InstanceList(nil, func(inst db.Instance, p db.Project, profiles []api.Profile) error {
		// Get the instance's effective network project name.
		instNetworkProject := project.NetworkProjectFromRecord(&p)
		if instNetworkProject != project.Default {
			return nil // Managed bridge networks can only exist in default project.
		}

		// Expand profile devices into the instance's full effective device set.
		devices := db.ExpandInstanceDevices(deviceConfig.NewDevices(db.DevicesToAPI(inst.Devices)), profiles)

		// Iterate through each of the instance's devices, looking for bridged NICs that are linked to
		// networks specified.
		for devName, devConfig := range devices {
			if devConfig["type"] != "nic" {
				continue
			}

			// Check whether the NIC device references one of the networks supplied.
			if !NICUsesNetwork(devConfig, bridgeProjectNetworks[instNetworkProject]...) {
				continue
			}

			// For bridged NICs that are connected to networks specified, check if they have any
			// routes or external routes configured, and if so add them to the list to return.
			for _, key := range []string{"ipv4.routes", "ipv6.routes", "ipv4.routes.external", "ipv6.routes.external"} {
				for _, cidr := range util.SplitNTrimSpace(devConfig[key], ",", -1, true) {
					_, ipNet, _ := net.ParseCIDR(cidr)
					if ipNet == nil {
						// Skip if NIC device doesn't have a valid route.
						continue
					}

					externalRoutes = append(externalRoutes, externalSubnetUsage{
						subnet:          *ipNet,
						networkProject:  instNetworkProject,
						networkName:     devConfig["network"],
						instanceProject: inst.Project,
						instanceName:    inst.Name,
						instanceDevice:  devName,
					})
				}
			}
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return externalRoutes, nil
}
// getExternalSubnetInUse returns information about usage of external subnets by bridge networks (and NICs
// connected to them) on this member.
// It aggregates three sources: external subnets of other bridge networks,
// external routes on bridged NICs, and network forward listen addresses.
func (n *bridge) getExternalSubnetInUse() ([]externalSubnetUsage, error) {
	var err error
	var projectNetworks map[string]map[int64]api.Network
	var projectNetworksForwardsOnUplink map[string]map[int64][]string

	// Load both data sets in a single database transaction.
	err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		// Get all managed networks across all projects.
		projectNetworks, err = tx.GetCreatedNetworks()
		if err != nil {
			return errors.Wrapf(err, "Failed to load all networks")
		}

		// Get all network forward listen addresses for forwards assigned to this specific cluster member.
		projectNetworksForwardsOnUplink, err = tx.GetProjectNetworkForwardListenAddressesOnMember()
		if err != nil {
			return errors.Wrapf(err, "Failed loading network forward listen addresses")
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Get managed bridge networks.
	bridgeProjectNetworks := n.bridgeProjectNetworks(projectNetworks)

	// Get external subnets used by other managed bridge networks.
	bridgeNetworkExternalSubnets, err := n.bridgeNetworkExternalSubnets(bridgeProjectNetworks)
	if err != nil {
		return nil, err
	}

	// Get external routes configured on bridged NICs.
	bridgedNICExternalRoutes, err := n.bridgedNICExternalRoutes(bridgeProjectNetworks)
	if err != nil {
		return nil, err
	}

	// Merge both lists into one pre-sized result slice.
	externalSubnets := make([]externalSubnetUsage, 0, len(bridgeNetworkExternalSubnets)+len(bridgedNICExternalRoutes))
	externalSubnets = append(externalSubnets, bridgeNetworkExternalSubnets...)
	externalSubnets = append(externalSubnets, bridgedNICExternalRoutes...)

	// Add forward listen addresses to this list.
	for projectName, networks := range projectNetworksForwardsOnUplink {
		for networkID, listenAddresses := range networks {
			for _, listenAddress := range listenAddresses {
				// Convert listen address to subnet.
				listenAddressNet, err := ParseIPToNet(listenAddress)
				if err != nil {
					return nil, fmt.Errorf("Invalid existing forward listen address %q", listenAddress)
				}

				// Create an externalSubnetUsage for the listen address by using the network ID
				// of the listen address to retrieve the already loaded network name from the
				// projectNetworks map.
				externalSubnets = append(externalSubnets, externalSubnetUsage{
					subnet:         *listenAddressNet,
					networkProject: projectName,
					networkName:    projectNetworks[projectName][networkID].Name,
				})
			}
		}
	}

	return externalSubnets, nil
}
// ForwardCreate creates a network forward.
// It validates the listen address against all external subnets in use, records the forward in the
// database, applies the firewall rules, enables bridge port hairpin mode where needed, and refreshes
// BGP prefixes. All steps are reverted on failure.
func (n *bridge) ForwardCreate(forward api.NetworkForwardsPost, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.

	// Check if there is an existing forward using the same listen address.
	_, _, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, forward.ListenAddress)
	if err == nil {
		return api.StatusErrorf(http.StatusConflict, "A forward for that listen address already exists")
	}

	// Convert listen address to subnet so we can check it's valid and can be used.
	listenAddressNet, err := ParseIPToNet(forward.ListenAddress)
	if err != nil {
		return errors.Wrapf(err, "Failed parsing address forward listen address %q", forward.ListenAddress)
	}

	_, err = n.forwardValidate(listenAddressNet.IP, &forward.NetworkForwardPut)
	if err != nil {
		return err
	}

	externalSubnetsInUse, err := n.getExternalSubnetInUse()
	if err != nil {
		return err
	}

	// Check the listen address subnet doesn't fall within any existing network external subnets.
	for _, externalSubnetUser := range externalSubnetsInUse {
		// Skip our own network's SNAT address (as it can be used for NICs in the network).
		if externalSubnetUser.networkSNAT && externalSubnetUser.networkProject == n.project && externalSubnetUser.networkName == n.name {
			continue
		}

		// Skip our own network (but not NIC devices on our own network).
		if externalSubnetUser.networkProject == n.project && externalSubnetUser.networkName == n.name && externalSubnetUser.instanceDevice == "" {
			continue
		}

		if SubnetContains(&externalSubnetUser.subnet, listenAddressNet) || SubnetContains(listenAddressNet, &externalSubnetUser.subnet) {
			// This error is purposefully vague so that it doesn't reveal any names of
			// resources potentially outside of the network.
			return fmt.Errorf("Forward listen address %q overlaps with another network or NIC", listenAddressNet.String())
		}
	}

	revert := revert.New()
	defer revert.Fail()

	// Create forward DB record.
	forwardID, err := n.state.Cluster.CreateNetworkForward(n.ID(), memberSpecific, &forward)
	if err != nil {
		return err
	}

	revert.Add(func() {
		// Best effort rollback of the DB record, firewall rules and BGP prefixes.
		n.state.Cluster.DeleteNetworkForward(n.ID(), forwardID)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})

	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}

	// Check if hairpin mode needs to be enabled on active NIC bridge ports.
	if n.config["bridge.driver"] != "openvswitch" {
		brNetfilterEnabled := false
		for _, ipVersion := range []uint{4, 6} {
			if BridgeNetfilterEnabled(ipVersion) == nil {
				brNetfilterEnabled = true
				break
			}
		}

		// If br_netfilter is enabled and bridge has forwards, we enable hairpin mode on each NIC's bridge
		// port in case any of the forwards target the NIC and the instance attempts to connect to the
		// forward's listener. Without hairpin mode the target of the forward will not be able to
		// connect to the listener.
		if brNetfilterEnabled {
			listenAddresses, err := n.state.Cluster.GetNetworkForwardListenAddresses(n.ID(), true)
			if err != nil {
				return fmt.Errorf("Failed loading network forwards: %w", err)
			}

			// If we are the first forward on this bridge, enable hairpin mode on active NIC ports.
			if len(listenAddresses) <= 1 {
				var localNode string

				err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
					localNode, err = tx.GetLocalNodeName()
					if err != nil {
						return errors.Wrapf(err, "Failed to get local member name")
					}

					return err
				})
				if err != nil {
					return err
				}

				filter := db.InstanceFilter{
					Node: &localNode,
				}

				// Walk every local instance, looking for bridged NIC ports on this network.
				err = n.state.Cluster.InstanceList(&filter, func(inst db.Instance, p db.Project, profiles []api.Profile) error {
					// Get the instance's effective network project name.
					instNetworkProject := project.NetworkProjectFromRecord(&p)

					if instNetworkProject != project.Default {
						return nil // Managed bridge networks can only exist in default project.
					}

					devices := db.ExpandInstanceDevices(deviceConfig.NewDevices(db.DevicesToAPI(inst.Devices)), profiles)

					// Iterate through each of the instance's devices, looking for bridged NICs
					// that are linked to this network.
					for devName, devConfig := range devices {
						if devConfig["type"] != "nic" {
							continue
						}

						// Check whether the NIC device references our network.
						if !NICUsesNetwork(devConfig, &api.Network{Name: n.Name()}) {
							continue
						}

						// The host-side interface name is recorded in the instance's volatile config.
						hostName := inst.Config[fmt.Sprintf("volatile.%s.host_name", devName)]
						if InterfaceExists(hostName) {
							link := &ip.Link{Name: hostName}
							err = link.BridgeLinkSetHairpin(true)
							if err != nil {
								return errors.Wrapf(err, "Error enabling hairpin mode on bridge port %q", link.Name)
							}
							n.logger.Debug("Enabled hairpin mode on NIC bridge port", log.Ctx{"inst": inst.Name, "project": inst.Project, "device": devName, "dev": link.Name})
						}
					}

					return nil
				})
				if err != nil {
					return err
				}
			}
		}
	}

	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}

	revert.Success()
	return nil
}
// ForwardUpdate updates a network forward.
// A no-op update (identical Etag) returns early without touching the DB or firewall.
// DB and firewall changes are reverted on failure.
func (n *bridge) ForwardUpdate(listenAddress string, req api.NetworkForwardPut, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.
	curForwardID, curForward, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, listenAddress)
	if err != nil {
		return err
	}

	_, err = n.forwardValidate(net.ParseIP(curForward.ListenAddress), &req)
	if err != nil {
		return err
	}

	// Compare current and requested configs via their Etag hashes to detect a no-op update.
	curForwardEtagHash, err := util.EtagHash(curForward.Etag())
	if err != nil {
		return err
	}

	newForward := api.NetworkForward{
		ListenAddress:     curForward.ListenAddress,
		NetworkForwardPut: req,
	}

	newForwardEtagHash, err := util.EtagHash(newForward.Etag())
	if err != nil {
		return err
	}

	if curForwardEtagHash == newForwardEtagHash {
		return nil // Nothing has changed.
	}

	revert := revert.New()
	defer revert.Fail()

	err = n.state.Cluster.UpdateNetworkForward(n.ID(), curForwardID, &newForward.NetworkForwardPut)
	if err != nil {
		return err
	}

	revert.Add(func() {
		// Best effort rollback to the previous config.
		n.state.Cluster.UpdateNetworkForward(n.ID(), curForwardID, &curForward.NetworkForwardPut)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})

	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}

	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}

	revert.Success()
	return nil
}
// ForwardDelete deletes a network forward.
// The DB record is removed first, then the firewall rules and BGP prefixes are
// re-applied; on failure the record is restored on a best-effort basis.
func (n *bridge) ForwardDelete(listenAddress string, clientType request.ClientType) error {
	memberSpecific := true // bridge supports per-member forwards.

	fwdID, fwd, err := n.state.Cluster.GetNetworkForward(n.ID(), memberSpecific, listenAddress)
	if err != nil {
		return err
	}

	rev := revert.New()
	defer rev.Fail()

	// Remove the forward's DB record.
	err = n.state.Cluster.DeleteNetworkForward(n.ID(), fwdID)
	if err != nil {
		return err
	}

	rev.Add(func() {
		// Best effort restore of the deleted forward and local state.
		restored := api.NetworkForwardsPost{
			NetworkForwardPut: fwd.NetworkForwardPut,
			ListenAddress:     fwd.ListenAddress,
		}
		n.state.Cluster.CreateNetworkForward(n.ID(), memberSpecific, &restored)
		n.forwardSetupFirewall()
		n.forwardBGPSetupPrefixes()
	})

	// Re-apply the remaining forwards to the firewall.
	err = n.forwardSetupFirewall()
	if err != nil {
		return err
	}

	// Refresh exported BGP prefixes on local member.
	err = n.forwardBGPSetupPrefixes()
	if err != nil {
		return fmt.Errorf("Failed applying BGP prefixes for address forwards: %w", err)
	}

	rev.Success()
	return nil
}
// forwardSetupFirewall applies all network address forwards defined for this network and this member.
// It also raises (or resolves) a DB warning if br_netfilter is disabled for an IP version in use,
// since forwards won't be reachable from instances on the bridge in that case.
func (n *bridge) forwardSetupFirewall() error {
	memberSpecific := true // Get all forwards for this cluster member.
	forwards, err := n.state.Cluster.GetNetworkForwards(n.ID(), memberSpecific)
	if err != nil {
		return fmt.Errorf("Failed loading network forwards: %w", err)
	}

	var fwForwards []firewallDrivers.AddressForward
	ipVersions := make(map[uint]struct{})

	for _, forward := range forwards {
		// Convert listen address to subnet so we can check it's valid and can be used.
		listenAddressNet, err := ParseIPToNet(forward.ListenAddress)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing address forward listen address %q", forward.ListenAddress)
		}

		// Track which IP versions we are using.
		if listenAddressNet.IP.To4() == nil {
			ipVersions[6] = struct{}{}
		} else {
			ipVersions[4] = struct{}{}
		}

		portMaps, err := n.forwardValidate(listenAddressNet.IP, &forward.NetworkForwardPut)
		if err != nil {
			return fmt.Errorf("Failed validating firewall address forward for listen address %q: %w", forward.ListenAddress, err)
		}

		fwForwards = append(fwForwards, n.forwardConvertToFirewallForwards(listenAddressNet.IP, net.ParseIP(forward.Config["target_address"]), portMaps)...)
	}

	if len(forwards) > 0 {
		// Check if br_netfilter is enabled, and warn if not.
		brNetfilterWarning := false
		for ipVersion := range ipVersions {
			err = BridgeNetfilterEnabled(ipVersion)
			if err != nil {
				brNetfilterWarning = true
				msg := fmt.Sprintf("IPv%d bridge netfilter not enabled. Instances using the bridge will not be able to connect to the forward listen IPs", ipVersion)
				n.logger.Warn(msg, log.Ctx{"err": err})
				// Record the problem as a persistent warning (best effort).
				err = n.state.Cluster.UpsertWarningLocalNode(n.project, dbCluster.TypeNetwork, int(n.id), db.WarningProxyBridgeNetfilterNotEnabled, fmt.Sprintf("%s: %v", msg, err))
				if err != nil {
					n.logger.Warn("Failed to create warning", log.Ctx{"err": err})
				}
			}
		}

		if !brNetfilterWarning {
			// Clear any previously recorded br_netfilter warning (best effort).
			err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(n.state.Cluster, n.project, db.WarningProxyBridgeNetfilterNotEnabled, dbCluster.TypeNetwork, int(n.id))
			if err != nil {
				n.logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
			}
		}
	}

	err = n.state.Firewall.NetworkApplyForwards(n.name, fwForwards)
	if err != nil {
		return fmt.Errorf("Failed applying firewall address forwards: %w", err)
	}

	return nil
}
// Leases returns a list of leases for the bridged network. It will reach out to other cluster members as needed.
// The projectName passed here refers to the initial project from the API request which may differ from the network's project.
// Lease sources, in order: uplink volatile addresses, static NIC addresses (plus EUI64/SLAAC records),
// the local dnsmasq lease file, and finally leases collected from other cluster members.
func (n *bridge) Leases(projectName string, clientType request.ClientType) ([]api.NetworkLease, error) {
	leases := []api.NetworkLease{}
	projectMacs := []string{}

	// Get all static leases.
	if clientType == request.ClientTypeNormal {
		// Get the downstream networks.
		if n.project == project.Default {
			var err error

			// Load all the networks.
			var projectNetworks map[string]map[int64]api.Network
			err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
				projectNetworks, err = tx.GetCreatedNetworks()
				return err
			})
			if err != nil {
				return nil, err
			}

			// Look for networks using the current network as an uplink.
			for projectName, networks := range projectNetworks {
				for _, network := range networks {
					if network.Config["network"] != n.name {
						continue
					}

					// Found a network, add leases.
					for _, k := range []string{"volatile.network.ipv4.address", "volatile.network.ipv6.address"} {
						v := network.Config[k]
						if v != "" {
							leases = append(leases, api.NetworkLease{
								Hostname: fmt.Sprintf("%s-%s.uplink", projectName, network.Name),
								Address:  v,
								Type:     "uplink",
							})
						}
					}
				}
			}
		}

		// Get all the instances.
		instances, err := instance.LoadByProject(n.state, projectName)
		if err != nil {
			return nil, err
		}

		for _, inst := range instances {
			// Go through all its devices (including profiles).
			for k, dev := range inst.ExpandedDevices() {
				// Skip uninteresting entries.
				if dev["type"] != "nic" {
					continue
				}

				nicType, err := nictype.NICType(n.state, inst.Project(), dev)
				if err != nil || nicType != "bridged" {
					continue
				}

				// Temporarily populate parent from network setting if used.
				if dev["network"] != "" {
					dev["parent"] = dev["network"]
				}

				if dev["parent"] != n.name {
					continue
				}

				// Fill in the hwaddr from volatile.
				if dev["hwaddr"] == "" {
					dev["hwaddr"] = inst.LocalConfig()[fmt.Sprintf("volatile.%s.hwaddr", k)]
				}

				// Record the MAC.
				if dev["hwaddr"] != "" {
					projectMacs = append(projectMacs, dev["hwaddr"])
				}

				// Add the lease.
				if dev["ipv4.address"] != "" {
					leases = append(leases, api.NetworkLease{
						Hostname: inst.Name(),
						Address:  dev["ipv4.address"],
						Hwaddr:   dev["hwaddr"],
						Type:     "static",
						Location: inst.Location(),
					})
				}

				if dev["ipv6.address"] != "" {
					leases = append(leases, api.NetworkLease{
						Hostname: inst.Name(),
						Address:  dev["ipv6.address"],
						Hwaddr:   dev["hwaddr"],
						Type:     "static",
						Location: inst.Location(),
					})
				}

				// Add EUI64 records (IPv6 address derived from the MAC) when the network has an
				// IPv6 subnet and stateful DHCPv6 is not enabled.
				ipv6Address := n.config["ipv6.address"]
				if ipv6Address != "" && ipv6Address != "none" && !shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
					_, netAddress, _ := net.ParseCIDR(ipv6Address)
					hwAddr, _ := net.ParseMAC(dev["hwaddr"])
					if netAddress != nil && hwAddr != nil {
						ipv6, err := eui64.ParseMAC(netAddress.IP, hwAddr)
						if err == nil {
							leases = append(leases, api.NetworkLease{
								Hostname: inst.Name(),
								Address:  ipv6.String(),
								Hwaddr:   dev["hwaddr"],
								Type:     "dynamic",
								Location: inst.Location(),
							})
						}
					}
				}
			}
		}
	}

	// Local server name.
	var err error
	var serverName string
	err = n.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		serverName, err = tx.GetLocalNodeName()
		return err
	})
	if err != nil {
		return nil, err
	}

	// Get dynamic leases.
	leaseFile := shared.VarPath("networks", n.name, "dnsmasq.leases")
	if !shared.PathExists(leaseFile) {
		return leases, nil
	}

	content, err := ioutil.ReadFile(leaseFile)
	if err != nil {
		return nil, err
	}

	for _, lease := range strings.Split(string(content), "\n") {
		fields := strings.Fields(lease)
		if len(fields) >= 5 {
			// Parse the MAC.
			mac := GetMACSlice(fields[1])
			macStr := strings.Join(mac, ":")

			// If the second column doesn't yield a full MAC (17 chars), fall back to the
			// last 17 characters of the 5th column — NOTE(review): presumably the dnsmasq
			// client ID; confirm against the dnsmasq lease file format.
			if len(macStr) < 17 && fields[4] != "" {
				macStr = fields[4][len(fields[4])-17:]
			}

			// Look for an existing static entry.
			found := false
			for _, entry := range leases {
				if entry.Hwaddr == macStr && entry.Address == fields[2] {
					found = true
					break
				}
			}

			if found {
				continue
			}

			// DHCPv6 leases can't be tracked down to a MAC so clear the field.
			// This means that instance project filtering will not work on IPv6 leases.
			if strings.Contains(fields[2], ":") {
				macStr = ""
			}

			// Skip leases that don't match any of the instance MACs from the project (only when we
			// have populated the projectMacs list in ClientTypeNormal mode). Otherwise get all local
			// leases and they will be filtered on the server handling the end user request.
			if clientType == request.ClientTypeNormal && macStr != "" && !shared.StringInSlice(macStr, projectMacs) {
				continue
			}

			// Add the lease to the list.
			leases = append(leases, api.NetworkLease{
				Hostname: fields[3],
				Address:  fields[2],
				Hwaddr:   macStr,
				Type:     "dynamic",
				Location: serverName,
			})
		}
	}

	// Collect leases from other servers.
	if clientType == request.ClientTypeNormal {
		notifier, err := cluster.NewNotifier(n.state, n.state.Endpoints.NetworkCert(), n.state.ServerCert(), cluster.NotifyAll)
		if err != nil {
			return nil, err
		}

		err = notifier(func(client lxd.InstanceServer) error {
			memberLeases, err := client.GetNetworkLeases(n.name)
			if err != nil {
				return err
			}

			// Add local leases from other members, filtering them for MACs that belong to the project.
			for _, lease := range memberLeases {
				if lease.Hwaddr != "" && shared.StringInSlice(lease.Hwaddr, projectMacs) {
					leases = append(leases, lease)
				}
			}

			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	return leases, nil
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package misc
import (
"fmt"
"strings"
"github.com/coreos/mantle/kola/cluster"
"github.com/coreos/mantle/kola/register"
)
// init registers the network listeners test with the kola test registry.
// (Restored "&register.Test" from a mis-encoded "&reg;" entity.)
func init() {
	register.Register(&register.Test{
		Run:         NetworkListeners,
		ClusterSize: 1,
		Name:        "coreos.network.listeners",
		UserData:    `#cloud-config`,
	})
}
// listener describes an expected listening socket: the owning process name
// and the port (service name, as printed by lsof) it listens on.
type listener struct {
	process string
	port    string
}
// checkListeners asserts that every listening socket reported by lsof for the
// given protocol matches one of the expected listeners. Unexpected listeners
// fail the test, except for systemd's transient renamed child processes
// (names in parentheses), which are only logged.
func checkListeners(c cluster.TestCluster, protocol string, filter string, listeners []listener) {
	m := c.Machines()[0]

	var command string
	if filter != "" {
		command = fmt.Sprintf("sudo lsof +c0 -i%v -s%v", protocol, filter)
	} else {
		command = fmt.Sprintf("sudo lsof +c0 -i%v", protocol)
	}

	output, err := m.SSH(command)
	if err != nil {
		c.Fatalf("Failed to run %s: output %s, status: %v", command, output, err)
	}

	processes := strings.Split(string(output), "\n")
	for i, process := range processes {
		// skip header
		if i == 0 {
			continue
		}

		data := strings.Fields(process)

		// Bug fix: skip blank lines (e.g. from the trailing newline of the
		// lsof output) and truncated rows. Previously data[0]/data[8] were
		// indexed unconditionally, panicking with index out of range.
		if len(data) < 9 {
			continue
		}

		processname := data[0]
		pid := data[1]
		portdata := strings.Split(data[8], ":")
		port := portdata[len(portdata)-1]

		valid := false
		for _, listener := range listeners {
			if processname == listener.process && port == listener.port {
				valid = true
				break
			}
		}

		if !valid {
			// systemd renames child processes in parentheses before closing their fds
			if processname[0] == '(' {
				c.Logf("Ignoring %q listener process: %q (pid %s) on %q", protocol, processname, pid, port)
			} else {
				c.Fatalf("Unexpected %q listener process: %q (pid %s) on %q", protocol, processname, pid, port)
			}
		}
	}
}
// NetworkListeners verifies that only the expected processes hold listening
// TCP and UDP sockets on a freshly booted machine.
func NetworkListeners(c cluster.TestCluster) {
	tcpExpected := []listener{
		{process: "systemd", port: "ssh"},
	}
	udpExpected := []listener{
		{process: "systemd-network", port: "dhcpv6-client"},
		{process: "systemd-network", port: "bootpc"},
	}

	// TCP sockets are filtered to listening state; UDP has no such state.
	checkListeners(c, "TCP", "TCP:LISTEN", tcpExpected)
	checkListeners(c, "UDP", "", udpExpected)
}
kola/tests/network-listener: check for listeners with netstat
The lsof checker in use before had occasional false positives on UDP
since it was unable to distinguish listening from requesting.
Specifically, timesyncd appeared a couple times even though it was only
making a DNS request.
This switches to netstat which, as best I can tell, has better filtering
for 'listening and udp'.
This also changes ports to be numeric simply because I find that easier
to read.
Functionally, this should be the same as before.
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package misc
import (
"regexp"
"strings"
"github.com/coreos/mantle/kola/cluster"
"github.com/coreos/mantle/kola/register"
)
// init registers the network listeners test with the kola test registry.
// (Restored "&register.Test" from a mis-encoded "&reg;" entity.)
func init() {
	register.Register(&register.Test{
		Run:         NetworkListeners,
		ClusterSize: 1,
		Name:        "coreos.network.listeners",
		UserData:    `#cloud-config`,
	})
}
// listener describes an expected listening socket as reported by netstat.
type listener struct {
	// udp or tcp; note each v4 variant will also match 'v6'
	protocol string
	// numeric port from the "Local Address" column
	port string
	// program name from the "PID/Program name" column
	process string
}
// checkListeners runs `netstat -plutn` on the first machine and asserts that
// every listening socket matches one of expectedListeners. Unexpected
// listeners are reported as errors, except for systemd's transient renamed
// child processes (names in parentheses), which are only logged.
func checkListeners(c cluster.TestCluster, expectedListeners []listener) {
	m := c.Machines()[0]

	command := "sudo netstat -plutn"
	output, err := m.SSH(command)
	if err != nil {
		c.Fatalf("Failed to run %s: output %s, status: %v", command, output, err)
	}

	processes := strings.Split(string(output), "\n")

	// Verify the header is as expected so we notice format changes rather than
	// silently misparsing.
	if len(processes) < 2 {
		c.Fatalf("expected at least two lines of netstat output: %q", output)
	}
	if processes[0] != "Active Internet connections (only servers)" {
		c.Fatalf("netstat output has changed format: %q", output)
	}
	if !regexp.MustCompile(`Proto\s+Recv-Q\s+Send-Q\s+Local Address\s+Foreign Address\s+State\s+PID/Program name`).MatchString(processes[1]) {
		c.Fatalf("netstat output has changed format: %q", output)
	}

	// skip header
	processes = processes[2:]

NextProcess:
	for _, line := range processes {
		// Bug fix: skip blank lines (e.g. the empty element produced by the
		// trailing newline of the output), which previously tripped the
		// field-count Fatalf below.
		if strings.TrimSpace(line) == "" {
			continue
		}

		parts := strings.Fields(line)
		// One gotcha: udp's 'state' field is optional, so it's possible to have 6
		// or 7 parts depending on that.
		if len(parts) != 6 && len(parts) != 7 {
			c.Fatalf("unexpected number of parts on line: %q in output %q", line, output)
		}

		proto := parts[0]
		portdata := strings.Split(parts[3], ":")
		port := portdata[len(portdata)-1]

		pidProgram := parts[len(parts)-1]
		pidProgramParts := strings.SplitN(pidProgram, "/", 2)
		if len(pidProgramParts) != 2 {
			// Bug fix: previously referenced parts[6] here, which panics for
			// valid 6-field (udp without state) lines.
			c.Errorf("%v did not contain pid and program parts; full output: %q", pidProgram, output)
			continue
		}
		pid, process := pidProgramParts[0], pidProgramParts[1]

		for _, expected := range expectedListeners {
			if strings.HasPrefix(proto, expected.protocol) && // allow expected tcp to match tcp6
				expected.port == port &&
				expected.process == process {
				// matches expected process
				continue NextProcess
			}
		}

		// systemd renames child processes in parentheses before closing their fds.
		if process[0] == '(' {
			c.Logf("Ignoring %q listener process: %q (pid %s) on %q", proto, process, pid, port)
			continue
		}

		c.Logf("full netstat output: %q", output)
		c.Errorf("Unexpected listener process: %q", line)
	}
}
// NetworkListeners asserts that the only listening sockets on a default
// machine are sshd and the DHCP client sockets.
func NetworkListeners(c cluster.TestCluster) {
	expectedListeners := []listener{
		{"tcp", "22", "systemd"},          // ssh
		{"udp", "68", "systemd-network"},  // bootpc (DHCPv4 client) — comment was previously swapped with 546
		{"udp", "546", "systemd-network"}, // dhcpv6-client
	}
	checkListeners(c, expectedListeners)
}
|
package network
import (
"bufio"
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the path that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises updates to the forkdns servers list.
var forkdnsServersLock sync.Mutex

// bridge represents a LXD bridge network.
type bridge struct {
	common // Shared network driver implementation.
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge interface on
// all cluster nodes. It is not suitable to use a static MAC address when "bridge.external_interfaces" is
// non-empty and the bridge interface has no IPv4 or IPv6 address set. This is because in a clustered
// environment the same bridge config is applied to all nodes, and if the bridge is being used to connect
// multiple nodes to the same network segment it would cause MAC conflicts to use the same MAC on all nodes.
// If an IP address is specified then connecting multiple nodes to the same network segment would also cause
// IP conflicts, so if an IP is defined then we assume this is not being done. However if IP addresses are
// explicitly set to "none" and "bridge.external_interfaces" is set then it may not be safe to use the same
// MAC address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	// Fan mode breaks if using the same MAC address on each node.
	if config["bridge.mode"] == "fan" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)
	}

	// With no IPs set and external interfaces attached, multiple nodes may share a
	// network segment, so a shared static MAC would conflict.
	noIPs := config["ipv4.address"] == "none" && config["ipv6.address"] == "none"
	if config["bridge.external_interfaces"] != "" && noIPs {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}

	return nil
}
// fillConfig fills requested config with any default values.
// Missing address settings default to "auto", and any "auto" values are then
// resolved to concrete random subnets (or the default gateway subnet for fan).
func (n *bridge) fillConfig(config map[string]string) error {
	// Set some default values where needed.
	if config["bridge.mode"] == "fan" {
		if config["fan.underlay_subnet"] == "" {
			config["fan.underlay_subnet"] = "auto"
		}
	} else {
		// Non-fan mode: default to an automatic IPv4 subnet with NAT.
		if config["ipv4.address"] == "" {
			config["ipv4.address"] = "auto"
		}

		if config["ipv4.address"] == "auto" && config["ipv4.nat"] == "" {
			config["ipv4.nat"] = "true"
		}

		// Only default to an automatic IPv6 subnet when IPv6 isn't disabled kernel-wide.
		if config["ipv6.address"] == "" {
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.address"] == "auto" && config["ipv6.nat"] == "" {
			config["ipv6.nat"] = "true"
		}
	}

	// Now populate "auto" values where needed.
	if config["ipv4.address"] == "auto" {
		subnet, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = subnet
	}

	if config["ipv6.address"] == "auto" {
		subnet, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = subnet
	}

	if config["fan.underlay_subnet"] == "auto" {
		subnet, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = subnet.String()
	}

	return nil
}
// ValidateName validates network name.
func (n *bridge) ValidateName(name string) error {
	// The bridge interface inherits the network name, so the name must also be a
	// valid kernel interface name.
	if err := validInterfaceName(name); err != nil {
		return err
	}

	// Apply common name validation that applies to all network types.
	return n.common.ValidateName(name)
}
// Validate network config.
// Builds the per-key validation rules (including dynamic tunnel.NAME.* rules), runs
// the common validator, then performs composite checks (fan-mode restrictions, MTU
// bounds, cluster-wide MAC safety and OVN range consistency).
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return validate.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}

			return nil
		}),
		"bridge.hwaddr": validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":    validate.Optional(validate.IsNetworkMTU),
		"bridge.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"standard", "fan"})
		},

		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": func(value string) error {
			if value == "auto" {
				return nil
			}

			return validate.Optional(validate.IsNetworkV4)(value)
		},
		"fan.type": func(value string) error {
			return validate.IsOneOf(value, []string{"vxlan", "ipip"})
		},

		"ipv4.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		},
		"ipv4.firewall": validate.Optional(validate.IsBool),
		"ipv4.nat":      validate.Optional(validate.IsBool),
		"ipv4.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),

		"ipv6.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}

			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		},
		"ipv6.firewall": validate.Optional(validate.IsBool),
		"ipv6.nat":      validate.Optional(validate.IsBool),
		"ipv6.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address":   validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":          validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":   validate.IsAny,
		"ipv6.dhcp.stateful": validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":   validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":        validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":       validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":    validate.Optional(validate.IsNetworkRangeV6List),

		"dns.domain": validate.IsAny,
		"dns.search": validate.IsAny,
		"dns.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},

		"raw.dnsmasq": validate.IsAny,

		"maas.subnet.ipv4": validate.IsAny,
		"maas.subnet.ipv6": validate.IsAny,
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return validate.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface":
				// Bug fix: this case was previously misspelled "inteface", so the
				// "tunnel.NAME.interface" key never received its validation rule.
				rules[k] = validInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	err := n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		// Bridge mode checks: fan mode forbids most ipv4.* and all ipv6.* keys, and
		// fan.* keys are only allowed in fan mode.
		if bridgeMode == "fan" && strings.HasPrefix(k, "ipv4.") && !shared.StringInSlice(k, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(k, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(k, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks.
		if k == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			// Fan tunnelling adds encapsulation overhead, capping the usable MTU.
			if bridgeMode == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		_, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return err
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		_, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return err
		}
	}

	return nil
}
// Create checks whether the bridge interface name is used already.
func (n *bridge) Create(clientType cluster.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// Refuse creation when an interface with this name already exists on the host.
	ifacePath := fmt.Sprintf("/sys/class/net/%s", n.name)
	if shared.PathExists(ifacePath) {
		return fmt.Errorf("Network interface %q already exists", n.name)
	}

	return nil
}
// isRunning returns whether the network is up.
func (n *bridge) isRunning() bool {
	// The bridge counts as running when its kernel interface entry exists in sysfs.
	sysPath := fmt.Sprintf("/sys/class/net/%s", n.name)
	return shared.PathExists(sysPath)
}
// Delete deletes a network.
func (n *bridge) Delete(clientType cluster.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Stop the network first if it is currently up.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Remove the apparmor profiles associated with this network.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	// Perform the common deletion steps (database removal etc).
	return n.common.delete(clientType)
}
// Rename renames a network.
// It refuses to rename onto an existing interface name, stops the network if
// it is running, moves the forkdns log file to match the new name, performs
// the common rename steps (database etc) and brings the network back up.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if shared.PathExists(fmt.Sprintf("/sys/class/net/%s", newName)) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	// Bug fix: pass the absolute log path to os.Rename. Previously the bare
	// relative file name was used as the source while the existence check used
	// shared.LogPath, so the rename targeted the wrong (CWD-relative) path.
	forkDNSLogPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))
	if shared.PathExists(forkDNSLogPath) {
		err := os.Rename(forkDNSLogPath, shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	// A fresh start is just a setup with no previous config to clean up after.
	return n.setup(nil)
}
// setup (re)applies the full bridge configuration: bridge creation, MTU, MAC,
// addresses, firewall rules, fan/tunnel devices and the dnsmasq/forkdns
// daemons. oldConfig (may be nil) is the previous network config, used to
// clean up firewall rules that the new config no longer requires.
func (n *bridge) setup(oldConfig map[string]string) error {
	// If we are in mock mode, just no-op.
	if n.state.OS.MockMode {
		return nil
	}

	n.logger.Debug("Setting up network")

	if n.status == api.NetworkStatusPending {
		return fmt.Errorf("Cannot start pending network")
	}

	// Create directory.
	if !shared.PathExists(shared.VarPath("networks", n.name)) {
		err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
		if err != nil {
			return err
		}
	}

	// Create the bridge interface if doesn't exist.
	if !n.isRunning() {
		if n.config["bridge.driver"] == "openvswitch" {
			ovs := openvswitch.NewOVS()
			if !ovs.Installed() {
				return fmt.Errorf("Open vSwitch isn't installed on this system")
			}

			err := ovs.BridgeAdd(n.name, false)
			if err != nil {
				return err
			}
		} else {
			_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
			if err != nil {
				return err
			}
		}
	}

	// Get a list of tunnels.
	tunnels := n.getTunnels()

	// IPv6 bridge configuration.
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		if !shared.PathExists("/proc/sys/net/ipv6") {
			return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
		}

		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
		if err != nil {
			return err
		}

		err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
		if err != nil {
			return err
		}
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device.
	for _, iface := range ifaces {
		if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
			_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
			if err != nil {
				return err
			}
		}
	}

	// Set the MTU.
	mtu := ""
	if n.config["bridge.mtu"] != "" {
		mtu = n.config["bridge.mtu"]
	} else if len(tunnels) > 0 {
		mtu = "1400"
	} else if n.config["bridge.mode"] == "fan" {
		if n.config["fan.type"] == "ipip" {
			mtu = "1480"
		} else {
			mtu = "1450"
		}
	}

	// Attempt to add a dummy device to the bridge to force the MTU (best-effort, errors ignored).
	if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
		_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
		if err == nil {
			_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
			if err == nil {
				AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
			}
		}
	}

	// Now, set a default MTU.
	if mtu == "" {
		mtu = "1500"
	}

	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
	if err != nil {
		return err
	}

	// Always prefer static MAC address if set.
	hwAddr := n.config["bridge.hwaddr"]

	// If no cluster wide static MAC address set, then generate one.
	if hwAddr == "" {
		var seedNodeID int64

		if n.checkClusterWideMACSafe(n.config) != nil {
			// Use cluster node's ID to generate a stable per-node & network derived random MAC in fan
			// mode or when cluster-wide MAC addresses are unsafe.
			seedNodeID = n.state.Cluster.GetNodeID()
		} else {
			// Use a static cluster node of 0 to generate a stable per-network derived random MAC if
			// safe to do so.
			seedNodeID = 0
		}

		// Load server certificate. This needs to be the same certificate for all nodes in a cluster.
		cert, err := util.LoadCert(n.state.OS.VarDir)
		if err != nil {
			return err
		}

		// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
		// standalone nodes on the same external network don't generate the same MAC for their networks).
		// It relies on the certificate being the same for all nodes in a cluster to allow the same MAC to
		// be generated on each bridge interface in the network (if safe to do so).
		seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())

		// Generate a hash from the seed string and network ID to use as seed for random MAC.
		// Use the FNV-1a hash algorithm to convert our seed string into an int64 for use as seed.
		hash := fnv.New64a()
		_, err = io.WriteString(hash, seed)
		if err != nil {
			return err
		}

		// Initialise a non-cryptographic random number generator using the stable seed.
		r := rand.New(rand.NewSource(int64(hash.Sum64())))
		hwAddr = randomHwaddr(r)
		n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
	}

	// Set the MAC address on the bridge interface if specified.
	if hwAddr != "" {
		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", hwAddr)
		if err != nil {
			return err
		}
	}

	// Enable VLAN filtering for Linux bridges.
	if n.config["bridge.driver"] != "openvswitch" {
		err = BridgeVLANFilterSetStatus(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}

		// Set the default PVID for new ports to 1.
		err = BridgeVLANSetDefaultPVID(n.name, "1")
		if err != nil {
			n.logger.Warn(fmt.Sprintf("%v", err))
		}
	}

	// Bring it up.
	_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
	if err != nil {
		return err
	}

	// Add any listed existing external interface.
	if n.config["bridge.external_interfaces"] != "" {
		for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
			entry = strings.TrimSpace(entry)
			iface, err := net.InterfaceByName(entry)
			if err != nil {
				n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
				continue
			}

			// Only attach interfaces that carry no global unicast address of their own.
			unused := true
			addrs, err := iface.Addrs()
			if err == nil {
				for _, addr := range addrs {
					ip, _, err := net.ParseCIDR(addr.String())
					if ip != nil && err == nil && ip.IsGlobalUnicast() {
						unused = false
						break
					}
				}
			}

			if !unused {
				return fmt.Errorf("Only unconfigured network interfaces can be bridged")
			}

			err = AttachInterface(n.name, entry)
			if err != nil {
				return err
			}
		}
	}

	// Remove any existing IPv4 firewall rules.
	if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 4)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err := n.bootRoutesV4()
	if err != nil {
		return err
	}

	// Flush all IPv4 addresses and routes.
	_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv4 firewall (includes fan).
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		if n.DHCPv4Subnet() != nil && n.hasIPv4Firewall() {
			// Setup basic iptables overrides for DHCP/DNS.
			err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
			if err != nil {
				return err
			}
		}

		// Attempt a workaround for broken DHCP clients.
		if n.hasIPv4Firewall() {
			err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
			if err != nil {
				return err
			}
		}

		// Allow forwarding.
		if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
			err = util.SysctlSet("net/ipv4/ip_forward", "1")
			if err != nil {
				return err
			}

			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.hasIPv4Firewall() {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
				if err != nil {
					return err
				}
			}
		}
	}

	// Start building process using subprocess package.
	command := "dnsmasq"
	dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
		"--except-interface=lo",
		"--pid-file=", // Disable attempt at writing a PID file.
		"--no-ping",   // --no-ping is very important to prevent delays to lease file updates.
		fmt.Sprintf("--interface=%s", n.name)}

	dnsmasqVersion, err := dnsmasq.GetVersion()
	if err != nil {
		return err
	}

	// --dhcp-rapid-commit option is only supported on >2.79.
	minVer, _ := version.NewDottedVersion("2.79")
	if dnsmasqVersion.Compare(minVer) > 0 {
		dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
	}

	if !daemon.Debug {
		// --quiet options are only supported on >2.67.
		minVer, _ := version.NewDottedVersion("2.67")
		if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
			dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
		}
	}

	// Configure IPv4.
	if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
		// Parse the subnet.
		ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
		if err != nil {
			return err
		}

		// Update the dnsmasq config.
		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
		if n.DHCPv4Subnet() != nil {
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			if n.config["ipv4.dhcp.gateway"] != "" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
			}

			if mtu != "1500" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
			}

			dnsSearch := n.config["dns.search"]
			if dnsSearch != "" {
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
			}

			expiry := "1h"
			if n.config["ipv4.dhcp.expiry"] != "" {
				expiry = n.config["ipv4.dhcp.expiry"]
			}

			if n.config["ipv4.dhcp.ranges"] != "" {
				for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
					dhcpRange = strings.TrimSpace(dhcpRange)
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
				}
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
			}
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
		if err != nil {
			return err
		}

		// Configure NAT.
		if shared.IsTrue(n.config["ipv4.nat"]) {
			// If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
			var srcIP net.IP
			if n.config["ipv4.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv4.nat.address"])
			}

			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes.
		if n.config["ipv4.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv4 routes to interface.
		n.applyBootRoutesV4(ctRoutes)
	}

	// Remove any existing IPv6 firewall rules.
	if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
		err = n.state.Firewall.NetworkClear(n.name, 6)
		if err != nil {
			return err
		}
	}

	// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
	// This is because the kernel removes any static routes on an interface when all addresses removed.
	ctRoutes, err = n.bootRoutesV6()
	if err != nil {
		return err
	}

	// Flush all IPv6 addresses and routes.
	_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
	if err != nil {
		return err
	}

	// Configure IPv6.
	if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Enable IPv6 for the subnet.
		err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
		if err != nil {
			return err
		}

		// Parse the subnet.
		ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
		if err != nil {
			return err
		}

		subnetSize, _ := subnet.Mask.Size()
		if subnetSize > 64 {
			n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
		}

		// Update the dnsmasq config.
		dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
		if n.DHCPv6Subnet() != nil {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				// Setup basic iptables overrides for DHCP/DNS.
				err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
				if err != nil {
					return err
				}
			}

			// Build DHCP configuration.
			if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
			}

			expiry := "1h"
			if n.config["ipv6.dhcp.expiry"] != "" {
				expiry = n.config["ipv6.dhcp.expiry"]
			}

			if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
				if n.config["ipv6.dhcp.ranges"] != "" {
					for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
						dhcpRange = strings.TrimSpace(dhcpRange)
						dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
					}
				} else {
					dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
				}
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
			}
		} else {
			dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
		}

		// Allow forwarding.
		if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
			// Get a list of proc entries.
			entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
			if err != nil {
				return err
			}

			// First set accept_ra to 2 for everything.
			for _, entry := range entries {
				content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
				if err == nil && string(content) != "1\n" {
					continue
				}

				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			// Then set forwarding for all of them.
			for _, entry := range entries {
				err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
				if err != nil && !os.IsNotExist(err) {
					return err
				}
			}

			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
				if err != nil {
					return err
				}
			}
		} else {
			if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
				err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
				if err != nil {
					return err
				}
			}
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
		if err != nil {
			return err
		}

		// Configure NAT.
		if shared.IsTrue(n.config["ipv6.nat"]) {
			var srcIP net.IP
			if n.config["ipv6.nat.address"] != "" {
				srcIP = net.ParseIP(n.config["ipv6.nat.address"])
			}

			if n.config["ipv6.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
				if err != nil {
					return err
				}
			}
		}

		// Add additional routes.
		if n.config["ipv6.routes"] != "" {
			for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
				route = strings.TrimSpace(route)
				_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
				if err != nil {
					return err
				}
			}
		}

		// Restore container specific IPv6 routes to interface.
		n.applyBootRoutesV6(ctRoutes)
	}

	// Configure the fan.
	dnsClustered := false
	dnsClusteredAddress := ""
	var overlaySubnet *net.IPNet
	if n.config["bridge.mode"] == "fan" {
		tunName := fmt.Sprintf("%s-fan", n.name)

		// Parse the underlay.
		underlay := n.config["fan.underlay_subnet"]
		_, underlaySubnet, err := net.ParseCIDR(underlay)
		if err != nil {
			// Bug fix: propagate the parse error. Previously "return nil" silently
			// aborted setup while reporting success.
			return err
		}

		// Parse the overlay.
		overlay := n.config["fan.overlay_subnet"]
		if overlay == "" {
			overlay = "240.0.0.0/8"
		}

		_, overlaySubnet, err = net.ParseCIDR(overlay)
		if err != nil {
			return err
		}

		// Get the address.
		fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
		if err != nil {
			return err
		}

		addr := strings.Split(fanAddress, "/")
		if n.config["fan.type"] == "ipip" {
			fanAddress = fmt.Sprintf("%s/24", addr[0])
		}

		// Update the MTU based on overlay device (if available).
		fanMtuInt, err := GetDevMTU(devName)
		if err == nil {
			// Apply overhead.
			if n.config["fan.type"] == "ipip" {
				fanMtuInt = fanMtuInt - 20
			} else {
				fanMtuInt = fanMtuInt - 50
			}

			// Apply changes.
			fanMtu := fmt.Sprintf("%d", fanMtuInt)
			if fanMtu != mtu {
				mtu = fanMtu
				if n.config["bridge.driver"] != "openvswitch" {
					_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
					if err != nil {
						return err
					}
				}

				_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
				if err != nil {
					return err
				}
			}
		}

		// Parse the host subnet.
		_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
		if err != nil {
			return err
		}

		// Add the address.
		_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
		if err != nil {
			return err
		}

		// Update the dnsmasq config.
		expiry := "1h"
		if n.config["ipv4.dhcp.expiry"] != "" {
			expiry = n.config["ipv4.dhcp.expiry"]
		}

		dnsmasqCmd = append(dnsmasqCmd, []string{
			fmt.Sprintf("--listen-address=%s", addr[0]),
			"--dhcp-no-override", "--dhcp-authoritative",
			fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
			fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
			"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)

		// Setup the tunnel.
		if n.config["fan.type"] == "ipip" {
			_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
			if err != nil {
				return err
			}

			// Fails if the map is already set.
			shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))

			_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
			if err != nil {
				return err
			}
		} else {
			vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
			_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
			if err != nil {
				return err
			}

			err = AttachInterface(n.name, tunName)
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
			if err != nil {
				return err
			}

			_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
			if err != nil {
				return err
			}
		}

		// Configure NAT.
		if n.config["ipv4.nat"] == "" || shared.IsTrue(n.config["ipv4.nat"]) {
			if n.config["ipv4.nat.order"] == "after" {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
				if err != nil {
					return err
				}
			} else {
				err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
				if err != nil {
					return err
				}
			}
		}

		// Setup clustered DNS.
		clusterAddress, err := node.ClusterAddress(n.state.Node)
		if err != nil {
			return err
		}

		// If clusterAddress is non-empty, this indicates the intention for this node to be
		// part of a cluster and so we should ensure that dnsmasq and forkdns are started
		// in cluster mode. Note: During LXD initialisation the cluster may not actually be
		// setup yet, but we want the DNS processes to be ready for when it is.
		if clusterAddress != "" {
			dnsClustered = true
		}

		dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
	}

	// Configure tunnels.
	for _, tunnel := range tunnels {
		getConfig := func(key string) string {
			return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
		}

		tunProtocol := getConfig("protocol")
		tunLocal := getConfig("local")
		tunRemote := getConfig("remote")
		tunName := fmt.Sprintf("%s-%s", n.name, tunnel)

		// Configure the tunnel.
		cmd := []string{"ip", "link", "add", "dev", tunName}
		if tunProtocol == "gre" {
			// Skip partial configs.
			if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
		} else if tunProtocol == "vxlan" {
			tunGroup := getConfig("group")
			tunInterface := getConfig("interface")

			// Skip partial configs.
			if tunProtocol == "" {
				continue
			}

			cmd = append(cmd, []string{"type", "vxlan"}...)

			if tunLocal != "" && tunRemote != "" {
				cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
			} else {
				if tunGroup == "" {
					tunGroup = "239.0.0.1"
				}

				devName := tunInterface
				if devName == "" {
					_, devName, err = DefaultGatewaySubnetV4()
					if err != nil {
						return err
					}
				}

				cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
			}

			tunPort := getConfig("port")
			if tunPort == "" {
				tunPort = "0"
			}
			cmd = append(cmd, []string{"dstport", tunPort}...)

			tunID := getConfig("id")
			if tunID == "" {
				tunID = "1"
			}
			cmd = append(cmd, []string{"id", tunID}...)

			tunTTL := getConfig("ttl")
			if tunTTL == "" {
				tunTTL = "1"
			}
			cmd = append(cmd, []string{"ttl", tunTTL}...)
		}

		// Create the interface.
		_, err = shared.RunCommand(cmd[0], cmd[1:]...)
		if err != nil {
			return err
		}

		// Bridge it and bring up.
		err = AttachInterface(n.name, tunName)
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
		if err != nil {
			return err
		}

		_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
		if err != nil {
			return err
		}
	}

	// Generate and load apparmor profiles.
	err = apparmor.NetworkLoad(n.state, n)
	if err != nil {
		return err
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	err = dnsmasq.Kill(n.name, false)
	if err != nil {
		return err
	}

	err = n.killForkDNS()
	if err != nil {
		return err
	}

	// Configure dnsmasq.
	if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
		// Setup the dnsmasq domain.
		dnsDomain := n.config["dns.domain"]
		if dnsDomain == "" {
			dnsDomain = "lxd"
		}

		if n.config["dns.mode"] != "none" {
			if dnsClustered {
				dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
				dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
				dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
			} else {
				dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
			}
		}

		// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
		err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
		if err != nil {
			return err
		}
		dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))

		// Attempt to drop privileges.
		if n.state.OS.UnprivUser != "" {
			dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
		}

		if n.state.OS.UnprivGroup != "" {
			dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
		}

		// Create DHCP hosts directory.
		if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
			err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
			if err != nil {
				return err
			}
		}

		// Check for dnsmasq.
		_, err := exec.LookPath("dnsmasq")
		if err != nil {
			return fmt.Errorf("dnsmasq is required for LXD managed bridges")
		}

		// Update the static leases.
		err = UpdateDNSMasqStatic(n.state, n.name)
		if err != nil {
			return err
		}

		// Create subprocess object dnsmasq.
		p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
		if err != nil {
			return fmt.Errorf("Failed to create subprocess: %s", err)
		}

		// Apply AppArmor confinement.
		if n.config["raw.dnsmasq"] == "" {
			p.SetApparmor(apparmor.DnsmasqProfileName(n))
		} else {
			n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
		}

		// Start dnsmasq.
		err = p.Start()
		if err != nil {
			return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
		}

		err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
		if err != nil {
			// Kill Process if started, but could not save the file.
			err2 := p.Stop()
			// Bug fix: check the Stop() error (err2), not err (which is always
			// non-nil inside this branch, making the old check always true).
			if err2 != nil {
				return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
			}

			return fmt.Errorf("Failed to save subprocess details: %s", err)
		}

		// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
		if dnsClustered {
			// Create forkdns servers directory.
			if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
				err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
				if err != nil {
					return err
				}
			}

			// Create forkdns servers.conf file if doesn't exist.
			f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
			if err != nil {
				return err
			}
			f.Close()

			err = n.spawnForkDNS(dnsClusteredAddress)
			if err != nil {
				return err
			}
		}
	} else {
		// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
		leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
		if shared.PathExists(leasesPath) {
			err := os.Remove(leasesPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
			}
		}

		// And same for our PID file.
		pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
		if shared.PathExists(pidPath) {
			err := os.Remove(pidPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
			}
		}
	}

	return nil
}
// Stop stops the network.
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")

	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		if err := ovs.BridgeDelete(n.name); err != nil {
			return err
		}
	} else {
		if _, err := shared.RunCommand("ip", "link", "del", "dev", n.name); err != nil {
			return err
		}
	}

	// Cleanup firewall rules.
	if usesIPv4Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 4); err != nil {
			return err
		}
	}

	if usesIPv6Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 6); err != nil {
			return err
		}
	}

	// Kill any existing dnsmasq and forkdns daemon for this network.
	if err := dnsmasq.Kill(n.name, false); err != nil {
		return err
	}

	if err := n.killForkDNS(); err != nil {
		return err
	}

	// Get a list of interfaces.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Cleanup any existing tunnel device (they are named "<bridge>-<tunnel>").
	tunnelPrefix := fmt.Sprintf("%s-", n.name)
	for _, iface := range ifaces {
		if !strings.HasPrefix(iface.Name, tunnelPrefix) {
			continue
		}

		if _, err := shared.RunCommand("ip", "link", "del", "dev", iface.Name); err != nil {
			return err
		}
	}

	// Unload apparmor profiles.
	return apparmor.NetworkUnload(n.state, n)
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType cluster.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})

	// Populate default values if they are missing.
	err := n.fillConfig(newNetwork.Config)
	if err != nil {
		return err
	}

	dbUpdateNeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeded {
		return nil // Nothing changed.
	}

	reverter := revert.New()
	defer reverter.Fail()

	// Register a revert hook which undoes everything on failure.
	reverter.Add(func() {
		// Reset changes to all nodes and database.
		n.common.update(oldNetwork, targetNode, clientType)

		// Reset any change that was made to local bridge.
		n.setup(newNetwork.Config)
	})

	// Bring the bridge down entirely if the driver has changed.
	if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
		err = n.Stop()
		if err != nil {
			return err
		}
	}

	// Detach any external interfaces that should no longer be attached.
	if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
		// Build the set of interfaces the new config wants to keep attached.
		keep := []string{}
		for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
			keep = append(keep, strings.TrimSpace(dev))
		}

		for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			if dev == "" || shared.StringInSlice(dev, keep) {
				continue
			}

			if shared.PathExists(fmt.Sprintf("/sys/class/net/%s", dev)) {
				err = DetachInterface(n.name, dev)
				if err != nil {
					return err
				}
			}
		}
	}

	// Apply changes to database.
	err = n.common.update(newNetwork, targetNode, clientType)
	if err != nil {
		return err
	}

	// Restart the network if needed.
	if len(changedKeys) > 0 {
		err = n.setup(oldNetwork.Config)
		if err != nil {
			return err
		}
	}

	reverter.Success()
	return nil
}
// spawnForkDNS starts a forkdns daemon for this network listening on port 1053
// of listenAddress, with privileges dropped and an AppArmor profile applied,
// and records its PID file under the network's state directory.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain.
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)

	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill the process as it started, but its details could not be saved.
		// Bug fix: previously this checked err (always non-nil here) instead of
		// err2, so the Stop failure message was returned even when Stop succeeded.
		err2 := p.Stop()
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	cert := n.state.Endpoints.NetworkCert()
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		// Query each remote member for the state of this network on that member.
		client, err := cluster.Connect(node.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network (first global inet
			// address per node, due to the break below).
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the deduplicated set of tunnel names defined in the
// network config (keys of the form "tunnel.<name>.<option>").
func (n *bridge) getTunnels() []string {
	tunnels := []string{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		// The tunnel name is the second dot-separated component of the key.
		tunnelName := strings.Split(key, ".")[1]
		if !shared.StringInSlice(tunnelName, tunnels) {
			tunnels = append(tunnels, tunnelName)
		}
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
// These are typically instance-specific routes added with proto boot that the
// kernel drops when the interface's addresses are flushed.
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: previously Start/Wait errors were ignored, so a failed "ip"
	// invocation was reported as an empty (successful) route list.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		// Strip the "linkdown" flag so the route can be re-applied later.
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
// These are typically instance-specific routes added with proto boot that the
// kernel drops when the interface's addresses are flushed.
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Bug fix: previously Start/Wait errors were ignored, so a failed "ip"
	// invocation was reported as an empty (successful) route list.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		// Strip the "linkdown" flag so the route can be re-applied later.
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Failures are logged rather than returned, as a missing route cannot be
// recovered at this point.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, route := range routes {
		args := []string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}
		args = append(args, strings.Fields(route)...)

		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
// Failures are logged rather than returned, as a missing route cannot be
// recovered at this point.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		args := []string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}
		args = append(args, strings.Fields(route)...)

		if _, err := shared.RunCommand("ip", args...); err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress derives this node's FAN bridge address from the underlay and overlay
// subnets. Returns the bridge address in CIDR form, the name of the underlay
// interface used, and the underlay IP address it was derived from.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	// The overlay prefix plus the underlay host bits plus an 8-bit per-node subnet must fit in 32 bits.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get the IP
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ip.String()

	// Force into IPv4 format
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}

	// Compute the IP: overwrite the overlay prefix bytes, then shift the
	// underlay host byte(s) into the middle of the address.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}

	// The bridge takes host address 1 within the mapped per-node subnet.
	ipBytes[3] = 1

	// err is nil at this point.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err
}
// addressForSubnet scans the host's interfaces and returns the first global
// address that falls within subnet, together with the interface's name.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}

		for _, addr := range addrs {
			ipAddr, ipNet, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}

			// Skip /32 addresses on interfaces in case VIPs are being used on a different
			// interface than the intended underlay subnet interface.
			if ones, bits := ipNet.Mask.Size(); ones == 32 && bits == 32 {
				continue
			}

			if subnet.Contains(ipAddr) {
				return ipAddr, iface.Name, nil
			}
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops the forkdns daemon for this network if one is running
// (identified by its saved PID file). A missing PID file or an already-stopped
// process is not an error.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Bug fix: error message previously said "dnsmasq" but this kills forkdns.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Write the list to a temporary file first (truncating any previous attempt).
	f, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	defer f.Close()

	for _, addr := range addresses {
		if _, err := f.WriteString(addr + "\n"); err != nil {
			return err
		}
	}

	// Close before rename so the full contents are on disk.
	f.Close()

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	return os.Rename(tmpName, permName)
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// An unset "ipv4.firewall" key means enabled by default.
func (n *bridge) hasIPv4Firewall() bool {
	return n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// An unset "ipv6.firewall" key means enabled by default.
func (n *bridge) hasIPv6Firewall() bool {
	return n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// An empty ipv4.dhcp setting indicates DHCP enabled by default.
	dhcp := n.config["ipv4.dhcp"]
	if dhcp != "" && !shared.IsTrue(dhcp) {
		return nil
	}

	// Derive the subnet from the bridge's IPv4 address; nil if unparseable.
	_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
	if err != nil {
		return nil
	}

	return subnet
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	// An empty ipv6.dhcp setting indicates DHCP enabled by default.
	dhcp := n.config["ipv6.dhcp"]
	if dhcp != "" && !shared.IsTrue(dhcp) {
		return nil
	}

	// Derive the subnet from the bridge's IPv6 address; nil if unparseable.
	_, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
	if err != nil {
		return nil
	}

	return subnet
}
Revert "lxd/network/driver/bridge: Exclude /32 underlay addresses from overlay address generation"
This reverts commit 602ecadd3c7a1286bf2d1245394f06824c712394.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package network
import (
"bufio"
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/dnsmasq"
"github.com/lxc/lxd/lxd/dnsmasq/dhcpalloc"
"github.com/lxc/lxd/lxd/network/openvswitch"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/validate"
"github.com/lxc/lxd/shared/version"
)
// ForkdnsServersListPath defines the directory (relative to the network's state
// directory) that contains the forkdns server candidate file.
const ForkdnsServersListPath = "forkdns.servers"

// ForkdnsServersListFile is the file that contains the server candidates list.
const ForkdnsServersListFile = "servers.conf"

// forkdnsServersLock serialises writers of the forkdns servers list file.
var forkdnsServersLock sync.Mutex
// bridge represents a LXD bridge network.
type bridge struct {
	common // Embedded shared network driver implementation (config, state, logger, etc.).
}
// checkClusterWideMACSafe returns whether it is safe to use the same MAC address for the bridge interface on all
// cluster nodes. It is not suitable to use a static MAC address when "bridge.external_interfaces" is non-empty and
// the bridge interface has no IPv4 or IPv6 address set. This is because in a clustered environment the same bridge
// config is applied to all nodes, and if the bridge is being used to connect multiple nodes to the same network
// segment it would cause MAC conflicts to use the same MAC on all nodes. If an IP address is specified then
// connecting multiple nodes to the same network segment would also cause IP conflicts, so if an IP is defined
// then we assume this is not being done. However if IP addresses are explicitly set to "none" and
// "bridge.external_interfaces" is set then it may not be safe to use the same MAC address on all nodes.
func (n *bridge) checkClusterWideMACSafe(config map[string]string) error {
	// Fan mode breaks if using the same MAC address on each node.
	if config["bridge.mode"] == "fan" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address in fan mode`)
	}

	// We can't be sure that multiple clustered nodes aren't connected to the same network segment so don't
	// use a static MAC address for the bridge interface to avoid introducing a MAC conflict.
	if config["bridge.external_interfaces"] != "" && config["ipv4.address"] == "none" && config["ipv6.address"] == "none" {
		return fmt.Errorf(`Cannot use static "bridge.hwaddr" MAC address when bridge has no IP addresses and has external interfaces set`)
	}

	return nil
}
// fillConfig fills requested config with any default values. It first defaults
// unset address keys to "auto" (or leaves them unset where inapplicable), then
// resolves all remaining "auto" values to concrete random/detected subnets.
func (n *bridge) fillConfig(config map[string]string) error {
	// Set some default values where needed.
	if config["bridge.mode"] == "fan" {
		if config["fan.underlay_subnet"] == "" {
			config["fan.underlay_subnet"] = "auto"
		}
	} else {
		// Standard mode: default to an automatic IPv4 subnet with NAT.
		if config["ipv4.address"] == "" {
			config["ipv4.address"] = "auto"
		}

		if config["ipv4.address"] == "auto" && config["ipv4.nat"] == "" {
			config["ipv4.nat"] = "true"
		}

		// Only default to an automatic IPv6 subnet when the kernel has IPv6 enabled.
		if config["ipv6.address"] == "" {
			content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6")
			if err == nil && string(content) == "0\n" {
				config["ipv6.address"] = "auto"
			}
		}

		if config["ipv6.address"] == "auto" && config["ipv6.nat"] == "" {
			config["ipv6.nat"] = "true"
		}
	}

	// Now populate "auto" values where needed.
	if config["ipv4.address"] == "auto" {
		subnet, err := randomSubnetV4()
		if err != nil {
			return err
		}

		config["ipv4.address"] = subnet
	}

	if config["ipv6.address"] == "auto" {
		subnet, err := randomSubnetV6()
		if err != nil {
			return err
		}

		config["ipv6.address"] = subnet
	}

	if config["fan.underlay_subnet"] == "auto" {
		// Use the subnet of the default gateway interface as the underlay.
		subnet, _, err := DefaultGatewaySubnetV4()
		if err != nil {
			return err
		}

		config["fan.underlay_subnet"] = subnet.String()
	}

	return nil
}
// ValidateName validates network name.
func (n *bridge) ValidateName(name string) error {
	// The name must be usable as a Linux interface name.
	if err := validInterfaceName(name); err != nil {
		return err
	}

	// Apply common name validation that applies to all network types.
	return n.common.ValidateName(name)
}
// Validate network config. Builds the per-key validation rule set (including
// dynamic rules for tunnel.NAME.* keys), runs per-key validation, then performs
// composite checks (fan-mode restrictions, MTU limits, cluster MAC safety and
// OVN range constraints).
func (n *bridge) Validate(config map[string]string) error {
	// Build driver specific rules dynamically.
	rules := map[string]func(value string) error{
		"bridge.driver": func(value string) error {
			return validate.IsOneOf(value, []string{"native", "openvswitch"})
		},
		"bridge.external_interfaces": validate.Optional(func(value string) error {
			for _, entry := range strings.Split(value, ",") {
				entry = strings.TrimSpace(entry)
				if err := validInterfaceName(entry); err != nil {
					return errors.Wrapf(err, "Invalid interface name %q", entry)
				}
			}
			return nil
		}),
		"bridge.hwaddr": validate.Optional(validate.IsNetworkMAC),
		"bridge.mtu":    validate.Optional(validate.IsNetworkMTU),
		"bridge.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"standard", "fan"})
		},

		"fan.overlay_subnet": validate.Optional(validate.IsNetworkV4),
		"fan.underlay_subnet": func(value string) error {
			if value == "auto" {
				return nil
			}
			return validate.Optional(validate.IsNetworkV4)(value)
		},
		"fan.type": func(value string) error {
			return validate.IsOneOf(value, []string{"vxlan", "ipip"})
		},

		"ipv4.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}
			return validate.Optional(validate.IsNetworkAddressCIDRV4)(value)
		},
		"ipv4.firewall": validate.Optional(validate.IsBool),
		"ipv4.nat":      validate.Optional(validate.IsBool),
		"ipv4.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv4.nat.address":  validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp":         validate.Optional(validate.IsBool),
		"ipv4.dhcp.gateway": validate.Optional(validate.IsNetworkAddressV4),
		"ipv4.dhcp.expiry":  validate.IsAny,
		"ipv4.dhcp.ranges":  validate.Optional(validate.IsNetworkRangeV4List),
		"ipv4.routes":       validate.Optional(validate.IsNetworkV4List),
		"ipv4.routing":      validate.Optional(validate.IsBool),
		"ipv4.ovn.ranges":   validate.Optional(validate.IsNetworkRangeV4List),

		"ipv6.address": func(value string) error {
			if validate.IsOneOf(value, []string{"none", "auto"}) == nil {
				return nil
			}
			return validate.Optional(validate.IsNetworkAddressCIDRV6)(value)
		},
		"ipv6.firewall": validate.Optional(validate.IsBool),
		"ipv6.nat":      validate.Optional(validate.IsBool),
		"ipv6.nat.order": func(value string) error {
			return validate.IsOneOf(value, []string{"before", "after"})
		},
		"ipv6.nat.address":   validate.Optional(validate.IsNetworkAddressV6),
		"ipv6.dhcp":          validate.Optional(validate.IsBool),
		"ipv6.dhcp.expiry":   validate.IsAny,
		"ipv6.dhcp.stateful": validate.Optional(validate.IsBool),
		"ipv6.dhcp.ranges":   validate.Optional(validate.IsNetworkRangeV6List),
		"ipv6.routes":        validate.Optional(validate.IsNetworkV6List),
		"ipv6.routing":       validate.Optional(validate.IsBool),
		"ipv6.ovn.ranges":    validate.Optional(validate.IsNetworkRangeV6List),

		"dns.domain": validate.IsAny,
		"dns.search": validate.IsAny,
		"dns.mode": func(value string) error {
			return validate.IsOneOf(value, []string{"dynamic", "managed", "none"})
		},

		"raw.dnsmasq": validate.IsAny,

		"maas.subnet.ipv4": validate.IsAny,
		"maas.subnet.ipv6": validate.IsAny,
	}

	// Add dynamic validation rules.
	for k := range config {
		// Tunnel keys have the remote name in their name, so extract the real key.
		if strings.HasPrefix(k, "tunnel.") {
			// Validate remote name in key.
			fields := strings.Split(k, ".")
			if len(fields) != 3 {
				return fmt.Errorf("Invalid network configuration key: %s", k)
			}

			if len(n.name)+len(fields[1]) > 14 {
				return fmt.Errorf("Network name too long for tunnel interface: %s-%s", n.name, fields[1])
			}

			tunnelKey := fields[2]

			// Add the correct validation rule for the dynamic field based on last part of key.
			switch tunnelKey {
			case "protocol":
				rules[k] = func(value string) error {
					return validate.IsOneOf(value, []string{"gre", "vxlan"})
				}
			case "local":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "remote":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "port":
				rules[k] = networkValidPort
			case "group":
				rules[k] = validate.Optional(validate.IsNetworkAddress)
			case "id":
				rules[k] = validate.Optional(validate.IsInt64)
			case "interface": // Bug fix: was "inteface", so tunnel.NAME.interface never got a rule.
				rules[k] = validInterfaceName
			case "ttl":
				rules[k] = validate.Optional(validate.IsUint8)
			}
		}
	}

	err := n.validate(config, rules)
	if err != nil {
		return err
	}

	// Perform composite key checks after per-key validation.

	// Validate network name when used in fan mode.
	bridgeMode := config["bridge.mode"]
	if bridgeMode == "fan" && len(n.name) > 11 {
		return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)")
	}

	for k, v := range config {
		key := k

		// Bridge mode checks.
		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && !shared.StringInSlice(key, []string{"ipv4.dhcp.expiry", "ipv4.firewall", "ipv4.nat", "ipv4.nat.order"}) && v != "" {
			return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" {
			return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode")
		}

		if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" {
			return fmt.Errorf("FAN configuration may only be set when in 'fan' mode")
		}

		// MTU checks.
		if key == "bridge.mtu" && v != "" {
			mtu, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return fmt.Errorf("Invalid value for an integer: %s", v)
			}

			ipv6 := config["ipv6.address"]
			if ipv6 != "" && ipv6 != "none" && mtu < 1280 {
				return fmt.Errorf("The minimum MTU for an IPv6 network is 1280")
			}

			ipv4 := config["ipv4.address"]
			if ipv4 != "" && ipv4 != "none" && mtu < 68 {
				return fmt.Errorf("The minimum MTU for an IPv4 network is 68")
			}

			if config["bridge.mode"] == "fan" {
				if config["fan.type"] == "ipip" {
					if mtu > 1480 {
						return fmt.Errorf("Maximum MTU for an IPIP FAN bridge is 1480")
					}
				} else {
					if mtu > 1450 {
						return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450")
					}
				}
			}
		}
	}

	// Check using same MAC address on every cluster node is safe.
	if config["bridge.hwaddr"] != "" {
		err = n.checkClusterWideMACSafe(config)
		if err != nil {
			return err
		}
	}

	// Check IPv4 OVN ranges.
	if config["ipv4.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv4Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv4.dhcp.ranges"] == "" {
				return fmt.Errorf(`"ipv4.ovn.ranges" must be used in conjunction with non-overlapping "ipv4.dhcp.ranges" when DHCPv4 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		_, err := parseIPRanges(config["ipv4.ovn.ranges"], allowedNets...)
		if err != nil {
			return err
		}
	}

	// Check IPv6 OVN ranges.
	if config["ipv6.ovn.ranges"] != "" {
		dhcpSubnet := n.DHCPv6Subnet()
		allowedNets := []*net.IPNet{}

		if dhcpSubnet != nil {
			if config["ipv6.dhcp.ranges"] == "" && shared.IsTrue(config["ipv6.dhcp.stateful"]) {
				return fmt.Errorf(`"ipv6.ovn.ranges" must be used in conjunction with non-overlapping "ipv6.dhcp.ranges" when stateful DHCPv6 is enabled`)
			}

			allowedNets = append(allowedNets, dhcpSubnet)
		}

		_, err := parseIPRanges(config["ipv6.ovn.ranges"], allowedNets...)
		if err != nil {
			return err
		}
	}

	return nil
}
// Create checks whether the bridge interface name is used already.
func (n *bridge) Create(clientType cluster.ClientType) error {
	n.logger.Debug("Create", log.Ctx{"clientType": clientType, "config": n.config})

	// Refuse to create the network if an interface of the same name exists.
	sysPath := fmt.Sprintf("/sys/class/net/%s", n.name)
	if shared.PathExists(sysPath) {
		return fmt.Errorf("Network interface %q already exists", n.name)
	}

	return nil
}
// isRunning returns whether the network is up (i.e. its interface exists in sysfs).
func (n *bridge) isRunning() bool {
	sysPath := fmt.Sprintf("/sys/class/net/%s", n.name)
	return shared.PathExists(sysPath)
}
// Delete deletes a network.
func (n *bridge) Delete(clientType cluster.ClientType) error {
	n.logger.Debug("Delete", log.Ctx{"clientType": clientType})

	// Bring the network down first if it is up.
	if n.isRunning() {
		if err := n.Stop(); err != nil {
			return err
		}
	}

	// Delete apparmor profiles.
	if err := apparmor.NetworkDelete(n.state, n); err != nil {
		return err
	}

	return n.common.delete(clientType)
}
// Rename renames a network: stops it, renames its forkdns log file and common
// state, then starts it again under the new name.
func (n *bridge) Rename(newName string) error {
	n.logger.Debug("Rename", log.Ctx{"newName": newName})

	if shared.PathExists(fmt.Sprintf("/sys/class/net/%s", newName)) {
		return fmt.Errorf("Network interface %q already exists", newName)
	}

	// Bring the network down.
	if n.isRunning() {
		err := n.Stop()
		if err != nil {
			return err
		}
	}

	// Rename forkdns log file.
	// Bug fix: the old path must be resolved via shared.LogPath too; previously only the
	// existence check used LogPath while os.Rename was given the bare relative filename,
	// so the log file was never actually renamed.
	forkDNSLogPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))
	if shared.PathExists(forkDNSLogPath) {
		err := os.Rename(forkDNSLogPath, shared.LogPath(fmt.Sprintf("forkdns.%s.log", newName)))
		if err != nil {
			return err
		}
	}

	// Rename common steps.
	err := n.common.rename(newName)
	if err != nil {
		return err
	}

	// Bring the network up.
	err = n.Start()
	if err != nil {
		return err
	}

	return nil
}
// Start starts the network.
func (n *bridge) Start() error {
	n.logger.Debug("Start")

	// A nil oldConfig means a fresh setup with no previous state to clean up.
	return n.setup(nil)
}
// setup restarts the network.
func (n *bridge) setup(oldConfig map[string]string) error {
// If we are in mock mode, just no-op.
if n.state.OS.MockMode {
return nil
}
n.logger.Debug("Setting up network")
if n.status == api.NetworkStatusPending {
return fmt.Errorf("Cannot start pending network")
}
// Create directory.
if !shared.PathExists(shared.VarPath("networks", n.name)) {
err := os.MkdirAll(shared.VarPath("networks", n.name), 0711)
if err != nil {
return err
}
}
// Create the bridge interface if doesn't exist.
if !n.isRunning() {
if n.config["bridge.driver"] == "openvswitch" {
ovs := openvswitch.NewOVS()
if !ovs.Installed() {
return fmt.Errorf("Open vSwitch isn't installed on this system")
}
err := ovs.BridgeAdd(n.name, false)
if err != nil {
return err
}
} else {
_, err := shared.RunCommand("ip", "link", "add", "dev", n.name, "type", "bridge")
if err != nil {
return err
}
}
}
// Get a list of tunnels.
tunnels := n.getTunnels()
// IPv6 bridge configuration.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
if !shared.PathExists("/proc/sys/net/ipv6") {
return fmt.Errorf("Network has ipv6.address but kernel IPv6 support is missing")
}
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/autoconf", n.name), "0")
if err != nil {
return err
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_dad", n.name), "0")
if err != nil {
return err
}
}
// Get a list of interfaces.
ifaces, err := net.Interfaces()
if err != nil {
return err
}
// Cleanup any existing tunnel device.
for _, iface := range ifaces {
if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) {
_, err = shared.RunCommand("ip", "link", "del", "dev", iface.Name)
if err != nil {
return err
}
}
}
// Set the MTU.
mtu := ""
if n.config["bridge.mtu"] != "" {
mtu = n.config["bridge.mtu"]
} else if len(tunnels) > 0 {
mtu = "1400"
} else if n.config["bridge.mode"] == "fan" {
if n.config["fan.type"] == "ipip" {
mtu = "1480"
} else {
mtu = "1450"
}
}
// Attempt to add a dummy device to the bridge to force the MTU.
if mtu != "" && n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "add", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy")
if err == nil {
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "up")
if err == nil {
AttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name))
}
}
}
// Now, set a default MTU.
if mtu == "" {
mtu = "1500"
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
// Always prefer static MAC address if set.
hwAddr := n.config["bridge.hwaddr"]
// If no cluster wide static MAC address set, then generate one.
if hwAddr == "" {
var seedNodeID int64
if n.checkClusterWideMACSafe(n.config) != nil {
// Use cluster node's ID to g enerate a stable per-node & network derived random MAC in fan
// mode or when cluster-wide MAC addresses are unsafe.
seedNodeID = n.state.Cluster.GetNodeID()
} else {
// Use a static cluster node of 0 to generate a stable per-network derived random MAC if
// safe to do so.
seedNodeID = 0
}
// Load server certificate. This is needs to be the same certificate for all nodes in a cluster.
cert, err := util.LoadCert(n.state.OS.VarDir)
if err != nil {
return err
}
// Generate the random seed, this uses the server certificate fingerprint (to ensure that multiple
// standalone nodes on the same external network don't generate the same MAC for their networks).
// It relies on the certificate being the same for all nodes in a cluster to allow the same MAC to
// be generated on each bridge interface in the network (if safe to do so).
seed := fmt.Sprintf("%s.%d.%d", cert.Fingerprint(), seedNodeID, n.ID())
// Generate a hash from the randSourceNodeID and network ID to use as seed for random MAC.
// Use the FNV-1a hash algorithm to convert our seed string into an int64 for use as seed.
hash := fnv.New64a()
_, err = io.WriteString(hash, seed)
if err != nil {
return err
}
// Initialise a non-cryptographic random number generator using the stable seed.
r := rand.New(rand.NewSource(int64(hash.Sum64())))
hwAddr = randomHwaddr(r)
n.logger.Debug("Stable MAC generated", log.Ctx{"seed": seed, "hwAddr": hwAddr})
}
// Set the MAC address on the bridge interface if specified.
if hwAddr != "" {
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "address", hwAddr)
if err != nil {
return err
}
}
// Enable VLAN filtering for Linux bridges.
if n.config["bridge.driver"] != "openvswitch" {
err = BridgeVLANFilterSetStatus(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
// Set the default PVID for new ports to 1.
err = BridgeVLANSetDefaultPVID(n.name, "1")
if err != nil {
n.logger.Warn(fmt.Sprintf("%v", err))
}
}
// Bring it up.
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
// Add any listed existing external interface.
if n.config["bridge.external_interfaces"] != "" {
for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") {
entry = strings.TrimSpace(entry)
iface, err := net.InterfaceByName(entry)
if err != nil {
n.logger.Warn("Skipping attaching missing external interface", log.Ctx{"interface": entry})
continue
}
unused := true
addrs, err := iface.Addrs()
if err == nil {
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if ip != nil && err == nil && ip.IsGlobalUnicast() {
unused = false
break
}
}
}
if !unused {
return fmt.Errorf("Only unconfigured network interfaces can be bridged")
}
err = AttachInterface(n.name, entry)
if err != nil {
return err
}
}
}
// Remove any existing IPv4 firewall rules.
if usesIPv4Firewall(n.config) || usesIPv4Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 4)
if err != nil {
return err
}
}
// Snapshot container specific IPv4 routes (added with boot proto) before removing IPv4 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err := n.bootRoutesV4()
if err != nil {
return err
}
// Flush all IPv4 addresses and routes.
_, err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv4 firewall (includes fan).
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
if n.DHCPv4Subnet() != nil && n.hasIPv4Firewall() {
// Setup basic iptables overrides for DHCP/DNS.
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 4)
if err != nil {
return err
}
}
// Attempt a workaround for broken DHCP clients.
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupDHCPv4Checksum(n.name)
if err != nil {
return err
}
}
// Allow forwarding.
if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) {
err = util.SysctlSet("net/ipv4/ip_forward", "1")
if err != nil {
return err
}
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, true)
if err != nil {
return err
}
}
} else {
if n.hasIPv4Firewall() {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 4, false)
if err != nil {
return err
}
}
}
}
// Start building process using subprocess package.
command := "dnsmasq"
dnsmasqCmd := []string{"--keep-in-foreground", "--strict-order", "--bind-interfaces",
"--except-interface=lo",
"--pid-file=", // Disable attempt at writing a PID file.
"--no-ping", // --no-ping is very important to prevent delays to lease file updates.
fmt.Sprintf("--interface=%s", n.name)}
dnsmasqVersion, err := dnsmasq.GetVersion()
if err != nil {
return err
}
// --dhcp-rapid-commit option is only supported on >2.79.
minVer, _ := version.NewDottedVersion("2.79")
if dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
}
if !daemon.Debug {
// --quiet options are only supported on >2.67.
minVer, _ := version.NewDottedVersion("2.67")
if err == nil && dnsmasqVersion.Compare(minVer) > 0 {
dnsmasqCmd = append(dnsmasqCmd, []string{"--quiet-dhcp", "--quiet-dhcp6", "--quiet-ra"}...)
}
}
// Configure IPv4.
if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) {
// Parse the subnet.
ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
if err != nil {
return err
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String()))
if n.DHCPv4Subnet() != nil {
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
if n.config["ipv4.dhcp.gateway"] != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=3,%s", n.config["ipv4.dhcp.gateway"]))
}
if mtu != "1500" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=26,%s", mtu))
}
dnsSearch := n.config["dns.search"]
if dnsSearch != "" {
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--dhcp-option-force=119,%s", strings.Trim(dnsSearch, " ")))
}
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
if n.config["ipv4.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", strings.Replace(dhcpRange, "-", ",", -1), expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(subnet, 2).String(), dhcpalloc.GetIP(subnet, -2).String(), expiry)}...)
}
}
// Add the address.
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"])
if err != nil {
return err
}
// Configure NAT
if shared.IsTrue(n.config["ipv4.nat"]) {
//If a SNAT source address is specified, use that, otherwise default to MASQUERADE mode.
var srcIP net.IP
if n.config["ipv4.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv4.nat.address"])
}
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes.
if n.config["ipv4.routes"] != "" {
for _, route := range strings.Split(n.config["ipv4.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-4", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv4 routes to interface.
n.applyBootRoutesV4(ctRoutes)
}
// Remove any existing IPv6 firewall rules.
if usesIPv6Firewall(n.config) || usesIPv6Firewall(oldConfig) {
err = n.state.Firewall.NetworkClear(n.name, 6)
if err != nil {
return err
}
}
// Snapshot container specific IPv6 routes (added with boot proto) before removing IPv6 addresses.
// This is because the kernel removes any static routes on an interface when all addresses removed.
ctRoutes, err = n.bootRoutesV6()
if err != nil {
return err
}
// Flush all IPv6 addresses and routes.
_, err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "-6", "route", "flush", "dev", n.name, "proto", "static")
if err != nil {
return err
}
// Configure IPv6.
if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Enable IPv6 for the subnet.
err := util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/disable_ipv6", n.name), "0")
if err != nil {
return err
}
// Parse the subnet.
ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
if err != nil {
return err
}
subnetSize, _ := subnet.Mask.Size()
if subnetSize > 64 {
n.logger.Warn("IPv6 networks with a prefix larger than 64 aren't properly supported by dnsmasq")
}
// Update the dnsmasq config.
dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...)
if n.DHCPv6Subnet() != nil {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
// Setup basic iptables overrides for DHCP/DNS.
err = n.state.Firewall.NetworkSetupDHCPDNSAccess(n.name, 6)
if err != nil {
return err
}
}
// Build DHCP configuration.
if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...)
}
expiry := "1h"
if n.config["ipv6.dhcp.expiry"] != "" {
expiry = n.config["ipv6.dhcp.expiry"]
}
if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) {
if n.config["ipv6.dhcp.ranges"] != "" {
for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") {
dhcpRange = strings.TrimSpace(dhcpRange)
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%d,%s", strings.Replace(dhcpRange, "-", ",", -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s,%d,%s", dhcpalloc.GetIP(subnet, 2), dhcpalloc.GetIP(subnet, -1), subnetSize, expiry)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...)
}
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...)
}
// Allow forwarding.
if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) {
// Get a list of proc entries.
entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/")
if err != nil {
return err
}
// First set accept_ra to 2 for everything.
for _, entry := range entries {
content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name()))
if err == nil && string(content) != "1\n" {
continue
}
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", entry.Name()), "2")
if err != nil && !os.IsNotExist(err) {
return err
}
}
// Then set forwarding for all of them.
for _, entry := range entries {
err = util.SysctlSet(fmt.Sprintf("net/ipv6/conf/%s/forwarding", entry.Name()), "1")
if err != nil && !os.IsNotExist(err) {
return err
}
}
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, true)
if err != nil {
return err
}
}
} else {
if n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"]) {
err = n.state.Firewall.NetworkSetupForwardingPolicy(n.name, 6, false)
if err != nil {
return err
}
}
}
// Add the address.
_, err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"])
if err != nil {
return err
}
// Configure NAT.
if shared.IsTrue(n.config["ipv6.nat"]) {
var srcIP net.IP
if n.config["ipv6.nat.address"] != "" {
srcIP = net.ParseIP(n.config["ipv6.nat.address"])
}
if n.config["ipv6.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, subnet, srcIP, false)
if err != nil {
return err
}
}
}
// Add additional routes.
if n.config["ipv6.routes"] != "" {
for _, route := range strings.Split(n.config["ipv6.routes"], ",") {
route = strings.TrimSpace(route)
_, err = shared.RunCommand("ip", "-6", "route", "add", "dev", n.name, route, "proto", "static")
if err != nil {
return err
}
}
}
// Restore container specific IPv6 routes to interface.
n.applyBootRoutesV6(ctRoutes)
}
// Configure the fan.
dnsClustered := false
dnsClusteredAddress := ""
var overlaySubnet *net.IPNet
if n.config["bridge.mode"] == "fan" {
tunName := fmt.Sprintf("%s-fan", n.name)
// Parse the underlay.
underlay := n.config["fan.underlay_subnet"]
_, underlaySubnet, err := net.ParseCIDR(underlay)
if err != nil {
return nil
}
// Parse the overlay.
overlay := n.config["fan.overlay_subnet"]
if overlay == "" {
overlay = "240.0.0.0/8"
}
_, overlaySubnet, err = net.ParseCIDR(overlay)
if err != nil {
return err
}
// Get the address.
fanAddress, devName, devAddr, err := n.fanAddress(underlaySubnet, overlaySubnet)
if err != nil {
return err
}
addr := strings.Split(fanAddress, "/")
if n.config["fan.type"] == "ipip" {
fanAddress = fmt.Sprintf("%s/24", addr[0])
}
// Update the MTU based on overlay device (if available).
fanMtuInt, err := GetDevMTU(devName)
if err == nil {
// Apply overhead.
if n.config["fan.type"] == "ipip" {
fanMtuInt = fanMtuInt - 20
} else {
fanMtuInt = fanMtuInt - 50
}
// Apply changes.
fanMtu := fmt.Sprintf("%d", fanMtuInt)
if fanMtu != mtu {
mtu = fanMtu
if n.config["bridge.driver"] != "openvswitch" {
_, err = shared.RunCommand("ip", "link", "set", "dev", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu)
if err != nil {
return err
}
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "mtu", mtu)
if err != nil {
return err
}
}
}
// Parse the host subnet.
_, hostSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/24", addr[0]))
if err != nil {
return err
}
// Add the address.
_, err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, fanAddress)
if err != nil {
return err
}
// Update the dnsmasq config.
expiry := "1h"
if n.config["ipv4.dhcp.expiry"] != "" {
expiry = n.config["ipv4.dhcp.expiry"]
}
dnsmasqCmd = append(dnsmasqCmd, []string{
fmt.Sprintf("--listen-address=%s", addr[0]),
"--dhcp-no-override", "--dhcp-authoritative",
fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")),
fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts")),
"--dhcp-range", fmt.Sprintf("%s,%s,%s", dhcpalloc.GetIP(hostSubnet, 2).String(), dhcpalloc.GetIP(hostSubnet, -2).String(), expiry)}...)
// Setup the tunnel.
if n.config["fan.type"] == "ipip" {
_, err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", "tunl0", "up")
if err != nil {
return err
}
// Fails if the map is already set.
shared.RunCommand("ip", "link", "change", "dev", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
_, err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0])
if err != nil {
return err
}
} else {
vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8)
_, err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay))
if err != nil {
return err
}
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Configure NAT.
if n.config["ipv4.nat"] == "" || shared.IsTrue(n.config["ipv4.nat"]) {
if n.config["ipv4.nat.order"] == "after" {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, true)
if err != nil {
return err
}
} else {
err = n.state.Firewall.NetworkSetupOutboundNAT(n.name, overlaySubnet, nil, false)
if err != nil {
return err
}
}
}
// Setup clustered DNS.
clusterAddress, err := node.ClusterAddress(n.state.Node)
if err != nil {
return err
}
// If clusterAddress is non-empty, this indicates the intention for this node to be
// part of a cluster and so we should ensure that dnsmasq and forkdns are started
// in cluster mode. Note: During LXD initialisation the cluster may not actually be
// setup yet, but we want the DNS processes to be ready for when it is.
if clusterAddress != "" {
dnsClustered = true
}
dnsClusteredAddress = strings.Split(fanAddress, "/")[0]
}
// Configure tunnels.
for _, tunnel := range tunnels {
getConfig := func(key string) string {
return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)]
}
tunProtocol := getConfig("protocol")
tunLocal := getConfig("local")
tunRemote := getConfig("remote")
tunName := fmt.Sprintf("%s-%s", n.name, tunnel)
// Configure the tunnel.
cmd := []string{"ip", "link", "add", "dev", tunName}
if tunProtocol == "gre" {
// Skip partial configs.
if tunProtocol == "" || tunLocal == "" || tunRemote == "" {
continue
}
cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...)
} else if tunProtocol == "vxlan" {
tunGroup := getConfig("group")
tunInterface := getConfig("interface")
// Skip partial configs.
if tunProtocol == "" {
continue
}
cmd = append(cmd, []string{"type", "vxlan"}...)
if tunLocal != "" && tunRemote != "" {
cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...)
} else {
if tunGroup == "" {
tunGroup = "239.0.0.1"
}
devName := tunInterface
if devName == "" {
_, devName, err = DefaultGatewaySubnetV4()
if err != nil {
return err
}
}
cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...)
}
tunPort := getConfig("port")
if tunPort == "" {
tunPort = "0"
}
cmd = append(cmd, []string{"dstport", tunPort}...)
tunID := getConfig("id")
if tunID == "" {
tunID = "1"
}
cmd = append(cmd, []string{"id", tunID}...)
tunTTL := getConfig("ttl")
if tunTTL == "" {
tunTTL = "1"
}
cmd = append(cmd, []string{"ttl", tunTTL}...)
}
// Create the interface.
_, err = shared.RunCommand(cmd[0], cmd[1:]...)
if err != nil {
return err
}
// Bridge it and bring up.
err = AttachInterface(n.name, tunName)
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", tunName, "mtu", mtu, "up")
if err != nil {
return err
}
_, err = shared.RunCommand("ip", "link", "set", "dev", n.name, "up")
if err != nil {
return err
}
}
// Generate and load apparmor profiles.
err = apparmor.NetworkLoad(n.state, n)
if err != nil {
return err
}
// Kill any existing dnsmasq and forkdns daemon for this network.
err = dnsmasq.Kill(n.name, false)
if err != nil {
return err
}
err = n.killForkDNS()
if err != nil {
return err
}
// Configure dnsmasq.
if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) {
// Setup the dnsmasq domain.
dnsDomain := n.config["dns.domain"]
if dnsDomain == "" {
dnsDomain = "lxd"
}
if n.config["dns.mode"] != "none" {
if dnsClustered {
dnsmasqCmd = append(dnsmasqCmd, "-s", dnsDomain)
dnsmasqCmd = append(dnsmasqCmd, "-S", fmt.Sprintf("/%s/%s#1053", dnsDomain, dnsClusteredAddress))
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--rev-server=%s,%s#1053", overlaySubnet, dnsClusteredAddress))
} else {
dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...)
}
}
// Create a config file to contain additional config (and to prevent dnsmasq from reading /etc/dnsmasq.conf)
err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0644)
if err != nil {
return err
}
dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw")))
// Attempt to drop privileges.
if n.state.OS.UnprivUser != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-u", n.state.OS.UnprivUser}...)
}
if n.state.OS.UnprivGroup != "" {
dnsmasqCmd = append(dnsmasqCmd, []string{"-g", n.state.OS.UnprivGroup}...)
}
// Create DHCP hosts directory.
if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) {
err = os.MkdirAll(shared.VarPath("networks", n.name, "dnsmasq.hosts"), 0755)
if err != nil {
return err
}
}
// Check for dnsmasq.
_, err := exec.LookPath("dnsmasq")
if err != nil {
return fmt.Errorf("dnsmasq is required for LXD managed bridges")
}
// Update the static leases.
err = UpdateDNSMasqStatic(n.state, n.name)
if err != nil {
return err
}
// Create subprocess object dnsmasq.
p, err := subprocess.NewProcess(command, dnsmasqCmd, "", "")
if err != nil {
return fmt.Errorf("Failed to create subprocess: %s", err)
}
// Apply AppArmor confinement.
if n.config["raw.dnsmasq"] == "" {
p.SetApparmor(apparmor.DnsmasqProfileName(n))
} else {
n.logger.Warn("Skipping AppArmor for dnsmasq due to raw.dnsmasq being set", log.Ctx{"name": n.name})
}
// Start dnsmasq.
err = p.Start()
if err != nil {
return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(dnsmasqCmd, " "), err)
}
err = p.Save(shared.VarPath("networks", n.name, "dnsmasq.pid"))
if err != nil {
// Kill Process if started, but could not save the file.
err2 := p.Stop()
if err != nil {
return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
}
return fmt.Errorf("Failed to save subprocess details: %s", err)
}
// Spawn DNS forwarder if needed (backgrounded to avoid deadlocks during cluster boot).
if dnsClustered {
// Create forkdns servers directory.
if !shared.PathExists(shared.VarPath("networks", n.name, ForkdnsServersListPath)) {
err = os.MkdirAll(shared.VarPath("networks", n.name, ForkdnsServersListPath), 0755)
if err != nil {
return err
}
}
// Create forkdns servers.conf file if doesn't exist.
f, err := os.OpenFile(shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile), os.O_RDONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
err = n.spawnForkDNS(dnsClusteredAddress)
if err != nil {
return err
}
}
} else {
// Clean up old dnsmasq config if exists and we are not starting dnsmasq.
leasesPath := shared.VarPath("networks", n.name, "dnsmasq.leases")
if shared.PathExists(leasesPath) {
err := os.Remove(leasesPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq leases file '%s'", leasesPath)
}
}
// And same for our PID file.
pidPath := shared.VarPath("networks", n.name, "dnsmasq.pid")
if shared.PathExists(pidPath) {
err := os.Remove(pidPath)
if err != nil {
return errors.Wrapf(err, "Failed to remove old dnsmasq pid file '%s'", pidPath)
}
}
}
return nil
}
// Stop stops the network.
func (n *bridge) Stop() error {
	n.logger.Debug("Stop")

	// Nothing to do if the bridge interface isn't up.
	if !n.isRunning() {
		return nil
	}

	// Destroy the bridge interface.
	if n.config["bridge.driver"] == "openvswitch" {
		ovs := openvswitch.NewOVS()
		if err := ovs.BridgeDelete(n.name); err != nil {
			return err
		}
	} else {
		if _, err := shared.RunCommand("ip", "link", "del", "dev", n.name); err != nil {
			return err
		}
	}

	// Remove any IPv4 firewall rules that may have been applied.
	if usesIPv4Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 4); err != nil {
			return err
		}
	}

	// Remove any IPv6 firewall rules that may have been applied.
	if usesIPv6Firewall(n.config) {
		if err := n.state.Firewall.NetworkClear(n.name, 6); err != nil {
			return err
		}
	}

	// Stop the DNS/DHCP daemons serving this network.
	if err := dnsmasq.Kill(n.name, false); err != nil {
		return err
	}

	if err := n.killForkDNS(); err != nil {
		return err
	}

	// Remove any tunnel interfaces belonging to this network.
	ifaces, err := net.Interfaces()
	if err != nil {
		return err
	}

	tunnelPrefix := fmt.Sprintf("%s-", n.name)
	for _, iface := range ifaces {
		if !strings.HasPrefix(iface.Name, tunnelPrefix) {
			continue
		}

		if _, err := shared.RunCommand("ip", "link", "del", "dev", iface.Name); err != nil {
			return err
		}
	}

	// Unload apparmor profiles.
	return apparmor.NetworkUnload(n.state, n)
}
// Update updates the network. Accepts notification boolean indicating if this update request is coming from a
// cluster notification, in which case do not update the database, just apply local changes needed.
func (n *bridge) Update(newNetwork api.NetworkPut, targetNode string, clientType cluster.ClientType) error {
	n.logger.Debug("Update", log.Ctx{"clientType": clientType, "newNetwork": newNetwork})

	// Populate default values if they are missing.
	err := n.fillConfig(newNetwork.Config)
	if err != nil {
		return err
	}

	dbUpdateNeeeded, changedKeys, oldNetwork, err := n.common.configChanged(newNetwork)
	if err != nil {
		return err
	}

	if !dbUpdateNeeeded {
		return nil // Nothing changed.
	}

	// If any step below fails, revert both the database update and any local bridge changes.
	revert := revert.New()
	defer revert.Fail()

	// Define a function which reverts everything.
	revert.Add(func() {
		// Reset changes to all nodes and database.
		n.common.update(oldNetwork, targetNode, clientType)

		// Reset any change that was made to local bridge.
		n.setup(newNetwork.Config)
	})

	// Bring the bridge down entirely if the driver has changed.
	if shared.StringInSlice("bridge.driver", changedKeys) && n.isRunning() {
		err = n.Stop()
		if err != nil {
			return err
		}
	}

	// Detach any external interfaces should no longer be attached.
	if shared.StringInSlice("bridge.external_interfaces", changedKeys) && n.isRunning() {
		// Build the list of interfaces that should remain attached under the new config.
		devices := []string{}
		for _, dev := range strings.Split(newNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			devices = append(devices, dev)
		}

		for _, dev := range strings.Split(oldNetwork.Config["bridge.external_interfaces"], ",") {
			dev = strings.TrimSpace(dev)
			if dev == "" {
				continue
			}

			// Detach interfaces that were attached before but are no longer listed (and still exist).
			if !shared.StringInSlice(dev, devices) && shared.PathExists(fmt.Sprintf("/sys/class/net/%s", dev)) {
				err = DetachInterface(n.name, dev)
				if err != nil {
					return err
				}
			}
		}
	}

	// Apply changes to database.
	err = n.common.update(newNetwork, targetNode, clientType)
	if err != nil {
		return err
	}

	// Restart the network if needed.
	// NOTE(review): setup is given the OLD config here, presumably so it can clean up state
	// created under the previous settings — confirm against setup's parameter semantics.
	if len(changedKeys) > 0 {
		err = n.setup(oldNetwork.Config)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// spawnForkDNS starts a background forkdns process for this network, listening on
// listenAddress:1053, and records its PID file so it can be stopped later.
// Returns an error if the process cannot be created, started, or its details saved.
func (n *bridge) spawnForkDNS(listenAddress string) error {
	// Setup the dnsmasq domain (forkdns serves the same DNS domain as dnsmasq).
	dnsDomain := n.config["dns.domain"]
	if dnsDomain == "" {
		dnsDomain = "lxd"
	}

	// Spawn the daemon using subprocess, re-executing the LXD binary in "forkdns" mode.
	command := n.state.OS.ExecPath
	forkdnsargs := []string{"forkdns",
		fmt.Sprintf("%s:1053", listenAddress),
		dnsDomain,
		n.name}

	logPath := shared.LogPath(fmt.Sprintf("forkdns.%s.log", n.name))

	p, err := subprocess.NewProcess(command, forkdnsargs, logPath, logPath)
	if err != nil {
		return fmt.Errorf("Failed to create subprocess: %s", err)
	}

	// Drop privileges.
	p.SetCreds(n.state.OS.UnprivUID, n.state.OS.UnprivGID)

	// Apply AppArmor profile.
	p.SetApparmor(apparmor.ForkdnsProfileName(n))

	err = p.Start()
	if err != nil {
		return fmt.Errorf("Failed to run: %s %s: %v", command, strings.Join(forkdnsargs, " "), err)
	}

	err = p.Save(shared.VarPath("networks", n.name, "forkdns.pid"))
	if err != nil {
		// Kill the process if it started, but its details could not be saved.
		err2 := p.Stop()
		// Fixed: previously this checked err (always non-nil here) instead of err2, so the
		// combined error was returned even when the stop succeeded.
		if err2 != nil {
			return fmt.Errorf("Could not kill subprocess while handling saving error: %s: %s", err, err2)
		}

		return fmt.Errorf("Failed to save subprocess details: %s", err)
	}

	return nil
}
// HandleHeartbeat refreshes forkdns servers. Retrieves the IPv4 address of each cluster node (excluding ourselves)
// for this network. It then updates the forkdns server list file if there are changes.
func (n *bridge) HandleHeartbeat(heartbeatData *cluster.APIHeartbeat) error {
	addresses := []string{}
	localAddress, err := node.HTTPSAddress(n.state.Node)
	if err != nil {
		return err
	}

	n.logger.Info("Refreshing forkdns peers")

	cert := n.state.Endpoints.NetworkCert()
	for _, node := range heartbeatData.Members {
		if node.Address == localAddress {
			// No need to query ourselves.
			continue
		}

		client, err := cluster.Connect(node.Address, cert, true)
		if err != nil {
			return err
		}

		state, err := client.GetNetworkState(n.name)
		if err != nil {
			return err
		}

		// Record at most one address per node: the first global IPv4 address on the network.
		for _, addr := range state.Addresses {
			// Only get IPv4 addresses of nodes on network.
			if addr.Family != "inet" || addr.Scope != "global" {
				continue
			}

			addresses = append(addresses, addr.Address)
			break
		}
	}

	// Compare current stored list to retrieved list and see if we need to update.
	curList, err := ForkdnsServersList(n.name)
	if err != nil {
		// Only warn here, but continue on to regenerate the servers list from cluster info.
		n.logger.Warn("Failed to load existing forkdns server list", log.Ctx{"err": err})
	}

	// If current list is same as cluster list, nothing to do.
	// Note: the err == nil guard means a failed load above always falls through to a rewrite.
	if err == nil && reflect.DeepEqual(curList, addresses) {
		return nil
	}

	err = n.updateForkdnsServersFile(addresses)
	if err != nil {
		return err
	}

	n.logger.Info("Updated forkdns server list", log.Ctx{"nodes": addresses})
	return nil
}
// getTunnels returns the unique tunnel names configured on the network
// (the second component of every "tunnel.<name>.<key>" config key).
func (n *bridge) getTunnels() []string {
	tunnels := []string{}

	for key := range n.config {
		if !strings.HasPrefix(key, "tunnel.") {
			continue
		}

		tunnelName := strings.Split(key, ".")[1]
		if shared.StringInSlice(tunnelName, tunnels) {
			continue
		}

		tunnels = append(tunnels, tunnelName)
	}

	return tunnels
}
// bootRoutesV4 returns a list of IPv4 boot routes on the network's device.
// The volatile "linkdown" flag is stripped from each route so it can be re-added verbatim later.
func (n *bridge) bootRoutesV4() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-4", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Fixed: the Start/Wait/scan errors were previously ignored, silently returning a
	// possibly-empty route list when the "ip" command failed.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// bootRoutesV6 returns a list of IPv6 boot routes on the network's device.
// The volatile "linkdown" flag is stripped from each route so it can be re-added verbatim later.
func (n *bridge) bootRoutesV6() ([]string, error) {
	routes := []string{}
	cmd := exec.Command("ip", "-6", "route", "show", "dev", n.name, "proto", "boot")
	ipOut, err := cmd.StdoutPipe()
	if err != nil {
		return routes, err
	}

	// Fixed: the Start/Wait/scan errors were previously ignored, silently returning a
	// possibly-empty route list when the "ip" command failed.
	err = cmd.Start()
	if err != nil {
		return routes, err
	}

	scanner := bufio.NewScanner(ipOut)
	for scanner.Scan() {
		route := strings.Replace(scanner.Text(), "linkdown", "", -1)
		routes = append(routes, route)
	}

	err = cmd.Wait()
	if err != nil {
		return routes, err
	}

	err = scanner.Err()
	if err != nil {
		return routes, err
	}

	return routes, nil
}
// applyBootRoutesV4 applies a list of IPv4 boot routes to the network's device.
// Failures are logged rather than returned, as the original routes are already gone.
func (n *bridge) applyBootRoutesV4(routes []string) {
	for _, route := range routes {
		args := append([]string{"-4", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		_, err := shared.RunCommand("ip", args...)
		if err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// applyBootRoutesV6 applies a list of IPv6 boot routes to the network's device.
// Failures are logged rather than returned, as the original routes are already gone.
func (n *bridge) applyBootRoutesV6(routes []string) {
	for _, route := range routes {
		args := append([]string{"-6", "route", "replace", "dev", n.name, "proto", "boot"}, strings.Fields(route)...)

		_, err := shared.RunCommand("ip", args...)
		if err != nil {
			// If it fails, then we can't stop as the route has already gone, so just log and continue.
			n.logger.Error("Failed to restore route", log.Ctx{"err": err})
		}
	}
}
// fanAddress computes this node's fan bridge address from the underlay and overlay subnets.
// It returns the fan address in CIDR form, the name of the underlay interface it was derived
// from, and that interface's plain IP address.
func (n *bridge) fanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) {
	// Sanity checks.
	underlaySize, _ := underlay.Mask.Size()
	if underlaySize != 16 && underlaySize != 24 {
		return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time")
	}

	overlaySize, _ := overlay.Mask.Size()
	if overlaySize != 8 && overlaySize != 16 {
		return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time")
	}

	// The overlay prefix, the underlay host bits and an 8-bit host part must all fit in 32 bits.
	if overlaySize+(32-underlaySize)+8 > 32 {
		return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN")
	}

	// Get this node's IP on the underlay network.
	ip, dev, err := n.addressForSubnet(underlay)
	if err != nil {
		return "", "", "", err
	}
	ipStr := ip.String()

	// Force into IPv4 format.
	ipBytes := ip.To4()
	if ipBytes == nil {
		return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip)
	}

	// Compute the fan IP by splicing the underlay host byte(s) into the overlay prefix.
	ipBytes[0] = overlay.IP[0]
	if overlaySize == 16 {
		ipBytes[1] = overlay.IP[1]
		ipBytes[2] = ipBytes[3]
	} else if underlaySize == 24 {
		ipBytes[1] = ipBytes[3]
		ipBytes[2] = 0
	} else if underlaySize == 16 {
		ipBytes[1] = ipBytes[2]
		ipBytes[2] = ipBytes[3]
	}

	// This node always takes host address 1 within its fan subnet.
	ipBytes[3] = 1

	// Fixed: previously returned the stale err variable (provably nil at this point);
	// return an explicit nil for clarity.
	return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, nil
}
// addressForSubnet returns the first local IP address (and the name of the interface carrying
// it) that falls within the given subnet. Returns an error if no matching address exists.
func (n *bridge) addressForSubnet(subnet *net.IPNet) (net.IP, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return net.IP{}, "", err
	}

	for _, iface := range ifaces {
		addresses, err := iface.Addrs()
		if err != nil {
			// Skip interfaces whose addresses cannot be listed.
			continue
		}

		for _, address := range addresses {
			ipAddr, _, err := net.ParseCIDR(address.String())
			if err != nil {
				// Skip unparseable addresses.
				continue
			}

			if !subnet.Contains(ipAddr) {
				continue
			}

			return ipAddr, iface.Name, nil
		}
	}

	return net.IP{}, "", fmt.Errorf("No address found in subnet")
}
// killForkDNS stops any forkdns process previously started for this network, based on its
// recorded PID file. A missing PID file, or an already-stopped process, is not an error.
func (n *bridge) killForkDNS() error {
	// Check if we have a running forkdns at all.
	pidPath := shared.VarPath("networks", n.name, "forkdns.pid")

	// If the pid file doesn't exist, there is no process to kill.
	if !shared.PathExists(pidPath) {
		return nil
	}

	p, err := subprocess.ImportProcess(pidPath)
	if err != nil {
		return fmt.Errorf("Could not read pid file: %s", err)
	}

	err = p.Stop()
	if err != nil && err != subprocess.ErrNotRunning {
		// Fixed: the message previously referred to dnsmasq, but this function kills forkdns.
		return fmt.Errorf("Unable to kill forkdns: %s", err)
	}

	return nil
}
// updateForkdnsServersFile takes a list of node addresses and writes them atomically to
// the forkdns.servers file ready for forkdns to notice and re-apply its config.
func (n *bridge) updateForkdnsServersFile(addresses []string) error {
	// We don't want to race with ourselves here.
	forkdnsServersLock.Lock()
	defer forkdnsServersLock.Unlock()

	permName := shared.VarPath("networks", n.name, ForkdnsServersListPath+"/"+ForkdnsServersListFile)
	tmpName := permName + ".tmp"

	// Open tmp file and truncate.
	tmpFile, err := os.Create(tmpName)
	if err != nil {
		return err
	}
	// Release the handle on the error paths below (the double close on the success path is
	// harmless, its error is ignored).
	defer tmpFile.Close()

	// Write one address per line.
	for _, address := range addresses {
		_, err := tmpFile.WriteString(address + "\n")
		if err != nil {
			return err
		}
	}

	// Fixed: close explicitly and check the error (previously ignored) so write-back failures
	// are caught before the file is renamed into place.
	err = tmpFile.Close()
	if err != nil {
		return err
	}

	// Atomically rename finished file into permanent location so forkdns can pick it up.
	err = os.Rename(tmpName, permName)
	if err != nil {
		return err
	}

	return nil
}
// hasIPv4Firewall indicates whether the network has IPv4 firewall enabled.
// An unset ipv4.firewall setting means the firewall is enabled by default.
func (n *bridge) hasIPv4Firewall() bool {
	return n.config["ipv4.firewall"] == "" || shared.IsTrue(n.config["ipv4.firewall"])
}
// hasIPv6Firewall indicates whether the network has IPv6 firewall enabled.
// An unset ipv6.firewall setting means the firewall is enabled by default.
func (n *bridge) hasIPv6Firewall() bool {
	return n.config["ipv6.firewall"] == "" || shared.IsTrue(n.config["ipv6.firewall"])
}
// DHCPv4Subnet returns the DHCPv4 subnet (if DHCP is enabled on network).
// Returns nil when DHCP is disabled or ipv4.address is not a valid CIDR.
func (n *bridge) DHCPv4Subnet() *net.IPNet {
	// An empty ipv4.dhcp setting means DHCP is enabled by default.
	dhcp := n.config["ipv4.dhcp"]
	if dhcp != "" && !shared.IsTrue(dhcp) {
		return nil
	}

	_, subnet, err := net.ParseCIDR(n.config["ipv4.address"])
	if err != nil {
		return nil
	}

	return subnet
}
// DHCPv6Subnet returns the DHCPv6 subnet (if DHCP or SLAAC is enabled on network).
// Returns nil when DHCP is disabled or ipv6.address is not a valid CIDR.
func (n *bridge) DHCPv6Subnet() *net.IPNet {
	// An empty ipv6.dhcp setting means DHCP is enabled by default.
	dhcp := n.config["ipv6.dhcp"]
	if dhcp != "" && !shared.IsTrue(dhcp) {
		return nil
	}

	_, subnet, err := net.ParseCIDR(n.config["ipv6.address"])
	if err != nil {
		return nil
	}

	return subnet
}
|
package drivers
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/idmap"
)
// MinBlockBoundary is the minimum block boundary size to use.
const MinBlockBoundary = 8192
// wipeDirectory empties the contents of a directory, but leaves it in place.
// A directory that does not exist counts as already wiped.
func wipeDirectory(path string) error {
	// Read the directory listing.
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		return errors.Wrapf(err, "Failed to list directory '%s'", path)
	}

	// Remove each entry in turn, tolerating entries that vanish concurrently.
	for _, entry := range entries {
		entryPath := filepath.Join(path, entry.Name())
		if err := os.RemoveAll(entryPath); err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", entryPath)
		}
	}

	return nil
}
// forceRemoveAll wipes a path including any immutable/non-append files.
func forceRemoveAll(path string) error {
	// Fast path: a plain recursive removal usually works.
	if err := os.RemoveAll(path); err == nil {
		return nil
	}

	// Clear immutable/append-only attributes (best effort) and retry once.
	shared.RunCommand("chattr", "-ai", "-R", path)

	return os.RemoveAll(path)
}
// forceUnmount unmounts stacked mounts until no mountpoint remains.
// Returns whether anything was actually unmounted during this call.
func forceUnmount(path string) (bool, error) {
	unmounted := false

	for {
		// Stop once the path is no longer a mountpoint (all stacked mounts are gone).
		if !shared.IsMountPoint(path) {
			return unmounted, nil
		}

		// Try a clean unmount first.
		err := TryUnmount(path, 0)
		if err != nil {
			// Fallback to lazy unmounting.
			err = unix.Unmount(path, unix.MNT_DETACH)
			if err != nil {
				return false, errors.Wrapf(err, "Failed to unmount '%s'", path)
			}
		}

		unmounted = true
	}
}
// mountReadOnly performs a read-only bind-mount.
// Returns false (with nil error) if dstPath was already a mountpoint and nothing was done.
func mountReadOnly(srcPath string, dstPath string) (bool, error) {
	// Check if already mounted.
	if shared.IsMountPoint(dstPath) {
		return false, nil
	}

	// Create a mount entry.
	err := TryMount(srcPath, dstPath, "none", unix.MS_BIND, "")
	if err != nil {
		return false, err
	}

	// Make it read-only. The RDONLY flag is applied via a second remount pass
	// over the bind mount created above.
	err = TryMount("", dstPath, "none", unix.MS_BIND|unix.MS_RDONLY|unix.MS_REMOUNT, "")
	if err != nil {
		// Undo the initial bind mount if the remount fails.
		forceUnmount(dstPath)
		return false, err
	}

	return true, nil
}
// sameMount checks if two paths are on the same mountpoint.
func sameMount(srcPath string, dstPath string) bool {
	// Stat both filesystems; any failure means equality cannot be proven.
	var fsA, fsB unix.Statfs_t

	if unix.Statfs(srcPath, &fsA) != nil {
		return false
	}

	if unix.Statfs(dstPath, &fsB) != nil {
		return false
	}

	// Different filesystem type or filesystem ID means different mounts.
	if fsA.Type != fsB.Type || fsA.Fsid != fsB.Fsid {
		return false
	}

	// Stat the two paths themselves and compare inode numbers.
	var stA, stB unix.Stat_t

	if unix.Stat(srcPath, &stA) != nil {
		return false
	}

	if unix.Stat(dstPath, &stB) != nil {
		return false
	}

	return stA.Ino == stB.Ino
}
// TryMount tries mounting a filesystem multiple times. This is useful for unreliable backends.
func TryMount(src string, dst string, fs string, flags uintptr, options string) error {
	var lastErr error

	// Attempt 20 mounts over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		lastErr = unix.Mount(src, dst, fs, flags, options)
		if lastErr == nil {
			return nil
		}

		time.Sleep(500 * time.Millisecond)
	}

	return errors.Wrapf(lastErr, "Failed to mount %q on %q using %q", src, dst, fs)
}
// TryUnmount tries unmounting a filesystem multiple times. This is useful for unreliable backends.
func TryUnmount(path string, flags int) error {
	var lastErr error

	// Attempt 20 unmounts over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		lastErr = unix.Unmount(path, flags)
		if lastErr == nil {
			return nil
		}

		time.Sleep(500 * time.Millisecond)
	}

	return errors.Wrapf(lastErr, "Failed to unmount '%s'", path)
}
// tryExists waits up to 10s for a file to exist.
func tryExists(path string) bool {
	// Attempt 20 checks over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		if shared.PathExists(path) {
			return true
		}

		time.Sleep(500 * time.Millisecond)
	}

	return false
}
// fsUUID returns the filesystem UUID for the given block path.
// The blkid output is trimmed of whitespace: blkid emits a trailing newline,
// and an untrimmed UUID makes paths built from it (e.g.
// /dev/disk/by-uuid/<uuid>) never match an existing file.
func fsUUID(path string) (string, error) {
	val, err := shared.RunCommand("blkid", "-s", "UUID", "-o", "value", path)
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(val), nil
}
// hasFilesystem checks if a given path is backed by a specified filesystem.
func hasFilesystem(path string, fsType int64) bool {
	var fsStat unix.Statfs_t

	err := unix.Statfs(path, &fsStat)
	if err != nil {
		return false
	}

	return int64(fsStat.Type) == fsType
}
// GetPoolMountPath returns the mountpoint of the given pool.
// {LXD_DIR}/storage-pools/<pool>
func GetPoolMountPath(poolName string) string {
	// shared.VarPath roots the path under the LXD data directory.
	return shared.VarPath("storage-pools", poolName)
}
// GetVolumeMountPath returns the mount path for a specific volume based on its pool and type and
// whether it is a snapshot or not. For VolumeTypeImage the volName is the image fingerprint.
func GetVolumeMountPath(poolName string, volType VolumeType, volName string) string {
	// Snapshots live under a sibling "<type>-snapshots" directory.
	typeDir := string(volType)
	if shared.IsSnapshot(volName) {
		typeDir = typeDir + "-snapshots"
	}

	return shared.VarPath("storage-pools", poolName, typeDir, volName)
}
// GetVolumeSnapshotDir gets the snapshot mount directory for the parent volume.
func GetVolumeSnapshotDir(poolName string, volType VolumeType, volName string) string {
	// Strip any snapshot suffix so the path always refers to the parent volume.
	parent, _, _ := shared.InstanceGetParentAndSnapshotName(volName)
	return shared.VarPath("storage-pools", poolName, fmt.Sprintf("%s-snapshots", string(volType)), parent)
}
// GetSnapshotVolumeName returns the full volume name for a parent volume and snapshot name.
func GetSnapshotVolumeName(parentName, snapshotName string) string {
return fmt.Sprintf("%s%s%s", parentName, shared.SnapshotDelimiter, snapshotName)
}
// createParentSnapshotDirIfMissing creates the parent directory for volume snapshots.
func createParentSnapshotDirIfMissing(poolName string, volType VolumeType, volName string) error {
	snapshotsPath := GetVolumeSnapshotDir(poolName, volType, volName)

	// Nothing to do if the directory is already present.
	if shared.PathExists(snapshotsPath) {
		return nil
	}

	err := os.Mkdir(snapshotsPath, 0700)
	if err != nil {
		return errors.Wrapf(err, "Failed to create directory '%s'", snapshotsPath)
	}

	return nil
}
// deleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
// It accepts the pool name, volume type and parent volume name.
func deleteParentSnapshotDirIfEmpty(poolName string, volType VolumeType, volName string) error {
	snapshotsPath := GetVolumeSnapshotDir(poolName, volType, volName)

	// A missing directory needs no cleanup.
	if !shared.PathExists(snapshotsPath) {
		return nil
	}

	isEmpty, err := shared.PathIsEmpty(snapshotsPath)
	if err != nil {
		return err
	}

	if !isEmpty {
		return nil
	}

	err = os.Remove(snapshotsPath)
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "Failed to remove '%s'", snapshotsPath)
	}

	return nil
}
// ensureSparseFile creates a sparse empty file at specified location with specified size.
// If the path already exists, the file is truncated to the requested size.
func ensureSparseFile(filePath string, sizeBytes int64) error {
	f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return errors.Wrapf(err, "Failed to open %s", filePath)
	}
	defer f.Close()

	// Truncate extends the file without writing data blocks, keeping it sparse.
	err = f.Truncate(sizeBytes)
	if err != nil {
		return errors.Wrapf(err, "Failed to create sparse file %s", filePath)
	}

	return nil
}
// roundVolumeBlockFileSizeBytes rounds sizeBytes up to the nearest multiple of
// MinBlockBoundary that is equal to or larger than sizeBytes.
// (The previous doc comment claimed a size string is parsed; the argument is an int64.)
func roundVolumeBlockFileSizeBytes(sizeBytes int64) int64 {
	// Qemu requires image files to be in traditional storage block boundaries.
	// We use 8k here to ensure our images are compatible with all of our backend drivers.
	if sizeBytes < MinBlockBoundary {
		sizeBytes = MinBlockBoundary
	}

	// Round down to a MinBlockBoundary multiple (sizeBytes is already int64, so the
	// previous int64() conversion was redundant), then bump up one boundary if needed.
	roundedSizeBytes := (sizeBytes / MinBlockBoundary) * MinBlockBoundary
	if roundedSizeBytes < sizeBytes {
		roundedSizeBytes += MinBlockBoundary
	}

	return roundedSizeBytes
}
// ensureVolumeBlockFile creates new block file or enlarges the raw block file for a volume to the specified size.
// Returns true if resize took place, false if not. Requested size is rounded to nearest block size using
// roundVolumeBlockFileSizeBytes() before decision whether to resize is taken.
func ensureVolumeBlockFile(vol Volume, path string, sizeBytes int64) (bool, error) {
	if sizeBytes <= 0 {
		return false, fmt.Errorf("Size cannot be zero")
	}

	// Get rounded block size to avoid qemu boundary issues.
	sizeBytes = roundVolumeBlockFileSizeBytes(sizeBytes)

	if shared.PathExists(path) {
		fi, err := os.Stat(path)
		if err != nil {
			return false, err
		}

		oldSizeBytes := fi.Size()
		if sizeBytes == oldSizeBytes {
			// Already at the requested (rounded) size; nothing to do.
			return false, nil
		}

		// Block image volumes cannot be resized because they can have a readonly snapshot that doesn't get
		// updated when the volume's size is changed, and this is what instances are created from.
		// During initial volume fill allowUnsafeResize is enabled because snapshot hasn't been taken yet.
		if !vol.allowUnsafeResize && vol.volType == VolumeTypeImage {
			return false, ErrNotSupported
		}

		// Only perform pre-resize sanity checks if we are not in "unsafe" mode.
		// In unsafe mode we expect the caller to know what they are doing and understand the risks.
		if !vol.allowUnsafeResize {
			if sizeBytes < oldSizeBytes {
				return false, errors.Wrap(ErrCannotBeShrunk, "Block volumes cannot be shrunk")
			}

			if vol.MountInUse() {
				return false, ErrInUse // We don't allow online resizing of block volumes.
			}
		}

		// Grow the existing file by truncating it to the new, larger size.
		err = ensureSparseFile(path, sizeBytes)
		if err != nil {
			return false, errors.Wrapf(err, "Failed resizing disk image %q to size %d", path, sizeBytes)
		}

		return true, nil
	}

	// If path doesn't exist, then there has been no filler function supplied to create it from another source.
	// So instead create an empty volume (use for PXE booting a VM).
	err := ensureSparseFile(path, sizeBytes)
	if err != nil {
		return false, errors.Wrapf(err, "Failed creating disk image %q as size %d", path, sizeBytes)
	}

	return false, nil
}
// mkfsOptions represents options for filesystem creation.
type mkfsOptions struct {
	Label string // filesystem label, passed to mkfs via -L when non-empty
}
// makeFSType creates the provided filesystem.
// On failure the tool's output is returned alongside the error; on success the
// output is discarded and an empty string is returned.
func makeFSType(path string, fsType string, options *mkfsOptions) (string, error) {
	opts := options
	if opts == nil {
		opts = &mkfsOptions{}
	}

	args := []string{}
	if opts.Label != "" {
		args = append(args, "-L", opts.Label)
	}

	if fsType == "ext4" {
		args = append(args, "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0")
	}

	// Always add the path to the device as the last argument for wider compatibility with versions of mkfs.
	args = append(args, path)

	output, err := shared.TryRunCommand(fmt.Sprintf("mkfs.%s", fsType), args...)
	if err != nil {
		return output, err
	}

	return "", nil
}
// mountOption represents an individual mount option.
type mountOption struct {
	capture bool    // true: OR flag into the mount flags; false: clear flag from them
	flag    uintptr // kernel MS_* flag value
}
// mountOptions represents a list of possible mount options.
// capture=true options set their MS_* flag; capture=false options clear it
// (e.g. "rw" clears MS_RDONLY, "async" clears MS_SYNCHRONOUS) — see
// resolveMountOptions for how these are applied.
var mountOptions = map[string]mountOption{
	"async":         {false, unix.MS_SYNCHRONOUS},
	"atime":         {false, unix.MS_NOATIME},
	"bind":          {true, unix.MS_BIND},
	"defaults":      {true, 0},
	"dev":           {false, unix.MS_NODEV},
	"diratime":      {false, unix.MS_NODIRATIME},
	"dirsync":       {true, unix.MS_DIRSYNC},
	"exec":          {false, unix.MS_NOEXEC},
	"lazytime":      {true, unix.MS_LAZYTIME},
	"mand":          {true, unix.MS_MANDLOCK},
	"noatime":       {true, unix.MS_NOATIME},
	"nodev":         {true, unix.MS_NODEV},
	"nodiratime":    {true, unix.MS_NODIRATIME},
	"noexec":        {true, unix.MS_NOEXEC},
	"nomand":        {false, unix.MS_MANDLOCK},
	"norelatime":    {false, unix.MS_RELATIME},
	"nostrictatime": {false, unix.MS_STRICTATIME},
	"nosuid":        {true, unix.MS_NOSUID},
	"rbind":         {true, unix.MS_BIND | unix.MS_REC},
	"relatime":      {true, unix.MS_RELATIME},
	"remount":       {true, unix.MS_REMOUNT},
	"ro":            {true, unix.MS_RDONLY},
	"rw":            {false, unix.MS_RDONLY},
	"strictatime":   {true, unix.MS_STRICTATIME},
	"suid":          {false, unix.MS_NOSUID},
	"sync":          {true, unix.MS_SYNCHRONOUS},
}
// resolveMountOptions resolves the provided mount options.
// It returns the kernel mount flags derived from recognised options, plus the
// comma-joined remainder of options that were not recognised.
func resolveMountOptions(options string) (uintptr, string) {
	var mountFlags uintptr
	leftover := []string{}

	for _, opt := range strings.SplitN(options, ",", -1) {
		mo, known := mountOptions[opt]
		if !known {
			// Unknown options are passed through verbatim.
			leftover = append(leftover, opt)
			continue
		}

		// capture sets the flag; non-capture options clear it.
		if mo.capture {
			mountFlags |= mo.flag
		} else {
			mountFlags &= ^mo.flag
		}
	}

	return mountFlags, strings.Join(leftover, ",")
}
// filesystemTypeCanBeShrunk indicates if filesystems of fsType can be shrunk.
func filesystemTypeCanBeShrunk(fsType string) bool {
	if fsType == "" {
		fsType = DefaultFilesystem
	}

	switch fsType {
	case "ext4", "btrfs":
		return true
	}

	return false
}
// shrinkFileSystem shrinks a filesystem if it is supported.
// EXT4 volumes will be unmounted temporarily if needed.
// BTRFS volumes will be mounted temporarily if needed.
func shrinkFileSystem(fsType string, devPath string, vol Volume, byteSize int64) error {
	if fsType == "" {
		fsType = DefaultFilesystem
	}

	if !filesystemTypeCanBeShrunk(fsType) {
		return ErrCannotBeShrunk
	}

	// The smallest unit that resize2fs accepts in byte size (rather than blocks) is kilobytes.
	strSize := fmt.Sprintf("%dK", byteSize/1024)

	switch fsType {
	case "ext4":
		// ext4 is shrunk offline, so the work runs inside UnmountTask.
		return vol.UnmountTask(func(op *operations.Operation) error {
			// resize2fs requires a clean filesystem, so fsck first.
			output, err := shared.RunCommand("e2fsck", "-f", "-y", devPath)
			if err != nil {
				exitCodeFSModified := false
				runErr, ok := err.(shared.RunError)
				if ok {
					exitError, ok := runErr.Err.(*exec.ExitError)
					if ok {
						// Exit code 1 means "errors corrected", not failure.
						if exitError.ExitCode() == 1 {
							exitCodeFSModified = true
						}
					}
				}

				// e2fsck can return non-zero exit code if it has modified the filesystem, but
				// this isn't an error and we can proceed.
				if !exitCodeFSModified {
					// e2fsck provides some context to errors on stdout.
					return errors.Wrapf(err, "%s", strings.TrimSpace(output))
				}
			}

			_, err = shared.RunCommand("resize2fs", devPath, strSize)
			if err != nil {
				return err
			}

			return nil
		}, true, nil)
	case "btrfs":
		// btrfs resizes online, so the work runs inside MountTask.
		return vol.MountTask(func(mountPath string, op *operations.Operation) error {
			_, err := shared.RunCommand("btrfs", "filesystem", "resize", strSize, mountPath)
			if err != nil {
				return err
			}

			return nil
		}, nil)
	}

	return fmt.Errorf("Unrecognised filesystem type %q", fsType)
}
// growFileSystem grows a filesystem if it is supported. The volume will be mounted temporarily if needed.
func growFileSystem(fsType string, devPath string, vol Volume) error {
	if fsType == "" {
		fsType = DefaultFilesystem
	}

	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var output string
		var err error

		// Pick the grow tool for the filesystem; ext4 operates on the device,
		// xfs and btrfs on the mounted path.
		switch fsType {
		case "ext4":
			output, err = shared.TryRunCommand("resize2fs", devPath)
		case "xfs":
			output, err = shared.TryRunCommand("xfs_growfs", mountPath)
		case "btrfs":
			output, err = shared.TryRunCommand("btrfs", "filesystem", "resize", "max", mountPath)
		default:
			return fmt.Errorf("Unrecognised filesystem type %q", fsType)
		}

		if err != nil {
			return fmt.Errorf("Could not grow underlying %q filesystem for %q: %s", fsType, devPath, output)
		}

		return nil
	}, nil)
}
// renegerateFilesystemUUIDNeeded returns true if fsType requires UUID regeneration, false if not.
// (The historical "renegerate" spelling is kept: renaming would break callers.)
func renegerateFilesystemUUIDNeeded(fsType string) bool {
	switch fsType {
	case "btrfs", "xfs":
		return true
	}

	return false
}
// regenerateFilesystemUUID changes the filesystem UUID to a new randomly generated one if the fsType requires it.
// Returns an error for filesystem types that do not support UUID regeneration
// (callers should gate on renegerateFilesystemUUIDNeeded()).
func regenerateFilesystemUUID(fsType string, devPath string) error {
	switch fsType {
	case "btrfs":
		return regenerateFilesystemBTRFSUUID(devPath)
	case "xfs":
		return regenerateFilesystemXFSUUID(devPath)
	}

	return fmt.Errorf("Filesystem not supported")
}
// regenerateFilesystemBTRFSUUID changes the BTRFS filesystem UUID to a new randomly generated one.
func regenerateFilesystemBTRFSUUID(devPath string) error {
	// If the snapshot was taken whilst instance was running there may be outstanding transactions that will
	// cause btrfstune to corrupt superblock, so ensure these are cleared out first.
	_, err := shared.RunCommand("btrfs", "rescue", "zero-log", devPath)
	if err != nil {
		return err
	}

	_, err = shared.RunCommand("btrfstune", "-f", "-u", devPath)
	return err
}
// regenerateFilesystemXFSUUID changes the XFS filesystem UUID to a new randomly generated one.
func regenerateFilesystemXFSUUID(devPath string) error {
	// Attempt to generate a new UUID.
	msg, err := shared.RunCommand("xfs_admin", "-U", "generate", devPath)
	if err != nil {
		return err
	}

	if msg != "" {
		// Exit 0 with a msg usually means some log entry getting in the way.
		// Force-zero the log (discarding it), then retry the UUID change.
		_, err = shared.RunCommand("xfs_repair", "-o", "force_geometry", "-L", devPath)
		if err != nil {
			return err
		}

		// Attempt to generate a new UUID again.
		_, err = shared.RunCommand("xfs_admin", "-U", "generate", devPath)
		if err != nil {
			return err
		}
	}

	return nil
}
// copyDevice copies one device path to another.
func copyDevice(inputPath, outputPath string) error {
	src, err := os.Open(inputPath)
	if err != nil {
		return errors.Wrapf(err, "Error opening file for reading %q", inputPath)
	}
	defer src.Close()

	dst, err := os.OpenFile(outputPath, os.O_WRONLY, 0)
	if err != nil {
		return errors.Wrapf(err, "Error opening file for writing %q", outputPath)
	}
	defer dst.Close()

	// Stream the data rather than buffering the whole device in memory.
	_, err = io.Copy(dst, src)
	if err != nil {
		return errors.Wrapf(err, "Error copying file %q to %q", inputPath, outputPath)
	}

	return nil
}
// loopFilePath returns the loop file path for a storage pool.
// {LXD_DIR}/disks/<pool>.img
func loopFilePath(poolName string) string {
	return filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", poolName))
}
// ShiftBtrfsRootfs shifts the BTRFS root filesystem.
// Thin wrapper around shiftBtrfsRootfs with shift=true.
func ShiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
	return shiftBtrfsRootfs(path, diskIdmap, true)
}
// UnshiftBtrfsRootfs unshifts the BTRFS root filesystem.
// Thin wrapper around shiftBtrfsRootfs with shift=false.
func UnshiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
	return shiftBtrfsRootfs(path, diskIdmap, false)
}
// shiftBtrfsRootfs shifts (or unshifts) a filesystem that may include read-only subvolumes.
func shiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet, shift bool) error {
	var err error
	roSubvols := []string{}

	// Read-only subvolumes must temporarily be made read-write so they can be shifted.
	subvols, _ := BTRFSSubVolumesGet(path)
	sort.Sort(sort.StringSlice(subvols))
	for _, subvol := range subvols {
		subvol = filepath.Join(path, subvol)

		if !BTRFSSubVolumeIsRo(subvol) {
			continue
		}

		roSubvols = append(roSubvols, subvol)
		BTRFSSubVolumeMakeRw(subvol)
	}

	if shift {
		err = diskIdmap.ShiftRootfs(path, nil)
	} else {
		err = diskIdmap.UnshiftRootfs(path, nil)
	}

	// Restore the read-only flag even if the shift failed.
	for _, subvol := range roSubvols {
		BTRFSSubVolumeMakeRo(subvol)
	}

	return err
}
// BTRFSSubVolumesGet gets subvolumes.
// Returns the paths of all btrfs subvolumes found below path, relative to it.
func BTRFSSubVolumesGet(path string) ([]string, error) {
	result := []string{}

	// Ensure a trailing slash so TrimPrefix below yields relative names.
	if !strings.HasSuffix(path, "/") {
		path = path + "/"
	}

	// Unprivileged users can't get to fs internals
	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
		// Skip walk errors
		if err != nil {
			return nil
		}

		// Ignore the base path
		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
			return nil
		}

		// Subvolumes can only be directories
		if !fi.IsDir() {
			return nil
		}

		// Check if a btrfs subvolume
		if btrfsIsSubVolume(fpath) {
			result = append(result, strings.TrimPrefix(fpath, path))
		}

		return nil
	})

	return result, nil
}
// btrfsIsSubVolume checks if a given path is a subvolume.
func btrfsIsSubVolume(subvolPath string) bool {
	var st unix.Stat_t

	if unix.Lstat(subvolPath, &st) != nil {
		return false
	}

	// A btrfs subvolume root always has inode BTRFS_FIRST_FREE_OBJECTID (256).
	return st.Ino == 256
}
// BTRFSSubVolumeIsRo returns if subvolume is read only.
func BTRFSSubVolumeIsRo(path string) bool {
	output, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
	if err != nil {
		return false
	}

	// The property tool prints "ro=true" for read-only subvolumes.
	return strings.HasPrefix(output, "ro=true")
}
// BTRFSSubVolumeMakeRo makes a subvolume read only.
// Deprecated: use btrfs.setSubvolumeReadonlyProperty() instead.
func BTRFSSubVolumeMakeRo(path string) error {
	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
	return err
}
// BTRFSSubVolumeMakeRw makes a sub volume read/write.
// Deprecated: use btrfs.setSubvolumeReadonlyProperty() instead.
func BTRFSSubVolumeMakeRw(path string) error {
	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
	return err
}
// ShiftZFSSkipper indicates which files not to shift for ZFS.
func ShiftZFSSkipper(dir string, absPath string, fi os.FileInfo) bool {
strippedPath := absPath
if dir != "" {
strippedPath = absPath[len(dir):]
}
if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
return true
}
return false
}
// BlockDiskSizeBytes returns the size of a block disk (path can be either block device or raw file).
func BlockDiskSizeBytes(blockDiskPath string) (int64, error) {
	if shared.IsBlockdevPath(blockDiskPath) {
		// Attempt to open the device path.
		f, err := os.Open(blockDiskPath)
		if err != nil {
			return -1, err
		}
		defer f.Close()
		fd := int(f.Fd())

		// Retrieve the block size via the BLKGETSIZE64 ioctl.
		res, err := unix.IoctlGetInt(fd, unix.BLKGETSIZE64)
		if err != nil {
			return -1, err
		}

		return int64(res), nil
	}

	// Block device is assumed to be a raw file.
	fi, err := os.Lstat(blockDiskPath)
	if err != nil {
		return -1, err
	}

	return fi.Size(), nil
}
// PathNameEncode encodes a path string to be used as part of a file name.
// The encoding scheme replaces "-" with "--" and then "/" with "-".
func PathNameEncode(text string) string {
	// A single replacer pass is equivalent to the two sequential passes:
	// doubling "-" first leaves no ambiguity with the "/" -> "-" mapping.
	return strings.NewReplacer("-", "--", "/", "-").Replace(text)
}
// PathNameDecode decodes a string containing an encoded path back to its original form.
// The decoding scheme converts "-" back to "/" and "--" back to "-".
func PathNameDecode(text string) string {
	// The replacer tries "--" before "-" at each position, which matches the
	// original three-pass null-placeholder approach exactly.
	return strings.NewReplacer("--", "-", "-", "/").Replace(text)
}
// OperationLockName returns the storage specific lock name to use with locking package.
// The name joins operation, pool, volume type, content type and volume name so that
// operations on distinct volumes get distinct lock names.
func OperationLockName(operationName string, poolName string, volType VolumeType, contentType ContentType, volName string) string {
	return fmt.Sprintf("%s/%s/%s/%s/%s", operationName, poolName, volType, contentType, volName)
}
utils: trim whitespace from block device UUID
When creating a storage (btrfs) pool backed by a block device as in this
example
lxc storage create pool-btrfs btrfs source=/dev/sdc
we create the storage pool based on the volatile device path /dev/sdc. Since
this is obviously a bad idea we have logic to replace this device path with the
reliable /dev/disk/by-uuid/<uuid> path based on the uuid we just generated when
creating the filesystem. So we call
blkid -s UUID -o value /dev/sdc
and use the returned uuid in a tryExists(/dev/disk/by-uuid/<uuid>) loop for 10
seconds. This consistently failed for me, which meant I ended up with the
unreliable /dev/sdc device path as the source property of the storage pool,
which I could not change later. The reason this happens is that trailing
whitespace (blkid's newline) sneaks into the UUID, making the path whose
existence we test for useless. Fix this by trimming the whitespace.
Signed-off-by: Christian Brauner <48455ab3070520a2d174545c7239d6d0fabd9a83@ubuntu.com>
package drivers
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/idmap"
)
// MinBlockBoundary is the minimum block boundary size (in bytes) that block volume
// file sizes are rounded to; see roundVolumeBlockFileSizeBytes for the rationale
// (qemu image compatibility across backend drivers).
const MinBlockBoundary = 8192
// wipeDirectory empties the contents of a directory, but leaves it in place.
func wipeDirectory(path string) error {
	// Read the directory listing; a missing directory counts as already wiped.
	dirEntries, err := ioutil.ReadDir(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		return errors.Wrapf(err, "Failed to list directory '%s'", path)
	}

	// Remove each entry in turn, tolerating entries that vanish concurrently.
	for _, dirEntry := range dirEntries {
		fullPath := filepath.Join(path, dirEntry.Name())
		err := os.RemoveAll(fullPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", fullPath)
		}
	}

	return nil
}
// forceRemoveAll wipes a path including any immutable/non-append files.
func forceRemoveAll(path string) error {
	err := os.RemoveAll(path)
	if err != nil {
		// Best effort: strip immutable/append-only attributes recursively, then retry.
		// The chattr error is deliberately ignored; only the retry result matters.
		shared.RunCommand("chattr", "-ai", "-R", path)
		err = os.RemoveAll(path)
		if err != nil {
			return err
		}
	}

	return nil
}
// forceUnmount unmounts stacked mounts until no mountpoint remains.
// Returns whether anything was actually unmounted during this call.
func forceUnmount(path string) (bool, error) {
	unmounted := false

	for {
		// Stop once the path is no longer a mountpoint (all stacked mounts are gone).
		if !shared.IsMountPoint(path) {
			return unmounted, nil
		}

		// Try a clean unmount first.
		err := TryUnmount(path, 0)
		if err != nil {
			// Fallback to lazy unmounting.
			err = unix.Unmount(path, unix.MNT_DETACH)
			if err != nil {
				return false, errors.Wrapf(err, "Failed to unmount '%s'", path)
			}
		}

		unmounted = true
	}
}
// mountReadOnly performs a read-only bind-mount.
// Returns false (with nil error) if dstPath was already a mountpoint and nothing was done.
func mountReadOnly(srcPath string, dstPath string) (bool, error) {
	// Check if already mounted.
	if shared.IsMountPoint(dstPath) {
		return false, nil
	}

	// Create a mount entry.
	err := TryMount(srcPath, dstPath, "none", unix.MS_BIND, "")
	if err != nil {
		return false, err
	}

	// Make it read-only. The RDONLY flag is applied via a second remount pass
	// over the bind mount created above.
	err = TryMount("", dstPath, "none", unix.MS_BIND|unix.MS_RDONLY|unix.MS_REMOUNT, "")
	if err != nil {
		// Undo the initial bind mount if the remount fails.
		forceUnmount(dstPath)
		return false, err
	}

	return true, nil
}
// sameMount checks if two paths are on the same mountpoint.
func sameMount(srcPath string, dstPath string) bool {
	// Stat both filesystems; any failure means equality cannot be proven.
	var fsA, fsB unix.Statfs_t

	if unix.Statfs(srcPath, &fsA) != nil {
		return false
	}

	if unix.Statfs(dstPath, &fsB) != nil {
		return false
	}

	// Different filesystem type or filesystem ID means different mounts.
	if fsA.Type != fsB.Type || fsA.Fsid != fsB.Fsid {
		return false
	}

	// Stat the two paths themselves and compare inode numbers.
	var stA, stB unix.Stat_t

	if unix.Stat(srcPath, &stA) != nil {
		return false
	}

	if unix.Stat(dstPath, &stB) != nil {
		return false
	}

	return stA.Ino == stB.Ino
}
// TryMount tries mounting a filesystem multiple times. This is useful for unreliable backends.
func TryMount(src string, dst string, fs string, flags uintptr, options string) error {
	var lastErr error

	// Attempt 20 mounts over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		lastErr = unix.Mount(src, dst, fs, flags, options)
		if lastErr == nil {
			return nil
		}

		time.Sleep(500 * time.Millisecond)
	}

	return errors.Wrapf(lastErr, "Failed to mount %q on %q using %q", src, dst, fs)
}
// TryUnmount tries unmounting a filesystem multiple times. This is useful for unreliable backends.
func TryUnmount(path string, flags int) error {
	var lastErr error

	// Attempt 20 unmounts over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		lastErr = unix.Unmount(path, flags)
		if lastErr == nil {
			return nil
		}

		time.Sleep(500 * time.Millisecond)
	}

	return errors.Wrapf(lastErr, "Failed to unmount '%s'", path)
}
// tryExists waits up to 10s for a file to exist.
func tryExists(path string) bool {
	// Attempt 20 checks over 10s.
	for attempt := 0; attempt < 20; attempt++ {
		if shared.PathExists(path) {
			return true
		}

		time.Sleep(500 * time.Millisecond)
	}

	return false
}
// fsUUID returns the filesystem UUID for the given block path.
func fsUUID(path string) (string, error) {
	val, err := shared.RunCommand("blkid", "-s", "UUID", "-o", "value", path)
	if err != nil {
		return "", err
	}

	// Trim the trailing newline emitted by blkid; otherwise paths built from the
	// UUID (e.g. /dev/disk/by-uuid/<uuid>) would never match an existing file.
	val = strings.TrimSpace(val)
	return val, nil
}
// hasFilesystem checks if a given path is backed by a specified filesystem.
func hasFilesystem(path string, fsType int64) bool {
	var fsStat unix.Statfs_t

	err := unix.Statfs(path, &fsStat)
	if err != nil {
		return false
	}

	return int64(fsStat.Type) == fsType
}
// GetPoolMountPath returns the mountpoint of the given pool.
// {LXD_DIR}/storage-pools/<pool>
func GetPoolMountPath(poolName string) string {
	// shared.VarPath roots the path under the LXD data directory.
	return shared.VarPath("storage-pools", poolName)
}
// GetVolumeMountPath returns the mount path for a specific volume based on its pool and type and
// whether it is a snapshot or not. For VolumeTypeImage the volName is the image fingerprint.
func GetVolumeMountPath(poolName string, volType VolumeType, volName string) string {
	// Snapshots live under a sibling "<type>-snapshots" directory.
	typeDir := string(volType)
	if shared.IsSnapshot(volName) {
		typeDir = typeDir + "-snapshots"
	}

	return shared.VarPath("storage-pools", poolName, typeDir, volName)
}
// GetVolumeSnapshotDir gets the snapshot mount directory for the parent volume.
func GetVolumeSnapshotDir(poolName string, volType VolumeType, volName string) string {
	// Strip any snapshot suffix so the path always refers to the parent volume.
	parent, _, _ := shared.InstanceGetParentAndSnapshotName(volName)
	return shared.VarPath("storage-pools", poolName, fmt.Sprintf("%s-snapshots", string(volType)), parent)
}
// GetSnapshotVolumeName returns the full volume name for a parent volume and snapshot name.
func GetSnapshotVolumeName(parentName, snapshotName string) string {
return fmt.Sprintf("%s%s%s", parentName, shared.SnapshotDelimiter, snapshotName)
}
// createParentSnapshotDirIfMissing creates the parent directory for volume snapshots.
func createParentSnapshotDirIfMissing(poolName string, volType VolumeType, volName string) error {
	snapshotsPath := GetVolumeSnapshotDir(poolName, volType, volName)

	// Nothing to do if the directory is already present.
	if shared.PathExists(snapshotsPath) {
		return nil
	}

	err := os.Mkdir(snapshotsPath, 0700)
	if err != nil {
		return errors.Wrapf(err, "Failed to create directory '%s'", snapshotsPath)
	}

	return nil
}
// deleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
// It accepts the pool name, volume type and parent volume name.
func deleteParentSnapshotDirIfEmpty(poolName string, volType VolumeType, volName string) error {
	snapshotsPath := GetVolumeSnapshotDir(poolName, volType, volName)

	// A missing directory needs no cleanup.
	if !shared.PathExists(snapshotsPath) {
		return nil
	}

	isEmpty, err := shared.PathIsEmpty(snapshotsPath)
	if err != nil {
		return err
	}

	if !isEmpty {
		return nil
	}

	err = os.Remove(snapshotsPath)
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "Failed to remove '%s'", snapshotsPath)
	}

	return nil
}
// ensureSparseFile creates a sparse empty file at specified location with specified size.
// If the path already exists, the file is truncated to the requested size.
func ensureSparseFile(filePath string, sizeBytes int64) error {
	f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return errors.Wrapf(err, "Failed to open %s", filePath)
	}
	defer f.Close()

	// Truncate extends the file without writing data blocks, keeping it sparse.
	err = f.Truncate(sizeBytes)
	if err != nil {
		return errors.Wrapf(err, "Failed to create sparse file %s", filePath)
	}

	return nil
}
// roundVolumeBlockFileSizeBytes rounds sizeBytes up to the nearest multiple of
// MinBlockBoundary that is equal to or larger than sizeBytes.
// (The previous doc comment claimed a size string is parsed; the argument is an int64.)
func roundVolumeBlockFileSizeBytes(sizeBytes int64) int64 {
	// Qemu requires image files to be in traditional storage block boundaries.
	// We use 8k here to ensure our images are compatible with all of our backend drivers.
	if sizeBytes < MinBlockBoundary {
		sizeBytes = MinBlockBoundary
	}

	// Round down to a MinBlockBoundary multiple (sizeBytes is already int64, so the
	// previous int64() conversion was redundant), then bump up one boundary if needed.
	roundedSizeBytes := (sizeBytes / MinBlockBoundary) * MinBlockBoundary
	if roundedSizeBytes < sizeBytes {
		roundedSizeBytes += MinBlockBoundary
	}

	return roundedSizeBytes
}
// ensureVolumeBlockFile creates new block file or enlarges the raw block file for a volume to the specified size.
// Returns true if resize took place, false if not. Requested size is rounded to nearest block size using
// roundVolumeBlockFileSizeBytes() before decision whether to resize is taken.
func ensureVolumeBlockFile(vol Volume, path string, sizeBytes int64) (bool, error) {
	if sizeBytes <= 0 {
		return false, fmt.Errorf("Size cannot be zero")
	}

	// Get rounded block size to avoid qemu boundary issues.
	sizeBytes = roundVolumeBlockFileSizeBytes(sizeBytes)

	if shared.PathExists(path) {
		fi, err := os.Stat(path)
		if err != nil {
			return false, err
		}

		oldSizeBytes := fi.Size()
		if sizeBytes == oldSizeBytes {
			// Already at the requested (rounded) size; nothing to do.
			return false, nil
		}

		// Block image volumes cannot be resized because they can have a readonly snapshot that doesn't get
		// updated when the volume's size is changed, and this is what instances are created from.
		// During initial volume fill allowUnsafeResize is enabled because snapshot hasn't been taken yet.
		if !vol.allowUnsafeResize && vol.volType == VolumeTypeImage {
			return false, ErrNotSupported
		}

		// Only perform pre-resize sanity checks if we are not in "unsafe" mode.
		// In unsafe mode we expect the caller to know what they are doing and understand the risks.
		if !vol.allowUnsafeResize {
			if sizeBytes < oldSizeBytes {
				return false, errors.Wrap(ErrCannotBeShrunk, "Block volumes cannot be shrunk")
			}

			if vol.MountInUse() {
				return false, ErrInUse // We don't allow online resizing of block volumes.
			}
		}

		// Grow the existing file by truncating it to the new, larger size.
		err = ensureSparseFile(path, sizeBytes)
		if err != nil {
			return false, errors.Wrapf(err, "Failed resizing disk image %q to size %d", path, sizeBytes)
		}

		return true, nil
	}

	// If path doesn't exist, then there has been no filler function supplied to create it from another source.
	// So instead create an empty volume (use for PXE booting a VM).
	err := ensureSparseFile(path, sizeBytes)
	if err != nil {
		return false, errors.Wrapf(err, "Failed creating disk image %q as size %d", path, sizeBytes)
	}

	return false, nil
}
// mkfsOptions represents options for filesystem creation.
type mkfsOptions struct {
	// Label is the filesystem label passed to mkfs via "-L"; empty means no label.
	Label string
}
// makeFSType creates the provided filesystem on the device at path.
// On command failure the tool's output is returned alongside the error;
// on success the returned string is empty.
func makeFSType(path string, fsType string, options *mkfsOptions) (string, error) {
	opts := options
	if opts == nil {
		opts = &mkfsOptions{}
	}

	args := []string{}
	if opts.Label != "" {
		args = append(args, "-L", opts.Label)
	}

	if fsType == "ext4" {
		args = append(args, "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0")
	}

	// Always add the path to the device as the last argument for wider compatibility with versions of mkfs.
	args = append(args, path)

	msg, err := shared.TryRunCommand(fmt.Sprintf("mkfs.%s", fsType), args...)
	if err != nil {
		return msg, err
	}

	return "", nil
}
// mountOption represents an individual mount option.
type mountOption struct {
	// capture controls how resolveMountOptions applies flag:
	// true sets (ORs in) the flag, false clears it.
	capture bool
	// flag is the corresponding MS_* bit from the unix package.
	flag uintptr
}
// mountOptions represents a list of possible mount options.
// Each pair of option/negation ("atime"/"noatime", "ro"/"rw", ...) maps to the
// same MS_* flag, with capture indicating whether the flag is set or cleared.
var mountOptions = map[string]mountOption{
	"async":         {false, unix.MS_SYNCHRONOUS},
	"atime":         {false, unix.MS_NOATIME},
	"bind":          {true, unix.MS_BIND},
	"defaults":      {true, 0},
	"dev":           {false, unix.MS_NODEV},
	"diratime":      {false, unix.MS_NODIRATIME},
	"dirsync":       {true, unix.MS_DIRSYNC},
	"exec":          {false, unix.MS_NOEXEC},
	"lazytime":      {true, unix.MS_LAZYTIME},
	"mand":          {true, unix.MS_MANDLOCK},
	"noatime":       {true, unix.MS_NOATIME},
	"nodev":         {true, unix.MS_NODEV},
	"nodiratime":    {true, unix.MS_NODIRATIME},
	"noexec":        {true, unix.MS_NOEXEC},
	"nomand":        {false, unix.MS_MANDLOCK},
	"norelatime":    {false, unix.MS_RELATIME},
	"nostrictatime": {false, unix.MS_STRICTATIME},
	"nosuid":        {true, unix.MS_NOSUID},
	"rbind":         {true, unix.MS_BIND | unix.MS_REC},
	"relatime":      {true, unix.MS_RELATIME},
	"remount":       {true, unix.MS_REMOUNT},
	"ro":            {true, unix.MS_RDONLY},
	"rw":            {false, unix.MS_RDONLY},
	"strictatime":   {true, unix.MS_STRICTATIME},
	"suid":          {false, unix.MS_NOSUID},
	"sync":          {true, unix.MS_SYNCHRONOUS},
}
// resolveMountOptions resolves the provided comma-separated mount options into
// a set of mount(2) flags plus the residual (unrecognised) options string.
//
// Recognised options are consumed: "capture" options OR their flag into the
// result, their negating counterparts clear it. Anything not in the
// mountOptions table is kept, in order, and returned joined by commas for use
// as the filesystem-specific data argument.
func resolveMountOptions(options string) (uintptr, string) {
	mountFlags := uintptr(0)

	// Collect unrecognised options instead of deleting recognised ones in place
	// (the old copy/truncate/i-- dance was equivalent but much harder to read).
	remaining := []string{}
	for _, opt := range strings.Split(options, ",") {
		do, ok := mountOptions[opt]
		if !ok {
			remaining = append(remaining, opt)
			continue
		}

		if do.capture {
			mountFlags |= do.flag
		} else {
			mountFlags &= ^do.flag
		}
	}

	return mountFlags, strings.Join(remaining, ",")
}
// filesystemTypeCanBeShrunk indicates if filesystems of fsType can be shrunk.
// An empty fsType is treated as DefaultFilesystem.
func filesystemTypeCanBeShrunk(fsType string) bool {
	if fsType == "" {
		fsType = DefaultFilesystem
	}

	switch fsType {
	case "ext4", "btrfs":
		return true
	default:
		return false
	}
}
// shrinkFileSystem shrinks a filesystem if it is supported.
// EXT4 volumes will be unmounted temporarily if needed.
// BTRFS volumes will be mounted temporarily if needed.
func shrinkFileSystem(fsType string, devPath string, vol Volume, byteSize int64) error {
	if fsType == "" {
		fsType = DefaultFilesystem
	}
	if !filesystemTypeCanBeShrunk(fsType) {
		return ErrCannotBeShrunk
	}
	// The smallest unit that resize2fs accepts in byte size (rather than blocks) is kilobytes.
	strSize := fmt.Sprintf("%dK", byteSize/1024)
	switch fsType {
	case "ext4":
		// ext4 must be checked (and therefore unmounted) before resize2fs will shrink it.
		return vol.UnmountTask(func(op *operations.Operation) error {
			output, err := shared.RunCommand("e2fsck", "-f", "-y", devPath)
			if err != nil {
				exitCodeFSModified := false
				// Dig the process exit code out of the wrapped run error.
				runErr, ok := err.(shared.RunError)
				if ok {
					exitError, ok := runErr.Err.(*exec.ExitError)
					if ok {
						// Exit code 1 from e2fsck means "errors corrected", not failure.
						if exitError.ExitCode() == 1 {
							exitCodeFSModified = true
						}
					}
				}
				// e2fsck can return non-zero exit code if it has modified the filesystem, but
				// this isn't an error and we can proceed.
				if !exitCodeFSModified {
					// e2fsck provides some context to errors on stdout.
					return errors.Wrapf(err, "%s", strings.TrimSpace(output))
				}
			}
			_, err = shared.RunCommand("resize2fs", devPath, strSize)
			if err != nil {
				return err
			}
			return nil
		}, true, nil)
	case "btrfs":
		// btrfs resizes online, so the volume is mounted for the operation.
		return vol.MountTask(func(mountPath string, op *operations.Operation) error {
			_, err := shared.RunCommand("btrfs", "filesystem", "resize", strSize, mountPath)
			if err != nil {
				return err
			}
			return nil
		}, nil)
	}
	// Unreachable unless filesystemTypeCanBeShrunk and this switch get out of sync.
	return fmt.Errorf("Unrecognised filesystem type %q", fsType)
}
// growFileSystem grows a filesystem if it is supported. The volume will be mounted temporarily if needed.
func growFileSystem(fsType string, devPath string, vol Volume) error {
	if fsType == "" {
		fsType = DefaultFilesystem
	}

	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var (
			msg string
			err error
		)

		// Pick the grow tool matching the filesystem type.
		switch fsType {
		case "ext4":
			msg, err = shared.TryRunCommand("resize2fs", devPath)
		case "xfs":
			msg, err = shared.TryRunCommand("xfs_growfs", mountPath)
		case "btrfs":
			msg, err = shared.TryRunCommand("btrfs", "filesystem", "resize", "max", mountPath)
		default:
			return fmt.Errorf("Unrecognised filesystem type %q", fsType)
		}

		if err != nil {
			// Surface the tool's output, which usually explains the failure.
			return fmt.Errorf("Could not grow underlying %q filesystem for %q: %s", fsType, devPath, msg)
		}

		return nil
	}, nil)
}
// renegerateFilesystemUUIDNeeded returns true if fsType requires UUID regeneration, false if not.
// Only btrfs and xfs embed a UUID that must be regenerated on copy.
func renegerateFilesystemUUIDNeeded(fsType string) bool {
	return fsType == "btrfs" || fsType == "xfs"
}
// regenerateFilesystemUUID changes the filesystem UUID to a new randomly generated one if the fsType requires it.
// Otherwise this function does nothing and returns an error naming the rejected type.
func regenerateFilesystemUUID(fsType string, devPath string) error {
	switch fsType {
	case "btrfs":
		return regenerateFilesystemBTRFSUUID(devPath)
	case "xfs":
		return regenerateFilesystemXFSUUID(devPath)
	}

	// Include the offending type so callers can tell what was rejected.
	return fmt.Errorf("Filesystem %q not supported", fsType)
}
// regenerateFilesystemBTRFSUUID changes the BTRFS filesystem UUID to a new randomly generated one.
func regenerateFilesystemBTRFSUUID(devPath string) error {
	// If the snapshot was taken whilst instance was running there may be outstanding transactions that will
	// cause btrfstune to corrupt superblock, so ensure these are cleared out first.
	if _, err := shared.RunCommand("btrfs", "rescue", "zero-log", devPath); err != nil {
		return err
	}

	_, err := shared.RunCommand("btrfstune", "-f", "-u", devPath)
	return err
}
// regenerateFilesystemXFSUUID changes the XFS filesystem UUID to a new randomly generated one.
// If the first attempt reports output (typically a dirty log), the log is cleared with
// xfs_repair -L and the generation is retried once.
func regenerateFilesystemXFSUUID(devPath string) error {
	// Attempt to generate a new UUID.
	msg, err := shared.RunCommand("xfs_admin", "-U", "generate", devPath)
	if err != nil {
		return err
	}
	if msg != "" {
		// Exit 0 with a msg usually means some log entry getting in the way.
		// Force-clear the log so the UUID change can be applied.
		_, err = shared.RunCommand("xfs_repair", "-o", "force_geometry", "-L", devPath)
		if err != nil {
			return err
		}
		// Attempt to generate a new UUID again.
		_, err = shared.RunCommand("xfs_admin", "-U", "generate", devPath)
		if err != nil {
			return err
		}
	}
	return nil
}
// copyDevice copies one device path to another.
// The destination must already exist: it is opened write-only without O_CREATE or O_TRUNC.
func copyDevice(inputPath, outputPath string) error {
	from, err := os.Open(inputPath)
	if err != nil {
		return errors.Wrapf(err, "Error opening file for reading %q", inputPath)
	}
	defer from.Close()

	to, err := os.OpenFile(outputPath, os.O_WRONLY, 0)
	if err != nil {
		return errors.Wrapf(err, "Error opening file for writing %q", outputPath)
	}
	defer to.Close()

	_, err = io.Copy(to, from)
	if err != nil {
		return errors.Wrapf(err, "Error copying file %q to %q", inputPath, outputPath)
	}

	// A deferred Close silently discards write-back errors on the destination;
	// close explicitly so such failures reach the caller. The deferred Close on
	// an already-closed file is then a harmless no-op returning an error we ignore.
	err = to.Close()
	if err != nil {
		return errors.Wrapf(err, "Error closing file %q", outputPath)
	}

	return nil
}
// loopFilePath returns the loop file path for a storage pool.
// The file lives under the "disks" directory as "<poolName>.img".
func loopFilePath(poolName string) string {
	fileName := fmt.Sprintf("%s.img", poolName)
	return filepath.Join(shared.VarPath("disks"), fileName)
}
// ShiftBtrfsRootfs shifts the BTRFS root filesystem at path by applying
// diskIdmap.ShiftRootfs via shiftBtrfsRootfs (read-only subvolumes are
// temporarily made writable for the operation).
func ShiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
	return shiftBtrfsRootfs(path, diskIdmap, true)
}
// UnshiftBtrfsRootfs reverses ShiftBtrfsRootfs by applying
// diskIdmap.UnshiftRootfs via shiftBtrfsRootfs.
func UnshiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
	return shiftBtrfsRootfs(path, diskIdmap, false)
}
// shiftBtrfsRootfs shifts a filesystem that may include read-only subvolumes.
// Read-only subvolumes are flipped to read-write first so the shift can touch
// them, then restored afterwards regardless of whether the shift succeeded.
func shiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet, shift bool) error {
	var err error
	roSubvols := []string{}
	// Listing errors are deliberately ignored; an empty list means nothing to flip.
	subvols, _ := BTRFSSubVolumesGet(path)
	sort.Sort(sort.StringSlice(subvols))
	for _, subvol := range subvols {
		subvol = filepath.Join(path, subvol)
		if !BTRFSSubVolumeIsRo(subvol) {
			continue
		}
		roSubvols = append(roSubvols, subvol)
		BTRFSSubVolumeMakeRw(subvol)
	}
	if shift {
		err = diskIdmap.ShiftRootfs(path, nil)
	} else {
		err = diskIdmap.UnshiftRootfs(path, nil)
	}
	// Restore the read-only flag on the subvolumes that originally had it.
	for _, subvol := range roSubvols {
		BTRFSSubVolumeMakeRo(subvol)
	}
	return err
}
// BTRFSSubVolumesGet returns the paths (relative to path) of all btrfs
// subvolumes found underneath path.
func BTRFSSubVolumesGet(path string) ([]string, error) {
	if !strings.HasSuffix(path, "/") {
		path = path + "/"
	}

	result := []string{}

	// Unprivileged users can't get to fs internals, so walk errors are skipped.
	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
		if err != nil {
			return nil
		}

		// Ignore the base path itself.
		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
			return nil
		}

		// Subvolumes can only be directories; record those that are subvolumes.
		if fi.IsDir() && btrfsIsSubVolume(fpath) {
			result = append(result, strings.TrimPrefix(fpath, path))
		}

		return nil
	})

	return result, nil
}
// btrfsIsSubVolume checks if a given path is a btrfs subvolume.
func btrfsIsSubVolume(subvolPath string) bool {
	fs := unix.Stat_t{}
	if unix.Lstat(subvolPath, &fs) != nil {
		return false
	}

	// Subvolume roots always carry inode BTRFS_FIRST_FREE_OBJECTID (256).
	return fs.Ino == 256
}
// BTRFSSubVolumeIsRo returns whether the subvolume at path reports its "ro"
// property as true; errors from the btrfs tool are treated as "not read-only".
func BTRFSSubVolumeIsRo(path string) bool {
	out, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
	return err == nil && strings.HasPrefix(string(out), "ro=true")
}
// BTRFSSubVolumeMakeRo makes a subvolume read only by setting its btrfs "ro"
// property to true via the btrfs CLI.
//
// Deprecated: use btrfs.setSubvolumeReadonlyProperty().
func BTRFSSubVolumeMakeRo(path string) error {
	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
	return err
}
// BTRFSSubVolumeMakeRw makes a sub volume read/write by setting its btrfs "ro"
// property to false via the btrfs CLI.
//
// Deprecated: use btrfs.setSubvolumeReadonlyProperty().
func BTRFSSubVolumeMakeRw(path string) error {
	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
	return err
}
// ShiftZFSSkipper indicates which files not to shift for ZFS.
func ShiftZFSSkipper(dir string, absPath string, fi os.FileInfo) bool {
strippedPath := absPath
if dir != "" {
strippedPath = absPath[len(dir):]
}
if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
return true
}
return false
}
// BlockDiskSizeBytes returns the size of a block disk (path can be either block device or raw file).
func BlockDiskSizeBytes(blockDiskPath string) (int64, error) {
	if !shared.IsBlockdevPath(blockDiskPath) {
		// A non-blockdev path is assumed to be a raw file.
		fi, err := os.Lstat(blockDiskPath)
		if err != nil {
			return -1, err
		}

		return fi.Size(), nil
	}

	// Attempt to open the device path.
	f, err := os.Open(blockDiskPath)
	if err != nil {
		return -1, err
	}
	defer f.Close()

	// Retrieve the device size via the BLKGETSIZE64 ioctl.
	res, err := unix.IoctlGetInt(int(f.Fd()), unix.BLKGETSIZE64)
	if err != nil {
		return -1, err
	}

	return int64(res), nil
}
// PathNameEncode encodes a path string to be used as part of a file name.
// The encoding scheme replaces "-" with "--" and then "/" with "-".
func PathNameEncode(text string) string {
	// Escape existing dashes first so the "/"-marker dash stays unambiguous.
	encoded := strings.Replace(text, "-", "--", -1)
	return strings.Replace(encoded, "/", "-", -1)
}
// PathNameDecode decodes a string containing an encoded path back to its original form.
// The decoding scheme converts "-" back to "/" and "--" back to "-".
func PathNameDecode(text string) string {
	// Stash literal dashes ("--") as NUL so lone dashes can safely become "/",
	// then restore the NULs back to "-".
	tmp := strings.Replace(text, "--", "\000", -1)
	tmp = strings.Replace(tmp, "-", "/", -1)
	return strings.Replace(tmp, "\000", "-", -1)
}
// OperationLockName returns the storage specific lock name to use with locking package.
// The name has the shape "<operation>/<pool>/<volType>/<contentType>/<volName>".
func OperationLockName(operationName string, poolName string, volType VolumeType, contentType ContentType, volName string) string {
	return fmt.Sprintf("%s/%s/%s/%s/%s", operationName, poolName, volType, contentType, volName)
}
|
package render
import (
"io"
"os"
"path/filepath"
"github.com/vito/booklit"
)
// RenderingEngine is a booklit visitor that can render what it has visited.
// FileExtension reports the extension for generated files and Render writes
// the rendered output to the supplied writer.
type RenderingEngine interface {
	booklit.Visitor
	FileExtension() string
	Render(io.Writer) error
}
// Writer writes rendered sections to files under Destination using Engine.
type Writer struct {
	// Engine performs the actual visiting/rendering.
	Engine RenderingEngine
	// Destination is the output directory for generated files.
	Destination string
}
// WriteSection renders section to "<primary-tag>.<ext>" in the Destination
// directory. Sections whose parent does not split sections are rendered as
// part of the parent, so nothing is written for them. When SplitSections is
// set, children are written to their own files before this section renders.
func (writer Writer) WriteSection(section *booklit.Section) error {
	if section.Parent != nil && !section.Parent.SplitSections {
		return nil
	}

	name := section.PrimaryTag.Name + "." + writer.Engine.FileExtension()
	path := filepath.Join(writer.Destination, name)

	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	// The handle was previously never closed, leaking one file descriptor per
	// section written; close it on every exit path.
	defer file.Close()

	var node booklit.Content = section

	if section.SplitSections {
		for _, child := range section.Children {
			err := writer.WriteSection(child)
			if err != nil {
				return err
			}
		}
	}

	err = node.Visit(writer.Engine)
	if err != nil {
		return err
	}

	return writer.Engine.Render(file)
}
Remove dead code.
See the NB in commit c0cb12b8248a8ed5e232e836ebef8a4aafceebbd for context.
package render
import (
"io"
"os"
"path/filepath"
"github.com/vito/booklit"
)
// RenderingEngine visits a booklit document tree and renders the result.
// FileExtension is the extension used for output files; Render writes the
// visited content to the given writer.
type RenderingEngine interface {
	booklit.Visitor
	FileExtension() string
	Render(io.Writer) error
}
// Writer renders sections to files in the Destination directory via Engine.
type Writer struct {
	// Engine performs the visiting and rendering.
	Engine RenderingEngine
	// Destination is the directory output files are written into.
	Destination string
}
// WriteSection renders section into "<primary-tag>.<ext>" inside the
// Destination directory. When SplitSections is set, each child section is
// recursively written to its own file before this section is rendered.
func (writer Writer) WriteSection(section *booklit.Section) error {
	name := section.PrimaryTag.Name + "." + writer.Engine.FileExtension()
	path := filepath.Join(writer.Destination, name)

	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	// The handle was previously never closed, leaking one file descriptor per
	// section written; close it on every exit path.
	defer file.Close()

	var node booklit.Content = section

	if section.SplitSections {
		for _, child := range section.Children {
			err := writer.WriteSection(child)
			if err != nil {
				return err
			}
		}
	}

	err = node.Visit(writer.Engine)
	if err != nil {
		return err
	}

	return writer.Engine.Render(file)
}
|
package utils
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
"fmt"
"log"
"objects"
"time"
)
// DbInit creates the users, tokens and apikeys tables if they do not already
// exist. The first failure aborts initialisation and is returned.
//
// The original version only logged Prepare failures for the tokens/apikeys
// tables and then called Exec on a nil statement, which would panic; it also
// swallowed Exec errors and returned nil. All failures are now returned.
func DbInit() error {
	db, err := sql.Open(DB_TYPE, DB_NAME)
	if err != nil {
		log.Println("init, Error opening db", DB_NAME, ", err=", err)
		return err
	}
	defer db.Close()

	// Each table gets its own create statement; treat any failure as fatal.
	for _, createStmt := range []string{
		STMT_CREATE_TABLE_USERS,
		STMT_CREATE_TABLE_TOKENS,
		STMT_CREATE_TABLE_APIKEYS,
	} {
		stmt, err := db.Prepare(createStmt)
		if err != nil {
			log.Printf("init, Error preparing, %s, err=%s", createStmt, err.Error())
			return err
		}

		_, err = stmt.Exec()
		stmt.Close()
		if err != nil {
			log.Printf("init, Error executing, %s, err=%s", createStmt, err)
			return err
		}
	}

	return nil
}
// Adds a user in the users table if it does not exist.
// ApiKey is also generated for the user. If apiKey creation fails for some reason,
// we would not fail the user creation. We would just have to check if a apiKey exists for that user when we are presenting it to the
// user and if it doesn't we should create it then.
//
// Returns (isUserExists, isUserAdded, errorUser).
// NOTE(review): the named return errorUser is never assigned anywhere in this
// function, so every bare "return" below (after Prepare/Query failures)
// reports a nil error — callers cannot distinguish those failures from
// "user already exists".
func DbAddUser(aEmail string, aPassword string, aDb *sql.DB) (isUserExists bool, isUserAdded bool, errorUser error) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false, false, err
		}
		defer db.Close()
	}
	isUserExists = false
	// Look for an existing row with the same email.
	stmt, err = db.Prepare(fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email))
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email), err.Error())
		return
	}
	rows, err = stmt.Query(aEmail)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("select * from %s where %s=%s", TABLE_users, TABLE_USERS_COLUMN_email, aEmail), err.Error())
		return
	}
	for rows.Next() {
		var id int
		var email string
		var password string
		var salt string
		// Scan error is ignored; an unreadable row is treated as no match.
		rows.Scan(&id, &email, &password, &salt)
		if email == aEmail {
			isUserExists = true
			break;
		}
	}
	if rows != nil {rows.Close()}
	if stmt != nil {stmt.Close()}
	if isUserExists == false {
		// Hash the password with a fresh random salt before storing it.
		salt := GenerateRandomString(SALT_LENGTH)
		// NOTE(review): ":=" shadows the outer err for the rest of this block.
		passwordHash, err := HashSha1(fmt.Sprintf("%s%s", aPassword, salt))
		if err != nil {
			log.Printf("Error hashing string=%s, error=%s", fmt.Sprintf("%s%s", aPassword, salt), err.Error())
			return isUserExists, false, err
		}
		stmt, err = db.Prepare(STMT_INSERT_INTO_USERS)
		if err != nil {
			log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_USERS, err.Error())
			return
		}
		_, err = stmt.Exec(aEmail, passwordHash, salt)
		if err != nil {
			log.Printf("Error executing %s, error=%s", STMT_INSERT_INTO_USERS, err.Error())
			return isUserExists, false, err
		}
		stmt.Close()
		//create and insert apiKey
		// Read back the id of the user we just inserted so an apiKey could be linked to it.
		// NOTE(review): Prepare/Query failures here are only logged; a nil stmt
		// or rows would panic on the calls that follow.
		stmt, err = db.Prepare(fmt.Sprintf("select %s from %s where %s=?", TABLE_USERS_COLUMN_id, TABLE_users, TABLE_USERS_COLUMN_email))
		if err != nil {
			log.Printf("Error preparing, select id from users where email=?, error=%s", err.Error())
		}
		rows, err = stmt.Query(aEmail)
		if err != nil {
			log.Printf("Error query, select id from users where email=%s, error=%s", aEmail, err.Error())
		}
		var userId int = -1
		if rows.Next() {
			err = rows.Scan(&userId)
			if err != nil {
				log.Printf("Error scanning userId, error=%s", err.Error())
			}
		}
		if rows != nil {rows.Close()}
		if stmt != nil {stmt.Close()}
		if userId < 0 {
			// The user row was inserted but its id could not be read back.
			return isUserExists, false, nil
		}
		//DbAddApiKey(userId, STR_EMPTY, db)
		return isUserExists, true, nil
	} else {
		return isUserExists, false, nil
	}
}
//Deletes a user and his apiKeys and the reports<apiKey> tables of that user
// Returns true when the user row and apiKey rows were removed (or there were
// no apiKeys to clean up).
func DbDeleteUser(aUserId int, aDb *sql.DB) bool {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false
		}
		defer db.Close()
	}
	//Delete user from table users
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_id))
	if err != nil {
		log.Printf("Error preparing, delete from users where id=?, error=%s", err.Error())
		return false
	}
	_, err = stmt.Exec(aUserId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%d", TABLE_users, TABLE_USERS_COLUMN_id, aUserId), err.Error())
		return false
	}
	stmt.Close()
	// Collect the user's apiKeys before deleting them so the per-key
	// reports<apiKey> tables can be dropped afterwards.
	stmt, err = db.Prepare(fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid))
	if err != nil {
		log.Printf("Error preparing, %s, error=%s",
			fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid), err.Error())
		return false
	}
	rows, err = stmt.Query(aUserId)
	if err != nil {
		log.Printf("Error quering, %s, error=%s",
			fmt.Sprintf("select %s from %s where %s=%d", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid, aUserId), err.Error())
		return false
	}
	//get all apiKeys of user
	var sliceApiKeys []string = make([]string, 0, 16)
	for rows.Next() {
		var apiKey string
		// Scan error ignored; a failed row contributes an empty apiKey.
		rows.Scan(&apiKey)
		sliceApiKeys = append(sliceApiKeys, apiKey)
	}
	if rows != nil {rows.Close()}
	if stmt != nil {stmt.Close()}
	if len(sliceApiKeys) == 0 {
		return true
	}
	//Delete apiKeys of user (delete from table apiKeys)
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid))
	if err != nil {
		log.Printf("Error preparing, delete from %s where userid=?, error=%s", TABLE_apikeys, err.Error())
		return false
	}
	_, err = stmt.Exec(aUserId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%d", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid, aUserId), err.Error())
		return false
	}
	stmt.Close()
	//Drop reports<apiKey> tables for the user
	// NOTE(review): the table name embeds the apiKey directly in the SQL; this
	// relies on apiKeys being server-generated strings, never user input.
	// NOTE(review): if Prepare fails here the error is only logged and the
	// following Exec runs on a stale/nil stmt — worth hardening.
	for _, name := range sliceApiKeys {
		stmt, err = db.Prepare(fmt.Sprintf("drop table if exists %s%s", TABLE_reports, name))
		if err != nil {
			log.Printf("Error preparing, drop table if exists ?, error=%s", err.Error())
		}
		_, err = stmt.Exec()
		if err != nil {
			log.Printf("Error executing, drop table if exists %s, error=%s", (TABLE_reports + name), err.Error())
		}
	}
	if stmt != nil {stmt.Close()}
	return true
}
// DbGetUser authenticates a user by email and password, returning the user's
// id on success and -1 otherwise.
// NOTE(review): scan errors other than sql.ErrNoRows fall through with empty
// email/password/salt, and a wrong password returns (-1, err) where err may
// be nil — callers can only rely on the id, not on the error value.
func DbGetUser(aEmail string, aPassword string, aDb *sql.DB) (id int, err error){
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return -1, err
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email))
	if err != nil {
		// NOTE(review): only logged; a nil stmt would panic on QueryRow below.
		log.Printf("Error preparing %s, error=%s", fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email), err.Error())
	}
	row = stmt.QueryRow(aEmail)
	// Expected column order: id, email, password (hash), salt.
	var email string
	var password string
	var salt string
	err = row.Scan(&id, &email, &password, &salt)
	if err != nil && err == sql.ErrNoRows {
		return -1, err
	}
	// Recompute the salted hash and compare it with the stored one.
	passwordHash, err := HashSha1(fmt.Sprintf("%s%s", aPassword, salt))
	if err != nil {
		log.Printf("Error hashing string=%s, error=%s", fmt.Sprintf("%s%s", aPassword, salt), err.Error())
	}
	if passwordHash != password {
		return -1, err
	}
	return id, err
}
// DbGetUserLoad loads the user row for aUserId into a freshly allocated
// objects.User. Only Id and Email are populated on the returned object.
func DbGetUserLoad(aUserId int, aDb *sql.DB) (user *objects.User, err error) {
	user = new(objects.User)
	user.Id = aUserId
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return user, err
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_id))
	if stmt != nil {defer stmt.Close()}
	if err != nil {
		// NOTE(review): only logged; a nil stmt would panic on QueryRow below.
		// (The logged statement text mentions the email column while the real
		// query filters on id.)
		log.Printf("Error preparing %s, error=%s", fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email), err.Error())
	}
	row = stmt.QueryRow(aUserId)
	// Expected column order: id, email, password (hash), salt.
	var id int
	var email string
	var password string
	var salt string
	err = row.Scan(&id, &email, &password, &salt)
	if err != nil && err == sql.ErrNoRows {
		return user, err
	} else {
		user.Id = id
		user.Email = email
	}
	return user, err
}
// DbAddToken purges the user's expired tokens, generates a fresh session
// token, stores it in the tokens table and returns it.
// Returns the empty string if the token could not be generated.
func DbAddToken(aUserId int, aDb *sql.DB) (token string) {
	DbCleanTokens(aUserId, aDb)
	var err error = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("AddToken, Error opening db, err=", err)
			return
		}
		defer db.Close()
	}
	token, errUUID := GenerateUUID()
	if errUUID != nil {
		// Fixed typo ("Erro") and bail out: previously a bogus/empty token was
		// inserted into the table even when generation failed.
		fmt.Println("Error generating uuid, err=", errUUID)
		return ""
	}
	issued := time.Now().UnixNano() / int64(time.Millisecond)
	expires := TOKEN_VALIDITY_MS
	_, err = db.Exec("insert or ignore into tokens(userid, token, issued, expires) values(?, ?, ?, ?)", aUserId, token, issued, expires)
	if err != nil {
		fmt.Println("AddToken, Error inserting into tokens, err=", err)
	}
	return token
}
// DbDeleteApiKey removes the given apiKey row from the apikeys table.
// Returns true when the delete statement executed successfully.
func DbDeleteApiKey(aApiKey string, aDb *sql.DB) (isDeleted bool) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("DbDeleteApiKey, Error opening db login.sqlite, err=%s", err.Error())
			return
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey))
	if err != nil {
		log.Printf("Error preparing, delete from %s where %s=?, error=%s", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey, err.Error())
		return false
	}
	defer stmt.Close()
	_, err = stmt.Exec(aApiKey)
	if err != nil {
		// aApiKey is a string: the original used the %d verb here, which would
		// render as %!d(string=...) in the log.
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%s", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey, aApiKey), err.Error())
		return false
	}
	return true
}
// DbIsTokenValid reports whether aToken exists in the tokens table and has not
// yet expired (now < issued + expires, in milliseconds). On success the owning
// user's id is returned, otherwise (false, -1).
func DbIsTokenValid(aToken string, aDb *sql.DB) (isValid bool, userId int) {
	var err error = nil
	var db *sql.DB = aDb
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("IsTokenValid, Error opening db login.sqlite, err=", err)
			return false, -1
		}
		defer db.Close()
	}
	// Filter by token in SQL rather than scanning the whole table in Go.
	rows, err = db.Query("select userid, issued, expires from tokens where token=?", aToken)
	if err != nil {
		fmt.Println("IsTokenValid, Error select from tokens, err=", err)
		return false, -1
	}
	defer rows.Close()
	now := time.Now().UnixNano() / int64(time.Millisecond)
	for rows.Next() {
		var tokenUserId int
		var issued int64
		var expires int64
		err = rows.Scan(&tokenUserId, &issued, &expires)
		if err != nil {
			fmt.Println("IsTokenValid, Error scan tokens, err=", err)
			continue
		}
		if now < (issued + expires) {
			return true, tokenUserId
		}
	}
	return false, -1
}
// DbIsApiKeyValid reports whether aApiKey exists in the apikeys table; on
// success the id of the owning user is returned, otherwise (false, -1).
func DbIsApiKeyValid(aApiKey string, aDb *sql.DB) (isValid bool, userId int) {
	var err error = nil
	var db *sql.DB = aDb
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("IsTokenValid, Error opening db login.sqlite, err=%s", err.Error())
			return false, -1
		}
		defer db.Close()
	}
	// Filter by apiKey in SQL rather than scanning every row in Go.
	rows, err = db.Query(fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_userid, TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey), aApiKey)
	if err != nil {
		log.Printf("IsTokenValid, Error select from %s, err=%s", TABLE_apikeys, err.Error())
		return false, -1
	}
	defer rows.Close()
	if rows.Next() {
		var keyUserId int
		err = rows.Scan(&keyUserId)
		if err != nil {
			log.Printf("IsApiKeyValid, Error scan %s, err=%s", TABLE_apikeys, err.Error())
			return false, -1
		}
		return true, keyUserId
	}
	return false, -1
}
// DbCleanTokens deletes all expired tokens (issued + expires < now, in
// milliseconds) belonging to aUserId. Best-effort: failures are logged only.
func DbCleanTokens(aUserId int, aDb *sql.DB) {
	var err error = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("IsTokenValid, Error opening db login.sqlite, err=", err)
			return
		}
		defer db.Close()
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	// The Exec error was previously discarded entirely; at least log it.
	_, err = db.Exec("delete from tokens where userid=? AND issued + expires < ?", aUserId, now)
	if err != nil {
		fmt.Println("DbCleanTokens, Error deleting expired tokens, err=", err)
	}
}
// Get all apiKeys of a user.
// Returns a slice of ApiKey objects for aUserId (possibly empty), or nil when
// the database could not be opened.
func DbGetApiKey(aUserId int, aDb *sql.DB) []*objects.ApiKey {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return nil
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("select * from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid))
	if err != nil {
		// NOTE(review): only logged; a nil stmt would panic on Query below.
		log.Printf("Error preparing %s, error=%s",
			fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid), err.Error())
	}
	rows, err = stmt.Query(aUserId)
	if err != nil {
		// NOTE(review): only logged; nil rows would panic on Next below.
		log.Printf("Error quering, %s, error=%s",
			fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid), err.Error())
	}
	var sliceApiKeys []*objects.ApiKey = make([]*objects.ApiKey, 0, 16)
	// Expected column order: id, userid, apikey, appname.
	for rows.Next() {
		var id int
		var userId int
		var apiKey string
		var appName string
		var objApiKey *objects.ApiKey
		objApiKey = new(objects.ApiKey)
		// Scan error is ignored; a failed row yields zero-valued fields.
		rows.Scan(&id, &userId, &apiKey, &appName)
		objApiKey.UserId = userId
		objApiKey.ApiKey = apiKey
		objApiKey.AppName = appName
		sliceApiKeys = append(sliceApiKeys, objApiKey)
	}
	if rows != nil {rows.Close()}
	if stmt != nil {stmt.Close()}
	return sliceApiKeys
}
// ApiKey is added in DbAddUser.
// This method we can use when we want to add additional apiKeys for a user, or if the user does not have a apiKey when we present it to him.
// Returns true when the key was stored successfully.
func DbAddApiKey(aUserId int, aAppName string, aDb *sql.DB) bool {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	// Negative ids are invalid; nothing to insert.
	if aUserId < 0 {
		return false
	}
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(STMT_INSERT_INTO_APIKEYS)
	if err != nil {
		// NOTE(review): only logged; a nil stmt would panic on Exec below.
		log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_APIKEYS, err.Error())
	}
	var apiKey string = STR_EMPTY
	apiKey, err = GenerateToken()
	if err != nil {
		// NOTE(review): a failed token generation is only logged; the
		// (possibly empty) apiKey is inserted anyway.
		log.Printf("Error generateToken, error=%s", err.Error())
	}
	// Default the application name to the key itself when none was given.
	if aAppName == STR_EMPTY {
		aAppName = apiKey
	}
	_, err = stmt.Exec(aUserId, apiKey, aAppName)
	if err != nil {
		log.Printf("Error executing %s, values userId=%d, apiKey=%s, appName=%s, error=%s", STMT_INSERT_INTO_APIKEYS, aUserId, apiKey, aAppName, err.Error())
		if stmt != nil {stmt.Close()}
		return false
	}
	if stmt != nil {stmt.Close()}
	return true
}
// DbAddClientInfo creates (if needed) the per-apiKey clientinfo table and
// inserts one row describing a client device.
// NOTE(review): the table name embeds aApiKey directly into the SQL text via
// fmt.Sprintf; this is only safe because apiKeys are server-generated — never
// pass attacker-controlled strings as aApiKey.
func DbAddClientInfo(aApiKey string, aClientId string, aName string, aManufacturer string, aModel string, aDeviceId string, aDb *sql.DB) error {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	// Open our own connection only when the caller did not supply one.
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return err
		}
		defer db.Close()
	}
	// Create the per-apiKey table if it does not exist yet.
	stmt, err = db.Prepare(fmt.Sprintf(STMT_CREATE_TABLE_CLIENTINFO, aApiKey))
	if err != nil {
		log.Printf("Error creating, %s, error=%s", fmt.Sprintf(STMT_CREATE_TABLE_CLIENTINFO, aApiKey), err.Error())
		return err
	}
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf(STMT_CREATE_TABLE_CLIENTINFO, aApiKey), err.Error())
		return err
	}
	if stmt != nil {stmt.Close()}
	// Insert the client row; values are bound as parameters.
	stmt, err = db.Prepare(fmt.Sprintf(STMT_INSERT_INTO_CLIENTINFO, aApiKey))
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", fmt.Sprintf(STMT_INSERT_INTO_CLIENTINFO, aApiKey), err.Error())
		return err
	}
	_, err = stmt.Exec(aClientId, aName, aManufacturer, aModel, aDeviceId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf(STMT_INSERT_INTO_CLIENTINFO, aApiKey), err.Error())
		return err
	}
	if stmt != nil {stmt.Close()}
	return nil
}
// DbDeleteClientInfo deletes the row for aClientId from the per-apiKey
// clientinfo table. Returns true on success.
// NOTE: the table name is built by concatenating aApiKey into the SQL; aApiKey
// must be a trusted, server-generated value.
func DbDeleteClientInfo(aApiKey string, aClientId string, aDb *sql.DB) (isDeleted bool) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// Fixed copy-pasted function name in the log prefix (was "DbDeleteApiKey").
			log.Printf("DbDeleteClientInfo, Error opening db login.sqlite, err=%s", err.Error())
			return
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid))
	if err != nil {
		// Fixed the format string: it was missing the error verb, so err.Error()
		// was printed as a %!(EXTRA ...) argument (go vet flags this).
		log.Printf("Error preparing, %s, error=%s", fmt.Sprintf("delete from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid), err.Error())
		return false
	}
	defer stmt.Close()
	_, err = stmt.Exec(aClientId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid), err.Error())
		return false
	}
	return true
}
// DbGetClientInfo loads the clientinfo row for aClientId from the per-apiKey
// table and returns it as an objects.ClientInfo, or nil on any failure.
// NOTE(review): the table name embeds aApiKey directly into the SQL; aApiKey
// must be a trusted, server-generated value.
func DbGetClientInfo(aApiKey string, aClientId string, aDb *sql.DB) *objects.ClientInfo {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// NOTE(review): log prefix says "DbDeleteApiKey" — copy-paste leftover.
			log.Printf("DbDeleteApiKey, Error opening db login.sqlite, err=%s", err.Error())
			return nil
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("select * from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid))
	if err != nil {
		// NOTE(review): format string is missing the error verb for err.Error().
		log.Printf("Error preparing, %s", fmt.Sprintf("select * from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid), err.Error())
		return nil
	}
	row = stmt.QueryRow(aClientId)
	// Expected column order: clientid, name, manufacturer, model, deviceid.
	var clientId string
	var name string
	var manufacturer string
	var model string
	var deviceId string
	err = row.Scan(&clientId, &name, &manufacturer, &model, &deviceId)
	if err != nil {
		// NOTE(review): stmt is not closed on this early return.
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("select * from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid), err.Error())
		return nil
	}
	clientInfo := new(objects.ClientInfo)
	clientInfo.ClientId = clientId
	clientInfo.Name = name
	clientInfo.Manufacturer = manufacturer
	clientInfo.Model = model
	clientInfo.DeviceId = deviceId
	if stmt != nil {stmt.Close()}
	return clientInfo
}
//DbGetClientInfos returns all client records from the clientinfo<aApiKey>
//table, or nil when the database cannot be opened or the query fails.
func DbGetClientInfos(aApiKey string, aDb *sql.DB) []*objects.ClientInfo {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return nil
		}
		defer db.Close()
	}
	query := fmt.Sprintf("select * from %s%s", TABLE_clientinfo, aApiKey)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("DbGetClientInfos, Error preparing %s, error=%s", query, err.Error())
		return nil
	}
	defer stmt.Close()
	rows, err = stmt.Query()
	if err != nil {
		log.Printf("DbGetClientInfos, Error quering, %s, error=%s", query, err.Error())
		// BUG FIX: must return here; rows is nil after a failed Query and
		// rows.Next() below would panic.
		return nil
	}
	defer rows.Close()
	var sliceClientInfo []*objects.ClientInfo = make([]*objects.ClientInfo, 0, 16)
	for rows.Next() {
		var clientId string
		var name string
		var manufacturer string
		var model string
		var deviceId string
		objClientInfo := new(objects.ClientInfo)
		// BUG FIX: the Scan error was ignored, so a failed row produced a
		// zero-valued record in the result; skip the bad row instead.
		if err = rows.Scan(&clientId, &name, &manufacturer, &model, &deviceId); err != nil {
			log.Printf("DbGetClientInfos, Error scanning, error=%s", err.Error())
			continue
		}
		objClientInfo.ApiKey = aApiKey
		objClientInfo.ClientId = clientId
		objClientInfo.Name = name
		objClientInfo.Manufacturer = manufacturer
		objClientInfo.Model = model
		objClientInfo.DeviceId = deviceId
		sliceClientInfo = append(sliceClientInfo, objClientInfo)
	}
	return sliceClientInfo
}
//DbUpdateClientInfo updates the display name of the client identified by
//aClientId in the clientinfo<aApiKey> table.
func DbUpdateClientInfo(aApiKey string, aClientId string, aName string, aDb *sql.DB) error {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("DbUpdateClientInfo, Error opening db login.sqlite, err=%s", err.Error())
			return err
		}
		defer db.Close()
	}
	query := fmt.Sprintf("update %s%s set %s=? where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_name, TABLE_CLIENTINFO_clientid)
	stmt, err = db.Prepare(query)
	if err != nil {
		// BUG FIX: both log calls had one %s verb but two arguments (go vet).
		log.Printf("DbUpdateClientInfo, Error preparing, %s, error=%s", query, err.Error())
		return err
	}
	// BUG FIX: the statement was never closed.
	defer stmt.Close()
	_, err = stmt.Exec(aName, aClientId)
	if err != nil {
		log.Printf("DbUpdateClientInfo, Error executing, %s, error=%s", query, err.Error())
	}
	return err
}
//DbClearClientInfo deletes all rows from the clientinfo<aApiKey> table.
//If the table does not exist, the scan of sqlite_master yields sql.ErrNoRows
//and that error is returned (preserving the original behavior).
func DbClearClientInfo(aApiKey string, aDb *sql.DB) error {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			// BUG FIX: was `return nil`, hiding the open failure from the caller.
			return err
		}
		defer db.Close()
	}
	existsQuery := fmt.Sprintf("select name from sqlite_master where type='table' and name='%s%s'", TABLE_clientinfo, aApiKey)
	stmt, err = db.Prepare(existsQuery)
	if err != nil {
		// BUG FIX: inner Sprintf had one %s but two args (go vet); also was
		// `return nil`, hiding the failure.
		log.Printf("DbClearClientInfo, Error preparing %s, error=%s", existsQuery, err.Error())
		return err
	}
	row = stmt.QueryRow()
	if row != nil {
		var name string
		err = row.Scan(&name)
		stmt.Close()
		if err != nil {
			// Table missing (sql.ErrNoRows) or scan failure: nothing to clear.
			return err
		}
	}
	deleteQuery := fmt.Sprintf("delete from %s%s", TABLE_clientinfo, aApiKey)
	stmt, err = db.Prepare(deleteQuery)
	if err != nil {
		// BUG FIX: log used to print "delete * from ...", not the SQL actually run.
		log.Printf("DbClearClientInfo, Error preparing %s, error=%s", deleteQuery, err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("DbClearClientInfo, Error executing %s, error=%s", deleteQuery, err.Error())
	}
	return err
}
//DbAddReport inserts one report row into the reports<aApiKey> table,
//creating the table first if it does not exist.
func DbAddReport(aApiKey string, aClientId string, aTime int64, aSequence int, aMessage string, aFilePath string, aDb *sql.DB) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	createStmt := fmt.Sprintf(STMT_CREATE_TABLE_REPORTS, aApiKey)
	stmt, err = db.Prepare(createStmt)
	if err != nil {
		log.Printf("Error creating, %s, error=%s", createStmt, err.Error())
		// BUG FIX: continuing would call Exec on a nil statement and panic.
		return
	}
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("Error executing, %s, error=%s", createStmt, err.Error())
	}
	stmt.Close()
	insertStmt := fmt.Sprintf(STMT_INSERT_INTO_REPORTS, aApiKey)
	stmt, err = db.Prepare(insertStmt)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", insertStmt, err.Error())
		// BUG FIX: same nil-statement hazard as above.
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec(aClientId, aTime, aSequence, aMessage, aFilePath)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", insertStmt, err.Error())
	}
}
//DbDeleteReport deletes the report with the given id from the
//reports<aApiKey> table.
func DbDeleteReport(aApiKey string, aId int, aDb *sql.DB) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	query := fmt.Sprintf("delete from %s%s where %s=?", TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_id)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", query, err.Error())
		// BUG FIX: continuing would call Exec on a nil statement and panic.
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec(aId)
	if err != nil {
		// BUG FIX: the old message passed four args to three verbs (go vet).
		log.Printf("Error deleting, %s, id=%d, error=%s", query, aId, err.Error())
	}
}
//DbClearReports deletes all records in the reports<aApiKey> table.
func DbClearReports(aApiKey string, aDb *sql.DB) error {
	// Instead of deleting all from reports<aApiKey> we could also just
	// `drop table if exists reports<aApiKey>`.
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return err
		}
		defer db.Close()
	}
	query := fmt.Sprintf("delete from %s%s", TABLE_reports, aApiKey)
	stmt, err = db.Prepare(query)
	if err != nil {
		// BUG FIX: the log printed "delete * from ...", which is not the SQL run.
		log.Printf("Error preparing %s, error=%s", query, err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("Error executing %s, error=%s", query, err.Error())
	}
	return err
}
//DbGetReportsByApiKey returns one page of reports from reports<aApiKey>,
//optionally filtered by client id, ordered by (clientid, id).
//aStartNum is the query offset; endNum is aStartNum plus the number of rows
//returned, i.e. the offset for the next page.
func DbGetReportsByApiKey(aApiKey string, aClientId string, aStartNum int, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report, endNum int) {
	endNum = aStartNum
	sliceReports = make([]*objects.Report, 0, 64)
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	var query string
	if aClientId != STR_EMPTY {
		query = fmt.Sprintf("select * from %s%s where %s=? order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	} else {
		query = fmt.Sprintf("select * from %s%s order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	}
	log.Printf("DbGetReportsByApiKey, %s", query)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", query, err.Error())
		return sliceReports, endNum
	}
	// BUG FIX: statement leaked on the query-error return below.
	defer stmt.Close()
	if aClientId != STR_EMPTY {
		log.Printf("DbGetReportsByApiKey, Query, aClientId=%s", aClientId)
		rows, err = stmt.Query(aClientId, aStartNum, aPageSize)
	} else {
		log.Printf("DbGetReportsByApiKey, Query, without clientId, aClientId=%s", aClientId)
		rows, err = stmt.Query(aStartNum, aPageSize)
	}
	if err != nil {
		log.Printf("Error quering, %s, error=%s", query, err.Error())
		return sliceReports, endNum
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath)
		if err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			// BUG FIX: do not append a zero-value report when the scan failed.
			continue
		}
		var report = new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		report.TimeString = fmt.Sprintf("%s", time.Unix(reportTime, 0))
		sliceReports = append(sliceReports, report)
		endNum++
	}
	return sliceReports, endNum
}
//DbGetReports returns up to aPageSize reports from reports<aApiKey> whose
//id is greater than aId, ordered by (clientid, id).
func DbGetReports(aApiKey string, aId int, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report) {
	sliceReports = make([]*objects.Report, 0, 64)
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	query := fmt.Sprintf("select * from %s%s where %s > ? order by %s, %s limit ?",
		TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_id, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", query, err.Error())
		return sliceReports
	}
	// BUG FIX: statement leaked on the query-error return below.
	defer stmt.Close()
	rows, err = stmt.Query(aId, aPageSize)
	if err != nil {
		log.Printf("Error quering, %s, error=%s", query, err.Error())
		return sliceReports
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		// renamed from `time`: the local variable shadowed the time package
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath)
		if err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			// BUG FIX: do not append a zero-value report when the scan failed.
			continue
		}
		var report = new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		sliceReports = append(sliceReports, report)
	}
	return sliceReports
}
//DbGetReportsLastPage returns the last page (up to aPageSize rows) of
//reports<aApiKey>, optionally filtered by client id, together with the
//total row count for that filter.
func DbGetReportsLastPage(aApiKey string, aClientId string, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report, count int64) {
	sliceReports = make([]*objects.Report, 0, 64)
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	var rowCount int64 = 0
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	// First query: total number of rows for the requested filter.
	var countQuery string
	if aClientId != STR_EMPTY {
		countQuery = fmt.Sprintf("select Count(*) from %s%s where %s=?", TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid)
	} else {
		countQuery = fmt.Sprintf("select Count(*) from %s%s", TABLE_reports, aApiKey)
	}
	stmt, err = db.Prepare(countQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", countQuery, err.Error())
		return sliceReports, rowCount
	}
	if aClientId != STR_EMPTY {
		rows, err = stmt.Query(aClientId)
	} else {
		rows, err = stmt.Query()
	}
	if err != nil {
		log.Printf("Error quering, %s, error=%s", countQuery, err.Error())
		// BUG FIX: statement leaked on this path.
		stmt.Close()
		return sliceReports, rowCount
	}
	for rows.Next() {
		err = rows.Scan(&rowCount)
		if err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
		}
	}
	log.Printf("DbGetReportsLastPage, rowCount=%d", rowCount)
	rows.Close()
	stmt.Close()
	// Second query: fetch the final page.
	var pageQuery string
	if aClientId != STR_EMPTY {
		pageQuery = fmt.Sprintf("select * from %s%s where %s=? order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	} else {
		pageQuery = fmt.Sprintf("select * from %s%s order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	}
	stmt, err = db.Prepare(pageQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", pageQuery, err.Error())
		return sliceReports, rowCount
	}
	defer stmt.Close()
	// Offset of the last page; clamp at zero when there are fewer rows than
	// one page (SQLite treats a negative OFFSET as zero — this makes the
	// intent explicit).
	offset := rowCount - int64(aPageSize)
	if offset < 0 {
		offset = 0
	}
	if aClientId != STR_EMPTY {
		rows, err = stmt.Query(aClientId, offset, aPageSize)
	} else {
		rows, err = stmt.Query(offset, aPageSize)
	}
	if err != nil {
		log.Printf("Error quering, %s, error=%s", pageQuery, err.Error())
		return sliceReports, rowCount
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath)
		if err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			// BUG FIX: do not append a zero-value report when the scan failed.
			continue
		}
		var report = new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		report.TimeString = fmt.Sprintf("%s", time.Unix(reportTime, 0))
		sliceReports = append(sliceReports, report)
	}
	return sliceReports, rowCount
}
//DbInviteAddApiKey registers aApiKey for aUserId if a valid (unexpired)
//invite with aInviteId exists in the invites<aApiKey> table.
func DbInviteAddApiKey(aUserId int, aInviteId string, aApiKey string, aAppName string, aDb *sql.DB) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	DbInviteCreateTable(aApiKey, db)
	var isValidInvite = false
	selectQuery := fmt.Sprintf("select * from %s%s where %s=?", TABLE_invites, aApiKey, TABLE_INVITES_COLUMN_inviteid)
	stmt, err = db.Prepare(selectQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", selectQuery, err.Error())
		return
	}
	rows, err = stmt.Query(aInviteId)
	if err != nil {
		log.Printf("Error quering, %s, error=%s", selectQuery, err.Error())
		stmt.Close()
		// BUG FIX: rows is nil here; the rows.Next() loop below would panic.
		return
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	for rows.Next() {
		var id string
		var inviteId string
		var apiKey string
		var issued int64
		var expires int64
		err = rows.Scan(&id, &inviteId, &apiKey, &issued, &expires)
		if err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			// BUG FIX: rows/stmt were leaked on this early return.
			rows.Close()
			stmt.Close()
			return
		}
		// An invite is valid while now < issued + expires (ms since epoch).
		if apiKey == aApiKey && now < (issued+expires) {
			isValidInvite = true
			break
		}
	}
	rows.Close()
	stmt.Close()
	if !isValidInvite {
		return
	}
	stmt, err = db.Prepare(STMT_INSERT_INTO_APIKEYS)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_APIKEYS, err.Error())
		return
	}
	// BUG FIX: the statement was leaked when Exec failed.
	defer stmt.Close()
	_, err = stmt.Exec(aUserId, aApiKey, aAppName)
	if err != nil {
		log.Printf("Error executing %s, values userId=%d, apiKey=%s, appName=%s, error=%s", STMT_INSERT_INTO_APIKEYS, aUserId, aApiKey, aAppName, err.Error())
	}
}
//DbInviteAdd creates a new invite id for aApiKey, stores it in the
//invites<aApiKey> table and returns it. Expired invites are purged first.
func DbInviteAdd(aApiKey string, aDb *sql.DB) (inviteId string) {
	var err error = nil
	var stmt *sql.Stmt = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("DbInviteAdd, Error opening db, err=", err)
			return
		}
		defer db.Close()
	}
	DbInviteCreateTable(aApiKey, db)
	DbInviteClean(aApiKey, db)
	inviteId, errUUID := GenerateUUID()
	if errUUID != nil {
		fmt.Println("Error generating uuid, err=", errUUID)
		// BUG FIX: do not insert an empty/broken invite id.
		return inviteId
	}
	log.Printf("DbInviteAdd, inviteId=%s", inviteId)
	issued := time.Now().UnixNano() / int64(time.Millisecond)
	expires := INVITE_VALIDITY_MS
	insertStmt := fmt.Sprintf(STMT_INSERT_INTO_INVITES, aApiKey)
	stmt, err = db.Prepare(insertStmt)
	if err != nil {
		// BUG FIX: the Prepare error was ignored; stmt.Exec below would have
		// dereferenced a nil statement.
		log.Printf("DbInviteAdd, Error preparing %s, error=%s", insertStmt, err.Error())
		return inviteId
	}
	defer stmt.Close()
	_, err = stmt.Exec(inviteId, aApiKey, issued, expires)
	if err != nil {
		fmt.Println("DbInviteAdd, Error inserting into tokens, err=", err)
	}
	return inviteId
}
//DbInviteClean deletes expired invites for aApiKey from the
//invites<aApiKey> table. An invite is expired when issued + expires < now
//(all in milliseconds since epoch).
func DbInviteClean(aApiKey string, aDb *sql.DB) {
	var err error = nil
	var stmt *sql.Stmt = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("DbInviteClean, Error opening db login.sqlite, err=", err)
			return
		}
		defer db.Close()
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	query := fmt.Sprintf("delete from %s%s where %s=? AND %s + %s < ?",
		TABLE_invites, aApiKey, TABLE_INVITES_COLUMN_apikey, TABLE_INVITES_COLUMN_issued, TABLE_INVITES_COLUMN_expires)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("DbInviteClean, error preparing %s, error=%s", query, err.Error())
		return
	}
	// BUG FIX: the statement was never closed.
	defer stmt.Close()
	_, err = stmt.Exec(aApiKey, now)
	if err != nil {
		log.Printf("DbInviteClean, error executing %s, error=%s", query, err.Error())
	}
}
//DbInviteCreateTable creates the invites<aApiKey> table if it does not
//already exist.
func DbInviteCreateTable(aApiKey string, aDb *sql.DB) {
	var err error
	var stmt *sql.Stmt = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// BUG FIX: message used to say "DbInviteClean" (copy/paste).
			fmt.Println("DbInviteCreateTable, Error opening db login.sqlite, err=", err)
			return
		}
		defer db.Close()
	}
	//init table invites
	stmt, err = db.Prepare(fmt.Sprintf(STMT_CREATE_TABLE_INVITES, aApiKey))
	if err != nil {
		// BUG FIX: was log.Println with Printf verbs (go vet); also must
		// return here, or Exec below would dereference a nil statement.
		log.Printf("init, Error preparing, %s, err=%s", STMT_CREATE_TABLE_INVITES, err)
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("init, Error executing, %s, err=%s", STMT_CREATE_TABLE_INVITES, err)
	}
}
// TODO: add documentation comments.
package utils
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
"fmt"
"log"
"objects"
"time"
)
//DbInit initializes the database: it creates the tables used by the
//application (users, tokens, apikeys). Returns an error when the database
//cannot be opened or a statement cannot be prepared.
func DbInit() error {
	var err error = nil
	var db *sql.DB = nil
	var stmt *sql.Stmt = nil
	db, err = sql.Open(DB_TYPE, DB_NAME)
	if err != nil {
		log.Println("init, Error opening db", DB_NAME, ", err=", err)
		return err
	}
	defer db.Close()
	//init table users
	stmt, err = db.Prepare(STMT_CREATE_TABLE_USERS)
	if err != nil {
		log.Println("init, Error preparing create table users stmt, err=", err)
		return err
	}
	_, err = stmt.Exec()
	if err != nil {
		log.Println("init, Error exec create table stmt, err=", err)
	}
	stmt.Close()
	//init table tokens
	stmt, err = db.Prepare(STMT_CREATE_TABLE_TOKENS)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", STMT_CREATE_TABLE_TOKENS, err.Error())
		// BUG FIX: continuing would call Exec on a nil statement and panic.
		return err
	}
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("Error creating table, %s, error=%s", STMT_CREATE_TABLE_TOKENS, err.Error())
	}
	stmt.Close()
	//init table apikeys
	stmt, err = db.Prepare(STMT_CREATE_TABLE_APIKEYS)
	if err != nil {
		log.Printf("init, Error preparing, %s, err=%s", STMT_CREATE_TABLE_APIKEYS, err)
		// BUG FIX: same nil-statement hazard as above.
		return err
	}
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("init, Error executing, %s, err=%s", STMT_CREATE_TABLE_APIKEYS, err)
	}
	stmt.Close()
	return nil
}
//DbAddUser adds a user to the users table if it does not already exist.
//Returns whether the user already existed, whether it was added, and any
//error that occurred.
func DbAddUser(aEmail string, aPassword string, aDb *sql.DB) (isUserExists bool, isUserAdded bool, errorUser error) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false, false, err
		}
		defer db.Close()
	}
	isUserExists = false
	selectQuery := fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email)
	stmt, err = db.Prepare(selectQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", selectQuery, err.Error())
		// BUG FIX: the error was silently dropped by the old bare return.
		return false, false, err
	}
	rows, err = stmt.Query(aEmail)
	if err != nil {
		log.Printf("Error executing, %s, email=%s, error=%s", selectQuery, aEmail, err.Error())
		stmt.Close()
		return false, false, err
	}
	for rows.Next() {
		var id int
		var email string
		var password string
		var salt string
		rows.Scan(&id, &email, &password, &salt)
		if email == aEmail {
			isUserExists = true
			break
		}
	}
	rows.Close()
	stmt.Close()
	if isUserExists {
		return isUserExists, false, nil
	}
	salt := GenerateRandomString(SALT_LENGTH)
	passwordHash, err := HashSha1(fmt.Sprintf("%s%s", aPassword, salt))
	if err != nil {
		// SECURITY FIX: the old message logged the plaintext password + salt.
		log.Printf("Error hashing password, error=%s", err.Error())
		return isUserExists, false, err
	}
	stmt, err = db.Prepare(STMT_INSERT_INTO_USERS)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_USERS, err.Error())
		return isUserExists, false, err
	}
	_, err = stmt.Exec(aEmail, passwordHash, salt)
	if err != nil {
		log.Printf("Error executing %s, error=%s", STMT_INSERT_INTO_USERS, err.Error())
		stmt.Close()
		return isUserExists, false, err
	}
	stmt.Close()
	//look up the id of the newly inserted user
	idQuery := fmt.Sprintf("select %s from %s where %s=?", TABLE_USERS_COLUMN_id, TABLE_users, TABLE_USERS_COLUMN_email)
	stmt, err = db.Prepare(idQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", idQuery, err.Error())
		// BUG FIX: continuing would call Query on a nil statement and panic.
		return isUserExists, true, err
	}
	rows, err = stmt.Query(aEmail)
	if err != nil {
		log.Printf("Error query, %s, email=%s, error=%s", idQuery, aEmail, err.Error())
		stmt.Close()
		// BUG FIX: rows is nil here; rows.Next() below would panic.
		return isUserExists, true, err
	}
	var userId int = -1
	if rows.Next() {
		err = rows.Scan(&userId)
		if err != nil {
			log.Printf("Error scanning userId, error=%s", err.Error())
		}
	}
	rows.Close()
	stmt.Close()
	if userId < 0 {
		return isUserExists, false, nil
	}
	//DbAddApiKey(userId, STR_EMPTY, db)
	return isUserExists, true, nil
}
//DbDeleteUser deletes a user, the user's apiKeys, and the reports<apiKey>
//tables belonging to those apiKeys. Returns true on success.
func DbDeleteUser(aUserId int, aDb *sql.DB) bool {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false
		}
		defer db.Close()
	}
	//Delete user from table users
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_id))
	if err != nil {
		log.Printf("Error preparing, delete from users where id=?, error=%s", err.Error())
		return false
	}
	_, err = stmt.Exec(aUserId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%d", TABLE_users, TABLE_USERS_COLUMN_id, aUserId), err.Error())
		// BUG FIX: statement was leaked on this path.
		stmt.Close()
		return false
	}
	stmt.Close()
	apiKeyQuery := fmt.Sprintf("select %s from %s where %s=?", TABLE_APIKEYS_COLUMN_apikey, TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid)
	stmt, err = db.Prepare(apiKeyQuery)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", apiKeyQuery, err.Error())
		return false
	}
	rows, err = stmt.Query(aUserId)
	if err != nil {
		log.Printf("Error quering, %s, userId=%d, error=%s", apiKeyQuery, aUserId, err.Error())
		// BUG FIX: statement was leaked on this path.
		stmt.Close()
		return false
	}
	//get all apiKeys of user
	var sliceApiKeys []string = make([]string, 0, 16)
	for rows.Next() {
		var apiKey string
		rows.Scan(&apiKey)
		sliceApiKeys = append(sliceApiKeys, apiKey)
	}
	rows.Close()
	stmt.Close()
	if len(sliceApiKeys) == 0 {
		return true
	}
	//Delete apiKeys of user (delete from table apiKeys)
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid))
	if err != nil {
		log.Printf("Error preparing, delete from %s where userid=?, error=%s", TABLE_apikeys, err.Error())
		return false
	}
	_, err = stmt.Exec(aUserId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%d", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid, aUserId), err.Error())
		stmt.Close()
		return false
	}
	stmt.Close()
	//Drop reports<apiKey> tables for the user
	for _, name := range sliceApiKeys {
		stmt, err = db.Prepare(fmt.Sprintf("drop table if exists %s%s", TABLE_reports, name))
		if err != nil {
			log.Printf("Error preparing, drop table if exists %s%s, error=%s", TABLE_reports, name, err.Error())
			// BUG FIX: the old code fell through and Exec'd the PREVIOUS
			// iteration's statement when Prepare failed.
			continue
		}
		_, err = stmt.Exec()
		if err != nil {
			log.Printf("Error executing, drop table if exists %s, error=%s", (TABLE_reports + name), err.Error())
		}
		// BUG FIX: only the last statement was closed before.
		stmt.Close()
	}
	return true
}
//DbGetUser authenticates a user by email and password.
//Returns the user's id, or -1 when the user is missing or the password
//does not match.
func DbGetUser(aEmail string, aPassword string, aDb *sql.DB) (id int, err error) {
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return -1, err
		}
		defer db.Close()
	}
	query := fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_email)
	stmt, err = db.Prepare(query)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", query, err.Error())
		// BUG FIX: QueryRow below would panic on a nil statement.
		return -1, err
	}
	// BUG FIX: the statement was never closed.
	defer stmt.Close()
	row = stmt.QueryRow(aEmail)
	var email string
	var password string
	var salt string
	err = row.Scan(&id, &email, &password, &salt)
	if err != nil {
		// BUG FIX: the old code only bailed out on sql.ErrNoRows and then
		// compared a hash against garbage on any other scan error.
		return -1, err
	}
	passwordHash, err := HashSha1(fmt.Sprintf("%s%s", aPassword, salt))
	if err != nil {
		// SECURITY FIX: the old message logged the plaintext password + salt.
		log.Printf("Error hashing password, error=%s", err.Error())
		return -1, err
	}
	if passwordHash != password {
		return -1, err
	}
	return id, err
}
//DbGetUserLoad loads the user record for aUserId into an objects.User.
//On error the returned user carries aUserId and an empty email.
func DbGetUserLoad(aUserId int, aDb *sql.DB) (user *objects.User, err error) {
	user = new(objects.User)
	user.Id = aUserId
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	var row *sql.Row = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return user, err
		}
		defer db.Close()
	}
	query := fmt.Sprintf("select * from %s where %s=?", TABLE_users, TABLE_USERS_COLUMN_id)
	stmt, err = db.Prepare(query)
	if err != nil {
		// BUG FIX: the old message printed the email-based query; also must
		// return here, otherwise QueryRow panics on a nil statement.
		log.Printf("Error preparing %s, error=%s", query, err.Error())
		return user, err
	}
	defer stmt.Close()
	row = stmt.QueryRow(aUserId)
	var id int
	var email string
	var password string
	var salt string
	err = row.Scan(&id, &email, &password, &salt)
	if err != nil {
		// sql.ErrNoRows or any other scan failure: keep the pre-filled user.
		return user, err
	}
	user.Id = id
	user.Email = email
	return user, err
}
//DbAddToken creates a new session token for aUserId, stores it in the
//tokens table and returns it. Expired tokens for the user are purged first.
func DbAddToken(aUserId int, aDb *sql.DB) (token string) {
	DbCleanTokens(aUserId, aDb)
	var err error = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("AddToken, Error opening db, err=", err)
			return
		}
		defer db.Close()
	}
	token, errUUID := GenerateUUID()
	if errUUID != nil {
		// BUG FIX: typo "Erro" in the log message.
		fmt.Println("Error generating uuid, err=", errUUID)
	}
	// issued is ms since epoch; a token is valid while now < issued + expires.
	issued := time.Now().UnixNano() / int64(time.Millisecond)
	expires := TOKEN_VALIDITY_MS
	_, err = db.Exec("insert or ignore into tokens(userid, token, issued, expires) values(?, ?, ?, ?)", aUserId, token, issued, expires)
	if err != nil {
		fmt.Println("AddToken, Error inserting into tokens, err=", err)
	}
	return token
}
//DbDeleteApiKey deletes an API key from the apikeys table.
//Returns true on success.
func DbDeleteApiKey(aApiKey string, aDb *sql.DB) (isDeleted bool) {
	var err error = nil
	var db *sql.DB = aDb
	var stmt *sql.Stmt = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("DbDeleteApiKey, Error opening db login.sqlite, err=%s", err.Error())
			return
		}
		defer db.Close()
	}
	stmt, err = db.Prepare(fmt.Sprintf("delete from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey))
	if err != nil {
		log.Printf("Error preparing, delete from %s where %s=?, error=%s", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey, err.Error())
		return false
	}
	// BUG FIX: the statement was leaked on the Exec-error path.
	defer stmt.Close()
	_, err = stmt.Exec(aApiKey)
	if err != nil {
		// BUG FIX: aApiKey is a string; the verb was %d (go vet failure).
		log.Printf("Error executing, %s, error=%s", fmt.Sprintf("delete from %s where %s=%s", TABLE_apikeys, TABLE_APIKEYS_COLUMN_apikey, aApiKey), err.Error())
		return false
	}
	return true
}
//DbIsTokenValid reports whether aToken is a known, unexpired token and, if
//so, which user it belongs to. Returns (false, -1) otherwise.
func DbIsTokenValid(aToken string, aDb *sql.DB) (isValid bool, userId int) {
	conn := aDb
	if conn == nil {
		opened, openErr := sql.Open(DB_TYPE, DB_NAME)
		if openErr != nil {
			fmt.Println("IsTokenValid, Error opening db login.sqlite, err=", openErr)
			return false, -1
		}
		defer opened.Close()
		conn = opened
	}
	rows, queryErr := conn.Query(fmt.Sprintf("select * from %s", TABLE_tokens))
	if queryErr != nil {
		fmt.Println("IsTokenValid, Error select from tokens, err=", queryErr)
		return false, -1
	}
	defer rows.Close()
	nowMs := time.Now().UnixNano() / int64(time.Millisecond)
	for rows.Next() {
		var (
			rowId      int
			rowUserId  int
			rowToken   string
			rowIssued  int64
			rowExpires int64
		)
		if scanErr := rows.Scan(&rowId, &rowUserId, &rowToken, &rowIssued, &rowExpires); scanErr != nil {
			fmt.Println("IsTokenValid, Error scan tokens, err=", scanErr)
		}
		// A token is valid while now < issued + expires (ms since epoch).
		if rowToken == aToken && nowMs < rowIssued+rowExpires {
			return true, rowUserId
		}
	}
	return false, -1
}
//DbIsApiKeyValid reports whether aApiKey exists in the apikeys table and,
//if so, which user owns it. Returns (false, -1) otherwise.
func DbIsApiKeyValid(aApiKey string, aDb *sql.DB) (isValid bool, userId int) {
	var err error = nil
	var db *sql.DB = aDb
	var rows *sql.Rows = nil
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// BUG FIX: all messages here said "IsTokenValid" (copy/paste
			// from DbIsTokenValid), which made logs misleading.
			log.Printf("DbIsApiKeyValid, Error opening db login.sqlite, err=%s", err.Error())
			return false, -1
		}
		defer db.Close()
	}
	rows, err = db.Query(fmt.Sprintf("select * from %s", TABLE_apikeys))
	if err != nil {
		log.Printf("DbIsApiKeyValid, Error select from %s, err=%s", TABLE_apikeys, err.Error())
		return false, -1
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var rowUserId int
		var apiKey string
		var appName string
		err = rows.Scan(&id, &rowUserId, &apiKey, &appName)
		if err != nil {
			log.Printf("DbIsApiKeyValid, Error scan %s, err=%s", TABLE_apikeys, err.Error())
		}
		if apiKey == aApiKey {
			return true, rowUserId
		}
	}
	return false, -1
}
//DbCleanTokens deletes expired token records for aUserId from the
//TABLE_tokens table. A token is expired when issued + expires < now
//(all in milliseconds since epoch).
func DbCleanTokens(aUserId int, aDb *sql.DB) {
	var err error = nil
	var db *sql.DB = aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// BUG FIX: message used to say "IsTokenValid" (copy/paste).
			fmt.Println("DbCleanTokens, Error opening db login.sqlite, err=", err)
			return
		}
		defer db.Close()
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	_, err = db.Exec(fmt.Sprintf("delete from %s where %s=? AND %s + %s < ?",
		TABLE_tokens, TABLE_TOKENS_COLUMN_userid, TABLE_TOKENS_COLUMN_issued, TABLE_TOKENS_COLUMN_expires),
		aUserId, now)
	if err != nil {
		// BUG FIX: the Exec error was silently discarded.
		log.Printf("DbCleanTokens, error deleting expired tokens, error=%s", err.Error())
	}
}
//DbGetApiKey gets all apiKeys of a user.
//If aDb is nil, a temporary connection to DB_NAME is opened and closed on return.
//Returns nil when the database cannot be opened or the query fails; otherwise a
//(possibly empty) slice of the user's API keys.
func DbGetApiKey(aUserId int, aDb *sql.DB) []*objects.ApiKey {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return nil
		}
		defer db.Close()
	}
	querySQL := fmt.Sprintf("select * from %s where %s=?", TABLE_apikeys, TABLE_APIKEYS_COLUMN_userid)
	stmt, err := db.Prepare(querySQL)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", querySQL, err.Error())
		// The original fell through and called Query on a nil statement.
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.Query(aUserId)
	if err != nil {
		log.Printf("Error quering, %s, error=%s", querySQL, err.Error())
		// The original fell through and iterated nil rows.
		return nil
	}
	defer rows.Close()
	sliceApiKeys := make([]*objects.ApiKey, 0, 16)
	for rows.Next() {
		var id int
		var rowUserId int
		var apiKey string
		var appName string
		// Scan errors were silently ignored before; skip bad rows.
		if err = rows.Scan(&id, &rowUserId, &apiKey, &appName); err != nil {
			log.Printf("Error scanning %s, error=%s", TABLE_apikeys, err.Error())
			continue
		}
		objApiKey := new(objects.ApiKey)
		objApiKey.UserId = rowUserId
		objApiKey.ApiKey = apiKey
		objApiKey.AppName = appName
		sliceApiKeys = append(sliceApiKeys, objApiKey)
	}
	return sliceApiKeys
}
//DbAddApiKey adds additional API Keys for a user. Initial API Key is added in the DbAddUser.
//A fresh key is generated via GenerateToken; when aAppName is empty the key itself
//is used as the app name. Returns true when the key was stored.
func DbAddApiKey(aUserId int, aAppName string, aDb *sql.DB) bool {
	var err error
	db := aDb
	if aUserId < 0 {
		return false
	}
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return false
		}
		defer db.Close()
	}
	stmt, err := db.Prepare(STMT_INSERT_INTO_APIKEYS)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_APIKEYS, err.Error())
		// The original fell through to stmt.Exec on a nil statement (panic).
		return false
	}
	defer stmt.Close()
	apiKey, err := GenerateToken()
	if err != nil {
		log.Printf("Error generateToken, error=%s", err.Error())
		// Do not insert an empty API key (the original continued here).
		return false
	}
	if aAppName == STR_EMPTY {
		aAppName = apiKey
	}
	_, err = stmt.Exec(aUserId, apiKey, aAppName)
	if err != nil {
		log.Printf("Error executing %s, values userId=%d, apiKey=%s, appName=%s, error=%s", STMT_INSERT_INTO_APIKEYS, aUserId, apiKey, aAppName, err.Error())
		return false
	}
	return true
}
//DbAddClientInfo adds client info to the TABLE_clientinfo for API key table.
//The per-API-key client info table is created first (if needed), then the client
//record is inserted. Returns nil on success, or the first error encountered.
func DbAddClientInfo(aApiKey string, aClientId string, aName string, aManufacturer string, aModel string, aDeviceId string, aDb *sql.DB) error {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return err
		}
		defer db.Close()
	}
	createSQL := fmt.Sprintf(STMT_CREATE_TABLE_CLIENTINFO, aApiKey)
	stmt, err := db.Prepare(createSQL)
	if err != nil {
		log.Printf("Error creating, %s, error=%s", createSQL, err.Error())
		return err
	}
	_, err = stmt.Exec()
	stmt.Close() // close even when Exec fails (stmt was leaked on the error path)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", createSQL, err.Error())
		return err
	}
	insertSQL := fmt.Sprintf(STMT_INSERT_INTO_CLIENTINFO, aApiKey)
	stmt, err = db.Prepare(insertSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", insertSQL, err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(aClientId, aName, aManufacturer, aModel, aDeviceId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", insertSQL, err.Error())
		return err
	}
	return nil
}
//DbDeleteClientInfo deletes client info from the TABLE_clientinfo for API Key table.
//Returns true when the delete statement executed without error.
func DbDeleteClientInfo(aApiKey string, aClientId string, aDb *sql.DB) (isDeleted bool) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// Log prefix corrected: was "DbDeleteApiKey" (copy-paste leftover).
			log.Printf("DbDeleteClientInfo, Error opening db login.sqlite, err=%s", err.Error())
			return
		}
		defer db.Close()
	}
	deleteSQL := fmt.Sprintf("delete from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid)
	stmt, err := db.Prepare(deleteSQL)
	if err != nil {
		// Format fixed: the original passed err.Error() with no matching verb.
		log.Printf("Error preparing, %s, error=%s", deleteSQL, err.Error())
		return false
	}
	defer stmt.Close() // stmt was leaked on the Exec-error path
	_, err = stmt.Exec(aClientId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", deleteSQL, err.Error())
		return false
	}
	return true
}
//DbGetClientInfo gets client info from the TABLE_clientinfo for an API Key.
//Returns nil when the record cannot be read (no row, scan error, or db error).
func DbGetClientInfo(aApiKey string, aClientId string, aDb *sql.DB) *objects.ClientInfo {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			// Log prefix corrected: was "DbDeleteApiKey" (copy-paste leftover).
			log.Printf("DbGetClientInfo, Error opening db login.sqlite, err=%s", err.Error())
			return nil
		}
		defer db.Close()
	}
	selectSQL := fmt.Sprintf("select * from %s%s where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_clientid)
	stmt, err := db.Prepare(selectSQL)
	if err != nil {
		// Format fixed: the original passed err.Error() with no matching verb.
		log.Printf("Error preparing, %s, error=%s", selectSQL, err.Error())
		return nil
	}
	defer stmt.Close() // stmt was leaked when row.Scan failed in the original
	var clientId string
	var name string
	var manufacturer string
	var model string
	var deviceId string
	err = stmt.QueryRow(aClientId).Scan(&clientId, &name, &manufacturer, &model, &deviceId)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", selectSQL, err.Error())
		return nil
	}
	clientInfo := new(objects.ClientInfo)
	clientInfo.ClientId = clientId
	clientInfo.Name = name
	clientInfo.Manufacturer = manufacturer
	clientInfo.Model = model
	clientInfo.DeviceId = deviceId
	return clientInfo
}
//DbGetClientInfos gets all client info records from TABLE_clientinfo for API key table.
//Returns nil when the table cannot be queried, otherwise a (possibly empty) slice.
func DbGetClientInfos(aApiKey string, aDb *sql.DB) []*objects.ClientInfo {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return nil
		}
		defer db.Close()
	}
	selectSQL := fmt.Sprintf("select * from %s%s", TABLE_clientinfo, aApiKey)
	stmt, err := db.Prepare(selectSQL)
	if err != nil {
		log.Printf("DbGetClientInfos, Error preparing %s, error=%s", selectSQL, err.Error())
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		log.Printf("DbGetClientInfos, Error quering, %s, error=%s", selectSQL, err.Error())
		// The original fell through and called rows.Next() on nil rows (panic).
		return nil
	}
	defer rows.Close()
	sliceClientInfo := make([]*objects.ClientInfo, 0, 16)
	for rows.Next() {
		var clientId string
		var name string
		var manufacturer string
		var model string
		var deviceId string
		// Scan errors were silently ignored before; skip bad rows.
		if err = rows.Scan(&clientId, &name, &manufacturer, &model, &deviceId); err != nil {
			log.Printf("DbGetClientInfos, Error scanning, error=%s", err.Error())
			continue
		}
		objClientInfo := new(objects.ClientInfo)
		objClientInfo.ApiKey = aApiKey
		objClientInfo.ClientId = clientId
		objClientInfo.Name = name
		objClientInfo.Manufacturer = manufacturer
		objClientInfo.Model = model
		objClientInfo.DeviceId = deviceId
		sliceClientInfo = append(sliceClientInfo, objClientInfo)
	}
	return sliceClientInfo
}
//DbUpdateClientInfo updates client info in TABLE_clientinfo for API key table.
//Only the name column is updated for the matching clientId. Returns the Exec
//error (nil on success).
func DbUpdateClientInfo(aApiKey string, aClientId string, aName string, aDb *sql.DB) error {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("DbUpdateClientInfo, Error opening db login.sqlite, err=%s", err.Error())
			return err
		}
		defer db.Close()
	}
	updateSQL := fmt.Sprintf("update %s%s set %s=? where %s=?", TABLE_clientinfo, aApiKey, TABLE_CLIENTINFO_name, TABLE_CLIENTINFO_clientid)
	stmt, err := db.Prepare(updateSQL)
	if err != nil {
		// Format fixed: the original passed err.Error() with no matching verb.
		log.Printf("DbUpdateClientInfo, Error preparing, %s, error=%s", updateSQL, err.Error())
		return err
	}
	defer stmt.Close() // stmt was never closed in the original
	_, err = stmt.Exec(aName, aClientId)
	if err != nil {
		log.Printf("DbUpdateClientInfo, Error executing, %s, error=%s", updateSQL, err.Error())
	}
	return err
}
//DbClearClientInfo deletes all client info records from TABLE_clientinfo for API key table.
//The table's existence is checked first via sqlite_master; when it does not exist
//there is nothing to clear. Returns nil on success.
func DbClearClientInfo(aApiKey string, aDb *sql.DB) error {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			// Was "return nil", which reported success on failure.
			return err
		}
		defer db.Close()
	}
	existsSQL := fmt.Sprintf("select name from sqlite_master where type='table' and name='%s%s'", TABLE_clientinfo, aApiKey)
	stmt, err := db.Prepare(existsSQL)
	if err != nil {
		log.Printf("DbClearClientInfo, Error preparing %s, error=%s", existsSQL, err.Error())
		// Was "return nil", which reported success on failure.
		return err
	}
	var name string
	err = stmt.QueryRow().Scan(&name)
	stmt.Close()
	if err != nil {
		// Table does not exist (sql.ErrNoRows) or the scan failed.
		return err
	}
	deleteSQL := fmt.Sprintf("delete from %s%s", TABLE_clientinfo, aApiKey)
	stmt, err = db.Prepare(deleteSQL)
	if err != nil {
		// Log text fixed: the original printed "delete * from", not the real SQL.
		log.Printf("DbClearClientInfo, Error preparing %s, error=%s", deleteSQL, err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("DbClearClientInfo, Error executing %s, error=%s", deleteSQL, err.Error())
	}
	return err
}
//DbAddReport adds report to TABLE_reports for API key table.
//The per-API-key reports table is created first (if needed), then the report row
//is inserted. Errors are logged; the function returns early when a statement
//cannot be prepared or the table cannot be created.
func DbAddReport(aApiKey string, aClientId string, aTime int64, aSequence int, aMessage string, aFilePath string, aDb *sql.DB) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	createSQL := fmt.Sprintf(STMT_CREATE_TABLE_REPORTS, aApiKey)
	stmt, err := db.Prepare(createSQL)
	if err != nil {
		log.Printf("Error creating, %s, error=%s", createSQL, err.Error())
		// The original fell through to stmt.Exec on a nil statement (panic).
		return
	}
	_, err = stmt.Exec()
	stmt.Close()
	if err != nil {
		log.Printf("Error executing, %s, error=%s", createSQL, err.Error())
		// Without the table the insert below cannot succeed.
		return
	}
	insertSQL := fmt.Sprintf(STMT_INSERT_INTO_REPORTS, aApiKey)
	stmt, err = db.Prepare(insertSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", insertSQL, err.Error())
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec(aClientId, aTime, aSequence, aMessage, aFilePath)
	if err != nil {
		log.Printf("Error executing, %s, error=%s", insertSQL, err.Error())
	}
}
//DbDeleteReport deletes report from TABLE_reports for API key table.
//The row with id aId is removed; errors are logged.
func DbDeleteReport(aApiKey string, aId int, aDb *sql.DB) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	deleteSQL := fmt.Sprintf("delete from %s%s where %s=?", TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_id)
	stmt, err := db.Prepare(deleteSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", deleteSQL, err.Error())
		// The original fell through to stmt.Exec on a nil statement (panic).
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec(aId)
	if err != nil {
		// Format fixed: the original format had fewer verbs than arguments.
		log.Printf("Error deleting, %s, id=%d, error=%s", deleteSQL, aId, err.Error())
	}
}
//DbClearReports deletes all records in the TABLE_reports for API key table.
//Returns the Exec error (nil on success).
func DbClearReports(aApiKey string, aDb *sql.DB) error {
	// Instead of deleting all from reports<aApiKey> we can just - drop table if exists reports<aApiKey>
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return err
		}
		defer db.Close()
	}
	// Build the SQL once so the log messages show the statement actually run
	// (the original logged "delete * from ...", which is not the executed SQL).
	deleteSQL := fmt.Sprintf("delete from %s%s", TABLE_reports, aApiKey)
	stmt, err := db.Prepare(deleteSQL)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", deleteSQL, err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	if err != nil {
		log.Printf("Error executing %s, error=%s", deleteSQL, err.Error())
	}
	return err
}
//DbGetReportsByApiKey gets a page of reports from TABLE_reports for API key.
//All records are considered when no clientId is supplied, otherwise only the
//records for the supplied clientId. (Doc fixed: the original comment said
//"deletes", but the function only reads.)
//Returns the reports in the page and endNum = aStartNum + number of rows read.
func DbGetReportsByApiKey(aApiKey string, aClientId string, aStartNum int, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report, endNum int) {
	endNum = aStartNum
	sliceReports = make([]*objects.Report, 0, 64)
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	// Build the query once; this also keeps the log messages in sync with it.
	var querySQL string
	if aClientId != STR_EMPTY {
		querySQL = fmt.Sprintf("select * from %s%s where %s=? order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	} else {
		querySQL = fmt.Sprintf("select * from %s%s order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	}
	log.Printf("DbGetReportsByApiKey, %s", querySQL)
	stmt, err := db.Prepare(querySQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", querySQL, err.Error())
		return sliceReports, endNum
	}
	defer stmt.Close() // stmt was leaked on the Query-error return path
	var rows *sql.Rows
	if aClientId != STR_EMPTY {
		log.Printf("DbGetReportsByApiKey, Query, aClientId=%s", aClientId)
		rows, err = stmt.Query(aClientId, aStartNum, aPageSize)
	} else {
		log.Printf("DbGetReportsByApiKey, Query, without clientId, aClientId=%s", aClientId)
		rows, err = stmt.Query(aStartNum, aPageSize)
	}
	if err != nil {
		log.Printf("Error quering, %s, error=%s", querySQL, err.Error())
		return sliceReports, endNum
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		// Skip unreadable rows; the original appended zero-valued reports here.
		if err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath); err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			continue
		}
		report := new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		report.TimeString = fmt.Sprintf("%s", time.Unix(reportTime, 0))
		sliceReports = append(sliceReports, report)
		endNum++
	}
	return sliceReports, endNum
}
//DbGetReports gets all reports from TABLE_reports for API key table.
//Up to aPageSize reports with id greater than aId are returned, ordered by
//clientid then id.
func DbGetReports(aApiKey string, aId int, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report) {
	sliceReports = make([]*objects.Report, 0, 64)
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	querySQL := fmt.Sprintf("select * from %s%s where %s > ? order by %s, %s limit ?",
		TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_id, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	stmt, err := db.Prepare(querySQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", querySQL, err.Error())
		return sliceReports
	}
	defer stmt.Close() // stmt was leaked on the Query-error return path
	rows, err := stmt.Query(aId, aPageSize)
	if err != nil {
		log.Printf("Error quering, %s, error=%s", querySQL, err.Error())
		return sliceReports
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		// Renamed from "time": the original variable shadowed the time package.
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		// Skip unreadable rows; the original appended zero-valued reports here.
		if err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath); err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			continue
		}
		report := new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		sliceReports = append(sliceReports, report)
	}
	return sliceReports
}
//DbGetReportsLastPage gets the last records from the TABLE_reports for API key, that are in the last page according to the pagination.
//When aClientId is non-empty only that client's reports are considered.
//Returns the last page of reports plus the total row count.
func DbGetReportsLastPage(aApiKey string, aClientId string, aPageSize int, aDb *sql.DB) (sliceReports []*objects.Report, count int64) {
	sliceReports = make([]*objects.Report, 0, 64)
	var err error
	db := aDb
	var rowCount int64
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	// Step 1: count the matching rows so the last-page offset can be computed.
	var countSQL string
	if aClientId != STR_EMPTY {
		countSQL = fmt.Sprintf("select Count(*) from %s%s where %s=?", TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid)
	} else {
		countSQL = fmt.Sprintf("select Count(*) from %s%s", TABLE_reports, aApiKey)
	}
	stmt, err := db.Prepare(countSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", countSQL, err.Error())
		return sliceReports, rowCount
	}
	var rows *sql.Rows
	if aClientId != STR_EMPTY {
		rows, err = stmt.Query(aClientId)
	} else {
		rows, err = stmt.Query()
	}
	if err != nil {
		stmt.Close() // stmt was leaked on this return path in the original
		log.Printf("Error quering, %s, error=%s", countSQL, err.Error())
		return sliceReports, rowCount
	}
	for rows.Next() {
		if err = rows.Scan(&rowCount); err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
		}
	}
	log.Printf("DbGetReportsLastPage, rowCount=%d", rowCount)
	rows.Close()
	stmt.Close()
	// Step 2: fetch the last page.
	var pageSQL string
	if aClientId != STR_EMPTY {
		pageSQL = fmt.Sprintf("select * from %s%s where %s=? order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	} else {
		pageSQL = fmt.Sprintf("select * from %s%s order by %s, %s limit ?, ?",
			TABLE_reports, aApiKey, TABLE_REPORTS_COLUMN_clientid, TABLE_REPORTS_COLUMN_id)
	}
	stmt, err = db.Prepare(pageSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", pageSQL, err.Error())
		return sliceReports, rowCount
	}
	defer stmt.Close() // stmt was leaked on the error returns in the original
	// When there are fewer rows than a page the offset went negative; SQLite
	// treats a negative OFFSET as zero, so clamp it explicitly.
	offset := rowCount - int64(aPageSize)
	if offset < 0 {
		offset = 0
	}
	if aClientId != STR_EMPTY {
		rows, err = stmt.Query(aClientId, offset, aPageSize)
	} else {
		rows, err = stmt.Query(offset, aPageSize)
	}
	if err != nil {
		log.Printf("Error quering, %s, error=%s", pageSQL, err.Error())
		return sliceReports, rowCount
	}
	defer rows.Close()
	for rows.Next() {
		var id int
		var clientId string
		var reportTime int64
		var sequence int
		var message string
		var filePath string
		// Skip unreadable rows; the original appended zero-valued reports here.
		if err = rows.Scan(&id, &clientId, &reportTime, &sequence, &message, &filePath); err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			continue
		}
		report := new(objects.Report)
		report.Id = id
		report.ClientId = clientId
		report.Time = reportTime
		report.Sequence = sequence
		report.Message = message
		report.FilePath = filePath
		report.TimeString = fmt.Sprintf("%s", time.Unix(reportTime, 0))
		sliceReports = append(sliceReports, report)
	}
	return sliceReports, rowCount
}
//DbInviteAddApiKey adds an API key to a user that has been invited.
//The TABLE_invites for API key is queried to check if the invitation exists and
//has not expired (valid while now < issued + expires, in milliseconds); only
//then is the API key inserted for aUserId.
func DbInviteAddApiKey(aUserId int, aInviteId string, aApiKey string, aAppName string, aDb *sql.DB) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			log.Printf("Error opening database=%s, error=%s", DB_NAME, err.Error())
			return
		}
		defer db.Close()
	}
	DbInviteCreateTable(aApiKey, db)
	selectSQL := fmt.Sprintf("select * from %s%s where %s=?", TABLE_invites, aApiKey, TABLE_INVITES_COLUMN_inviteid)
	stmt, err := db.Prepare(selectSQL)
	if err != nil {
		log.Printf("Error preparing, %s, error=%s", selectSQL, err.Error())
		return
	}
	rows, err := stmt.Query(aInviteId)
	if err != nil {
		stmt.Close() // stmt was leaked on this return path in the original
		log.Printf("Error quering, %s, error=%s", selectSQL, err.Error())
		return
	}
	isValidInvite := false
	now := time.Now().UnixNano() / int64(time.Millisecond)
	for rows.Next() {
		var id string
		var inviteId string
		var apiKey string
		var issued int64
		var expires int64
		if err = rows.Scan(&id, &inviteId, &apiKey, &issued, &expires); err != nil {
			log.Printf("Error scanning, error=%s", err.Error())
			// The original returned here, leaking rows and stmt; stop scanning
			// instead — the invite stays invalid and we return below.
			break
		}
		if apiKey == aApiKey && now < (issued+expires) {
			isValidInvite = true
			break
		}
	}
	rows.Close()
	stmt.Close()
	if !isValidInvite {
		return
	}
	stmt, err = db.Prepare(STMT_INSERT_INTO_APIKEYS)
	if err != nil {
		log.Printf("Error preparing %s, error=%s", STMT_INSERT_INTO_APIKEYS, err.Error())
		return
	}
	defer stmt.Close() // was leaked when Exec failed in the original
	_, err = stmt.Exec(aUserId, aApiKey, aAppName)
	if err != nil {
		log.Printf("Error executing %s, values userId=%d, apiKey=%s, appName=%s, error=%s", STMT_INSERT_INTO_APIKEYS, aUserId, aApiKey, aAppName, err.Error())
		return
	}
}
//DbInviteAdd adds an invitation to TABLE_invites for API key.
//A fresh invite id is generated; the invite is valid for INVITE_VALIDITY_MS
//milliseconds from now. Returns the invite id (empty on failure).
func DbInviteAdd(aApiKey string, aDb *sql.DB) (inviteId string) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("DbInviteAdd, Error opening db, err=", err)
			return
		}
		defer db.Close()
	}
	DbInviteCreateTable(aApiKey, db)
	DbInviteClean(aApiKey, db)
	inviteId, errUUID := GenerateUUID()
	if errUUID != nil {
		fmt.Println("Error generating uuid, err=", errUUID)
		// Do not insert an invite without a usable id (the original continued).
		return inviteId
	}
	log.Printf("DbInviteAdd, inviteId=%s", inviteId)
	issued := time.Now().UnixNano() / int64(time.Millisecond)
	expires := INVITE_VALIDITY_MS
	stmt, err := db.Prepare(fmt.Sprintf(STMT_INSERT_INTO_INVITES, aApiKey))
	if err != nil {
		// The original ignored this error and called Exec on a nil stmt (panic).
		fmt.Println("DbInviteAdd, Error preparing insert into invites, err=", err)
		return inviteId
	}
	defer stmt.Close()
	_, err = stmt.Exec(inviteId, aApiKey, issued, expires)
	if err != nil {
		fmt.Println("DbInviteAdd, Error inserting into tokens, err=", err)
	}
	return inviteId
}
//DbInviteClean deletes expired invitation records from TABLE_invites for API key table.
//An invite is expired when issued + expires (both milliseconds) is in the past.
func DbInviteClean(aApiKey string, aDb *sql.DB) {
	var err error
	db := aDb
	if db == nil {
		db, err = sql.Open(DB_TYPE, DB_NAME)
		if err != nil {
			fmt.Println("DbInviteClean, Error opening db login.sqlite, err=", err)
			return
		}
		defer db.Close()
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	deleteSQL := fmt.Sprintf("delete from %s%s where %s=? AND %s + %s < ?",
		TABLE_invites, aApiKey, TABLE_INVITES_COLUMN_apikey, TABLE_INVITES_COLUMN_issued, TABLE_INVITES_COLUMN_expires)
	stmt, err := db.Prepare(deleteSQL)
	if err != nil {
		log.Printf("DbInviteClean, error preparing %s, error=%s", deleteSQL, err.Error())
		return
	}
	defer stmt.Close() // stmt was never closed in the original
	_, err = stmt.Exec(aApiKey, now)
	if err != nil {
		// Log the statement actually run (the original re-built a slightly
		// different string here, including an "isssued" typo).
		log.Printf("DbInviteClean, error executing %s, error=%s", deleteSQL, err.Error())
	}
}
//DbInviteCreateTable creates TABLE_invites for API key table.
func DbInviteCreateTable(aApiKey string, aDb *sql.DB) {
var err error
var stmt *sql.Stmt = nil
var db *sql.DB = aDb
if db == nil {
db, err = sql.Open(DB_TYPE, DB_NAME)
if err != nil {
fmt.Println("DbInviteClean, Error opening db login.sqlite, err=", err)
return
}
defer db.Close()
}
//init table invites
stmt, err = db.Prepare(fmt.Sprintf(STMT_CREATE_TABLE_INVITES, aApiKey))
if err != nil {
log.Println("init, Error preparing, %s, err=%s", STMT_CREATE_TABLE_INVITES, err)
}
if stmt != nil {defer stmt.Close()}
_, err = stmt.Exec()
if err != nil {
log.Printf("init, Error executing, %s, err=%s",STMT_CREATE_TABLE_INVITES, err)
}
} |
package compute
import "fmt"
// EntitySummary is used to group an entity Id and name together for serialisation / deserialisation purposes.
type EntitySummary struct {
	// The entity Id.
	ID string `json:"id"`

	// The entity name.
	Name string `json:"name"`
}

// IPRange represents an IPvX range.
// Implemented by IPv4Range and IPv6Range below.
type IPRange interface {
	// Convert the IPvX range to a display string.
	ToDisplayString() string
}
// IPv4Range represents an IPv4 network (base address and prefix size)
type IPv4Range struct {
	// The network base address.
	BaseAddress string `json:"address"`

	// The network prefix size.
	PrefixSize int `json:"prefixSize"`
}

// ToDisplayString converts the IPv4 range to a display string.
// The result uses CIDR notation, e.g. "10.0.0.0/8".
func (r IPv4Range) ToDisplayString() string {
	cidr := fmt.Sprintf("%s/%d", r.BaseAddress, r.PrefixSize)

	return cidr
}
// IPv6Range represents an IPv6 network (base address and prefix size)
type IPv6Range struct {
	// The network base address.
	BaseAddress string `json:"address"`

	// The network prefix size.
	PrefixSize int `json:"prefixSize"`
}

// ToDisplayString converts the IPv6 range to a display string.
// The result uses CIDR notation, e.g. "2001:db8::/32".
func (r IPv6Range) ToDisplayString() string {
	cidr := fmt.Sprintf("%s/%d", r.BaseAddress, r.PrefixSize)

	return cidr
}
// OperatingSystem represents a well-known operating system for virtual machines.
type OperatingSystem struct {
// The operating system Id.
ID string `json:"id"`
// The operating system type.
Family string `json:"family"`
// The operating system display-name.
DisplayName string `json:"displayName"`
}
// VirtualMachineCPU represents the CPU configuration for a virtual machine.
type VirtualMachineCPU struct {
Count int `json:"count,omitempty"`
Speed string `json:"speed,omitempty"`
CoresPerSocket int `json:"coresPerSocket,omitempty"`
}
// VirtualMachineDisk represents the disk configuration for a virtual machine.
type VirtualMachineDisk struct {
ID *string `json:"id,omitempty"`
SCSIUnitID int `json:"scsiId"`
SizeGB int `json:"sizeGb"`
Speed string `json:"speed"`
}
// VirtualMachineNetwork represents the networking configuration for a virtual machine.
type VirtualMachineNetwork struct {
NetworkDomainID string `json:"networkDomainId,omitempty"`
PrimaryAdapter VirtualMachineNetworkAdapter `json:"primaryNic"`
AdditionalNetworkAdapters []VirtualMachineNetworkAdapter `json:"additionalNic"`
}
// VirtualMachineNetworkAdapter represents the configuration for a virtual machine's network adapter.
// If deploying a new VM, exactly one of VLANID / PrivateIPv4Address must be specified.
type VirtualMachineNetworkAdapter struct {
ID *string `json:"id,omitempty"`
VLANID *string `json:"vlanId,omitempty"`
VLANName *string `json:"vlanName,omitempty"`
PrivateIPv4Address *string `json:"privateIpv4,omitempty"`
PrivateIPv6Address *string `json:"privateIpv6,omitempty"`
State *string `json:"state,omitempty"`
}
Fix incorrect deserialisation of IPv6 address from server details.
package compute
import "fmt"
// EntitySummary is used to group an entity Id and name together for serialisation / deserialisation purposes.
type EntitySummary struct {
	// The entity Id.
	ID string `json:"id"`

	// The entity name.
	Name string `json:"name"`
}

// IPRange represents an IPvX range.
// Implemented by IPv4Range and IPv6Range below.
type IPRange interface {
	// Convert the IPvX range to a display string.
	ToDisplayString() string
}
// IPv4Range represents an IPv4 network (base address and prefix size)
type IPv4Range struct {
	// The network base address.
	BaseAddress string `json:"address"`

	// The network prefix size.
	PrefixSize int `json:"prefixSize"`
}

// ToDisplayString converts the IPv4 range to a display string
// in CIDR notation ("<address>/<prefix>").
func (ipRange IPv4Range) ToDisplayString() string {
	return fmt.Sprintf("%s/%d",
		ipRange.BaseAddress,
		ipRange.PrefixSize,
	)
}
// IPv6Range represents an IPv6 network (base address and prefix size)
type IPv6Range struct {
	// The network base address.
	BaseAddress string `json:"address"`

	// The network prefix size.
	PrefixSize int `json:"prefixSize"`
}

// ToDisplayString converts the IPv6 range to a display string
// in CIDR notation ("<address>/<prefix>").
func (ipRange IPv6Range) ToDisplayString() string {
	return fmt.Sprintf("%s/%d",
		ipRange.BaseAddress,
		ipRange.PrefixSize,
	)
}
// OperatingSystem represents a well-known operating system for virtual machines.
type OperatingSystem struct {
	// The operating system Id.
	ID string `json:"id"`

	// The operating system type.
	Family string `json:"family"`

	// The operating system display-name.
	DisplayName string `json:"displayName"`
}

// VirtualMachineCPU represents the CPU configuration for a virtual machine.
type VirtualMachineCPU struct {
	Count int `json:"count,omitempty"`
	Speed string `json:"speed,omitempty"`
	CoresPerSocket int `json:"coresPerSocket,omitempty"`
}

// VirtualMachineDisk represents the disk configuration for a virtual machine.
type VirtualMachineDisk struct {
	ID *string `json:"id,omitempty"`
	SCSIUnitID int `json:"scsiId"`
	SizeGB int `json:"sizeGb"`
	Speed string `json:"speed"`
}

// VirtualMachineNetwork represents the networking configuration for a virtual machine.
type VirtualMachineNetwork struct {
	NetworkDomainID string `json:"networkDomainId,omitempty"`
	PrimaryAdapter VirtualMachineNetworkAdapter `json:"primaryNic"`
	AdditionalNetworkAdapters []VirtualMachineNetworkAdapter `json:"additionalNic"`
}

// VirtualMachineNetworkAdapter represents the configuration for a virtual machine's network adapter.
// If deploying a new VM, exactly one of VLANID / PrivateIPv4Address must be specified.
type VirtualMachineNetworkAdapter struct {
	ID *string `json:"id,omitempty"`
	VLANID *string `json:"vlanId,omitempty"`
	VLANName *string `json:"vlanName,omitempty"`
	PrivateIPv4Address *string `json:"privateIpv4,omitempty"`
	// Serialised as "ipv6" to match the server details payload.
	PrivateIPv6Address *string `json:"ipv6,omitempty"`
	State *string `json:"state,omitempty"`
}
|
package html
import (
"code.google.com/p/go.net/html"
"errors"
"github.com/slyrz/newscat/util"
"io"
"net/url"
"unicode"
)
const (
// We remember a few special node types when descending into their
// children. Each constant is a distinct bit so they can be combined
// into the Article.ancestors bitmask.
AncestorArticle = 1 << iota
AncestorAside
AncestorBlockquote
AncestorList
)
// Document is a parsed HTML document that extracts the document title and
// holds unexported pointers to the html, head and body nodes.
type Document struct {
Title *util.Text // the <title>...</title> text.
// Unexported fields, populated by init.
html *html.Node // the <html>...</html> part
head *html.Node // the <head>...</head> part
body *html.Node // the <body>...</body> part
}
// Article stores all text chunks found in a HTML document.
type Article struct {
Document
Chunks []*Chunk // all chunks found in this document.
// State variables used when collecting chunks.
ancestors int // bitmask which stores ancestors of the current node
// Number of non-space characters inside link tags / normal tags
// per html.ElementNode (cumulative, filled in by countText).
linkText map[*html.Node]int // length of text inside <a></a> tags
normText map[*html.Node]int // length of text outside <a></a> tags
}
// Website finds all links in a HTML document.
type Website struct {
Document
Links []*Link // all links found in this document.
}
// NewDocument parses the HTML data provided through an io.Reader interface.
// It returns an error if the data cannot be parsed as a complete document.
func NewDocument(r io.Reader) (*Document, error) {
	document := &Document{}
	err := document.init(r)
	if err != nil {
		return nil, err
	}
	return document, nil
}
// init parses the HTML from r and locates the <html>, <head> and <body>
// nodes as well as the document title. It fails if parsing fails or any
// of the three structural nodes is missing.
func (doc *Document) init(r io.Reader) error {
doc.Title = util.NewText()
root, err := html.Parse(r)
if err != nil {
return err
}
// Assign the fields html, head and body from the HTML page.
iterateNode(root, func(n *html.Node) int {
switch n.Data {
case "html":
// Descend into <html> to reach <head> and <body>.
doc.html = n
return IterNext
case "body":
doc.body = n
return IterSkip
case "head":
doc.head = n
return IterSkip
}
// Keep going as long as we're missing some nodes.
return IterNext
})
// Check if html, head and body nodes were found.
if doc.html == nil || doc.head == nil || doc.body == nil {
return errors.New("Document missing <html>, <head> or <body>.")
}
// Detect the document title: the first <title> element inside <head>.
iterateNode(doc.head, func(n *html.Node) int {
if n.Type == html.ElementNode && n.Data == "title" {
iterateText(n, doc.Title.WriteString)
return IterStop
}
return IterNext
})
return nil
}
// NewWebsite parses the HTML data provided through an io.Reader interface
// and returns, if successful, a Website object that can be used to access
// all links and extract links to news articles.
func NewWebsite(r io.Reader) (*Website, error) {
	website := &Website{}
	err := website.init(r)
	if err != nil {
		return nil, err
	}
	return website, nil
}
// init parses the document and collects every <a> element into Links.
func (website *Website) init(r io.Reader) error {
	err := website.Document.init(r)
	if err != nil {
		return err
	}
	website.Links = make([]*Link, 0, 256)
	// Walk the body and collect anchors; we don't descend into an anchor's
	// children since NewLink consumes the whole element.
	iterateNode(website.body, func(node *html.Node) int {
		if node.Type != html.ElementNode || node.Data != "a" {
			return IterNext
		}
		if link, err := NewLink(node); err == nil {
			website.Links = append(website.Links, link)
		}
		return IterSkip
	})
	return nil
}
// ResolveReference resolves every collected link relative to the given
// reference URL. It returns the parse error if ref is not a valid URL.
func (website *Website) ResolveReference(ref string) error {
	refURL, err := url.Parse(ref)
	if err != nil {
		return err
	}
	for _, link := range website.Links {
		link.Resolve(refURL)
	}
	return nil
}
// NewArticle parses the HTML data provided through an io.Reader interface
// and returns, if successful, an Article object that can be used to access
// all relevant text chunks found in the document.
func NewArticle(r io.Reader) (*Article, error) {
	article := &Article{}
	err := article.init(r)
	if err != nil {
		return nil, err
	}
	return article, nil
}
// init parses the document, strips unwanted elements, gathers text
// statistics and extracts the text chunks, which are then doubly linked.
func (article *Article) init(r io.Reader) error {
	if err := article.Document.init(r); err != nil {
		return err
	}
	article.Chunks = make([]*Chunk, 0, 512)
	article.linkText = make(map[*html.Node]int)
	article.normText = make(map[*html.Node]int)
	article.cleanBody(article.body, 0)
	article.countText(article.body, false)
	article.parseBody(article.body)
	// Doubly-link neighbouring chunks.
	for i, chunk := range article.Chunks {
		if i > 0 {
			chunk.Prev = article.Chunks[i-1]
		}
		if i+1 < len(article.Chunks) {
			chunk.Next = article.Chunks[i+1]
		}
	}
	return nil
}
// countText counts the number of letters inside links and outside of links
// per html.Node. Counting is cumulative, so the numbers of a parent node
// include the numbers of its child nodes. The results are recorded in
// article.linkText and article.normText.
func (article *Article) countText(n *html.Node, insideLink bool) (linkText int, normText int) {
	// Everything below an <a> element counts as link text.
	if n.Type == html.ElementNode && n.Data == "a" {
		insideLink = true
	}
	for s := n.FirstChild; s != nil; s = s.NextSibling {
		linkTextChild, normTextChild := article.countText(s, insideLink)
		linkText += linkTextChild
		normText += normTextChild
	}
	if n.Type == html.TextNode {
		// Count letters only; whitespace and punctuation carry no weight.
		// Note: don't shadow the builtin type `rune` here.
		count := 0
		for _, r := range n.Data {
			if unicode.IsLetter(r) {
				count++
			}
		}
		if insideLink {
			linkText += count
		} else {
			normText += count
		}
	}
	article.linkText[n] = linkText
	article.normText[n] = normText
	return linkText, normText
}
// cleanBody removes unwanted HTML elements from the HTML body, recursing
// into the elements that are kept. level is the current nesting depth.
func (article *Article) cleanBody(n *html.Node, level int) {
// removeNode returns true if a node should be removed from HTML document.
removeNode := func(c *html.Node, level int) bool {
switch c.Data {
// Elements safe to ignore (never contain article text).
case "address", "audio", "button", "canvas", "caption", "fieldset",
"figcaption", "figure", "footer", "form", "frame", "iframe",
"map", "menu", "nav", "noscript", "object", "option", "output",
"script", "select", "style", "svg", "textarea", "video":
return true
// High-level tables might be used to layout the document, so we better
// not ignore them; only drop deeply nested tables.
case "table":
return level > 5
}
return false
}
var curr *html.Node = n.FirstChild
var next *html.Node = nil
for ; curr != nil; curr = next {
// We have to remember the next sibling here because calling RemoveChild
// sets curr's NextSibling pointer to nil and we would quit the loop
// prematurely.
next = curr.NextSibling
if curr.Type == html.ElementNode {
if removeNode(curr, level) {
n.RemoveChild(curr)
} else {
article.cleanBody(curr, level+1)
}
}
}
}
var (
// ignoreNames matches class/id/itemprop values that typically mark
// non-content elements (navigation, comments, social widgets, ...).
ignoreNames = util.NewRegexFromWords(
"breadcrumb",
"byline",
"caption",
"comment",
"community",
"credit",
"description",
"email",
"foot",
"gallery",
"hide",
"infotext",
"photo",
"related",
"shares",
"social",
"story[-_]?bar",
"story[-_]?feature",
)
// ignoreStyle matches inline styles that hide an element.
ignoreStyle = util.NewRegex(`(?i)display:\s*none`)
)
// parseBody parses the <body>...</body> part of the HTML page. It creates
// Chunks for every html.TextNode found in the body and skips subtrees that
// look like non-content (by class/id/itemprop name or hidden style).
func (article *Article) parseBody(n *html.Node) {
switch n.Type {
case html.ElementNode:
// We ignore the node if it has some nasty classes/ids/itemprops or if
// it contains "display: none" in its style attribute.
for _, attr := range n.Attr {
switch attr.Key {
case "id", "class", "itemprop":
if ignoreNames.In(attr.Val) {
return
}
case "style":
if ignoreStyle.In(attr.Val) {
return
}
}
}
ancestorMask := 0
switch n.Data {
// We convert headings and links to text immediately. This is easier
// and feasible because headings and links don't contain many children.
// Descending into these children and handling every TextNode separately
// would make things unnecessary complicated and our results noisy.
case "h1", "h2", "h3", "h4", "h5", "h6", "a":
if chunk, err := NewChunk(article, n); err == nil {
article.Chunks = append(article.Chunks, chunk)
}
return
// Now mask the element type, but only if it isn't already set.
// If we mask a bit which was already set by one of our callers, we'd also
// clear it at the end of this function, though it actually should be cleared
// by the caller.
case "article":
ancestorMask = AncestorArticle &^ article.ancestors
case "aside":
ancestorMask = AncestorAside &^ article.ancestors
case "blockquote":
ancestorMask = AncestorBlockquote &^ article.ancestors
case "ul", "ol":
ancestorMask = AncestorList &^ article.ancestors
}
// Add our mask to the ancestor bitmask while we walk the children.
article.ancestors |= ancestorMask
for c := n.FirstChild; c != nil; c = c.NextSibling {
article.parseBody(c)
}
// Remove our mask from the ancestor bitmask.
article.ancestors &^= ancestorMask
case html.TextNode:
if chunk, err := NewChunk(article, n); err == nil {
article.Chunks = append(article.Chunks, chunk)
}
}
}
// TextStat contains the number of words and sentences found in text.
type TextStat struct {
Words int // total number of words
Sentences int // total number of sentences
Count int // number of texts used to calculate these stats
}
// GetClassStats groups the document chunks by their classes (defined by the
// class attribute of HTML nodes) and calculates TextStats for each class.
func (article *Article) GetClassStats() map[string]*TextStat {
	result := make(map[string]*TextStat)
	for _, chunk := range article.Chunks {
		for _, class := range chunk.Classes {
			stat, seen := result[class]
			if !seen {
				result[class] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}
				continue
			}
			stat.Words += chunk.Text.Words
			stat.Sentences += chunk.Text.Sentences
			stat.Count++
		}
	}
	return result
}
// GetClusterStats groups the document chunks by common ancestors and
// calculates TextStats for each group of chunks. Each chunk is mapped to
// the stats of the best-populated ancestor within reach.
func (article *Article) GetClusterStats() map[*Chunk]*TextStat {
// Don't ascend further than this.
const maxAncestors = 3
// Count TextStats for Chunk ancestors: each of the first maxAncestors
// ancestors of a chunk's block accumulates that chunk's words/sentences.
ancestorStat := make(map[*html.Node]*TextStat)
for _, chunk := range article.Chunks {
node, count := chunk.Block, 0
for node != nil && count < maxAncestors {
if stat, ok := ancestorStat[node]; ok {
stat.Words += chunk.Text.Words
stat.Sentences += chunk.Text.Sentences
stat.Count += 1
} else {
ancestorStat[node] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}
}
node, count = node.Parent, count+1
}
}
// Generate result.
result := make(map[*Chunk]*TextStat)
for _, chunk := range article.Chunks {
node := chunk.Block
if node == nil {
continue
}
// Start with the parent's TextStat. Then ascend and check if the
// current chunk has an ancestor with better stats. Use the best stat
// as result.
stat := ancestorStat[node]
for {
if node = node.Parent; node == nil {
break
}
if statPrev, ok := ancestorStat[node]; ok {
if stat.Count < statPrev.Count {
stat = statPrev
}
} else {
// Ancestor beyond maxAncestors of every chunk: stop ascending.
break
}
}
result[chunk] = stat
}
return result
}
store initial array capacities in constants
package html
import (
"code.google.com/p/go.net/html"
"errors"
"github.com/slyrz/newscat/util"
"io"
"net/url"
"unicode"
)
// Initial slice capacities used by Article.init and Website.init.
const (
chunkCap = 512 // initial capacity of the Article.Chunks array
linkCap = 256 // initial capacity of the Website.Links array
)
const (
// We remember a few special node types when descending into their
// children. Each constant is a distinct bit so they can be combined
// into the Article.ancestors bitmask.
AncestorArticle = 1 << iota
AncestorAside
AncestorBlockquote
AncestorList
)
// Document is a parsed HTML document that extracts the document title and
// holds unexported pointers to the html, head and body nodes.
type Document struct {
Title *util.Text // the <title>...</title> text.
// Unexported fields, populated by init.
html *html.Node // the <html>...</html> part
head *html.Node // the <head>...</head> part
body *html.Node // the <body>...</body> part
}
// Article stores all text chunks found in a HTML document.
type Article struct {
Document
Chunks []*Chunk // all chunks found in this document.
// State variables used when collecting chunks.
ancestors int // bitmask which stores ancestors of the current node
// Number of non-space characters inside link tags / normal tags
// per html.ElementNode (cumulative, filled in by countText).
linkText map[*html.Node]int // length of text inside <a></a> tags
normText map[*html.Node]int // length of text outside <a></a> tags
}
// Website finds all links in a HTML document.
type Website struct {
Document
Links []*Link // all links found in this document.
}
// NewDocument parses the HTML data provided through an io.Reader interface.
// It returns an error if the data cannot be parsed as a complete document.
func NewDocument(r io.Reader) (*Document, error) {
	document := &Document{}
	err := document.init(r)
	if err != nil {
		return nil, err
	}
	return document, nil
}
// init parses the HTML from r and locates the <html>, <head> and <body>
// nodes as well as the document title. It fails if parsing fails or any
// of the three structural nodes is missing.
func (doc *Document) init(r io.Reader) error {
doc.Title = util.NewText()
root, err := html.Parse(r)
if err != nil {
return err
}
// Assign the fields html, head and body from the HTML page.
iterateNode(root, func(n *html.Node) int {
switch n.Data {
case "html":
// Descend into <html> to reach <head> and <body>.
doc.html = n
return IterNext
case "body":
doc.body = n
return IterSkip
case "head":
doc.head = n
return IterSkip
}
// Keep going as long as we're missing some nodes.
return IterNext
})
// Check if html, head and body nodes were found.
if doc.html == nil || doc.head == nil || doc.body == nil {
return errors.New("Document missing <html>, <head> or <body>.")
}
// Detect the document title: the first <title> element inside <head>.
iterateNode(doc.head, func(n *html.Node) int {
if n.Type == html.ElementNode && n.Data == "title" {
iterateText(n, doc.Title.WriteString)
return IterStop
}
return IterNext
})
return nil
}
// NewWebsite parses the HTML data provided through an io.Reader interface
// and returns, if successful, a Website object that can be used to access
// all links and extract links to news articles.
func NewWebsite(r io.Reader) (*Website, error) {
	website := &Website{}
	err := website.init(r)
	if err != nil {
		return nil, err
	}
	return website, nil
}
// init parses the document and collects every <a> element into Links.
func (website *Website) init(r io.Reader) error {
	err := website.Document.init(r)
	if err != nil {
		return err
	}
	website.Links = make([]*Link, 0, linkCap)
	// Walk the body and collect anchors; we don't descend into an anchor's
	// children since NewLink consumes the whole element.
	iterateNode(website.body, func(node *html.Node) int {
		if node.Type != html.ElementNode || node.Data != "a" {
			return IterNext
		}
		if link, err := NewLink(node); err == nil {
			website.Links = append(website.Links, link)
		}
		return IterSkip
	})
	return nil
}
// ResolveReference resolves every collected link relative to the given
// reference URL. It returns the parse error if ref is not a valid URL.
func (website *Website) ResolveReference(ref string) error {
	refURL, err := url.Parse(ref)
	if err != nil {
		return err
	}
	for _, link := range website.Links {
		link.Resolve(refURL)
	}
	return nil
}
// NewArticle parses the HTML data provided through an io.Reader interface
// and returns, if successful, an Article object that can be used to access
// all relevant text chunks found in the document.
func NewArticle(r io.Reader) (*Article, error) {
	article := &Article{}
	err := article.init(r)
	if err != nil {
		return nil, err
	}
	return article, nil
}
// init parses the document, strips unwanted elements, gathers text
// statistics and extracts the text chunks, which are then doubly linked.
func (article *Article) init(r io.Reader) error {
	if err := article.Document.init(r); err != nil {
		return err
	}
	article.Chunks = make([]*Chunk, 0, chunkCap)
	article.linkText = make(map[*html.Node]int)
	article.normText = make(map[*html.Node]int)
	article.cleanBody(article.body, 0)
	article.countText(article.body, false)
	article.parseBody(article.body)
	// Doubly-link neighbouring chunks.
	for i, chunk := range article.Chunks {
		if i > 0 {
			chunk.Prev = article.Chunks[i-1]
		}
		if i+1 < len(article.Chunks) {
			chunk.Next = article.Chunks[i+1]
		}
	}
	return nil
}
// countText counts the number of letters inside links and outside of links
// per html.Node. Counting is cumulative, so the numbers of a parent node
// include the numbers of its child nodes. The results are recorded in
// article.linkText and article.normText.
func (article *Article) countText(n *html.Node, insideLink bool) (linkText int, normText int) {
	// Everything below an <a> element counts as link text.
	if n.Type == html.ElementNode && n.Data == "a" {
		insideLink = true
	}
	for s := n.FirstChild; s != nil; s = s.NextSibling {
		linkTextChild, normTextChild := article.countText(s, insideLink)
		linkText += linkTextChild
		normText += normTextChild
	}
	if n.Type == html.TextNode {
		// Count letters only; whitespace and punctuation carry no weight.
		// Note: don't shadow the builtin type `rune` here.
		count := 0
		for _, r := range n.Data {
			if unicode.IsLetter(r) {
				count++
			}
		}
		if insideLink {
			linkText += count
		} else {
			normText += count
		}
	}
	article.linkText[n] = linkText
	article.normText[n] = normText
	return linkText, normText
}
// cleanBody removes unwanted HTML elements from the HTML body, recursing
// into the elements that are kept. level is the current nesting depth.
func (article *Article) cleanBody(n *html.Node, level int) {
// removeNode returns true if a node should be removed from HTML document.
removeNode := func(c *html.Node, level int) bool {
switch c.Data {
// Elements safe to ignore (never contain article text).
case "address", "audio", "button", "canvas", "caption", "fieldset",
"figcaption", "figure", "footer", "form", "frame", "iframe",
"map", "menu", "nav", "noscript", "object", "option", "output",
"script", "select", "style", "svg", "textarea", "video":
return true
// High-level tables might be used to layout the document, so we better
// not ignore them; only drop deeply nested tables.
case "table":
return level > 5
}
return false
}
var curr *html.Node = n.FirstChild
var next *html.Node = nil
for ; curr != nil; curr = next {
// We have to remember the next sibling here because calling RemoveChild
// sets curr's NextSibling pointer to nil and we would quit the loop
// prematurely.
next = curr.NextSibling
if curr.Type == html.ElementNode {
if removeNode(curr, level) {
n.RemoveChild(curr)
} else {
article.cleanBody(curr, level+1)
}
}
}
}
var (
// ignoreNames matches class/id/itemprop values that typically mark
// non-content elements (navigation, comments, social widgets, ...).
ignoreNames = util.NewRegexFromWords(
"breadcrumb",
"byline",
"caption",
"comment",
"community",
"credit",
"description",
"email",
"foot",
"gallery",
"hide",
"infotext",
"photo",
"related",
"shares",
"social",
"story[-_]?bar",
"story[-_]?feature",
)
// ignoreStyle matches inline styles that hide an element.
ignoreStyle = util.NewRegex(`(?i)display:\s*none`)
)
// parseBody parses the <body>...</body> part of the HTML page. It creates
// Chunks for every html.TextNode found in the body and skips subtrees that
// look like non-content (by class/id/itemprop name or hidden style).
func (article *Article) parseBody(n *html.Node) {
switch n.Type {
case html.ElementNode:
// We ignore the node if it has some nasty classes/ids/itemprops or if
// it contains "display: none" in its style attribute.
for _, attr := range n.Attr {
switch attr.Key {
case "id", "class", "itemprop":
if ignoreNames.In(attr.Val) {
return
}
case "style":
if ignoreStyle.In(attr.Val) {
return
}
}
}
ancestorMask := 0
switch n.Data {
// We convert headings and links to text immediately. This is easier
// and feasible because headings and links don't contain many children.
// Descending into these children and handling every TextNode separately
// would make things unnecessary complicated and our results noisy.
case "h1", "h2", "h3", "h4", "h5", "h6", "a":
if chunk, err := NewChunk(article, n); err == nil {
article.Chunks = append(article.Chunks, chunk)
}
return
// Now mask the element type, but only if it isn't already set.
// If we mask a bit which was already set by one of our callers, we'd also
// clear it at the end of this function, though it actually should be cleared
// by the caller.
case "article":
ancestorMask = AncestorArticle &^ article.ancestors
case "aside":
ancestorMask = AncestorAside &^ article.ancestors
case "blockquote":
ancestorMask = AncestorBlockquote &^ article.ancestors
case "ul", "ol":
ancestorMask = AncestorList &^ article.ancestors
}
// Add our mask to the ancestor bitmask while we walk the children.
article.ancestors |= ancestorMask
for c := n.FirstChild; c != nil; c = c.NextSibling {
article.parseBody(c)
}
// Remove our mask from the ancestor bitmask.
article.ancestors &^= ancestorMask
case html.TextNode:
if chunk, err := NewChunk(article, n); err == nil {
article.Chunks = append(article.Chunks, chunk)
}
}
}
// TextStat contains the number of words and sentences found in text.
type TextStat struct {
Words int // total number of words
Sentences int // total number of sentences
Count int // number of texts used to calculate these stats
}
// GetClassStats groups the document chunks by their classes (defined by the
// class attribute of HTML nodes) and calculates TextStats for each class.
func (article *Article) GetClassStats() map[string]*TextStat {
	result := make(map[string]*TextStat)
	for _, chunk := range article.Chunks {
		for _, class := range chunk.Classes {
			stat, seen := result[class]
			if !seen {
				result[class] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}
				continue
			}
			stat.Words += chunk.Text.Words
			stat.Sentences += chunk.Text.Sentences
			stat.Count++
		}
	}
	return result
}
// GetClusterStats groups the document chunks by common ancestors and
// calculates TextStats for each group of chunks. Each chunk is mapped to
// the stats of the best-populated ancestor within reach.
func (article *Article) GetClusterStats() map[*Chunk]*TextStat {
// Don't ascend further than this.
const maxAncestors = 3
// Count TextStats for Chunk ancestors: each of the first maxAncestors
// ancestors of a chunk's block accumulates that chunk's words/sentences.
ancestorStat := make(map[*html.Node]*TextStat)
for _, chunk := range article.Chunks {
node, count := chunk.Block, 0
for node != nil && count < maxAncestors {
if stat, ok := ancestorStat[node]; ok {
stat.Words += chunk.Text.Words
stat.Sentences += chunk.Text.Sentences
stat.Count += 1
} else {
ancestorStat[node] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}
}
node, count = node.Parent, count+1
}
}
// Generate result.
result := make(map[*Chunk]*TextStat)
for _, chunk := range article.Chunks {
node := chunk.Block
if node == nil {
continue
}
// Start with the parent's TextStat. Then ascend and check if the
// current chunk has an ancestor with better stats. Use the best stat
// as result.
stat := ancestorStat[node]
for {
if node = node.Parent; node == nil {
break
}
if statPrev, ok := ancestorStat[node]; ok {
if stat.Count < statPrev.Count {
stat = statPrev
}
} else {
// Ancestor beyond maxAncestors of every chunk: stop ascending.
break
}
}
result[chunk] = stat
}
return result
}
|
// Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED "AS IS"
// WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.
package bats
import (
"encoding/binary"
"errors"
"code.google.com/p/gopacket"
"my/errs"
"my/itto/verify/packet"
)
/************************************************************************/
// LayerTypePitch is the gopacket layer type registered for the BATS PITCH protocol.
var LayerTypePitch = gopacket.RegisterLayerType(12001, gopacket.LayerTypeMetadata{"Pitch", gopacket.DecodeFunc(decodePitch)})
// decodePitch decodes data as a PITCH message: byte 1 selects the message
// type, whose registered layer performs the actual decoding.
func decodePitch(data []byte, p gopacket.PacketBuilder) error {
	// Need at least the length and message-type bytes.
	if len(data) < 2 {
		return errors.New("message too short")
	}
	pitchMessageType := PitchMessageType(data[1])
	return pitchMessageType.Decode(data, p)
}
/************************************************************************/
type PitchMessageType uint8
// Decode creates the layer registered for this message type, decodes data
// into it, and hands control to the next decoder.
func (a PitchMessageType) Decode(data []byte, p gopacket.PacketBuilder) error {
	layer := PitchMessageTypeMetadata[a].CreateLayer()
	err := layer.DecodeFromBytes(data, p)
	if err != nil {
		return err
	}
	p.AddLayer(layer)
	return p.NextDecoder(layer.NextLayerType())
}
// String returns the registered display name of the message type.
func (a PitchMessageType) String() string {
	meta := &PitchMessageTypeMetadata[a]
	return meta.Name
}
// LayerType returns the gopacket layer type registered for this message type.
func (a PitchMessageType) LayerType() gopacket.LayerType {
	meta := &PitchMessageTypeMetadata[a]
	return meta.LayerType
}
/************************************************************************/
// PITCH message type byte values as they appear on the wire (byte 1 of
// every message).
const (
PitchMessageTypeUnknown PitchMessageType = 0 // not in spec, catch-all
PitchMessageTypeTime PitchMessageType = 0x20
PitchMessageTypeAddOrderLong PitchMessageType = 0x21
PitchMessageTypeAddOrderShort PitchMessageType = 0x22
PitchMessageTypeOrderExecuted PitchMessageType = 0x23
PitchMessageTypeOrderExecutedAtPriceSize PitchMessageType = 0x24
PitchMessageTypeReduceSizeLong PitchMessageType = 0x25
PitchMessageTypeReduceSizeShort PitchMessageType = 0x26
PitchMessageTypeModifyOrderLong PitchMessageType = 0x27
PitchMessageTypeModifyOrderShort PitchMessageType = 0x28
PitchMessageTypeDeleteOrder PitchMessageType = 0x29
PitchMessageTypeTradeLong PitchMessageType = 0x2a
PitchMessageTypeTradeShort PitchMessageType = 0x2b
PitchMessageTypeTradeBreak PitchMessageType = 0x2c
PitchMessageTypeEndOfSession PitchMessageType = 0x2d
PitchMessageTypeSymbolMapping PitchMessageType = 0x2e
PitchMessageTypeAddOrderExpanded PitchMessageType = 0x2f
PitchMessageTypeTradeExpanded PitchMessageType = 0x30
PitchMessageTypeTradingStatus PitchMessageType = 0x31
PitchMessageTypeAuctionUpdate PitchMessageType = 0x95
PitchMessageTypeAuctionSummary PitchMessageType = 0x96
PitchMessageTypeUnitClear PitchMessageType = 0x97
PitchMessageTypeRetailPriceImprovement PitchMessageType = 0x98
)
// PitchMessageTypeNames maps each message type byte to its display name.
// An empty entry means the type is unsupported (see init).
var PitchMessageTypeNames = [256]string{
PitchMessageTypeUnknown: "PitchUnknown",
PitchMessageTypeTime: "PitchTime",
PitchMessageTypeAddOrderLong: "PitchAddOrderLong",
PitchMessageTypeAddOrderShort: "PitchAddOrderShort",
PitchMessageTypeOrderExecuted: "PitchOrderExecuted",
PitchMessageTypeOrderExecutedAtPriceSize: "PitchOrderExecutedAtPriceSize",
PitchMessageTypeReduceSizeLong: "PitchReduceSizeLong",
PitchMessageTypeReduceSizeShort: "PitchReduceSizeShort",
PitchMessageTypeModifyOrderLong: "PitchModifyOrderLong",
PitchMessageTypeModifyOrderShort: "PitchModifyOrderShort",
PitchMessageTypeDeleteOrder: "PitchDeleteOrder",
PitchMessageTypeTradeLong: "PitchTradeLong",
PitchMessageTypeTradeShort: "PitchTradeShort",
PitchMessageTypeTradeBreak: "PitchTradeBreak",
PitchMessageTypeEndOfSession: "PitchEndOfSession",
PitchMessageTypeSymbolMapping: "PitchSymbolMapping",
PitchMessageTypeAddOrderExpanded: "PitchAddOrderExpanded",
PitchMessageTypeTradeExpanded: "PitchTradeExpanded",
PitchMessageTypeTradingStatus: "PitchTradingStatus",
PitchMessageTypeAuctionUpdate: "PitchAuctionUpdate",
PitchMessageTypeAuctionSummary: "PitchAuctionSummary",
PitchMessageTypeUnitClear: "PitchUnitClear",
PitchMessageTypeRetailPriceImprovement: "PitchRetailPriceImprovement",
}
// PitchMessageCreators maps each message type byte to a factory for its
// message struct. Commented-out entries are message types that are named
// but not yet implemented; they fall back to PitchMessageUnknown via init.
var PitchMessageCreators = [256]func() PitchMessage{
PitchMessageTypeUnknown: func() PitchMessage { return &PitchMessageUnknown{} },
PitchMessageTypeTime: func() PitchMessage { return &PitchMessageTime{} },
PitchMessageTypeAddOrderLong: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeAddOrderShort: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeOrderExecuted: func() PitchMessage { return &PitchMessageOrderExecuted{} },
PitchMessageTypeOrderExecutedAtPriceSize: func() PitchMessage { return &PitchMessageOrderExecutedAtPriceSize{} },
PitchMessageTypeReduceSizeLong: func() PitchMessage { return &PitchMessageReduceSize{} },
PitchMessageTypeReduceSizeShort: func() PitchMessage { return &PitchMessageReduceSize{} },
PitchMessageTypeModifyOrderLong: func() PitchMessage { return &PitchMessageModifyOrder{} },
PitchMessageTypeModifyOrderShort: func() PitchMessage { return &PitchMessageModifyOrder{} },
PitchMessageTypeDeleteOrder: func() PitchMessage { return &PitchMessageDeleteOrder{} },
PitchMessageTypeTradeLong: func() PitchMessage { return &PitchMessageTrade{} },
PitchMessageTypeTradeShort: func() PitchMessage { return &PitchMessageTrade{} },
PitchMessageTypeTradeBreak: func() PitchMessage { return &PitchMessageTradeBreak{} },
PitchMessageTypeEndOfSession: func() PitchMessage { return &PitchMessageEndOfSession{} },
PitchMessageTypeSymbolMapping: func() PitchMessage { return &PitchMessageSymbolMapping{} },
PitchMessageTypeAddOrderExpanded: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeTradeExpanded: func() PitchMessage { return &PitchMessageTrade{} },
//PitchMessageTypeTradingStatus: func() PitchMessage { return &PitchMessageTradingStatus{} },
//PitchMessageTypeAuctionUpdate: func() PitchMessage { return &PitchMessageAuctionUpdate{} },
//PitchMessageTypeAuctionSummary: func() PitchMessage { return &PitchMessageAuctionSummary{} },
PitchMessageTypeUnitClear: func() PitchMessage { return &PitchMessageUnitClear{} },
//PitchMessageTypeRetailPriceImprovement: func() PitchMessage { return &PitchMessageRetailPriceImprovement{} },
}
// EnumMessageTypeMetadata bundles everything init registers per message type.
type EnumMessageTypeMetadata struct {
Name string
LayerType gopacket.LayerType
CreateLayer func() PitchMessage
}
// PitchMessageTypeMetadata is indexed by the message type byte; filled by init.
var PitchMessageTypeMetadata [256]EnumMessageTypeMetadata
// LayerClassPitch groups all registered PITCH layer types; filled by init.
var LayerClassPitch gopacket.LayerClass
// PITCH_LAYERS_BASE_NUM is the base gopacket layer-type number; the message
// type byte is added to it when registering each layer.
const PITCH_LAYERS_BASE_NUM = 12100
// init registers a gopacket layer type for every named PITCH message type,
// fills PitchMessageTypeMetadata (unnamed types fall back to the Unknown
// entry) and builds LayerClassPitch from the registered types.
func init() {
layerTypes := make([]gopacket.LayerType, 0, 256)
for i := 0; i < 256; i++ {
if PitchMessageTypeNames[i] == "" {
continue
}
// Per-iteration variables: each createLayer closure must capture its
// own type and creator.
pitchMessageType := PitchMessageType(i)
layerTypeMetadata := gopacket.LayerTypeMetadata{
Name: PitchMessageTypeNames[i],
Decoder: pitchMessageType,
}
layerType := gopacket.RegisterLayerType(PITCH_LAYERS_BASE_NUM+i, layerTypeMetadata)
layerTypes = append(layerTypes, layerType)
creator := PitchMessageCreators[i]
createLayer := func() PitchMessage {
m := creator()
m.Base().Type = pitchMessageType
return m
}
PitchMessageTypeMetadata[i] = EnumMessageTypeMetadata{
Name: PitchMessageTypeNames[i],
LayerType: layerType,
CreateLayer: createLayer,
}
}
for i := 0; i < 256; i++ {
if PitchMessageTypeMetadata[i].Name == "" {
// unknown message type: reuse the catch-all Unknown metadata
PitchMessageTypeMetadata[i] = PitchMessageTypeMetadata[PitchMessageTypeUnknown]
}
}
LayerClassPitch = gopacket.NewLayerClass(layerTypes)
}
/************************************************************************/
// PitchMessage is the interface implemented by every decoded PITCH message.
type PitchMessage interface {
packet.ExchangeMessage
gopacket.DecodingLayer
//embed gopacket.Layer by "inlining"
//workaround for https://github.com/golang/go/issues/6977
LayerType() gopacket.LayerType
LayerContents() []byte
// Base exposes the header fields common to all PITCH messages.
Base() *PitchMessageCommon
}
// PitchMessageCommon holds the header fields shared by all PITCH messages.
type PitchMessageCommon struct {
Length uint8 // message length in bytes (byte 0)
Type PitchMessageType // message type (byte 1)
TimeOffset uint32 // nanosecond offset; absent for Time and SymbolMapping messages
}
// CanDecode reports that this message decodes exactly its own layer type.
func (m *PitchMessageCommon) CanDecode() gopacket.LayerClass {
return m.LayerType()
}
// NextLayerType: PITCH messages carry no nested payload layer.
func (m *PitchMessageCommon) NextLayerType() gopacket.LayerType {
return gopacket.LayerTypeZero
}
// LayerContents returns nil; raw bytes are not retained after decoding.
func (m *PitchMessageCommon) LayerContents() []byte {
return nil
}
// LayerPayload returns nil; see NextLayerType.
func (m *PitchMessageCommon) LayerPayload() []byte {
return nil
}
// LayerType returns the gopacket layer type registered for m's message type.
func (m *PitchMessageCommon) LayerType() gopacket.LayerType {
return m.Type.LayerType()
}
// Base returns the common header, satisfying the PitchMessage interface.
func (m *PitchMessageCommon) Base() *PitchMessageCommon {
return m
}
// Nanoseconds returns the message's time offset in nanoseconds.
func (m *PitchMessageCommon) Nanoseconds() int {
return int(m.TimeOffset)
}
// decodePitchMessage parses the header fields common to all PITCH messages
// (length, type, and - where present - the 4-byte time offset at bytes 2-5).
// It panics if data holds fewer than the two mandatory header bytes.
func decodePitchMessage(data []byte) PitchMessageCommon {
	if len(data) < 2 {
		panic("message too short")
	}
	m := PitchMessageCommon{
		Length: data[0],
		Type:   PitchMessageType(data[1]),
	}
	// Time and SymbolMapping messages carry no time-offset field.
	if m.Type != PitchMessageTypeTime && m.Type != PitchMessageTypeSymbolMapping && len(data) >= 6 {
		m.TimeOffset = binary.LittleEndian.Uint32(data[2:6])
	}
	return m
}
// SerializeTo writes the common PITCH header: length and type bytes,
// followed by the 4-byte little-endian time offset for message types that
// carry one. Errors from the serialize buffer are propagated via errs.PassE.
func (m *PitchMessageCommon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
	errs.PassE(&err)
	buf, err := b.AppendBytes(2)
	errs.CheckE(err)
	buf[0] = m.Length
	buf[1] = byte(m.Type)
	// Mirror decodePitchMessage: Time and SymbolMapping messages carry no
	// time-offset field, so emit it only for the other message types.
	if m.Type != PitchMessageTypeTime && m.Type != PitchMessageTypeSymbolMapping {
		buf, err := b.AppendBytes(4)
		errs.CheckE(err)
		binary.LittleEndian.PutUint32(buf, m.TimeOffset)
	}
	return
}
/************************************************************************/
// PitchMessageUnknown is the catch-all message for unrecognized type bytes;
// only the common header is decoded.
type PitchMessageUnknown struct {
PitchMessageCommon
}
// DecodeFromBytes parses just the common header from data.
func (m *PitchMessageUnknown) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageUnknown{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
/************************************************************************/
// PitchMessageTime carries the session timestamp in whole seconds; other
// messages' TimeOffset values are nanoseconds relative to it.
type PitchMessageTime struct {
PitchMessageCommon
Time uint32 // seconds
}
// Compile-time check: PitchMessageTime provides Seconds().
var _ packet.SecondsMessage = &PitchMessageTime{}
// DecodeFromBytes parses a Time message. Per the serialized layout (see
// SerializeTo: 2 header bytes, no time offset, then the 4-byte Time), the
// seconds value lives at data[2:6] - reading data[1:5] would overlap the
// message-type byte.
func (m *PitchMessageTime) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	*m = PitchMessageTime{
		PitchMessageCommon: decodePitchMessage(data),
		Time:               binary.LittleEndian.Uint32(data[2:6]),
	}
	return nil
}
// SerializeTo writes the common header (2 bytes; Time messages carry no
// time offset) followed by the 4-byte little-endian seconds value.
func (m *PitchMessageTime) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
errs.PassE(&err)
errs.CheckE(m.PitchMessageCommon.SerializeTo(b, opts))
buf, err := b.AppendBytes(4)
errs.CheckE(err)
binary.LittleEndian.PutUint32(buf, m.Time)
return
}
// Seconds returns the timestamp carried by this Time message, in seconds.
func (m *PitchMessageTime) Seconds() int {
	seconds := int(m.Time)
	return seconds
}
/************************************************************************/
// PitchMessageUnitClear instructs listeners to clear all state for the unit;
// it consists of the common header only.
type PitchMessageUnitClear struct {
PitchMessageCommon
}
// DecodeFromBytes parses just the common header from data.
func (m *PitchMessageUnitClear) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageUnitClear{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
// SerializeTo writes the common header; UnitClear has no extra fields.
func (m *PitchMessageUnitClear) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
errs.PassE(&err)
errs.CheckE(m.PitchMessageCommon.SerializeTo(b, opts))
return
}
/************************************************************************/
// PitchMessageAddOrder represents the Add Order message in its short, long
// and expanded wire formats; the variants differ only in field widths and
// offsets (see DecodeFromBytes).
type PitchMessageAddOrder struct {
PitchMessageCommon
OrderId packet.OrderId
Side packet.MarketSide
Size uint32
Symbol packet.OptionId
Price packet.Price
Flags byte
ParticipantId [4]byte // expanded variant only
}
// DecodeFromBytes parses an Add Order message. The common header, order id
// and side share the same offsets in all variants; the remaining field
// offsets depend on the variant. NOTE(review): offsets assumed to follow
// the BATS Multicast PITCH layouts - verify against the spec.
func (m *PitchMessageAddOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageAddOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Side: packet.MarketSideFromByte(data[14]),
}
switch m.Type {
case PitchMessageTypeAddOrderShort:
// Short: 2-byte size, 6-byte symbol, 2-byte price (2 implied decimals).
m.Size = uint32(binary.LittleEndian.Uint16(data[15:17]))
m.Symbol = parseSymbol(data[17:23])
m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[23:25])))
m.Flags = data[25]
case PitchMessageTypeAddOrderLong:
// Long: 4-byte size, 6-byte symbol, 8-byte price (4 implied decimals).
m.Size = binary.LittleEndian.Uint32(data[15:19])
m.Symbol = parseSymbol(data[19:25])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[25:33])))
m.Flags = data[33]
case PitchMessageTypeAddOrderExpanded:
// Expanded: 8-byte symbol and a trailing 4-byte participant id.
m.Size = binary.LittleEndian.Uint32(data[15:19])
m.Symbol = parseSymbol(data[19:27])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[27:35])))
m.Flags = data[35]
copy(m.ParticipantId[:], data[36:40])
}
return nil
}
/************************************************************************/
type PitchMessageOrderExecuted struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
ExecutionId uint64
}
func (m *PitchMessageOrderExecuted) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageOrderExecuted{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Size: binary.LittleEndian.Uint32(data[14:18]),
ExecutionId: binary.LittleEndian.Uint64(data[18:26]),
}
return nil
}
/************************************************************************/
type PitchMessageOrderExecutedAtPriceSize struct {
PitchMessageOrderExecuted
RemainingSize uint32
Price packet.Price
}
func (m *PitchMessageOrderExecutedAtPriceSize) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageOrderExecutedAtPriceSize{
PitchMessageOrderExecuted: PitchMessageOrderExecuted{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Size: binary.LittleEndian.Uint32(data[14:18]),
ExecutionId: binary.LittleEndian.Uint64(data[22:30]),
},
RemainingSize: binary.LittleEndian.Uint32(data[18:22]),
Price: packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[30:38]))),
}
return nil
}
/************************************************************************/
type PitchMessageReduceSize struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
}
func (m *PitchMessageReduceSize) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageReduceSize{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
switch m.Type {
case PitchMessageTypeReduceSizeLong:
m.Size = binary.LittleEndian.Uint32(data[14:18])
case PitchMessageTypeReduceSizeShort:
m.Size = uint32(binary.LittleEndian.Uint16(data[14:16]))
}
return nil
}
/************************************************************************/
type PitchMessageModifyOrder struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
Price packet.Price
Flags byte
}
func (m *PitchMessageModifyOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageModifyOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
switch m.Type {
case PitchMessageTypeModifyOrderLong:
m.Size = binary.LittleEndian.Uint32(data[14:18])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[18:26])))
m.Flags = data[26]
case PitchMessageTypeModifyOrderShort:
m.Size = uint32(binary.LittleEndian.Uint16(data[14:16]))
m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[16:18])))
m.Flags = data[18]
}
return nil
}
/************************************************************************/
type PitchMessageDeleteOrder struct {
PitchMessageCommon
OrderId packet.OrderId
}
func (m *PitchMessageDeleteOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageDeleteOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
return nil
}
/************************************************************************/
type PitchMessageTrade struct {
PitchMessageCommon
OrderId packet.OrderId
Side packet.MarketSide
Size uint32
Symbol packet.OptionId
Price packet.Price
ExecutionId uint64
}
// DecodeFromBytes decodes a PITCH Trade message. The wire variant (short,
// long or expanded) is selected by the message type already parsed into the
// common header; field offsets follow the BATS PITCH specification.
func (m *PitchMessageTrade) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	*m = PitchMessageTrade{
		PitchMessageCommon: decodePitchMessage(data),
		OrderId:            packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
		Side:               packet.MarketSideFromByte(data[14]),
	}
	switch m.Type {
	case PitchMessageTypeTradeShort:
		// BUG FIX: Size is a 2-byte field at offset 15; data[15:16] was a
		// 1-byte slice and made binary.LittleEndian.Uint16 panic.
		m.Size = uint32(binary.LittleEndian.Uint16(data[15:17]))
		m.Symbol = parseSymbol(data[17:23])
		m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[23:25])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[25:33])
	case PitchMessageTypeTradeLong:
		m.Size = binary.LittleEndian.Uint32(data[15:19])
		m.Symbol = parseSymbol(data[19:25])
		m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[25:33])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[33:41])
	case PitchMessageTypeTradeExpanded:
		m.Size = binary.LittleEndian.Uint32(data[15:19])
		m.Symbol = parseSymbol(data[19:27])
		m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[27:35])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[35:43])
	}
	return nil
}
var _ packet.TradeMessage = &PitchMessageTrade{}
func (m *PitchMessageTrade) TradeInfo() (packet.OptionId, packet.Price, int) {
return m.Symbol, m.Price, int(m.Size)
}
/************************************************************************/
type PitchMessageTradeBreak struct {
PitchMessageCommon
ExecutionId uint64
}
func (m *PitchMessageTradeBreak) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageTradeBreak{
PitchMessageCommon: decodePitchMessage(data),
ExecutionId: binary.LittleEndian.Uint64(data[6:14]),
}
return nil
}
/************************************************************************/
type PitchMessageEndOfSession struct {
PitchMessageCommon
}
func (m *PitchMessageEndOfSession) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageEndOfSession{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
/************************************************************************/
type PitchMessageSymbolMapping struct {
PitchMessageCommon
Symbol packet.OptionId
OsiSymbol string
SymbolCondition byte
}
func (m *PitchMessageSymbolMapping) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageSymbolMapping{
PitchMessageCommon: decodePitchMessage(data),
Symbol: parseSymbol(data[2:8]),
OsiSymbol: string(data[8:29]),
SymbolCondition: data[29],
}
return nil
}
/************************************************************************/
func parseSymbol(data []byte) packet.OptionId {
errs.Check(len(data) >= 6)
var b [8]byte
copy(b[:], data)
oid := packet.OptionIdFromUint64(binary.LittleEndian.Uint64(b[:]))
return oid
}
/************************************************************************/
var PitchLayerFactory = &pitchLayerFactory{}
type pitchLayerFactory struct{}
var _ packet.DecodingLayerFactory = &pitchLayerFactory{}
func (f *pitchLayerFactory) Create(layerType gopacket.LayerType) gopacket.DecodingLayer {
d := int(layerType - gopacket.LayerType(PITCH_LAYERS_BASE_NUM))
if d < 0 || d > 255 {
panic("FIXME")
//return gopacket.LayerTypeZero // FIXME
}
m := PitchMessageTypeMetadata[d]
errs.Check(m.LayerType == layerType)
return m.CreateLayer()
}
func (f *pitchLayerFactory) SupportedLayers() gopacket.LayerClass {
return LayerClassPitch
}
packet/bats: add Contents to pitch message
// Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED "AS IS"
// WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.
package bats
import (
"encoding/binary"
"errors"
"code.google.com/p/gopacket"
"my/errs"
"my/itto/verify/packet"
)
/************************************************************************/
var LayerTypePitch = gopacket.RegisterLayerType(12001, gopacket.LayerTypeMetadata{"Pitch", gopacket.DecodeFunc(decodePitch)})
// decodePitch is the gopacket DecodeFunc for the Pitch layer: it reads the
// message type byte at offset 1 and dispatches to that type's decoder.
func decodePitch(data []byte, p gopacket.PacketBuilder) error {
	if len(data) < 2 {
		// BUG FIX: corrected typo in error text ("to short").
		return errors.New("message too short")
	}
	pitchMessageType := PitchMessageType(data[1])
	return pitchMessageType.Decode(data, p)
}
/************************************************************************/
type PitchMessageType uint8
func (a PitchMessageType) Decode(data []byte, p gopacket.PacketBuilder) error {
layer := PitchMessageTypeMetadata[a].CreateLayer()
if err := layer.DecodeFromBytes(data, p); err != nil {
return err
}
p.AddLayer(layer)
return p.NextDecoder(layer.NextLayerType())
}
func (a PitchMessageType) String() string {
return PitchMessageTypeMetadata[a].Name
}
func (a PitchMessageType) LayerType() gopacket.LayerType {
return PitchMessageTypeMetadata[a].LayerType
}
/************************************************************************/
const (
PitchMessageTypeUnknown PitchMessageType = 0 // not in spec, catch-all
PitchMessageTypeTime PitchMessageType = 0x20
PitchMessageTypeAddOrderLong PitchMessageType = 0x21
PitchMessageTypeAddOrderShort PitchMessageType = 0x22
PitchMessageTypeOrderExecuted PitchMessageType = 0x23
PitchMessageTypeOrderExecutedAtPriceSize PitchMessageType = 0x24
PitchMessageTypeReduceSizeLong PitchMessageType = 0x25
PitchMessageTypeReduceSizeShort PitchMessageType = 0x26
PitchMessageTypeModifyOrderLong PitchMessageType = 0x27
PitchMessageTypeModifyOrderShort PitchMessageType = 0x28
PitchMessageTypeDeleteOrder PitchMessageType = 0x29
PitchMessageTypeTradeLong PitchMessageType = 0x2a
PitchMessageTypeTradeShort PitchMessageType = 0x2b
PitchMessageTypeTradeBreak PitchMessageType = 0x2c
PitchMessageTypeEndOfSession PitchMessageType = 0x2d
PitchMessageTypeSymbolMapping PitchMessageType = 0x2e
PitchMessageTypeAddOrderExpanded PitchMessageType = 0x2f
PitchMessageTypeTradeExpanded PitchMessageType = 0x30
PitchMessageTypeTradingStatus PitchMessageType = 0x31
PitchMessageTypeAuctionUpdate PitchMessageType = 0x95
PitchMessageTypeAuctionSummary PitchMessageType = 0x96
PitchMessageTypeUnitClear PitchMessageType = 0x97
PitchMessageTypeRetailPriceImprovement PitchMessageType = 0x98
)
var PitchMessageTypeNames = [256]string{
PitchMessageTypeUnknown: "PitchUnknown",
PitchMessageTypeTime: "PitchTime",
PitchMessageTypeAddOrderLong: "PitchAddOrderLong",
PitchMessageTypeAddOrderShort: "PitchAddOrderShort",
PitchMessageTypeOrderExecuted: "PitchOrderExecuted",
PitchMessageTypeOrderExecutedAtPriceSize: "PitchOrderExecutedAtPriceSize",
PitchMessageTypeReduceSizeLong: "PitchReduceSizeLong",
PitchMessageTypeReduceSizeShort: "PitchReduceSizeShort",
PitchMessageTypeModifyOrderLong: "PitchModifyOrderLong",
PitchMessageTypeModifyOrderShort: "PitchModifyOrderShort",
PitchMessageTypeDeleteOrder: "PitchDeleteOrder",
PitchMessageTypeTradeLong: "PitchTradeLong",
PitchMessageTypeTradeShort: "PitchTradeShort",
PitchMessageTypeTradeBreak: "PitchTradeBreak",
PitchMessageTypeEndOfSession: "PitchEndOfSession",
PitchMessageTypeSymbolMapping: "PitchSymbolMapping",
PitchMessageTypeAddOrderExpanded: "PitchAddOrderExpanded",
PitchMessageTypeTradeExpanded: "PitchTradeExpanded",
PitchMessageTypeTradingStatus: "PitchTradingStatus",
PitchMessageTypeAuctionUpdate: "PitchAuctionUpdate",
PitchMessageTypeAuctionSummary: "PitchAuctionSummary",
PitchMessageTypeUnitClear: "PitchUnitClear",
PitchMessageTypeRetailPriceImprovement: "PitchRetailPriceImprovement",
}
var PitchMessageCreators = [256]func() PitchMessage{
PitchMessageTypeUnknown: func() PitchMessage { return &PitchMessageUnknown{} },
PitchMessageTypeTime: func() PitchMessage { return &PitchMessageTime{} },
PitchMessageTypeAddOrderLong: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeAddOrderShort: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeOrderExecuted: func() PitchMessage { return &PitchMessageOrderExecuted{} },
PitchMessageTypeOrderExecutedAtPriceSize: func() PitchMessage { return &PitchMessageOrderExecutedAtPriceSize{} },
PitchMessageTypeReduceSizeLong: func() PitchMessage { return &PitchMessageReduceSize{} },
PitchMessageTypeReduceSizeShort: func() PitchMessage { return &PitchMessageReduceSize{} },
PitchMessageTypeModifyOrderLong: func() PitchMessage { return &PitchMessageModifyOrder{} },
PitchMessageTypeModifyOrderShort: func() PitchMessage { return &PitchMessageModifyOrder{} },
PitchMessageTypeDeleteOrder: func() PitchMessage { return &PitchMessageDeleteOrder{} },
PitchMessageTypeTradeLong: func() PitchMessage { return &PitchMessageTrade{} },
PitchMessageTypeTradeShort: func() PitchMessage { return &PitchMessageTrade{} },
PitchMessageTypeTradeBreak: func() PitchMessage { return &PitchMessageTradeBreak{} },
PitchMessageTypeEndOfSession: func() PitchMessage { return &PitchMessageEndOfSession{} },
PitchMessageTypeSymbolMapping: func() PitchMessage { return &PitchMessageSymbolMapping{} },
PitchMessageTypeAddOrderExpanded: func() PitchMessage { return &PitchMessageAddOrder{} },
PitchMessageTypeTradeExpanded: func() PitchMessage { return &PitchMessageTrade{} },
//PitchMessageTypeTradingStatus: func() PitchMessage { return &PitchMessageTradingStatus{} },
//PitchMessageTypeAuctionUpdate: func() PitchMessage { return &PitchMessageAuctionUpdate{} },
//PitchMessageTypeAuctionSummary: func() PitchMessage { return &PitchMessageAuctionSummary{} },
PitchMessageTypeUnitClear: func() PitchMessage { return &PitchMessageUnitClear{} },
//PitchMessageTypeRetailPriceImprovement: func() PitchMessage { return &PitchMessageRetailPriceImprovement{} },
}
type EnumMessageTypeMetadata struct {
Name string
LayerType gopacket.LayerType
CreateLayer func() PitchMessage
}
var PitchMessageTypeMetadata [256]EnumMessageTypeMetadata
var LayerClassPitch gopacket.LayerClass
const PITCH_LAYERS_BASE_NUM = 12100
// init registers one gopacket layer type per known PITCH message type
// (offset by PITCH_LAYERS_BASE_NUM), fills the metadata dispatch table,
// routes unknown type bytes to the catch-all Unknown entry, and builds the
// LayerClassPitch layer class from everything registered.
func init() {
	layerTypes := make([]gopacket.LayerType, 0, 256)
	for i := 0; i < 256; i++ {
		if PitchMessageTypeNames[i] == "" {
			continue
		}
		pitchMessageType := PitchMessageType(i)
		layerTypeMetadata := gopacket.LayerTypeMetadata{
			Name:    PitchMessageTypeNames[i],
			Decoder: pitchMessageType,
		}
		layerType := gopacket.RegisterLayerType(PITCH_LAYERS_BASE_NUM+i, layerTypeMetadata)
		layerTypes = append(layerTypes, layerType)
		// Per-iteration copies so each createLayer closure binds its own
		// creator and message type (pre-Go1.22 loop-variable semantics).
		creator := PitchMessageCreators[i]
		createLayer := func() PitchMessage {
			m := creator()
			m.Base().Type = pitchMessageType
			return m
		}
		PitchMessageTypeMetadata[i] = EnumMessageTypeMetadata{
			Name:        PitchMessageTypeNames[i],
			LayerType:   layerType,
			CreateLayer: createLayer,
		}
	}
	// Any type byte without a registered entry decodes as PitchMessageUnknown.
	for i := 0; i < 256; i++ {
		if PitchMessageTypeMetadata[i].Name == "" {
			// unknown message type
			PitchMessageTypeMetadata[i] = PitchMessageTypeMetadata[PitchMessageTypeUnknown]
		}
	}
	LayerClassPitch = gopacket.NewLayerClass(layerTypes)
}
/************************************************************************/
type PitchMessage interface {
packet.ExchangeMessage
gopacket.DecodingLayer
//embed gopacket.Layer by "inlining"
//workaround for https://github.com/golang/go/issues/6977
LayerType() gopacket.LayerType
LayerContents() []byte
Base() *PitchMessageCommon
}
type PitchMessageCommon struct {
Contents []byte
Length uint8
Type PitchMessageType
TimeOffset uint32
}
func (m *PitchMessageCommon) CanDecode() gopacket.LayerClass {
return m.LayerType()
}
func (m *PitchMessageCommon) NextLayerType() gopacket.LayerType {
return gopacket.LayerTypeZero
}
func (m *PitchMessageCommon) LayerContents() []byte {
return m.Contents
}
func (m *PitchMessageCommon) LayerPayload() []byte {
return nil
}
func (m *PitchMessageCommon) LayerType() gopacket.LayerType {
return m.Type.LayerType()
}
func (m *PitchMessageCommon) Base() *PitchMessageCommon {
return m
}
func (m *PitchMessageCommon) Nanoseconds() int {
return int(m.TimeOffset)
}
// decodePitchMessage parses the fields common to every PITCH message: the
// length byte (offset 0), the type byte (offset 1) and, for message types
// that carry one, the 4-byte time offset at offset 2. Contents keeps a
// reference to data, so the caller must not reuse the buffer while the
// decoded message is alive.
func decodePitchMessage(data []byte) PitchMessageCommon {
	if len(data) < 2 {
		// BUG FIX: corrected typo in panic text ("to short").
		panic("message too short")
	}
	m := PitchMessageCommon{
		Contents: data,
		Length:   data[0],
		Type:     PitchMessageType(data[1]),
	}
	// Time and SymbolMapping messages have no time-offset field.
	if m.Type != PitchMessageTypeTime && m.Type != PitchMessageTypeSymbolMapping && len(data) >= 6 {
		m.TimeOffset = binary.LittleEndian.Uint32(data[2:6])
	}
	return m
}
// SerializeTo writes the common message header: length byte, type byte and,
// for message types that carry one, the 4-byte time offset.
func (m *PitchMessageCommon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
	errs.PassE(&err)
	buf, err := b.AppendBytes(2)
	errs.CheckE(err)
	buf[0] = m.Length
	buf[1] = byte(m.Type)
	// BUG FIX: keep symmetry with decodePitchMessage, which skips the
	// time-offset field for SymbolMapping as well as Time; previously a
	// SymbolMapping header would have had 4 spurious bytes appended here.
	if m.Type != PitchMessageTypeTime && m.Type != PitchMessageTypeSymbolMapping {
		buf, err := b.AppendBytes(4)
		errs.CheckE(err)
		binary.LittleEndian.PutUint32(buf, m.TimeOffset)
	}
	return
}
/************************************************************************/
type PitchMessageUnknown struct {
PitchMessageCommon
}
func (m *PitchMessageUnknown) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageUnknown{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
/************************************************************************/
type PitchMessageTime struct {
PitchMessageCommon
Time uint32
}
var _ packet.SecondsMessage = &PitchMessageTime{}
// DecodeFromBytes decodes a PITCH Time message: length byte, type byte,
// then the 4-byte seconds value at offset 2 (the layout SerializeTo writes).
func (m *PitchMessageTime) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	*m = PitchMessageTime{
		PitchMessageCommon: decodePitchMessage(data),
		// BUG FIX: previously read data[1:5], which included the message
		// type byte; SerializeTo writes Time at offset 2, so read [2:6].
		Time: binary.LittleEndian.Uint32(data[2:6]),
	}
	return nil
}
func (m *PitchMessageTime) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
errs.PassE(&err)
errs.CheckE(m.PitchMessageCommon.SerializeTo(b, opts))
buf, err := b.AppendBytes(4)
errs.CheckE(err)
binary.LittleEndian.PutUint32(buf, m.Time)
return
}
func (m *PitchMessageTime) Seconds() int {
return int(m.Time)
}
/************************************************************************/
type PitchMessageUnitClear struct {
PitchMessageCommon
}
func (m *PitchMessageUnitClear) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageUnitClear{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
func (m *PitchMessageUnitClear) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) (err error) {
errs.PassE(&err)
errs.CheckE(m.PitchMessageCommon.SerializeTo(b, opts))
return
}
/************************************************************************/
type PitchMessageAddOrder struct {
PitchMessageCommon
OrderId packet.OrderId
Side packet.MarketSide
Size uint32
Symbol packet.OptionId
Price packet.Price
Flags byte
ParticipantId [4]byte
}
func (m *PitchMessageAddOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageAddOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Side: packet.MarketSideFromByte(data[14]),
}
switch m.Type {
case PitchMessageTypeAddOrderShort:
m.Size = uint32(binary.LittleEndian.Uint16(data[15:17]))
m.Symbol = parseSymbol(data[17:23])
m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[23:25])))
m.Flags = data[25]
case PitchMessageTypeAddOrderLong:
m.Size = binary.LittleEndian.Uint32(data[15:19])
m.Symbol = parseSymbol(data[19:25])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[25:33])))
m.Flags = data[33]
case PitchMessageTypeAddOrderExpanded:
m.Size = binary.LittleEndian.Uint32(data[15:19])
m.Symbol = parseSymbol(data[19:27])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[27:35])))
m.Flags = data[35]
copy(m.ParticipantId[:], data[36:40])
}
return nil
}
/************************************************************************/
type PitchMessageOrderExecuted struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
ExecutionId uint64
}
func (m *PitchMessageOrderExecuted) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageOrderExecuted{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Size: binary.LittleEndian.Uint32(data[14:18]),
ExecutionId: binary.LittleEndian.Uint64(data[18:26]),
}
return nil
}
/************************************************************************/
type PitchMessageOrderExecutedAtPriceSize struct {
PitchMessageOrderExecuted
RemainingSize uint32
Price packet.Price
}
func (m *PitchMessageOrderExecutedAtPriceSize) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageOrderExecutedAtPriceSize{
PitchMessageOrderExecuted: PitchMessageOrderExecuted{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
Size: binary.LittleEndian.Uint32(data[14:18]),
ExecutionId: binary.LittleEndian.Uint64(data[22:30]),
},
RemainingSize: binary.LittleEndian.Uint32(data[18:22]),
Price: packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[30:38]))),
}
return nil
}
/************************************************************************/
type PitchMessageReduceSize struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
}
func (m *PitchMessageReduceSize) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageReduceSize{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
switch m.Type {
case PitchMessageTypeReduceSizeLong:
m.Size = binary.LittleEndian.Uint32(data[14:18])
case PitchMessageTypeReduceSizeShort:
m.Size = uint32(binary.LittleEndian.Uint16(data[14:16]))
}
return nil
}
/************************************************************************/
type PitchMessageModifyOrder struct {
PitchMessageCommon
OrderId packet.OrderId
Size uint32
Price packet.Price
Flags byte
}
func (m *PitchMessageModifyOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageModifyOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
switch m.Type {
case PitchMessageTypeModifyOrderLong:
m.Size = binary.LittleEndian.Uint32(data[14:18])
m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[18:26])))
m.Flags = data[26]
case PitchMessageTypeModifyOrderShort:
m.Size = uint32(binary.LittleEndian.Uint16(data[14:16]))
m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[16:18])))
m.Flags = data[18]
}
return nil
}
/************************************************************************/
type PitchMessageDeleteOrder struct {
PitchMessageCommon
OrderId packet.OrderId
}
func (m *PitchMessageDeleteOrder) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageDeleteOrder{
PitchMessageCommon: decodePitchMessage(data),
OrderId: packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
}
return nil
}
/************************************************************************/
type PitchMessageTrade struct {
PitchMessageCommon
OrderId packet.OrderId
Side packet.MarketSide
Size uint32
Symbol packet.OptionId
Price packet.Price
ExecutionId uint64
}
// DecodeFromBytes decodes a PITCH Trade message. The wire variant (short,
// long or expanded) is selected by the message type already parsed into the
// common header; field offsets follow the BATS PITCH specification.
func (m *PitchMessageTrade) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	*m = PitchMessageTrade{
		PitchMessageCommon: decodePitchMessage(data),
		OrderId:            packet.OrderIdFromUint64(binary.LittleEndian.Uint64(data[6:14])),
		Side:               packet.MarketSideFromByte(data[14]),
	}
	switch m.Type {
	case PitchMessageTypeTradeShort:
		// BUG FIX: Size is a 2-byte field at offset 15; data[15:16] was a
		// 1-byte slice and made binary.LittleEndian.Uint16 panic.
		m.Size = uint32(binary.LittleEndian.Uint16(data[15:17]))
		m.Symbol = parseSymbol(data[17:23])
		m.Price = packet.PriceFrom2Dec(int(binary.LittleEndian.Uint16(data[23:25])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[25:33])
	case PitchMessageTypeTradeLong:
		m.Size = binary.LittleEndian.Uint32(data[15:19])
		m.Symbol = parseSymbol(data[19:25])
		m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[25:33])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[33:41])
	case PitchMessageTypeTradeExpanded:
		m.Size = binary.LittleEndian.Uint32(data[15:19])
		m.Symbol = parseSymbol(data[19:27])
		m.Price = packet.PriceFrom4Dec(int(binary.LittleEndian.Uint64(data[27:35])))
		m.ExecutionId = binary.LittleEndian.Uint64(data[35:43])
	}
	return nil
}
var _ packet.TradeMessage = &PitchMessageTrade{}
func (m *PitchMessageTrade) TradeInfo() (packet.OptionId, packet.Price, int) {
return m.Symbol, m.Price, int(m.Size)
}
/************************************************************************/
type PitchMessageTradeBreak struct {
PitchMessageCommon
ExecutionId uint64
}
func (m *PitchMessageTradeBreak) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageTradeBreak{
PitchMessageCommon: decodePitchMessage(data),
ExecutionId: binary.LittleEndian.Uint64(data[6:14]),
}
return nil
}
/************************************************************************/
type PitchMessageEndOfSession struct {
PitchMessageCommon
}
func (m *PitchMessageEndOfSession) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
*m = PitchMessageEndOfSession{
PitchMessageCommon: decodePitchMessage(data),
}
return nil
}
/************************************************************************/
type PitchMessageSymbolMapping struct {
PitchMessageCommon
Symbol packet.OptionId
OsiSymbol string
SymbolCondition byte
}
// DecodeFromBytes decodes a PITCH SymbolMapping message. Unlike most
// messages this one carries no time-offset field: the 6-byte symbol starts
// directly at offset 2, followed by a 21-byte OSI symbol string and a
// single condition byte at offset 29.
func (m *PitchMessageSymbolMapping) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	*m = PitchMessageSymbolMapping{
		PitchMessageCommon: decodePitchMessage(data),
		Symbol:             parseSymbol(data[2:8]),
		OsiSymbol:          string(data[8:29]),
		SymbolCondition:    data[29],
	}
	return nil
}
/************************************************************************/
// parseSymbol converts a 6- or 8-byte little-endian symbol field into an
// OptionId, zero-padding short symbols up to a full 8 bytes.
func parseSymbol(data []byte) packet.OptionId {
	errs.Check(len(data) >= 6)
	var padded [8]byte
	copy(padded[:], data)
	return packet.OptionIdFromUint64(binary.LittleEndian.Uint64(padded[:]))
}
/************************************************************************/
var PitchLayerFactory = &pitchLayerFactory{}
type pitchLayerFactory struct{}
var _ packet.DecodingLayerFactory = &pitchLayerFactory{}
// Create returns a fresh decoding layer for the given PITCH layer type.
// Layer types are allocated in the contiguous range
// [PITCH_LAYERS_BASE_NUM, PITCH_LAYERS_BASE_NUM+255]; anything outside
// that range is a programming error.
func (f *pitchLayerFactory) Create(layerType gopacket.LayerType) gopacket.DecodingLayer {
	d := int(layerType - gopacket.LayerType(PITCH_LAYERS_BASE_NUM))
	if d < 0 || d > 255 {
		panic("FIXME") // NOTE(review): should return a zero/error layer instead of panicking — confirm intended behavior
		//return gopacket.LayerTypeZero // FIXME
	}
	m := PitchMessageTypeMetadata[d]
	errs.Check(m.LayerType == layerType)
	return m.CreateLayer()
}

// SupportedLayers reports the layer class covering every registered PITCH
// message layer type.
func (f *pitchLayerFactory) SupportedLayers() gopacket.LayerClass {
	return LayerClassPitch
}
|
package html
//#include "helper.h"
import "C"
import (
"unsafe"
"os"
"gokogiri/xml"
. "gokogiri/util"
"bytes"
)
var fragmentWrapperStart = []byte("<div>")
var fragmentWrapperEnd = []byte("</div>")
var fragmentWrapper = []byte("<html><body>")
var bodySigBytes = []byte("<body")
var ErrFailParseFragment = os.NewError("failed to parse html fragment")
var ErrEmptyFragment = os.NewError("empty html fragment")
const initChildrenNumber = 4
// parsefragment parses an html fragment.
//
// When node is nil the content is wrapped in "<html><body>" and parsed
// within the given document; otherwise it is wrapped in "<div>...</div>"
// and parsed in the context of node. The returned fragment is bookkept on
// the document so it is released together with it.
func parsefragment(document xml.Document, node *xml.XmlNode, content, url []byte, options int) (fragment *xml.DocumentFragment, err os.Error) {
	//set up pointers before calling the C function
	var contentPtr, urlPtr unsafe.Pointer
	if len(url) > 0 {
		urlPtr = unsafe.Pointer(&url[0])
	}
	var root xml.Node
	if node == nil {
		containBody := (bytes.Index(content, bodySigBytes) >= 0)
		// BUG FIX: build the wrapped buffer in a fresh slice; appending to
		// the shared package-level fragmentWrapper slice risks clobbering
		// its backing array across calls.
		wrapped := make([]byte, 0, len(fragmentWrapper)+len(content))
		wrapped = append(wrapped, fragmentWrapper...)
		wrapped = append(wrapped, content...)
		contentPtr = unsafe.Pointer(&wrapped[0])
		contentLen := len(wrapped)
		inEncoding := document.InputEncoding()
		var encodingPtr unsafe.Pointer
		if len(inEncoding) > 0 {
			encodingPtr = unsafe.Pointer(&inEncoding[0])
		}
		htmlPtr := C.htmlParseFragmentAsDoc(document.DocPtr(), contentPtr, C.int(contentLen), urlPtr, encodingPtr, C.int(options), nil, 0)
		//Note we've parsed the fragment within the given document
		//the root is not the root of the document; rather it's the root of the subtree from the fragment
		html := xml.NewNode(unsafe.Pointer(htmlPtr), document)
		if html == nil {
			err = ErrFailParseFragment
			return
		}
		root = html
		if !containBody {
			root = html.FirstChild()
			html.Remove() //remove html otherwise it's leaked
		}
	} else {
		//wrap the content so the parser sees a single element
		wrapped := make([]byte, 0, len(fragmentWrapperStart)+len(content)+len(fragmentWrapperEnd))
		wrapped = append(wrapped, fragmentWrapperStart...)
		wrapped = append(wrapped, content...)
		wrapped = append(wrapped, fragmentWrapperEnd...)
		contentPtr = unsafe.Pointer(&wrapped[0])
		contentLen := len(wrapped)
		rootElementPtr := C.htmlParseFragment(node.NodePtr(), contentPtr, C.int(contentLen), urlPtr, C.int(options), nil, 0)
		if rootElementPtr == nil {
			// BUG FIX: previously a failed in-context parse wrapped a nil
			// pointer in an xml.Node instead of reporting an error.
			err = ErrFailParseFragment
			return
		}
		root = xml.NewNode(unsafe.Pointer(rootElementPtr), document)
	}
	fragment = &xml.DocumentFragment{}
	fragment.Node = root
	fragment.InEncoding = document.InputEncoding()
	fragment.OutEncoding = document.OutputEncoding()
	document.BookkeepFragment(fragment)
	return
}
func ParseFragment(content, inEncoding, url []byte, options int, outEncoding []byte) (fragment *xml.DocumentFragment, err os.Error) {
inEncoding = AppendCStringTerminator(inEncoding)
outEncoding = AppendCStringTerminator(outEncoding)
document := CreateEmptyDocument(inEncoding, outEncoding)
fragment, err = parsefragment(document, nil, content, url, options)
return
}
retry if in-document parsing fails
package html
//#include "helper.h"
import "C"
import (
"unsafe"
"os"
"gokogiri/xml"
. "gokogiri/util"
"bytes"
)
var fragmentWrapperStart = []byte("<div>")
var fragmentWrapperEnd = []byte("</div>")
var fragmentWrapper = []byte("<html><body>")
var bodySigBytes = []byte("<body")
var ErrFailParseFragment = os.NewError("failed to parse html fragment")
var ErrEmptyFragment = os.NewError("empty html fragment")
const initChildrenNumber = 4
// parsefragment parses an html fragment.
//
// When node is nil the content is wrapped in "<html><body>" and parsed
// within the given document. Otherwise it is wrapped in "<div>...</div>"
// and parsed in the context of node; if that in-context parse fails, we
// retry once as an in-document parse before giving up. The returned
// fragment is bookkept on the document so it is released together with it.
func parsefragment(document xml.Document, node *xml.XmlNode, content, url []byte, options int) (fragment *xml.DocumentFragment, err os.Error) {
	//set up pointers before calling the C function
	var contentPtr, urlPtr unsafe.Pointer
	if len(url) > 0 {
		urlPtr = unsafe.Pointer(&url[0])
	}
	var root xml.Node
	if node == nil {
		containBody := (bytes.Index(content, bodySigBytes) >= 0)
		content = append(fragmentWrapper, content...)
		contentPtr = unsafe.Pointer(&content[0])
		contentLen := len(content)
		inEncoding := document.InputEncoding()
		var encodingPtr unsafe.Pointer
		if len(inEncoding) > 0 {
			encodingPtr = unsafe.Pointer(&inEncoding[0])
		}
		htmlPtr := C.htmlParseFragmentAsDoc(document.DocPtr(), contentPtr, C.int(contentLen), urlPtr, encodingPtr, C.int(options), nil, 0)
		//Note we've parsed the fragment within the given document
		//the root is not the root of the document; rather it's the root of the subtree from the fragment
		html := xml.NewNode(unsafe.Pointer(htmlPtr), document)
		if html == nil {
			err = ErrFailParseFragment
			return
		}
		root = html
		if !containBody {
			root = html.FirstChild()
			html.Remove() //remove html otherwise it's leaked
		}
	} else {
		//wrap the content; use a new slice so the original content is kept
		//intact for the retry path below
		newContent := append(fragmentWrapperStart, content...)
		newContent = append(newContent, fragmentWrapperEnd...)
		contentPtr = unsafe.Pointer(&newContent[0])
		contentLen := len(newContent)
		rootElementPtr := C.htmlParseFragment(node.NodePtr(), contentPtr, C.int(contentLen), urlPtr, C.int(options), nil, 0)
		if rootElementPtr == nil {
			//try to parse it as a doc
			fragment, err = parsefragment(document, nil, content, url, options)
			return
		}
		// BUG FIX: removed a second, identical nil check that was
		// unreachable after the retry branch above.
		root = xml.NewNode(unsafe.Pointer(rootElementPtr), document)
	}
	fragment = &xml.DocumentFragment{}
	fragment.Node = root
	fragment.InEncoding = document.InputEncoding()
	fragment.OutEncoding = document.OutputEncoding()
	document.BookkeepFragment(fragment)
	return
}
// ParseFragment parses an html fragment into a fresh, empty document using
// the supplied input/output encodings (NUL-terminated for the C layer).
func ParseFragment(content, inEncoding, url []byte, options int, outEncoding []byte) (fragment *xml.DocumentFragment, err os.Error) {
	document := CreateEmptyDocument(
		AppendCStringTerminator(inEncoding),
		AppendCStringTerminator(outEncoding))
	return parsefragment(document, nil, content, url, options)
}
|
package applications
import (
"context"
"errors"
"fmt"
"log"
"net/http"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/manicminer/hamilton/msgraph"
"github.com/manicminer/hamilton/odata"
"github.com/hashicorp/terraform-provider-azuread/internal/clients"
"github.com/hashicorp/terraform-provider-azuread/internal/helpers"
"github.com/hashicorp/terraform-provider-azuread/internal/services/applications/parse"
"github.com/hashicorp/terraform-provider-azuread/internal/tf"
"github.com/hashicorp/terraform-provider-azuread/internal/validate"
)
// applicationCertificateResource returns the schema for the
// azuread_application_certificate resource. Every attribute is ForceNew: a
// certificate credential cannot be modified in place, only replaced.
func applicationCertificateResource() *schema.Resource {
	return &schema.Resource{
		CreateContext: applicationCertificateResourceCreate,
		ReadContext:   applicationCertificateResourceRead,
		DeleteContext: applicationCertificateResourceDelete,

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
			Read:   schema.DefaultTimeout(5 * time.Minute),
			Update: schema.DefaultTimeout(5 * time.Minute),
			Delete: schema.DefaultTimeout(5 * time.Minute),
		},

		// Validate the composite credential ID up front when importing.
		Importer: tf.ValidateResourceIDPriorToImport(func(id string) error {
			_, err := parse.CertificateID(id)
			return err
		}),

		Schema: map[string]*schema.Schema{
			"application_object_id": {
				Description:      "The object ID of the application for which this certificate should be created",
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				ValidateDiagFunc: validate.UUID,
			},

			"encoding": {
				Description: "Specifies the encoding used for the supplied certificate data",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				Default:     "pem",
				ValidateFunc: validation.StringInSlice([]string{
					"base64",
					"hex",
					"pem",
				}, false),
			},

			"key_id": {
				Description:      "A UUID used to uniquely identify this certificate. If omitted, a random UUID will be automatically generated",
				Type:             schema.TypeString,
				Optional:         true,
				Computed:         true,
				ForceNew:         true,
				ValidateDiagFunc: validate.UUID,
			},

			"start_date": {
				// Typo fix: "are use" -> "are used".
				Description:  "The start date from which the certificate is valid, formatted as an RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date and time are used",
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ForceNew:     true,
				ValidateFunc: validation.IsRFC3339Time,
			},

			"end_date": {
				Description:   "The end date until which the certificate is valid, formatted as an RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If omitted, the API will decide a suitable expiry date, which is typically around 2 years from the start date",
				Type:          schema.TypeString,
				Optional:      true,
				Computed:      true,
				ForceNew:      true,
				ConflictsWith: []string{"end_date_relative"},
				ValidateFunc:  validation.IsRFC3339Time,
			},

			"end_date_relative": {
				Description:      "A relative duration for which the certificate is valid until, for example `240h` (10 days) or `2400h30m`",
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				ConflictsWith:    []string{"end_date"},
				ValidateDiagFunc: validate.NoEmptyStrings,
			},

			"type": {
				Description: "The type of key/certificate",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				ValidateFunc: validation.StringInSlice([]string{
					"AsymmetricX509Cert",
					"Symmetric",
				}, false),
			},

			"value": {
				// Typo fix: "argumen" -> "argument".
				Description: "The certificate data, which can be PEM encoded, base64 encoded DER or hexadecimal encoded DER. See also the `encoding` argument",
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Sensitive:   true,
			},
		},
	}
}
// applicationCertificateResourceCreate generates a key credential from the
// resource data and appends it to the target application's key credentials.
func applicationCertificateResourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient
	objectId := d.Get("application_object_id").(string)
	// Build the msgraph.KeyCredential from the certificate value, encoding,
	// dates and type supplied in the resource data.
	credential, err := helpers.KeyCredentialForResource(d)
	if err != nil {
		attr := ""
		// CredentialError carries the schema attribute responsible for the
		// failure so the diagnostic can point at it.
		if kerr, ok := err.(helpers.CredentialError); ok {
			attr = kerr.Attr()
		}
		return tf.ErrorDiagPathF(err, attr, "Generating certificate credentials for application with object ID %q", objectId)
	}
	if credential.KeyId == nil {
		return tf.ErrorDiagF(errors.New("keyId for certificate credential is nil"), "Creating certificate credential")
	}
	id := parse.NewCredentialID(objectId, "certificate", *credential.KeyId)
	// Serialize credential changes per application so concurrent operations
	// don't overwrite each other's keyCredentials list.
	tf.LockByName(applicationResourceName, id.ObjectId)
	defer tf.UnlockByName(applicationResourceName, id.ObjectId)
	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			return tf.ErrorDiagPathF(nil, "application_object_id", "Application with object ID %q was not found", id.ObjectId)
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving application with object ID %q", id.ObjectId)
	}
	// The update replaces the whole keyCredentials collection, so copy the
	// existing credentials and append the new one. A duplicate key ID means
	// the credential already exists and should be imported instead.
	newCredentials := make([]msgraph.KeyCredential, 0)
	if app.KeyCredentials != nil {
		for _, cred := range *app.KeyCredentials {
			if cred.KeyId != nil && strings.EqualFold(*cred.KeyId, *credential.KeyId) {
				return tf.ImportAsExistsDiag("azuread_application_certificate", id.String())
			}
			newCredentials = append(newCredentials, cred)
		}
	}
	newCredentials = append(newCredentials, *credential)
	properties := msgraph.Application{
		DirectoryObject: msgraph.DirectoryObject{
			ID: &id.ObjectId,
		},
		KeyCredentials: &newCredentials,
	}
	if _, err := client.Update(ctx, properties); err != nil {
		return tf.ErrorDiagF(err, "Adding certificate for application with object ID %q", id.ObjectId)
	}
	// Wait for the credential to appear in the application manifest, this can take several minutes
	// NOTE(review): the deadline ok-flag is discarded; if ctx had no deadline
	// the computed Timeout would be negative — confirm the SDK always sets one.
	timeout, _ := ctx.Deadline()
	polledForCredential, err := (&resource.StateChangeConf{
		Pending:    []string{"Waiting"},
		Target:     []string{"Done"},
		Timeout:    time.Until(timeout),
		MinTimeout: 1 * time.Second,
		// Require several consecutive "Done" results before succeeding.
		ContinuousTargetOccurence: 5,
		Refresh: func() (interface{}, string, error) {
			app, _, err := client.Get(ctx, id.ObjectId, odata.Query{})
			if err != nil {
				return nil, "Error", err
			}
			if app.KeyCredentials != nil {
				for _, cred := range *app.KeyCredentials {
					if cred.KeyId != nil && strings.EqualFold(*cred.KeyId, id.KeyId) {
						return &cred, "Done", nil
					}
				}
			}
			return nil, "Waiting", nil
		},
	}).WaitForStateContext(ctx)
	if err != nil {
		return tf.ErrorDiagF(err, "Waiting for certificate credential for application with object ID %q", id.ObjectId)
	} else if polledForCredential == nil {
		return tf.ErrorDiagF(errors.New("certificate credential not found in application manifest"), "Waiting for certificate credential for application with object ID %q", id.ObjectId)
	}
	d.SetId(id.String())
	return applicationCertificateResourceRead(ctx, d, meta)
}
// applicationCertificateResourceRead refreshes state for a certificate
// credential, removing it from state when the application or credential is gone.
func applicationCertificateResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient

	id, err := parse.CertificateID(d.Id())
	if err != nil {
		return tf.ErrorDiagPathF(err, "id", "Parsing certificate credential with ID %q", d.Id())
	}

	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			log.Printf("[DEBUG] Application with ID %q for %s credential %q was not found - removing from state!", id.ObjectId, id.KeyType, id.KeyId)
			d.SetId("")
			return nil
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving Application with object ID %q", id.ObjectId)
	}

	// Locate the matching credential by key ID (case-insensitive).
	var credential *msgraph.KeyCredential
	if app.KeyCredentials != nil {
		creds := *app.KeyCredentials
		for i := range creds {
			if creds[i].KeyId != nil && strings.EqualFold(*creds[i].KeyId, id.KeyId) {
				credential = &creds[i]
				break
			}
		}
	}
	if credential == nil {
		log.Printf("[DEBUG] Certificate credential %q (ID %q) was not found - removing from state!", id.KeyId, id.ObjectId)
		d.SetId("")
		return nil
	}

	tf.Set(d, "application_object_id", id.ObjectId)
	tf.Set(d, "key_id", id.KeyId)
	tf.Set(d, "type", credential.Type)

	// Dates are surfaced as RFC3339 strings, or empty when absent.
	formatTime := func(t *time.Time) string {
		if t == nil {
			return ""
		}
		return t.Format(time.RFC3339)
	}
	tf.Set(d, "start_date", formatTime(credential.StartDateTime))
	tf.Set(d, "end_date", formatTime(credential.EndDateTime))

	return nil
}
// applicationCertificateResourceDelete removes the certificate credential from
// the application's key credential list.
func applicationCertificateResourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient

	id, err := parse.CertificateID(d.Id())
	if err != nil {
		return tf.ErrorDiagPathF(err, "id", "Parsing certificate credential with ID %q", d.Id())
	}

	// Serialize credential changes per application.
	tf.LockByName(applicationResourceName, id.ObjectId)
	defer tf.UnlockByName(applicationResourceName, id.ObjectId)

	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			return tf.ErrorDiagPathF(fmt.Errorf("Application was not found"), "application_object_id", "Retrieving Application with ID %q", id.ObjectId)
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving application with object ID %q", id.ObjectId)
	}

	// Keep every credential except the one being removed.
	newCredentials := make([]msgraph.KeyCredential, 0)
	if app.KeyCredentials != nil {
		for _, cred := range *app.KeyCredentials {
			if cred.KeyId == nil || strings.EqualFold(*cred.KeyId, id.KeyId) {
				continue
			}
			newCredentials = append(newCredentials, cred)
		}
	}

	properties := msgraph.Application{
		DirectoryObject: msgraph.DirectoryObject{
			ID: &id.ObjectId,
		},
		KeyCredentials: &newCredentials,
	}
	if _, err := client.Update(ctx, properties); err != nil {
		return tf.ErrorDiagF(err, "Removing certificate credential %q from application with object ID %q", id.KeyId, id.ObjectId)
	}

	return nil
}
azuread_application_certificate: check for consistency on deletion
package applications
import (
"context"
"errors"
"fmt"
"log"
"net/http"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/manicminer/hamilton/msgraph"
"github.com/manicminer/hamilton/odata"
"github.com/hashicorp/terraform-provider-azuread/internal/clients"
"github.com/hashicorp/terraform-provider-azuread/internal/helpers"
"github.com/hashicorp/terraform-provider-azuread/internal/services/applications/parse"
"github.com/hashicorp/terraform-provider-azuread/internal/tf"
"github.com/hashicorp/terraform-provider-azuread/internal/utils"
"github.com/hashicorp/terraform-provider-azuread/internal/validate"
)
// applicationCertificateResource returns the schema for the
// azuread_application_certificate resource. Every attribute is ForceNew: a
// certificate credential cannot be modified in place, only replaced.
func applicationCertificateResource() *schema.Resource {
	return &schema.Resource{
		CreateContext: applicationCertificateResourceCreate,
		ReadContext:   applicationCertificateResourceRead,
		DeleteContext: applicationCertificateResourceDelete,

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
			Read:   schema.DefaultTimeout(5 * time.Minute),
			Update: schema.DefaultTimeout(5 * time.Minute),
			Delete: schema.DefaultTimeout(5 * time.Minute),
		},

		// Validate the composite credential ID up front when importing.
		Importer: tf.ValidateResourceIDPriorToImport(func(id string) error {
			_, err := parse.CertificateID(id)
			return err
		}),

		Schema: map[string]*schema.Schema{
			"application_object_id": {
				Description:      "The object ID of the application for which this certificate should be created",
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				ValidateDiagFunc: validate.UUID,
			},

			"encoding": {
				Description: "Specifies the encoding used for the supplied certificate data",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				Default:     "pem",
				ValidateFunc: validation.StringInSlice([]string{
					"base64",
					"hex",
					"pem",
				}, false),
			},

			"key_id": {
				Description:      "A UUID used to uniquely identify this certificate. If omitted, a random UUID will be automatically generated",
				Type:             schema.TypeString,
				Optional:         true,
				Computed:         true,
				ForceNew:         true,
				ValidateDiagFunc: validate.UUID,
			},

			"start_date": {
				// Typo fix: "are use" -> "are used".
				Description:  "The start date from which the certificate is valid, formatted as an RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date and time are used",
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ForceNew:     true,
				ValidateFunc: validation.IsRFC3339Time,
			},

			"end_date": {
				Description:   "The end date until which the certificate is valid, formatted as an RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If omitted, the API will decide a suitable expiry date, which is typically around 2 years from the start date",
				Type:          schema.TypeString,
				Optional:      true,
				Computed:      true,
				ForceNew:      true,
				ConflictsWith: []string{"end_date_relative"},
				ValidateFunc:  validation.IsRFC3339Time,
			},

			"end_date_relative": {
				Description:      "A relative duration for which the certificate is valid until, for example `240h` (10 days) or `2400h30m`",
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				ConflictsWith:    []string{"end_date"},
				ValidateDiagFunc: validate.NoEmptyStrings,
			},

			"type": {
				Description: "The type of key/certificate",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				ValidateFunc: validation.StringInSlice([]string{
					"AsymmetricX509Cert",
					"Symmetric",
				}, false),
			},

			"value": {
				// Typo fix: "argumen" -> "argument".
				Description: "The certificate data, which can be PEM encoded, base64 encoded DER or hexadecimal encoded DER. See also the `encoding` argument",
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Sensitive:   true,
			},
		},
	}
}
// applicationCertificateResourceCreate generates a key credential from the
// resource data and appends it to the target application's key credentials.
func applicationCertificateResourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient
	objectId := d.Get("application_object_id").(string)
	// Build the msgraph.KeyCredential from the certificate value, encoding,
	// dates and type supplied in the resource data.
	credential, err := helpers.KeyCredentialForResource(d)
	if err != nil {
		attr := ""
		// CredentialError carries the schema attribute responsible for the
		// failure so the diagnostic can point at it.
		if kerr, ok := err.(helpers.CredentialError); ok {
			attr = kerr.Attr()
		}
		return tf.ErrorDiagPathF(err, attr, "Generating certificate credentials for application with object ID %q", objectId)
	}
	if credential.KeyId == nil {
		return tf.ErrorDiagF(errors.New("keyId for certificate credential is nil"), "Creating certificate credential")
	}
	id := parse.NewCredentialID(objectId, "certificate", *credential.KeyId)
	// Serialize credential changes per application so concurrent operations
	// don't overwrite each other's keyCredentials list.
	tf.LockByName(applicationResourceName, id.ObjectId)
	defer tf.UnlockByName(applicationResourceName, id.ObjectId)
	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			return tf.ErrorDiagPathF(nil, "application_object_id", "Application with object ID %q was not found", id.ObjectId)
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving application with object ID %q", id.ObjectId)
	}
	// The update replaces the whole keyCredentials collection, so copy the
	// existing credentials and append the new one. A duplicate key ID means
	// the credential already exists and should be imported instead.
	newCredentials := make([]msgraph.KeyCredential, 0)
	if app.KeyCredentials != nil {
		for _, cred := range *app.KeyCredentials {
			if cred.KeyId != nil && strings.EqualFold(*cred.KeyId, *credential.KeyId) {
				return tf.ImportAsExistsDiag("azuread_application_certificate", id.String())
			}
			newCredentials = append(newCredentials, cred)
		}
	}
	newCredentials = append(newCredentials, *credential)
	properties := msgraph.Application{
		DirectoryObject: msgraph.DirectoryObject{
			ID: &id.ObjectId,
		},
		KeyCredentials: &newCredentials,
	}
	if _, err := client.Update(ctx, properties); err != nil {
		return tf.ErrorDiagF(err, "Adding certificate for application with object ID %q", id.ObjectId)
	}
	// Wait for the credential to appear in the application manifest, this can take several minutes
	// NOTE(review): the deadline ok-flag is discarded; if ctx had no deadline
	// the computed Timeout would be negative — confirm the SDK always sets one.
	timeout, _ := ctx.Deadline()
	polledForCredential, err := (&resource.StateChangeConf{
		Pending:    []string{"Waiting"},
		Target:     []string{"Done"},
		Timeout:    time.Until(timeout),
		MinTimeout: 1 * time.Second,
		// Require several consecutive "Done" results before succeeding.
		ContinuousTargetOccurence: 5,
		Refresh: func() (interface{}, string, error) {
			app, _, err := client.Get(ctx, id.ObjectId, odata.Query{})
			if err != nil {
				return nil, "Error", err
			}
			if app.KeyCredentials != nil {
				for _, cred := range *app.KeyCredentials {
					if cred.KeyId != nil && strings.EqualFold(*cred.KeyId, id.KeyId) {
						return &cred, "Done", nil
					}
				}
			}
			return nil, "Waiting", nil
		},
	}).WaitForStateContext(ctx)
	if err != nil {
		return tf.ErrorDiagF(err, "Waiting for certificate credential for application with object ID %q", id.ObjectId)
	} else if polledForCredential == nil {
		return tf.ErrorDiagF(errors.New("certificate credential not found in application manifest"), "Waiting for certificate credential for application with object ID %q", id.ObjectId)
	}
	d.SetId(id.String())
	return applicationCertificateResourceRead(ctx, d, meta)
}
// applicationCertificateResourceRead refreshes state for a certificate
// credential, removing it from state when the application or credential is gone.
func applicationCertificateResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient

	id, err := parse.CertificateID(d.Id())
	if err != nil {
		return tf.ErrorDiagPathF(err, "id", "Parsing certificate credential with ID %q", d.Id())
	}

	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			log.Printf("[DEBUG] Application with ID %q for %s credential %q was not found - removing from state!", id.ObjectId, id.KeyType, id.KeyId)
			d.SetId("")
			return nil
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving Application with object ID %q", id.ObjectId)
	}

	// Look the credential up by key ID; a missing credential means it was
	// deleted out-of-band.
	credential := helpers.GetKeyCredential(app.KeyCredentials, id.KeyId)
	if credential == nil {
		log.Printf("[DEBUG] Certificate credential %q (ID %q) was not found - removing from state!", id.KeyId, id.ObjectId)
		d.SetId("")
		return nil
	}

	tf.Set(d, "application_object_id", id.ObjectId)
	tf.Set(d, "key_id", id.KeyId)
	tf.Set(d, "type", credential.Type)

	// Dates are surfaced as RFC3339 strings, or empty when absent.
	formatTime := func(t *time.Time) string {
		if t == nil {
			return ""
		}
		return t.Format(time.RFC3339)
	}
	tf.Set(d, "start_date", formatTime(credential.StartDateTime))
	tf.Set(d, "end_date", formatTime(credential.EndDateTime))

	return nil
}
// applicationCertificateResourceDelete removes the certificate credential from
// the application and waits until it no longer appears in the manifest.
func applicationCertificateResourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*clients.Client).Applications.ApplicationsClient
	id, err := parse.CertificateID(d.Id())
	if err != nil {
		return tf.ErrorDiagPathF(err, "id", "Parsing certificate credential with ID %q", d.Id())
	}
	// Serialize credential changes per application.
	tf.LockByName(applicationResourceName, id.ObjectId)
	defer tf.UnlockByName(applicationResourceName, id.ObjectId)
	app, status, err := client.Get(ctx, id.ObjectId, odata.Query{})
	if err != nil {
		if status == http.StatusNotFound {
			return tf.ErrorDiagPathF(fmt.Errorf("Application was not found"), "application_object_id", "Retrieving Application with ID %q", id.ObjectId)
		}
		return tf.ErrorDiagPathF(err, "application_object_id", "Retrieving application with object ID %q", id.ObjectId)
	}
	// Rebuild the credential list without the credential being deleted; the
	// update replaces the whole keyCredentials collection.
	newCredentials := make([]msgraph.KeyCredential, 0)
	if app.KeyCredentials != nil {
		for _, cred := range *app.KeyCredentials {
			if cred.KeyId != nil && !strings.EqualFold(*cred.KeyId, id.KeyId) {
				newCredentials = append(newCredentials, cred)
			}
		}
	}
	properties := msgraph.Application{
		DirectoryObject: msgraph.DirectoryObject{
			ID: &id.ObjectId,
		},
		KeyCredentials: &newCredentials,
	}
	if _, err := client.Update(ctx, properties); err != nil {
		return tf.ErrorDiagF(err, "Removing certificate credential %q from application with object ID %q", id.KeyId, id.ObjectId)
	}
	// Wait for application certificate to be deleted
	if err := helpers.WaitForDeletion(ctx, func(ctx context.Context) (*bool, error) {
		// NOTE(review): this mutates shared client configuration, affecting
		// later calls on the same client, not just this poll — confirm intended.
		client.BaseClient.DisableRetries = true
		app, _, err := client.Get(ctx, id.ObjectId, odata.Query{})
		if err != nil {
			return nil, err
		}
		// Still present -> true (keep waiting); gone -> false (done).
		credential := helpers.GetKeyCredential(app.KeyCredentials, id.KeyId)
		if credential == nil {
			return utils.Bool(false), nil
		}
		return utils.Bool(true), nil
	}); err != nil {
		return tf.ErrorDiagF(err, "Waiting for deletion of certificate credential %q from application with object ID %q", id.KeyId, id.ObjectId)
	}
	return nil
}
|
package http
import (
"net"
"net/http"
)
// consumer owns a hijacked client connection used to stream events.
type consumer struct {
	conn  net.Conn  // raw connection taken over from the HTTP server
	close chan bool // signalling channel for shutting the consumer down
}
// newConsumer hijacks the connection underlying resp, writes a raw
// event-stream response header, and returns a consumer owning the connection.
//
// NOTE(review): resp is assumed to implement http.Hijacker (true for HTTP/1.x
// server connections); the type assertion panics otherwise — confirm callers.
func newConsumer(resp http.ResponseWriter) (*consumer, error) {
	conn, _, err := resp.(http.Hijacker).Hijack()
	if err != nil {
		return nil, err
	}
	// HTTP/1.1 requires CRLF line terminators in the status line and headers;
	// the previous bare "\n" is not spec-compliant. Also surface write errors
	// instead of silently dropping them.
	if _, err := conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: text/event-stream\r\n\r\n")); err != nil {
		conn.Close()
		return nil, err
	}
	return &consumer{conn, make(chan bool)}, nil
}
Turn off nginx buffering (closes #2)
package http
import (
"net"
"net/http"
)
// consumer owns a hijacked client connection used to stream events.
type consumer struct {
	conn  net.Conn  // raw connection taken over from the HTTP server
	close chan bool // signalling channel for shutting the consumer down
}
// newConsumer hijacks the connection underlying resp, writes a raw
// event-stream response header (with X-Accel-Buffering: no so nginx does not
// buffer the stream), and returns a consumer owning the connection.
//
// NOTE(review): resp is assumed to implement http.Hijacker (true for HTTP/1.x
// server connections); the type assertion panics otherwise — confirm callers.
func newConsumer(resp http.ResponseWriter) (*consumer, error) {
	conn, _, err := resp.(http.Hijacker).Hijack()
	if err != nil {
		return nil, err
	}
	// HTTP/1.1 requires CRLF line terminators in the status line and headers;
	// the previous bare "\n" is not spec-compliant. Also surface write errors
	// instead of silently dropping them.
	if _, err := conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Type: text/event-stream\r\nX-Accel-Buffering: no\r\n\r\n")); err != nil {
		conn.Close()
		return nil, err
	}
	return &consumer{conn, make(chan bool)}, nil
}
|
package http
import (
"bufio"
"bytes"
"code.google.com/p/go.crypto/bcrypt"
"compress/gzip"
"crypto/rand"
"fmt"
"github.com/ian-kent/go-log/log"
"net"
nethttp "net/http"
neturl "net/url"
"strings"
"encoding/json"
"encoding/base64"
)
// Response wraps an http.ResponseWriter, buffering body writes until the
// header is sent and adding session cookies, gzip, chunked and event-stream
// support.
type Response struct {
	session       *Session               // session whose ID/data are emitted as cookies
	writer        nethttp.ResponseWriter // underlying writer
	buffer        *bytes.Buffer          // holds body written before Send
	headerSent    bool                   // set once Send has run
	Gzipped       bool                   // body is gzip-compressed
	gzwriter      *gzip.Writer           // wraps writer directly when Gzipped
	IsChunked     bool                   // streamed via Chunked()
	IsEventStream bool                   // streamed via EventStream()
	esBufrw       *bufio.ReadWriter      // buffered rw from the hijacked connection
	esConn        net.Conn               // hijacked connection for event streams
	Status        int                    // HTTP status code, default 200
	Headers       Headers                // response headers to emit on Send
	Cookies       Cookies                // cookies to emit on Send
}
// Headers maps a header name to its list of values.
type Headers map[string][]string

// Add appends value to any values already stored for name, allowing the same
// header to be sent multiple times (resolves the previous TODO; mirrors
// net/http Header.Add semantics).
func (h Headers) Add(name string, value string) {
	h[name] = append(h[name], value)
}

// Set replaces all existing values for name with value.
func (h Headers) Set(name string, value string) {
	h[name] = []string{value}
}

// Remove deletes every value stored for name.
func (h Headers) Remove(name string) {
	delete(h, name)
}
// Cookies maps a cookie name to the cookie that will be set on the response.
type Cookies map[string]*nethttp.Cookie

// Set stores cookie, replacing any existing cookie with the same name.
func (c Cookies) Set(cookie *nethttp.Cookie) {
	c[cookie.Name] = cookie
}
// CreateResponse builds a Response with defaults: status 200, empty
// header/cookie maps, and a buffer that collects body writes until Send.
func CreateResponse(session *Session, writer nethttp.ResponseWriter) *Response {
	return &Response{
		session:       session,
		writer:        writer,
		buffer:        &bytes.Buffer{},
		headerSent:    false,
		Gzipped:       false,
		IsChunked:     false,
		IsEventStream: false,
		Status:        200,
		Headers:       make(Headers),
		Cookies:       make(Cookies),
	}
}
// toBase64 returns the standard base64 encoding of b.
func toBase64(b []byte) string {
	// EncodeToString replaces the manual NewEncoder/Write/Close dance and
	// cannot fail; behavior is identical.
	return base64.StdEncoding.EncodeToString(b)
}
// Gzip enables gzip compression for the response body and advertises it via
// the Content-Encoding header.
func (r *Response) Gzip() {
	r.Gzipped = true
	r.Headers.Add("Content-Encoding", "gzip")
	// The gzip writer wraps the underlying writer directly, so writes through
	// it bypass the pre-Send buffer.
	r.gzwriter = gzip.NewWriter(r.writer)
}
// SessionID returns the session's ID, lazily generating one the first time
// session data exists without an ID. Empty when there is no session data.
func (r *Response) SessionID() string {
	if len(r.session.SessionID) == 0 && len(r.session.SessionData) > 0 {
		r.createSessionId()
	}
	return r.session.SessionID
}
// createSessionId generates a new session ID, stores it on the session and
// queues the __SID cookie.
func (r *Response) createSessionId() {
	bytes := make([]byte, 256)
	// NOTE(review): the rand.Read error is ignored; on failure the seed may be
	// partially zero — confirm this is acceptable.
	rand.Read(bytes)
	// NOTE(review): bcrypt is documented to use at most the first 72 bytes of
	// its input, so most of this 256-byte seed would be ignored — verify.
	s, _ := bcrypt.GenerateFromPassword(bytes, 11)
	r.session.SessionID = string(s)
	log.Info("Generated session ID (__SID): %s", r.session.SessionID)
	r.Cookies.Set(&nethttp.Cookie{
		Name:  "__SID",
		Value: r.session.SessionID,
		Path:  "/",
	})
}
// writeSessionData serializes the session data to JSON and queues it,
// base64-encoded, as the __SD cookie. On marshal failure it renders a 500.
func (r *Response) writeSessionData() {
	b, err := json.Marshal(r.session.SessionData)
	if err != nil {
		r.session.RenderException(500, err)
		return
	}
	r.Cookies.Set(&nethttp.Cookie{
		Name:  "__SD",
		Value: toBase64(b),
		Path:  "/",
	})
}
// Write sends b to the client, buffering it until the header has been sent;
// after that it writes through the gzip writer when compression is enabled.
func (r *Response) Write(b []byte) (int, error) {
	if !r.headerSent {
		return r.buffer.Write(b)
	}
	if r.Gzipped {
		return r.gzwriter.Write(b)
	}
	return r.writer.Write(b)
}
// WriteText writes the given string to the response body.
func (r *Response) WriteText(text string) {
	_, _ = r.Write([]byte(text))
}
// EventStream switches the response into a server-sent-events stream and
// returns a channel; each []byte sent on it is written to the client as one
// SSE message framed as an HTTP/1.1 chunk. Sending an empty slice ends the
// stream. Returns nil if the connection cannot be hijacked.
func (r *Response) EventStream() chan []byte {
	c := make(chan []byte)
	r.IsEventStream = true
	r.Headers.Add("Content-Type", "text/event-stream")
	r.Headers.Add("Cache-Control", "no-cache")
	r.Headers.Add("Connection", "keep-alive")
	//r.Write([]byte("\n\n"))
	r.Send()
	// Take over the raw connection; from here on the body is written manually,
	// bypassing net/http.
	hj, ok := r.writer.(nethttp.Hijacker)
	if !ok {
		log.Warn("Connection unsuitable for hijack")
		return nil
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		log.Warn("Connection hijack failed")
		return nil
	}
	r.esBufrw = bufrw
	r.esConn = conn
	go func() {
		for b := range c {
			// Empty message is the sentinel for "close the stream".
			if len(b) == 0 {
				log.Trace("Event stream ended")
				r.esConn.Close()
				break
			}
			// SSE framing: prefix each payload line with "data: " and
			// terminate the message with a blank line.
			lines := strings.Split(string(b), "\n")
			data := ""
			for _, l := range lines {
				data += "data: " + l + "\n"
			}
			data += "\n"
			// Chunked transfer framing: hex size, payload, CRLF trailer. The
			// +1 accounts for the extra trailing newline produced by the
			// line-by-line write loop below.
			sz := len(data) + 1
			log.Info("Event stream message is %d bytes", sz)
			size := fmt.Sprintf("%X", sz)
			r.esBufrw.Write([]byte(size + "\r\n"))
			lines = strings.Split(data, "\n")
			for _, ln := range lines {
				r.esBufrw.Write([]byte(ln + "\n"))
			}
			_, err := r.esBufrw.Write([]byte("\r\n"))
			if err != nil {
				log.Error("Error writing to connection: %s\n", err)
				r.esConn.Close()
				break
			}
			err = r.esBufrw.Flush()
			if err != nil {
				log.Error("Error flushing buffer: %s\n", err)
				r.esConn.Close()
				break
			}
		}
	}()
	return c
}
// Chunked switches the response into streamed delivery and returns a channel;
// each []byte sent on it is written and flushed immediately. Sending an empty
// slice ends the stream.
func (r *Response) Chunked() chan []byte {
	c := make(chan []byte)
	r.IsChunked = true
	r.Send()
	go func() {
		for b := range c {
			// Empty chunk is the sentinel for end-of-stream; finalize gzip.
			if len(b) == 0 {
				log.Trace("Chunk stream ended")
				if r.Gzipped {
					r.gzwriter.Close()
				}
				break
			}
			log.Trace("Writing chunk: %d bytes", len(b))
			if r.Gzipped {
				r.gzwriter.Write(b)
			} else {
				r.Write(b)
			}
			// Push the chunk to the client right away when supported.
			if f, ok := r.writer.(nethttp.Flusher); ok {
				f.Flush()
			}
		}
	}()
	return c
}
// Redirect prepares an HTTP redirect to url with the given status code.
func (r *Response) Redirect(url *neturl.URL, status int) {
	r.Status = status
	r.Headers.Set("Location", url.String())
}
// Close finalizes the gzip stream for non-chunked gzipped responses; chunked
// responses close their gzip writer when the stream ends.
func (r *Response) Close() {
	if r.Gzipped && !r.IsChunked {
		r.gzwriter.Close()
	}
}
// Send flushes status, headers, cookies and any buffered body to the client.
// It is idempotent: only the first call emits the header.
func (r *Response) Send() {
	if r.headerSent {
		return
	}
	r.headerSent = true
	// Ensure a session ID exists (when there is session data) and queue the
	// session-data cookie before the header goes out.
	r.SessionID()
	r.writeSessionData()
	for k, v := range r.Headers {
		for _, h := range v {
			log.Trace("Adding header [%s]: [%s]", k, h)
			r.writer.Header().Add(k, h)
		}
	}
	for _, c := range r.Cookies {
		nethttp.SetCookie(r.writer, c)
	}
	r.writer.WriteHeader(r.Status)
	// Drain everything that was written before the header was sent.
	if r.Gzipped {
		r.gzwriter.Write(r.buffer.Bytes())
	} else {
		r.writer.Write(r.buffer.Bytes())
	}
	// Streaming responses close their gzip writer when the stream ends.
	if !r.IsChunked && !r.IsEventStream {
		if r.Gzipped {
			r.gzwriter.Close()
		}
	}
}
Provide access to underlying writer
package http
import (
"bufio"
"bytes"
"code.google.com/p/go.crypto/bcrypt"
"compress/gzip"
"crypto/rand"
"fmt"
"github.com/ian-kent/go-log/log"
"net"
nethttp "net/http"
neturl "net/url"
"strings"
"encoding/json"
"encoding/base64"
)
// Response wraps an http.ResponseWriter, buffering body writes until the
// header is sent and adding session cookies, gzip, chunked and event-stream
// support.
type Response struct {
	session       *Session               // session whose ID/data are emitted as cookies
	writer        nethttp.ResponseWriter // underlying writer (exposed via GetWriter)
	buffer        *bytes.Buffer          // holds body written before Send
	headerSent    bool                   // set once Send has run
	Gzipped       bool                   // body is gzip-compressed
	gzwriter      *gzip.Writer           // wraps writer directly when Gzipped
	IsChunked     bool                   // streamed via Chunked()
	IsEventStream bool                   // streamed via EventStream()
	esBufrw       *bufio.ReadWriter      // buffered rw from the hijacked connection
	esConn        net.Conn               // hijacked connection for event streams
	Status        int                    // HTTP status code, default 200
	Headers       Headers                // response headers to emit on Send
	Cookies       Cookies                // cookies to emit on Send
}
// Headers maps a header name to its list of values.
type Headers map[string][]string

// Add appends value to any values already stored for name, allowing the same
// header to be sent multiple times (resolves the previous TODO; mirrors
// net/http Header.Add semantics).
func (h Headers) Add(name string, value string) {
	h[name] = append(h[name], value)
}

// Set replaces all existing values for name with value.
func (h Headers) Set(name string, value string) {
	h[name] = []string{value}
}

// Remove deletes every value stored for name.
func (h Headers) Remove(name string) {
	delete(h, name)
}
// Cookies maps a cookie name to the cookie that will be set on the response.
type Cookies map[string]*nethttp.Cookie

// Set stores cookie, replacing any existing cookie with the same name.
func (c Cookies) Set(cookie *nethttp.Cookie) {
	c[cookie.Name] = cookie
}
// CreateResponse builds a Response with defaults: status 200, empty
// header/cookie maps, and a buffer that collects body writes until Send.
func CreateResponse(session *Session, writer nethttp.ResponseWriter) *Response {
	return &Response{
		session:       session,
		writer:        writer,
		buffer:        &bytes.Buffer{},
		headerSent:    false,
		Gzipped:       false,
		IsChunked:     false,
		IsEventStream: false,
		Status:        200,
		Headers:       make(Headers),
		Cookies:       make(Cookies),
	}
}
// GetWriter exposes the underlying http.ResponseWriter for callers that need
// direct access (e.g. hijacking or interface assertions).
// NOTE(review): the Get prefix is unidiomatic Go, but renaming would break
// existing callers.
func (r *Response) GetWriter() nethttp.ResponseWriter {
	return r.writer
}
// toBase64 returns the standard base64 encoding of b.
func toBase64(b []byte) string {
	// EncodeToString replaces the manual NewEncoder/Write/Close dance and
	// cannot fail; behavior is identical.
	return base64.StdEncoding.EncodeToString(b)
}
// Gzip enables gzip compression for the response body and advertises it via
// the Content-Encoding header.
func (r *Response) Gzip() {
	r.Gzipped = true
	r.Headers.Add("Content-Encoding", "gzip")
	// The gzip writer wraps the underlying writer directly, so writes through
	// it bypass the pre-Send buffer.
	r.gzwriter = gzip.NewWriter(r.writer)
}
// SessionID returns the session's ID, lazily generating one the first time
// session data exists without an ID. Empty when there is no session data.
func (r *Response) SessionID() string {
	if len(r.session.SessionID) == 0 && len(r.session.SessionData) > 0 {
		r.createSessionId()
	}
	return r.session.SessionID
}
// createSessionId generates a new session ID, stores it on the session and
// queues the __SID cookie.
func (r *Response) createSessionId() {
	bytes := make([]byte, 256)
	// NOTE(review): the rand.Read error is ignored; on failure the seed may be
	// partially zero — confirm this is acceptable.
	rand.Read(bytes)
	// NOTE(review): bcrypt is documented to use at most the first 72 bytes of
	// its input, so most of this 256-byte seed would be ignored — verify.
	s, _ := bcrypt.GenerateFromPassword(bytes, 11)
	r.session.SessionID = string(s)
	log.Info("Generated session ID (__SID): %s", r.session.SessionID)
	r.Cookies.Set(&nethttp.Cookie{
		Name:  "__SID",
		Value: r.session.SessionID,
		Path:  "/",
	})
}
// writeSessionData serializes the session data to JSON and queues it,
// base64-encoded, as the __SD cookie. On marshal failure it renders a 500.
func (r *Response) writeSessionData() {
	b, err := json.Marshal(r.session.SessionData)
	if err != nil {
		r.session.RenderException(500, err)
		return
	}
	r.Cookies.Set(&nethttp.Cookie{
		Name:  "__SD",
		Value: toBase64(b),
		Path:  "/",
	})
}
// Write sends b to the client, buffering it until the header has been sent;
// after that it writes through the gzip writer when compression is enabled.
func (r *Response) Write(b []byte) (int, error) {
	if !r.headerSent {
		return r.buffer.Write(b)
	}
	if r.Gzipped {
		return r.gzwriter.Write(b)
	}
	return r.writer.Write(b)
}
// WriteText writes the given string to the response body.
func (r *Response) WriteText(text string) {
	_, _ = r.Write([]byte(text))
}
// EventStream switches the response into a server-sent-events stream and
// returns a channel; each []byte sent on it is written to the client as one
// SSE message framed as an HTTP/1.1 chunk. Sending an empty slice ends the
// stream. Returns nil if the connection cannot be hijacked.
func (r *Response) EventStream() chan []byte {
	c := make(chan []byte)
	r.IsEventStream = true
	r.Headers.Add("Content-Type", "text/event-stream")
	r.Headers.Add("Cache-Control", "no-cache")
	r.Headers.Add("Connection", "keep-alive")
	//r.Write([]byte("\n\n"))
	r.Send()
	// Take over the raw connection; from here on the body is written manually,
	// bypassing net/http.
	hj, ok := r.writer.(nethttp.Hijacker)
	if !ok {
		log.Warn("Connection unsuitable for hijack")
		return nil
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		log.Warn("Connection hijack failed")
		return nil
	}
	r.esBufrw = bufrw
	r.esConn = conn
	go func() {
		for b := range c {
			// Empty message is the sentinel for "close the stream".
			if len(b) == 0 {
				log.Trace("Event stream ended")
				r.esConn.Close()
				break
			}
			// SSE framing: prefix each payload line with "data: " and
			// terminate the message with a blank line.
			lines := strings.Split(string(b), "\n")
			data := ""
			for _, l := range lines {
				data += "data: " + l + "\n"
			}
			data += "\n"
			// Chunked transfer framing: hex size, payload, CRLF trailer. The
			// +1 accounts for the extra trailing newline produced by the
			// line-by-line write loop below.
			sz := len(data) + 1
			log.Info("Event stream message is %d bytes", sz)
			size := fmt.Sprintf("%X", sz)
			r.esBufrw.Write([]byte(size + "\r\n"))
			lines = strings.Split(data, "\n")
			for _, ln := range lines {
				r.esBufrw.Write([]byte(ln + "\n"))
			}
			_, err := r.esBufrw.Write([]byte("\r\n"))
			if err != nil {
				log.Error("Error writing to connection: %s\n", err)
				r.esConn.Close()
				break
			}
			err = r.esBufrw.Flush()
			if err != nil {
				log.Error("Error flushing buffer: %s\n", err)
				r.esConn.Close()
				break
			}
		}
	}()
	return c
}
// Chunked switches the response into streamed delivery and returns a channel;
// each []byte sent on it is written and flushed immediately. Sending an empty
// slice ends the stream.
func (r *Response) Chunked() chan []byte {
	c := make(chan []byte)
	r.IsChunked = true
	r.Send()
	go func() {
		for b := range c {
			// Empty chunk is the sentinel for end-of-stream; finalize gzip.
			if len(b) == 0 {
				log.Trace("Chunk stream ended")
				if r.Gzipped {
					r.gzwriter.Close()
				}
				break
			}
			log.Trace("Writing chunk: %d bytes", len(b))
			if r.Gzipped {
				r.gzwriter.Write(b)
			} else {
				r.Write(b)
			}
			// Push the chunk to the client right away when supported.
			if f, ok := r.writer.(nethttp.Flusher); ok {
				f.Flush()
			}
		}
	}()
	return c
}
// Redirect prepares an HTTP redirect to url with the given status code.
func (r *Response) Redirect(url *neturl.URL, status int) {
	r.Status = status
	r.Headers.Set("Location", url.String())
}
// Close finalizes the gzip stream for non-chunked gzipped responses; chunked
// responses close their gzip writer when the stream ends.
func (r *Response) Close() {
	if r.Gzipped && !r.IsChunked {
		r.gzwriter.Close()
	}
}
// Send flushes status, headers, cookies and any buffered body to the client.
// It is idempotent: only the first call emits the header.
func (r *Response) Send() {
	if r.headerSent {
		return
	}
	r.headerSent = true
	// Ensure a session ID exists (when there is session data) and queue the
	// session-data cookie before the header goes out.
	r.SessionID()
	r.writeSessionData()
	for k, v := range r.Headers {
		for _, h := range v {
			log.Trace("Adding header [%s]: [%s]", k, h)
			r.writer.Header().Add(k, h)
		}
	}
	for _, c := range r.Cookies {
		nethttp.SetCookie(r.writer, c)
	}
	r.writer.WriteHeader(r.Status)
	// Drain everything that was written before the header was sent.
	if r.Gzipped {
		r.gzwriter.Write(r.buffer.Bytes())
	} else {
		r.writer.Write(r.buffer.Bytes())
	}
	// Streaming responses close their gzip writer when the stream ends.
	if !r.IsChunked && !r.IsEventStream {
		if r.Gzipped {
			r.gzwriter.Close()
		}
	}
}
|
package cmd
import (
"compress/gzip"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/privacybydesign/gabi"
"github.com/privacybydesign/gabi/big"
"github.com/privacybydesign/gabi/keyproof"
"github.com/privacybydesign/irmago/internal/common"
"github.com/sietseringers/cobra"
)
// Messages exchanged between the keyproof progress callbacks and the
// rendering goroutine started by startLogFollower.
type stepStartMessage struct {
	desc          string
	intermediates int // expected number of ticks for this step; <= 0 when unknown
}
type stepDoneMessage struct{}
type tickMessage struct{}
type quitMessage struct{}
type finishMessage struct{}
type setFinalMessage struct {
	message string
}

// logFollower is installed as keyproof.Follower and forwards progress
// callbacks to the rendering goroutine over channels.
type logFollower struct {
	stepStartEvents chan<- stepStartMessage
	stepDoneEvents  chan<- stepDoneMessage
	tickEvents      chan<- tickMessage
	quitEvents      chan<- quitMessage
	finalEvents     chan<- setFinalMessage
	finished        <-chan finishMessage // receives once after Quit is processed
}
// StepStart reports that a proof step with the given description and expected
// number of intermediate ticks has begun.
func (l *logFollower) StepStart(desc string, intermediates int) {
	l.stepStartEvents <- stepStartMessage{desc, intermediates}
}
// StepDone reports that the current proof step has completed.
func (l *logFollower) StepDone() {
	l.stepDoneEvents <- stepDoneMessage{}
}
// Tick reports one unit of progress within the current proof step.
func (l *logFollower) Tick() {
	l.tickEvents <- tickMessage{}
}
// Quit asks the rendering goroutine to print any final message and exit.
func (l *logFollower) Quit() {
	l.quitEvents <- quitMessage{}
}
// printProofStatus redraws the single-line progress indicator: the status
// text, a run of dots padding to a fixed width, then a tail that is either
// "done", a "count/limit" counter, or nothing.
func printProofStatus(status string, count, limit int, done bool) {
	tail := ""
	switch {
	case done:
		tail = "done"
	case limit > 0:
		tail = fmt.Sprintf("%v/%v", count, limit)
	}

	// An empty tail still reserves four columns so the line width is stable.
	tlen := len(tail)
	if tlen == 0 {
		tlen = 4
	}

	fmt.Printf("\r%s", status)
	dots := 60 - len(status) - tlen
	for i := 0; i < dots; i++ {
		fmt.Printf(".")
	}
	fmt.Printf("%s", tail)
}
func startLogFollower() *logFollower {
var result = new(logFollower)
starts := make(chan stepStartMessage)
dones := make(chan stepDoneMessage)
ticks := make(chan tickMessage)
quit := make(chan quitMessage)
finished := make(chan finishMessage)
finalmessage := make(chan setFinalMessage)
result.stepStartEvents = starts
result.stepDoneEvents = dones
result.tickEvents = ticks
result.quitEvents = quit
result.finished = finished
result.finalEvents = finalmessage
go func() {
doneMissing := 0
curStatus := ""
curCount := 0
curLimit := 0
curDone := true
finalMessage := ""
ticker := time.NewTicker(time.Second / 4)
defer ticker.Stop()
for {
select {
case <-ticks:
curCount++
case <-dones:
if doneMissing > 0 {
doneMissing--
continue // Swallow quietly
} else {
curDone = true
printProofStatus(curStatus, curCount, curLimit, true)
fmt.Printf("\n")
}
case stepstart := <-starts:
if !curDone {
printProofStatus(curStatus, curCount, curLimit, true)
fmt.Printf("\n")
doneMissing++
}
curDone = false
curCount = 0
curLimit = stepstart.intermediates
curStatus = stepstart.desc
case messageevent := <-finalmessage:
finalMessage = messageevent.message
case <-quit:
if finalMessage != "" {
fmt.Printf("%s\n", finalMessage)
}
finished <- finishMessage{}
return
case <-ticker.C:
if !curDone {
printProofStatus(curStatus, curCount, curLimit, false)
}
}
}
}()
keyproof.Follower = result
return result
}
// issuerKeyproveCmd implements "keyprove": it generates a proof of correct
// generation for an IRMA issuer keypair and writes it, gzip-compressed JSON,
// to the Proofs folder (or a user-supplied file).
var issuerKeyproveCmd = &cobra.Command{
	Use:   "keyprove [path]",
	Short: "Generate proof of correct generation for an IRMA issuer keypair",
	Long: `Generate proof of correct generation for an IRMA issuer keypair.
The keyprove command generates a proof that an issuer key was generated correctly. By default, it generates a proof for the newest private key in the PrivateKeys folder, and then stores the proof in the Proofs folder.`,
	Args: cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		flags := cmd.Flags()
		counter, _ := flags.GetUint("counter")
		pubkeyfile, _ := flags.GetString("publickey")
		privkeyfile, _ := flags.GetString("privatekey")
		prooffile, _ := flags.GetString("proof")

		var err error

		// Determine path for key
		var path string
		if len(args) != 0 {
			path = args[0]
		} else {
			path, err = os.Getwd()
			if err != nil {
				return err
			}
		}
		if err = common.AssertPathExists(path); err != nil {
			return errors.WrapPrefix(err, "Nonexisting path specified", 0)
		}

		// Determine counter if needed (default: the newest private key)
		if !flags.Changed("counter") {
			counter = uint(lastPrivateKeyIndex(path))
		}

		// Fill in pubkey if needed
		if pubkeyfile == "" {
			pubkeyfile = filepath.Join(path, "PublicKeys", strconv.Itoa(int(counter))+".xml")
		}

		// Fill in privkey if needed
		if privkeyfile == "" {
			privkeyfile = filepath.Join(path, "PrivateKeys", strconv.Itoa(int(counter))+".xml")
		}

		// Try to read public key
		pk, err := gabi.NewPublicKeyFromFile(pubkeyfile)
		if err != nil {
			return errors.WrapPrefix(err, "Could not read public key", 0)
		}

		// Try to read private key
		sk, err := gabi.NewPrivateKeyFromFile(privkeyfile, false)
		if err != nil {
			return errors.WrapPrefix(err, "Could not read private key", 0)
		}

		// Validate that they match
		if pk.N.Cmp(new(big.Int).Mul(sk.P, sk.Q)) != 0 {
			return errors.New("Private and public key do not match")
		}

		// Validate that the key is amenable to proving
		ConstEight := big.NewInt(8)
		ConstOne := big.NewInt(1)
		PMod := new(big.Int).Mod(sk.P, ConstEight)
		QMod := new(big.Int).Mod(sk.Q, ConstEight)
		PPrimeMod := new(big.Int).Mod(sk.PPrime, ConstEight)
		QPrimeMod := new(big.Int).Mod(sk.QPrime, ConstEight)
		if PMod.Cmp(ConstOne) == 0 || QMod.Cmp(ConstOne) == 0 ||
			PPrimeMod.Cmp(ConstOne) == 0 || QPrimeMod.Cmp(ConstOne) == 0 ||
			PMod.Cmp(QMod) == 0 || PPrimeMod.Cmp(QPrimeMod) == 0 {
			return errors.New("Private key not amenable to proving")
		}

		// Prepare storage for proof if needed. This is deliberately done only
		// after all validation checks have passed, so a failing run does not
		// leave an empty Proofs directory behind.
		if prooffile == "" {
			proofpath := filepath.Join(path, "Proofs")
			if err = common.EnsureDirectoryExists(proofpath); err != nil {
				return errors.WrapPrefix(err, "Failed to create "+proofpath, 0)
			}
			prooffile = filepath.Join(proofpath, strconv.Itoa(int(counter))+".json.gz")
		}

		// Open proof file for writing
		proofOut, err := os.Create(prooffile)
		if err != nil {
			return errors.WrapPrefix(err, "Error opening proof file for writing", 0)
		}
		defer proofOut.Close()

		// Wrap it for gzip compression; Close flushes remaining data.
		proofWriter := gzip.NewWriter(proofOut)
		defer proofWriter.Close()

		// Start log follower; on exit, ask it to quit and wait until it has.
		follower := startLogFollower()
		defer func() {
			follower.quitEvents <- quitMessage{}
			<-follower.finished
		}()

		// Build the proof
		s := keyproof.NewValidKeyProofStructure(pk.N, pk.Z, pk.S, pk.R)
		proof := s.BuildProof(sk.PPrime, sk.QPrime)

		// And write it to file
		follower.StepStart("Writing proof", 0)
		proofEncoder := json.NewEncoder(proofWriter)
		err = proofEncoder.Encode(proof)
		follower.StepDone()
		if err != nil {
			return errors.WrapPrefix(err, "Could not write proof", 0)
		}

		return nil
	},
}
// init registers the keyprove subcommand and its flags on the issuer command.
func init() {
	issuerCmd.AddCommand(issuerKeyproveCmd)
	issuerKeyproveCmd.Flags().StringP("privatekey", "s", "", `File to get private key from (default "PrivateKeys/$counter.xml")`)
	issuerKeyproveCmd.Flags().StringP("publickey", "p", "", `File to get public key from (default "PublicKeys/$counter.xml")`)
	issuerKeyproveCmd.Flags().StringP("proof", "o", "", `File to write proof to (default "Proofs/$index.json.gz")`)
	issuerKeyproveCmd.Flags().UintP("counter", "c", 0, "Counter of key to prove (default to latest)")
}
fix: when key proving, don't generate Proofs subdir until after all checks are done
package cmd
import (
"compress/gzip"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/privacybydesign/gabi"
"github.com/privacybydesign/gabi/big"
"github.com/privacybydesign/gabi/keyproof"
"github.com/privacybydesign/irmago/internal/common"
"github.com/sietseringers/cobra"
)
// stepStartMessage announces the start of a proof step: a human-readable
// description plus the expected number of intermediate tick events.
type stepStartMessage struct {
	desc          string
	intermediates int
}

// stepDoneMessage signals that the current proof step has finished.
type stepDoneMessage struct{}

// tickMessage signals one unit of progress within the current step.
type tickMessage struct{}

// quitMessage asks the follower goroutine to shut down.
type quitMessage struct{}

// finishMessage is sent back by the follower goroutine once it has exited.
type finishMessage struct{}

// setFinalMessage carries a message to be printed just before shutdown.
type setFinalMessage struct {
	message string
}

// logFollower relays keyproof progress events to a background goroutine that
// renders them on the terminal. All channels are unbuffered, so senders block
// until the goroutine consumes each event.
type logFollower struct {
	stepStartEvents chan<- stepStartMessage
	stepDoneEvents  chan<- stepDoneMessage
	tickEvents      chan<- tickMessage
	quitEvents      chan<- quitMessage
	finalEvents     chan<- setFinalMessage
	finished        <-chan finishMessage
}
// StepStart announces the beginning of a proof step with the given
// description and expected number of intermediate tick events.
func (l *logFollower) StepStart(desc string, intermediates int) {
	l.stepStartEvents <- stepStartMessage{desc, intermediates}
}

// StepDone announces completion of the current proof step.
func (l *logFollower) StepDone() {
	l.stepDoneEvents <- stepDoneMessage{}
}

// Tick reports one unit of progress within the current step.
func (l *logFollower) Tick() {
	l.tickEvents <- tickMessage{}
}

// Quit asks the follower goroutine to shut down; the goroutine acknowledges
// on the finished channel (the caller waits on that separately).
func (l *logFollower) Quit() {
	l.quitEvents <- quitMessage{}
}
// printProofStatus redraws the single status line: the step description,
// a run of dots padding the line to a fixed width, and a tail that is
// either "done", a "count/limit" progress indicator, or empty.
func printProofStatus(status string, count, limit int, done bool) {
	tail := ""
	switch {
	case done:
		tail = "done"
	case limit > 0:
		tail = fmt.Sprintf("%v/%v", count, limit)
	}

	// An empty tail still reserves the width of "done" so the dots line up.
	width := len(tail)
	if width == 0 {
		width = 4
	}

	fmt.Printf("\r%s", status)
	for dots := 60 - len(status) - width; dots > 0; dots-- {
		fmt.Printf(".")
	}
	fmt.Printf("%s", tail)
}
// startLogFollower constructs a logFollower, starts the goroutine that owns
// all terminal-progress state, and registers the follower with the keyproof
// package so proof generation reports its progress here. All channels are
// unbuffered, so event senders synchronize with the goroutine.
func startLogFollower() *logFollower {
	var result = new(logFollower)
	starts := make(chan stepStartMessage)
	dones := make(chan stepDoneMessage)
	ticks := make(chan tickMessage)
	quit := make(chan quitMessage)
	finished := make(chan finishMessage)
	finalmessage := make(chan setFinalMessage)
	result.stepStartEvents = starts
	result.stepDoneEvents = dones
	result.tickEvents = ticks
	result.quitEvents = quit
	result.finished = finished
	result.finalEvents = finalmessage
	go func() {
		// doneMissing counts steps that were force-closed by a new StepStart
		// before their own StepDone arrived; those late StepDones are swallowed.
		doneMissing := 0
		curStatus := ""
		curCount := 0
		curLimit := 0
		curDone := true
		finalMessage := ""
		// Redraw the status line four times per second while a step runs.
		ticker := time.NewTicker(time.Second / 4)
		defer ticker.Stop()
		for {
			select {
			case <-ticks:
				curCount++
			case <-dones:
				if doneMissing > 0 {
					doneMissing--
					continue // Swallow quietly
				} else {
					curDone = true
					printProofStatus(curStatus, curCount, curLimit, true)
					fmt.Printf("\n")
				}
			case stepstart := <-starts:
				if !curDone {
					// Previous step never reported done: close it out now and
					// remember to swallow its eventual StepDone.
					printProofStatus(curStatus, curCount, curLimit, true)
					fmt.Printf("\n")
					doneMissing++
				}
				curDone = false
				curCount = 0
				curLimit = stepstart.intermediates
				curStatus = stepstart.desc
			case messageevent := <-finalmessage:
				finalMessage = messageevent.message
			case <-quit:
				// Print any stored final message, notify the waiter, and exit.
				if finalMessage != "" {
					fmt.Printf("%s\n", finalMessage)
				}
				finished <- finishMessage{}
				return
			case <-ticker.C:
				if !curDone {
					printProofStatus(curStatus, curCount, curLimit, false)
				}
			}
		}
	}()
	keyproof.Follower = result
	return result
}
// issuerKeyproveCmd implements "keyprove": it generates a proof of correct
// generation for an IRMA issuer keypair and writes it, gzip-compressed JSON,
// to the Proofs folder (or a user-supplied file).
var issuerKeyproveCmd = &cobra.Command{
	Use:   "keyprove [path]",
	Short: "Generate proof of correct generation for an IRMA issuer keypair",
	Long: `Generate proof of correct generation for an IRMA issuer keypair.
The keyprove command generates a proof that an issuer key was generated correctly. By default, it generates a proof for the newest private key in the PrivateKeys folder, and then stores the proof in the Proofs folder.`,
	Args: cobra.MaximumNArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		flags := cmd.Flags()
		counter, _ := flags.GetUint("counter")
		pubkeyfile, _ := flags.GetString("publickey")
		privkeyfile, _ := flags.GetString("privatekey")
		prooffile, _ := flags.GetString("proof")

		var err error

		// Determine path for key
		var path string
		if len(args) != 0 {
			path = args[0]
		} else {
			path, err = os.Getwd()
			if err != nil {
				return err
			}
		}
		if err = common.AssertPathExists(path); err != nil {
			return errors.WrapPrefix(err, "Nonexisting path specified", 0)
		}

		// Determine counter if needed (default: the newest private key)
		if !flags.Changed("counter") {
			counter = uint(lastPrivateKeyIndex(path))
		}

		// Fill in pubkey if needed
		if pubkeyfile == "" {
			pubkeyfile = filepath.Join(path, "PublicKeys", strconv.Itoa(int(counter))+".xml")
		}

		// Fill in privkey if needed
		if privkeyfile == "" {
			privkeyfile = filepath.Join(path, "PrivateKeys", strconv.Itoa(int(counter))+".xml")
		}

		// Try to read public key
		pk, err := gabi.NewPublicKeyFromFile(pubkeyfile)
		if err != nil {
			return errors.WrapPrefix(err, "Could not read public key", 0)
		}

		// Try to read private key
		sk, err := gabi.NewPrivateKeyFromFile(privkeyfile, false)
		if err != nil {
			return errors.WrapPrefix(err, "Could not read private key", 0)
		}

		// Validate that they match
		if pk.N.Cmp(new(big.Int).Mul(sk.P, sk.Q)) != 0 {
			return errors.New("Private and public key do not match")
		}

		// Validate that the key is amenable to proving
		ConstEight := big.NewInt(8)
		ConstOne := big.NewInt(1)
		PMod := new(big.Int).Mod(sk.P, ConstEight)
		QMod := new(big.Int).Mod(sk.Q, ConstEight)
		PPrimeMod := new(big.Int).Mod(sk.PPrime, ConstEight)
		QPrimeMod := new(big.Int).Mod(sk.QPrime, ConstEight)
		if PMod.Cmp(ConstOne) == 0 || QMod.Cmp(ConstOne) == 0 ||
			PPrimeMod.Cmp(ConstOne) == 0 || QPrimeMod.Cmp(ConstOne) == 0 ||
			PMod.Cmp(QMod) == 0 || PPrimeMod.Cmp(QPrimeMod) == 0 {
			return errors.New("Private key not amenable to proving")
		}

		// Prepare storage for proof if needed. Done only after all validation
		// checks have passed, so a failing run does not create a Proofs
		// directory. (Error message fixed: space between prefix and path.)
		if prooffile == "" {
			proofpath := filepath.Join(path, "Proofs")
			if err = common.EnsureDirectoryExists(proofpath); err != nil {
				return errors.WrapPrefix(err, "Failed to create "+proofpath, 0)
			}
			prooffile = filepath.Join(proofpath, strconv.Itoa(int(counter))+".json.gz")
		}

		// Open proof file for writing
		proofOut, err := os.Create(prooffile)
		if err != nil {
			return errors.WrapPrefix(err, "Error opening proof file for writing", 0)
		}
		defer proofOut.Close()

		// Wrap it for gzip compression; Close flushes remaining data.
		proofWriter := gzip.NewWriter(proofOut)
		defer proofWriter.Close()

		// Start log follower; on exit, ask it to quit and wait until it has.
		follower := startLogFollower()
		defer func() {
			follower.quitEvents <- quitMessage{}
			<-follower.finished
		}()

		// Build the proof
		s := keyproof.NewValidKeyProofStructure(pk.N, pk.Z, pk.S, pk.R)
		proof := s.BuildProof(sk.PPrime, sk.QPrime)

		// And write it to file
		follower.StepStart("Writing proof", 0)
		proofEncoder := json.NewEncoder(proofWriter)
		err = proofEncoder.Encode(proof)
		follower.StepDone()
		if err != nil {
			return errors.WrapPrefix(err, "Could not write proof", 0)
		}

		return nil
	},
}
// init registers the keyprove subcommand and its flags on the issuer command.
func init() {
	issuerCmd.AddCommand(issuerKeyproveCmd)
	issuerKeyproveCmd.Flags().StringP("privatekey", "s", "", `File to get private key from (default "PrivateKeys/$counter.xml")`)
	issuerKeyproveCmd.Flags().StringP("publickey", "p", "", `File to get public key from (default "PublicKeys/$counter.xml")`)
	issuerKeyproveCmd.Flags().StringP("proof", "o", "", `File to write proof to (default "Proofs/$index.json.gz")`)
	issuerKeyproveCmd.Flags().UintP("counter", "c", 0, "Counter of key to prove (default to latest)")
}
|
/* _ _
*__ _____ __ ___ ___ __ _| |_ ___
*\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
* \ V V / __/ (_| |\ V /| | (_| | || __/
* \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
*
* Copyright © 2016 Weaviate. All rights reserved.
* LICENSE: https://github.com/weaviate/weaviate/blob/master/LICENSE
* AUTHOR: Bob van Luijt (bob@weaviate.com)
* See www.weaviate.com for details
* Contact: @weaviate_iot / yourfriends@weaviate.com
*/
/* _ _
*__ _____ __ ___ ___ __ _| |_ ___
*\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
* \ V V / __/ (_| |\ V /| | (_| | || __/
* \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
*
* Copyright © 2016 Weaviate. All rights reserved.
* LICENSE: https://github.com/weaviate/weaviate/blob/master/LICENSE
* AUTHOR: Bob van Luijt (bob@weaviate.com)
* See www.weaviate.com for details
* Contact: @weaviate_iot / yourfriends@weaviate.com
*/
package restapi
import (
"crypto/tls"
"encoding/json"
"net/http"
"strings"
errors "github.com/go-openapi/errors"
runtime "github.com/go-openapi/runtime"
middleware "github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/runtime/yamlpc"
graceful "github.com/tylerb/graceful"
"github.com/weaviate/weaviate/connectors"
"github.com/weaviate/weaviate/connectors/datastore"
"github.com/weaviate/weaviate/connectors/mysql"
"github.com/weaviate/weaviate/models"
"github.com/weaviate/weaviate/restapi/operations"
"github.com/weaviate/weaviate/restapi/operations/acl_entries"
"github.com/weaviate/weaviate/restapi/operations/adapters"
"github.com/weaviate/weaviate/restapi/operations/commands"
"github.com/weaviate/weaviate/restapi/operations/devices"
"github.com/weaviate/weaviate/restapi/operations/events"
"github.com/weaviate/weaviate/restapi/operations/locations"
"github.com/weaviate/weaviate/restapi/operations/model_manifests"
)
// configureFlags registers additional command-line option groups on the API.
// Nothing is configured at the moment; the template below shows the hook.
func configureFlags(api *operations.WeaviateAPI) {
	// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }
}
// configureAPI establishes the database connection, configures the API's
// consumers and producers, and wires up every operation handler. Most
// operations are still stubbed out with NotImplemented; only the location
// handlers touch the database.
func configureAPI(api *operations.WeaviateAPI) http.Handler {
	// configure database connection
	var databaseConnector dbconnector.DatabaseConnector

	// NOTE(review): connector choice is hard-coded to "datastore"; presumably
	// this is meant to come from a real command-line flag — confirm.
	commandLineInput := "datastore"

	if commandLineInput == "datastore" {
		databaseConnector = &datastore.Datastore{}
	} else {
		databaseConnector = &mysql.Mysql{}
	}

	// A failed connection aborts startup: the API is unusable without it.
	err := databaseConnector.Connect()
	if err != nil {
		panic(err)
	}

	// configure the api here
	api.ServeError = errors.ServeError

	api.JSONConsumer = runtime.JSONConsumer()
	api.BinConsumer = runtime.ByteStreamConsumer()
	api.UrlformConsumer = runtime.DiscardConsumer
	api.YamlConsumer = yamlpc.YAMLConsumer()
	api.XMLConsumer = runtime.XMLConsumer()
	api.MultipartformConsumer = runtime.DiscardConsumer
	api.TxtConsumer = runtime.TextConsumer()

	api.JSONProducer = runtime.JSONProducer()
	api.BinProducer = runtime.ByteStreamProducer()
	api.UrlformProducer = runtime.DiscardProducer
	api.YamlProducer = yamlpc.YAMLProducer()
	api.XMLProducer = runtime.XMLProducer()
	api.MultipartformProducer = runtime.DiscardProducer
	api.TxtProducer = runtime.TextProducer()

	/*
	 * HANDLE ACL
	 */
	api.ACLEntriesWeaviateACLEntriesDeleteHandler = acl_entries.WeaviateACLEntriesDeleteHandlerFunc(func(params acl_entries.WeaviateACLEntriesDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesDelete has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesGetHandler = acl_entries.WeaviateACLEntriesGetHandlerFunc(func(params acl_entries.WeaviateACLEntriesGetParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesGet has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesInsertHandler = acl_entries.WeaviateACLEntriesInsertHandlerFunc(func(params acl_entries.WeaviateACLEntriesInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesInsert has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesListHandler = acl_entries.WeaviateACLEntriesListHandlerFunc(func(params acl_entries.WeaviateACLEntriesListParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesList has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesPatchHandler = acl_entries.WeaviateACLEntriesPatchHandlerFunc(func(params acl_entries.WeaviateACLEntriesPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesPatch has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesUpdateHandler = acl_entries.WeaviateACLEntriesUpdateHandlerFunc(func(params acl_entries.WeaviateACLEntriesUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesUpdate has not yet been implemented")
	})

	/*
	 * HANDLE ADAPTERS
	 */
	api.AdaptersWeaviateAdaptersDeleteHandler = adapters.WeaviateAdaptersDeleteHandlerFunc(func(params adapters.WeaviateAdaptersDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersDelete has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersGetHandler = adapters.WeaviateAdaptersGetHandlerFunc(func(params adapters.WeaviateAdaptersGetParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersGet has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersInsertHandler = adapters.WeaviateAdaptersInsertHandlerFunc(func(params adapters.WeaviateAdaptersInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersInsert has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersListHandler = adapters.WeaviateAdaptersListHandlerFunc(func(params adapters.WeaviateAdaptersListParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersList has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersPatchHandler = adapters.WeaviateAdaptersPatchHandlerFunc(func(params adapters.WeaviateAdaptersPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersPatch has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersUpdateHandler = adapters.WeaviateAdaptersUpdateHandlerFunc(func(params adapters.WeaviateAdaptersUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersUpdate has not yet been implemented")
	})

	/*
	 * HANDLE COMMANDS
	 */
	api.CommandsWeaviateCommandsDeleteHandler = commands.WeaviateCommandsDeleteHandlerFunc(func(params commands.WeaviateCommandsDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsDelete has not yet been implemented")
	})
	api.CommandsWeaviateCommandsGetHandler = commands.WeaviateCommandsGetHandlerFunc(func(params commands.WeaviateCommandsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsGet has not yet been implemented")
	})
	api.CommandsWeaviateCommandsGetQueueHandler = commands.WeaviateCommandsGetQueueHandlerFunc(func(params commands.WeaviateCommandsGetQueueParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsGetQueue has not yet been implemented")
	})
	api.CommandsWeaviateCommandsInsertHandler = commands.WeaviateCommandsInsertHandlerFunc(func(params commands.WeaviateCommandsInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsInsert has not yet been implemented")
	})
	api.CommandsWeaviateCommandsListHandler = commands.WeaviateCommandsListHandlerFunc(func(params commands.WeaviateCommandsListParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsList has not yet been implemented")
	})
	api.CommandsWeaviateCommandsPatchHandler = commands.WeaviateCommandsPatchHandlerFunc(func(params commands.WeaviateCommandsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsPatch has not yet been implemented")
	})
	api.CommandsWeaviateCommandsUpdateHandler = commands.WeaviateCommandsUpdateHandlerFunc(func(params commands.WeaviateCommandsUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsUpdate has not yet been implemented")
	})

	/*
	 * HANDLE DEVICES
	 */
	api.DevicesWeaviateDevicesDeleteHandler = devices.WeaviateDevicesDeleteHandlerFunc(func(params devices.WeaviateDevicesDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesDelete has not yet been implemented")
	})
	api.DevicesWeaviateDevicesGetHandler = devices.WeaviateDevicesGetHandlerFunc(func(params devices.WeaviateDevicesGetParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesGet has not yet been implemented")
	})
	api.DevicesWeaviateDevicesInsertHandler = devices.WeaviateDevicesInsertHandlerFunc(func(params devices.WeaviateDevicesInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesInsert has not yet been implemented")
	})
	api.DevicesWeaviateDevicesListHandler = devices.WeaviateDevicesListHandlerFunc(func(params devices.WeaviateDevicesListParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesList has not yet been implemented")
	})
	api.DevicesWeaviateDevicesPatchHandler = devices.WeaviateDevicesPatchHandlerFunc(func(params devices.WeaviateDevicesPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesPatch has not yet been implemented")
	})
	api.DevicesWeaviateDevicesUpdateHandler = devices.WeaviateDevicesUpdateHandlerFunc(func(params devices.WeaviateDevicesUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesUpdate has not yet been implemented")
	})

	/*
	 * HANDLE EVENTS
	 */
	api.EventsWeaviateEventsGetHandler = events.WeaviateEventsGetHandlerFunc(func(params events.WeaviateEventsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsGet has not yet been implemented")
	})
	api.EventsWeaviateEventsListHandler = events.WeaviateEventsListHandlerFunc(func(params events.WeaviateEventsListParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsList has not yet been implemented")
	})
	api.EventsWeaviateEventsRecordDeviceEventsHandler = events.WeaviateEventsRecordDeviceEventsHandlerFunc(func(params events.WeaviateEventsRecordDeviceEventsParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsRecordDeviceEvents has not yet been implemented")
	})

	/*
	 * HANDLE LOCATIONS
	 */
	api.LocationsWeaviateLocationsDeleteHandler = locations.WeaviateLocationsDeleteHandlerFunc(func(params locations.WeaviateLocationsDeleteParams) middleware.Responder {
		// Delete item from database
		err := databaseConnector.Delete(params.LocationID)

		// TODO: Not found response
		// TODO: Deleted response
		if err != nil {
			panic(err)
		}
		return locations.NewWeaviateLocationsDeleteNoContent()
	})
	api.LocationsWeaviateLocationsGetHandler = locations.WeaviateLocationsGetHandlerFunc(func(params locations.WeaviateLocationsGetParams) middleware.Responder {
		// Get item from database
		result, err := databaseConnector.Get(params.LocationID)

		// Create object to return
		object := &models.Location{}
		// NOTE(review): err is only consulted in the not-found condition below;
		// a database error with an empty object falls through to the OK path —
		// confirm that is intended.
		json.Unmarshal([]byte(result.Object), &object)
		objectID := strings.TrimSpace(object.ID)

		// NOTE(review): debug print — consider removing before release.
		println("'", objectID, "'")

		// If there are no results, the Object ID = 0
		if len(objectID) == 0 && err == nil {
			// return SUCCESS of query but no content.
			return locations.NewWeaviateLocationsGetNotFound()
		}

		// return SUCCESS
		return locations.NewWeaviateLocationsGetOK().WithPayload(object)
	})
	api.LocationsWeaviateLocationsInsertHandler = locations.WeaviateLocationsInsertHandlerFunc(func(params locations.WeaviateLocationsInsertParams) middleware.Responder {
		/*
		 * TODO VALIDATE IF THE OBJECT IS OKAY
		 */
		validated := true

		// return error
		if validated == false {
			return middleware.ResponderFunc(func(rw http.ResponseWriter, p runtime.Producer) {
				rw.WriteHeader(422)
				rw.Write([]byte("{ \"ERROR\": \"There is something wrong with your original POSTed body\" }"))
			})
		} else {
			// Generate DatabaseObject without JSON-object in it.
			dbObject := *dbconnector.NewDatabaseObject("FOOBAR USER UUID", "#/paths/locations")

			// Set the body-id and generate JSON to save to the database
			params.Body.ID = dbObject.Uuid
			databaseBody, _ := json.Marshal(params.Body)
			dbObject.Object = string(databaseBody)

			// Save to DB, this needs to be a Go routine because we will return an accepted
			go databaseConnector.Add(dbObject)

			// Return SUCCESS (NOTE: this is ACCEPTED, so the databaseConnector.Add should have a go routine)
			return locations.NewWeaviateLocationsInsertAccepted().WithPayload(params.Body)
		}
	})
	api.LocationsWeaviateLocationsListHandler = locations.WeaviateLocationsListHandlerFunc(func(params locations.WeaviateLocationsListParams) middleware.Responder {
		// Show all locations with List function, get max results in URL, otherwise max = 10.
		limit := 10
		locationDatabaseObjects, err := databaseConnector.List("#/paths/locations", limit)

		// TODO: Limit max here
		// TODO: None found
		if err != nil {
			panic(err)
		}

		// NOTE(review): the slice is preallocated to limit entries; when fewer
		// rows are returned, the unfilled tail stays nil — confirm the client
		// tolerates null entries in the response.
		locationsListResponse := &models.LocationsListResponse{}
		locationsListResponse.Locations = make([]*models.Location, limit)
		for i, locationDatabaseObject := range locationDatabaseObjects {
			locationObject := &models.Location{}
			json.Unmarshal([]byte(locationDatabaseObject.Object), locationObject)
			locationsListResponse.Locations[i] = locationObject
		}
		return locations.NewWeaviateLocationsListOK().WithPayload(locationsListResponse)
	})
	api.LocationsWeaviateLocationsPatchHandler = locations.WeaviateLocationsPatchHandlerFunc(func(params locations.WeaviateLocationsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation locations.WeaviateLocationsPatch has not yet been implemented")
	})
	api.LocationsWeaviateLocationsUpdateHandler = locations.WeaviateLocationsUpdateHandlerFunc(func(params locations.WeaviateLocationsUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation locations.WeaviateLocationsUpdate has not yet been implemented")
	})

	/*
	 * HANDLE MODEL MANIFESTS
	 */
	api.ModelManifestsWeaviateModelManifestsCreateHandler = model_manifests.WeaviateModelManifestsCreateHandlerFunc(func(params model_manifests.WeaviateModelManifestsCreateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsCreate has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsDeleteHandler = model_manifests.WeaviateModelManifestsDeleteHandlerFunc(func(params model_manifests.WeaviateModelManifestsDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsDelete has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsGetHandler = model_manifests.WeaviateModelManifestsGetHandlerFunc(func(params model_manifests.WeaviateModelManifestsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsGet has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsListHandler = model_manifests.WeaviateModelManifestsListHandlerFunc(func(params model_manifests.WeaviateModelManifestsListParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsList has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsPatchHandler = model_manifests.WeaviateModelManifestsPatchHandlerFunc(func(params model_manifests.WeaviateModelManifestsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsPatch has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsUpdateHandler = model_manifests.WeaviateModelManifestsUpdateHandlerFunc(func(params model_manifests.WeaviateModelManifestsUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsUpdate has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateCommandDefsHandler = model_manifests.WeaviateModelManifestsValidateCommandDefsHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateCommandDefsParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateCommandDefs has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateComponentsHandler = model_manifests.WeaviateModelManifestsValidateComponentsHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateComponentsParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateComponents has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateDeviceStateHandler = model_manifests.WeaviateModelManifestsValidateDeviceStateHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateDeviceStateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateDeviceState has not yet been implemented")
	})

	api.ServerShutdown = func() {}

	return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// The TLS configuration before HTTPS server starts.
// configureTLS is a hook for customizing the TLS config; no changes are
// currently applied, so the generated defaults are used as-is.
func configureTLS(tlsConfig *tls.Config) {
	// Make all necessary changes to the TLS configuration here.
}
// As soon as server is initialized but not run yet, this function will be called.
// If you need to modify a config, store server instance to stop it individually later, this is the place.
// This function can be called multiple times, depending on the number of serving schemes.
// scheme value will be set accordingly: "http", "https" or "unix"
// configureServer currently applies no per-scheme customization.
func configureServer(s *graceful.Server, scheme, addr string) {
}
// The middleware configuration is for the handler executors. These do not apply to the swagger.json document.
// The middleware executes after routing but before authentication, binding and validation
// setupMiddlewares currently returns the handler unchanged.
func setupMiddlewares(handler http.Handler) http.Handler {
	return handler
}
// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.
// So this is a good place to plug in a panic handling middleware, logging and metrics
// setupGlobalMiddleware currently returns the handler unchanged.
func setupGlobalMiddleware(handler http.Handler) http.Handler {
	return handler
}
gh-25: Add first update/put version for locations.
/* _ _
*__ _____ __ ___ ___ __ _| |_ ___
*\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
* \ V V / __/ (_| |\ V /| | (_| | || __/
* \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
*
* Copyright © 2016 Weaviate. All rights reserved.
* LICENSE: https://github.com/weaviate/weaviate/blob/master/LICENSE
* AUTHOR: Bob van Luijt (bob@weaviate.com)
* See www.weaviate.com for details
* Contact: @weaviate_iot / yourfriends@weaviate.com
*/
/* _ _
*__ _____ __ ___ ___ __ _| |_ ___
*\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
* \ V V / __/ (_| |\ V /| | (_| | || __/
* \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
*
* Copyright © 2016 Weaviate. All rights reserved.
* LICENSE: https://github.com/weaviate/weaviate/blob/master/LICENSE
* AUTHOR: Bob van Luijt (bob@weaviate.com)
* See www.weaviate.com for details
* Contact: @weaviate_iot / yourfriends@weaviate.com
*/
package restapi
import (
"crypto/tls"
"encoding/json"
"net/http"
"strings"
errors "github.com/go-openapi/errors"
runtime "github.com/go-openapi/runtime"
middleware "github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/runtime/yamlpc"
graceful "github.com/tylerb/graceful"
"github.com/weaviate/weaviate/connectors"
"github.com/weaviate/weaviate/connectors/datastore"
"github.com/weaviate/weaviate/connectors/mysql"
"github.com/weaviate/weaviate/models"
"github.com/weaviate/weaviate/restapi/operations"
"github.com/weaviate/weaviate/restapi/operations/acl_entries"
"github.com/weaviate/weaviate/restapi/operations/adapters"
"github.com/weaviate/weaviate/restapi/operations/commands"
"github.com/weaviate/weaviate/restapi/operations/devices"
"github.com/weaviate/weaviate/restapi/operations/events"
"github.com/weaviate/weaviate/restapi/operations/locations"
"github.com/weaviate/weaviate/restapi/operations/model_manifests"
)
// configureFlags registers additional command-line option groups on the API.
// Nothing is configured at the moment; the template below shows the hook.
func configureFlags(api *operations.WeaviateAPI) {
	// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }
}
// configureAPI wires the generated WeaviateAPI: it selects and connects the
// database backend, registers the wire-format consumers/producers, and
// installs one handler per REST operation. Every operation except the
// locations CRUD handlers is still a middleware.NotImplemented stub.
// Returns the fully configured root http.Handler.
func configureAPI(api *operations.WeaviateAPI) http.Handler {
	// configure database connection
	var databaseConnector dbconnector.DatabaseConnector
	// NOTE(review): the backend choice is hard-coded to "datastore"; the
	// mysql branch is unreachable until this comes from a real flag.
	commandLineInput := "datastore"
	if commandLineInput == "datastore" {
		databaseConnector = &datastore.Datastore{}
	} else {
		databaseConnector = &mysql.Mysql{}
	}
	err := databaseConnector.Connect()
	if err != nil {
		// A server without its database is useless; fail startup loudly.
		panic(err)
	}

	// configure the api here
	api.ServeError = errors.ServeError

	api.JSONConsumer = runtime.JSONConsumer()
	api.BinConsumer = runtime.ByteStreamConsumer()
	api.UrlformConsumer = runtime.DiscardConsumer
	api.YamlConsumer = yamlpc.YAMLConsumer()
	api.XMLConsumer = runtime.XMLConsumer()
	api.MultipartformConsumer = runtime.DiscardConsumer
	api.TxtConsumer = runtime.TextConsumer()

	api.JSONProducer = runtime.JSONProducer()
	api.BinProducer = runtime.ByteStreamProducer()
	api.UrlformProducer = runtime.DiscardProducer
	api.YamlProducer = yamlpc.YAMLProducer()
	api.XMLProducer = runtime.XMLProducer()
	api.MultipartformProducer = runtime.DiscardProducer
	api.TxtProducer = runtime.TextProducer()

	/*
	 * HANDLE ACL
	 */
	api.ACLEntriesWeaviateACLEntriesDeleteHandler = acl_entries.WeaviateACLEntriesDeleteHandlerFunc(func(params acl_entries.WeaviateACLEntriesDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesDelete has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesGetHandler = acl_entries.WeaviateACLEntriesGetHandlerFunc(func(params acl_entries.WeaviateACLEntriesGetParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesGet has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesInsertHandler = acl_entries.WeaviateACLEntriesInsertHandlerFunc(func(params acl_entries.WeaviateACLEntriesInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesInsert has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesListHandler = acl_entries.WeaviateACLEntriesListHandlerFunc(func(params acl_entries.WeaviateACLEntriesListParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesList has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesPatchHandler = acl_entries.WeaviateACLEntriesPatchHandlerFunc(func(params acl_entries.WeaviateACLEntriesPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesPatch has not yet been implemented")
	})
	api.ACLEntriesWeaviateACLEntriesUpdateHandler = acl_entries.WeaviateACLEntriesUpdateHandlerFunc(func(params acl_entries.WeaviateACLEntriesUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation acl_entries.WeaviateACLEntriesUpdate has not yet been implemented")
	})

	/*
	 * HANDLE ADAPTERS
	 */
	api.AdaptersWeaviateAdaptersDeleteHandler = adapters.WeaviateAdaptersDeleteHandlerFunc(func(params adapters.WeaviateAdaptersDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersDelete has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersGetHandler = adapters.WeaviateAdaptersGetHandlerFunc(func(params adapters.WeaviateAdaptersGetParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersGet has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersInsertHandler = adapters.WeaviateAdaptersInsertHandlerFunc(func(params adapters.WeaviateAdaptersInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersInsert has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersListHandler = adapters.WeaviateAdaptersListHandlerFunc(func(params adapters.WeaviateAdaptersListParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersList has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersPatchHandler = adapters.WeaviateAdaptersPatchHandlerFunc(func(params adapters.WeaviateAdaptersPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersPatch has not yet been implemented")
	})
	api.AdaptersWeaviateAdaptersUpdateHandler = adapters.WeaviateAdaptersUpdateHandlerFunc(func(params adapters.WeaviateAdaptersUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation adapters.WeaviateAdaptersUpdate has not yet been implemented")
	})

	/*
	 * HANDLE COMMANDS
	 */
	api.CommandsWeaviateCommandsDeleteHandler = commands.WeaviateCommandsDeleteHandlerFunc(func(params commands.WeaviateCommandsDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsDelete has not yet been implemented")
	})
	api.CommandsWeaviateCommandsGetHandler = commands.WeaviateCommandsGetHandlerFunc(func(params commands.WeaviateCommandsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsGet has not yet been implemented")
	})
	api.CommandsWeaviateCommandsGetQueueHandler = commands.WeaviateCommandsGetQueueHandlerFunc(func(params commands.WeaviateCommandsGetQueueParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsGetQueue has not yet been implemented")
	})
	api.CommandsWeaviateCommandsInsertHandler = commands.WeaviateCommandsInsertHandlerFunc(func(params commands.WeaviateCommandsInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsInsert has not yet been implemented")
	})
	api.CommandsWeaviateCommandsListHandler = commands.WeaviateCommandsListHandlerFunc(func(params commands.WeaviateCommandsListParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsList has not yet been implemented")
	})
	api.CommandsWeaviateCommandsPatchHandler = commands.WeaviateCommandsPatchHandlerFunc(func(params commands.WeaviateCommandsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsPatch has not yet been implemented")
	})
	api.CommandsWeaviateCommandsUpdateHandler = commands.WeaviateCommandsUpdateHandlerFunc(func(params commands.WeaviateCommandsUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation commands.WeaviateCommandsUpdate has not yet been implemented")
	})

	/*
	 * HANDLE DEVICES
	 */
	api.DevicesWeaviateDevicesDeleteHandler = devices.WeaviateDevicesDeleteHandlerFunc(func(params devices.WeaviateDevicesDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesDelete has not yet been implemented")
	})
	api.DevicesWeaviateDevicesGetHandler = devices.WeaviateDevicesGetHandlerFunc(func(params devices.WeaviateDevicesGetParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesGet has not yet been implemented")
	})
	api.DevicesWeaviateDevicesInsertHandler = devices.WeaviateDevicesInsertHandlerFunc(func(params devices.WeaviateDevicesInsertParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesInsert has not yet been implemented")
	})
	api.DevicesWeaviateDevicesListHandler = devices.WeaviateDevicesListHandlerFunc(func(params devices.WeaviateDevicesListParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesList has not yet been implemented")
	})
	api.DevicesWeaviateDevicesPatchHandler = devices.WeaviateDevicesPatchHandlerFunc(func(params devices.WeaviateDevicesPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesPatch has not yet been implemented")
	})
	api.DevicesWeaviateDevicesUpdateHandler = devices.WeaviateDevicesUpdateHandlerFunc(func(params devices.WeaviateDevicesUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation devices.WeaviateDevicesUpdate has not yet been implemented")
	})

	/*
	 * HANDLE EVENTS
	 */
	api.EventsWeaviateEventsGetHandler = events.WeaviateEventsGetHandlerFunc(func(params events.WeaviateEventsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsGet has not yet been implemented")
	})
	api.EventsWeaviateEventsListHandler = events.WeaviateEventsListHandlerFunc(func(params events.WeaviateEventsListParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsList has not yet been implemented")
	})
	api.EventsWeaviateEventsRecordDeviceEventsHandler = events.WeaviateEventsRecordDeviceEventsHandlerFunc(func(params events.WeaviateEventsRecordDeviceEventsParams) middleware.Responder {
		return middleware.NotImplemented("operation events.WeaviateEventsRecordDeviceEvents has not yet been implemented")
	})

	/*
	 * HANDLE LOCATIONS
	 */
	api.LocationsWeaviateLocationsDeleteHandler = locations.WeaviateLocationsDeleteHandlerFunc(func(params locations.WeaviateLocationsDeleteParams) middleware.Responder {
		// Delete item from database
		err := databaseConnector.Delete(params.LocationID)
		// TODO: Not found response
		// TODO: Deleted response
		if err != nil {
			panic(err)
		}
		return locations.NewWeaviateLocationsDeleteNoContent()
	})
	api.LocationsWeaviateLocationsGetHandler = locations.WeaviateLocationsGetHandlerFunc(func(params locations.WeaviateLocationsGetParams) middleware.Responder {
		// Get item from database
		result, err := databaseConnector.Get(params.LocationID)
		// Create object to return
		object := &models.Location{}
		// NOTE(review): the Unmarshal error is deliberately ignored; a missing
		// row leaves the object empty, which the ID check below detects.
		json.Unmarshal([]byte(result.Object), &object)
		objectID := strings.TrimSpace(object.ID)
		// If there are no results, the Object ID = 0
		if len(objectID) == 0 && err == nil {
			// return SUCCESS of query but no content.
			return locations.NewWeaviateLocationsGetNotFound()
		}
		// return SUCCESS
		return locations.NewWeaviateLocationsGetOK().WithPayload(object)
	})
	api.LocationsWeaviateLocationsInsertHandler = locations.WeaviateLocationsInsertHandlerFunc(func(params locations.WeaviateLocationsInsertParams) middleware.Responder {
		// TODO: VALIDATE IF THE OBJECT IS OKAY
		validated := true
		// return error
		if !validated {
			return middleware.ResponderFunc(func(rw http.ResponseWriter, p runtime.Producer) {
				rw.WriteHeader(422)
				rw.Write([]byte("{ \"ERROR\": \"There is something wrong with your original POSTed body\" }"))
			})
		}
		// Generate DatabaseObject without JSON-object in it.
		dbObject := *dbconnector.NewDatabaseObject("FOOBAR USER UUID", "#/paths/locations")
		// Set the body-id and generate JSON to save to the database
		params.Body.ID = dbObject.Uuid
		databaseBody, _ := json.Marshal(params.Body)
		dbObject.Object = string(databaseBody)
		// Save to DB, this needs to be a Go routine because we will return an accepted
		go databaseConnector.Add(dbObject)
		// Return SUCCESS (NOTE: this is ACCEPTED, so the databaseConnector.Add should have a go routine)
		return locations.NewWeaviateLocationsInsertAccepted().WithPayload(params.Body)
	})
	api.LocationsWeaviateLocationsListHandler = locations.WeaviateLocationsListHandlerFunc(func(params locations.WeaviateLocationsListParams) middleware.Responder {
		// Show all locations with List function, get max results in URL, otherwise max = 10.
		limit := 10
		locationDatabaseObjects, err := databaseConnector.List("#/paths/locations", limit)
		// TODO: Limit max here
		// TODO: None found
		if err != nil {
			panic(err)
		}
		locationsListResponse := &models.LocationsListResponse{}
		// Size the payload to the rows actually returned. Sizing it to the
		// limit padded the response with nil entries whenever fewer than
		// `limit` locations exist.
		locationsListResponse.Locations = make([]*models.Location, len(locationDatabaseObjects))
		for i, locationDatabaseObject := range locationDatabaseObjects {
			locationObject := &models.Location{}
			json.Unmarshal([]byte(locationDatabaseObject.Object), locationObject)
			locationsListResponse.Locations[i] = locationObject
		}
		return locations.NewWeaviateLocationsListOK().WithPayload(locationsListResponse)
	})
	api.LocationsWeaviateLocationsPatchHandler = locations.WeaviateLocationsPatchHandlerFunc(func(params locations.WeaviateLocationsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation locations.WeaviateLocationsPatch has not yet been implemented")
	})
	api.LocationsWeaviateLocationsUpdateHandler = locations.WeaviateLocationsUpdateHandlerFunc(func(params locations.WeaviateLocationsUpdateParams) middleware.Responder {
		// Get item from database
		UUID := params.LocationID
		dbObject, _ := databaseConnector.Get(UUID)
		// TODO: Add item not found 404
		// TODO: VALIDATE IF THE OBJECT IS OKAY
		validated := true
		// return error
		if !validated {
			return middleware.ResponderFunc(func(rw http.ResponseWriter, p runtime.Producer) {
				rw.WriteHeader(422)
				rw.Write([]byte("{ \"ERROR\": \"There is something wrong with your original POSTed body\" }"))
			})
		}
		// Set the body-id and generate JSON to save to the database
		databaseBody, _ := json.Marshal(params.Body)
		dbObject.Object = string(databaseBody)
		dbObject.SetTimeToNow()
		// Save to DB, this needs to be a Go routine because we will return an accepted
		go databaseConnector.Add(dbObject)
		// Return SUCCESS (NOTE: this is ACCEPTED, so the databaseConnector.Add should have a go routine)
		return locations.NewWeaviateLocationsUpdateOK().WithPayload(params.Body)
	})

	/*
	 * HANDLE MODEL MANIFESTS
	 */
	api.ModelManifestsWeaviateModelManifestsCreateHandler = model_manifests.WeaviateModelManifestsCreateHandlerFunc(func(params model_manifests.WeaviateModelManifestsCreateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsCreate has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsDeleteHandler = model_manifests.WeaviateModelManifestsDeleteHandlerFunc(func(params model_manifests.WeaviateModelManifestsDeleteParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsDelete has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsGetHandler = model_manifests.WeaviateModelManifestsGetHandlerFunc(func(params model_manifests.WeaviateModelManifestsGetParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsGet has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsListHandler = model_manifests.WeaviateModelManifestsListHandlerFunc(func(params model_manifests.WeaviateModelManifestsListParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsList has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsPatchHandler = model_manifests.WeaviateModelManifestsPatchHandlerFunc(func(params model_manifests.WeaviateModelManifestsPatchParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsPatch has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsUpdateHandler = model_manifests.WeaviateModelManifestsUpdateHandlerFunc(func(params model_manifests.WeaviateModelManifestsUpdateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsUpdate has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateCommandDefsHandler = model_manifests.WeaviateModelManifestsValidateCommandDefsHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateCommandDefsParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateCommandDefs has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateComponentsHandler = model_manifests.WeaviateModelManifestsValidateComponentsHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateComponentsParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateComponents has not yet been implemented")
	})
	api.ModelManifestsWeaviateModelManifestsValidateDeviceStateHandler = model_manifests.WeaviateModelManifestsValidateDeviceStateHandlerFunc(func(params model_manifests.WeaviateModelManifestsValidateDeviceStateParams) middleware.Responder {
		return middleware.NotImplemented("operation model_manifests.WeaviateModelManifestsValidateDeviceState has not yet been implemented")
	})

	api.ServerShutdown = func() {}
	return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// The TLS configuration before HTTPS server starts.
// Intentionally empty: no TLS customization is applied yet.
func configureTLS(tlsConfig *tls.Config) {
	// Make all necessary changes to the TLS configuration here.
}
// As soon as server is initialized but not run yet, this function will be called.
// If you need to modify a config, store server instance to stop it individually later, this is the place.
// This function can be called multiple times, depending on the number of serving schemes.
// scheme value will be set accordingly: "http", "https" or "unix"
// Intentionally empty: no per-scheme server tweaks are needed yet.
func configureServer(s *graceful.Server, scheme, addr string) {
}
// The middleware configuration is for the handler executors. These do not apply to the swagger.json document.
// The middleware executes after routing but before authentication, binding and validation
func setupMiddlewares(handler http.Handler) http.Handler {
	// No per-operation middleware installed yet; the chain is returned unchanged.
	return handler
}
// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.
// So this is a good place to plug in a panic handling middleware, logging and metrics
func setupGlobalMiddleware(handler http.Handler) http.Handler {
	// No global middleware installed yet; the handler is passed through as-is.
	return handler
}
|
/*
This package contains the blog's models and holds the database connection globally but as
an unexported value.
*/
package models
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"log"
)
// db is the package-wide database handle, shared (unexported) by every model
// function in this package. It is set by ConnectDb and closed by CloseDb.
var db *sql.DB

// Parameterized SQL statements used by the models. Queries joining post_tags
// with LEFT JOIN repeat the post columns once per tag (NULL tag when untagged).
const (
	// One post (with author and tags) by primary key.
	SQL_POST_BY_ID = `
	SELECT idPost, slug, title, abstract, body, date, idUser, users.name, users.email, draft, tag
	FROM posts
	LEFT JOIN post_tags USING(idPost)
	INNER JOIN users USING(idUser)
	WHERE idPost=?`
	// One post (with author and tags) by its URL slug.
	SQL_POST_BY_SLUG = `
	SELECT idPost, slug, title, abstract, body, date, idUser, users.name, users.email, draft, tag
	FROM posts
	LEFT JOIN post_tags USING(idPost)
	INNER JOIN users USING(idUser)
	WHERE slug=?`
	// All posts by one author, drafts first, newest first.
	SQL_POSTS_BY_USER = `
	SELECT idPost, title, slug, date, draft
	FROM posts
	WHERE idUser=?
	ORDER BY draft DESC, date DESC`
	// Published (non-draft) posts carrying a given tag, newest first.
	SQL_POSTS_BY_TAG = `
	SELECT slug, title, abstract, date, idUser, users.name
	FROM posts
	INNER JOIN users USING(idUser)
	LEFT JOIN post_tags USING(idPost)
	WHERE draft=false AND post_tags.tag=?
	ORDER BY date DESC`
	// Most recent published posts, capped by the LIMIT placeholder.
	SQL_ALL_POSTS = `
	SELECT slug, title, abstract, date, idUser, users.name
	FROM posts
	INNER JOIN users USING(idUser)
	WHERE draft=false
	ORDER BY date DESC LIMIT ?`
	// Every distinct tag (including tags used only by drafts).
	SQL_ALL_TAGS = `SELECT DISTINCT tag FROM post_tags`
	SQL_INSERT_POST = `
	INSERT INTO posts (slug, title, abstract, body, idUser, draft)
	VALUES (?, ?, ?, ?, ?, ?)`
	// INSERT IGNORE: re-adding an existing (idPost, tag) pair is a no-op.
	SQL_INSERT_TAGS = `
	INSERT IGNORE INTO post_tags (idPost, tag)
	VALUES (?, ?)`
	SQL_REMOVE_TAGS = `DELETE from post_tags WHERE idPost=?`
	SQL_DELETE_POST = `DELETE from posts WHERE idPost=?`
	SQL_UPDATE_POST = `
	UPDATE posts SET slug=?, title=?, abstract=?, body=?, idUser=?, draft=?
	WHERE idPost=?`
	SQL_USER_BY_ID = `
	SELECT name, email
	FROM users
	WHERE idUser=?`
	// NOTE(review): password is compared directly in SQL; presumably the
	// stored value is already hashed by the caller — verify.
	SQL_USER_AUTH = `
	SELECT name, idUser
	FROM users
	WHERE email=? AND password=?`
)
// Creates and tests database connection. address is a go-sql-driver/mysql
// DSN. Any failure is fatal; the underlying error (previously discarded) is
// now included in the log output so startup failures are diagnosable.
func ConnectDb(address string) {
	var err error
	db, err = sql.Open("mysql", address)
	if err != nil {
		log.Fatalf("Error opening DB: %v", err)
	}
	// sql.Open only validates its arguments; Ping establishes a real
	// connection so a bad address fails here rather than on the first query.
	err = db.Ping()
	if err != nil {
		log.Fatalf("Error connecting to DB: %v", err)
	}
}
// Closes database connection
// (the Close error is intentionally ignored; called at shutdown).
func CloseDb() {
	db.Close()
}
No longer showing tags from draft posts
/*
This package contains the blog's models and holds the database connection globally but as
an unexported value.
*/
package models
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"log"
)
// db is the package-wide database handle, shared (unexported) by every model
// function in this package. It is set by ConnectDb and closed by CloseDb.
var db *sql.DB

// Parameterized SQL statements used by the models. Queries joining post_tags
// with LEFT JOIN repeat the post columns once per tag (NULL tag when untagged).
const (
	// One post (with author and tags) by primary key.
	SQL_POST_BY_ID = `
	SELECT idPost, slug, title, abstract, body, date, idUser, users.name, users.email, draft, tag
	FROM posts
	LEFT JOIN post_tags USING(idPost)
	INNER JOIN users USING(idUser)
	WHERE idPost=?`
	// One post (with author and tags) by its URL slug.
	SQL_POST_BY_SLUG = `
	SELECT idPost, slug, title, abstract, body, date, idUser, users.name, users.email, draft, tag
	FROM posts
	LEFT JOIN post_tags USING(idPost)
	INNER JOIN users USING(idUser)
	WHERE slug=?`
	// All posts by one author, drafts first, newest first.
	SQL_POSTS_BY_USER = `
	SELECT idPost, title, slug, date, draft
	FROM posts
	WHERE idUser=?
	ORDER BY draft DESC, date DESC`
	// Published (non-draft) posts carrying a given tag, newest first.
	SQL_POSTS_BY_TAG = `
	SELECT slug, title, abstract, date, idUser, users.name
	FROM posts
	INNER JOIN users USING(idUser)
	LEFT JOIN post_tags USING(idPost)
	WHERE draft=false AND post_tags.tag=?
	ORDER BY date DESC`
	// Most recent published posts, capped by the LIMIT placeholder.
	SQL_ALL_POSTS = `
	SELECT slug, title, abstract, date, idUser, users.name
	FROM posts
	INNER JOIN users USING(idUser)
	WHERE draft=false
	ORDER BY date DESC LIMIT ?`
	// Distinct tags, restricted to published posts so draft-only tags stay hidden.
	SQL_ALL_TAGS = `SELECT DISTINCT tag FROM post_tags INNER JOIN posts USING(idPost) WHERE posts.draft=false`
	SQL_INSERT_POST = `
	INSERT INTO posts (slug, title, abstract, body, idUser, draft)
	VALUES (?, ?, ?, ?, ?, ?)`
	// INSERT IGNORE: re-adding an existing (idPost, tag) pair is a no-op.
	SQL_INSERT_TAGS = `
	INSERT IGNORE INTO post_tags (idPost, tag)
	VALUES (?, ?)`
	SQL_REMOVE_TAGS = `DELETE from post_tags WHERE idPost=?`
	SQL_DELETE_POST = `DELETE from posts WHERE idPost=?`
	SQL_UPDATE_POST = `
	UPDATE posts SET slug=?, title=?, abstract=?, body=?, idUser=?, draft=?
	WHERE idPost=?`
	SQL_USER_BY_ID = `
	SELECT name, email
	FROM users
	WHERE idUser=?`
	// NOTE(review): password is compared directly in SQL; presumably the
	// stored value is already hashed by the caller — verify.
	SQL_USER_AUTH = `
	SELECT name, idUser
	FROM users
	WHERE email=? AND password=?`
)
// Creates and tests database connection. address is a go-sql-driver/mysql
// DSN. Any failure is fatal; the underlying error (previously discarded) is
// now included in the log output so startup failures are diagnosable.
func ConnectDb(address string) {
	var err error
	db, err = sql.Open("mysql", address)
	if err != nil {
		log.Fatalf("Error opening DB: %v", err)
	}
	// sql.Open only validates its arguments; Ping establishes a real
	// connection so a bad address fails here rather than on the first query.
	err = db.Ping()
	if err != nil {
		log.Fatalf("Error connecting to DB: %v", err)
	}
}
// Closes database connection
// (the Close error is intentionally ignored; called at shutdown).
func CloseDb() {
	db.Close()
}
|
/*
Copyright 2015 The Httpgzip Authors. See the AUTHORS file at the
top-level directory of this distribution and at
<https://xi2.org/x/httpgzip/m/AUTHORS>.
This file is part of Httpgzip.
Httpgzip is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Httpgzip is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with Httpgzip. If not, see <https://www.gnu.org/licenses/>.
*/
package httpgzip_test
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"path/filepath"
"strings"
"testing"
"xi2.org/x/httpgzip"
)
// defComp is the compression level used by tests that don't vary the level.
const defComp = httpgzip.DefaultCompression

// fsRequestResponse describes one table-driven case for TestFileServer:
// which file to request, with which headers, and what to expect back.
type fsRequestResponse struct {
	reqFile    string   // file under testdata/ to request
	reqHeaders []string // request headers as "Key: Value" strings
	resGzip    bool     // whether the response body must be gzipped
	resCode    int      // expected HTTP status code
	resHeaders []string // expected response headers; "Key: " (empty value) asserts the header is unset
}
// fsTests is the table consumed by TestFileServer. Header expectations with
// an empty value (e.g. "Content-Encoding: ") assert the header is absent;
// "Content-Length" with a value is inverse-matched when the body is gzipped
// (see the special case in TestFileServer).
var fsTests = []fsRequestResponse{
	// test downloading of all test files in testdata with/without
	// requesting gzip encoding
	{
		reqFile:    "0bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: gzip",
			"Content-Length: 512", // inverse match i.e. look for != 512
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: gzip",
			"Content-Length: ",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	// test Accept-Encoding parsing
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip;q=0.5"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip;q=0"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: identity;q=0"},
		resGzip:    false,
		resCode:    http.StatusNotAcceptable,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: identity;q=0.5, gzip;q=0.4"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *;q=0"},
		resGzip:    false,
		resCode:    http.StatusNotAcceptable,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *,gzip;q=0"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: deflate"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	// test gzip encoding of non compressible files when forced to by
	// Accept-Encoding header
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: []string{"Accept-Encoding: identity;q=0,gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	// test websocket requests are not gzipped
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip", "Sec-WebSocket-Key: XX"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	// test Range requests are ignored when requesting gzip encoding
	// and actioned otherwise
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip", "Range: bytes=500-"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Accept-Ranges: ",
			"Content-Length: ",
			"Content-Range: ",
		},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Range: bytes=500-"},
		resGzip:    false,
		resCode:    http.StatusPartialContent,
		resHeaders: []string{
			"Accept-Ranges: bytes",
			"Content-Length: 3596",
			"Content-Range: bytes 500-4095/4096",
		},
	},
}
// parseHeader returns a header key and value from a "Key: Value" string.
// Leading spaces are trimmed from the value. A string containing no colon is
// returned whole as the key with an empty value; the original indexed
// unconditionally and would panic (header[:-1]) on such input.
func parseHeader(header string) (key, value string) {
	i := strings.IndexByte(header, ':')
	if i < 0 {
		return header, ""
	}
	key = header[:i]
	value = strings.TrimLeft(header[i+1:], " ")
	return
}
// isGzip returns true if the slice b is gzipped data
// (i.e. it starts with the gzip magic bytes 0x1f 0x8b).
func isGzip(b []byte) bool {
	return len(b) >= 2 && b[0] == 0x1f && b[1] == 0x8b
}
// getPath starts a temporary test server using handler h (wrapped
// with httpgzip with the given compression level) and issues a
// request for path. The request has the specified headers
// added. getPath returns the http.Response (with Body closed) and the
// result of reading the response Body.
func getPath(t *testing.T, h http.Handler, level int, path string, headers []string) (*http.Response, []byte) {
	gzh, _ := httpgzip.NewHandlerLevel(h, nil, level)
	srv := httptest.NewServer(gzh)
	defer srv.Close()
	req, err := http.NewRequest("GET", srv.URL+path, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, header := range headers {
		key, value := parseHeader(header)
		req.Header.Add(key, value)
	}
	// explicitly disable automatic sending of "Accept-Encoding"
	client := http.Client{
		Transport: &http.Transport{DisableCompression: true},
	}
	res, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	return res, body
}
// TestFileServer runs all tests in fsTests against an http.FileServer
// serving the testdata directory.
func TestFileServer(t *testing.T) {
	h := http.FileServer(http.Dir("testdata"))
	for _, fst := range fsTests {
		res, body := getPath(t, h, defComp, "/"+fst.reqFile, fst.reqHeaders)
		if res.StatusCode != fst.resCode {
			t.Fatalf(
				"\nfile %s, request headers %v\n"+
					"expected status code %d, got %d\n",
				fst.reqFile, fst.reqHeaders, fst.resCode, res.StatusCode)
		}
		if isGzip(body) != fst.resGzip {
			t.Fatalf(
				"\nfile %s, request headers %v\n"+
					"expected gzip status %v, got %v\n",
				fst.reqFile, fst.reqHeaders, fst.resGzip, isGzip(body))
		}
		// Assert each expected header; an empty expected value asserts the
		// header is absent (Header.Get returns "" for unset headers).
		for _, h := range fst.resHeaders {
			k, v := parseHeader(h)
			if k == "Content-Length" && v != "" && isGzip(body) {
				// Content-Length: XXX is special cased. if body is
				// gzipped fail on match instead of a non-match. But
				// still check a value is returned.
				if res.Header.Get(k) == v || res.Header.Get(k) == "" {
					t.Fatalf(
						"\nfile %s, request headers %v\n"+
							"unexpected response header %s: %s\n",
						fst.reqFile, fst.reqHeaders, k, res.Header.Get(k))
				}
			} else {
				if res.Header.Get(k) != v {
					t.Fatalf(
						"\nfile %s, request headers %v\n"+
							"expected response header %s: %s, got %s: %s\n",
						fst.reqFile, fst.reqHeaders,
						k, v, k, res.Header.Get(k))
				}
			}
		}
	}
}
// TestDetectContentType creates a handler serving a text file which
// does not set Content-Type, wraps it with httpgzip, and requests
// that file with Accept-Encoding: gzip. It checks that httpgzip sets
// Content-Type and it is not left to the standard library (which
// would set it to "application/x-gzip").
func TestDetectContentType(t *testing.T) {
data, err := ioutil.ReadFile(
filepath.Join("testdata", "4096bytes.txt"))
if err != nil {
t.Fatal(err)
}
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, _ = io.Copy(w, bytes.NewBuffer(data))
})
res, _ := getPath(t, h, defComp, "/", []string{"Accept-Encoding: gzip"})
expected := "text/plain; charset=utf-8"
if res.Header.Get("Content-Type") != expected {
t.Fatalf(
"\nexpected Content-Type %s, got %s\n",
expected, res.Header.Get("Content-Type"))
}
}
// TestPresetContentEncoding creates a handler serving a text file
// which sets Content-Encoding, wraps it with httpgzip, and requests
// that file with Accept-Encoding: gzip. It checks that httpgzip does
// not mess with Content-Encoding and serves the file without
// compression as expected.
func TestPresetContentEncoding(t *testing.T) {
	data, err := ioutil.ReadFile(
		filepath.Join("testdata", "4096bytes.txt"))
	if err != nil {
		t.Fatal(err)
	}
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// A pre-set (even nonsense) encoding must pass through untouched
		// and suppress gzipping.
		w.Header().Set("Content-Encoding", "text/foobar")
		_, _ = io.Copy(w, bytes.NewBuffer(data))
	})
	res, body := getPath(t, h, defComp, "/", []string{"Accept-Encoding: gzip"})
	expectedEnc := "text/foobar"
	if res.Header.Get("Content-Encoding") != expectedEnc {
		t.Fatalf(
			"\nexpected Content-Encoding %s, got %s\n",
			expectedEnc, res.Header.Get("Content-Encoding"))
	}
	if isGzip(body) {
		t.Fatalf(
			"\nexpected non-gzipped body, got gzipped\n")
	}
}
// TestCompressionLevels creates a handler serving a text file and
// requests that file with Accept-Encoding: gzip with different
// compression levels set. It checks that the sizes of the responses
// vary.
func TestCompressionLevels(t *testing.T) {
	contents, err := ioutil.ReadFile(
		filepath.Join("testdata", "4096bytes.txt"))
	if err != nil {
		t.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = io.Copy(w, bytes.NewBuffer(contents))
	})
	// Each distinct compression level should yield a body of a size
	// not seen before.
	seen := map[int]struct{}{}
	levels := []int{
		httpgzip.NoCompression,
		httpgzip.BestSpeed,
		httpgzip.BestCompression,
	}
	for _, level := range levels {
		_, body :=
			getPath(t, handler, level, "/", []string{"Accept-Encoding: gzip"})
		if _, dup := seen[len(body)]; dup {
			t.Fatalf(
				"\nlevel %d, body of length %d already received\n",
				level, len(body))
		}
		seen[len(body)] = struct{}{}
	}
}
Use cleaner "non-match" syntax in tests
/*
Copyright 2015 The Httpgzip Authors. See the AUTHORS file at the
top-level directory of this distribution and at
<https://xi2.org/x/httpgzip/m/AUTHORS>.
This file is part of Httpgzip.
Httpgzip is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Httpgzip is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with Httpgzip. If not, see <https://www.gnu.org/licenses/>.
*/
package httpgzip_test
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"path/filepath"
"strings"
"testing"
"xi2.org/x/httpgzip"
)
// defComp is the compression level used by tests that do not vary it.
const defComp = httpgzip.DefaultCompression
// fsRequestResponse describes one file-server test case: the file to
// request, the request headers to send, and the expected response.
type fsRequestResponse struct {
	reqFile    string   // file under testdata to request
	reqHeaders []string // "Key: Value" request headers to add
	resGzip    bool     // whether the response body is expected to be gzipped
	resCode    int      // expected HTTP status code
	resHeaders []string // expected "Key: Value" response headers
}
// fsTests drives TestFileServer. Each entry is requested from an
// http.FileServer wrapped with httpgzip. An expected header value of
// the form "value|NOMATCH" asserts the header is present and NOT equal
// to "value"; an empty expected value asserts the header is absent.
var fsTests = []fsRequestResponse{
	// test downloading of all test files in testdata with/without
	// requesting gzip encoding
	{
		reqFile:    "0bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "0bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 0",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "511bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 511",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: gzip",
			"Content-Length: 512|NOMATCH", // look for value != 512
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "512bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 512",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: nil,
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: bytes",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: text/plain; charset=utf-8",
			"Content-Encoding: gzip",
			"Content-Length: ",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: []string{"Accept-Encoding: gzip"},
		resGzip:    false,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Content-Type: application/octet-stream",
			"Content-Encoding: ",
			"Content-Length: 4096",
			"Accept-Ranges: ",
			"Vary: Accept-Encoding"},
	},
	// test Accept-Encoding parsing
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip;q=0.5"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip;q=0"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: identity;q=0"},
		resGzip:    false,
		resCode:    http.StatusNotAcceptable,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: identity;q=0.5, gzip;q=0.4"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *;q=0"},
		resGzip:    false,
		resCode:    http.StatusNotAcceptable,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: *,gzip;q=0"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: deflate"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	// test gzip encoding of non compressible files when forced to by
	// Accept-Encoding header
	{
		reqFile:    "4096bytes.bin",
		reqHeaders: []string{"Accept-Encoding: identity;q=0,gzip"},
		resGzip:    true,
		resCode:    http.StatusOK,
	},
	// test websocket requests are not gzipped
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip", "Sec-WebSocket-Key: XX"},
		resGzip:    false,
		resCode:    http.StatusOK,
	},
	// test Range requests are ignored when requesting gzip encoding
	// and actioned otherwise
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Accept-Encoding: gzip", "Range: bytes=500-"},
		resGzip:    true,
		resCode:    http.StatusOK,
		resHeaders: []string{
			"Accept-Ranges: ",
			"Content-Length: ",
			"Content-Range: ",
		},
	},
	{
		reqFile:    "4096bytes.txt",
		reqHeaders: []string{"Range: bytes=500-"},
		resGzip:    false,
		resCode:    http.StatusPartialContent,
		resHeaders: []string{
			"Accept-Ranges: bytes",
			"Content-Length: 3596",
			"Content-Range: bytes 500-4095/4096",
		},
	},
}
// parseHeader returns a header key and value from a "Key: Value"
// string. Leading spaces in the value are stripped. If the string
// contains no colon the whole string is returned as the key with an
// empty value (previously IndexByte returned -1 and header[:i]
// panicked with a slice-bounds error).
func parseHeader(header string) (key, value string) {
	i := strings.IndexByte(header, ':')
	if i < 0 {
		// Malformed header line: treat it all as the key.
		return header, ""
	}
	key = header[:i]
	value = strings.TrimLeft(header[i+1:], " ")
	return
}
// isGzip reports whether b starts with the two-byte gzip magic number
// (0x1f 0x8b).
func isGzip(b []byte) bool {
	return len(b) >= 2 && b[0] == 0x1f && b[1] == 0x8b
}
// getPath starts a temporary test server using handler h (wrapped
// with httpgzip with the given compression level) and issues a
// request for path. The request has the specified headers
// added. getPath returns the http.Response (with Body closed) and the
// result of reading the response Body.
func getPath(t *testing.T, h http.Handler, level int, path string, headers []string) (*http.Response, []byte) {
	wrapped, _ := httpgzip.NewHandlerLevel(h, nil, level)
	srv := httptest.NewServer(wrapped)
	defer srv.Close()
	// The client must not add "Accept-Encoding" itself; the tests
	// control that header explicitly.
	client := http.Client{
		Transport: &http.Transport{DisableCompression: true},
	}
	req, err := http.NewRequest("GET", srv.URL+path, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, hdr := range headers {
		req.Header.Add(parseHeader(hdr))
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	return resp, data
}
// TestFileServer runs all tests in fsTests against an http.FileServer
// serving the testdata directory.
func TestFileServer(t *testing.T) {
	fileServer := http.FileServer(http.Dir("testdata"))
	for _, tc := range fsTests {
		res, body := getPath(t, fileServer, defComp, "/"+tc.reqFile, tc.reqHeaders)
		if res.StatusCode != tc.resCode {
			t.Fatalf(
				"\nfile %s, request headers %v\n"+
					"expected status code %d, got %d\n",
				tc.reqFile, tc.reqHeaders, tc.resCode, res.StatusCode)
		}
		if isGzip(body) != tc.resGzip {
			t.Fatalf(
				"\nfile %s, request headers %v\n"+
					"expected gzip status %v, got %v\n",
				tc.reqFile, tc.reqHeaders, tc.resGzip, isGzip(body))
		}
		for _, hdr := range tc.resHeaders {
			k, v := parseHeader(hdr)
			got := res.Header.Get(k)
			if strings.HasSuffix(v, "|NOMATCH") {
				v = strings.TrimSuffix(v, "|NOMATCH")
				// fail on match or empty value instead of a non-match
				if got == v || got == "" {
					t.Fatalf(
						"\nfile %s, request headers %v\n"+
							"unexpected response header %s: %s\n",
						tc.reqFile, tc.reqHeaders, k, got)
				}
				continue
			}
			if got != v {
				t.Fatalf(
					"\nfile %s, request headers %v\n"+
						"expected response header %s: %s, got %s: %s\n",
					tc.reqFile, tc.reqHeaders,
					k, v, k, got)
			}
		}
	}
}
// TestDetectContentType creates a handler serving a text file which
// does not set Content-Type, wraps it with httpgzip, and requests
// that file with Accept-Encoding: gzip. It checks that httpgzip sets
// Content-Type and it is not left to the standard library (which
// would set it to "application/x-gzip").
func TestDetectContentType(t *testing.T) {
	contents, err := ioutil.ReadFile(
		filepath.Join("testdata", "4096bytes.txt"))
	if err != nil {
		t.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = io.Copy(w, bytes.NewBuffer(contents))
	})
	resp, _ := getPath(t, handler, defComp, "/", []string{"Accept-Encoding: gzip"})
	want := "text/plain; charset=utf-8"
	if got := resp.Header.Get("Content-Type"); got != want {
		t.Fatalf(
			"\nexpected Content-Type %s, got %s\n",
			want, got)
	}
}
// TestPresetContentEncoding creates a handler serving a text file
// which sets Content-Encoding, wraps it with httpgzip, and requests
// that file with Accept-Encoding: gzip. It checks that httpgzip does
// not mess with Content-Encoding and serves the file without
// compression as expected.
func TestPresetContentEncoding(t *testing.T) {
	contents, err := ioutil.ReadFile(
		filepath.Join("testdata", "4096bytes.txt"))
	if err != nil {
		t.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Encoding", "text/foobar")
		_, _ = io.Copy(w, bytes.NewBuffer(contents))
	})
	resp, body := getPath(t, handler, defComp, "/", []string{"Accept-Encoding: gzip"})
	want := "text/foobar"
	if got := resp.Header.Get("Content-Encoding"); got != want {
		t.Fatalf(
			"\nexpected Content-Encoding %s, got %s\n",
			want, got)
	}
	if isGzip(body) {
		t.Fatalf(
			"\nexpected non-gzipped body, got gzipped\n")
	}
}
// TestCompressionLevels creates a handler serving a text file and
// requests that file with Accept-Encoding: gzip with different
// compression levels set. It checks that the sizes of the responses
// vary.
func TestCompressionLevels(t *testing.T) {
	contents, err := ioutil.ReadFile(
		filepath.Join("testdata", "4096bytes.txt"))
	if err != nil {
		t.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = io.Copy(w, bytes.NewBuffer(contents))
	})
	// Each distinct compression level should yield a body of a size
	// not seen before.
	seen := map[int]struct{}{}
	levels := []int{
		httpgzip.NoCompression,
		httpgzip.BestSpeed,
		httpgzip.BestCompression,
	}
	for _, level := range levels {
		_, body :=
			getPath(t, handler, level, "/", []string{"Accept-Encoding: gzip"})
		if _, dup := seen[len(body)]; dup {
			t.Fatalf(
				"\nlevel %d, body of length %d already received\n",
				level, len(body))
		}
		seen[len(body)] = struct{}{}
	}
}
|
package mini
import (
"github.com/stretchr/testify/assert"
"os"
"path"
"strings"
"testing"
)
// TestSimpleIniFile writes a small ini file to a temp location, loads
// it from disk and checks every supported value type parses correctly.
func TestSimpleIniFile(t *testing.T) {
	simpleIni := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
#comment
; comment`
	// Named iniPath (not "filepath") to avoid shadowing the stdlib
	// package name.
	iniPath := path.Join(os.TempDir(), "simpleini.txt")
	f, err := os.Create(iniPath)
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(iniPath)
	if _, err := f.WriteString(simpleIni); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	cfg, err := LoadConfiguration(iniPath)
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.String("second", ""), "beta", "Read value of second wrong")
	assert.Equal(t, cfg.String("third", ""), "gamma bamma", "Read value of third wrong")
	assert.Equal(t, cfg.String("fourth", ""), "delta", "Read value of fourth wrong")
	assert.Equal(t, cfg.Integer("int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, cfg.Boolean("true", false), true, "Read true wrong")
	assert.Equal(t, cfg.Boolean("false", true), false, "Read false wrong")
	assert.Equal(t, len(cfg.Keys()), 8, "Simple ini contains 8 fields")
}
// TestSimpleIniFileFromReader loads the same simple ini content via an
// io.Reader and checks every supported value type parses correctly.
func TestSimpleIniFileFromReader(t *testing.T) {
	simpleIni := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
#comment
; comment`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.String("second", ""), "beta", "Read value of second wrong")
	assert.Equal(t, cfg.String("third", ""), "gamma bamma", "Read value of third wrong")
	assert.Equal(t, cfg.String("fourth", ""), "delta", "Read value of fourth wrong")
	assert.Equal(t, cfg.Integer("int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, cfg.Boolean("true", false), true, "Read true wrong")
	assert.Equal(t, cfg.Boolean("false", true), false, "Read false wrong")
	assert.Equal(t, len(cfg.Keys()), 8, "Simple ini contains 8 fields")
}
// TestCaseInsensitive checks that keys match regardless of the case
// used in the ini file or in the lookup.
func TestCaseInsensitive(t *testing.T) {
	simpleIni := `fIrst=alpha
SECOND=beta
Third="gamma bamma"
FourTh = 'delta'`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Case insensitive configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.String("second", ""), "beta", "Read value of second wrong")
	assert.Equal(t, cfg.String("THIRD", ""), "gamma bamma", "Read value of third wrong")
	assert.Equal(t, cfg.String("fourth", ""), "delta", "Read value of fourth wrong")
	assert.Equal(t, len(cfg.Keys()), 4, "Case ins ini contains 4 fields")
}
// TestArrayOfStrings checks that repeated "key[]=" entries accumulate
// into a string array and a plain key yields a one-element array.
func TestArrayOfStrings(t *testing.T) {
	simpleIni := `key[]=one
key[]=two
noarray=three`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")
	got := cfg.Strings("key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], "one", "Read value of first wrong")
	assert.Equal(t, got[1], "two", "Read value of second wrong")
	got = cfg.Strings("noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], "three", "Read value of noarray wrong")
	assert.Equal(t, len(cfg.Keys()), 2, "StringArray test contains 2 fields")
}
// TestArrayOfIntegers checks integer array parsing for repeated
// "key[]=" entries and single plain keys.
func TestArrayOfIntegers(t *testing.T) {
	simpleIni := `key[]=1
key[]=2
noarray=3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")
	got := cfg.Integers("key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], 1, "Read value of first wrong")
	assert.Equal(t, got[1], 2, "Read value of second wrong")
	got = cfg.Integers("noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], 3, "Read value of noarray wrong")
	assert.Equal(t, len(cfg.Keys()), 2, "IntArray test contains 2 fields")
}
// TestArrayOfFloats checks float array parsing for repeated "key[]="
// entries and single plain keys.
func TestArrayOfFloats(t *testing.T) {
	simpleIni := `key[]=1.1
key[]=2.2
noarray=3.3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")
	got := cfg.Floats("key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], 1.1, "Read value of first wrong")
	assert.Equal(t, got[1], 2.2, "Read value of second wrong")
	got = cfg.Floats("noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], 3.3, "Read value of noarray wrong")
	assert.Equal(t, len(cfg.Keys()), 2, "FloatArray test contains 2 fields")
}
// TestSectionedIniFile checks that values in the default (unnamed)
// section and in a named [section] are read independently.
func TestSectionedIniFile(t *testing.T) {
	simpleIni := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
[section]
first=raz
second=dba
int=124
float=1222.7
true=false
false=true
#comment
; comment`
	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Sectioned configuration should load without error.")
	assert.Equal(t, config.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, config.String("second", ""), "beta", "Read value of second wrong")
	assert.Equal(t, config.String("third", ""), "gamma bamma", "Read value of third wrong")
	assert.Equal(t, config.String("fourth", ""), "delta", "Read value of fourth wrong")
	assert.Equal(t, config.Integer("int", 0), 32, "Read value of int wrong")
	assert.Equal(t, config.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, config.Boolean("true", false), true, "Read true wrong")
	assert.Equal(t, config.Boolean("false", true), false, "Read false wrong")
	assert.Equal(t, config.StringFromSection("section", "first", ""), "raz", "Read value of first from section wrong")
	assert.Equal(t, config.StringFromSection("section", "second", ""), "dba", "Read value of second from section wrong")
	assert.Equal(t, config.IntegerFromSection("section", "int", 0), 124, "Read value of int in section wrong")
	assert.Equal(t, config.FloatFromSection("section", "float", 0), 1222.7, "Read value of float in section wrong")
	assert.Equal(t, config.BooleanFromSection("section", "true", true), false, "Read true in section wrong")
	assert.Equal(t, config.BooleanFromSection("section", "false", false), true, "Read false in section wrong")
	// Messages fixed: the global section defines 8 keys and [section]
	// defines 6 keys; the old messages said 6 and 4 while asserting
	// 8 and 6.
	assert.Equal(t, len(config.Keys()), 8, "Section ini contains 8 fields")
	assert.Equal(t, len(config.KeysForSection("section")), 6, "Section in ini contains 6 fields")
}
// TestArrayOfStringsInSection checks that string arrays inside a named
// section are read from that section, not from the global keys.
func TestArrayOfStringsInSection(t *testing.T) {
	simpleIni := `key=nope
noarray=nope
[section]
key[]=one
key[]=two
noarray=three`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	got := cfg.StringsFromSection("section", "key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], "one", "Read value of first wrong")
	assert.Equal(t, got[1], "two", "Read value of second wrong")
	got = cfg.StringsFromSection("section", "noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], "three", "Read value of noarray wrong")
	assert.Equal(t, len(cfg.Keys()), 2, "StringArray section test contains 2 fields")
}
// TestArrayOfIntegersInSection checks that integer arrays inside a
// named section are read from that section, not from the global keys.
func TestArrayOfIntegersInSection(t *testing.T) {
	simpleIni := `key=nope
noarray=nope
[section]
key[]=1
key[]=2
noarray=3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	got := cfg.IntegersFromSection("section", "key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], 1, "Read value of first wrong")
	assert.Equal(t, got[1], 2, "Read value of second wrong")
	got = cfg.IntegersFromSection("section", "noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], 3, "Read value of noarray wrong")
	assert.Equal(t, len(cfg.KeysForSection("section")), 2, "IntArray section test contains 2 fields")
}
// TestArrayOfFloatsInSection checks that float arrays inside a named
// section are read from that section, not from the global keys.
func TestArrayOfFloatsInSection(t *testing.T) {
	simpleIni := `key=nope
noarray=nope
[section]
key[]=1.1
key[]=2.2
noarray=3.3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	got := cfg.FloatsFromSection("section", "key")
	assert.Equal(t, len(got), 2, "Array for keys should have 2 values")
	assert.Equal(t, got[0], 1.1, "Read value of first wrong")
	assert.Equal(t, got[1], 2.2, "Read value of second wrong")
	got = cfg.FloatsFromSection("section", "noarray")
	assert.Equal(t, len(got), 1, "Array for noarray should have 1 value")
	assert.Equal(t, got[0], 3.3, "Read value of noarray wrong")
	assert.Equal(t, len(cfg.Keys()), 2, "FloatArray section test contains 2 fields")
}
// TestMultipleSections checks that several named sections coexist and
// that SetName makes the global keys addressable under that name.
func TestMultipleSections(t *testing.T) {
	simpleIni := `first=alpha
int=32
float=3.14
[section_one]
first=raz
int=124
float=1222.7
[section_two]
first=one
int=555
float=124.3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Sectioned configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.Integer("int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, cfg.StringFromSection("", "first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.IntegerFromSection("", "int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.FloatFromSection("", "float", 0), 3.14, "Read value of float wrong")
	cfg.SetName("section_zero")
	assert.Equal(t, cfg.StringFromSection("section_zero", "first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.IntegerFromSection("section_zero", "int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.FloatFromSection("section_zero", "float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, cfg.StringFromSection("section_one", "first", ""), "raz", "Read value of first from section wrong")
	assert.Equal(t, cfg.IntegerFromSection("section_one", "int", 0), 124, "Read value of int in section wrong")
	assert.Equal(t, cfg.FloatFromSection("section_one", "float", 0), 1222.7, "Read value of float in section wrong")
	assert.Equal(t, cfg.StringFromSection("section_two", "first", ""), "one", "Read value of first from section wrong")
	assert.Equal(t, cfg.IntegerFromSection("section_two", "int", 0), 555, "Read value of int in section wrong")
	assert.Equal(t, cfg.FloatFromSection("section_two", "float", 0), 124.3, "Read value of float in section wrong")
	assert.Equal(t, len(cfg.Keys()), 3, "Section ini contains 3 fields")
	assert.Equal(t, len(cfg.KeysForSection("section_one")), 3, "Section in ini contains 3 fields")
	assert.Equal(t, len(cfg.KeysForSection("section_two")), 3, "Section in ini contains 3 fields")
}
// TestSectionNames checks SectionNames returns all named sections in
// sorted order, and that SetName adds the global section to the list.
func TestSectionNames(t *testing.T) {
	simpleIni := `first=alpha
int=32
float=3.14
[section_one]
first=raz
int=124
float=1222.7
[section_two]
first=one
[section_three]
int=555
float=124.3`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Sectioned configuration should load without error.")
	names := cfg.SectionNames()
	assert.Equal(t, len(names), 3, "Read section name array wrong")
	//Section names should be sorted, alphabetically
	assert.Equal(t, names[0], "section_one", "Read section name wrong")
	assert.Equal(t, names[1], "section_three", "Read section name wrong")
	assert.Equal(t, names[2], "section_two", "Read section name wrong")
	cfg.SetName("section_zero")
	names = cfg.SectionNames()
	assert.Equal(t, len(names), 4, "Read section name array wrong")
	//Section names should be sorted, alphabetically
	assert.Equal(t, names[0], "section_one", "Read section name wrong")
	assert.Equal(t, names[1], "section_three", "Read section name wrong")
	assert.Equal(t, names[2], "section_two", "Read section name wrong")
	assert.Equal(t, names[3], "section_zero", "Read section name wrong")
}
// TestSplitSection checks that a section declared twice in the file is
// merged into a single logical section.
func TestSplitSection(t *testing.T) {
	simpleIni := `first=alpha
int=32
float=3.14
[section_one]
first=raz
[section_two]
first=one
int=555
float=124.3
[section_one]
int=124
float=1222.7`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Sectioned configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, cfg.Integer("int", 0), 32, "Read value of int wrong")
	assert.Equal(t, cfg.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, cfg.StringFromSection("section_one", "first", ""), "raz", "Read value of first from section wrong")
	assert.Equal(t, cfg.IntegerFromSection("section_one", "int", 0), 124, "Read value of int in section wrong")
	assert.Equal(t, cfg.FloatFromSection("section_one", "float", 0), 1222.7, "Read value of float in section wrong")
	assert.Equal(t, cfg.StringFromSection("section_two", "first", ""), "one", "Read value of first from section wrong")
	assert.Equal(t, cfg.IntegerFromSection("section_two", "int", 0), 555, "Read value of int in section wrong")
	assert.Equal(t, cfg.FloatFromSection("section_two", "float", 0), 124.3, "Read value of float in section wrong")
	assert.Equal(t, len(cfg.Keys()), 3, "Section ini contains 3 fields")
	assert.Equal(t, len(cfg.KeysForSection("section_one")), 3, "Section in ini contains 3 fields")
	assert.Equal(t, len(cfg.KeysForSection("section_two")), 3, "Section in ini contains 3 fields")
}
// TestRepeatedKey checks that a repeated plain key keeps the last
// value seen.
func TestRepeatedKey(t *testing.T) {
	simpleIni := `first=alpha
first=beta`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, cfg.String("first", ""), "beta", "Read value of first wrong")
	assert.Equal(t, len(cfg.Keys()), 1, "ini contains 1 fields")
}
// TestDefaults checks that missing keys, unusable values and empty
// keys all fall back to the caller-supplied default.
func TestDefaults(t *testing.T) {
	simpleIni := `first=alpha
third=\`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, cfg.String("second", "beta"), "beta", "Read default value of first wrong")
	assert.Equal(t, cfg.String("third", "gamma"), "gamma", "Read default value of too short a string")
	assert.Equal(t, cfg.Integer("int", 32), 32, "Default value of int wrong")
	assert.Equal(t, cfg.Float("float", 3.14), 3.14, "Default value of float wrong")
	assert.Equal(t, cfg.Boolean("bool", true), true, "Default value of bool wrong")
	assert.Equal(t, cfg.String("", "test"), "test", "Nil key should result in empty value")
	assert.Equal(t, cfg.Integer("", 32), 32, "Default value of int wrong for empty key")
	assert.Equal(t, cfg.Float("", 3.14), 3.14, "Default value of float wrong for empty key")
	assert.Equal(t, cfg.Boolean("", true), true, "Default value of bool wrong for empty key")
	assert.Equal(t, len(cfg.Keys()), 2, "ini contains 2 fields")
}
// TestDefaultsOnParseError checks that values which fail to parse as
// the requested type fall back to the caller-supplied default (scalar
// getters) or nil (array getters).
func TestDefaultsOnParseError(t *testing.T) {
	simpleIni := `first=alpha
int=yex
float=blap
bool=zipzap
intarray[]=blip
floatarray[]=blap`
	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, config.String("second", "beta"), "beta", "Read default value of first wrong")
	assert.Equal(t, config.Integer("int", 32), 32, "Default value of int wrong")
	assert.Equal(t, config.Float("float", 3.14), 3.14, "Default value of float wrong")
	assert.Equal(t, config.Boolean("bool", true), true, "Default value of bool wrong")
	assert.Nil(t, config.Integers("intarray"), "Default value of ints wrong on parse error")
	assert.Nil(t, config.Floats("floatarray"), "Default value of floats wrong on parse error")
	// Message fixed: the ini defines 6 keys (first, int, float, bool,
	// intarray, floatarray); the old message said 4 while asserting 6.
	assert.Equal(t, len(config.Keys()), 6, "ini contains 6 fields")
}
// TestMissingArray checks that array getters return nil for missing
// and empty keys.
func TestMissingArray(t *testing.T) {
	simpleIni := `first=alpha`
	cfg, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Nil(t, cfg.Strings("second"), "Read default value of strings wrong")
	assert.Nil(t, cfg.Integers("int"), "Default value of ints wrong")
	assert.Nil(t, cfg.Floats("float"), "Default value of floats wrong")
	assert.Nil(t, cfg.Strings(""), "Read default value of strings wrong for empty key")
	assert.Nil(t, cfg.Integers(""), "Default value of ints wrong for empty key")
	assert.Nil(t, cfg.Floats(""), "Default value of floats wrong for empty key")
	assert.Equal(t, len(cfg.Keys()), 1, "ini contains 1 fields")
}
// TestDefaultsWithSection verifies that sectioned lookups fall back to the
// supplied defaults for missing keys and for entirely missing sections.
func TestDefaultsWithSection(t *testing.T) {
	simpleIni := `[section]
first=alpha`

	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Equal(t, config.StringFromSection("section", "second", "beta"), "beta", "Read default value of first wrong")
	// IntegerFromSection returns int64; the expected value must be int64 too,
	// otherwise testify's deep-equal comparison fails on the type mismatch.
	assert.Equal(t, config.IntegerFromSection("section", "int", 32), int64(32), "Default value of int wrong")
	assert.Equal(t, config.FloatFromSection("section", "float", 3.14), 3.14, "Default value of float wrong")
	assert.Equal(t, config.BooleanFromSection("section", "bool", true), true, "Default value of bool wrong")
	assert.Equal(t, config.StringFromSection("section-1", "second", "beta"), "beta", "Missing section for first wrong")
	assert.Equal(t, config.IntegerFromSection("section-1", "int", 32), int64(32), "Missing section for int wrong")
	assert.Equal(t, config.FloatFromSection("section-1", "float", 3.14), 3.14, "Missing section for float wrong")
	assert.Equal(t, config.BooleanFromSection("section-1", "bool", true), true, "Missing section for bool wrong")
	assert.Equal(t, len(config.Keys()), 0, "ini contains 0 fields")
	assert.Equal(t, len(config.KeysForSection("section")), 1, "section contains 1 field")
	assert.Nil(t, config.KeysForSection("section-1"), "missing section should have no keys")
}
// testStruct is the reflection target for the DataFromSection tests.
// Exported fields are filled from matching ini keys (matched
// case-insensitively — see TestLoadStructFile, where lowercase keys fill
// capitalized fields); fields with no matching key keep their prior values.
type testStruct struct {
First string
Second string
L int64
F64 float64
Flag bool
Strings []string
LS []int64
F64s []float64
// Missing / MissingInt / MissingArray have no keys in the test ini and
// must retain whatever values the tests pre-assign to them.
Missing string
MissingInt int64
MissingArray []string
Flags []bool
U uint64
// private is unexported, so DataFromSection must leave it untouched.
private string
}
// TestLoadStructFile exercises DataFromSection: scalar, bool, and slice fields
// are populated from the section, pre-set fields without matching keys are
// preserved, and unexported fields are ignored.
func TestLoadStructFile(t *testing.T) {
	simpleIni := `[section]
first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
l=-32
f64=3.14
flag=true
unflag=false
strings[]=one
strings[]=two
LS[]=1
LS[]=2
F64s[]=11.0
F64s[]=22.0
#comment
; comment`

	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var data testStruct
	data.MissingInt = 33
	data.Missing = "hello world"

	ok := config.DataFromSection("section", &data)
	assert.Equal(t, ok, true, "load should succeed")
	assert.Equal(t, data.First, "alpha", "Read value of first wrong")
	assert.Equal(t, data.Second, "beta", "Read value of second wrong")
	// int64 struct fields must be compared against int64 expectations;
	// bare int literals fail testify's deep-equal comparison on type.
	assert.Equal(t, data.L, int64(-32), "Read value of int wrong")
	assert.Equal(t, data.F64, 3.14, "Read value of float wrong")
	assert.Equal(t, data.Flag, true, "Read true wrong")
	assert.Equal(t, data.Missing, "hello world", "Read value of missing wrong")
	assert.Equal(t, data.MissingInt, int64(33), "Read false wrong")
	assert.Nil(t, data.MissingArray, "Missing array Should be nil")
	assert.Equal(t, data.private, "", "private value in struct should be ignored")

	// Renamed from "strings" to avoid shadowing the imported strings package.
	strs := data.Strings
	assert.NotNil(t, strs, "strings should not be nil")
	assert.Equal(t, len(strs), 2, "Read wrong length of string array")
	assert.Equal(t, strs[0], "one", "Read string array wrong")
	assert.Equal(t, strs[1], "two", "Read string array wrong")
	assert.NotNil(t, data.LS, "ints should not be nil")
	assert.Equal(t, len(data.LS), 2, "Read wrong length of ints array")
	assert.Equal(t, data.LS[0], int64(1), "Read ints array wrong")
	assert.Equal(t, data.LS[1], int64(2), "Read ints array wrong")
	assert.NotNil(t, data.F64s, "floats should not be nil")
	assert.Equal(t, len(data.F64s), 2, "Read wrong length of floats array")
	assert.Equal(t, data.F64s[0], 11.0, "Read floats array wrong")
	assert.Equal(t, data.F64s[1], 22.0, "Read floats array wrong")
}
// TestLoadStructMissingSection: loading a struct from an absent section
// reports failure without touching the target.
func TestLoadStructMissingSection(t *testing.T) {
	cfg, err := LoadConfigurationFromReader(strings.NewReader(``))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var target testStruct
	loaded := cfg.DataFromSection("section", &target)
	assert.Equal(t, false, loaded, "section is missing so ok should be false")
}
// TestMissingSection: DataFromSection returns false when the named section
// does not exist, even though other sections do.
func TestMissingSection(t *testing.T) {
	src := `[section]
first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var target testStruct
	assert.False(t, cfg.DataFromSection("missing_section", &target))
}
// TestMissingArrayInSection: sectioned array accessors return nil both for
// absent keys and for sections that do not exist.
func TestMissingArrayInSection(t *testing.T) {
	src := `[section]
first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Nil(t, cfg.StringsFromSection("section", "second"), "Read default value of strings wrong")
	assert.Nil(t, cfg.IntegersFromSection("section", "int"), "Default value of ints wrong")
	assert.Nil(t, cfg.FloatsFromSection("section", "float"), "Default value of floats wrong")
	assert.Nil(t, cfg.StringsFromSection("section-1", "second"), "Missing section for strings wrong")
	assert.Nil(t, cfg.IntegersFromSection("section-1", "int"), "Missing section for ints wrong")
	assert.Nil(t, cfg.FloatsFromSection("section-1", "float"), "Missing section for floats wrong")
	assert.Equal(t, 0, len(cfg.Keys()), "ini contains 0 fields")
	assert.Equal(t, 1, len(cfg.KeysForSection("section")), "section contains 1 field")
	assert.Nil(t, cfg.KeysForSection("section-1"), "missing section should have no keys")
}
// TestBadSection: an unterminated section header must make the load fail.
func TestBadSection(t *testing.T) {
	src := `key=nope
noarray=nope
[section
key[]=one
key[]=two
noarray=three`

	reader := strings.NewReader(src)
	_, err := LoadConfigurationFromReader(reader)
	assert.NotNil(t, err, "Configuration should load with error.")
}
// TestBadKeyValue: a line without an '=' separator must make the load fail.
func TestBadKeyValue(t *testing.T) {
	src := `key=nope
noarray:nope`

	reader := strings.NewReader(src)
	_, err := LoadConfigurationFromReader(reader)
	assert.NotNil(t, err, "Configuration should load with error.")
}
// TestBadArrayAsSingle: a key defined as an array cannot be read back as a
// single string, so the supplied default is returned.
func TestBadArrayAsSingle(t *testing.T) {
	src := `key=nope
noarray=nope
[section]
key[]=one
key[]=two
noarray=three`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, "default", cfg.StringFromSection("section", "key", "default"), "Read default value of strings wrong")
}
// TestBadFile: loading a path that cannot exist yields an os.IsNotExist error
// and a nil configuration.
func TestBadFile(t *testing.T) {
	const missingPath = "/no.such.dir/xxx.no-such-file.txt"

	cfg, err := LoadConfiguration(missingPath)
	assert.NotNil(t, err, "No valid file is an error.")
	assert.True(t, os.IsNotExist(err), "No valid file errors is of expected type.")
	assert.Nil(t, cfg, "Configuration should be nil.")
}
func TestNullValuesInGet(t *testing.T) {
assert.Nil(t, get(nil, "foo"), "Configuration should be nil.")
}
// TestStringEscape: backslash escapes in values are decoded into the
// corresponding control characters.
func TestStringEscape(t *testing.T) {
	src := `first=\n\t\rhello`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Equal(t, "\n\t\rhello", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, 1, len(cfg.Keys()), "ini contains 1 fields")
}
// TestBadStringArray: an array entry whose value fails unescaping yields a
// nil array rather than a partial one.
func TestBadStringArray(t *testing.T) {
	src := `first[]=\`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Nil(t, cfg.Strings("first"), "Read value of first wrong")
}
func BenchmarkLoadConfiguration(b *testing.B) {
simpleIni := `first=alpha
int=32
float=3.14
[section_one]
first=raz
[section_two]
first=one
int=555
float=124.3
[section_one]
int=124
float=1222.7`
b.ReportAllocs()
r := strings.NewReader(simpleIni)
for i := 0; i < b.N; i++ {
r.Seek(0, os.SEEK_SET)
_, err := LoadConfigurationFromReader(r)
if err != nil {
b.Fatal(err)
}
}
}
Fixed data types in tests.
package mini
import (
"github.com/stretchr/testify/assert"
"os"
"path"
"strings"
"testing"
)
// TestSimpleIniFile writes a small ini document to a temp file and verifies
// every scalar accessor against it.
func TestSimpleIniFile(t *testing.T) {
	src := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
#comment
; comment`

	iniPath := path.Join(os.TempDir(), "simpleini.txt")

	f, err := os.Create(iniPath)
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(iniPath)

	if _, err := f.WriteString(src); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	cfg, err := LoadConfiguration(iniPath)
	assert.Nil(t, err, "Simple configuration should load without error.")

	assert.Equal(t, "alpha", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, "beta", cfg.String("second", ""), "Read value of second wrong")
	assert.Equal(t, "gamma bamma", cfg.String("third", ""), "Read value of third wrong")
	assert.Equal(t, "delta", cfg.String("fourth", ""), "Read value of fourth wrong")
	assert.Equal(t, int64(32), cfg.Integer("int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.Float("float", 0), "Read value of float wrong")
	assert.Equal(t, true, cfg.Boolean("true", false), "Read true wrong")
	assert.Equal(t, false, cfg.Boolean("false", true), "Read false wrong")
	assert.Equal(t, 8, len(cfg.Keys()), "Simple ini contains 8 fields")
}
// TestSimpleIniFileFromReader mirrors TestSimpleIniFile but loads the same
// document from an in-memory reader.
func TestSimpleIniFileFromReader(t *testing.T) {
	src := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
#comment
; comment`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	assert.Equal(t, "alpha", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, "beta", cfg.String("second", ""), "Read value of second wrong")
	assert.Equal(t, "gamma bamma", cfg.String("third", ""), "Read value of third wrong")
	assert.Equal(t, "delta", cfg.String("fourth", ""), "Read value of fourth wrong")
	assert.Equal(t, int64(32), cfg.Integer("int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.Float("float", 0), "Read value of float wrong")
	assert.Equal(t, true, cfg.Boolean("true", false), "Read true wrong")
	assert.Equal(t, false, cfg.Boolean("false", true), "Read false wrong")
	assert.Equal(t, 8, len(cfg.Keys()), "Simple ini contains 8 fields")
}
// TestCaseInsensitive: keys match regardless of the case used in the file or
// in the lookup.
func TestCaseInsensitive(t *testing.T) {
	src := `fIrst=alpha
SECOND=beta
Third="gamma bamma"
FourTh = 'delta'`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Case insensitive configuration should load without error.")

	assert.Equal(t, "alpha", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, "beta", cfg.String("second", ""), "Read value of second wrong")
	assert.Equal(t, "gamma bamma", cfg.String("THIRD", ""), "Read value of third wrong")
	assert.Equal(t, "delta", cfg.String("fourth", ""), "Read value of fourth wrong")
	assert.Equal(t, 4, len(cfg.Keys()), "Case ins ini contains 4 fields")
}
// TestArrayOfStrings: repeated key[] entries accumulate into an array, and a
// plain key is readable as a one-element array.
func TestArrayOfStrings(t *testing.T) {
	src := `key[]=one
key[]=two
noarray=three`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	got := cfg.Strings("key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, "one", got[0], "Read value of first wrong")
	assert.Equal(t, "two", got[1], "Read value of second wrong")

	got = cfg.Strings("noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, "three", got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.Keys()), "StringArray test contains 2 fields")
}
// TestArrayOfIntegers: repeated key[] entries parse into an int64 array, and
// a plain key is readable as a one-element array.
func TestArrayOfIntegers(t *testing.T) {
	src := `key[]=1
key[]=2
noarray=3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	got := cfg.Integers("key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, int64(1), got[0], "Read value of first wrong")
	assert.Equal(t, int64(2), got[1], "Read value of second wrong")

	got = cfg.Integers("noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, int64(3), got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.Keys()), "IntArray test contains 2 fields")
}
// TestArrayOfFloats: repeated key[] entries parse into a float64 array, and a
// plain key is readable as a one-element array.
func TestArrayOfFloats(t *testing.T) {
	src := `key[]=1.1
key[]=2.2
noarray=3.3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	got := cfg.Floats("key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, 1.1, got[0], "Read value of first wrong")
	assert.Equal(t, 2.2, got[1], "Read value of second wrong")

	got = cfg.Floats("noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, 3.3, got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.Keys()), "FloatArray test contains 2 fields")
}
// TestSectionedIniFile: keys before the first section header live in the
// global scope; keys after [section] are reachable only via the *FromSection
// accessors, and may shadow global names with different values.
func TestSectionedIniFile(t *testing.T) {
	simpleIni := `first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
int=32
float=3.14
true=true
false=false
[section]
first=raz
second=dba
int=124
float=1222.7
true=false
false=true
#comment
; comment`

	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Sectioned configuration should load without error.")

	assert.Equal(t, config.String("first", ""), "alpha", "Read value of first wrong")
	assert.Equal(t, config.String("second", ""), "beta", "Read value of second wrong")
	assert.Equal(t, config.String("third", ""), "gamma bamma", "Read value of third wrong")
	assert.Equal(t, config.String("fourth", ""), "delta", "Read value of fourth wrong")
	assert.Equal(t, config.Integer("int", 0), int64(32), "Read value of int wrong")
	assert.Equal(t, config.Float("float", 0), 3.14, "Read value of float wrong")
	assert.Equal(t, config.Boolean("true", false), true, "Read true wrong")
	assert.Equal(t, config.Boolean("false", true), false, "Read false wrong")
	assert.Equal(t, config.StringFromSection("section", "first", ""), "raz", "Read value of first from section wrong")
	assert.Equal(t, config.StringFromSection("section", "second", ""), "dba", "Read value of second from section wrong")
	assert.Equal(t, config.IntegerFromSection("section", "int", 0), int64(124), "Read value of int in section wrong")
	assert.Equal(t, config.FloatFromSection("section", "float", 0), 1222.7, "Read value of float in section wrong")
	assert.Equal(t, config.BooleanFromSection("section", "true", true), false, "Read true in section wrong")
	assert.Equal(t, config.BooleanFromSection("section", "false", false), true, "Read false in section wrong")
	// Messages corrected to match the asserted counts (8 global, 6 sectioned).
	assert.Equal(t, len(config.Keys()), 8, "Section ini contains 8 fields")
	assert.Equal(t, len(config.KeysForSection("section")), 6, "Section in ini contains 6 fields")
}
// TestArrayOfStringsInSection: array accessors scoped to a section ignore
// same-named global keys.
func TestArrayOfStringsInSection(t *testing.T) {
	src := `key=nope
noarray=nope
[section]
key[]=one
key[]=two
noarray=three`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	got := cfg.StringsFromSection("section", "key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, "one", got[0], "Read value of first wrong")
	assert.Equal(t, "two", got[1], "Read value of second wrong")

	got = cfg.StringsFromSection("section", "noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, "three", got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.Keys()), "StringArray section test contains 2 fields")
}
// TestArrayOfIntegersInSection: int64 array accessors scoped to a section
// ignore same-named global keys.
func TestArrayOfIntegersInSection(t *testing.T) {
	src := `key=nope
noarray=nope
[section]
key[]=1
key[]=2
noarray=3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	got := cfg.IntegersFromSection("section", "key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, int64(1), got[0], "Read value of first wrong")
	assert.Equal(t, int64(2), got[1], "Read value of second wrong")

	got = cfg.IntegersFromSection("section", "noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, int64(3), got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.KeysForSection("section")), "IntArray section test contains 2 fields")
}
// TestArrayOfFloatsInSection: float64 array accessors scoped to a section
// ignore same-named global keys.
func TestArrayOfFloatsInSection(t *testing.T) {
	src := `key=nope
noarray=nope
[section]
key[]=1.1
key[]=2.2
noarray=3.3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	got := cfg.FloatsFromSection("section", "key")
	assert.Equal(t, 2, len(got), "Array for keys should have 2 values")
	assert.Equal(t, 1.1, got[0], "Read value of first wrong")
	assert.Equal(t, 2.2, got[1], "Read value of second wrong")

	got = cfg.FloatsFromSection("section", "noarray")
	assert.Equal(t, 1, len(got), "Array for noarray should have 1 value")
	assert.Equal(t, 3.3, got[0], "Read value of noarray wrong")
	assert.Equal(t, 2, len(cfg.Keys()), "FloatArray section test contains 2 fields")
}
// TestMultipleSections: global keys are reachable directly, via an empty
// section name, and — after SetName — via the configured section name;
// named sections resolve independently.
func TestMultipleSections(t *testing.T) {
	src := `first=alpha
int=32
float=3.14
[section_one]
first=raz
int=124
float=1222.7
[section_two]
first=one
int=555
float=124.3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Sectioned configuration should load without error.")

	assert.Equal(t, "alpha", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, int64(32), cfg.Integer("int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.Float("float", 0), "Read value of float wrong")

	// The empty section name aliases the global scope.
	assert.Equal(t, "alpha", cfg.StringFromSection("", "first", ""), "Read value of first wrong")
	assert.Equal(t, int64(32), cfg.IntegerFromSection("", "int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.FloatFromSection("", "float", 0), "Read value of float wrong")

	// After SetName, the global scope also answers to "section_zero".
	cfg.SetName("section_zero")
	assert.Equal(t, "alpha", cfg.StringFromSection("section_zero", "first", ""), "Read value of first wrong")
	assert.Equal(t, int64(32), cfg.IntegerFromSection("section_zero", "int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.FloatFromSection("section_zero", "float", 0), "Read value of float wrong")

	assert.Equal(t, "raz", cfg.StringFromSection("section_one", "first", ""), "Read value of first from section wrong")
	assert.Equal(t, int64(124), cfg.IntegerFromSection("section_one", "int", 0), "Read value of int in section wrong")
	assert.Equal(t, 1222.7, cfg.FloatFromSection("section_one", "float", 0), "Read value of float in section wrong")
	assert.Equal(t, "one", cfg.StringFromSection("section_two", "first", ""), "Read value of first from section wrong")
	assert.Equal(t, int64(555), cfg.IntegerFromSection("section_two", "int", 0), "Read value of int in section wrong")
	assert.Equal(t, 124.3, cfg.FloatFromSection("section_two", "float", 0), "Read value of float in section wrong")

	assert.Equal(t, 3, len(cfg.Keys()), "Section ini contains 3 fields")
	assert.Equal(t, 3, len(cfg.KeysForSection("section_one")), "Section in ini contains 3 fields")
	assert.Equal(t, 3, len(cfg.KeysForSection("section_two")), "Section in ini contains 3 fields")
}
// TestSectionNames: SectionNames returns the alphabetically sorted section
// list; naming the global scope via SetName adds it to that list.
func TestSectionNames(t *testing.T) {
	src := `first=alpha
int=32
float=3.14
[section_one]
first=raz
int=124
float=1222.7
[section_two]
first=one
[section_three]
int=555
float=124.3`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Sectioned configuration should load without error.")

	names := cfg.SectionNames()
	assert.Equal(t, 3, len(names), "Read section name array wrong")
	// Sorted alphabetically, so "three" precedes "two".
	assert.Equal(t, "section_one", names[0], "Read section name wrong")
	assert.Equal(t, "section_three", names[1], "Read section name wrong")
	assert.Equal(t, "section_two", names[2], "Read section name wrong")

	cfg.SetName("section_zero")
	names = cfg.SectionNames()
	assert.Equal(t, 4, len(names), "Read section name array wrong")
	assert.Equal(t, "section_one", names[0], "Read section name wrong")
	assert.Equal(t, "section_three", names[1], "Read section name wrong")
	assert.Equal(t, "section_two", names[2], "Read section name wrong")
	assert.Equal(t, "section_zero", names[3], "Read section name wrong")
}
// TestSplitSection: a section declared twice is merged — both halves of
// [section_one] contribute keys to the same section.
func TestSplitSection(t *testing.T) {
	src := `first=alpha
int=32
float=3.14
[section_one]
first=raz
[section_two]
first=one
int=555
float=124.3
[section_one]
int=124
float=1222.7`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Sectioned configuration should load without error.")

	assert.Equal(t, "alpha", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, int64(32), cfg.Integer("int", 0), "Read value of int wrong")
	assert.Equal(t, 3.14, cfg.Float("float", 0), "Read value of float wrong")
	assert.Equal(t, "raz", cfg.StringFromSection("section_one", "first", ""), "Read value of first from section wrong")
	assert.Equal(t, int64(124), cfg.IntegerFromSection("section_one", "int", 0), "Read value of int in section wrong")
	assert.Equal(t, 1222.7, cfg.FloatFromSection("section_one", "float", 0), "Read value of float in section wrong")
	assert.Equal(t, "one", cfg.StringFromSection("section_two", "first", ""), "Read value of first from section wrong")
	assert.Equal(t, int64(555), cfg.IntegerFromSection("section_two", "int", 0), "Read value of int in section wrong")
	assert.Equal(t, 124.3, cfg.FloatFromSection("section_two", "float", 0), "Read value of float in section wrong")
	assert.Equal(t, 3, len(cfg.Keys()), "Section ini contains 3 fields")
	assert.Equal(t, 3, len(cfg.KeysForSection("section_one")), "Section in ini contains 3 fields")
	assert.Equal(t, 3, len(cfg.KeysForSection("section_two")), "Section in ini contains 3 fields")
}
// TestRepeatedKey: when a plain key is repeated, the last value wins.
func TestRepeatedKey(t *testing.T) {
	src := `first=alpha
first=beta`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, "beta", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, 1, len(cfg.Keys()), "ini contains 1 fields")
}
// TestDefaults: lookups for missing or unparseable keys return the caller's
// defaults, including lookups with the empty key.
func TestDefaults(t *testing.T) {
	src := `first=alpha
third=\`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Equal(t, "beta", cfg.String("second", "beta"), "Read default value of first wrong")
	assert.Equal(t, "gamma", cfg.String("third", "gamma"), "Read default value of too short a string")
	assert.Equal(t, int64(32), cfg.Integer("int", 32), "Default value of int wrong")
	assert.Equal(t, 3.14, cfg.Float("float", 3.14), "Default value of float wrong")
	assert.Equal(t, true, cfg.Boolean("bool", true), "Default value of bool wrong")
	assert.Equal(t, "test", cfg.String("", "test"), "Nil key should result in empty value")
	assert.Equal(t, int64(32), cfg.Integer("", 32), "Default value of int wrong for empty key")
	assert.Equal(t, 3.14, cfg.Float("", 3.14), "Default value of float wrong for empty key")
	assert.Equal(t, true, cfg.Boolean("", true), "Default value of bool wrong for empty key")
	assert.Equal(t, 2, len(cfg.Keys()), "ini contains 2 fields")
}
// TestDefaultsOnParseError verifies that unparseable values fall back to the
// supplied defaults (scalars) or nil (arrays) while the raw keys are still counted.
func TestDefaultsOnParseError(t *testing.T) {
	simpleIni := `first=alpha
int=yex
float=blap
bool=zipzap
intarray[]=blip
floatarray[]=blap`

	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Equal(t, config.String("second", "beta"), "beta", "Read default value of first wrong")
	assert.Equal(t, config.Integer("int", 32), int64(32), "Default value of int wrong")
	assert.Equal(t, config.Float("float", 3.14), 3.14, "Default value of float wrong")
	assert.Equal(t, config.Boolean("bool", true), true, "Default value of bool wrong")
	assert.Nil(t, config.Integers("intarray"), "Default value of ints wrong on parse error")
	assert.Nil(t, config.Floats("floatarray"), "Default value of floats wrong on parse error")
	// Message corrected: the ini defines (and this asserts) 6 keys, not 4.
	assert.Equal(t, len(config.Keys()), 6, "ini contains 6 fields")
}
// TestMissingArray checks that array accessors return nil for keys that are
// absent — including the empty key.
func TestMissingArray(t *testing.T) {
	src := `first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Nil(t, cfg.Strings("second"), "Read default value of strings wrong")
	assert.Nil(t, cfg.Integers("int"), "Default value of ints wrong")
	assert.Nil(t, cfg.Floats("float"), "Default value of floats wrong")
	assert.Nil(t, cfg.Strings(""), "Read default value of strings wrong for empty key")
	assert.Nil(t, cfg.Integers(""), "Default value of ints wrong for empty key")
	assert.Nil(t, cfg.Floats(""), "Default value of floats wrong for empty key")
	assert.Equal(t, 1, len(cfg.Keys()), "ini contains 1 fields")
}
// TestDefaultsWithSection: sectioned lookups fall back to the supplied
// defaults for missing keys and for entirely missing sections.
func TestDefaultsWithSection(t *testing.T) {
	src := `[section]
first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Equal(t, "beta", cfg.StringFromSection("section", "second", "beta"), "Read default value of first wrong")
	assert.Equal(t, int64(32), cfg.IntegerFromSection("section", "int", 32), "Default value of int wrong")
	assert.Equal(t, 3.14, cfg.FloatFromSection("section", "float", 3.14), "Default value of float wrong")
	assert.Equal(t, true, cfg.BooleanFromSection("section", "bool", true), "Default value of bool wrong")
	assert.Equal(t, "beta", cfg.StringFromSection("section-1", "second", "beta"), "Missing section for first wrong")
	assert.Equal(t, int64(32), cfg.IntegerFromSection("section-1", "int", 32), "Missing section for int wrong")
	assert.Equal(t, 3.14, cfg.FloatFromSection("section-1", "float", 3.14), "Missing section for float wrong")
	assert.Equal(t, true, cfg.BooleanFromSection("section-1", "bool", true), "Missing section for bool wrong")
	assert.Equal(t, 0, len(cfg.Keys()), "ini contains 0 fields")
	assert.Equal(t, 1, len(cfg.KeysForSection("section")), "section contains 1 field")
	assert.Nil(t, cfg.KeysForSection("section-1"), "missing section should have no keys")
}
// testStruct is the reflection target for the DataFromSection tests.
// Exported fields are filled from matching ini keys (matched
// case-insensitively — see TestLoadStructFile, where lowercase keys fill
// capitalized fields); fields with no matching key keep their prior values.
type testStruct struct {
First string
Second string
L int64
F64 float64
Flag bool
Strings []string
LS []int64
F64s []float64
// Missing / MissingInt / MissingArray have no keys in the test ini and
// must retain whatever values the tests pre-assign to them.
Missing string
MissingInt int64
MissingArray []string
Flags []bool
U uint64
// private is unexported, so DataFromSection must leave it untouched.
private string
}
// TestLoadStructFile exercises DataFromSection: scalar, bool, and slice fields
// are populated from the section, pre-set fields without matching keys are
// preserved, and unexported fields are ignored.
func TestLoadStructFile(t *testing.T) {
	simpleIni := `[section]
first=alpha
second=beta
third="gamma bamma"
fourth = 'delta'
l=-32
f64=3.14
flag=true
unflag=false
strings[]=one
strings[]=two
LS[]=1
LS[]=2
F64s[]=11.0
F64s[]=22.0
#comment
; comment`

	config, err := LoadConfigurationFromReader(strings.NewReader(simpleIni))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var data testStruct
	data.MissingInt = 33
	data.Missing = "hello world"

	ok := config.DataFromSection("section", &data)
	assert.Equal(t, ok, true, "load should succeed")
	assert.Equal(t, data.First, "alpha", "Read value of first wrong")
	assert.Equal(t, data.Second, "beta", "Read value of second wrong")
	assert.Equal(t, data.L, int64(-32), "Read value of int wrong")
	assert.Equal(t, data.F64, 3.14, "Read value of float wrong")
	assert.Equal(t, data.Flag, true, "Read true wrong")
	assert.Equal(t, data.Missing, "hello world", "Read value of missing wrong")
	assert.Equal(t, data.MissingInt, int64(33), "Read false wrong")
	assert.Nil(t, data.MissingArray, "Missing array Should be nil")
	assert.Equal(t, data.private, "", "private value in struct should be ignored")

	// Renamed from "strings" to avoid shadowing the imported strings package.
	strs := data.Strings
	assert.NotNil(t, strs, "strings should not be nil")
	assert.Equal(t, len(strs), 2, "Read wrong length of string array")
	assert.Equal(t, strs[0], "one", "Read string array wrong")
	assert.Equal(t, strs[1], "two", "Read string array wrong")
	assert.NotNil(t, data.LS, "ints should not be nil")
	assert.Equal(t, len(data.LS), 2, "Read wrong length of ints array")
	assert.Equal(t, data.LS[0], int64(1), "Read ints array wrong")
	assert.Equal(t, data.LS[1], int64(2), "Read ints array wrong")
	assert.NotNil(t, data.F64s, "floats should not be nil")
	assert.Equal(t, len(data.F64s), 2, "Read wrong length of floats array")
	assert.Equal(t, data.F64s[0], 11.0, "Read floats array wrong")
	assert.Equal(t, data.F64s[1], 22.0, "Read floats array wrong")
}
// TestLoadStructMissingSection: loading a struct from an absent section
// reports failure without touching the target.
func TestLoadStructMissingSection(t *testing.T) {
	cfg, err := LoadConfigurationFromReader(strings.NewReader(``))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var target testStruct
	loaded := cfg.DataFromSection("section", &target)
	assert.Equal(t, false, loaded, "section is missing so ok should be false")
}
// TestMissingSection: DataFromSection returns false when the named section
// does not exist, even though other sections do.
func TestMissingSection(t *testing.T) {
	src := `[section]
first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")

	var target testStruct
	assert.False(t, cfg.DataFromSection("missing_section", &target))
}
// TestMissingArrayInSection: sectioned array accessors return nil both for
// absent keys and for sections that do not exist.
func TestMissingArrayInSection(t *testing.T) {
	src := `[section]
first=alpha`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")

	assert.Nil(t, cfg.StringsFromSection("section", "second"), "Read default value of strings wrong")
	assert.Nil(t, cfg.IntegersFromSection("section", "int"), "Default value of ints wrong")
	assert.Nil(t, cfg.FloatsFromSection("section", "float"), "Default value of floats wrong")
	assert.Nil(t, cfg.StringsFromSection("section-1", "second"), "Missing section for strings wrong")
	assert.Nil(t, cfg.IntegersFromSection("section-1", "int"), "Missing section for ints wrong")
	assert.Nil(t, cfg.FloatsFromSection("section-1", "float"), "Missing section for floats wrong")
	assert.Equal(t, 0, len(cfg.Keys()), "ini contains 0 fields")
	assert.Equal(t, 1, len(cfg.KeysForSection("section")), "section contains 1 field")
	assert.Nil(t, cfg.KeysForSection("section-1"), "missing section should have no keys")
}
// TestBadSection: an unterminated section header must make the load fail.
func TestBadSection(t *testing.T) {
	src := `key=nope
noarray=nope
[section
key[]=one
key[]=two
noarray=three`

	reader := strings.NewReader(src)
	_, err := LoadConfigurationFromReader(reader)
	assert.NotNil(t, err, "Configuration should load with error.")
}
// TestBadKeyValue: a line without an '=' separator must make the load fail.
func TestBadKeyValue(t *testing.T) {
	src := `key=nope
noarray:nope`

	reader := strings.NewReader(src)
	_, err := LoadConfigurationFromReader(reader)
	assert.NotNil(t, err, "Configuration should load with error.")
}
// TestBadArrayAsSingle: a key defined as an array cannot be read back as a
// single string, so the supplied default is returned.
func TestBadArrayAsSingle(t *testing.T) {
	src := `key=nope
noarray=nope
[section]
key[]=one
key[]=two
noarray=three`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Configuration should load without error.")
	assert.Equal(t, "default", cfg.StringFromSection("section", "key", "default"), "Read default value of strings wrong")
}
// TestBadFile: loading a path that cannot exist yields an os.IsNotExist error
// and a nil configuration.
func TestBadFile(t *testing.T) {
	const missingPath = "/no.such.dir/xxx.no-such-file.txt"

	cfg, err := LoadConfiguration(missingPath)
	assert.NotNil(t, err, "No valid file is an error.")
	assert.True(t, os.IsNotExist(err), "No valid file errors is of expected type.")
	assert.Nil(t, cfg, "Configuration should be nil.")
}
func TestNullValuesInGet(t *testing.T) {
assert.Nil(t, get(nil, "foo"), "Configuration should be nil.")
}
// TestStringEscape: backslash escapes in values are decoded into the
// corresponding control characters.
func TestStringEscape(t *testing.T) {
	src := `first=\n\t\rhello`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Equal(t, "\n\t\rhello", cfg.String("first", ""), "Read value of first wrong")
	assert.Equal(t, 1, len(cfg.Keys()), "ini contains 1 fields")
}
// TestBadStringArray: an array entry whose value fails unescaping yields a
// nil array rather than a partial one.
func TestBadStringArray(t *testing.T) {
	src := `first[]=\`

	cfg, err := LoadConfigurationFromReader(strings.NewReader(src))
	assert.Nil(t, err, "Simple configuration should load without error.")
	assert.Nil(t, cfg.Strings("first"), "Read value of first wrong")
}
// BenchmarkLoadConfiguration measures parsing of a small multi-section INI
// document (with a repeated section header), rewinding the same reader on
// every iteration so only parsing cost is measured.
func BenchmarkLoadConfiguration(b *testing.B) {
simpleIni := `first=alpha
int=32
float=3.14
[section_one]
first=raz
[section_two]
first=one
int=555
float=124.3
[section_one]
int=124
float=1222.7`
b.ReportAllocs()
r := strings.NewReader(simpleIni)
for i := 0; i < b.N; i++ {
// NOTE(review): os.SEEK_SET is deprecated in favor of io.SeekStart;
// switching would require adding an "io" import at the top of the file.
r.Seek(0, os.SEEK_SET)
_, err := LoadConfigurationFromReader(r)
if err != nil {
b.Fatal(err)
}
}
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
"github.com/emicklei/go-restful-swagger12"
"github.com/golang/protobuf/proto"
"github.com/googleapis/gnostic/OpenAPIv2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
)
// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources).
const defaultRetries = 2
// DiscoveryInterface holds the methods that discover server-supported API groups,
// versions and resources.
type DiscoveryInterface interface {
RESTClient() restclient.Interface
ServerGroupsInterface
ServerResourcesInterface
ServerVersionInterface
SwaggerSchemaInterface
OpenAPISchemaInterface
}
// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness.
type CachedDiscoveryInterface interface {
DiscoveryInterface
// Fresh is supposed to tell the caller whether or not to retry if the cache
// fails to find something (false = retry, true = no need to retry).
//
// TODO: this needs to be revisited, this interface can't be locked properly
// and doesn't make a lot of sense.
Fresh() bool
// Invalidate enforces that no cached data is used in the future that is older than the current time.
Invalidate()
}
// ServerGroupsInterface has methods for obtaining supported groups on the API server
type ServerGroupsInterface interface {
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
ServerGroups() (*metav1.APIGroupList, error)
}
// ServerResourcesInterface has methods for obtaining supported resources on the API server
type ServerResourcesInterface interface {
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error)
// ServerResources returns the supported resources for all groups and versions.
ServerResources() ([]*metav1.APIResourceList, error)
// ServerPreferredResources returns the supported resources with the version preferred by the
// server.
ServerPreferredResources() ([]*metav1.APIResourceList, error)
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server.
ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error)
}
// ServerVersionInterface has a method for retrieving the server's version.
type ServerVersionInterface interface {
// ServerVersion retrieves and parses the server's version (git version).
ServerVersion() (*version.Info, error)
}
// SwaggerSchemaInterface has a method to retrieve the swagger schema.
type SwaggerSchemaInterface interface {
// SwaggerSchema retrieves and parses the swagger API schema the server supports.
SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error)
}
// OpenAPISchemaInterface has a method to retrieve the open API schema.
type OpenAPISchemaInterface interface {
// OpenAPISchema retrieves and parses the swagger API schema the server supports.
OpenAPISchema() (*openapi_v2.Document, error)
}
// DiscoveryClient implements the functions that discover server-supported API groups,
// versions and resources.
type DiscoveryClient struct {
restClient restclient.Interface
// LegacyPrefix is the path prefix for the legacy API (e.g. "/api").
LegacyPrefix string
}
// apiVersionsToAPIGroup converts a metav1.APIVersions (served at the legacy
// /api endpoint) into a metav1.APIGroup. APIVersions is used by legacy v1, so
// the resulting group name is "".
func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) {
	groupVersions := []metav1.GroupVersionForDiscovery{}
	for _, version := range apiVersions.Versions {
		groupVersion := metav1.GroupVersionForDiscovery{
			GroupVersion: version,
			Version:      version,
		}
		groupVersions = append(groupVersions, groupVersion)
	}
	apiGroup.Versions = groupVersions
	// There should be only one groupVersion returned at /api.
	// Guard against an empty version list so a misbehaving server cannot
	// trigger an index-out-of-range panic; callers (ServerGroups) already
	// skip the result when no versions were returned.
	if len(groupVersions) > 0 {
		apiGroup.PreferredVersion = groupVersions[0]
	}
	return
}
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
// Get the groupVersions exposed at /api
v := &metav1.APIVersions{}
err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
apiGroup := metav1.APIGroup{}
if err == nil && len(v.Versions) != 0 {
apiGroup = apiVersionsToAPIGroup(v)
}
// A 403/404 from the legacy prefix is tolerated (server may not serve /api);
// any other error is fatal.
if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
return nil, err
}
// Get the groupVersions exposed at /apis
apiGroupList = &metav1.APIGroupList{}
err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
return nil, err
}
// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
apiGroupList = &metav1.APIGroupList{}
}
// append the group retrieved from /api to the list if not empty
if len(v.Versions) != 0 {
apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
}
return apiGroupList, nil
}
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
// The legacy "v1" group version is fetched from LegacyPrefix (/api/v1); all
// other group versions are fetched from /apis/<groupVersion>.
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) {
url := url.URL{}
if len(groupVersion) == 0 {
return nil, fmt.Errorf("groupVersion shouldn't be empty")
}
if len(d.LegacyPrefix) > 0 && groupVersion == "v1" {
url.Path = d.LegacyPrefix + "/" + groupVersion
} else {
url.Path = "/apis/" + groupVersion
}
// Pre-populate GroupVersion so the (possibly empty) result is well-formed.
resources = &metav1.APIResourceList{
GroupVersion: groupVersion,
}
err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources)
if err != nil {
// ignore 403 or 404 error to be compatible with an v1.0 server.
if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
return resources, nil
}
return nil, err
}
return resources, nil
}
// serverResources returns the supported resources for all groups and versions.
// When failEarly is true, the first group version that fails discovery aborts
// the walk with an ErrGroupDiscoveryFailed; otherwise failures are collected
// per group version and returned alongside the partial result.
func (d *DiscoveryClient) serverResources(failEarly bool) ([]*metav1.APIResourceList, error) {
apiGroups, err := d.ServerGroups()
if err != nil {
return nil, err
}
result := []*metav1.APIResourceList{}
failedGroups := make(map[schema.GroupVersion]error)
for _, apiGroup := range apiGroups.Groups {
for _, version := range apiGroup.Versions {
gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
if err != nil {
// TODO: maybe restrict this to NotFound errors
failedGroups[gv] = err
if failEarly {
return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
continue
}
result = append(result, resources)
}
}
if len(failedGroups) == 0 {
return result, nil
}
// Partial result plus the aggregated per-group errors.
return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
// ServerResources returns the supported resources for all groups and versions.
// Discovery is retried up to defaultRetries times in case the set of served
// groups changes between the group listing and the per-group resource fetch.
func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
return withRetries(defaultRetries, d.serverResources)
}
// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load.
type ErrGroupDiscoveryFailed struct {
// Groups is a list of the groups that failed to load and the error cause
Groups map[schema.GroupVersion]error
}
// Error implements the error interface. The failed group versions are listed
// in sorted order so the message is deterministic.
func (e *ErrGroupDiscoveryFailed) Error() string {
	msgs := make([]string, 0, len(e.Groups))
	for gv, groupErr := range e.Groups {
		msgs = append(msgs, fmt.Sprintf("%s: %v", gv, groupErr))
	}
	sort.Strings(msgs)
	return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(msgs, ", "))
}
// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover
// a complete list of APIs for the client to use.
func IsGroupDiscoveryFailedError(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*ErrGroupDiscoveryFailed)
	return ok
}
// serverPreferredResources returns the supported resources with the version preferred by the server.
// It walks every group version, remembers one APIResource per GroupResource
// (preferring the group's PreferredVersion), then regroups the selections
// into per-GroupVersion APIResourceLists. When failEarly is true the first
// discovery failure aborts the walk; otherwise failures are collected.
func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.APIResourceList, error) {
serverGroupList, err := d.ServerGroups()
if err != nil {
return nil, err
}
result := []*metav1.APIResourceList{}
failedGroups := make(map[schema.GroupVersion]error)
grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource
grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource
gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping
for _, apiGroup := range serverGroupList.Groups {
for _, version := range apiGroup.Versions {
groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
if err != nil {
// TODO: maybe restrict this to NotFound errors
failedGroups[groupVersion] = err
if failEarly {
return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
continue
}
// create empty list which is filled later in another loop
emptyApiResourceList := metav1.APIResourceList{
GroupVersion: version.GroupVersion,
}
gvApiResourceLists[groupVersion] = &emptyApiResourceList
result = append(result, &emptyApiResourceList)
for i := range apiResourceList.APIResources {
apiResource := &apiResourceList.APIResources[i]
// Skip subresources (e.g. "pods/status").
if strings.Contains(apiResource.Name, "/") {
continue
}
gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name}
if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version {
// only override with preferred version
continue
}
grVersions[gv] = version.Version
grApiResources[gv] = apiResource
}
}
}
// group selected APIResources according to GroupVersion into APIResourceLists
for groupResource, apiResource := range grApiResources {
version := grVersions[groupResource]
groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version}
apiResourceList := gvApiResourceLists[groupVersion]
apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource)
}
if len(failedGroups) == 0 {
return result, nil
}
return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
// ServerPreferredResources returns the supported resources with the version preferred by the
// server. Discovery is retried up to defaultRetries times if groups disappear mid-walk.
func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
	// The method value already has the func(bool) ([]*metav1.APIResourceList, error)
	// signature that withRetries expects, so no wrapping closure (whose parameter
	// was also misleadingly named "retryEarly") is needed. This matches how
	// ServerResources passes d.serverResources.
	return withRetries(defaultRetries, d.serverPreferredResources)
}
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server. Any partial-discovery error from
// ServerPreferredResources is passed through alongside the filtered result.
func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
all, err := d.ServerPreferredResources()
return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
return r.Namespaced
}), all), err
}
// ServerVersion retrieves and parses the server's version (git version).
func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
	raw, err := d.restClient.Get().AbsPath("/version").Do().Raw()
	if err != nil {
		return nil, err
	}
	var info version.Info
	if err := json.Unmarshal(raw, &info); err != nil {
		return nil, fmt.Errorf("got '%s': %v", string(raw), err)
	}
	return &info, nil
}
// SwaggerSchema retrieves and parses the swagger API schema the server supports.
// TODO: Replace usages with Open API. Tracked in https://github.com/kubernetes/kubernetes/issues/44589
func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) {
if version.Empty() {
return nil, fmt.Errorf("groupVersion cannot be empty")
}
// Verify the requested version is actually served before fetching its schema.
groupList, err := d.ServerGroups()
if err != nil {
return nil, err
}
groupVersions := metav1.ExtractGroupVersions(groupList)
// This check also takes care the case that kubectl is newer than the running endpoint
if stringDoesntExistIn(version.String(), groupVersions) {
return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions)
}
// Legacy core v1 lives under /swaggerapi<LegacyPrefix>/<version>; everything
// else under /swaggerapi/apis/<group>/<version>.
var path string
if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion {
path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version
} else {
path = "/swaggerapi/apis/" + version.Group + "/" + version.Version
}
body, err := d.restClient.Get().AbsPath(path).Do().Raw()
if err != nil {
return nil, err
}
var schema swagger.ApiDeclaration
err = json.Unmarshal(body, &schema)
if err != nil {
return nil, fmt.Errorf("got '%s': %v", string(body), err)
}
return &schema, nil
}
// OpenAPISchema fetches the open api schema using a rest client and parses the proto.
func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
	data, err := d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw()
	if err != nil {
		return nil, err
	}
	doc := &openapi_v2.Document{}
	if err := proto.Unmarshal(data, doc); err != nil {
		return nil, err
	}
	return doc, nil
}
// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns.
// Every attempt except the last runs with failEarly=true, so a partial
// failure immediately triggers another attempt; the final attempt collects
// what it can and returns the partial result with the aggregated error.
func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) {
var result []*metav1.APIResourceList
var err error
for i := 0; i < maxRetries; i++ {
failEarly := i < maxRetries-1
result, err = f(failEarly)
if err == nil {
return result, nil
}
// Only group-discovery failures are retryable; anything else is fatal.
if _, ok := err.(*ErrGroupDiscoveryFailed); !ok {
return nil, err
}
}
return result, err
}
// setDiscoveryDefaults mutates config for use by an unversioned discovery
// client: no API path or group version, a decode-only (NoopEncoder) codec,
// and a default user agent when none is set.
func setDiscoveryDefaults(config *restclient.Config) error {
config.APIPath = ""
config.GroupVersion = nil
codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
if len(config.UserAgent) == 0 {
config.UserAgent = restclient.DefaultKubernetesUserAgent()
}
return nil
}
// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
// can be used to discover supported resources in the API server.
// The input config is copied and never mutated. Note that a DiscoveryClient
// is returned even when err is non-nil (it then wraps a nil rest client).
func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
config := *c
if err := setDiscoveryDefaults(&config); err != nil {
return nil, err
}
client, err := restclient.UnversionedRESTClientFor(&config)
return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err
}
// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If
// there is an error, it panics.
func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
client, err := NewDiscoveryClientForConfig(c)
if err != nil {
panic(err)
}
return client
}
// NewDiscoveryClient creates a new DiscoveryClient for the given RESTClient,
// using the standard "/api" legacy prefix.
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient {
return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"}
}
// stringDoesntExistIn reports whether str is absent from slice.
func stringDoesntExistIn(str string, slice []string) bool {
	for _, candidate := range slice {
		if candidate == str {
			return false
		}
	}
	return true
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// A nil receiver is tolerated and yields a nil interface.
func (c *DiscoveryClient) RESTClient() restclient.Interface {
if c == nil {
return nil
}
return c.restClient
}
Make it possible to allow discovery errors for controllers.
Kubernetes-commit: b7286f3188e997c98c89e1a8755b58ae69fbd4b0
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
"github.com/emicklei/go-restful-swagger12"
"github.com/golang/protobuf/proto"
"github.com/googleapis/gnostic/OpenAPIv2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
)
// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources).
const defaultRetries = 2
// DiscoveryInterface holds the methods that discover server-supported API groups,
// versions and resources.
type DiscoveryInterface interface {
RESTClient() restclient.Interface
ServerGroupsInterface
ServerResourcesInterface
ServerVersionInterface
SwaggerSchemaInterface
OpenAPISchemaInterface
}
// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness.
type CachedDiscoveryInterface interface {
DiscoveryInterface
// Fresh is supposed to tell the caller whether or not to retry if the cache
// fails to find something (false = retry, true = no need to retry).
//
// TODO: this needs to be revisited, this interface can't be locked properly
// and doesn't make a lot of sense.
Fresh() bool
// Invalidate enforces that no cached data is used in the future that is older than the current time.
Invalidate()
}
// ServerGroupsInterface has methods for obtaining supported groups on the API server
type ServerGroupsInterface interface {
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
ServerGroups() (*metav1.APIGroupList, error)
}
// ServerResourcesInterface has methods for obtaining supported resources on the API server
type ServerResourcesInterface interface {
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error)
// ServerResources returns the supported resources for all groups and versions.
ServerResources() ([]*metav1.APIResourceList, error)
// ServerPreferredResources returns the supported resources with the version preferred by the
// server.
ServerPreferredResources() ([]*metav1.APIResourceList, error)
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server.
ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error)
}
// ServerVersionInterface has a method for retrieving the server's version.
type ServerVersionInterface interface {
// ServerVersion retrieves and parses the server's version (git version).
ServerVersion() (*version.Info, error)
}
// SwaggerSchemaInterface has a method to retrieve the swagger schema.
type SwaggerSchemaInterface interface {
// SwaggerSchema retrieves and parses the swagger API schema the server supports.
SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error)
}
// OpenAPISchemaInterface has a method to retrieve the open API schema.
type OpenAPISchemaInterface interface {
// OpenAPISchema retrieves and parses the swagger API schema the server supports.
OpenAPISchema() (*openapi_v2.Document, error)
}
// DiscoveryClient implements the functions that discover server-supported API groups,
// versions and resources.
type DiscoveryClient struct {
restClient restclient.Interface
LegacyPrefix string
}
// apiVersionsToAPIGroup converts a metav1.APIVersions (served at the legacy
// /api endpoint) into a metav1.APIGroup. APIVersions is used by legacy v1, so
// the resulting group name is "".
func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) {
	groupVersions := []metav1.GroupVersionForDiscovery{}
	for _, version := range apiVersions.Versions {
		groupVersion := metav1.GroupVersionForDiscovery{
			GroupVersion: version,
			Version:      version,
		}
		groupVersions = append(groupVersions, groupVersion)
	}
	apiGroup.Versions = groupVersions
	// There should be only one groupVersion returned at /api.
	// Guard against an empty version list so a misbehaving server cannot
	// trigger an index-out-of-range panic; callers (ServerGroups) already
	// skip the result when no versions were returned.
	if len(groupVersions) > 0 {
		apiGroup.PreferredVersion = groupVersions[0]
	}
	return
}
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
// Get the groupVersions exposed at /api
v := &metav1.APIVersions{}
err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
apiGroup := metav1.APIGroup{}
if err == nil && len(v.Versions) != 0 {
apiGroup = apiVersionsToAPIGroup(v)
}
if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
return nil, err
}
// Get the groupVersions exposed at /apis
apiGroupList = &metav1.APIGroupList{}
err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
return nil, err
}
// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
apiGroupList = &metav1.APIGroupList{}
}
// append the group retrieved from /api to the list if not empty
if len(v.Versions) != 0 {
apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
}
return apiGroupList, nil
}
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) {
url := url.URL{}
if len(groupVersion) == 0 {
return nil, fmt.Errorf("groupVersion shouldn't be empty")
}
if len(d.LegacyPrefix) > 0 && groupVersion == "v1" {
url.Path = d.LegacyPrefix + "/" + groupVersion
} else {
url.Path = "/apis/" + groupVersion
}
resources = &metav1.APIResourceList{
GroupVersion: groupVersion,
}
err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources)
if err != nil {
// ignore 403 or 404 error to be compatible with an v1.0 server.
if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
return resources, nil
}
return nil, err
}
return resources, nil
}
// serverResources returns the supported resources for all groups and versions.
func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) {
apiGroups, err := d.ServerGroups()
if err != nil {
return nil, err
}
result := []*metav1.APIResourceList{}
failedGroups := make(map[schema.GroupVersion]error)
for _, apiGroup := range apiGroups.Groups {
for _, version := range apiGroup.Versions {
gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
if err != nil {
// TODO: maybe restrict this to NotFound errors
failedGroups[gv] = err
continue
}
result = append(result, resources)
}
}
if len(failedGroups) == 0 {
return result, nil
}
return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
// ServerResources returns the supported resources for all groups and versions.
func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
return withRetries(defaultRetries, d.serverResources)
}
// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load.
type ErrGroupDiscoveryFailed struct {
// Groups is a list of the groups that failed to load and the error cause
Groups map[schema.GroupVersion]error
}
// Error implements the error interface
func (e *ErrGroupDiscoveryFailed) Error() string {
var groups []string
for k, v := range e.Groups {
groups = append(groups, fmt.Sprintf("%s: %v", k, v))
}
sort.Strings(groups)
return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", "))
}
// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover
// a complete list of APIs for the client to use.
func IsGroupDiscoveryFailedError(err error) bool {
_, ok := err.(*ErrGroupDiscoveryFailed)
return err != nil && ok
}
// serverPreferredResources returns the supported resources with the version preferred by the server.
func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) {
serverGroupList, err := d.ServerGroups()
if err != nil {
return nil, err
}
result := []*metav1.APIResourceList{}
failedGroups := make(map[schema.GroupVersion]error)
grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource
grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource
gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping
for _, apiGroup := range serverGroupList.Groups {
for _, version := range apiGroup.Versions {
groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
if err != nil {
// TODO: maybe restrict this to NotFound errors
failedGroups[groupVersion] = err
continue
}
// create empty list which is filled later in another loop
emptyApiResourceList := metav1.APIResourceList{
GroupVersion: version.GroupVersion,
}
gvApiResourceLists[groupVersion] = &emptyApiResourceList
result = append(result, &emptyApiResourceList)
for i := range apiResourceList.APIResources {
apiResource := &apiResourceList.APIResources[i]
if strings.Contains(apiResource.Name, "/") {
continue
}
gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name}
if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version {
// only override with preferred version
continue
}
grVersions[gv] = version.Version
grApiResources[gv] = apiResource
}
}
}
// group selected APIResources according to GroupVersion into APIResourceLists
for groupResource, apiResource := range grApiResources {
version := grVersions[groupResource]
groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version}
apiResourceList := gvApiResourceLists[groupVersion]
apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource)
}
if len(failedGroups) == 0 {
return result, nil
}
return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
// ServerPreferredResources returns the supported resources with the version preferred by the
// server.
func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
return withRetries(defaultRetries, d.serverPreferredResources)
}
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server.
func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
all, err := d.ServerPreferredResources()
return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
return r.Namespaced
}), all), err
}
// ServerVersion retrieves and parses the server's version (git version).
func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
body, err := d.restClient.Get().AbsPath("/version").Do().Raw()
if err != nil {
return nil, err
}
var info version.Info
err = json.Unmarshal(body, &info)
if err != nil {
return nil, fmt.Errorf("got '%s': %v", string(body), err)
}
return &info, nil
}
// SwaggerSchema retrieves and parses the swagger API schema the server supports.
// TODO: Replace usages with Open API. Tracked in https://github.com/kubernetes/kubernetes/issues/44589
func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) {
if version.Empty() {
return nil, fmt.Errorf("groupVersion cannot be empty")
}
groupList, err := d.ServerGroups()
if err != nil {
return nil, err
}
groupVersions := metav1.ExtractGroupVersions(groupList)
// This check also takes care the case that kubectl is newer than the running endpoint
if stringDoesntExistIn(version.String(), groupVersions) {
return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions)
}
var path string
if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion {
path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version
} else {
path = "/swaggerapi/apis/" + version.Group + "/" + version.Version
}
body, err := d.restClient.Get().AbsPath(path).Do().Raw()
if err != nil {
return nil, err
}
var schema swagger.ApiDeclaration
err = json.Unmarshal(body, &schema)
if err != nil {
return nil, fmt.Errorf("got '%s': %v", string(body), err)
}
return &schema, nil
}
// OpenAPISchema fetches the open api schema using a rest client and parses the proto.
func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
data, err := d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw()
if err != nil {
return nil, err
}
document := &openapi_v2.Document{}
err = proto.Unmarshal(data, document)
if err != nil {
return nil, err
}
return document, nil
}
// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns.
func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) {
var result []*metav1.APIResourceList
var err error
for i := 0; i < maxRetries; i++ {
result, err = f()
if err == nil {
return result, nil
}
if _, ok := err.(*ErrGroupDiscoveryFailed); !ok {
return nil, err
}
}
return result, err
}
func setDiscoveryDefaults(config *restclient.Config) error {
config.APIPath = ""
config.GroupVersion = nil
codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
if len(config.UserAgent) == 0 {
config.UserAgent = restclient.DefaultKubernetesUserAgent()
}
return nil
}
// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
// can be used to discover supported resources in the API server.
func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
config := *c
if err := setDiscoveryDefaults(&config); err != nil {
return nil, err
}
client, err := restclient.UnversionedRESTClientFor(&config)
return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err
}
// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If
// there is an error, it panics.
func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
client, err := NewDiscoveryClientForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new DiscoveryClient for the given RESTClient.
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient {
return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"}
}
// stringDoesntExistIn reports whether str is absent from slice.
func stringDoesntExistIn(str string, slice []string) bool {
	for _, candidate := range slice {
		if candidate == str {
			return false
		}
	}
	return true
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *DiscoveryClient) RESTClient() restclient.Interface {
	// Tolerate calls on a nil receiver by returning a nil interface.
	if c == nil {
		return nil
	}
	return c.restClient
}
|
package dynago
// queryRequest is the wire representation of a DynamoDB Query request.
type queryRequest struct {
	TableName string
	IndexName string `json:",omitempty"`
	// Filtering and query expressions
	KeyConditionExpression string `json:",omitempty"`
	FilterExpression       string `json:",omitempty"`
	ProjectionExpression   string `json:",omitempty"`
	expressionAttributes
	CapacityDetail   CapacityDetail `json:"ReturnConsumedCapacity,omitempty"`
	ConsistentRead   *bool          `json:",omitempty"`
	ScanIndexForward *bool          `json:",omitempty"`
	Limit            uint           `json:",omitempty"`
}

// queryResponse is the wire representation of a DynamoDB Query response.
type queryResponse struct {
	//ConsumedCapacity *ConsumedCapacityResponse // TODO
	Count            int
	Items            []Document
	LastEvaluatedKey *Document
}
// newQuery creates a Query builder bound to the given client and table.
func newQuery(client *Client, table string) *Query {
	req := queryRequest{
		TableName: table,
	}
	return &Query{client, req}
}

// Query is a chainable builder for DynamoDB Query requests. Builder methods
// use a value receiver and return a pointer to the modified copy, so each
// step leaves the previous Query untouched.
type Query struct {
	client *Client
	req    queryRequest
}
// If strong is true, do a strongly consistent read. (defaults to false)
// The value receiver means the returned *Query is a modified copy.
func (q Query) ConsistentRead(strong bool) *Query {
	q.req.ConsistentRead = &strong
	return &q
}

// Set a post-filter expression for the results we scan.
// Any params are folded into the request's expression attributes.
func (q Query) FilterExpression(expression string, params ...interface{}) *Query {
	q.req.paramsHelper(params)
	q.req.FilterExpression = expression
	return &q
}

// Set a condition expression on the key to narrow down what we scan
func (q Query) KeyConditionExpression(expression string) *Query {
	q.req.KeyConditionExpression = expression
	return &q
}

// Set a Projection Expression for controlling which attributes are returned.
func (q Query) ProjectionExpression(expression string) *Query {
	q.req.ProjectionExpression = expression
	return &q
}

// Shortcut to set a single parameter for ExpressionAttributeValues.
func (q Query) Param(key string, value interface{}) *Query {
	q.req.paramHelper(key, value)
	return &q
}

// Set a param, a document of params, or multiple params
func (q Query) Params(params ...interface{}) *Query {
	q.req.paramsHelper(params)
	return &q
}

// Return results descending.
// Sets ScanIndexForward to false on the request.
func (q Query) Desc() *Query {
	forward := false
	q.req.ScanIndexForward = &forward
	return &q
}

// Limit caps the number of items the query evaluates.
func (q Query) Limit(limit uint) *Query {
	q.req.Limit = limit
	return &q
}
// Execute this query and return results.
func (q *Query) Execute() (result *QueryResult, err error) {
	return q.client.executor.Query(q)
}

// Query sends the built request to DynamoDB and converts the raw response
// into a QueryResult.
func (e *awsExecutor) Query(q *Query) (result *QueryResult, err error) {
	var response queryResponse
	err = e.makeRequestUnmarshal("Query", &q.req, &response)
	if err != nil {
		return
	}
	result = &QueryResult{
		Items: response.Items,
		Count: response.Count,
	}
	return
}
// The result returned from a query.
// NOTE(review): the response's LastEvaluatedKey is not surfaced here, so
// callers cannot paginate past the first page — confirm this is intended.
type QueryResult struct {
	Items []Document
	Count int // The total number of items (for pagination)
}
Add IndexName to queries
package dynago
// queryRequest is the wire representation of a DynamoDB Query request.
type queryRequest struct {
	TableName string
	IndexName string `json:",omitempty"`
	// Filtering and query expressions
	KeyConditionExpression string `json:",omitempty"`
	FilterExpression       string `json:",omitempty"`
	ProjectionExpression   string `json:",omitempty"`
	expressionAttributes
	CapacityDetail   CapacityDetail `json:"ReturnConsumedCapacity,omitempty"`
	ConsistentRead   *bool          `json:",omitempty"`
	ScanIndexForward *bool          `json:",omitempty"`
	Limit            uint           `json:",omitempty"`
}

// queryResponse is the wire representation of a DynamoDB Query response.
type queryResponse struct {
	//ConsumedCapacity *ConsumedCapacityResponse // TODO
	Count            int
	Items            []Document
	LastEvaluatedKey *Document
}

// newQuery creates a Query builder bound to the given client and table.
func newQuery(client *Client, table string) *Query {
	req := queryRequest{
		TableName: table,
	}
	return &Query{client, req}
}

// Query is a chainable builder for DynamoDB Query requests. Builder methods
// use a value receiver and return a pointer to the modified copy, so each
// step leaves the previous Query untouched.
type Query struct {
	client *Client
	req    queryRequest
}

// IndexName sets the IndexName on the request, directing the query at the
// named (secondary) index instead of the base table.
func (q Query) IndexName(name string) *Query {
	q.req.IndexName = name
	return &q
}
// If strong is true, do a strongly consistent read. (defaults to false)
// The value receiver means the returned *Query is a modified copy.
func (q Query) ConsistentRead(strong bool) *Query {
	q.req.ConsistentRead = &strong
	return &q
}

// Set a post-filter expression for the results we scan.
// Any params are folded into the request's expression attributes.
func (q Query) FilterExpression(expression string, params ...interface{}) *Query {
	q.req.paramsHelper(params)
	q.req.FilterExpression = expression
	return &q
}

// Set a condition expression on the key to narrow down what we scan
func (q Query) KeyConditionExpression(expression string) *Query {
	q.req.KeyConditionExpression = expression
	return &q
}

// Set a Projection Expression for controlling which attributes are returned.
func (q Query) ProjectionExpression(expression string) *Query {
	q.req.ProjectionExpression = expression
	return &q
}

// Shortcut to set a single parameter for ExpressionAttributeValues.
func (q Query) Param(key string, value interface{}) *Query {
	q.req.paramHelper(key, value)
	return &q
}

// Set a param, a document of params, or multiple params
func (q Query) Params(params ...interface{}) *Query {
	q.req.paramsHelper(params)
	return &q
}

// Return results descending.
// Sets ScanIndexForward to false on the request.
func (q Query) Desc() *Query {
	forward := false
	q.req.ScanIndexForward = &forward
	return &q
}

// Limit caps the number of items the query evaluates.
func (q Query) Limit(limit uint) *Query {
	q.req.Limit = limit
	return &q
}
// Execute this query and return results.
func (q *Query) Execute() (result *QueryResult, err error) {
	return q.client.executor.Query(q)
}

// Query sends the built request to DynamoDB and converts the raw response
// into a QueryResult.
func (e *awsExecutor) Query(q *Query) (result *QueryResult, err error) {
	var response queryResponse
	err = e.makeRequestUnmarshal("Query", &q.req, &response)
	if err != nil {
		return
	}
	result = &QueryResult{
		Items: response.Items,
		Count: response.Count,
	}
	return
}

// The result returned from a query.
// NOTE(review): the response's LastEvaluatedKey is not surfaced here, so
// callers cannot paginate past the first page — confirm this is intended.
type QueryResult struct {
	Items []Document
	Count int // The total number of items (for pagination)
}
|
package config
import (
"path/filepath"
"sync"
)
var (
mutex sync.Mutex
Values = make(map[int]interface{})
)
const (
BASEDIR = iota
APPDIR
)
// SetBaseDir records the base directory (the parent directory of path, made
// absolute) under BASEDIR and derives APPDIR as the sibling "webapp"
// directory. It returns any error from resolving the absolute path.
func SetBaseDir(path string) error {
	mutex.Lock()
	defer mutex.Unlock()
	absPath, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	// filepath.Dir strips the last path element, so path is presumably a
	// file (e.g. the executable) inside the base directory — TODO confirm.
	Values[BASEDIR] = filepath.Dir(absPath)
	Values[APPDIR] = filepath.Clean(Values[BASEDIR].(string) + "/../webapp")
	return nil
}
// GetBaseDir returns the base directory stored by SetBaseDir.
// It panics (nil map entry type assertion) if SetBaseDir was never called.
func GetBaseDir() string {
	mutex.Lock()
	defer mutex.Unlock()
	return Values[BASEDIR].(string)
}

// GetAppDir returns the webapp directory derived by SetBaseDir.
// It panics if SetBaseDir was never called.
func GetAppDir() string {
	mutex.Lock()
	defer mutex.Unlock()
	return Values[APPDIR].(string)
}
Remove locking boilerplate
package config
import (
"path/filepath"
"sync"
)
var (
mutex sync.Mutex
Values = make(map[int]interface{})
)
const (
BASEDIR = iota
APPDIR
)
// SetBaseDir records the base directory (the parent directory of path, made
// absolute) under BASEDIR and derives APPDIR as the sibling "webapp"
// directory. It returns any error from resolving the absolute path.
func SetBaseDir(path string) error {
	mutex.Lock()
	defer mutex.Unlock()
	absPath, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	// filepath.Dir strips the last path element, so path is presumably a
	// file (e.g. the executable) inside the base directory — TODO confirm.
	Values[BASEDIR] = filepath.Dir(absPath)
	Values[APPDIR] = filepath.Clean(Values[BASEDIR].(string) + "/../webapp")
	return nil
}
// GetBaseDir returns the base directory stored by SetBaseDir.
// It panics (nil interface type assertion) if SetBaseDir was never called.
func GetBaseDir() string {
	return getValue(BASEDIR).(string)
}

// GetAppDir returns the webapp directory derived by SetBaseDir.
// It panics if SetBaseDir was never called.
func GetAppDir() string {
	return getValue(APPDIR).(string)
}

// getValue reads one entry from Values while holding the package mutex.
func getValue(key int) interface{} {
	mutex.Lock()
	defer mutex.Unlock()
	return Values[key]
}
|
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"bufio"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
stdtesting "testing"
"time"
"github.com/juju/loggo"
"labix.org/v2/mgo"
gc "launchpad.net/gocheck"
"launchpad.net/juju-core/cert"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/utils"
)
var (
// MgoServer is a shared mongo server used by tests.
MgoServer = &MgoInstance{ssl: true}
logger = loggo.GetLogger("juju.testing")
)
type MgoInstance struct {
// addr holds the address of the MongoDB server
addr string
// MgoPort holds the port of the MongoDB server.
port int
// server holds the running MongoDB command.
server *exec.Cmd
// exited receives a value when the mongodb server exits.
exited <-chan struct{}
// dir holds the directory that MongoDB is running in.
dir string
// ssl determines whether the MongoDB server will use TLS
ssl bool
// Params is a list of additional parameters that will be passed to
// the mongod application
Params []string
}
// Addr returns the address of the MongoDB server.
func (m *MgoInstance) Addr() string {
return m.addr
}
// Port returns the port of the MongoDB server.
func (m *MgoInstance) Port() int {
return m.port
}
// We specify a timeout to mgo.Dial, to prevent
// mongod failures hanging the tests.
const mgoDialTimeout = 15 * time.Second
// MgoSuite is a suite that deletes all content from the shared MongoDB
// server at the end of every test and supplies a connection to the shared
// MongoDB server.
type MgoSuite struct {
Session *mgo.Session
}
// Start starts a MongoDB server in a temporary directory.
// On failure all partial state (addr, port, data dir) is rolled back and the
// underlying error is returned.
func (inst *MgoInstance) Start(ssl bool) error {
	dbdir, err := ioutil.TempDir("", "test-mgo")
	if err != nil {
		return err
	}
	logger.Debugf("starting mongo in %s", dbdir)
	// give them all the same keyfile so they can talk appropriately
	keyFilePath := filepath.Join(dbdir, "keyfile")
	err = ioutil.WriteFile(keyFilePath, []byte("not very secret"), 0600)
	if err != nil {
		return fmt.Errorf("cannot write key file: %v", err)
	}
	pemPath := filepath.Join(dbdir, "server.pem")
	err = ioutil.WriteFile(pemPath, []byte(ServerCert+ServerKey), 0600)
	if err != nil {
		return fmt.Errorf("cannot write cert/key PEM: %v", err)
	}
	inst.port = FindTCPPort()
	inst.addr = fmt.Sprintf("localhost:%d", inst.port)
	inst.dir = dbdir
	inst.ssl = ssl
	if err := inst.run(); err != nil {
		// Roll back the partial state and return the run error directly.
		// We must return here: inst.server is nil when run() fails (e.g.
		// mongod missing from $PATH), so falling through to the Debugf
		// below would dereference a nil pointer — and the shadowed err
		// would be lost, making Start report success on failure.
		inst.addr = ""
		inst.port = 0
		os.RemoveAll(inst.dir)
		inst.dir = ""
		return err
	}
	logger.Debugf("started mongod pid %d in %s on port %d", inst.server.Process.Pid, dbdir, inst.port)
	return nil
}
// run runs the MongoDB server at the
// address and directory already configured.
// It spawns mongod, wires up a watcher goroutine that tails its output and
// logs an unexpected (non-killed) exit, and records the process on success.
func (inst *MgoInstance) run() error {
	if inst.server != nil {
		panic("mongo server is already running")
	}
	mgoport := strconv.Itoa(inst.port)
	mgoargs := []string{
		"--auth",
		"--dbpath", inst.dir,
		"--port", mgoport,
		"--nssize", "1",
		"--noprealloc",
		"--smallfiles",
		"--nojournal",
		"--nounixsocket",
		"--oplogSize", "10",
		"--keyFile", filepath.Join(inst.dir, "keyfile"),
	}
	if inst.ssl {
		mgoargs = append(mgoargs,
			"--sslOnNormalPorts",
			"--sslPEMKeyFile", filepath.Join(inst.dir, "server.pem"),
			"--sslPEMKeyPassword", "ignored")
	}
	if inst.Params != nil {
		mgoargs = append(mgoargs, inst.Params...)
	}
	server := exec.Command("mongod", mgoargs...)
	out, err := server.StdoutPipe()
	if err != nil {
		return err
	}
	// Merge stderr into the stdout pipe so readLines sees everything.
	server.Stderr = server.Stdout
	exited := make(chan struct{})
	started := make(chan struct{})
	// The watcher blocks until Start has been attempted (started closed),
	// then collects the last output lines and waits for the process.
	go func() {
		<-started
		lines := readLines(fmt.Sprintf("mongod:%v", mgoport), out, 20)
		err := server.Wait()
		exitErr, _ := err.(*exec.ExitError)
		if err == nil || exitErr != nil && exitErr.Exited() {
			// mongodb has exited without being killed, so print the
			// last few lines of its log output.
			log.Errorf("mongodb has exited without being killed")
			for _, line := range lines {
				log.Errorf("mongod: %s", line)
			}
		}
		close(exited)
	}()
	inst.exited = exited
	err = server.Start()
	close(started)
	if err != nil {
		return err
	}
	inst.server = server
	return nil
}
// kill stops the running mongod, waits for the watcher goroutine to
// observe the exit, and clears the process bookkeeping.
func (inst *MgoInstance) kill() {
	inst.server.Process.Kill()
	<-inst.exited
	inst.server = nil
	inst.exited = nil
}

// Destroy kills the server, if running, and removes its data directory.
func (inst *MgoInstance) Destroy() {
	if inst.server != nil {
		logger.Debugf("killing mongod pid %d in %s on port %d", inst.server.Process.Pid, inst.dir, inst.port)
		inst.kill()
		os.RemoveAll(inst.dir)
		inst.addr, inst.dir = "", ""
	}
}

// Restart restarts the mongo server, useful for
// testing what happens when a state server goes down.
// It panics if the server cannot be started again.
func (inst *MgoInstance) Restart() {
	logger.Debugf("restarting mongod pid %d in %s on port %d", inst.server.Process.Pid, inst.dir, inst.port)
	inst.kill()
	if err := inst.Start(inst.ssl); err != nil {
		panic(err)
	}
}
// MgoTestPackage should be called to register the tests for any package that
// requires a MongoDB server.
func MgoTestPackage(t *stdtesting.T) {
MgoTestPackageSsl(t, true)
}
func MgoTestPackageSsl(t *stdtesting.T, ssl bool) {
if err := MgoServer.Start(ssl); err != nil {
t.Fatal(err)
}
defer MgoServer.Destroy()
gc.TestingT(t)
}
func (s *MgoSuite) SetUpSuite(c *gc.C) {
if MgoServer.addr == "" {
panic("MgoSuite tests must be run with MgoTestPackage")
}
mgo.SetStats(true)
// Make tests that use password authentication faster.
utils.FastInsecureHash = true
}
// readLines reads lines from the given reader and returns
// the last n non-empty lines, ignoring empty lines.
// Every line read is also traced with the given prefix, and when more than
// n lines were seen a "[k lines omitted]" marker is prepended to the result.
func readLines(prefix string, r io.Reader, n int) []string {
	br := bufio.NewReader(r)
	// lines is a ring buffer of the most recent n non-empty lines;
	// i counts every non-empty line ever seen.
	lines := make([]string, n)
	i := 0
	for {
		line, err := br.ReadString('\n')
		if line = strings.TrimRight(line, "\n"); line != "" {
			logger.Tracef("%s: %s", prefix, line)
			lines[i%n] = line
			i++
		}
		if err != nil {
			break
		}
	}
	final := make([]string, 0, n+1)
	if i > n {
		final = append(final, fmt.Sprintf("[%d lines omitted]", i-n))
	}
	// Replay the ring buffer in chronological order, skipping unused slots.
	for j := 0; j < n; j++ {
		if line := lines[(j+i)%n]; line != "" {
			final = append(final, line)
		}
	}
	return final
}
func (s *MgoSuite) TearDownSuite(c *gc.C) {
utils.FastInsecureHash = false
}
// MustDial returns a new connection to the MongoDB server, and panics on
// errors.
func (inst *MgoInstance) MustDial() *mgo.Session {
s, err := mgo.DialWithInfo(inst.DialInfo())
if err != nil {
panic(err)
}
return s
}
// Dial returns a new connection to the MongoDB server.
func (inst *MgoInstance) Dial() (*mgo.Session, error) {
return mgo.DialWithInfo(inst.DialInfo())
}
// DialInfo returns information suitable for dialling the
// receiving MongoDB instance.
func (inst *MgoInstance) DialInfo() *mgo.DialInfo {
return MgoDialInfo(inst.addr)
}
// DialDirect returns a new direct connection to the shared MongoDB server. This
// must be used if you're connecting to a replicaset that hasn't been initiated
// yet.
func (inst *MgoInstance) DialDirect() (*mgo.Session, error) {
info := inst.DialInfo()
info.Direct = true
return mgo.DialWithInfo(info)
}
// MustDialDirect works like DialDirect, but panics on errors.
func (inst *MgoInstance) MustDialDirect() *mgo.Session {
session, err := inst.DialDirect()
if err != nil {
panic(err)
}
return session
}
// MgoDialInfo returns a DialInfo suitable
// for dialling an MgoInstance at any of the
// given addresses.
// Connections are wrapped in TLS and verified against the test CA cert.
func MgoDialInfo(addrs ...string) *mgo.DialInfo {
	pool := x509.NewCertPool()
	xcert, err := cert.ParseCert([]byte(CACert))
	if err != nil {
		panic(err)
	}
	pool.AddCert(xcert)
	tlsConfig := &tls.Config{
		RootCAs: pool,
		// Verification is pinned to a fixed name rather than the dialled
		// host — presumably the test cert's name does not match
		// localhost; confirm against the test certificate.
		ServerName: "anything",
	}
	return &mgo.DialInfo{
		Addrs: addrs,
		// Custom dialler: every connection goes through tls.Dial with the
		// pool built above.
		Dial: func(addr net.Addr) (net.Conn, error) {
			conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
			if err != nil {
				logger.Debugf("tls.Dial(%s) failed with %v", addr, err)
				return nil, err
			}
			return conn, nil
		},
		Timeout: mgoDialTimeout,
	}
}
func (s *MgoSuite) SetUpTest(c *gc.C) {
mgo.ResetStats()
s.Session = MgoServer.MustDial()
}
// Reset deletes all content from the MongoDB server and panics if it encounters
// errors.
func (inst *MgoInstance) Reset() {
// If the server has already been destroyed for testing purposes,
// just start it again.
if inst.Addr() == "" {
if err := inst.Start(inst.ssl); err != nil {
panic(err)
}
return
}
session := inst.MustDial()
defer session.Close()
dbnames, ok := resetAdminPasswordAndFetchDBNames(session)
if !ok {
// We restart it to regain access. This should only
// happen when tests fail.
log.Noticef("testing: restarting MongoDB server after unauthorized access")
inst.Destroy()
if err := inst.Start(inst.ssl); err != nil {
panic(err)
}
return
}
log.Infof("Reset successfully reset admin password")
for _, name := range dbnames {
switch name {
case "admin", "local", "config":
default:
if err := session.DB(name).DropDatabase(); err != nil {
panic(fmt.Errorf("Cannot drop MongoDB database %v: %v", name, err))
}
}
}
}
// resetAdminPasswordAndFetchDBNames logs into the database with a
// plausible password and returns all the database's db names. We need
// to try several passwords because we don't know what state the mongo
// server is in when Reset is called. If the test has set a custom
// password, we're out of luck, but if they are using
// DefaultStatePassword, we can succeed.
func resetAdminPasswordAndFetchDBNames(session *mgo.Session) ([]string, bool) {
// First try with no password
dbnames, err := session.DatabaseNames()
if err == nil {
return dbnames, true
}
if !isUnauthorized(err) {
panic(err)
}
// Then try the two most likely passwords in turn.
for _, password := range []string{
DefaultMongoPassword,
utils.UserPasswordHash(DefaultMongoPassword, utils.CompatSalt),
} {
admin := session.DB("admin")
if err := admin.Login("admin", password); err != nil {
log.Infof("failed to log in with password %q", password)
continue
}
dbnames, err := session.DatabaseNames()
if err == nil {
if err := admin.RemoveUser("admin"); err != nil {
panic(err)
}
return dbnames, true
}
if !isUnauthorized(err) {
panic(err)
}
log.Infof("unauthorized access when getting database names; password %q", password)
}
return nil, false
}
// isUnauthorized is a copy of the same function in state/open.go.
// It reports whether err looks like a MongoDB authorization failure.
func isUnauthorized(err error) bool {
	if err == nil {
		return false
	}
	// Some unauthorized access errors have no error code,
	// just a simple error string.
	if err.Error() == "auth fails" {
		return true
	}
	if err, ok := err.(*mgo.QueryError); ok {
		// 10057 appears to be mongo's "not authorized" code — confirm
		// against the MongoDB error-code list.
		return err.Code == 10057 ||
			err.Message == "need to login" ||
			err.Message == "unauthorized"
	}
	return false
}
func (s *MgoSuite) TearDownTest(c *gc.C) {
MgoServer.Reset()
s.Session.Close()
for i := 0; ; i++ {
stats := mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
}
// FindTCPPort finds an unused TCP port and returns it.
// It does so by binding an ephemeral port on 127.0.0.1 and immediately
// releasing it. This is inherently racy — another process may grab the
// port before the caller uses it — but the chance of that during testing
// is considered negligible. Panics if no port can be bound.
func FindTCPPort() int {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	port := listener.Addr().(*net.TCPAddr).Port
	listener.Close()
	return port
}
[r=dave-cheney] Fix nil pointer error during test setup
Now a missing mongod returns a much improved error
ubuntu@winton-02:~/src/launchpad.net/juju-core$ go test ./worker/uniter
--- FAIL: TestPackage (0.00 seconds)
mgo.go:211: exec: "mongod": executable file not found in $PATH
FAIL
FAIL launchpad.net/juju-core/worker/uniter 0.509s
https://codereview.appspot.com/85100044/
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"bufio"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
stdtesting "testing"
"time"
"github.com/juju/loggo"
"labix.org/v2/mgo"
gc "launchpad.net/gocheck"
"launchpad.net/juju-core/cert"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/utils"
)
var (
// MgoServer is a shared mongo server used by tests.
MgoServer = &MgoInstance{ssl: true}
logger = loggo.GetLogger("juju.testing")
)
type MgoInstance struct {
// addr holds the address of the MongoDB server
addr string
// MgoPort holds the port of the MongoDB server.
port int
// server holds the running MongoDB command.
server *exec.Cmd
// exited receives a value when the mongodb server exits.
exited <-chan struct{}
// dir holds the directory that MongoDB is running in.
dir string
// ssl determines whether the MongoDB server will use TLS
ssl bool
// Params is a list of additional parameters that will be passed to
// the mongod application
Params []string
}
// Addr returns the address of the MongoDB server.
func (m *MgoInstance) Addr() string {
return m.addr
}
// Port returns the port of the MongoDB server.
func (m *MgoInstance) Port() int {
return m.port
}
// We specify a timeout to mgo.Dial, to prevent
// mongod failures hanging the tests.
const mgoDialTimeout = 15 * time.Second
// MgoSuite is a suite that deletes all content from the shared MongoDB
// server at the end of every test and supplies a connection to the shared
// MongoDB server.
type MgoSuite struct {
Session *mgo.Session
}
// Start starts a MongoDB server in a temporary directory.
// On failure all partial state (addr, port, data dir) is rolled back and the
// underlying error is returned.
func (inst *MgoInstance) Start(ssl bool) error {
	dbdir, err := ioutil.TempDir("", "test-mgo")
	if err != nil {
		return err
	}
	logger.Debugf("starting mongo in %s", dbdir)
	// give them all the same keyfile so they can talk appropriately
	keyFilePath := filepath.Join(dbdir, "keyfile")
	err = ioutil.WriteFile(keyFilePath, []byte("not very secret"), 0600)
	if err != nil {
		return fmt.Errorf("cannot write key file: %v", err)
	}
	pemPath := filepath.Join(dbdir, "server.pem")
	err = ioutil.WriteFile(pemPath, []byte(ServerCert+ServerKey), 0600)
	if err != nil {
		return fmt.Errorf("cannot write cert/key PEM: %v", err)
	}
	inst.port = FindTCPPort()
	inst.addr = fmt.Sprintf("localhost:%d", inst.port)
	inst.dir = dbdir
	inst.ssl = ssl
	if err := inst.run(); err != nil {
		// run failed (e.g. mongod not installed): roll back the partial
		// state and return before the Debugf below, which would otherwise
		// dereference the nil inst.server.
		inst.addr = ""
		inst.port = 0
		os.RemoveAll(inst.dir)
		inst.dir = ""
		return err
	}
	logger.Debugf("started mongod pid %d in %s on port %d", inst.server.Process.Pid, dbdir, inst.port)
	return nil
}
// run runs the MongoDB server at the
// address and directory already configured.
func (inst *MgoInstance) run() error {
if inst.server != nil {
panic("mongo server is already running")
}
mgoport := strconv.Itoa(inst.port)
mgoargs := []string{
"--auth",
"--dbpath", inst.dir,
"--port", mgoport,
"--nssize", "1",
"--noprealloc",
"--smallfiles",
"--nojournal",
"--nounixsocket",
"--oplogSize", "10",
"--keyFile", filepath.Join(inst.dir, "keyfile"),
}
if inst.ssl {
mgoargs = append(mgoargs,
"--sslOnNormalPorts",
"--sslPEMKeyFile", filepath.Join(inst.dir, "server.pem"),
"--sslPEMKeyPassword", "ignored")
}
if inst.Params != nil {
mgoargs = append(mgoargs, inst.Params...)
}
server := exec.Command("mongod", mgoargs...)
out, err := server.StdoutPipe()
if err != nil {
return err
}
server.Stderr = server.Stdout
exited := make(chan struct{})
started := make(chan struct{})
go func() {
<-started
lines := readLines(fmt.Sprintf("mongod:%v", mgoport), out, 20)
err := server.Wait()
exitErr, _ := err.(*exec.ExitError)
if err == nil || exitErr != nil && exitErr.Exited() {
// mongodb has exited without being killed, so print the
// last few lines of its log output.
log.Errorf("mongodb has exited without being killed")
for _, line := range lines {
log.Errorf("mongod: %s", line)
}
}
close(exited)
}()
inst.exited = exited
err = server.Start()
close(started)
if err != nil {
return err
}
inst.server = server
return nil
}
func (inst *MgoInstance) kill() {
inst.server.Process.Kill()
<-inst.exited
inst.server = nil
inst.exited = nil
}
func (inst *MgoInstance) Destroy() {
if inst.server != nil {
logger.Debugf("killing mongod pid %d in %s on port %d", inst.server.Process.Pid, inst.dir, inst.port)
inst.kill()
os.RemoveAll(inst.dir)
inst.addr, inst.dir = "", ""
}
}
// Restart restarts the mongo server, useful for
// testing what happens when a state server goes down.
func (inst *MgoInstance) Restart() {
logger.Debugf("restarting mongod pid %d in %s on port %d", inst.server.Process.Pid, inst.dir, inst.port)
inst.kill()
if err := inst.Start(inst.ssl); err != nil {
panic(err)
}
}
// MgoTestPackage should be called to register the tests for any package that
// requires a MongoDB server.
func MgoTestPackage(t *stdtesting.T) {
MgoTestPackageSsl(t, true)
}
func MgoTestPackageSsl(t *stdtesting.T, ssl bool) {
if err := MgoServer.Start(ssl); err != nil {
t.Fatal(err)
}
defer MgoServer.Destroy()
gc.TestingT(t)
}
func (s *MgoSuite) SetUpSuite(c *gc.C) {
if MgoServer.addr == "" {
panic("MgoSuite tests must be run with MgoTestPackage")
}
mgo.SetStats(true)
// Make tests that use password authentication faster.
utils.FastInsecureHash = true
}
// readLines reads lines from the given reader and returns
// the last n non-empty lines, ignoring empty lines.
func readLines(prefix string, r io.Reader, n int) []string {
br := bufio.NewReader(r)
lines := make([]string, n)
i := 0
for {
line, err := br.ReadString('\n')
if line = strings.TrimRight(line, "\n"); line != "" {
logger.Tracef("%s: %s", prefix, line)
lines[i%n] = line
i++
}
if err != nil {
break
}
}
final := make([]string, 0, n+1)
if i > n {
final = append(final, fmt.Sprintf("[%d lines omitted]", i-n))
}
for j := 0; j < n; j++ {
if line := lines[(j+i)%n]; line != "" {
final = append(final, line)
}
}
return final
}
func (s *MgoSuite) TearDownSuite(c *gc.C) {
utils.FastInsecureHash = false
}
// MustDial returns a new connection to the MongoDB server, and panics on
// errors.
func (inst *MgoInstance) MustDial() *mgo.Session {
s, err := mgo.DialWithInfo(inst.DialInfo())
if err != nil {
panic(err)
}
return s
}
// Dial returns a new connection to the MongoDB server.
func (inst *MgoInstance) Dial() (*mgo.Session, error) {
return mgo.DialWithInfo(inst.DialInfo())
}
// DialInfo returns information suitable for dialling the
// receiving MongoDB instance.
func (inst *MgoInstance) DialInfo() *mgo.DialInfo {
return MgoDialInfo(inst.addr)
}
// DialDirect returns a new direct connection to the shared MongoDB server. This
// must be used if you're connecting to a replicaset that hasn't been initiated
// yet.
func (inst *MgoInstance) DialDirect() (*mgo.Session, error) {
info := inst.DialInfo()
info.Direct = true
return mgo.DialWithInfo(info)
}
// MustDialDirect works like DialDirect, but panics on errors.
func (inst *MgoInstance) MustDialDirect() *mgo.Session {
session, err := inst.DialDirect()
if err != nil {
panic(err)
}
return session
}
// MgoDialInfo returns a DialInfo suitable
// for dialling an MgoInstance at any of the
// given addresses.
func MgoDialInfo(addrs ...string) *mgo.DialInfo {
pool := x509.NewCertPool()
xcert, err := cert.ParseCert([]byte(CACert))
if err != nil {
panic(err)
}
pool.AddCert(xcert)
tlsConfig := &tls.Config{
RootCAs: pool,
ServerName: "anything",
}
return &mgo.DialInfo{
Addrs: addrs,
Dial: func(addr net.Addr) (net.Conn, error) {
conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
if err != nil {
logger.Debugf("tls.Dial(%s) failed with %v", addr, err)
return nil, err
}
return conn, nil
},
Timeout: mgoDialTimeout,
}
}
func (s *MgoSuite) SetUpTest(c *gc.C) {
mgo.ResetStats()
s.Session = MgoServer.MustDial()
}
// Reset deletes all content from the MongoDB server and panics if it encounters
// errors.
func (inst *MgoInstance) Reset() {
// If the server has already been destroyed for testing purposes,
// just start it again.
if inst.Addr() == "" {
if err := inst.Start(inst.ssl); err != nil {
panic(err)
}
return
}
session := inst.MustDial()
defer session.Close()
dbnames, ok := resetAdminPasswordAndFetchDBNames(session)
if !ok {
// We restart it to regain access. This should only
// happen when tests fail.
log.Noticef("testing: restarting MongoDB server after unauthorized access")
inst.Destroy()
if err := inst.Start(inst.ssl); err != nil {
panic(err)
}
return
}
log.Infof("Reset successfully reset admin password")
for _, name := range dbnames {
switch name {
case "admin", "local", "config":
default:
if err := session.DB(name).DropDatabase(); err != nil {
panic(fmt.Errorf("Cannot drop MongoDB database %v: %v", name, err))
}
}
}
}
// resetAdminPasswordAndFetchDBNames logs into the database with a
// plausible password and returns all the database's db names. We need
// to try several passwords because we don't know what state the mongo
// server is in when Reset is called. If the test has set a custom
// password, we're out of luck, but if they are using
// DefaultStatePassword, we can succeed.
func resetAdminPasswordAndFetchDBNames(session *mgo.Session) ([]string, bool) {
// First try with no password
dbnames, err := session.DatabaseNames()
if err == nil {
return dbnames, true
}
if !isUnauthorized(err) {
panic(err)
}
// Then try the two most likely passwords in turn.
for _, password := range []string{
DefaultMongoPassword,
utils.UserPasswordHash(DefaultMongoPassword, utils.CompatSalt),
} {
admin := session.DB("admin")
if err := admin.Login("admin", password); err != nil {
log.Infof("failed to log in with password %q", password)
continue
}
dbnames, err := session.DatabaseNames()
if err == nil {
if err := admin.RemoveUser("admin"); err != nil {
panic(err)
}
return dbnames, true
}
if !isUnauthorized(err) {
panic(err)
}
log.Infof("unauthorized access when getting database names; password %q", password)
}
return nil, false
}
// isUnauthorized is a copy of the same function in state/open.go.
// It reports whether err looks like a MongoDB authorization failure.
func isUnauthorized(err error) bool {
	if err == nil {
		return false
	}
	// Some unauthorized access errors have no error code,
	// just a simple error string.
	if err.Error() == "auth fails" {
		return true
	}
	if err, ok := err.(*mgo.QueryError); ok {
		// 10057 appears to be mongo's "not authorized" code — confirm
		// against the MongoDB error-code list.
		return err.Code == 10057 ||
			err.Message == "need to login" ||
			err.Message == "unauthorized"
	}
	return false
}
func (s *MgoSuite) TearDownTest(c *gc.C) {
MgoServer.Reset()
s.Session.Close()
for i := 0; ; i++ {
stats := mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
}
// FindTCPPort finds an unused TCP port and returns it.
// It binds an ephemeral port on 127.0.0.1 and releases it right away.
// This is inherently racy — another process may claim the port before the
// caller does — but during testing the probability is assumed negligible.
// Panics if no port can be bound.
func FindTCPPort() int {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer listener.Close()
	return listener.Addr().(*net.TCPAddr).Port
}
|
/*
Copyright 2013 Tamás Gulácsi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oracall
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strings"
"text/template"
"github.com/tgulacsi/go/orahlp"
)
// MaxTableSize is the maximum size of the array arguments
const MaxTableSize = 512
const batchSize = 128
//
// OracleArgument
//
var (
stringTypes = make(map[string]struct{}, 16)
)
// PlsqlBlock generates the PL/SQL anonymous block (plsql) for calling the
// stored function, together with the Go source (callFun) of the gRPC server
// method that converts the input, executes the block, and converts or
// streams the output. It exits the process if call preparation fails.
func (fun Function) PlsqlBlock(checkName string) (plsql, callFun string) {
	// prepareCall yields the PL/SQL declarations, pre/post conversion
	// statements, the call itself, and the Go in/out conversion code.
	decls, pre, call, post, convIn, convOut, err := fun.prepareCall()
	if err != nil {
		Log("msg", "error preparing", "function", fun, "error", err)
		os.Exit(1)
	}
	fn := strings.Replace(fun.name, ".", "__", -1)
	callBuf := buffers.Get()
	defer buffers.Put(callBuf)
	// Optional input-validation hook: call checkName(input) first.
	var check string
	if checkName != "" {
		check = fmt.Sprintf(`
if err = %s(input); err != nil {
	return
}
`, checkName)
	}
	hasCursorOut := fun.HasCursorOut()
	if hasCursorOut {
		// Cursor-returning functions become server-streaming gRPC methods.
		fmt.Fprintf(callBuf, `func (s *oracallServer) %s(input *pb.%s, stream pb.%s_%sServer) (err error) {
%s
output := new(pb.%s)
iterators := make([]iterator, 0, 1)
`,
			CamelCase(fn), CamelCase(fun.getStructName(false, false)), CamelCase(fun.Package), CamelCase(fn),
			check,
			CamelCase(fun.getStructName(true, false)),
		)
	} else {
		// Plain functions become unary gRPC methods.
		fmt.Fprintf(callBuf, `func (s *oracallServer) %s(ctx context.Context, input *pb.%s) (output *pb.%s, err error) {
%s
output = new(pb.%s)
iterators := make([]iterator, 0, 1) // just temporary
_ = iterators
`,
			CamelCase(fn), CamelCase(fun.getStructName(false, false)), CamelCase(fun.getStructName(true, false)),
			check,
			CamelCase(fun.getStructName(true, false)),
		)
	}
	for _, line := range convIn {
		io.WriteString(callBuf, line+"\n")
	}
	// Extract just the "NAME(...)" fragment of the call for the log line.
	i := strings.Index(call, fun.Name())
	j := i + strings.Index(call[i:], ")") + 1
	//Log("msg","PlsqlBlock", "i", i, "j", j, "call", call)
	fmt.Fprintf(callBuf, "\nif true || DebugLevel > 0 { log.Printf(`calling %s\n\twith %%v`, params) }"+`
ses, err := s.OraSesPool.Get()
if err != nil {
	err = errors.Wrap(err, "Get")
	return
}
defer s.OraSesPool.Put(ses)
qry := %s
stmt, err := ses.Prep(qry)
if err != nil {
	err = errors.Wrap(err, qry)
	return
}
if _, err = stmt.ExeP(params...); err != nil {
	if c, ok := err.(interface{ Code() int }); ok && c.Code() == 4068 {
		// "existing state of packages has been discarded"
		_, err= stmt.ExeP(params...)
	}
	if err = errors.Wrapf(err, "%%s %%#v", qry, params); err != nil {
		return
	}
}
defer stmt.Close()
`, call[i:j], fun.getPlsqlConstName())
	callBuf.WriteString("\nif true || DebugLevel > 0 { log.Printf(`result params: %v`, params) }\n")
	for _, line := range convOut {
		io.WriteString(callBuf, line+"\n")
	}
	if hasCursorOut {
		// Generated streaming loop: drive all cursors in lock step,
		// sending one message per fetched batch until all are exhausted.
		fmt.Fprintf(callBuf, `
if len(iterators) == 0 {
	err = stream.Send(output)
	return
}
reseters := make([]func(), 0, len(iterators))
iterators2 := make([]iterator, 0, len(iterators))
for {
	for _, it := range iterators {
		if err = it.Iterate(); err != nil {
			if err != io.EOF {
				_ = stream.Send(output)
				return
			}
			reseters = append(reseters, it.Reset)
			err = nil
			continue
		}
		iterators2 = append(iterators2, it)
	}
	if err = stream.Send(output); err != nil {
		return
	}
	if len(iterators) != len(iterators2) {
		if len(iterators2) == 0 {
			//err = stream.Send(output)
			return
		}
		iterators = append(iterators[:0], iterators2...)
	}
	// reset the all arrays
	for _, reset := range reseters {
		reset()
	}
	iterators2 = iterators2[:0]
	reseters = reseters[:0]
}
`)
	}
	fmt.Fprintf(callBuf, `
return
}`)
	callFun = callBuf.String()
	// Reuse the same buffer to assemble the PL/SQL block text.
	plsBuf := callBuf
	plsBuf.Reset()
	if len(decls) > 0 {
		io.WriteString(plsBuf, "DECLARE\n")
		for _, line := range decls {
			fmt.Fprintf(plsBuf, " %s\n", line)
		}
		plsBuf.Write([]byte{'\n'})
	}
	io.WriteString(plsBuf, "BEGIN\n")
	for _, line := range pre {
		fmt.Fprintf(plsBuf, " %s\n", line)
	}
	fmt.Fprintf(plsBuf, "\n %s;\n\n", call)
	for _, line := range post {
		fmt.Fprintf(plsBuf, " %s\n", line)
	}
	io.WriteString(plsBuf, "\nEND;\n")
	plsql = plsBuf.String()
	// demap converts the map-style bind parameters to slice form
	// (via orahlp.MapToSlice) in both outputs.
	plsql, callFun = demap(plsql, callFun)
	return
}
// demap replaces the named bind placeholders in plsql with positional
// ones (via orahlp.MapToSlice), then executes callFun as a
// text/template so every {{paramsIdx "name"}} becomes its positional
// index. Parameters occurring more than once get
// "params[new] = params[old]" copy lines spliced in after the last
// params assignment, so every position is populated.
func demap(plsql, callFun string) (string, string) {
	type repl struct {
		ParamsArrLen int
	}
	paramsMap := make(map[string][]int, 16)
	var i int
	first := make(map[string]int, len(paramsMap))
	plsql, paramsArr := orahlp.MapToSlice(
		plsql,
		func(key string) interface{} {
			paramsMap[key] = append(paramsMap[key], i)
			if _, ok := first[key]; !ok {
				first[key] = i
			}
			i++
			return key
		})
	opts := repl{
		ParamsArrLen: len(paramsArr),
	}
	callBuf := buffers.Get()
	defer buffers.Put(callBuf)
	var lastIdx int
	tpl, err := template.New("callFun").
		Funcs(
			map[string]interface{}{
				"paramsIdx": func(key string) int {
					if strings.HasSuffix(key, MarkHidden) {
						key = key[:len(key)-len(MarkHidden)] + "#"
					}
					arr := paramsMap[key]
					if len(arr) == 0 {
						Log("msg", "paramsIdx", "key", key, "val", arr, "map", paramsMap)
					}
					i = arr[0]
					if len(arr) > 1 {
						paramsMap[key] = arr[1:]
					}
					lastIdx = i
					return i
				},
			}).
		Parse(callFun)
	if err != nil {
		// callFun contains printf verbs (%v, %#v) in its generated log
		// lines, so it must NOT be used as a format string itself.
		fmt.Fprintln(os.Stderr, callFun)
		panic(err)
	}
	if err := tpl.Execute(callBuf, opts); err != nil {
		panic(err)
	}
	// Collect the extra occurrences of multiply-bound parameters.
	plusIdxs := make([]idxRemap, 0, len(paramsMap))
	for k, vv := range paramsMap {
		for _, v := range vv {
			if i := first[k]; i != v {
				plusIdxs = append(plusIdxs, idxRemap{Name: k, New: v, Old: i})
			}
		}
	}
	if len(plusIdxs) == 0 {
		return plsql, callBuf.String()
	}
	sort.Sort(byNewRemap(plusIdxs))
	// Splice the copy assignments in right after the line containing
	// the last "params[lastIdx] =" assignment.
	b := callBuf.Bytes()
	i = bytes.LastIndex(b, []byte(fmt.Sprintf("params[%d] =", lastIdx)))
	j := bytes.IndexByte(b[i:], '\n')
	j += i + 1
	rest := string(b[j:])
	callBuf.Truncate(j)
	for _, v := range plusIdxs {
		fmt.Fprintf(callBuf, "params[%d] = params[%d] // %s\n", v.New, v.Old, v.Name)
	}
	callBuf.WriteString(rest)
	return plsql, callBuf.String()
}
// prepareCall builds the pieces of the PL/SQL block and the Go
// conversion code for fun:
//
//	decls  — PL/SQL DECLARE lines (table/record types, helper vars),
//	pre    — PL/SQL statements before the call (copy IN binds in),
//	call   — the procedure/function invocation itself,
//	post   — PL/SQL statements after the call (copy OUT binds out),
//	convIn/convOut — Go statements converting between the protobuf
//	messages and the bind parameters.
func (fun Function) prepareCall() (decls, pre []string, call string, post []string, convIn, convOut []string, err error) {
	tableTypes := make(map[string]string, 4)
	callArgs := make(map[string]string, 16)
	// getTableType declares (once per element type) and returns the
	// name of a PL/SQL index-by table type of absType.
	getTableType := func(absType string) string {
		if strings.HasPrefix(absType, "CHAR") {
			absType = "VARCHAR2" + absType[4:]
		}
		typ, ok := tableTypes[absType]
		if ok {
			return typ
		}
		typ = strings.Map(func(c rune) rune {
			switch c {
			case '(', ',':
				return '_'
			case ' ', ')':
				return -1
			default:
				return c
			}
		}, absType) + "_tab_typ"
		decls = append(decls, "TYPE "+typ+" IS TABLE OF "+absType+" INDEX BY BINARY_INTEGER;")
		tableTypes[absType] = typ
		return typ
	}
	//fStructIn, fStructOut := fun.getStructName(false), fun.getStructName(true)
	var (
		vn, tmp, typ string
		ok           bool
	)
	decls = append(decls, "i1 PLS_INTEGER;", "i2 PLS_INTEGER;")
	convIn = append(convIn, "params := make([]interface{}, {{.ParamsArrLen}})", "var x, v interface{}\n _,_ = x,v")
	args := make([]Argument, 0, len(fun.Args)+1)
	for _, arg := range fun.Args {
		arg.Name = replHidden(arg.Name)
		args = append(args, arg)
	}
	if fun.Returns != nil {
		args = append(args, *fun.Returns)
	}
	// addParam returns the template placeholder that demap resolves to
	// a positional params[i] reference later.
	addParam := func(paramName string) string {
		if paramName == "" {
			panic("empty param name")
		}
		return `params[{{paramsIdx "` + paramName + `"}}]`
	}
	for _, arg := range args {
		switch arg.Flavor {
		case FLAVOR_SIMPLE:
			name := (CamelCase(arg.Name))
			//name := capitalize(replHidden(arg.Name))
			convIn, convOut = arg.getConvSimple(convIn, convOut,
				name, addParam(arg.Name))
		case FLAVOR_RECORD:
			vn = getInnerVarName(fun.Name(), arg.Name)
			if arg.TypeName == "" {
				// Anonymous record: declare an ad-hoc RECORD type.
				arg.TypeName = mkRecTypName(arg.Name)
				decls = append(decls, "TYPE "+arg.TypeName+" IS RECORD (")
				for i, sub := range arg.RecordOf {
					var comma string
					if i != 0 {
						comma = ","
					}
					decls = append(decls, "  "+comma+sub.Name+" "+sub.AbsType)
				}
				decls = append(decls, ");")
			}
			decls = append(decls, vn+" "+arg.TypeName+"; --E="+arg.Name)
			callArgs[arg.Name] = vn
			aname := (CamelCase(arg.Name))
			//aname := capitalize(replHidden(arg.Name))
			if arg.IsOutput() {
				if arg.IsInput() {
					convIn = append(convIn, fmt.Sprintf(`
	output.%s = new(%s) // sr1
	if input.%s != nil { *output.%s = *input.%s }
	`, aname, withPb(CamelCase(arg.goType(false)[1:])),
						aname, aname, aname))
				} else {
					// yes, convIn - getConvRec uses this!
					convIn = append(convIn, fmt.Sprintf(`
	if output.%s == nil {
		output.%s = new(%s) // sr2
	}`, aname,
						aname, withPb(CamelCase(arg.goType(false)[1:]))))
				}
			}
			for _, a := range arg.RecordOf {
				a := a
				k, v := a.Name, a.Argument
				tmp = getParamName(fun.Name(), vn+"."+k)
				kName := (CamelCase(k))
				//kName := capitalize(replHidden(k))
				name := aname + "." + kName
				if arg.IsInput() {
					pre = append(pre, vn+"."+k+" := :"+tmp+";")
				}
				if arg.IsOutput() {
					post = append(post, ":"+tmp+" := "+vn+"."+k+";")
				}
				convIn, convOut = v.getConvRec(convIn, convOut,
					name, addParam(tmp),
					0, arg, k)
			}
		case FLAVOR_TABLE:
			if arg.Type == "REF CURSOR" {
				if arg.IsInput() {
					Log("msg", "cannot use IN cursor variables", "arg", arg)
					os.Exit(1)
				}
				name := (CamelCase(arg.Name))
				//name := capitalize(replHidden(arg.Name))
				convIn, convOut = arg.getConvSimpleTable(convIn, convOut,
					name, addParam(arg.Name), MaxTableSize)
			} else {
				switch arg.TableOf.Flavor {
				case FLAVOR_SIMPLE: // like simple, but for the arg.TableOf
					typ = getTableType(arg.TableOf.AbsType)
					setvar := ""
					if arg.IsInput() {
						setvar = " := :" + arg.Name
					}
					decls = append(decls, arg.Name+" "+typ+setvar+"; --A="+arg.Name)
					vn = getInnerVarName(fun.Name(), arg.Name)
					callArgs[arg.Name] = vn
					decls = append(decls, vn+" "+arg.TypeName+"; --B="+arg.Name)
					if arg.IsInput() {
						// Copy the bound array into the declared table type.
						pre = append(pre,
							vn+".DELETE;",
							"i1 := "+arg.Name+".FIRST;",
							"WHILE i1 IS NOT NULL LOOP",
							"  "+vn+"(i1) := "+arg.Name+"(i1);",
							"  i1 := "+arg.Name+".NEXT(i1);",
							"END LOOP;")
					}
					if arg.IsOutput() {
						post = append(post,
							arg.Name+".DELETE;",
							"i1 := "+vn+".FIRST;",
							"WHILE i1 IS NOT NULL LOOP",
							"  "+arg.Name+"(i1) := "+vn+"(i1);",
							"  i1 := "+vn+".NEXT(i1);",
							"END LOOP;",
							":"+arg.Name+" := "+arg.Name+";")
					}
					name := (CamelCase(arg.Name))
					//name := capitalize(replHidden(arg.Name))
					convIn, convOut = arg.getConvSimpleTable(convIn, convOut,
						name, addParam(arg.Name), MaxTableSize)
				case FLAVOR_RECORD:
					vn = getInnerVarName(fun.Name(), arg.Name+"."+arg.TableOf.Name)
					callArgs[arg.Name] = vn
					decls = append(decls, vn+" "+arg.TypeName+"; --C="+arg.Name)
					aname := (CamelCase(arg.Name))
					//aname := capitalize(replHidden(arg.Name))
					if arg.IsOutput() {
						st := withPb(CamelCase(arg.TableOf.goType(true)))
						convOut = append(convOut, fmt.Sprintf(`
	if m := %d - cap(output.%s); m > 0 { // %s
		output.%s = append(output.%s[:cap(output.%s)], make([]%s, m)...) // fr1
	}
	output.%s = output.%s[:%d]
	`,
							MaxTableSize, aname, arg.TableOf.goType(true),
							aname, aname, aname, st,
							aname, aname, MaxTableSize))
					}
					if !arg.IsInput() {
						pre = append(pre, vn+".DELETE;")
					}
					// declarations go first
					for _, a := range arg.TableOf.RecordOf {
						a := a
						k, v := a.Name, a.Argument
						typ = getTableType(v.AbsType)
						decls = append(decls, getParamName(fun.Name(), vn+"."+k)+" "+typ+"; --D="+arg.Name)
						tmp = getParamName(fun.Name(), vn+"."+k)
						if arg.IsInput() {
							pre = append(pre, tmp+" := :"+tmp+";")
						} else {
							pre = append(pre, tmp+".DELETE;")
						}
					}
					// here comes the loops
					var idxvar string
					for _, a := range arg.TableOf.RecordOf {
						a := a
						k, v := a.Name, a.Argument
						typ = getTableType(v.AbsType)
						tmp = getParamName(fun.Name(), vn+"."+k)
						if idxvar == "" {
							// The first field's table drives the iteration.
							idxvar = getParamName(fun.Name(), vn+"."+k)
							if arg.IsInput() {
								pre = append(pre, "",
									"i1 := "+idxvar+".FIRST;",
									"WHILE i1 IS NOT NULL LOOP")
							}
							if arg.IsOutput() {
								post = append(post, "",
									"i1 := "+vn+".FIRST; i2 := 1;",
									"WHILE i1 IS NOT NULL LOOP")
							}
						}
						kName := (CamelCase(k))
						//kName := capitalize(replHidden(k))
						//name := aname + "." + kName
						convIn, convOut = v.getConvTableRec(
							convIn, convOut,
							[2]string{aname, kName},
							addParam(tmp),
							MaxTableSize,
							k, *arg.TableOf)
						if arg.IsInput() {
							pre = append(pre,
								"  "+vn+"(i1)."+k+" := "+tmp+"(i1);")
						}
						if arg.IsOutput() {
							post = append(post,
								"  "+tmp+"(i2) := "+vn+"(i1)."+k+";")
						}
					}
					if arg.IsInput() {
						pre = append(pre,
							"  i1 := "+idxvar+".NEXT(i1);",
							"END LOOP;")
					}
					if arg.IsOutput() {
						post = append(post,
							"  i1 := "+vn+".NEXT(i1); i2 := i2 + 1;",
							"END LOOP;")
						for _, a := range arg.TableOf.RecordOf {
							a := a
							k := a.Name
							tmp = getParamName(fun.Name(), vn+"."+k)
							post = append(post, ":"+tmp+" := "+tmp+";")
						}
					}
				default:
					Log("msg", "Only table of simple or record types are allowed (no table of table!)", "function", fun.Name(), "arg", arg.Name)
					os.Exit(1)
				}
			}
		default:
			Log("msg", "unknown flavor", "flavor", arg.Flavor)
			os.Exit(1)
		}
	}
	callb := buffers.Get()
	defer buffers.Put(callb)
	if fun.Returns != nil {
		callb.WriteString(":ret := ")
	}
	//Log("msg","prepareCall", "callArgs", callArgs)
	callb.WriteString(fun.Name() + "(")
	for i, arg := range fun.Args {
		if i > 0 {
			callb.WriteString(",\n\t\t")
		}
		if vn, ok = callArgs[arg.Name]; !ok {
			vn = ":" + arg.Name
		}
		fmt.Fprintf(callb, "%s=>%s", arg.Name, vn)
	}
	callb.WriteString(")")
	call = callb.String()
	return
}
// getIsValidCheck returns the Go source of a boolean expression that
// tests whether the value named name (having the argument's Go type)
// carries a usable, non-null value.
func (arg Argument) getIsValidCheck(name string) string {
	goType := arg.goType(false)
	switch {
	case goType[0] == '*':
		return name + " != nil"
	case strings.HasPrefix(goType, "ora."):
		return "!" + name + ".IsNull"
	case strings.HasPrefix(goType, "sql.Null"):
		return name + ".Valid"
	}
	return name + " != nil /*" + goType + "*/"
}
// getConvSimple appends to convIn/convOut the Go statements converting
// a scalar argument between the protobuf field (input./output.<name>)
// and its bind-parameter slot (paramName).
func (arg Argument) getConvSimple(
	convIn, convOut []string,
	name, paramName string,
) ([]string, []string) {
	if arg.IsOutput() {
		got := arg.goType(false)
		if got[0] == '*' {
			convIn = append(convIn, fmt.Sprintf("output.%s = new(%s) // %s // gcs1", name, got[1:], got))
			if arg.IsInput() {
				// IN/OUT: seed the output with the input value.
				convIn = append(convIn, fmt.Sprintf(`if input.%s != nil { *output.%s = *input.%s } // gcs2`, name, name, name))
			}
		} else if arg.IsInput() {
			convIn = append(convIn, fmt.Sprintf(`output.%s = input.%s // gcs3`, name, name))
		}
		src := "output." + name
		// Bind a pointer to the output field so the driver writes into it.
		in, varName := arg.ToOra(paramName, "&"+src)
		convIn = append(convIn, in+" // gcs3")
		if varName != "" {
			convOut = append(convOut, arg.FromOra(src, paramName, varName))
		}
	} else {
		in, _ := arg.ToOra(paramName, "input."+name)
		convIn = append(convIn, in+" // gcs4")
	}
	return convIn, convOut
}
// getConvSimpleTable is getConvSimple for table-of-scalar arguments
// (and dispatches REF CURSOR to getConvRefCursor); tableSize caps the
// bound array's capacity.
func (arg Argument) getConvSimpleTable(
	convIn, convOut []string,
	name, paramName string,
	tableSize int,
) ([]string, []string) {
	if arg.IsOutput() {
		got := arg.goType(true)
		if arg.Type == "REF CURSOR" {
			return arg.getConvRefCursor(convIn, convOut, name, paramName, tableSize)
		}
		if got == "*[]string" {
			got = "[]string"
		}
		if got[0] == '*' {
			// Ensure the destination slice exists with capacity tableSize,
			// reusing it when possible.
			convIn = append(convIn, fmt.Sprintf(`
	if output.%s == nil { // %#v
		x := make(%s, 0, %d)
		output.%s = &x
	} else if cap((*output.%s)) < %d { // simpletable
		*output.%s = make(%s, 0, %d)
	} else {
		*(output.%s) = (*output.%s)[:0]
	}`, name, arg,
				strings.TrimLeft(got, "*"), tableSize,
				name,
				name, tableSize,
				name, strings.TrimLeft(got, "*"), tableSize,
				name, name))
			if arg.IsInput() {
				convIn = append(convIn, fmt.Sprintf(`*output.%s = append(*output.%s, input.%s)`, name, name, name))
			}
		} else {
			if arg.IsInput() {
				convIn = append(convIn, fmt.Sprintf("output.%s = input.%s", name, name))
			} else {
				got = CamelCase(got)
				convIn = append(convIn, fmt.Sprintf("output.%s = make(%s, 0, %d) // gcst3", name, got, tableSize))
			}
		}
		// NOTE(review): ToOra's result is only emitted as a comment line
		// here — presumably a debugging aid; confirm before relying on it.
		in, varName := arg.ToOra(strings.Replace(strings.Replace(paramName, `[{{paramsIdx "`, "__", 1), `"}}]`, "", 1), "output."+name)
		convIn = append(convIn, fmt.Sprintf(`// in=%q varName=%q`, in, varName))
		convIn = append(convIn, fmt.Sprintf(`%s = output.%s // gcst1`, paramName, name))
	} else {
		in, varName := arg.ToOra(strings.Replace(strings.Replace(paramName, `[{{paramsIdx "`, "__", 1), `"}}]`, "", 1), "output."+name)
		convIn = append(convIn, fmt.Sprintf(`// in=%q varName=%q`, in, varName))
		convIn = append(convIn, fmt.Sprintf("%s = input.%s // gcst2", paramName, name))
	}
	return convIn, convOut
}
// getConvRefCursor emits the conversion code for an OUT REF CURSOR:
// convIn binds a fresh ora.Rset; convOut registers an iterator whose
// Iterate fills output.<name> with up to batchSize rows per call and
// returns io.EOF when the cursor is exhausted.
func (arg Argument) getConvRefCursor(
	convIn, convOut []string,
	name, paramName string,
	tableSize int,
) ([]string, []string) {
	got := arg.goType(true)
	GoT := withPb(CamelCase(got))
	convIn = append(convIn, fmt.Sprintf(`output.%s = make([]%s, 0, %d) // gcrf1
	%s = new(ora.Rset) // gcrf1 %q`,
		name, GoT, tableSize,
		paramName, got))
	convOut = append(convOut, fmt.Sprintf(`
	{
		rset := %s.(*ora.Rset)
		if rset.IsOpen() {
			iterators = append(iterators, iterator{
				Reset: func() { output.%s = nil },
				Iterate: func() error {
					a := output.%s[:0]
					var err error
					for i := 0; i < %d; i++ {
						if !rset.Next() {
							if err = rset.Err; err == nil {
								err = io.EOF
							}
							break
						}
						a = append(a, %s)
					}
					output.%s = a
					return err
				},
			})
		}
	}`,
		paramName,
		name,
		name,
		batchSize,
		arg.getFromRset("rset.Row"),
		name,
	))
	return convIn, convOut
}
// getFromRset returns a Go composite literal that builds one record
// from a cursor row expression (rsetRow), converting each column either
// through the field's GetOra or through a custom.AsX helper.
func (arg Argument) getFromRset(rsetRow string) string {
	buf := buffers.Get()
	defer buffers.Put(buf)
	GoT := CamelCase(arg.goType(true))
	if GoT[0] == '*' {
		// Pointer element type: emit a &T{...} literal.
		GoT = "&" + GoT[1:]
	}
	fmt.Fprintf(buf, "%s{\n", withPb(GoT))
	for i, a := range arg.TableOf.RecordOf {
		a := a
		got := a.Argument.goType(true)
		if strings.Contains(got, ".") {
			fmt.Fprintf(buf, "\t%s: %s,\n", CamelCase(a.Name),
				a.GetOra(fmt.Sprintf("%s[%d]", rsetRow, i), ""))
		} else {
			fmt.Fprintf(buf, "\t%s: custom.As%s(%s[%d]),\n", CamelCase(a.Name), CamelCase(got), rsetRow, i)
		}
	}
	fmt.Fprintf(buf, "}")
	return buf.String()
}
// getOutConvTSwitch returns Go source that converts an out-parameter
// value x (interface{}) into a variable y of type pTyp. Numeric targets
// get a type switch that also parses string values via strconv; other
// targets get a plain type assertion. name appears only in the
// generated error message.
func getOutConvTSwitch(name, pTyp string) string {
	var parseCall string
	switch {
	case strings.HasPrefix(pTyp, "int"):
		// NOTE(review): plain "int" also gets 32 bits — confirm intent.
		bitSize := "32"
		if len(pTyp) == 5 {
			bitSize = pTyp[3:5]
		}
		parseCall = "ParseInt(xi, 10, " + bitSize + ")"
	case strings.HasPrefix(pTyp, "float"):
		parseCall = "ParseFloat(xi, " + pTyp[5:7] + ")"
	}
	if parseCall == "" {
		// Non-numeric target: a simple checked type assertion suffices.
		return fmt.Sprintf(`
	y, ok := x.(%s)
	if !ok {
		err = fmt.Errorf("out parameter %s is bad type: awaited %s, got %%T", x)
		return
	}`, pTyp, name, pTyp)
	}
	return fmt.Sprintf(`
	var y `+pTyp+`
	err = nil
	switch xi := x.(type) {
	case int: y = `+pTyp+`(xi)
	case int8: y = `+pTyp+`(xi)
	case int16: y = `+pTyp+`(xi)
	case int32: y = `+pTyp+`(xi)
	case int64: y = `+pTyp+`(xi)
	case float32: y = `+pTyp+`(xi)
	case float64: y = `+pTyp+`(xi)
	case string:
		//log.Printf("converting %%q to `+pTyp+`", xi)
		z, e := strconv.`+parseCall+`
		y, err = `+pTyp+`(z), e
	default:
		err = fmt.Errorf("out parameter %s is bad type: awaited %s, got %%T", x)
	}
	if err != nil {
		return
	}`, name, pTyp)
}
// getConvRec emits the conversion for one scalar field of a RECORD
// argument. tableSize, parentArg and key are accepted but unused in
// this body — NOTE(review): presumably kept for signature symmetry
// with getConvTableRec; confirm before removing.
func (arg Argument) getConvRec(
	convIn, convOut []string,
	name, paramName string,
	tableSize uint,
	parentArg Argument,
	key string,
) ([]string, []string) {
	if arg.IsOutput() {
		too, varName := arg.ToOra(paramName, "&output."+name)
		convIn = append(convIn, too+" // gcr2 var="+varName)
		if varName != "" {
			convOut = append(convOut, arg.FromOra("output."+name, varName, varName))
		}
	} else if arg.IsInput() {
		parts := strings.Split(name, ".")
		too, _ := arg.ToOra(paramName, "input."+name)
		// Guard on the record pointer being non-nil before reading fields.
		convIn = append(convIn,
			fmt.Sprintf(`if input.%s != nil {
	%s
	} // gcr1`,
				parts[0], too))
	}
	return convIn, convOut
}
// getConvTableRec emits conversions for one field (name[1]) of a
// table-of-records argument (name[0]): a staging []oraTyp slice is
// built and bound, and on output its values are copied back into
// output.<name[0]>[i].<name[1]>.
func (arg Argument) getConvTableRec(
	convIn, convOut []string,
	name [2]string,
	paramName string,
	tableSize uint,
	key string,
	parent Argument,
) ([]string, []string) {
	lengthS := "0"
	absName := "x__" + name[0] + "__" + name[1]
	typ := arg.goType(true)
	oraTyp := typ
	// Map these Go types to their ora wrapper for the staging slice.
	switch oraTyp {
	case "custom.Date":
		oraTyp = "ora.Date"
	case "float64":
		oraTyp = "ora.Float64"
	case "int32":
		oraTyp = "ora.Int32"
	}
	if arg.IsInput() {
		// IN/OUT binds pass a pointer to the staging slice; IN-only
		// binds pass the slice itself.
		amp := "&"
		if !arg.IsOutput() {
			amp = ""
		}
		lengthS = "len(input." + name[0] + ")"
		too, _ := arg.ToOra(absName+"[i]", "v."+name[1])
		convIn = append(convIn, fmt.Sprintf(`
	%s := make([]%s, %s, %d) // gctr1
	for i,v := range input.%s {
		%s
	} // gctr1
	%s = %s%s`,
			absName,
			oraTyp, lengthS, tableSize,
			name[0], too,
			paramName, amp, absName))
	}
	if arg.IsOutput() {
		if !arg.IsInput() {
			convIn = append(convIn,
				fmt.Sprintf(`%s := make([]%s, %s, %d) // gctr2
	%s = &%s // gctr2`,
					absName, oraTyp, lengthS, tableSize,
					paramName, absName))
		}
		got := parent.goType(true)
		convOut = append(convOut,
			fmt.Sprintf(`if m := len(%s)-cap(output.%s); m > 0 { // gctr3
		output.%s = append(output.%s, make([]%s, m)...)
	}
	output.%s = output.%s[:len(%s)]
	for i, v := range %s {
		if output.%s[i] == nil {
			output.%s[i] = new(%s)
		}
		%s // gctr3
	}`,
				absName, name[0],
				name[0], name[0], withPb(CamelCase(got)),
				name[0], name[0], absName,
				absName,
				name[0],
				name[0], withPb(CamelCase(got[1:])),
				arg.FromOra(
					fmt.Sprintf("output.%s[i].%s", name[0], name[1]),
					"v",
					"v",
				)))
	}
	return convIn, convOut
}
// varNames caches, per function, the short name assigned to each
// (possibly dotted) variable path.
var varNames = make(map[string]map[string]string, 4)

// getVarName returns a stable short name for varName within funName.
// Dotted names derive from their parent's short name plus "#suffix";
// when that derived name is empty or exceeds 30 characters (Oracle's
// identifier limit), it falls back to prefixNNN numbering.
func getVarName(funName, varName, prefix string) string {
	perFun := varNames[funName]
	if perFun == nil {
		perFun = make(map[string]string, 16)
		varNames[funName] = perFun
	}
	if cached, ok := perFun[varName]; ok {
		return cached
	}
	// Count entries before any recursion so numbering stays stable.
	n := len(perFun)
	var short string
	if dot := strings.LastIndex(varName, "."); dot > 0 && dot < len(varName)-1 {
		short = getVarName(funName, varName[:dot], prefix) + "#" + varName[dot+1:]
	}
	if short == "" || len(short) > 30 {
		short = fmt.Sprintf("%s%03d", prefix, n+1)
	}
	perFun[varName] = short
	return short
}
// getInnerVarName returns the short inner-variable name ("vNNN"-style)
// for varName within funName.
func getInnerVarName(funName, varName string) string {
	return getVarName(funName, varName, "v")
}

// getParamName returns the short bind-parameter name ("pNNN"-style)
// for paramName within funName.
func getParamName(funName, paramName string) string {
	return getVarName(funName, paramName, "p")
}
// withPb qualifies the type name s with the generated protobuf package
// ("pb."), keeping any leading '*' or '&' in front of the qualifier.
// The empty string passes through unchanged.
func withPb(s string) string {
	if s == "" {
		return s
	}
	switch s[0] {
	case '*', '&':
		return s[:1] + "pb." + s[1:]
	}
	return "pb." + s
}
// idxRemap records that parameter Name, first bound at position Old,
// also occurs at position New in the PL/SQL block.
type idxRemap struct {
	Name     string
	New, Old int
}

var _ = sort.Interface(byNewRemap(nil))

// byNewRemap implements sort.Interface over []idxRemap.
// NOTE(review): despite the name, Less orders by Old — confirm whether
// ordering by New was intended.
type byNewRemap []idxRemap

func (s byNewRemap) Len() int           { return len(s) }
func (s byNewRemap) Less(i, j int) bool { return s[i].Old < s[j].Old }
func (s byNewRemap) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
// vim: se noet fileencoding=utf-8:
// state change error -> drop ses  (stray VCS commit note left in the file)
/*
Copyright 2013 Tamás Gulácsi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oracall
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strings"
"text/template"
"github.com/tgulacsi/go/orahlp"
)
// MaxTableSize is the maximum size of the array arguments
const MaxTableSize = 512

// batchSize is the number of cursor rows collected into output before
// each stream.Send in the generated streaming methods.
const batchSize = 128

//
// OracleArgument
//

var (
	// stringTypes — NOTE(review): populated/used outside this chunk;
	// document at the point of use.
	stringTypes = make(map[string]struct{}, 16)
)
// PlsqlBlock generates two pieces of source for fun: the anonymous
// PL/SQL block (plsql) that calls the stored procedure/function, and
// the Go gRPC server method (callFun) that converts the protobuf input
// to bind parameters, executes the block and converts the results back.
// checkName, when non-empty, names an input-validation function the
// generated method calls before touching the database.
func (fun Function) PlsqlBlock(checkName string) (plsql, callFun string) {
	decls, pre, call, post, convIn, convOut, err := fun.prepareCall()
	if err != nil {
		Log("msg", "error preparing", "function", fun, "error", err)
		os.Exit(1)
	}
	fn := strings.Replace(fun.name, ".", "__", -1)
	callBuf := buffers.Get()
	defer buffers.Put(callBuf)
	var check string
	if checkName != "" {
		check = fmt.Sprintf(`
	if err = %s(input); err != nil {
		return
	}
	`, checkName)
	}
	hasCursorOut := fun.HasCursorOut()
	if hasCursorOut {
		// Cursor-returning functions become server-streaming RPCs.
		fmt.Fprintf(callBuf, `func (s *oracallServer) %s(input *pb.%s, stream pb.%s_%sServer) (err error) {
	%s
	output := new(pb.%s)
	iterators := make([]iterator, 0, 1)
	`,
			CamelCase(fn), CamelCase(fun.getStructName(false, false)), CamelCase(fun.Package), CamelCase(fn),
			check,
			CamelCase(fun.getStructName(true, false)),
		)
	} else {
		fmt.Fprintf(callBuf, `func (s *oracallServer) %s(ctx context.Context, input *pb.%s) (output *pb.%s, err error) {
	%s
	output = new(pb.%s)
	iterators := make([]iterator, 0, 1) // just temporary
	_ = iterators
	`,
			CamelCase(fn), CamelCase(fun.getStructName(false, false)), CamelCase(fun.getStructName(true, false)),
			check,
			CamelCase(fun.getStructName(true, false)),
		)
	}
	for _, line := range convIn {
		io.WriteString(callBuf, line+"\n")
	}
	i := strings.Index(call, fun.Name())
	j := i + strings.Index(call[i:], ")") + 1
	//Log("msg","PlsqlBlock", "i", i, "j", j, "call", call)
	// NOTE(review): in the generated code below, "ses = nil" on ORA-4068
	// cannot affect the already-deferred s.OraSesPool.Put(ses) — defer
	// arguments are evaluated at the defer statement — so the "discard
	// session" intent does not take effect; confirm and fix generation.
	// Also "true ||" forces the debug logging on unconditionally.
	fmt.Fprintf(callBuf, "\nif true || DebugLevel > 0 { log.Printf(`calling %s\n\twith %%v`, params) }"+`
	ses, err := s.OraSesPool.Get()
	if err != nil {
		err = errors.Wrap(err, "Get")
		return
	}
	defer s.OraSesPool.Put(ses)
	qry := %s
	stmt, err := ses.Prep(qry)
	if err != nil {
		err = errors.Wrap(err, qry)
		return
	}
	if _, err = stmt.ExeP(params...); err != nil {
		if c, ok := err.(interface{ Code() int }); ok && c.Code() == 4068 {
			// "existing state of packages has been discarded"
			_, err= stmt.ExeP(params...)
		}
		if err != nil && strings.Contains(err.Error(), "state of") {
			log.Println("!!! discard session !!!")
			ses = nil
		}
		if err = errors.Wrapf(err, "%%s %%#v", qry, params); err != nil {
			return
		}
	}
	defer stmt.Close()
`, call[i:j], fun.getPlsqlConstName())
	callBuf.WriteString("\nif true || DebugLevel > 0 { log.Printf(`result params: %v`, params) }\n")
	for _, line := range convOut {
		io.WriteString(callBuf, line+"\n")
	}
	if hasCursorOut {
		// Drain every cursor iterator, one stream.Send per batch, until
		// all of them have returned io.EOF.
		fmt.Fprintf(callBuf, `
	if len(iterators) == 0 {
		err = stream.Send(output)
		return
	}
	reseters := make([]func(), 0, len(iterators))
	iterators2 := make([]iterator, 0, len(iterators))
	for {
		for _, it := range iterators {
			if err = it.Iterate(); err != nil {
				if err != io.EOF {
					_ = stream.Send(output)
					return
				}
				reseters = append(reseters, it.Reset)
				err = nil
				continue
			}
			iterators2 = append(iterators2, it)
		}
		if err = stream.Send(output); err != nil {
			return
		}
		if len(iterators) != len(iterators2) {
			if len(iterators2) == 0 {
				//err = stream.Send(output)
				return
			}
			iterators = append(iterators[:0], iterators2...)
		}
		// reset the all arrays
		for _, reset := range reseters {
			reset()
		}
		iterators2 = iterators2[:0]
		reseters = reseters[:0]
	}
`)
	}
	fmt.Fprintf(callBuf, `
	return
}`)
	callFun = callBuf.String()
	// callFun is already copied out, so the same buffer can be reused
	// for the PL/SQL text.
	plsBuf := callBuf
	plsBuf.Reset()
	if len(decls) > 0 {
		io.WriteString(plsBuf, "DECLARE\n")
		for _, line := range decls {
			fmt.Fprintf(plsBuf, "  %s\n", line)
		}
		plsBuf.Write([]byte{'\n'})
	}
	io.WriteString(plsBuf, "BEGIN\n")
	for _, line := range pre {
		fmt.Fprintf(plsBuf, "  %s\n", line)
	}
	fmt.Fprintf(plsBuf, "\n  %s;\n\n", call)
	for _, line := range post {
		fmt.Fprintf(plsBuf, "  %s\n", line)
	}
	io.WriteString(plsBuf, "\nEND;\n")
	plsql = plsBuf.String()
	// Resolve the {{paramsIdx ...}} placeholders into positional indexes.
	plsql, callFun = demap(plsql, callFun)
	return
}
// demap replaces the named bind placeholders in plsql with positional
// ones (via orahlp.MapToSlice), then executes callFun as a
// text/template so every {{paramsIdx "name"}} becomes its positional
// index. Parameters occurring more than once get
// "params[new] = params[old]" copy lines spliced in after the last
// params assignment, so every position is populated.
func demap(plsql, callFun string) (string, string) {
	type repl struct {
		ParamsArrLen int
	}
	paramsMap := make(map[string][]int, 16)
	var i int
	first := make(map[string]int, len(paramsMap))
	plsql, paramsArr := orahlp.MapToSlice(
		plsql,
		func(key string) interface{} {
			paramsMap[key] = append(paramsMap[key], i)
			if _, ok := first[key]; !ok {
				first[key] = i
			}
			i++
			return key
		})
	opts := repl{
		ParamsArrLen: len(paramsArr),
	}
	callBuf := buffers.Get()
	defer buffers.Put(callBuf)
	var lastIdx int
	tpl, err := template.New("callFun").
		Funcs(
			map[string]interface{}{
				"paramsIdx": func(key string) int {
					if strings.HasSuffix(key, MarkHidden) {
						key = key[:len(key)-len(MarkHidden)] + "#"
					}
					arr := paramsMap[key]
					if len(arr) == 0 {
						Log("msg", "paramsIdx", "key", key, "val", arr, "map", paramsMap)
					}
					i = arr[0]
					if len(arr) > 1 {
						paramsMap[key] = arr[1:]
					}
					lastIdx = i
					return i
				},
			}).
		Parse(callFun)
	if err != nil {
		// callFun contains printf verbs (%v, %#v) in its generated log
		// lines, so it must NOT be used as a format string itself.
		fmt.Fprintln(os.Stderr, callFun)
		panic(err)
	}
	if err := tpl.Execute(callBuf, opts); err != nil {
		panic(err)
	}
	// Collect the extra occurrences of multiply-bound parameters.
	plusIdxs := make([]idxRemap, 0, len(paramsMap))
	for k, vv := range paramsMap {
		for _, v := range vv {
			if i := first[k]; i != v {
				plusIdxs = append(plusIdxs, idxRemap{Name: k, New: v, Old: i})
			}
		}
	}
	if len(plusIdxs) == 0 {
		return plsql, callBuf.String()
	}
	sort.Sort(byNewRemap(plusIdxs))
	// Splice the copy assignments in right after the line containing
	// the last "params[lastIdx] =" assignment.
	b := callBuf.Bytes()
	i = bytes.LastIndex(b, []byte(fmt.Sprintf("params[%d] =", lastIdx)))
	j := bytes.IndexByte(b[i:], '\n')
	j += i + 1
	rest := string(b[j:])
	callBuf.Truncate(j)
	for _, v := range plusIdxs {
		fmt.Fprintf(callBuf, "params[%d] = params[%d] // %s\n", v.New, v.Old, v.Name)
	}
	callBuf.WriteString(rest)
	return plsql, callBuf.String()
}
// prepareCall builds the pieces of the PL/SQL block and the Go
// conversion code for fun:
//
//	decls  — PL/SQL DECLARE lines (table/record types, helper vars),
//	pre    — PL/SQL statements before the call (copy IN binds in),
//	call   — the procedure/function invocation itself,
//	post   — PL/SQL statements after the call (copy OUT binds out),
//	convIn/convOut — Go statements converting between the protobuf
//	messages and the bind parameters.
func (fun Function) prepareCall() (decls, pre []string, call string, post []string, convIn, convOut []string, err error) {
	tableTypes := make(map[string]string, 4)
	callArgs := make(map[string]string, 16)
	// getTableType declares (once per element type) and returns the
	// name of a PL/SQL index-by table type of absType.
	getTableType := func(absType string) string {
		if strings.HasPrefix(absType, "CHAR") {
			absType = "VARCHAR2" + absType[4:]
		}
		typ, ok := tableTypes[absType]
		if ok {
			return typ
		}
		typ = strings.Map(func(c rune) rune {
			switch c {
			case '(', ',':
				return '_'
			case ' ', ')':
				return -1
			default:
				return c
			}
		}, absType) + "_tab_typ"
		decls = append(decls, "TYPE "+typ+" IS TABLE OF "+absType+" INDEX BY BINARY_INTEGER;")
		tableTypes[absType] = typ
		return typ
	}
	//fStructIn, fStructOut := fun.getStructName(false), fun.getStructName(true)
	var (
		vn, tmp, typ string
		ok           bool
	)
	decls = append(decls, "i1 PLS_INTEGER;", "i2 PLS_INTEGER;")
	convIn = append(convIn, "params := make([]interface{}, {{.ParamsArrLen}})", "var x, v interface{}\n _,_ = x,v")
	args := make([]Argument, 0, len(fun.Args)+1)
	for _, arg := range fun.Args {
		arg.Name = replHidden(arg.Name)
		args = append(args, arg)
	}
	if fun.Returns != nil {
		args = append(args, *fun.Returns)
	}
	// addParam returns the template placeholder that demap resolves to
	// a positional params[i] reference later.
	addParam := func(paramName string) string {
		if paramName == "" {
			panic("empty param name")
		}
		return `params[{{paramsIdx "` + paramName + `"}}]`
	}
	for _, arg := range args {
		switch arg.Flavor {
		case FLAVOR_SIMPLE:
			name := (CamelCase(arg.Name))
			//name := capitalize(replHidden(arg.Name))
			convIn, convOut = arg.getConvSimple(convIn, convOut,
				name, addParam(arg.Name))
		case FLAVOR_RECORD:
			vn = getInnerVarName(fun.Name(), arg.Name)
			if arg.TypeName == "" {
				// Anonymous record: declare an ad-hoc RECORD type.
				arg.TypeName = mkRecTypName(arg.Name)
				decls = append(decls, "TYPE "+arg.TypeName+" IS RECORD (")
				for i, sub := range arg.RecordOf {
					var comma string
					if i != 0 {
						comma = ","
					}
					decls = append(decls, "  "+comma+sub.Name+" "+sub.AbsType)
				}
				decls = append(decls, ");")
			}
			decls = append(decls, vn+" "+arg.TypeName+"; --E="+arg.Name)
			callArgs[arg.Name] = vn
			aname := (CamelCase(arg.Name))
			//aname := capitalize(replHidden(arg.Name))
			if arg.IsOutput() {
				if arg.IsInput() {
					convIn = append(convIn, fmt.Sprintf(`
	output.%s = new(%s) // sr1
	if input.%s != nil { *output.%s = *input.%s }
	`, aname, withPb(CamelCase(arg.goType(false)[1:])),
						aname, aname, aname))
				} else {
					// yes, convIn - getConvRec uses this!
					convIn = append(convIn, fmt.Sprintf(`
	if output.%s == nil {
		output.%s = new(%s) // sr2
	}`, aname,
						aname, withPb(CamelCase(arg.goType(false)[1:]))))
				}
			}
			for _, a := range arg.RecordOf {
				a := a
				k, v := a.Name, a.Argument
				tmp = getParamName(fun.Name(), vn+"."+k)
				kName := (CamelCase(k))
				//kName := capitalize(replHidden(k))
				name := aname + "." + kName
				if arg.IsInput() {
					pre = append(pre, vn+"."+k+" := :"+tmp+";")
				}
				if arg.IsOutput() {
					post = append(post, ":"+tmp+" := "+vn+"."+k+";")
				}
				convIn, convOut = v.getConvRec(convIn, convOut,
					name, addParam(tmp),
					0, arg, k)
			}
		case FLAVOR_TABLE:
			if arg.Type == "REF CURSOR" {
				if arg.IsInput() {
					Log("msg", "cannot use IN cursor variables", "arg", arg)
					os.Exit(1)
				}
				name := (CamelCase(arg.Name))
				//name := capitalize(replHidden(arg.Name))
				convIn, convOut = arg.getConvSimpleTable(convIn, convOut,
					name, addParam(arg.Name), MaxTableSize)
			} else {
				switch arg.TableOf.Flavor {
				case FLAVOR_SIMPLE: // like simple, but for the arg.TableOf
					typ = getTableType(arg.TableOf.AbsType)
					setvar := ""
					if arg.IsInput() {
						setvar = " := :" + arg.Name
					}
					decls = append(decls, arg.Name+" "+typ+setvar+"; --A="+arg.Name)
					vn = getInnerVarName(fun.Name(), arg.Name)
					callArgs[arg.Name] = vn
					decls = append(decls, vn+" "+arg.TypeName+"; --B="+arg.Name)
					if arg.IsInput() {
						// Copy the bound array into the declared table type.
						pre = append(pre,
							vn+".DELETE;",
							"i1 := "+arg.Name+".FIRST;",
							"WHILE i1 IS NOT NULL LOOP",
							"  "+vn+"(i1) := "+arg.Name+"(i1);",
							"  i1 := "+arg.Name+".NEXT(i1);",
							"END LOOP;")
					}
					if arg.IsOutput() {
						post = append(post,
							arg.Name+".DELETE;",
							"i1 := "+vn+".FIRST;",
							"WHILE i1 IS NOT NULL LOOP",
							"  "+arg.Name+"(i1) := "+vn+"(i1);",
							"  i1 := "+vn+".NEXT(i1);",
							"END LOOP;",
							":"+arg.Name+" := "+arg.Name+";")
					}
					name := (CamelCase(arg.Name))
					//name := capitalize(replHidden(arg.Name))
					convIn, convOut = arg.getConvSimpleTable(convIn, convOut,
						name, addParam(arg.Name), MaxTableSize)
				case FLAVOR_RECORD:
					vn = getInnerVarName(fun.Name(), arg.Name+"."+arg.TableOf.Name)
					callArgs[arg.Name] = vn
					decls = append(decls, vn+" "+arg.TypeName+"; --C="+arg.Name)
					aname := (CamelCase(arg.Name))
					//aname := capitalize(replHidden(arg.Name))
					if arg.IsOutput() {
						st := withPb(CamelCase(arg.TableOf.goType(true)))
						convOut = append(convOut, fmt.Sprintf(`
	if m := %d - cap(output.%s); m > 0 { // %s
		output.%s = append(output.%s[:cap(output.%s)], make([]%s, m)...) // fr1
	}
	output.%s = output.%s[:%d]
	`,
							MaxTableSize, aname, arg.TableOf.goType(true),
							aname, aname, aname, st,
							aname, aname, MaxTableSize))
					}
					if !arg.IsInput() {
						pre = append(pre, vn+".DELETE;")
					}
					// declarations go first
					for _, a := range arg.TableOf.RecordOf {
						a := a
						k, v := a.Name, a.Argument
						typ = getTableType(v.AbsType)
						decls = append(decls, getParamName(fun.Name(), vn+"."+k)+" "+typ+"; --D="+arg.Name)
						tmp = getParamName(fun.Name(), vn+"."+k)
						if arg.IsInput() {
							pre = append(pre, tmp+" := :"+tmp+";")
						} else {
							pre = append(pre, tmp+".DELETE;")
						}
					}
					// here comes the loops
					var idxvar string
					for _, a := range arg.TableOf.RecordOf {
						a := a
						k, v := a.Name, a.Argument
						typ = getTableType(v.AbsType)
						tmp = getParamName(fun.Name(), vn+"."+k)
						if idxvar == "" {
							// The first field's table drives the iteration.
							idxvar = getParamName(fun.Name(), vn+"."+k)
							if arg.IsInput() {
								pre = append(pre, "",
									"i1 := "+idxvar+".FIRST;",
									"WHILE i1 IS NOT NULL LOOP")
							}
							if arg.IsOutput() {
								post = append(post, "",
									"i1 := "+vn+".FIRST; i2 := 1;",
									"WHILE i1 IS NOT NULL LOOP")
							}
						}
						kName := (CamelCase(k))
						//kName := capitalize(replHidden(k))
						//name := aname + "." + kName
						convIn, convOut = v.getConvTableRec(
							convIn, convOut,
							[2]string{aname, kName},
							addParam(tmp),
							MaxTableSize,
							k, *arg.TableOf)
						if arg.IsInput() {
							pre = append(pre,
								"  "+vn+"(i1)."+k+" := "+tmp+"(i1);")
						}
						if arg.IsOutput() {
							post = append(post,
								"  "+tmp+"(i2) := "+vn+"(i1)."+k+";")
						}
					}
					if arg.IsInput() {
						pre = append(pre,
							"  i1 := "+idxvar+".NEXT(i1);",
							"END LOOP;")
					}
					if arg.IsOutput() {
						post = append(post,
							"  i1 := "+vn+".NEXT(i1); i2 := i2 + 1;",
							"END LOOP;")
						for _, a := range arg.TableOf.RecordOf {
							a := a
							k := a.Name
							tmp = getParamName(fun.Name(), vn+"."+k)
							post = append(post, ":"+tmp+" := "+tmp+";")
						}
					}
				default:
					Log("msg", "Only table of simple or record types are allowed (no table of table!)", "function", fun.Name(), "arg", arg.Name)
					os.Exit(1)
				}
			}
		default:
			Log("msg", "unknown flavor", "flavor", arg.Flavor)
			os.Exit(1)
		}
	}
	callb := buffers.Get()
	defer buffers.Put(callb)
	if fun.Returns != nil {
		callb.WriteString(":ret := ")
	}
	//Log("msg","prepareCall", "callArgs", callArgs)
	callb.WriteString(fun.Name() + "(")
	for i, arg := range fun.Args {
		if i > 0 {
			callb.WriteString(",\n\t\t")
		}
		if vn, ok = callArgs[arg.Name]; !ok {
			vn = ":" + arg.Name
		}
		fmt.Fprintf(callb, "%s=>%s", arg.Name, vn)
	}
	callb.WriteString(")")
	call = callb.String()
	return
}
// getIsValidCheck renders a Go boolean expression that tests whether the
// value held in name is present (non-nil / non-null) for arg's Go type.
func (arg Argument) getIsValidCheck(name string) string {
	goType := arg.goType(false)
	switch {
	case goType[0] == '*':
		return name + " != nil"
	case strings.HasPrefix(goType, "ora."):
		return "!" + name + ".IsNull"
	case strings.HasPrefix(goType, "sql.Null"):
		return name + ".Valid"
	default:
		return name + " != nil /*" + goType + "*/"
	}
}
// getConvSimple appends the Go <-> Oracle conversion statements for a
// scalar (non-table, non-record) argument to convIn/convOut and returns
// the extended slices. name is the Go struct field, paramName the
// generated bind-variable name.
func (arg Argument) getConvSimple(
	convIn, convOut []string,
	name, paramName string,
) ([]string, []string) {
	if arg.IsOutput() {
		got := arg.goType(false)
		if got[0] == '*' {
			// Pointer-typed output: allocate the destination first.
			convIn = append(convIn, fmt.Sprintf("output.%s = new(%s) // %s // gcs1", name, got[1:], got))
			if arg.IsInput() {
				// IN OUT: seed the output with the input value when present.
				convIn = append(convIn, fmt.Sprintf(`if input.%s != nil { *output.%s = *input.%s } // gcs2`, name, name, name))
			}
		} else if arg.IsInput() {
			// IN OUT with a value type: copy input straight into output.
			convIn = append(convIn, fmt.Sprintf(`output.%s = input.%s // gcs3`, name, name))
		}
		src := "output." + name
		// Bind the output field by address; when ToOra introduced an
		// intermediate variable, FromOra converts it back after the call.
		in, varName := arg.ToOra(paramName, "&"+src)
		convIn = append(convIn, in+" // gcs3")
		if varName != "" {
			convOut = append(convOut, arg.FromOra(src, paramName, varName))
		}
	} else {
		// IN only: bind the input field directly.
		in, _ := arg.ToOra(paramName, "input."+name)
		convIn = append(convIn, in+" // gcs4")
	}
	return convIn, convOut
}
// getConvSimpleTable appends the Go <-> Oracle conversion statements for a
// PL/SQL table (collection) of a simple element type. REF CURSOR
// arguments are delegated to getConvRefCursor. tableSize caps the
// capacity of the generated Go slices.
func (arg Argument) getConvSimpleTable(
	convIn, convOut []string,
	name, paramName string,
	tableSize int,
) ([]string, []string) {
	if arg.IsOutput() {
		got := arg.goType(true)
		if arg.Type == "REF CURSOR" {
			return arg.getConvRefCursor(convIn, convOut, name, paramName, tableSize)
		}
		if got == "*[]string" {
			// string slices are used without the extra pointer indirection
			got = "[]string"
		}
		if got[0] == '*' {
			// Pointer-to-slice output: allocate, reallocate or truncate the
			// destination so it ends with zero length and tableSize capacity.
			convIn = append(convIn, fmt.Sprintf(`
	if output.%s == nil { // %#v
		x := make(%s, 0, %d)
		output.%s = &x
	} else if cap((*output.%s)) < %d { // simpletable
		*output.%s = make(%s, 0, %d)
	} else {
		*(output.%s) = (*output.%s)[:0]
	}`, name, arg,
				strings.TrimLeft(got, "*"), tableSize,
				name,
				name, tableSize,
				name, strings.TrimLeft(got, "*"), tableSize,
				name, name))
			if arg.IsInput() {
				// IN OUT: seed the output slice with the input value.
				convIn = append(convIn, fmt.Sprintf(`*output.%s = append(*output.%s, input.%s)`, name, name, name))
			}
		} else {
			if arg.IsInput() {
				convIn = append(convIn, fmt.Sprintf("output.%s = input.%s", name, name))
			} else {
				got = CamelCase(got)
				convIn = append(convIn, fmt.Sprintf("output.%s = make(%s, 0, %d) // gcst3", name, got, tableSize))
			}
		}
		// The rewritten paramName (template wrapper stripped) only feeds ToOra,
		// whose result is emitted as a comment; the actual binding is the plain
		// assignment line below.
		in, varName := arg.ToOra(strings.Replace(strings.Replace(paramName, `[{{paramsIdx "`, "__", 1), `"}}]`, "", 1), "output."+name)
		convIn = append(convIn, fmt.Sprintf(`// in=%q varName=%q`, in, varName))
		convIn = append(convIn, fmt.Sprintf(`%s = output.%s // gcst1`, paramName, name))
	} else {
		// NOTE(review): this input-only branch also passes "output.<name>" to
		// ToOra — looks copy-pasted from the output branch; confirm intent.
		in, varName := arg.ToOra(strings.Replace(strings.Replace(paramName, `[{{paramsIdx "`, "__", 1), `"}}]`, "", 1), "output."+name)
		convIn = append(convIn, fmt.Sprintf(`// in=%q varName=%q`, in, varName))
		convIn = append(convIn, fmt.Sprintf("%s = input.%s // gcst2", paramName, name))
	}
	return convIn, convOut
}
// getConvRefCursor appends the conversion statements for a REF CURSOR
// output argument: a fresh ora.Rset is bound in, and an iterator closure
// is registered that drains the result set in batchSize-row batches into
// output.<name>.
//
// NOTE(review): "iterators", "iterator" and the enclosing scope of the
// emitted snippet are resolved in the generated code, not here; batchSize
// is a package-level value — confirm its definition elsewhere in the file.
func (arg Argument) getConvRefCursor(
	convIn, convOut []string,
	name, paramName string,
	tableSize int,
) ([]string, []string) {
	got := arg.goType(true)
	GoT := withPb(CamelCase(got))
	convIn = append(convIn, fmt.Sprintf(`output.%s = make([]%s, 0, %d) // gcrf1
	%s = new(ora.Rset) // gcrf1 %q`,
		name, GoT, tableSize,
		paramName, got))
	convOut = append(convOut, fmt.Sprintf(`
	{
		rset := %s.(*ora.Rset)
		if rset.IsOpen() {
			iterators = append(iterators, iterator{
				Reset: func() { output.%s = nil },
				Iterate: func() error {
					a := output.%s[:0]
					var err error
					for i := 0; i < %d; i++ {
						if !rset.Next() {
							if err = rset.Err; err == nil {
								err = io.EOF
							}
							break
						}
						a = append(a, %s)
					}
					output.%s = a
					return err
				},
			})
		}
	}`,
		paramName,
		name,
		name,
		batchSize,
		arg.getFromRset("rset.Row"),
		name,
	))
	return convIn, convOut
}
// getFromRset renders a composite literal that builds one record value of
// arg's table element type from the current result-set row; rsetRow is the
// Go expression for the row slice (e.g. "rset.Row"). Fields with a
// package-qualified (dotted) Go type go through GetOra, plain types
// through the custom.AsXxx converters.
func (arg Argument) getFromRset(rsetRow string) string {
	// buffers is a shared buffer pool; return the buffer when done.
	buf := buffers.Get()
	defer buffers.Put(buf)
	GoT := CamelCase(arg.goType(true))
	if GoT[0] == '*' {
		// Emit an address-of composite literal instead of a pointer type.
		GoT = "&" + GoT[1:]
	}
	fmt.Fprintf(buf, "%s{\n", withPb(GoT))
	for i, a := range arg.TableOf.RecordOf {
		a := a // per-iteration copy (pre-Go1.22 loop-variable semantics)
		got := a.Argument.goType(true)
		if strings.Contains(got, ".") {
			fmt.Fprintf(buf, "\t%s: %s,\n", CamelCase(a.Name),
				a.GetOra(fmt.Sprintf("%s[%d]", rsetRow, i), ""))
		} else {
			fmt.Fprintf(buf, "\t%s: custom.As%s(%s[%d]),\n", CamelCase(a.Name), CamelCase(got), rsetRow, i)
		}
	}
	fmt.Fprintf(buf, "}")
	return buf.String()
}
func getOutConvTSwitch(name, pTyp string) string {
parse := ""
if strings.HasPrefix(pTyp, "int") {
bits := "32"
if len(pTyp) == 5 {
bits = pTyp[3:5]
}
parse = "ParseInt(xi, 10, " + bits + ")"
} else if strings.HasPrefix(pTyp, "float") {
bits := pTyp[5:7]
parse = "ParseFloat(xi, " + bits + ")"
}
if parse != "" {
return fmt.Sprintf(`
var y `+pTyp+`
err = nil
switch xi := x.(type) {
case int: y = `+pTyp+`(xi)
case int8: y = `+pTyp+`(xi)
case int16: y = `+pTyp+`(xi)
case int32: y = `+pTyp+`(xi)
case int64: y = `+pTyp+`(xi)
case float32: y = `+pTyp+`(xi)
case float64: y = `+pTyp+`(xi)
case string:
//log.Printf("converting %%q to `+pTyp+`", xi)
z, e := strconv.`+parse+`
y, err = `+pTyp+`(z), e
default:
err = fmt.Errorf("out parameter %s is bad type: awaited %s, got %%T", x)
}
if err != nil {
return
}`, name, pTyp)
}
return fmt.Sprintf(`
y, ok := x.(%s)
if !ok {
err = fmt.Errorf("out parameter %s is bad type: awaited %s, got %%T", x)
return
}`, pTyp, name, pTyp)
}
// getConvRec appends the Go <-> Oracle conversion statements for one field
// of a record argument. Output fields are bound by address, with an
// optional FromOra back-conversion; input-only fields are wrapped in a
// nil guard on the record's root field.
func (arg Argument) getConvRec(
	convIn, convOut []string,
	name, paramName string,
	tableSize uint,
	parentArg Argument,
	key string,
) ([]string, []string) {
	switch {
	case arg.IsOutput():
		conv, tmpVar := arg.ToOra(paramName, "&output."+name)
		convIn = append(convIn, conv+" // gcr2 var="+tmpVar)
		if tmpVar != "" {
			convOut = append(convOut, arg.FromOra("output."+name, tmpVar, tmpVar))
		}
	case arg.IsInput():
		root := strings.Split(name, ".")[0]
		conv, _ := arg.ToOra(paramName, "input."+name)
		guarded := fmt.Sprintf(`if input.%s != nil {
			%s
		} // gcr1`,
			root, conv)
		convIn = append(convIn, guarded)
	}
	return convIn, convOut
}
// getConvTableRec appends the conversion statements for one field of a
// table-of-record argument. name is {tableFieldName, recordFieldName},
// paramName the bind variable, and parent the enclosing record argument
// (its goType is the element type of the output slice). An intermediate
// slice x__<table>__<field> of the ora wrapper type is generated, filled
// element-wise in convIn and copied back out in convOut.
func (arg Argument) getConvTableRec(
	convIn, convOut []string,
	name [2]string,
	paramName string,
	tableSize uint,
	key string,
	parent Argument,
) ([]string, []string) {
	lengthS := "0"
	absName := "x__" + name[0] + "__" + name[1]
	typ := arg.goType(true)
	oraTyp := typ
	// Map plain Go types onto their nullable ora wrapper types.
	switch oraTyp {
	case "custom.Date":
		oraTyp = "ora.Date"
	case "float64":
		oraTyp = "ora.Float64"
	case "int32":
		oraTyp = "ora.Int32"
	}
	if arg.IsInput() {
		// IN OUT values are bound by address, IN-only values by value.
		amp := "&"
		if !arg.IsOutput() {
			amp = ""
		}
		lengthS = "len(input." + name[0] + ")"
		too, _ := arg.ToOra(absName+"[i]", "v."+name[1])
		convIn = append(convIn, fmt.Sprintf(`
	%s := make([]%s, %s, %d) // gctr1
	for i,v := range input.%s {
		%s
	} // gctr1
	%s = %s%s`,
			absName,
			oraTyp, lengthS, tableSize,
			name[0], too,
			paramName, amp, absName))
	}
	if arg.IsOutput() {
		if !arg.IsInput() {
			// OUT-only: still allocate the intermediate slice and bind it.
			convIn = append(convIn,
				fmt.Sprintf(`%s := make([]%s, %s, %d) // gctr2
	%s = &%s // gctr2`,
					absName, oraTyp, lengthS, tableSize,
					paramName, absName))
		}
		got := parent.goType(true)
		// Grow output.<table> to the returned length, allocate missing record
		// pointers, then copy each field value back with FromOra.
		convOut = append(convOut,
			fmt.Sprintf(`if m := len(%s)-cap(output.%s); m > 0 { // gctr3
		output.%s = append(output.%s, make([]%s, m)...)
	}
	output.%s = output.%s[:len(%s)]
	for i, v := range %s {
		if output.%s[i] == nil {
			output.%s[i] = new(%s)
		}
		%s // gctr3
	}`,
				absName, name[0],
				name[0], name[0], withPb(CamelCase(got)),
				name[0], name[0], absName,
				absName,
				name[0],
				name[0], withPb(CamelCase(got[1:])),
				arg.FromOra(
					fmt.Sprintf("output.%s[i].%s", name[0], name[1]),
					"v",
					"v",
				)))
	}
	return convIn, convOut
}
// varNames caches, per generated function, the short identifier assigned
// to each long Oracle parameter/variable name.
var varNames = make(map[string]map[string]string, 4)

// getVarName returns a stable short identifier for varName within funName.
// Dotted names reuse the parent's identifier plus "#field"; an empty or
// over-30-character candidate falls back to sequential prefixNNN numbering.
func getVarName(funName, varName, prefix string) string {
	perFun := varNames[funName]
	if perFun == nil {
		perFun = make(map[string]string, 16)
		varNames[funName] = perFun
	}
	if cached, ok := perFun[varName]; ok {
		return cached
	}
	// Sequence number is taken before any recursive insertion, matching the
	// original allocation order.
	seq := len(perFun)
	var short string
	if dot := strings.LastIndex(varName, "."); dot > 0 && dot < len(varName)-1 {
		short = getVarName(funName, varName[:dot], prefix) + "#" + varName[dot+1:]
	}
	if short == "" || len(short) > 30 {
		short = fmt.Sprintf("%s%03d", prefix, seq+1)
	}
	perFun[varName] = short
	return short
}
// getInnerVarName returns the short generated identifier ("v"-prefixed
// fallback) for a local variable within funName.
func getInnerVarName(funName, varName string) string {
	return getVarName(funName, varName, "v")
}

// getParamName returns the short generated identifier ("p"-prefixed
// fallback) for a bind parameter within funName.
func getParamName(funName, paramName string) string {
	return getVarName(funName, paramName, "p")
}
// withPb prefixes a Go type name with the "pb." package qualifier,
// keeping a leading '*' or '&' in front of the qualifier.
func withPb(s string) string {
	switch {
	case s == "":
		return s
	case s[0] == '*', s[0] == '&':
		return s[:1] + "pb." + s[1:]
	default:
		return "pb." + s
	}
}
// idxRemap records how a single named argument moved between index
// positions (Old -> New) when an argument list is reordered.
type idxRemap struct {
	Name     string
	New, Old int
}

// Compile-time assertion that byNewRemap implements sort.Interface.
var _ = sort.Interface(byNewRemap(nil))

// byNewRemap sorts idxRemap entries.
// NOTE(review): despite the name, Less orders by the Old index — confirm
// whether ordering by New was intended.
type byNewRemap []idxRemap

func (s byNewRemap) Len() int           { return len(s) }
func (s byNewRemap) Less(i, j int) bool { return s[i].Old < s[j].Old }
func (s byNewRemap) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
// vim: se noet fileencoding=utf-8:
|
package metrin
import (
"bytes"
"html/template"
"time"
"github.com/aws/aws-sdk-go/service/cloudwatch"
)
// BuildPrintStringInput - includes params and datapoints
type BuildPrintStringInput struct {
	Params         *cloudwatch.GetMetricStatisticsInput // the request that produced Datapoints
	Datapoints     []*cloudwatch.Datapoint              // datapoints to render, one output string each
	TemplateString string                               // template source, parsed with html/template
}

// TemplateInput - input type for each template execution
type TemplateInput struct {
	Params    *cloudwatch.GetMetricStatisticsInput
	Datapoint *cloudwatch.Datapoint // the single datapoint being rendered
}
// BuildPrintStrings renders TemplateString once per datapoint and returns
// the rendered strings in datapoint order. Templates may call the
// "unixtime" helper to format time.Time values as Unix seconds.
// A template parse failure panics via template.Must.
func BuildPrintStrings(input BuildPrintStringInput) []string {
	tmpl := template.New("").Funcs(template.FuncMap{
		"unixtime": func(t time.Time) int64 { return t.Unix() },
	})
	template.Must(tmpl.Parse(input.TemplateString))

	var rendered []string
	for _, dp := range input.Datapoints {
		var buf bytes.Buffer
		// NOTE(review): the execution error is deliberately discarded, as in
		// the original; a failed expansion yields a partial or empty string.
		_ = tmpl.Execute(&buf, TemplateInput{
			Params:    input.Params,
			Datapoint: dp,
		})
		rendered = append(rendered, buf.String())
	}
	return rendered
}
add functions to template
package metrin
import (
"bytes"
"html/template"
"reflect"
"time"
"github.com/aws/aws-sdk-go/service/cloudwatch"
)
// BuildPrintStringInput - includes params and datapoints
type BuildPrintStringInput struct {
	Params         *cloudwatch.GetMetricStatisticsInput // the request that produced Datapoints
	Datapoints     []*cloudwatch.Datapoint              // datapoints to render, one output string each
	TemplateString string                               // template source, parsed with html/template
}

// TemplateInput - input type for each template execution
type TemplateInput struct {
	Params    *cloudwatch.GetMetricStatisticsInput
	Datapoint *cloudwatch.Datapoint // the single datapoint being rendered
}
// BuildPrintStrings renders TemplateString once per datapoint and returns
// the rendered strings in datapoint order. Templates may call:
//   - "unixtime": time.Time -> Unix seconds
//   - "deref":    *float64 -> float64
//   - "getvalue": reflectively reads the datapoint field named by the
//     statIndex-th requested statistic and returns it as *float64
// A template parse failure panics via template.Must.
func BuildPrintStrings(input BuildPrintStringInput) []string {
	tmpl := template.New("").Funcs(template.FuncMap{
		"unixtime": func(t time.Time) int64 { return t.Unix() },
		"deref":    func(v *float64) float64 { return *v },
		"getvalue": func(datapoint *cloudwatch.Datapoint, params *cloudwatch.GetMetricStatisticsInput, statIndex int) *float64 {
			field := reflect.Indirect(reflect.ValueOf(datapoint)).FieldByName(*params.Statistics[statIndex])
			return field.Interface().(*float64)
		},
	})
	template.Must(tmpl.Parse(input.TemplateString))

	var rendered []string
	for _, dp := range input.Datapoints {
		var buf bytes.Buffer
		// NOTE(review): the execution error is deliberately discarded, as in
		// the original; a failed expansion yields a partial or empty string.
		_ = tmpl.Execute(&buf, TemplateInput{
			Params:    input.Params,
			Datapoint: dp,
		})
		rendered = append(rendered, buf.String())
	}
	return rendered
}
|
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replication
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/vmware/harbor/models"
"github.com/vmware/harbor/utils/log"
"github.com/vmware/harbor/utils/registry"
"github.com/vmware/harbor/utils/registry/auth"
)
const (
	// StateCheck is the state that ensures the destination project exists
	// and that the user can write to it.
	StateCheck = "check"
	// StatePullManifest is the state that pulls the manifest of the tag
	// being replicated from the source registry.
	StatePullManifest = "pull_manifest"
	// StateTransferBlob is the state that transfers the tag's missing
	// blobs to the destination registry.
	StateTransferBlob = "transfer_blob"
	// StatePushManifest is the state that pushes the tag's manifest to
	// the destination registry.
	StatePushManifest = "push_manifest"
)

var (
	// ErrConflict represents an http 409 response; Checker.Enter re-checks
	// project existence when it occurs.
	ErrConflict = errors.New("conflict")
)
// BaseHandler holds information shared by the state handlers.
type BaseHandler struct {
	project        string // project name
	repository     string // project_name/repo_name
	tags           []string
	srcURL         string // url of source registry
	dstURL         string // url of destination registry
	dstUsr         string // username for the destination registry
	dstPwd         string // password for the destination registry
	insecure       bool   // whether to skip certificate verification when using https
	srcClient      *registry.Repository
	dstClient      *registry.Repository
	manifest       distribution.Manifest // manifest of tags[0]
	digest         string                // digest of tags[0]'s manifest
	blobs          []string              // blobs to be transferred for tags[0]
	blobsExistence map[string]bool       // key: digest of blob, value: existence on the destination
	logger         *log.Logger
}
// InitBaseHandler initializes a BaseHandler: creating clients for source and destination registry,
// listing tags of the repository if parameter tags is nil.
func InitBaseHandler(repository, srcURL, srcSecret,
	dstURL, dstUsr, dstPwd string, insecure bool, tags []string, logger *log.Logger) (*BaseHandler, error) {

	logger.Infof("initializing: repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
		repository, tags, srcURL, dstURL, insecure, dstUsr)

	base := &BaseHandler{
		repository:     repository,
		tags:           tags,
		srcURL:         srcURL,
		dstURL:         dstURL,
		dstUsr:         dstUsr,
		dstPwd:         dstPwd,
		insecure:       insecure,
		blobsExistence: make(map[string]bool, 10),
		logger:         logger,
	}

	base.project = getProjectName(base.repository)

	// The source registry is accessed with the UI secret carried as a cookie.
	c := &http.Cookie{Name: models.UISecretCookie, Value: srcSecret}
	srcCred := auth.NewCookieCredential(c)
	// srcCred := auth.NewBasicAuthCredential("admin", "Harbor12345")

	srcClient, err := newRepositoryClient(base.srcURL, base.insecure, srcCred,
		base.repository, "repository", base.repository, "pull", "push", "*")
	if err != nil {
		base.logger.Errorf("an error occurred while creating source repository client: %v", err)
		return nil, err
	}
	base.srcClient = srcClient

	// The destination registry is accessed with basic auth.
	dstCred := auth.NewBasicAuthCredential(base.dstUsr, base.dstPwd)
	dstClient, err := newRepositoryClient(base.dstURL, base.insecure, dstCred,
		base.repository, "repository", base.repository, "pull", "push", "*")
	if err != nil {
		base.logger.Errorf("an error occurred while creating destination repository client: %v", err)
		return nil, err
	}
	base.dstClient = dstClient

	// When no explicit tag list is given, replicate every tag of the repository.
	if len(base.tags) == 0 {
		tags, err := base.srcClient.ListTag()
		if err != nil {
			base.logger.Errorf("an error occurred while listing tags for source repository: %v", err)
			return nil, err
		}
		base.tags = tags
	}

	base.logger.Infof("initialization completed: project: %s, repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
		base.project, base.repository, base.tags, base.srcURL, base.dstURL, base.insecure, base.dstUsr)

	return base, nil
}

// Exit implements the state-handler interface; BaseHandler needs no cleanup.
func (b *BaseHandler) Exit() error {
	return nil
}
// getProjectName extracts the project part (everything before the last '/')
// from a repository name such as "project/repo" or "project/sub/repo",
// ignoring surrounding whitespace and trailing slashes.
//
// Bug fix: a repository without '/' made the original slice with
// LastIndex == -1 and panic; it now returns "".
func getProjectName(repository string) string {
	repository = strings.TrimSpace(repository)
	repository = strings.TrimRight(repository, "/")
	i := strings.LastIndex(repository, "/")
	if i < 0 {
		return ""
	}
	return repository[:i]
}
// Checker checks the existence of the project and the user's privilege to the project.
type Checker struct {
	*BaseHandler
}

// Enter checks the existence of the project on the destination registry;
// if it does not exist it is created, and if it exists the user's write
// privilege to it is verified.
func (c *Checker) Enter() (string, error) {
enter:
	exist, canWrite, err := c.projectExist()
	if err != nil {
		c.logger.Errorf("an error occurred while checking existence of project %s on %s with user %s : %v", c.project, c.dstURL, c.dstUsr, err)
		return "", err
	}
	if !exist {
		err := c.createProject()
		if err != nil {
			// other job may be also doing the same thing when the current job
			// is creating project, so when the response code is 409, re-check
			// the existence of project
			if err == ErrConflict {
				goto enter
			} else {
				c.logger.Errorf("an error occurred while creating project %s on %s with user %s : %v", c.project, c.dstURL, c.dstUsr, err)
				return "", err
			}
		}
		c.logger.Infof("project %s is created on %s with user %s", c.project, c.dstURL, c.dstUsr)
		return StatePullManifest, nil
	}

	c.logger.Infof("project %s already exists on %s", c.project, c.dstURL)
	if !canWrite {
		err = fmt.Errorf("the user %s is unauthorized to write to project %s on %s", c.dstUsr, c.project, c.dstURL)
		c.logger.Errorf("%v", err)
		return "", err
	}
	c.logger.Infof("the user %s has write privilege to project %s on %s", c.dstUsr, c.project, c.dstURL)

	return StatePullManifest, nil
}
// projectExist checks whether the project exists on the destination
// registry and, when it does, whether the configured user has write
// privilege (project admin or developer role) to it.
//
// Bug fix: resp.Body is now closed on every path — the original returned
// early on 404/401 before the deferred Close was registered, leaking the
// connection. The "existen" typo in the error message is also corrected.
func (c *Checker) projectExist() (exist, canWrite bool, err error) {
	url := strings.TrimRight(c.dstURL, "/") + "/api/projects/?project_name=" + c.project
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return
	}
	req.SetBasicAuth(c.dstUsr, c.dstPwd)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: c.insecure,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		return
	}
	if resp.StatusCode == http.StatusUnauthorized {
		// 401 means the project exists but this user has no access to it.
		exist = true
		return
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	if resp.StatusCode == http.StatusOK {
		projects := make([]models.Project, 0)
		if err = json.Unmarshal(data, &projects); err != nil {
			return
		}
		if len(projects) == 0 {
			return
		}
		for _, project := range projects {
			if project.Name == c.project {
				exist = true
				canWrite = (project.Role == models.PROJECTADMIN ||
					project.Role == models.DEVELOPER)
				break
			}
		}
		return
	}
	err = fmt.Errorf("an error occurred while checking existence of project %s on %s with user %s: %d %s",
		c.project, c.dstURL, c.dstUsr, resp.StatusCode, string(data))
	return
}
// createProject creates the project on the destination registry, returning
// ErrConflict on HTTP 409 so the caller can re-check existence.
//
// Bug fix: resp.Body is now closed on every path — the original returned
// on 200/201/409 before the deferred Close was registered, leaking the
// connection.
func (c *Checker) createProject() error {
	// TODO handle publicity of project
	project := struct {
		ProjectName string `json:"project_name"`
		Public      bool   `json:"public"`
	}{
		ProjectName: c.project,
	}
	data, err := json.Marshal(project)
	if err != nil {
		return err
	}

	url := strings.TrimRight(c.dstURL, "/") + "/api/projects/"
	req, err := http.NewRequest("POST", url, bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.SetBasicAuth(c.dstUsr, c.dstPwd)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: c.insecure,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// version 0.1.1's response code is 200
	if resp.StatusCode == http.StatusCreated ||
		resp.StatusCode == http.StatusOK {
		return nil
	}
	if resp.StatusCode == http.StatusConflict {
		return ErrConflict
	}

	message, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.logger.Errorf("an error occurred while reading message from response: %v", err)
	}
	return fmt.Errorf("failed to create project %s on %s with user %s: %d %s",
		c.project, c.dstURL, c.dstUsr, resp.StatusCode, string(message))
}
// ManifestPuller pulls the manifest of a tag. And if no tag needs to be pulled,
// the next state that the state machine should enter is "finished".
type ManifestPuller struct {
	*BaseHandler
}

// Enter pulls the manifest of tags[0] from the source registry and records
// in m.blobs which of its blobs are missing on the destination registry.
func (m *ManifestPuller) Enter() (string, error) {
	if len(m.tags) == 0 {
		m.logger.Infof("no tag needs to be replicated, next state is \"finished\"")
		return models.JobFinished, nil
	}

	name := m.repository
	tag := m.tags[0]

	acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
	digest, mediaType, payload, err := m.srcClient.PullManifest(tag, acceptMediaTypes)
	if err != nil {
		m.logger.Errorf("an error occurred while pulling manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	m.digest = digest
	m.logger.Infof("manifest of %s:%s pulled successfully from %s: %s", name, tag, m.srcURL, digest)

	// Some registries report schema1 manifests as plain application/json;
	// normalize so UnMarshal picks the schema1 parser.
	if strings.Contains(mediaType, "application/json") {
		mediaType = schema1.MediaTypeManifest
	}

	manifest, _, err := registry.UnMarshal(mediaType, payload)
	if err != nil {
		m.logger.Errorf("an error occurred while parsing manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	m.manifest = manifest

	// all blobs(layers and config)
	var blobs []string
	for _, discriptor := range manifest.References() {
		blobs = append(blobs, discriptor.Digest.String())
	}
	// config is also need to be transferred if the schema of manifest is v2
	manifest2, ok := manifest.(*schema2.DeserializedManifest)
	if ok {
		blobs = append(blobs, manifest2.Target().Digest.String())
	}
	m.logger.Infof("all blobs of %s:%s from %s: %v", name, tag, m.srcURL, blobs)

	// Check each blob against the destination, caching results in
	// blobsExistence so later tags skip the round trip.
	for _, blob := range blobs {
		exist, ok := m.blobsExistence[blob]
		if !ok {
			exist, err = m.dstClient.BlobExist(blob)
			if err != nil {
				m.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on %s: %v", blob, name, tag, m.dstURL, err)
				return "", err
			}
			m.blobsExistence[blob] = exist
		}
		if !exist {
			m.blobs = append(m.blobs, blob)
		} else {
			m.logger.Infof("blob %s of %s:%s already exists in %s", blob, name, tag, m.dstURL)
		}
	}
	m.logger.Infof("blobs of %s:%s need to be transferred to %s: %v", name, tag, m.dstURL, m.blobs)

	return StateTransferBlob, nil
}
// BlobTransfer transfers the blobs of a tag.
type BlobTransfer struct {
	*BaseHandler
}

// Enter pulls each blob recorded in b.blobs from the source registry and
// pushes it to the destination registry.
func (b *BlobTransfer) Enter() (string, error) {
	name := b.repository
	tag := b.tags[0]
	for _, blob := range b.blobs {
		b.logger.Infof("transferring blob %s of %s:%s to %s ...", blob, name, tag, b.dstURL)
		size, data, err := b.srcClient.PullBlob(blob)
		if err != nil {
			b.logger.Errorf("an error occurred while pulling blob %s of %s:%s from %s: %v", blob, name, tag, b.srcURL, err)
			return "", err
		}
		if err = b.dstClient.PushBlob(blob, size, data); err != nil {
			b.logger.Errorf("an error occurred while pushing blob %s of %s:%s to %s : %v", blob, name, tag, b.dstURL, err)
			return "", err
		}
		b.logger.Infof("blob %s of %s:%s transferred to %s completed", blob, name, tag, b.dstURL)
	}
	return StatePushManifest, nil
}
// ManifestPusher pushes the manifest to the destination registry.
type ManifestPusher struct {
	*BaseHandler
}

// Enter checks the existence of the manifest in the source registry first, and if it
// exists, pushes it to the destination registry. The checking operation is to avoid
// the situation that the tag is deleted during the blobs transferring. In every
// case the per-tag state is cleared and the machine returns to StatePullManifest.
func (m *ManifestPusher) Enter() (string, error) {
	name := m.repository
	tag := m.tags[0]
	_, exist, err := m.srcClient.ManifestExist(tag)
	if err != nil {
		m.logger.Infof("an error occurred while checking the existence of manifest of %s:%s on %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	if !exist {
		m.logger.Infof("manifest of %s:%s does not exist on source registry %s, cancel manifest pushing", name, tag, m.srcURL)
	} else {
		m.logger.Infof("manifest of %s:%s exists on source registry %s, continue manifest pushing", name, tag, m.srcURL)
		_, manifestExist, err := m.dstClient.ManifestExist(m.digest)
		// Bug fix: the error from the destination-side existence check was
		// silently discarded, so a failed check could be mistaken for
		// "manifest absent" and trigger a redundant push.
		if err != nil {
			m.logger.Errorf("an error occurred while checking the existence of manifest of %s:%s on %s: %v", name, tag, m.dstURL, err)
			return "", err
		}
		if manifestExist {
			m.logger.Infof("manifest of %s:%s exists on destination registry %s, skip manifest pushing", name, tag, m.dstURL)
			m.tags = m.tags[1:]
			m.manifest = nil
			m.digest = ""
			m.blobs = nil
			return StatePullManifest, nil
		}

		mediaType, data, err := m.manifest.Payload()
		if err != nil {
			m.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v", name, tag, err)
			return "", err
		}
		if _, err = m.dstClient.PushManifest(tag, mediaType, data); err != nil {
			m.logger.Errorf("an error occurred while pushing manifest of %s:%s to %s : %v", name, tag, m.dstURL, err)
			return "", err
		}
		m.logger.Infof("manifest of %s:%s has been pushed to %s", name, tag, m.dstURL)
	}

	m.tags = m.tags[1:]
	m.manifest = nil
	m.digest = ""
	m.blobs = nil
	return StatePullManifest, nil
}
// newRepositoryClient builds a registry.Repository client for endpoint,
// authenticating through a standard token authorizer scoped to
// scopeType/scopeName/scopeActions and tagging every request with the
// "harbor-registry-client" user agent.
func newRepositoryClient(endpoint string, insecure bool, credential auth.Credential, repository, scopeType, scopeName string,
	scopeActions ...string) (*registry.Repository, error) {

	authorizer := auth.NewStandardTokenAuthorizer(credential, insecure, scopeType, scopeName, scopeActions...)

	store, err := auth.NewAuthorizerStore(endpoint, insecure, authorizer)
	if err != nil {
		return nil, err
	}

	uam := &userAgentModifier{
		userAgent: "harbor-registry-client",
	}

	client, err := registry.NewRepositoryWithModifiers(repository, endpoint, insecure, store, uam)
	if err != nil {
		return nil, err
	}
	return client, nil
}
type userAgentModifier struct {
userAgent string
}
// Modify adds user-agent header to the request
func (u *userAgentModifier) Modify(req *http.Request) error {
req.Header.Set(http.CanonicalHeaderKey("User-Agent"), u.userAgent)
return nil
}
pass golint
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replication
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/vmware/harbor/models"
"github.com/vmware/harbor/utils/log"
"github.com/vmware/harbor/utils/registry"
"github.com/vmware/harbor/utils/registry/auth"
)
const (
	// StateCheck is the state that ensures the destination project exists
	// and that the user can write to it.
	StateCheck = "check"
	// StatePullManifest is the state that pulls the manifest of the tag
	// being replicated from the source registry.
	StatePullManifest = "pull_manifest"
	// StateTransferBlob is the state that transfers the tag's missing
	// blobs to the destination registry.
	StateTransferBlob = "transfer_blob"
	// StatePushManifest is the state that pushes the tag's manifest to
	// the destination registry.
	StatePushManifest = "push_manifest"
)

var (
	// ErrConflict represents an http 409 response; Checker.Enter re-checks
	// project existence when it occurs.
	ErrConflict = errors.New("conflict")
)
// BaseHandler holds information shared by the state handlers.
type BaseHandler struct {
	project        string // project name
	repository     string // project_name/repo_name
	tags           []string
	srcURL         string // url of source registry
	dstURL         string // url of destination registry
	dstUsr         string // username for the destination registry
	dstPwd         string // password for the destination registry
	insecure       bool   // whether to skip certificate verification when using https
	srcClient      *registry.Repository
	dstClient      *registry.Repository
	manifest       distribution.Manifest // manifest of tags[0]
	digest         string                // digest of tags[0]'s manifest
	blobs          []string              // blobs to be transferred for tags[0]
	blobsExistence map[string]bool       // key: digest of blob, value: existence on the destination
	logger         *log.Logger
}
// InitBaseHandler initializes a BaseHandler: creating clients for source and destination registry,
// listing tags of the repository if parameter tags is nil.
func InitBaseHandler(repository, srcURL, srcSecret,
	dstURL, dstUsr, dstPwd string, insecure bool, tags []string, logger *log.Logger) (*BaseHandler, error) {

	logger.Infof("initializing: repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
		repository, tags, srcURL, dstURL, insecure, dstUsr)

	base := &BaseHandler{
		repository:     repository,
		tags:           tags,
		srcURL:         srcURL,
		dstURL:         dstURL,
		dstUsr:         dstUsr,
		dstPwd:         dstPwd,
		insecure:       insecure,
		blobsExistence: make(map[string]bool, 10),
		logger:         logger,
	}

	base.project = getProjectName(base.repository)

	// The source registry is accessed with the UI secret carried as a cookie.
	c := &http.Cookie{Name: models.UISecretCookie, Value: srcSecret}
	srcCred := auth.NewCookieCredential(c)
	// srcCred := auth.NewBasicAuthCredential("admin", "Harbor12345")

	srcClient, err := newRepositoryClient(base.srcURL, base.insecure, srcCred,
		base.repository, "repository", base.repository, "pull", "push", "*")
	if err != nil {
		base.logger.Errorf("an error occurred while creating source repository client: %v", err)
		return nil, err
	}
	base.srcClient = srcClient

	// The destination registry is accessed with basic auth.
	dstCred := auth.NewBasicAuthCredential(base.dstUsr, base.dstPwd)
	dstClient, err := newRepositoryClient(base.dstURL, base.insecure, dstCred,
		base.repository, "repository", base.repository, "pull", "push", "*")
	if err != nil {
		base.logger.Errorf("an error occurred while creating destination repository client: %v", err)
		return nil, err
	}
	base.dstClient = dstClient

	// When no explicit tag list is given, replicate every tag of the repository.
	if len(base.tags) == 0 {
		tags, err := base.srcClient.ListTag()
		if err != nil {
			base.logger.Errorf("an error occurred while listing tags for source repository: %v", err)
			return nil, err
		}
		base.tags = tags
	}

	base.logger.Infof("initialization completed: project: %s, repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
		base.project, base.repository, base.tags, base.srcURL, base.dstURL, base.insecure, base.dstUsr)

	return base, nil
}

// Exit implements the state-handler interface; BaseHandler needs no cleanup.
func (b *BaseHandler) Exit() error {
	return nil
}
// getProjectName extracts the project part (everything before the last '/')
// from a repository name such as "project/repo" or "project/sub/repo",
// ignoring surrounding whitespace and trailing slashes.
//
// Bug fix: a repository without '/' made the original slice with
// LastIndex == -1 and panic; it now returns "".
func getProjectName(repository string) string {
	repository = strings.TrimSpace(repository)
	repository = strings.TrimRight(repository, "/")
	i := strings.LastIndex(repository, "/")
	if i < 0 {
		return ""
	}
	return repository[:i]
}
// Checker checks the existence of the project and the user's privilege to the project.
type Checker struct {
	*BaseHandler
}

// Enter checks the existence of the project on the destination registry;
// if it does not exist it is created, and if it exists the user's write
// privilege to it is verified.
func (c *Checker) Enter() (string, error) {
enter:
	exist, canWrite, err := c.projectExist()
	if err != nil {
		c.logger.Errorf("an error occurred while checking existence of project %s on %s with user %s : %v", c.project, c.dstURL, c.dstUsr, err)
		return "", err
	}
	if !exist {
		err := c.createProject()
		if err != nil {
			// other job may be also doing the same thing when the current job
			// is creating project, so when the response code is 409, re-check
			// the existence of project
			if err == ErrConflict {
				goto enter
			} else {
				c.logger.Errorf("an error occurred while creating project %s on %s with user %s : %v", c.project, c.dstURL, c.dstUsr, err)
				return "", err
			}
		}
		c.logger.Infof("project %s is created on %s with user %s", c.project, c.dstURL, c.dstUsr)
		return StatePullManifest, nil
	}

	c.logger.Infof("project %s already exists on %s", c.project, c.dstURL)
	if !canWrite {
		err = fmt.Errorf("the user %s is unauthorized to write to project %s on %s", c.dstUsr, c.project, c.dstURL)
		c.logger.Errorf("%v", err)
		return "", err
	}
	c.logger.Infof("the user %s has write privilege to project %s on %s", c.dstUsr, c.project, c.dstURL)

	return StatePullManifest, nil
}
// projectExist checks whether the project exists on the destination
// registry and, when it does, whether the configured user has write
// privilege (project admin or developer role) to it.
//
// Bug fix: resp.Body is now closed on every path — the original returned
// early on 404/401 before the deferred Close was registered, leaking the
// connection. The "existen" typo in the error message is also corrected.
func (c *Checker) projectExist() (exist, canWrite bool, err error) {
	url := strings.TrimRight(c.dstURL, "/") + "/api/projects/?project_name=" + c.project
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return
	}
	req.SetBasicAuth(c.dstUsr, c.dstPwd)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: c.insecure,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		return
	}
	if resp.StatusCode == http.StatusUnauthorized {
		// 401 means the project exists but this user has no access to it.
		exist = true
		return
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	if resp.StatusCode == http.StatusOK {
		var projects []models.Project
		if err = json.Unmarshal(data, &projects); err != nil {
			return
		}
		if len(projects) == 0 {
			return
		}
		for _, project := range projects {
			if project.Name == c.project {
				exist = true
				canWrite = (project.Role == models.PROJECTADMIN ||
					project.Role == models.DEVELOPER)
				break
			}
		}
		return
	}
	err = fmt.Errorf("an error occurred while checking existence of project %s on %s with user %s: %d %s",
		c.project, c.dstURL, c.dstUsr, resp.StatusCode, string(data))
	return
}
// createProject creates the project on the destination registry, returning
// ErrConflict on HTTP 409 so the caller can re-check existence.
//
// Bug fix: resp.Body is now closed on every path — the original returned
// on 200/201/409 before the deferred Close was registered, leaking the
// connection.
func (c *Checker) createProject() error {
	// TODO handle publicity of project
	project := struct {
		ProjectName string `json:"project_name"`
		Public      bool   `json:"public"`
	}{
		ProjectName: c.project,
	}
	data, err := json.Marshal(project)
	if err != nil {
		return err
	}

	url := strings.TrimRight(c.dstURL, "/") + "/api/projects/"
	req, err := http.NewRequest("POST", url, bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.SetBasicAuth(c.dstUsr, c.dstPwd)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: c.insecure,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// version 0.1.1's response code is 200
	if resp.StatusCode == http.StatusCreated ||
		resp.StatusCode == http.StatusOK {
		return nil
	}
	if resp.StatusCode == http.StatusConflict {
		return ErrConflict
	}

	message, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.logger.Errorf("an error occurred while reading message from response: %v", err)
	}
	return fmt.Errorf("failed to create project %s on %s with user %s: %d %s",
		c.project, c.dstURL, c.dstUsr, resp.StatusCode, string(message))
}
// ManifestPuller pulls the manifest of a tag. And if no tag needs to be pulled,
// the next state that state machine should enter is "finished".
// It embeds BaseHandler for the shared replication state (tags, clients,
// blob bookkeeping).
type ManifestPuller struct {
	*BaseHandler
}
// Enter pulls the manifest of the next tag, records it on the handler, and
// computes the set of referenced blobs that are missing from the destination
// registry. When no tags remain, the job transitions to "finished".
func (m *ManifestPuller) Enter() (string, error) {
	if len(m.tags) == 0 {
		m.logger.Infof("no tag needs to be replicated, next state is \"finished\"")
		return models.JobFinished, nil
	}
	var (
		name = m.repository
		tag  = m.tags[0]
	)
	mediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
	digest, mediaType, payload, err := m.srcClient.PullManifest(tag, mediaTypes)
	if err != nil {
		m.logger.Errorf("an error occurred while pulling manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	m.digest = digest
	m.logger.Infof("manifest of %s:%s pulled successfully from %s: %s", name, tag, m.srcURL, digest)
	// a generic JSON content type is treated as a schema1 manifest
	if strings.Contains(mediaType, "application/json") {
		mediaType = schema1.MediaTypeManifest
	}
	manifest, _, err := registry.UnMarshal(mediaType, payload)
	if err != nil {
		m.logger.Errorf("an error occurred while parsing manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	m.manifest = manifest
	// collect every referenced blob (layers and config)
	var blobs []string
	for _, descriptor := range manifest.References() {
		blobs = append(blobs, descriptor.Digest.String())
	}
	// the config blob also needs to be transferred when the manifest is schema v2
	if v2Manifest, isV2 := manifest.(*schema2.DeserializedManifest); isV2 {
		blobs = append(blobs, v2Manifest.Target().Digest.String())
	}
	m.logger.Infof("all blobs of %s:%s from %s: %v", name, tag, m.srcURL, blobs)
	for _, blob := range blobs {
		exist, cached := m.blobsExistence[blob]
		if !cached {
			if exist, err = m.dstClient.BlobExist(blob); err != nil {
				m.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on %s: %v", blob, name, tag, m.dstURL, err)
				return "", err
			}
			m.blobsExistence[blob] = exist
		}
		if exist {
			m.logger.Infof("blob %s of %s:%s already exists in %s", blob, name, tag, m.dstURL)
		} else {
			m.blobs = append(m.blobs, blob)
		}
	}
	m.logger.Infof("blobs of %s:%s need to be transferred to %s: %v", name, tag, m.dstURL, m.blobs)
	return StateTransferBlob, nil
}
// BlobTransfer transfers blobs of a tag.
// It embeds BaseHandler for the shared replication state (blob list,
// registry clients).
type BlobTransfer struct {
	*BaseHandler
}
// Enter pulls each missing blob of the current tag from the source registry
// and pushes it to the destination registry, then transitions to manifest
// pushing.
func (b *BlobTransfer) Enter() (string, error) {
	var (
		name = b.repository
		tag  = b.tags[0]
	)
	for _, blob := range b.blobs {
		b.logger.Infof("transferring blob %s of %s:%s to %s ...", blob, name, tag, b.dstURL)
		size, data, err := b.srcClient.PullBlob(blob)
		if err != nil {
			b.logger.Errorf("an error occurred while pulling blob %s of %s:%s from %s: %v", blob, name, tag, b.srcURL, err)
			return "", err
		}
		if err := b.dstClient.PushBlob(blob, size, data); err != nil {
			b.logger.Errorf("an error occurred while pushing blob %s of %s:%s to %s : %v", blob, name, tag, b.dstURL, err)
			return "", err
		}
		b.logger.Infof("blob %s of %s:%s transferred to %s completed", blob, name, tag, b.dstURL)
	}
	return StatePushManifest, nil
}
// ManifestPusher pushes the manifest to the destination registry.
// It embeds BaseHandler for the shared replication state.
type ManifestPusher struct {
	*BaseHandler
}
// Enter checks the existence of manifest in the source registry first, and if it
// exists, pushes it to destination registry. The checking operation is to avoid
// the situation that the tag is deleted during the blobs transferring.
// In all cases the current tag is consumed and per-tag state is reset before
// returning to the pull-manifest state.
func (m *ManifestPusher) Enter() (string, error) {
	name := m.repository
	tag := m.tags[0]
	_, exist, err := m.srcClient.ManifestExist(tag)
	if err != nil {
		m.logger.Infof("an error occurred while checking the existence of manifest of %s:%s on %s: %v", name, tag, m.srcURL, err)
		return "", err
	}
	if !exist {
		m.logger.Infof("manifest of %s:%s does not exist on source registry %s, cancel manifest pushing", name, tag, m.srcURL)
	} else {
		m.logger.Infof("manifest of %s:%s exists on source registry %s, continue manifest pushing", name, tag, m.srcURL)
		_, manifestExist, err := m.dstClient.ManifestExist(m.digest)
		// bug fix: this error was previously ignored, silently treating a
		// failed existence check as "manifest absent" and pushing anyway
		if err != nil {
			m.logger.Errorf("an error occurred while checking the existence of manifest of %s:%s on %s: %v", name, tag, m.dstURL, err)
			return "", err
		}
		if manifestExist {
			m.logger.Infof("manifest of %s:%s exists on destination registry %s, skip manifest pushing", name, tag, m.dstURL)
			m.tags = m.tags[1:]
			m.manifest = nil
			m.digest = ""
			m.blobs = nil
			return StatePullManifest, nil
		}
		mediaType, data, err := m.manifest.Payload()
		if err != nil {
			m.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v", name, tag, err)
			return "", err
		}
		if _, err = m.dstClient.PushManifest(tag, mediaType, data); err != nil {
			m.logger.Errorf("an error occurred while pushing manifest of %s:%s to %s : %v", name, tag, m.dstURL, err)
			return "", err
		}
		m.logger.Infof("manifest of %s:%s has been pushed to %s", name, tag, m.dstURL)
	}
	// consume the tag and reset per-tag state
	m.tags = m.tags[1:]
	m.manifest = nil
	m.digest = ""
	m.blobs = nil
	return StatePullManifest, nil
}
// newRepositoryClient builds a registry.Repository client that authenticates
// with a standard token authorizer and tags requests with the harbor
// registry-client user agent.
func newRepositoryClient(endpoint string, insecure bool, credential auth.Credential, repository, scopeType, scopeName string,
	scopeActions ...string) (*registry.Repository, error) {
	authorizer := auth.NewStandardTokenAuthorizer(credential, insecure, scopeType, scopeName, scopeActions...)
	store, err := auth.NewAuthorizerStore(endpoint, insecure, authorizer)
	if err != nil {
		return nil, err
	}
	modifier := &userAgentModifier{
		userAgent: "harbor-registry-client",
	}
	return registry.NewRepositoryWithModifiers(repository, endpoint, insecure, store, modifier)
}
type userAgentModifier struct {
userAgent string
}
// Modify adds user-agent header to the request
func (u *userAgentModifier) Modify(req *http.Request) error {
req.Header.Set(http.CanonicalHeaderKey("User-Agent"), u.userAgent)
return nil
}
|
// package main provides the monstache binary
package main
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/olivere/elastic/v7"
aws "github.com/olivere/elastic/v7/aws/v4"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/gridfs"
"go.mongodb.org/mongo-driver/mongo/options"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io/ioutil"
"log"
"math"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"text/template"
"time"
)
// Leveled loggers: INFO/WARN/STATS/TRACE write to stdout, ERROR to stderr.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())
// Optional hooks loaded from a Go plugin (see mapper-plugin-path config).
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)
var processPlugin func(*monstachemap.ProcessPluginInput) error
var pipePlugin func(string, bool) ([]interface{}, error)
// Per-namespace javascript environments; key "" holds the global environment
// (see mapDataJavascript, which runs "" first and then the namespace entry).
var mapEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var filterEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var pipeEnvs map[string]*executionEnv = make(map[string]*executionEnv)
// Per-namespace index-name overrides and relation definitions.
var mapIndexTypes map[string]*indexMapping = make(map[string]*indexMapping)
var relates map[string][]*relation = make(map[string][]*relation)
// Namespace feature sets populated from configuration.
var fileNamespaces map[string]bool = make(map[string]bool)
var patchNamespaces map[string]bool = make(map[string]bool)
var tmNamespaces map[string]bool = make(map[string]bool)
var routingNamespaces map[string]bool = make(map[string]bool)
// mux presumably guards shared package-level state — confirm at use sites.
var mux sync.Mutex
// Regexes identifying GridFS chunk collections and system.* collections.
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")
// exitStatus is the process exit code reported at shutdown.
var exitStatus = 0
// version is the monstache release identifier.
const version = "6.0.6"
// Connection, batching, and worker-count defaults.
const mongoURLDefault string = "mongodb://localhost:27017"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 0
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512
const fileDownloadersDefault = 10
const relateThreadsDefault = 10
const relateBufferDefault = 1000
const postProcessorsDefault = 10
// redact replaces sensitive values (e.g. passwords) when printing config.
const redact = "REDACTED"
const configDatabaseNameDefault = "monstache"
// Printf format used when the relate queue is saturated (namespace, id).
const relateQueueOverloadMsg = "Relate queue is full. Skipping relate for %v.(%v) to keep pipeline healthy."
// deleteStrategy selects how document deletes are propagated to
// Elasticsearch; it implements flag.Value (see String/Set below).
type deleteStrategy int
const (
	statelessDeleteStrategy deleteStrategy = iota
	statefulDeleteStrategy
	ignoreDeleteStrategy
)
// buildInfo carries a version string — presumably the MongoDB buildInfo
// command response; confirm at the call site.
type buildInfo struct {
	Version string
}
// stringargs is a repeatable string flag; it implements flag.Value.
type stringargs []string
// indexClient ties together the MongoDB source, the Elasticsearch bulk
// processors, and the channels/waitgroups that move ops through the
// indexing pipeline.
type indexClient struct {
	gtmCtx *gtm.OpCtxMulti
	config *configOptions
	mongo *mongo.Client
	mongoConfig *mongo.Client // connection to the config server (sharded setups)
	bulk *elastic.BulkProcessor
	bulkStats *elastic.BulkProcessor // separate processor for stats documents
	client *elastic.Client
	hsc *httpServerCtx
	fileWg *sync.WaitGroup
	indexWg *sync.WaitGroup
	processWg *sync.WaitGroup
	relateWg *sync.WaitGroup
	opsConsumed chan bool
	closeC chan bool
	doneC chan int
	enabled bool
	lastTs primitive.Timestamp // most recent op timestamp seen
	lastTsSaved primitive.Timestamp // most recent timestamp persisted for resume
	indexC chan *gtm.Op
	processC chan *gtm.Op
	fileC chan *gtm.Op
	relateC chan *gtm.Op
	filter gtm.OpFilter
}
// awsConnect holds AWS signing credentials for Elasticsearch requests.
type awsConnect struct {
	AccessKey string `toml:"access-key"`
	SecretKey string `toml:"secret-key"`
	Region string
}
// executionEnv is one javascript VM plus its script; lock serializes calls
// into the (non-thread-safe) VM.
type executionEnv struct {
	VM *otto.Otto
	Script string
	lock *sync.Mutex
}
// javascript is a script configuration entry bound to a namespace.
type javascript struct {
	Namespace string
	Script string
	Path string
	Routing bool
}
// relation describes how documents in Namespace relate to documents in
// WithNamespace (join via SrcField -> MatchField); db/col cache the parsed
// target namespace.
type relation struct {
	Namespace string
	WithNamespace string `toml:"with-namespace"`
	SrcField string `toml:"src-field"`
	MatchField string `toml:"match-field"`
	KeepSrc bool `toml:"keep-src"`
	MaxDepth int `toml:"max-depth"`
	db string
	col string
}
// indexMapping overrides the index name used for a namespace.
type indexMapping struct {
	Namespace string
	Index string
}
// findConf configures a javascript-exposed find/findOne/pipe helper.
type findConf struct {
	vm *otto.Otto
	ns string
	name string
	client *mongo.Client
	byId bool
	multi bool
	pipe bool
	pipeAllowDisk bool
}
// findCall is one invocation of a find helper with its query options.
type findCall struct {
	config *findConf
	client *mongo.Client
	query interface{}
	db string
	col string
	limit int
	sort map[string]int
	sel map[string]int
}
// logFiles holds optional file paths for each log level; empty means the
// level stays on its default stream.
type logFiles struct {
	Info string
	Warn string
	Error string
	Trace string
	Stats string
}
// indexingMeta carries per-document indexing directives extracted from the
// _meta_monstache attribute (index/type/routing/version etc.).
type indexingMeta struct {
	Routing string
	Index string
	Type string
	Parent string
	Version int64
	VersionType string
	Pipeline string
	RetryOnConflict int
	Skip bool
	ID string
}
// gtmSettings tunes the gtm event channel and buffering.
type gtmSettings struct {
	ChannelSize int `toml:"channel-size"`
	BufferSize int `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
}
// httpServerCtx wraps the optional embedded HTTP server and its state.
type httpServerCtx struct {
	httpServer *http.Server
	bulk *elastic.BulkProcessor
	config *configOptions
	shutdown bool
	started time.Time
}
// configOptions is the full monstache configuration, populated from the
// TOML config file and command-line flags (toml tags give the file keys).
type configOptions struct {
	EnableTemplate bool
	EnvDelimiter string
	// MongoDB connection settings
	MongoURL string `toml:"mongo-url"`
	MongoConfigURL string `toml:"mongo-config-url"`
	MongoOpLogDatabaseName string `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string `toml:"mongo-oplog-collection-name"`
	GtmSettings gtmSettings `toml:"gtm-settings"`
	AWSConnect awsConnect `toml:"aws-connect"`
	Logs logFiles `toml:"logs"`
	GraylogAddr string `toml:"graylog-addr"`
	// Elasticsearch connection settings
	ElasticUrls stringargs `toml:"elasticsearch-urls"`
	ElasticUser string `toml:"elasticsearch-user"`
	ElasticPassword string `toml:"elasticsearch-password"`
	ElasticPemFile string `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile bool `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion string `toml:"elasticsearch-version"`
	ElasticHealth0 int `toml:"elasticsearch-healthcheck-timeout-startup"`
	ElasticHealth1 int `toml:"elasticsearch-healthcheck-timeout"`
	ResumeName string `toml:"resume-name"`
	// Namespace include/exclude regexes
	NsRegex string `toml:"namespace-regex"`
	NsDropRegex string `toml:"namespace-drop-regex"`
	NsExcludeRegex string `toml:"namespace-exclude-regex"`
	NsDropExcludeRegex string `toml:"namespace-drop-exclude-regex"`
	ClusterName string `toml:"cluster-name"`
	Print bool `toml:"print-config"`
	Version bool
	Pprof bool
	EnableOplog bool `toml:"enable-oplog"`
	DisableChangeEvents bool `toml:"disable-change-events"`
	EnableEasyJSON bool `toml:"enable-easy-json"`
	Stats bool
	IndexStats bool `toml:"index-stats"`
	StatsDuration string `toml:"stats-duration"`
	StatsIndexFormat string `toml:"stats-index-format"`
	Gzip bool
	Verbose bool
	Resume bool
	ResumeWriteUnsafe bool `toml:"resume-write-unsafe"`
	ResumeFromTimestamp int64 `toml:"resume-from-timestamp"`
	Replay bool
	DroppedDatabases bool `toml:"dropped-databases"`
	DroppedCollections bool `toml:"dropped-collections"`
	IndexFiles bool `toml:"index-files"`
	IndexAsUpdate bool `toml:"index-as-update"`
	FileHighlighting bool `toml:"file-highlighting"`
	EnablePatches bool `toml:"enable-patches"`
	FailFast bool `toml:"fail-fast"`
	IndexOplogTime bool `toml:"index-oplog-time"`
	OplogTsFieldName string `toml:"oplog-ts-field-name"`
	OplogDateFieldName string `toml:"oplog-date-field-name"`
	OplogDateFieldFormat string `toml:"oplog-date-field-format"`
	ExitAfterDirectReads bool `toml:"exit-after-direct-reads"`
	MergePatchAttr string `toml:"merge-patch-attribute"`
	// Bulk processor tuning
	ElasticMaxConns int `toml:"elasticsearch-max-conns"`
	ElasticRetry bool `toml:"elasticsearch-retry"`
	ElasticMaxDocs int `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes int `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds int `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout int `toml:"elasticsearch-client-timeout"`
	// Parsed server version (see parseElasticsearchVersion)
	ElasticMajorVersion int
	ElasticMinorVersion int
	MaxFileSize int64 `toml:"max-file-size"`
	ConfigFile string
	Script []javascript
	Filter []javascript
	Pipeline []javascript
	Mapping []indexMapping
	Relate []relation
	FileNamespaces stringargs `toml:"file-namespaces"`
	PatchNamespaces stringargs `toml:"patch-namespaces"`
	Workers stringargs
	Worker string
	ChangeStreamNs stringargs `toml:"change-stream-namespaces"`
	DirectReadNs stringargs `toml:"direct-read-namespaces"`
	DirectReadSplitMax int `toml:"direct-read-split-max"`
	DirectReadConcur int `toml:"direct-read-concur"`
	MapperPluginPath string `toml:"mapper-plugin-path"`
	EnableHTTPServer bool `toml:"enable-http-server"`
	HTTPServerAddr string `toml:"http-server-addr"`
	TimeMachineNamespaces stringargs `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix string `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix string `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads bool `toml:"time-machine-direct-reads"`
	PipeAllowDisk bool `toml:"pipe-allow-disk"`
	RoutingNamespaces stringargs `toml:"routing-namespaces"`
	DeleteStrategy deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern string `toml:"delete-index-pattern"`
	ConfigDatabaseName string `toml:"config-database-name"`
	FileDownloaders int `toml:"file-downloaders"`
	RelateThreads int `toml:"relate-threads"`
	RelateBuffer int `toml:"relate-buffer"`
	PostProcessors int `toml:"post-processors"`
	PruneInvalidJSON bool `toml:"prune-invalid-json"`
	Debug bool
	mongoClientOptions *options.ClientOptions
}
// IsIdentity reports whether the relation joins documents by their own _id
// on both sides (used to propagate deletes directly).
func (rel *relation) IsIdentity() bool {
	// return the condition directly instead of if/else over a boolean
	return rel.SrcField == "_id" && rel.MatchField == "_id"
}
// enabled reports whether any log file path has been configured.
func (l *logFiles) enabled() bool {
	for _, path := range []string{l.Info, l.Warn, l.Error, l.Trace, l.Stats} {
		if path != "" {
			return true
		}
	}
	return false
}
// validate checks that access-key and secret-key are either both set or
// both empty; anything in between is a configuration error.
func (ac *awsConnect) validate() error {
	bothEmpty := ac.AccessKey == "" && ac.SecretKey == ""
	bothSet := ac.AccessKey != "" && ac.SecretKey != ""
	if bothEmpty || bothSet {
		return nil
	}
	return errors.New("AWS connect settings must include both access-key and secret-key")
}
// enabled reports whether any AWS credential has been provided.
func (ac *awsConnect) enabled() bool {
	if ac.AccessKey != "" {
		return true
	}
	return ac.SecretKey != ""
}
// String renders the strategy as its numeric value (flag.Value interface).
func (arg *deleteStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses a numeric delete strategy value (flag.Value interface).
func (arg *deleteStrategy) Set(value string) (err error) {
	i, err := strconv.Atoi(value)
	if err == nil {
		*arg = deleteStrategy(i)
	}
	return
}
// String renders the accumulated values (flag.Value interface). %v and %s
// format a []string identically.
func (args *stringargs) String() string {
	return fmt.Sprintf("%v", *args)
}
// Set appends one value to the argument list (flag.Value interface).
func (args *stringargs) Set(value string) error {
	updated := append(*args, value)
	*args = updated
	return nil
}
// readShards reports whether the shard configuration should be read from
// the config server: only when no change stream namespaces are configured
// and a mongo config URL is present.
func (config *configOptions) readShards() bool {
	if len(config.ChangeStreamNs) != 0 {
		return false
	}
	return config.MongoConfigURL != ""
}
// afterBulk is the bulk-processor callback; it logs every failed item of a
// bulk response. Successful responses and transport errors produce no output.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if response == nil || !response.Errors {
		return
	}
	failed := response.Failed()
	if failed == nil {
		return
	}
	for _, item := range failed {
		// bug fix: the local variable was previously named "json", shadowing
		// the encoding/json package inside the loop
		data, merr := json.Marshal(item)
		if merr != nil {
			errorLog.Printf("Unable to marshal bulk response item: %s", merr)
		} else {
			errorLog.Printf("Bulk response item: %s", string(data))
		}
	}
}
// parseElasticsearchVersion parses a dotted version string like "7.10.2"
// and records the major/minor versions on the config. A blank string, a
// non-numeric major/minor component, or major version 0 is an error.
//
// Bug fix: previously, a failed major-version parse (or the "major version
// 0" error) was overwritten with nil when the minor component parsed
// successfully, so e.g. "0.5" was accepted.
func (config *configOptions) parseElasticsearchVersion(number string) (err error) {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return
	}
	config.ElasticMajorVersion = majorVersion
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	if len(versionParts) > 1 {
		var minorVersion int
		minorVersion, err = strconv.Atoi(versionParts[1])
		if err != nil {
			return
		}
		config.ElasticMinorVersion = minorVersion
	}
	return
}
// newBulkProcessor builds the main bulk processor used for indexing, sized
// by the elasticsearch-max-* settings. When retry is disabled a stop
// backoff makes failed bulks give up immediately.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	bulkService.BulkActions(config.ElasticMaxDocs)
	bulkService.BulkSize(config.ElasticMaxBytes)
	// idiom fix: compare booleans directly instead of "== false"
	if !config.ElasticRetry {
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds the single-worker bulk processor used for
// writing stats documents; it flushes every 5 seconds with no doc or size
// limits and no stats collection of its own.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		After(afterBulk).
		FlushInterval(time.Duration(5) * time.Second)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https (ranging over an empty slice is a no-op, so no length guard needed).
func (config *configOptions) needsSecureScheme() bool {
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient builds the Elasticsearch client from the configuration:
// sniffing disabled, optional https scheme, URLs, trace logging, basic auth
// and exponential-backoff retry, plus a custom HTTP client and healthcheck
// timeouts.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	opts := []elastic.ClientOptionFunc{elastic.SetSniff(false)}
	if config.needsSecureScheme() {
		opts = append(opts, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		opts = append(opts, elastic.SetURL(config.ElasticUrls...))
	} else {
		// record the default URL so later code has one to report against
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		opts = append(opts, elastic.SetTraceLog(traceLog), elastic.SetErrorLog(errorLog))
	}
	if config.ElasticUser != "" {
		opts = append(opts, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		backoff := elastic.NewExponentialBackoff(
			time.Duration(50)*time.Millisecond, time.Duration(20)*time.Second)
		opts = append(opts, elastic.SetRetrier(elastic.NewBackoffRetrier(backoff)))
	}
	httpClient, err := config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	opts = append(opts,
		elastic.SetHttpClient(httpClient),
		elastic.SetHealthcheckTimeoutStartup(time.Duration(config.ElasticHealth0)*time.Second),
		elastic.SetHealthcheckTimeout(time.Duration(config.ElasticHealth1)*time.Second))
	return elastic.NewClient(opts...)
}
// testElasticsearchConn probes the first configured URL for the server
// version and, on success, records the parsed major/minor version.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) (err error) {
	number, err := client.ElasticsearchVersion(config.ElasticUrls[0])
	if err != nil {
		return
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all indexes belonging to a dropped database. If the
// first index mapping found for a namespace in that database overrides the
// index name, the wildcard pattern is built from the override instead.
func (ic *indexClient) deleteIndexes(db string) (err error) {
	index := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		if strings.SplitN(ns, ".", 2)[0] != db {
			continue
		}
		if m.Index != "" {
			index = strings.ToLower(m.Index + "*")
		}
		break
	}
	_, err = ic.client.DeleteIndex(index).Do(context.Background())
	return
}
// deleteIndex removes the index for a dropped collection namespace,
// honoring any configured index-name override.
func (ic *indexClient) deleteIndex(namespace string) (err error) {
	index := strings.ToLower(namespace)
	if m, ok := mapIndexTypes[namespace]; ok && m != nil && m.Index != "" {
		index = strings.ToLower(m.Index)
	}
	_, err = ic.client.DeleteIndex(index).Do(context.Background())
	return err
}
// ensureFileMapping installs the ingest pipeline named "attachment" that
// extracts file information from the "file" field of indexed documents.
func (ic *indexClient) ensureFileMapping() (err error) {
	attachment := map[string]interface{}{
		"attachment": map[string]interface{}{
			"field": "file",
		},
	}
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors":  [1]map[string]interface{}{attachment},
	}
	_, err = ic.client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
	return err
}
// defaultIndexMapping maps a namespace to an index of the same name,
// lower-cased. The config argument is currently unused but kept for the
// existing call sites.
func defaultIndexMapping(config *configOptions, op *gtm.Op) *indexMapping {
	mapping := indexMapping{
		Namespace: op.Namespace,
		Index:     strings.ToLower(op.Namespace),
	}
	return &mapping
}
// mapIndex resolves the index mapping for an op, applying any configured
// index-name override for its namespace on top of the default mapping.
func (ic *indexClient) mapIndex(op *gtm.Op) *indexMapping {
	mapping := defaultIndexMapping(ic.config, op)
	if m, ok := mapIndexTypes[op.Namespace]; ok && m != nil && m.Index != "" {
		mapping.Index = m.Index
	}
	return mapping
}
// opIDToString renders a document _id as a string: ObjectIDs as hex,
// whole-number floats without a decimal point, and everything else via %v.
// Rewritten as a type switch with binding instead of repeated assertions.
func opIDToString(op *gtm.Op) string {
	switch id := op.Id.(type) {
	case primitive.ObjectID:
		return id.Hex()
	case float64:
		// float ids holding whole numbers are rendered as integers
		if intID := int(id); id == float64(intID) {
			return fmt.Sprintf("%v", intID)
		}
	case float32:
		if intID := int(id); id == float32(intID) {
			return fmt.Sprintf("%v", intID)
		}
	}
	return fmt.Sprintf("%v", op.Id)
}
// convertSliceJavascript prepares a BSON slice for the javascript VM,
// recursing into maps/slices and rendering ObjectIDs as hex strings.
func convertSliceJavascript(a []interface{}) []interface{} {
	var converted []interface{}
	for _, item := range a {
		var out interface{} = item
		switch v := item.(type) {
		case map[string]interface{}:
			out = convertMapJavascript(v)
		case []interface{}:
			out = convertSliceJavascript(v)
		case primitive.ObjectID:
			out = v.Hex()
		}
		converted = append(converted, out)
	}
	return converted
}
// convertMapJavascript prepares a BSON document for the javascript VM,
// recursing into nested maps/slices and rendering ObjectIDs as hex strings.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	converted := make(map[string]interface{})
	for key, value := range e {
		switch v := value.(type) {
		case map[string]interface{}:
			converted[key] = convertMapJavascript(v)
		case []interface{}:
			converted[key] = convertSliceJavascript(v)
		case primitive.ObjectID:
			converted[key] = v.Hex()
		default:
			converted[key] = value
		}
	}
	return converted
}
// fixSlicePruneInvalidJSON drops slice elements that would break the json
// serializer: time.Time values with years outside 0-9999 and NaN/Inf
// floats. Nested maps and slices are cleaned recursively. id and key are
// used only for log messages.
func fixSlicePruneInvalidJSON(id string, key string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		var avc interface{}
		switch achild := av.(type) {
		case map[string]interface{}:
			avc = fixPruneInvalidJSON(id, achild)
		case []interface{}:
			avc = fixSlicePruneInvalidJSON(id, key, achild)
		case time.Time:
			// year outside of valid range
			if year := achild.Year(); year < 0 || year > 9999 {
				warnLog.Printf("Dropping key %s element: invalid time.Time value: %s for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		case float64:
			// NaN and +/-Inf cause an error in the json serializer; the two
			// original branches logged the identical message, so they are merged
			if math.IsNaN(achild) || math.IsInf(achild, 0) {
				warnLog.Printf("Dropping key %s element: invalid float64 value: %v for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		default:
			avc = av
		}
		avs = append(avs, avc)
	}
	return avs
}
// fixPruneInvalidJSON drops map entries that would break the json
// serializer: time.Time values with years outside 0-9999 and NaN/Inf
// floats. Nested maps and slices are cleaned recursively. id is used only
// for log messages.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, k, child)
		case time.Time:
			// year outside of valid range
			if year := child.Year(); year < 0 || year > 9999 {
				warnLog.Printf("Dropping key %s: invalid time.Time value: %s for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		case float64:
			// NaN and +/-Inf cause an error in the json serializer; the two
			// original branches logged the identical message, so they are merged
			if math.IsNaN(child) || math.IsInf(child, 0) {
				warnLog.Printf("Dropping key %s: invalid float64 value: %v for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts values coming out of the otto VM into plain Go
// types, recursing through maps and slices. Javascript Date values are
// re-parsed from their string form into time.Time (original behavior).
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err != nil {
			errorLog.Printf("Error exporting from javascript: %s", err)
			return
		}
		b = deepExportValue(ex)
	case map[string]interface{}:
		b = deepExportMap(t)
	case []map[string]interface{}:
		b = deepExportMapSlice(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportMapSlice exports each map element of a slice (nil in, nil out).
func deepExportMapSlice(a []map[string]interface{}) []interface{} {
	var exported []interface{}
	for _, m := range a {
		exported = append(exported, deepExportMap(m))
	}
	return exported
}
// deepExportSlice exports each element of a slice (nil in, nil out).
func deepExportSlice(a []interface{}) []interface{} {
	var exported []interface{}
	for _, item := range a {
		exported = append(exported, deepExportValue(item))
	}
	return exported
}
// deepExportMap exports every value of a map into plain Go types.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	exported := make(map[string]interface{}, len(e))
	for key, value := range e {
		exported[key] = deepExportValue(value)
	}
	return exported
}
// mapDataJavascript runs the configured javascript mappers against op: the
// global mapper (key "") first, then the namespace-specific one. A mapper
// may replace op.Data with the object it returns, or veto indexing by
// returning a falsy value (op.Data set to nil).
func mapDataJavascript(op *gtm.Op) error {
	for _, name := range []string{"", op.Namespace} {
		env := mapEnvs[name]
		if env == nil {
			continue
		}
		stop, err := applyJavascriptMapper(env, op)
		if err != nil {
			return err
		}
		if stop {
			break
		}
	}
	return nil
}

// applyJavascriptMapper invokes a single mapping environment on op.
// stop is true when the script vetoed indexing. Extracted so each env's
// lock is released as soon as its call completes — previously the unlock
// was deferred inside a loop, holding every env's lock until the whole
// function returned.
func applyJavascriptMapper(env *executionEnv, op *gtm.Op) (stop bool, err error) {
	env.lock.Lock()
	defer env.lock.Unlock()
	arg := convertMapJavascript(op.Data)
	arg2 := op.Namespace
	arg3 := convertMapJavascript(op.UpdateDescription)
	val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
	if err != nil {
		return false, err
	}
	if strings.ToLower(val.Class()) == "object" {
		data, err := val.Export()
		if err != nil {
			return false, err
		}
		if data == val {
			return false, errors.New("Exported function must return an object")
		}
		dm := data.(map[string]interface{})
		op.Data = deepExportMap(dm)
		return false, nil
	}
	indexed, err := val.ToBoolean()
	if err != nil {
		return false, err
	}
	if !indexed {
		op.Data = nil
		return true, nil
	}
	return false, nil
}
// mapDataGolang invokes the loaded Golang mapper plugin for op and applies
// its directives: Drop clears op.Data, Skip empties it, and (unless
// Passthrough) the returned Document replaces it. Any indexing metadata the
// plugin sets is stored under op.Data["_meta_monstache"].
func mapDataGolang(client *mongo.Client, op *gtm.Op) error {
	input := &monstachemap.MapperPluginInput{
		Document:          op.Data,
		Namespace:         op.Namespace,
		Database:          op.GetDatabase(),
		Collection:        op.GetCollection(),
		Operation:         op.Operation,
		MongoClient:       client,
		UpdateDescription: op.UpdateDescription,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		return nil
	}
	if output.Drop {
		// dropped documents carry no metadata
		op.Data = nil
		return nil
	}
	if output.Skip {
		op.Data = map[string]interface{}{}
	} else if !output.Passthrough { // idiom fix: was "== false"
		if output.Document == nil {
			return errors.New("Map function must return a non-nil document")
		}
		op.Data = output.Document
	}
	// collect indexing metadata requested by the plugin
	meta := make(map[string]interface{})
	if output.Skip {
		meta["skip"] = true
	}
	if output.Index != "" {
		meta["index"] = output.Index
	}
	if output.ID != "" {
		meta["id"] = output.ID
	}
	if output.Type != "" {
		meta["type"] = output.Type
	}
	if output.Routing != "" {
		meta["routing"] = output.Routing
	}
	if output.Parent != "" {
		meta["parent"] = output.Parent
	}
	if output.Version != 0 {
		meta["version"] = output.Version
	}
	if output.VersionType != "" {
		meta["versionType"] = output.VersionType
	}
	if output.Pipeline != "" {
		meta["pipeline"] = output.Pipeline
	}
	if output.RetryOnConflict != 0 {
		meta["retryOnConflict"] = output.RetryOnConflict
	}
	if len(meta) > 0 {
		op.Data["_meta_monstache"] = meta
	}
	return nil
}
// mapData applies the Golang plugin mapper when one is loaded; otherwise it
// falls back to the javascript mappers. config is unused here but kept for
// the existing call sites.
func mapData(client *mongo.Client, config *configOptions, op *gtm.Op) error {
	if mapperPlugin == nil {
		return mapDataJavascript(op)
	}
	return mapDataGolang(client, op)
}
// extractData walks a dotted field path (e.g. "a.b.c") through nested maps
// and returns the value at the leaf. A nil result produces an error whose
// detail is the JSON rendering of the document.
func extractData(srcField string, data map[string]interface{}) (result interface{}, err error) {
	cur := data
	fields := strings.Split(srcField, ".")
	flen := len(fields)
	for i, field := range fields {
		if i+1 == flen {
			result = cur[field]
			break
		}
		next, ok := cur[field].(map[string]interface{})
		if !ok {
			// path broken: an intermediate value is not a document
			break
		}
		cur = next
	}
	if result == nil {
		var detail interface{}
		b, e := json.Marshal(data)
		if e == nil {
			detail = string(b)
		} else {
			// bug fix: previously assigned err (always nil at this point)
			// instead of the marshal error e
			detail = e
		}
		err = fmt.Errorf("Source field %s not found in document: %s", srcField, detail)
	}
	return
}
// buildSelector expands a dotted match field into a nested bson.M query
// document with data placed at the leaf.
func buildSelector(matchField string, data interface{}) bson.M {
	sel := bson.M{}
	fields := strings.Split(matchField, ".")
	leaf := len(fields) - 1
	cur := sel
	for i, field := range fields {
		if i == leaf {
			cur[field] = data
			continue
		}
		child := bson.M{}
		cur[field] = child
		cur = child
	}
	return sel
}
func (ic *indexClient) processRelated(root *gtm.Op) (err error) {
var q []*gtm.Op
batch := []*gtm.Op{root}
depth := 1
for len(batch) > 0 {
for _, e := range batch {
op := e
if op.Data == nil {
continue
}
rs := relates[op.Namespace]
if len(rs) == 0 {
continue
}
for _, r := range rs {
if r.MaxDepth > 0 && r.MaxDepth < depth {
continue
}
if op.IsDelete() && r.IsIdentity() {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: r.WithNamespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: op.Data,
}
ic.doDelete(rop)
q = append(q, rop)
continue
}
var srcData interface{}
if srcData, err = extractData(r.SrcField, op.Data); err != nil {
ic.processErr(err)
continue
}
opts := &options.FindOptions{}
col := ic.mongo.Database(r.db).Collection(r.col)
sel := buildSelector(r.MatchField, srcData)
cursor, err := col.Find(context.Background(), sel, opts)
doc := make(map[string]interface{})
for cursor.Next(context.Background()) {
if err = cursor.Decode(&doc); err != nil {
ic.processErr(err)
continue
}
now := time.Now().UTC()
tstamp := primitive.Timestamp{
T: uint32(now.Unix()),
I: uint32(now.Nanosecond()),
}
rop := >m.Op{
Id: doc["_id"],
Data: doc,
Operation: root.Operation,
Namespace: r.WithNamespace,
Source: gtm.DirectQuerySource,
Timestamp: tstamp,
UpdateDescription: root.UpdateDescription,
}
doc = make(map[string]interface{})
if ic.filter != nil && !ic.filter(rop) {
continue
}
if processPlugin != nil {
pop := >m.Op{
Id: rop.Id,
Operation: rop.Operation,
Namespace: rop.Namespace,
Source: rop.Source,
Timestamp: rop.Timestamp,
UpdateDescription: rop.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(rop.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
pop.Data = m
}
}
ic.processC <- pop
}
skip := false
if rs2 := relates[rop.Namespace]; len(rs2) != 0 {
skip = true
visit := false
for _, r2 := range rs2 {
if r2.KeepSrc {
skip = false
}
if r2.MaxDepth < 1 || r2.MaxDepth >= (depth+1) {
visit = true
}
}
if visit {
q = append(q, rop)
}
}
if !skip {
if ic.hasFileContent(rop) {
ic.fileC <- rop
} else {
ic.indexC <- rop
}
}
}
cursor.Close(context.Background())
}
}
depth++
batch = q
q = nil
}
return
}
// prepareDataForIndexing normalizes op.Data just before it is handed to the
// bulk indexer: optionally stamps the oplog timestamp/date, strips the _id
// and _meta_monstache fields, optionally prunes JSON-invalid values, and
// converts the map for JSON serialization.
func (ic *indexClient) prepareDataForIndexing(op *gtm.Op) {
	conf := ic.config
	data := op.Data
	if conf.IndexOplogTime {
		ts := time.Unix(int64(op.Timestamp.T), 0).UTC()
		data[conf.OplogTsFieldName] = op.Timestamp
		data[conf.OplogDateFieldName] = ts.Format(conf.OplogDateFieldFormat)
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if conf.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
	op.Data = monstachemap.ConvertMapForJSON(op.Data)
}
// parseIndexMeta builds the indexing metadata for an operation, seeding the
// version from the oplog timestamp and merging any attributes found under
// the document's _meta_monstache key.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     tsVersion(op.Timestamp),
		VersionType: "external",
	}
	m, found := op.Data["_meta_monstache"]
	if !found {
		return meta
	}
	switch attrs := m.(type) {
	case map[string]interface{}:
		meta.load(attrs)
	case otto.Value:
		// Metadata produced by a javascript mapper: export it to Go first.
		if ex, err := attrs.Export(); err == nil && ex != m {
			if exported, ok := ex.(map[string]interface{}); ok {
				meta.load(exported)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent downloads the GridFS file content associated with op and
// stores it, base64 encoded, under the "file" attribute of op.Data.
// Content is omitted (the attribute stays empty) when the file exceeds the
// configured MaxFileSize.
func (ic *indexClient) addFileContent(op *gtm.Op) (err error) {
	op.Data["file"] = ""
	var gridByteBuffer bytes.Buffer
	db := ic.mongo.Database(op.GetDatabase())
	// GridFS collections are named <bucket>.files / <bucket>.chunks;
	// recover the bucket name from the collection prefix.
	bucketName := strings.SplitN(op.GetCollection(), ".", 2)[0]
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	opts := &options.BucketOptions{}
	opts.SetName(bucketName)
	var bucket *gridfs.Bucket
	bucket, err = gridfs.NewBucket(db, opts)
	if err != nil {
		return
	}
	var size int64
	if size, err = bucket.DownloadToStream(op.Id, encoder); err != nil {
		return
	}
	if ic.config.MaxFileSize > 0 && size > ic.config.MaxFileSize {
		warnLog.Printf("File size %d exceeds max file size. file content omitted.", size)
		return
	}
	// Close flushes any partially buffered base64 data into the buffer.
	if err = encoder.Close(); err != nil {
		return
	}
	op.Data["file"] = gridByteBuffer.String()
	return
}
// notMonstache returns a filter that rejects operations targeting the
// monstache configuration database itself.
func notMonstache(config *configOptions) gtm.OpFilter {
	configDB := config.ConfigDatabaseName
	return func(op *gtm.Op) bool {
		return op.GetDatabase() != configDB
	}
}
// notChunks reports true unless op targets a GridFS chunks collection.
func notChunks(op *gtm.Op) bool {
	isChunks := chunksRegex.MatchString(op.GetCollection())
	return !isChunks
}
// notConfig reports true unless op targets the MongoDB "config" database.
func notConfig(op *gtm.Op) bool {
	db := op.GetDatabase()
	return db != "config"
}
// notSystem reports true unless op targets a MongoDB system collection.
func notSystem(op *gtm.Op) bool {
	isSystem := systemsRegex.MatchString(op.GetCollection())
	return !isSystem
}
// filterWithRegex returns a filter keeping only non-drop operations whose
// namespace matches the given regular expression. Drops always pass.
func filterWithRegex(regex string) gtm.OpFilter {
	allowed := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || allowed.MatchString(op.Namespace)
	}
}
// filterDropWithRegex returns a filter keeping only drop operations whose
// namespace matches the given regular expression. Non-drops always pass.
func filterDropWithRegex(regex string) gtm.OpFilter {
	allowed := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return !op.IsDrop() || allowed.MatchString(op.Namespace)
	}
}
// filterWithPlugin returns a filter that delegates the keep decision for
// insert/update documents to the loaded Filter plugin function. All other
// operations are kept unconditionally; a plugin error drops the document.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		isDocOp := (op.IsInsert() || op.IsUpdate()) && op.Data != nil
		if !isDocOp {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:          op.Data,
			Namespace:         op.Namespace,
			Database:          op.GetDatabase(),
			Collection:        op.GetCollection(),
			Operation:         op.Operation,
			UpdateDescription: op.UpdateDescription,
		}
		ok, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return ok
	}
}
// filterWithScript returns a filter that runs insert/update documents
// through the configured javascript filter environments. Both the global
// environment (empty namespace) and the namespace-specific one are
// consulted; the first that rejects the document wins.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep bool = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			nss := []string{"", op.Namespace}
			for _, ns := range nss {
				if env := filterEnvs[ns]; env != nil {
					keep = false
					arg := convertMapJavascript(op.Data)
					arg2 := op.Namespace
					arg3 := convertMapJavascript(op.UpdateDescription)
					// Run the script call inside its own function so the lock
					// is released at the end of each iteration. The previous
					// defer kept every environment locked until the outer
					// closure returned.
					func() {
						env.lock.Lock()
						defer env.lock.Unlock()
						val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
						if err != nil {
							errorLog.Println(err)
							return
						}
						if ok, err := val.ToBoolean(); err == nil {
							keep = ok
						} else {
							errorLog.Println(err)
						}
					}()
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex returns a filter rejecting non-drop operations
// whose namespace matches the given regular expression. Drops always pass.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	denied := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || !denied.MatchString(op.Namespace)
	}
}
// filterDropInverseWithRegex returns a filter rejecting drop operations
// whose namespace matches the given regular expression. Non-drops pass.
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	denied := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return !op.IsDrop() || !denied.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates the TTL index on the cluster collection that
// expires the process-enabled marker document 30 seconds after its
// expireAt value, so a crashed process eventually releases the cluster.
func (ic *indexClient) ensureClusterTTL() error {
	opts := options.Index().
		SetName("expireAt").
		SetBackground(true).
		SetExpireAfterSeconds(30)
	model := mongo.IndexModel{
		Keys:    bson.M{"expireAt": 1},
		Options: opts,
	}
	indexes := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster").Indexes()
	_, err := indexes.CreateOne(context.Background(), model)
	return err
}
// enableProcess attempts to become the active process of a cluster by
// inserting a marker document. It returns true when this process won the
// insert race and false (without error) when another process holds it.
func (ic *indexClient) enableProcess() (bool, error) {
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	doc := bson.M{
		"_id":      ic.config.ResumeName,
		"expireAt": time.Now().UTC(),
		"pid":      os.Getpid(),
		"host":     host,
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	_, err = col.InsertOne(context.Background(), doc)
	switch {
	case err == nil:
		return true, nil
	case isDup(err):
		// Duplicate key: another process is currently enabled.
		return false, nil
	default:
		return false, err
	}
}
// isDup reports whether err represents a MongoDB duplicate key error,
// checking both the write errors and the write concern error.
func isDup(err error) bool {
	isDupCode := func(code int, message string) bool {
		return code == 11000 ||
			code == 11001 ||
			code == 12582 ||
			strings.Contains(message, "E11000")
	}
	we, ok := err.(mongo.WriteException)
	if !ok {
		return false
	}
	if wce := we.WriteConcernError; wce != nil {
		if isDupCode(wce.Code, wce.Message) {
			return true
		}
	}
	// Ranging over a nil slice is a no-op, so no explicit nil check is
	// needed (the original also shadowed `we` with its WriteErrors slice).
	for _, e := range we.WriteErrors {
		if isDupCode(e.Code, e.Message) {
			return true
		}
	}
	return false
}
// resetClusterState removes this process's enabled marker from the cluster
// collection, allowing another process to take over immediately.
func (ic *indexClient) resetClusterState() error {
	filter := bson.M{"_id": ic.config.ResumeName}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	_, err := col.DeleteOne(context.Background(), filter)
	return err
}
// ensureEnabled checks whether this process is still the active process in
// the cluster and, when it is, refreshes the expireAt heartbeat on its
// marker document.
func (ic *indexClient) ensureEnabled() (enabled bool, err error) {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err = result.Err(); err != nil {
		return
	}
	doc := make(map[string]interface{})
	if err = result.Decode(&doc); err != nil {
		return
	}
	// Guarded type assertions: a missing or malformed pid/host leaves the
	// process disabled instead of panicking (the original asserted the
	// types unchecked after only a nil check).
	pid, okPid := doc["pid"].(int32)
	host, okHost := doc["host"].(string)
	if !okPid || !okHost {
		return
	}
	var hostname string
	if hostname, err = os.Hostname(); err != nil {
		return
	}
	enabled = int(pid) == os.Getpid() && host == hostname
	if enabled {
		// Heartbeat: push expireAt forward so the TTL index does not
		// expire our marker while we are alive.
		_, err = col.UpdateOne(context.Background(), bson.M{
			"_id": ic.config.ResumeName,
		}, bson.M{
			"$set": bson.M{"expireAt": time.Now().UTC()},
		})
	}
	return
}
// resumeWork positions the gtm context at the last saved timestamp (when
// one exists), discards any operations already buffered on the channel,
// and resumes event processing.
func (ic *indexClient) resumeWork() {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if result.Err() == nil {
		doc := make(map[string]interface{})
		if err := result.Decode(&doc); err == nil {
			if doc["ts"] != nil {
				ts := doc["ts"].(primitive.Timestamp)
				ic.gtmCtx.Since(ts)
			}
		}
	}
	// Drain operations queued before the resume point without blocking.
drain:
	for {
		select {
		case _, open := <-ic.gtmCtx.OpC:
			if !open {
				break drain
			}
		default:
			break drain
		}
	}
	ic.gtmCtx.Resume()
}
// saveTimestamp upserts the last processed oplog timestamp so that a
// subsequent run can resume where this one left off.
func (ic *indexClient) saveTimestamp() error {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	filter := bson.M{"_id": ic.config.ResumeName}
	update := bson.M{
		"$set": map[string]interface{}{"ts": ic.lastTs},
	}
	upsert := options.Update().SetUpsert(true)
	_, err := col.UpdateOne(context.Background(), filter, update, upsert)
	return err
}
// parseCommandLineFlags registers every monstache command line flag and
// parses os.Args into config. Values left at their zero value here may
// later be filled in from the config file or from defaults.
// (Fixes several typos in user-facing help text and the copy-pasted
// description of -time-machine-namespace.)
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.BoolVar(&config.EnableTemplate, "tpl", false, "True to interpret the config file as a template")
	flag.StringVar(&config.EnvDelimiter, "env-delimiter", ",", "A delimiter to use when splitting environment variable values")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.PostProcessors, "post-processors", 0, "Number of post-processing go routines")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.IntVar(&config.RelateThreads, "relate-threads", 0, "Number of threads dedicated to processing relationships")
	flag.IntVar(&config.RelateBuffer, "relate-buffer", 0, "Number of relates to queue before skipping and reporting an error")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to enable gzip for requests to Elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableOplog, "enable-oplog", false, "True to enable direct tailing of the oplog")
	flag.BoolVar(&config.DisableChangeEvents, "disable-change-events", false, "True to disable listening for changes. You must provide direct-reads in this case")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include a json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	flag.Var(&config.ChangeStreamNs, "change-stream-namespace", "A list of change stream namespaces")
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	flag.IntVar(&config.DirectReadConcur, "direct-read-concur", 0, "Max number of direct-read-namespaces to read concurrently. By default all given are read concurrently")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of time machine namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to prepend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into any time machine indexes")
	flag.BoolVar(&config.PipeAllowDisk, "pipe-allow-disk", false, "True to allow MongoDB to use the disk for pipeline options with lots of results")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Strategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restrict the scope of stateless deletes")
	flag.StringVar(&config.ConfigDatabaseName, "config-database-name", "", "The MongoDB database name that monstache uses to store metadata")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.BoolVar(&config.Debug, "debug", false, "True to enable verbose debug information")
	flag.Parse()
	return config
}
// loadReplacements validates the relate configurations and registers each
// relation, keyed by its source namespace, in the global relates map.
// SrcField and MatchField default to "_id" when unset.
func (config *configOptions) loadReplacements() {
	if config.Relate != nil {
		for _, r := range config.Relate {
			// Fix: both namespaces are required. The original tested with
			// "||", letting a relation with only one of them set bypass
			// this check and fail later with a misleading error (or be
			// registered under an empty namespace).
			if r.Namespace != "" && r.WithNamespace != "" {
				dbCol := strings.SplitN(r.WithNamespace, ".", 2)
				if len(dbCol) != 2 {
					errorLog.Fatalf("Replacement namespace is invalid: %s", r.WithNamespace)
				}
				database, collection := dbCol[0], dbCol[1]
				rel := &relation{
					Namespace:     r.Namespace,
					WithNamespace: r.WithNamespace,
					SrcField:      r.SrcField,
					MatchField:    r.MatchField,
					KeepSrc:       r.KeepSrc,
					MaxDepth:      r.MaxDepth,
					db:            database,
					col:           collection,
				}
				if rel.SrcField == "" {
					rel.SrcField = "_id"
				}
				if rel.MatchField == "" {
					rel.MatchField = "_id"
				}
				relates[rel.Namespace] = append(relates[rel.Namespace], rel)
			} else {
				errorLog.Fatalln("Relates must specify namespace and with-namespace")
			}
		}
	}
}
// loadIndexTypes registers the configured namespace-to-index mappings in
// the global mapIndexTypes table. Index names are lowercased because
// Elasticsearch requires lowercase index names.
func (config *configOptions) loadIndexTypes() {
	if config.Mapping == nil {
		return
	}
	for _, m := range config.Mapping {
		if m.Namespace == "" || m.Index == "" {
			errorLog.Fatalln("Mappings must specify namespace and index")
		}
		mapIndexTypes[m.Namespace] = &indexMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
		}
	}
}
// loadPipelines compiles each configured javascript pipeline into its own
// execution environment and registers it by namespace in pipeEnvs. Each
// script must assign a function to module.exports.
func (config *configOptions) loadPipelines() {
	for _, s := range config.Pipeline {
		if s.Path == "" && s.Script == "" {
			errorLog.Fatalln("Pipelines must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Pipelines must specify path or script but not both")
		}
		if s.Path != "" {
			if script, err := ioutil.ReadFile(s.Path); err == nil {
				s.Script = string(script[:])
			} else {
				errorLog.Fatalf("Unable to load pipeline at path %s: %s", s.Path, err)
			}
		}
		// Fix: duplicate detection previously consulted filterEnvs (the
		// filter registry, copy-pasted from loadFilters), so duplicate
		// pipeline namespaces were never reported.
		if _, exists := pipeEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple pipelines with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		pipeEnvs[s.Namespace] = env
	}
}
// loadFilters compiles each configured javascript filter into its own
// execution environment and registers it by namespace in filterEnvs. Each
// script must assign a function to module.exports.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Filters must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Filters must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load filter at path %s: %s", s.Path, err)
			}
			s.Script = string(script)
		}
		if _, exists := filterEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple filters with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		filterEnvs[s.Namespace] = env
	}
}
// loadScripts compiles each configured javascript mapping script into its
// own execution environment, registers it by namespace in mapEnvs, and
// records namespaces whose scripts also provide routing information.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Scripts must specify path or script")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Scripts must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load script at path %s: %s", s.Path, err)
			}
			s.Script = string(script)
		}
		if _, exists := mapEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple scripts with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		mapEnvs[s.Namespace] = env
		if s.Routing {
			routingNamespaces[s.Namespace] = true
		}
	}
}
// loadPlugins opens the configured Go plugin (when a path is set) and
// wires up any exported Map, Filter, Process, and Pipeline functions it
// provides. A symbol with the wrong type is fatal; a plugin exporting
// none of the four only produces a warning.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Fatalf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	funcDefined := false
	if sym, err := p.Lookup("Map"); err == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Map' function must be typed %T", mapperPlugin)
		}
		mapperPlugin = fn
	}
	if sym, err := p.Lookup("Filter"); err == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.MapperPluginInput) (bool, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
		filterPlugin = fn
	}
	if sym, err := p.Lookup("Process"); err == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.ProcessPluginInput) error)
		if !ok {
			errorLog.Fatalf("Plugin 'Process' function must be typed %T", processPlugin)
		}
		processPlugin = fn
	}
	if sym, err := p.Lookup("Pipeline"); err == nil {
		funcDefined = true
		fn, ok := sym.(func(string, bool) ([]interface{}, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Pipeline' function must be typed %T", pipePlugin)
		}
		pipePlugin = fn
	}
	if !funcDefined {
		warnLog.Println("Plugin loaded but did not find a Map, Filter, Process or Pipeline function")
	}
	return config
}
// decodeAsTemplate reads the config file, executes it as a Go template
// with the process environment variables as data, and decodes the result
// as TOML into config. Unknown keys in the rendered TOML are fatal.
func (config *configOptions) decodeAsTemplate() *configOptions {
	env := map[string]string{}
	for _, kv := range os.Environ() {
		pair := strings.SplitN(kv, "=", 2)
		if len(pair) == 2 {
			env[pair[0]] = pair[1]
		}
	}
	tpl, err := ioutil.ReadFile(config.ConfigFile)
	if err != nil {
		errorLog.Fatalln(err)
	}
	t := template.Must(template.New("config").Parse(string(tpl)))
	var rendered bytes.Buffer
	if err := t.Execute(&rendered, env); err != nil {
		errorLog.Fatalln(err)
	}
	md, err := toml.Decode(rendered.String(), config)
	if err != nil {
		errorLog.Fatalln(err)
	}
	if ud := md.Undecoded(); len(ud) != 0 {
		errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
	}
	return config
}
// loadConfigFile merges settings from the TOML config file (when one was
// given) into config. Values already set on config — from command line
// flags or environment variables — take precedence: for strings and
// numbers the file value is applied only when config's value is still
// zero; for booleans either source may turn the option on. Scripts,
// filters, pipelines, mappings, and relates declared in the file are
// compiled and registered globally as a side effect at the end.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Decode into a scratch configOptions seeded with defaults so we
		// can tell which values the file actually provided.
		var tomlConfig = configOptions{
			ConfigFile:         config.ConfigFile,
			DroppedDatabases:   true,
			DroppedCollections: true,
			GtmSettings:        gtmDefaultSettings(),
		}
		if config.EnableTemplate {
			tomlConfig.decodeAsTemplate()
		} else {
			if md, err := toml.DecodeFile(tomlConfig.ConfigFile, &tomlConfig); err != nil {
				errorLog.Fatalln(err)
			} else if ud := md.Undecoded(); len(ud) != 0 {
				errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
			}
		}
		// String and numeric options: fill in only when still unset.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		// ElasticValidatePemFile defaults to true; the file may only
		// disable it.
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.ElasticHealth0 == 0 {
			config.ElasticHealth0 = tomlConfig.ElasticHealth0
		}
		if config.ElasticHealth1 == 0 {
			config.ElasticHealth1 = tomlConfig.ElasticHealth1
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		if config.DirectReadConcur == 0 {
			config.DirectReadConcur = tomlConfig.DirectReadConcur
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if !config.IndexFiles {
			config.IndexFiles = tomlConfig.IndexFiles
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.RelateThreads == 0 {
			config.RelateThreads = tomlConfig.RelateThreads
		}
		if config.RelateBuffer == 0 {
			config.RelateBuffer = tomlConfig.RelateBuffer
		}
		if config.PostProcessors == 0 {
			config.PostProcessors = tomlConfig.PostProcessors
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		// DroppedDatabases/DroppedCollections default to true; the file
		// may only disable them.
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		// Boolean options defaulting to false: either source may enable.
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableOplog && tomlConfig.EnableOplog {
			config.EnableOplog = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.DisableChangeEvents && tomlConfig.DisableChangeEvents {
			config.DisableChangeEvents = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Debug && tomlConfig.Debug {
			config.Debug = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if config.ConfigDatabaseName == "" {
			config.ConfigDatabaseName = tomlConfig.ConfigDatabaseName
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		if config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Namespace lists adopted from the file also need their derived
		// lookup tables rebuilt.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
				config.loadGridFsConfig()
			}
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
				config.loadPatchNamespaces()
			}
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if !config.PipeAllowDisk {
			config.PipeAllowDisk = tomlConfig.PipeAllowDisk
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ChangeStreamNs) == 0 {
			config.ChangeStreamNs = tomlConfig.ChangeStreamNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		if !config.AWSConnect.enabled() {
			config.AWSConnect = tomlConfig.AWSConnect
		}
		if !config.Logs.enabled() {
			config.Logs = tomlConfig.Logs
		}
		// These sections only exist in the file, never on the command
		// line, so they are adopted unconditionally.
		config.GtmSettings = tomlConfig.GtmSettings
		config.Relate = tomlConfig.Relate
		// Compile and register file-declared scripts/filters/pipelines
		// and the mapping/relation tables (global side effects).
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadPipelines()
		tomlConfig.loadIndexTypes()
		tomlConfig.loadReplacements()
	}
	return config
}
// newLogger builds a rotating file logger writing to path. Files rotate
// at 500 megabytes; up to 5 backups are kept for at most 28 days.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	const (
		maxSizeMB  = 500
		maxBackups = 5
		maxAgeDays = 28
	)
	return &lumberjack.Logger{
		Filename:   path,
		MaxSize:    maxSizeMB,
		MaxBackups: maxBackups,
		MaxAge:     maxAgeDays,
	}
}
// setupLogging wires the package loggers to their destinations. A
// configured Graylog address takes precedence and receives every log
// level over GELF/UDP; otherwise each logger that has a file path
// configured gets its own rotating file writer.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr != "" {
		writer, err := gelf.NewUDPWriter(config.GraylogAddr)
		if err != nil {
			errorLog.Fatalf("Error creating gelf writer: %s", err)
		}
		infoLog.SetOutput(writer)
		warnLog.SetOutput(writer)
		errorLog.SetOutput(writer)
		traceLog.SetOutput(writer)
		statsLog.SetOutput(writer)
		return config
	}
	fileLogs := config.Logs
	if fileLogs.Info != "" {
		infoLog.SetOutput(config.newLogger(fileLogs.Info))
	}
	if fileLogs.Warn != "" {
		warnLog.SetOutput(config.newLogger(fileLogs.Warn))
	}
	if fileLogs.Error != "" {
		errorLog.SetOutput(config.newLogger(fileLogs.Error))
	}
	if fileLogs.Trace != "" {
		traceLog.SetOutput(config.newLogger(fileLogs.Trace))
	}
	if fileLogs.Stats != "" {
		statsLog.SetOutput(config.newLogger(fileLogs.Stats))
	}
	return config
}
// build assembles the effective configuration by layering sources in a
// fixed order: environment variables first, then the namespace lookup
// maps from any command-line values, then the config file, plugins, and
// finally hard-coded defaults for anything still unset. The order
// matters: later steps only fill fields earlier steps left empty.
func (config *configOptions) build() *configOptions {
	config.loadEnvironment()
	// Populate the package-level namespace maps before the config file is
	// merged (loadConfigFile re-runs the loaders for file-provided values).
	config.loadTimeMachineNamespaces()
	config.loadRoutingNamespaces()
	config.loadPatchNamespaces()
	config.loadGridFsConfig()
	config.loadConfigFile()
	config.loadPlugins()
	config.setDefaults()
	return config
}
// loadEnvironment copies MONSTACHE_* environment variables into config
// fields that are still unset, so explicit flags/config-file values win
// over the environment. List-valued variables (URLs, namespaces) are
// split on EnvDelimiter, defaulting to ",". Empty values are ignored.
//
// Note: Go switch cases never fall through, so the `break` statement that
// previously ended each case (and the `default: continue`) was redundant
// and has been removed; behavior is unchanged.
func (config *configOptions) loadEnvironment() *configOptions {
	del := config.EnvDelimiter
	if del == "" {
		del = ","
	}
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if len(pair) < 2 {
			continue
		}
		name, val := pair[0], pair[1]
		if val == "" {
			continue
		}
		switch name {
		case "MONSTACHE_MONGO_URL":
			if config.MongoURL == "" {
				config.MongoURL = val
			}
		case "MONSTACHE_MONGO_CONFIG_URL":
			if config.MongoConfigURL == "" {
				config.MongoConfigURL = val
			}
		case "MONSTACHE_MONGO_OPLOG_DB":
			if config.MongoOpLogDatabaseName == "" {
				config.MongoOpLogDatabaseName = val
			}
		case "MONSTACHE_MONGO_OPLOG_COL":
			if config.MongoOpLogCollectionName == "" {
				config.MongoOpLogCollectionName = val
			}
		case "MONSTACHE_ES_URLS":
			if len(config.ElasticUrls) == 0 {
				config.ElasticUrls = strings.Split(val, del)
			}
		case "MONSTACHE_ES_USER":
			if config.ElasticUser == "" {
				config.ElasticUser = val
			}
		case "MONSTACHE_ES_PASS":
			if config.ElasticPassword == "" {
				config.ElasticPassword = val
			}
		case "MONSTACHE_ES_PEM":
			if config.ElasticPemFile == "" {
				config.ElasticPemFile = val
			}
		case "MONSTACHE_WORKER":
			if config.Worker == "" {
				config.Worker = val
			}
		case "MONSTACHE_CLUSTER":
			if config.ClusterName == "" {
				config.ClusterName = val
			}
		case "MONSTACHE_DIRECT_READ_NS":
			if len(config.DirectReadNs) == 0 {
				config.DirectReadNs = strings.Split(val, del)
			}
		case "MONSTACHE_CHANGE_STREAM_NS":
			if len(config.ChangeStreamNs) == 0 {
				config.ChangeStreamNs = strings.Split(val, del)
			}
		case "MONSTACHE_NS_REGEX":
			if config.NsRegex == "" {
				config.NsRegex = val
			}
		case "MONSTACHE_NS_EXCLUDE_REGEX":
			if config.NsExcludeRegex == "" {
				config.NsExcludeRegex = val
			}
		case "MONSTACHE_NS_DROP_REGEX":
			if config.NsDropRegex == "" {
				config.NsDropRegex = val
			}
		case "MONSTACHE_NS_DROP_EXCLUDE_REGEX":
			if config.NsDropExcludeRegex == "" {
				config.NsDropExcludeRegex = val
			}
		case "MONSTACHE_GRAYLOG_ADDR":
			if config.GraylogAddr == "" {
				config.GraylogAddr = val
			}
		// AWS credentials always take the environment value (no
		// empty-check in the original either).
		case "MONSTACHE_AWS_ACCESS_KEY":
			config.AWSConnect.AccessKey = val
		case "MONSTACHE_AWS_SECRET_KEY":
			config.AWSConnect.SecretKey = val
		case "MONSTACHE_AWS_REGION":
			config.AWSConnect.Region = val
		case "MONSTACHE_LOG_DIR":
			// A single directory fans out to one file per log level.
			config.Logs.Info = val + "/info.log"
			config.Logs.Warn = val + "/warn.log"
			config.Logs.Error = val + "/error.log"
			config.Logs.Trace = val + "/trace.log"
			config.Logs.Stats = val + "/stats.log"
		case "MONSTACHE_HTTP_ADDR":
			if config.HTTPServerAddr == "" {
				config.HTTPServerAddr = val
			}
		case "MONSTACHE_FILE_NS":
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_PATCH_NS":
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_TIME_MACHINE_NS":
			if len(config.TimeMachineNamespaces) == 0 {
				config.TimeMachineNamespaces = strings.Split(val, del)
			}
		}
	}
	return config
}
// loadRoutingNamespaces registers each configured routing namespace in
// the package-level routingNamespaces lookup table.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces registers each configured time-machine
// namespace in the package-level tmNamespaces lookup table.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces registers each configured patch namespace in the
// package-level patchNamespaces lookup table.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig registers each configured file (GridFS) namespace in
// the package-level fileNamespaces lookup table.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump logs the effective configuration as indented JSON with all
// secrets redacted. The value receiver is deliberate: redaction mutates
// only a copy, leaving the live configuration intact.
func (config configOptions) dump() {
	if config.MongoURL != "" {
		config.MongoURL = cleanMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = cleanMongoURL(config.MongoConfigURL)
	}
	if config.ElasticUser != "" {
		config.ElasticUser = redact
	}
	if config.ElasticPassword != "" {
		config.ElasticPassword = redact
	}
	if config.AWSConnect.AccessKey != "" {
		config.AWSConnect.AccessKey = redact
	}
	if config.AWSConnect.SecretKey != "" {
		config.AWSConnect.SecretKey = redact
	}
	if config.AWSConnect.Region != "" {
		config.AWSConnect.Region = redact
	}
	// Named "out" rather than "json" so the encoding/json package is not
	// shadowed (the original shadowed it with the result variable).
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
// validate sanity-checks the assembled configuration. Hard errors
// terminate the process via Fatalln/Fatalf; performance concerns only
// produce warnings.
func (config *configOptions) validate() {
	// With change events disabled, direct reads are the only work source.
	if config.DisableChangeEvents && len(config.DirectReadNs) == 0 {
		errorLog.Fatalln("Direct read namespaces must be specified if change events are disabled")
	}
	if config.AWSConnect.enabled() {
		if err := config.AWSConnect.validate(); err != nil {
			errorLog.Fatalln(err)
		}
	}
	// Direct reads push large volumes through the bulk processor; warn
	// about settings known to hurt throughput.
	if len(config.DirectReadNs) > 0 {
		if config.ElasticMaxSeconds < 5 {
			warnLog.Println("Direct read performance degrades with small values for elasticsearch-max-seconds. Set to 5s or greater to remove this warning.")
		}
		if config.ElasticMaxDocs > 0 {
			warnLog.Println("For performance reasons it is recommended to use elasticsearch-max-bytes instead of elasticsearch-max-docs since doc size may vary")
		}
	}
	// Parse-only check; the parsed duration is consumed elsewhere.
	if config.StatsDuration != "" {
		_, err := time.ParseDuration(config.StatsDuration)
		if err != nil {
			errorLog.Fatalf("Unable to parse stats duration: %s", err)
		}
	}
}
// setDefaults fills in every configuration field that is still unset
// after flags, environment, and config file have been merged. Ordering
// is significant (e.g. DisableChangeEvents overrides the ChangeStreamNs
// default set just above it).
func (config *configOptions) setDefaults() *configOptions {
	// Default to tailing the oplog-equivalent change stream when neither
	// the oplog nor explicit change-stream namespaces were requested.
	if !config.EnableOplog && len(config.ChangeStreamNs) == 0 {
		config.ChangeStreamNs = []string{""}
	}
	if config.DisableChangeEvents {
		config.ChangeStreamNs = []string{}
		config.EnableOplog = false
	}
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	// Resume-name precedence: cluster[:worker] > worker > explicit name >
	// package default. A cluster name also forces resume mode on.
	if config.ClusterName != "" {
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.Worker != "" {
		config.ResumeName = config.Worker
	} else if config.ResumeName == "" {
		config.ResumeName = resumeNameDefault
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	// Larger flush interval for direct reads (see validate's warning).
	if config.ElasticMaxSeconds == 0 {
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	if config.ElasticHealth0 == 0 {
		config.ElasticHealth0 = 15
	}
	if config.ElasticHealth1 == 0 {
		config.ElasticHealth1 = 5
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	// Go reference-time layouts: the date part is formatted per event.
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 && config.IndexFiles {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.RelateThreads == 0 {
		config.RelateThreads = relateThreadsDefault
	}
	if config.RelateBuffer == 0 {
		config.RelateBuffer = relateBufferDefault
	}
	if config.PostProcessors == 0 && processPlugin != nil {
		config.PostProcessors = postProcessorsDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	if config.ConfigDatabaseName == "" {
		config.ConfigDatabaseName = configDatabaseNameDefault
	}
	// A value that fits in 32 bits is treated as plain Unix seconds and
	// shifted into the high half of a MongoDB timestamp (seconds<<32|ordinal).
	if config.ResumeFromTimestamp > 0 {
		if config.ResumeFromTimestamp <= math.MaxInt32 {
			config.ResumeFromTimestamp = config.ResumeFromTimestamp << 32
		}
	}
	return config
}
// cleanMongoURL redacts any credentials embedded in a MongoDB connection
// string ("mongodb://user:pass@host/..." becomes
// "mongodb://<redact>@host/..."), preserving whichever scheme prefix was
// present.
func cleanMongoURL(URL string) string {
	const (
		scheme    = "mongodb://"
		schemeSrv = "mongodb+srv://"
	)
	prefix := ""
	rest := URL
	if strings.HasPrefix(rest, scheme) {
		prefix = scheme
		rest = rest[len(scheme):]
	} else if strings.HasPrefix(rest, schemeSrv) {
		prefix = schemeSrv
		rest = rest[len(schemeSrv):]
	}
	// Everything before the first "@" is the userinfo section.
	if at := strings.Index(rest, "@"); at != -1 {
		rest = redact + "@" + rest[at+1:]
	}
	return prefix + rest
}
// cancelConnection exits the process if an interrupt or terminate signal
// arrives while the initial MongoDB connection attempt is still in
// flight. Closing mongoOk releases this goroutine and (via the deferred
// Stop) restores default signal handling.
func (config *configOptions) cancelConnection(mongoOk chan bool) {
	sigs := make(chan os.Signal, 1)
	// SIGKILL can never be caught or handled by a process, so the
	// original's registration for syscall.SIGKILL was a no-op; only
	// SIGINT and SIGTERM are meaningful here.
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigs)
	select {
	case <-mongoOk:
		return
	case <-sigs:
		os.Exit(exitStatus)
	}
}
// dialMongo connects to MongoDB at the given URL and verifies the
// connection with a ping. The client options built for the first
// connection are cached on config and reused for subsequent connections
// (which only occur when adding shards, where just the host/replica-set
// part of the URL changes).
func (config *configOptions) dialMongo(URL string) (*mongo.Client, error) {
	var clientOptions *options.ClientOptions
	if config.mongoClientOptions == nil {
		// First connection: build full client options, registering a type
		// map entry so BSON datetimes decode as time.Time.
		rb := bson.NewRegistryBuilder()
		rb.RegisterTypeMapEntry(bsontype.DateTime, reflect.TypeOf(time.Time{}))
		reg := rb.Build()
		clientOptions = options.Client()
		clientOptions.ApplyURI(URL)
		clientOptions.SetAppName("monstache")
		clientOptions.SetRegistry(reg)
		config.mongoClientOptions = clientOptions
	} else {
		// Shard connections reuse the saved options with a new URI.
		clientOptions = config.mongoClientOptions
		clientOptions.ApplyURI(URL)
	}
	client, err := mongo.NewClient(clientOptions)
	if err != nil {
		return nil, err
	}
	// cancelConnection blocks until mongoOk closes or a signal arrives.
	// The original only closed the channel on the success path, leaking
	// the goroutine (and its signal registration) on every error return;
	// the defer closes it on all paths.
	mongoOk := make(chan bool)
	defer close(mongoOk)
	go config.cancelConnection(mongoOk)
	if err = client.Connect(context.Background()); err != nil {
		return nil, err
	}
	if err = client.Ping(context.Background(), nil); err != nil {
		return nil, err
	}
	return client, nil
}
// NewHTTPClient builds the HTTP client used for Elasticsearch traffic:
// optional custom CA from a PEM file, optional certificate-validation
// bypass, gzip support, and (when AWS credentials are configured) a
// request-signing wrapper for Amazon Elasticsearch Service.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err == nil {
			// AppendCertsFromPEM failing is logged but not fatal; the pool
			// is installed regardless, matching the original behavior.
			if ok := certs.AppendCertsFromPEM(ca); !ok {
				errorLog.Printf("No certs parsed successfully from %s", config.ElasticPemFile)
			}
			tlsConfig.RootCAs = certs
		} else {
			return client, err
		}
	}
	if !config.ElasticValidatePemFile {
		// Turn off validation
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	if config.AWSConnect.enabled() {
		client = aws.NewV4SigningClientWithHTTPClient(credentials.NewStaticCredentials(
			config.AWSConnect.AccessKey,
			config.AWSConnect.SecretKey,
			"",
		), config.AWSConnect.Region, client)
	}
	return client, err
}
// doDrop mirrors MongoDB drop operations in Elasticsearch: a dropped
// database removes all of its indexes, a dropped collection removes its
// single index. Metadata cleanup failures are logged but do not fail the
// drop itself. Each mirror is gated by its config flag.
func (ic *indexClient) doDrop(op *gtm.Op) (err error) {
	if db, dropped := op.IsDropDatabase(); dropped {
		if !ic.config.DroppedDatabases {
			return
		}
		if err = ic.deleteIndexes(db); err != nil {
			return
		}
		if e := ic.dropDBMeta(db); e != nil {
			errorLog.Printf("Unable to delete metadata for db: %s", e)
		}
		return
	}
	if col, dropped := op.IsDropCollection(); dropped {
		if !ic.config.DroppedCollections {
			return
		}
		ns := op.GetDatabase() + "." + col
		if err = ic.deleteIndex(ns); err != nil {
			return
		}
		if e := ic.dropCollectionMeta(ns); e != nil {
			errorLog.Printf("Unable to delete metadata for collection: %s", e)
		}
	}
	return
}
// hasFileContent reports whether file content should be ingested for the
// op: file indexing must be enabled and the op's namespace registered as
// a file namespace.
func (ic *indexClient) hasFileContent(op *gtm.Op) (ingest bool) {
	return ic.config.IndexFiles && fileNamespaces[op.Namespace]
}
// addPatch maintains a history of RFC 7386 JSON merge patches on the
// document under config.MergePatchAttr. For an update it fetches the
// previously indexed document from Elasticsearch, diffs it against the
// incoming data, and appends the patch; for an insert it stores the full
// document as patch version 1. Direct-read ops and ops without a
// timestamp are skipped. The patch list is written back into op.Data so
// it is indexed along with the document.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp.T == 0 {
		return nil
	}
	if op.IsUpdate() {
		// Fetch the last indexed revision, honoring meta overrides for
		// id/index/routing/parent so the correct document is located.
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		if meta.ID != "" {
			service.Id(meta.ID)
		}
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						// json.Unmarshal decodes all numbers as float64;
						// normalize the ts/v fields back to ints.
						merges = val.([]interface{})
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff only the document content, not the patch history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								merge["ts"] = op.Timestamp.T
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: seed the history with the full document as version 1,
		// unless a patch list is somehow already present.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp.T
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing converts a data-bearing gtm op into Elasticsearch bulk
// requests: an update-as-upsert request when IndexAsUpdate is enabled
// (and no pipeline/attachment is involved), otherwise a plain index
// request with external versioning metadata. It also handles the
// optional json-merge-patch history, stateful routing metadata, and a
// time-machine copy of the document.
func (ic *indexClient) doIndexing(op *gtm.Op) (err error) {
	meta := parseIndexMeta(op)
	if meta.Skip {
		return
	}
	ic.prepareDataForIndexing(op)
	objectID, indexType := opIDToString(op), ic.mapIndex(op)
	if ic.config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			// Patch failures are logged but never block indexing.
			if e := addPatch(ic.config, ic.client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	ingestAttachment := false
	if ic.hasFileContent(op) {
		ingestAttachment = op.Data["file"] != nil
	}
	if ic.config.IndexAsUpdate && meta.Pipeline == "" && !ingestAttachment {
		// Upsert path: meta overrides are applied after the defaults so
		// they win when present.
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		// NOTE: the original contained an empty `if meta.Type != "" {}`
		// branch here — dead code (index types are not applied on this
		// path); it has been removed with no behavior change.
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// Source() validates the request serializes before enqueueing.
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	} else {
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// File content is extracted server-side by the ingest-attachment
		// pipeline; this overrides any meta pipeline.
		if ingestAttachment {
			req.Pipeline("attachment")
		}
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	}
	if meta.shouldSave(ic.config) {
		if e := ic.setIndexMeta(op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || ic.config.TimeMachineDirectReads {
			// Time machine writes an immutable copy of every revision to a
			// date-stamped index named prefix.index.suffix.
			t := time.Now().UTC()
			tmIndex := func(idx string) string {
				pre, suf := ic.config.TimeMachineIndexPrefix, ic.config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			if !ic.config.IndexOplogTime {
				secs := int64(op.Timestamp.T)
				t := time.Unix(secs, 0).UTC()
				data[ic.config.OplogTsFieldName] = op.Timestamp
				data[ic.config.OplogDateFieldName] = t.Format(ic.config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(ic.config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			// Route revisions of the same source doc to the same shard.
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			if _, err = req.Source(); err == nil {
				ic.bulk.Add(req)
			}
		}
	}
	return
}
// doIndex runs the mapping stage on an op and then either indexes the
// mapped data or — when mapping removed the data from an update —
// deletes the document instead.
func (ic *indexClient) doIndex(op *gtm.Op) error {
	if err := mapData(ic.mongo, ic.config, op); err != nil {
		return err
	}
	if op.Data != nil {
		return ic.doIndexing(op)
	}
	if op.IsUpdate() {
		ic.doDelete(op)
	}
	return nil
}
// runProcessor hands the op to the user-supplied process plugin with the
// Elasticsearch and MongoDB handles it may need. Deletes carry no body,
// so the plugin receives a document containing only the _id.
func (ic *indexClient) runProcessor(op *gtm.Op) (err error) {
	doc := op.Data
	if op.IsDelete() {
		doc = map[string]interface{}{
			"_id": op.Id,
		}
	}
	input := &monstachemap.ProcessPluginInput{
		ElasticClient:        ic.client,
		ElasticBulkProcessor: ic.bulk,
		Timestamp:            op.Timestamp,
		Document:             doc,
		Namespace:            op.Namespace,
		Database:             op.GetDatabase(),
		Collection:           op.GetCollection(),
		Operation:            op.Operation,
		MongoClient:          ic.mongo,
		UpdateDescription:    op.UpdateDescription,
	}
	return processPlugin(input)
}
// routeProcess sends a deep copy of the op to the process-plugin channel.
// The data map is copied via a bson marshal/unmarshal round trip so the
// plugin cannot mutate the document the indexing pipeline is using. The
// op is forwarded even when the copy fails; the error is returned.
func (ic *indexClient) routeProcess(op *gtm.Op) (err error) {
	rop := >m.Op{
		Id:                op.Id,
		Operation:         op.Operation,
		Namespace:         op.Namespace,
		Source:            op.Source,
		Timestamp:         op.Timestamp,
		UpdateDescription: op.UpdateDescription,
	}
	if op.Data != nil {
		var raw []byte
		if raw, err = bson.Marshal(op.Data); err == nil {
			var copied map[string]interface{}
			if err = bson.Unmarshal(raw, &copied); err == nil {
				rop.Data = copied
			}
		}
	}
	ic.processC <- rop
	return
}
// routeDrop flushes pending bulk requests before applying a drop so that
// no queued writes target an index that is about to be deleted.
func (ic *indexClient) routeDrop(op *gtm.Op) (err error) {
	ic.bulk.Flush()
	return ic.doDrop(op)
}
// routeDeleteRelate queues a delete op for relate (relationship)
// processing when the deleted namespace participates in any configured
// relationship. The oplog delete event only carries the _id, so if any
// relation joins on a different source field the deleted document's
// content must first be recovered from Elasticsearch.
func (ic *indexClient) routeDeleteRelate(op *gtm.Op) (err error) {
	if rs := relates[op.Namespace]; len(rs) != 0 {
		var delData map[string]interface{}
		// A lookup is needed only when some relation joins on a field
		// other than _id.
		useFind := false
		for _, r := range rs {
			if r.SrcField != "_id" {
				useFind = true
				break
			}
		}
		if useFind {
			delData = findDeletedSrcDoc(ic.config, ic.client, op)
		} else {
			delData = map[string]interface{}{
				"_id": op.Id,
			}
		}
		if delData != nil {
			rop := >m.Op{
				Id:        op.Id,
				Operation: op.Operation,
				Namespace: op.Namespace,
				Source:    op.Source,
				Timestamp: op.Timestamp,
				Data:      delData,
			}
			// Non-blocking send: when the relate queue is full the op is
			// dropped with an error log rather than stalling the pipeline.
			select {
			case ic.relateC <- rop:
			default:
				errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
			}
		}
	}
	return
}
// routeDelete forwards a delete to relate processing when relationships
// are configured, and always enqueues the Elasticsearch delete itself.
func (ic *indexClient) routeDelete(op *gtm.Op) error {
	var err error
	if len(ic.config.Relate) > 0 {
		err = ic.routeDeleteRelate(op)
	}
	ic.doDelete(op)
	return err
}
// routeDataRelate forwards a data-bearing op to relate processing when
// its namespace participates in relationships. It returns skip=true when
// no relation keeps the source document, meaning the caller should NOT
// also index the op itself (the relate pipeline owns it); in that case
// the original op is sent as-is. Otherwise a deep copy is sent so relate
// processing cannot mutate the document the caller will still index.
func (ic *indexClient) routeDataRelate(op *gtm.Op) (skip bool, err error) {
	rs := relates[op.Namespace]
	if len(rs) == 0 {
		return
	}
	// skip stays true unless at least one relation wants the source kept.
	skip = true
	for _, r := range rs {
		if r.KeepSrc {
			skip = false
			break
		}
	}
	if skip {
		// Relate pipeline takes sole ownership; non-blocking send drops
		// the op with an error log if the queue is full.
		select {
		case ic.relateC <- op:
		default:
			errorLog.Printf(relateQueueOverloadMsg, op.Namespace, op.Id)
		}
	} else {
		rop := >m.Op{
			Id:                op.Id,
			Operation:         op.Operation,
			Namespace:         op.Namespace,
			Source:            op.Source,
			Timestamp:         op.Timestamp,
			UpdateDescription: op.UpdateDescription,
		}
		// Deep copy of the data via a bson round trip.
		var data []byte
		data, err = bson.Marshal(op.Data)
		if err == nil {
			var m map[string]interface{}
			err = bson.Unmarshal(data, &m)
			if err == nil {
				rop.Data = m
			}
		}
		select {
		case ic.relateC <- rop:
		default:
			errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
		}
	}
	return
}
// routeData dispatches a data-bearing op: relate processing may claim it
// entirely (skip); otherwise ops with file content go to the file
// channel and everything else to the plain indexing channel.
func (ic *indexClient) routeData(op *gtm.Op) (err error) {
	skip := false
	if op.IsSourceOplog() && len(ic.config.Relate) > 0 {
		skip, err = ic.routeDataRelate(op)
	}
	if skip {
		return
	}
	if ic.hasFileContent(op) {
		ic.fileC <- op
	} else {
		ic.indexC <- op
	}
	return
}
// routeOp is the top-level dispatcher for a gtm op: every op is offered
// to the process plugin (when configured), then routed by kind to the
// drop, delete, or data path.
func (ic *indexClient) routeOp(op *gtm.Op) (err error) {
	if processPlugin != nil {
		err = ic.routeProcess(op)
	}
	switch {
	case op.IsDrop():
		err = ic.routeDrop(op)
	case op.IsDelete():
		err = ic.routeDelete(op)
	case op.Data != nil:
		err = ic.routeData(op)
	}
	return
}
// processErr records an indexing error under the shared mutex: it marks
// the process exit status as failed, logs the error, and terminates
// immediately when fail-fast mode is configured.
func (ic *indexClient) processErr(err error) {
	mux.Lock()
	defer mux.Unlock()
	exitStatus = 1
	errorLog.Println(err)
	if ic.config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats writes a snapshot of the bulk-processor statistics into
// the date-bucketed stats index. A hostname lookup failure is returned
// but does not prevent the snapshot from being enqueued.
func (ic *indexClient) doIndexStats() (err error) {
	now := time.Now().UTC()
	doc := map[string]interface{}{
		"Timestamp": now.Format("2006-01-02T15:04:05"),
	}
	var hostname string
	if hostname, err = os.Hostname(); err == nil {
		doc["Host"] = hostname
	}
	doc["Pid"] = os.Getpid()
	doc["Stats"] = ic.bulk.Stats()
	// StatsIndexFormat is a Go reference-time layout; format and lowercase
	// it to produce a valid index name for today.
	index := strings.ToLower(now.Format(ic.config.StatsIndexFormat))
	req := elastic.NewBulkIndexRequest().Index(index)
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	req.Doc(doc)
	ic.bulkStats.Add(req)
	return
}
// dropDBMeta deletes all stored routing metadata belonging to a dropped
// database; only the stateful delete strategy keeps such metadata.
func (ic *indexClient) dropDBMeta(db string) (err error) {
	if ic.config.DeleteStrategy != statefulDeleteStrategy {
		return
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
	_, err = col.DeleteMany(context.Background(), bson.M{"db": db})
	return
}
// dropCollectionMeta deletes all stored routing metadata belonging to a
// dropped collection; only the stateful delete strategy keeps such
// metadata.
func (ic *indexClient) dropCollectionMeta(namespace string) (err error) {
	if ic.config.DeleteStrategy != statefulDeleteStrategy {
		return
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
	_, err = col.DeleteMany(context.Background(), bson.M{"namespace": namespace})
	return
}
// load populates the indexing metadata from a generic attribute map
// (e.g. values produced by a mapping script). String-ish attributes are
// normalized with %v; numeric attributes are parsed and silently ignored
// when unparsable, leaving the field at its zero value.
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
	asString := func(v interface{}) string {
		return fmt.Sprintf("%v", v)
	}
	if _, ok := metaAttrs["skip"]; ok {
		// Presence alone marks the op as skipped.
		meta.Skip = true
	}
	if v, ok := metaAttrs["routing"]; ok {
		meta.Routing = asString(v)
	}
	if v, ok := metaAttrs["index"]; ok {
		meta.Index = asString(v)
	}
	if v, ok := metaAttrs["id"]; ok {
		// Reuse the op id stringifier so ObjectIDs and other id types are
		// rendered consistently.
		meta.ID = opIDToString(>m.Op{Id: v})
	}
	if v, ok := metaAttrs["type"]; ok {
		meta.Type = asString(v)
	}
	if v, ok := metaAttrs["parent"]; ok {
		meta.Parent = asString(v)
	}
	if v, ok := metaAttrs["version"]; ok {
		if version, err := strconv.ParseInt(asString(v), 10, 64); err == nil {
			meta.Version = version
		}
	}
	if v, ok := metaAttrs["versionType"]; ok {
		meta.VersionType = asString(v)
	}
	if v, ok := metaAttrs["pipeline"]; ok {
		meta.Pipeline = asString(v)
	}
	if v, ok := metaAttrs["retryOnConflict"]; ok {
		if roc, err := strconv.Atoi(asString(v)); err == nil {
			meta.RetryOnConflict = roc
		}
	}
}
// shouldSave reports whether this metadata must be persisted so a later
// delete can find the document: only under the stateful delete strategy,
// and only when at least one targeting override is present.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing metadata for one document into the
// config database's meta collection, keyed by "namespace.id", so a later
// delete under the stateful strategy can target the right index/shard.
func (ic *indexClient) setIndexMeta(namespace, id string, meta *indexingMeta) error {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	fields := map[string]interface{}{
		"id":        meta.ID,
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	_, err := col.UpdateOne(
		context.Background(),
		bson.M{"_id": metaID},
		bson.M{"$set": fields},
		options.Update().SetUpsert(true),
	)
	return err
}
// getIndexMeta fetches the routing metadata previously saved by
// setIndexMeta for the document "namespace.id". The record is deleted
// after a successful read: it exists only to target the delete that is
// now in progress. Lookup or decode failures yield an empty (zero-value)
// meta rather than an error.
func (ic *indexClient) getIndexMeta(namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	result := col.FindOne(context.Background(), bson.M{
		"_id": metaID,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			// NOTE(review): these assertions assume each field was stored
			// as a string (as setIndexMeta does); a foreign document with
			// different types would panic here.
			if doc["id"] != nil {
				meta.ID = doc["id"].(string)
			}
			if doc["routing"] != nil {
				meta.Routing = doc["routing"].(string)
			}
			if doc["index"] != nil {
				meta.Index = strings.ToLower(doc["index"].(string))
			}
			if doc["type"] != nil {
				meta.Type = doc["type"].(string)
			}
			if doc["parent"] != nil {
				meta.Parent = doc["parent"].(string)
			}
			if doc["pipeline"] != nil {
				meta.Pipeline = doc["pipeline"].(string)
			}
			// Metadata is single-use: consumed by the delete, then removed.
			col.DeleteOne(context.Background(), bson.M{"_id": metaID})
		}
	}
	return
}
// loadBuiltinFunctions installs the built-in MongoDB query helpers
// (findId, findOne, find, pipe) into every mapping-script VM. Each
// helper is bound to the VM's namespace and shares the given client.
// Registration failures are fatal since scripts depend on these names.
func loadBuiltinFunctions(client *mongo.Client, config *configOptions) {
	for ns, env := range mapEnvs {
		finders := []*findConf{
			{client: client, name: "findId", vm: env.VM, ns: ns, byId: true},
			{client: client, name: "findOne", vm: env.VM, ns: ns},
			{client: client, name: "find", vm: env.VM, ns: ns, multi: true},
			{client: client, name: "pipe", vm: env.VM, ns: ns, multi: true,
				pipe: true, pipeAllowDisk: config.PipeAllowDisk},
		}
		for _, fa := range finders {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				errorLog.Fatalln(err)
			}
		}
	}
}
// setDatabase overrides the call's target database when the options map
// carries a "database" entry; a non-string value is an error.
func (fc *findCall) setDatabase(topts map[string]interface{}) error {
	ov, ok := topts["database"]
	if !ok {
		return nil
	}
	name, isString := ov.(string)
	if !isString {
		return errors.New("Invalid database option value")
	}
	fc.db = name
	return nil
}
// setCollection overrides the call's target collection when the options
// map carries a "collection" entry; a non-string value is an error.
func (fc *findCall) setCollection(topts map[string]interface{}) error {
	ov, ok := topts["collection"]
	if !ok {
		return nil
	}
	name, isString := ov.(string)
	if !isString {
		return errors.New("Invalid collection option value")
	}
	fc.col = name
	return nil
}
// setSelect copies a "select" projection map from the options into the
// call. Only int64 values (how otto exports Javascript integers) are
// kept; other value types are silently skipped. A non-map "select" is an
// error.
func (fc *findCall) setSelect(topts map[string]interface{}) error {
	ov, ok := topts["select"]
	if !ok {
		return nil
	}
	sel, isMap := ov.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid select option value")
	}
	for field, raw := range sel {
		if n, isInt := raw.(int64); isInt {
			fc.sel[field] = int(n)
		}
	}
	return nil
}
// setSort copies a "sort" specification map from the options into the
// call. Only int64 values (how otto exports Javascript integers) are
// kept; other value types are silently skipped. A non-map "sort" is an
// error.
func (fc *findCall) setSort(topts map[string]interface{}) error {
	ov, ok := topts["sort"]
	if !ok {
		return nil
	}
	spec, isMap := ov.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid sort option value")
	}
	for field, raw := range spec {
		if n, isInt := raw.(int64); isInt {
			fc.sort[field] = int(n)
		}
	}
	return nil
}
// setLimit applies a "limit" option to the call; otto exports Javascript
// integers as int64, so any other type is an error.
func (fc *findCall) setLimit(topts map[string]interface{}) error {
	ov, ok := topts["limit"]
	if !ok {
		return nil
	}
	n, isInt := ov.(int64)
	if !isInt {
		return errors.New("Invalid limit option value")
	}
	fc.limit = int(n)
	return nil
}
// setQuery exports the Javascript query value, deep-converts it to plain
// Go values, and restores hex ObjectID strings to real ObjectIDs.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	exported, err := v.Export()
	if err != nil {
		return err
	}
	fc.query = fc.restoreIds(deepExportValue(exported))
	return nil
}
// setOptions applies the optional second argument of a find call: the
// database/collection/select overrides always, plus sort and limit for
// multi-result calls only. Any non-object argument is rejected.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	raw, exportErr := v.Export()
	if exportErr != nil {
		return errors.New("Invalid options argument")
	}
	topts, isMap := raw.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid options argument")
	}
	if err = fc.setDatabase(topts); err != nil {
		return
	}
	if err = fc.setCollection(topts); err != nil {
		return
	}
	if err = fc.setSelect(topts); err != nil {
		return
	}
	if !fc.isMulti() {
		return
	}
	if err = fc.setSort(topts); err != nil {
		return
	}
	return fc.setLimit(topts)
}
// setDefaults seeds the call's database and collection from the
// namespace ("db.collection") bound when the helper was registered.
func (fc *findCall) setDefaults() {
	if fc.config.ns != "" {
		// NOTE(review): assumes ns always contains a "."; if it does not,
		// SplitN returns a single element and ns[1] panics — confirm that
		// namespaces are validated upstream.
		ns := strings.SplitN(fc.config.ns, ".", 2)
		fc.db = ns[0]
		fc.col = ns[1]
	}
}
// getCollection returns the MongoDB collection handle for the call's
// currently selected database and collection.
func (fc *findCall) getCollection() *mongo.Collection {
	return fc.client.Database(fc.db).Collection(fc.col)
}
// getVM returns the otto Javascript VM this call is bound to.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the script-visible helper name (e.g. "findId",
// "find", "pipe") used in error messages.
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether the call returns multiple documents
// (find/pipe) rather than a single one (findId/findOne).
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// isPipe reports whether the call runs an aggregation pipeline instead
// of a plain find.
func (fc *findCall) isPipe() bool {
	return fc.config.pipe
}
// pipeAllowDisk reports whether aggregation stages may spill to disk
// (MongoDB allowDiskUse option).
func (fc *findCall) pipeAllowDisk() bool {
	return fc.config.pipeAllowDisk
}
// logError reports a failure in a script helper, prefixed with the
// helper's script-visible name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds recursively walks an exported Javascript value and converts
// every string that parses as a hex ObjectID back into a
// primitive.ObjectID, so script-written queries can match _id fields.
// All other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch val := v.(type) {
	case string:
		if oid, err := primitive.ObjectIDFromHex(val); err == nil {
			return oid
		}
		return v
	case []map[string]interface{}:
		var out []interface{}
		for _, item := range val {
			converted := make(map[string]interface{})
			for key, inner := range item {
				converted[key] = fc.restoreIds(inner)
			}
			out = append(out, converted)
		}
		return out
	case []interface{}:
		var out []interface{}
		for _, item := range val {
			out = append(out, fc.restoreIds(item))
		}
		return out
	case map[string]interface{}:
		converted := make(map[string]interface{})
		for key, inner := range val {
			converted[key] = fc.restoreIds(inner)
		}
		return converted
	default:
		return v
	}
}
// execute runs the configured MongoDB operation and converts the result
// into an otto value for the calling script: an aggregation or multi-doc
// find produces an array of documents, while findId/findOne produce a
// single document (findId wraps the query as {_id: query}).
func (fc *findCall) execute() (r otto.Value, err error) {
	var cursor *mongo.Cursor
	col := fc.getCollection()
	query := fc.query
	if fc.isMulti() {
		if fc.isPipe() {
			ao := options.Aggregate()
			ao.SetAllowDiskUse(fc.pipeAllowDisk())
			cursor, err = col.Aggregate(context.Background(), query, ao)
			if err != nil {
				return
			}
		} else {
			fo := options.Find()
			if fc.limit > 0 {
				fo.SetLimit(int64(fc.limit))
			}
			if len(fc.sort) > 0 {
				fo.SetSort(fc.sort)
			}
			if len(fc.sel) > 0 {
				fo.SetProjection(fc.sel)
			}
			cursor, err = col.Find(context.Background(), query, fo)
			if err != nil {
				return
			}
		}
		// Release server-side cursor resources on every exit path; the
		// original never closed the cursor and leaked it on the early
		// return inside the decode loop.
		defer cursor.Close(context.Background())
		var rdocs []map[string]interface{}
		for cursor.Next(context.Background()) {
			doc := make(map[string]interface{})
			if err = cursor.Decode(&doc); err != nil {
				return
			}
			rdocs = append(rdocs, convertMapJavascript(doc))
		}
		r, err = fc.getVM().ToValue(rdocs)
	} else {
		fo := options.FindOne()
		if fc.config.byId {
			query = bson.M{"_id": query}
		}
		if len(fc.sel) > 0 {
			fo.SetProjection(fc.sel)
		}
		result := col.FindOne(context.Background(), query, fo)
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				rdoc := convertMapJavascript(doc)
				r, err = fc.getVM().ToValue(rdoc)
			}
		}
	}
	return
}
// makeFind builds the Javascript-callable implementation for one query
// helper (findId/findOne/find/pipe). The returned closure validates the
// arguments — a required query plus an optional options object — runs
// the query, and returns the result to the script, or Javascript null
// (with the error logged) on any failure.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		// Fresh per-invocation state; the shared findConf is read-only.
		fc := &findCall{
			config: fa,
			client: fa.client,
			sort:   make(map[string]int),
			sel:    make(map[string]int),
		}
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			// Options must be applied before the db/collection check since
			// they may supply the database and collection overrides.
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// findDeletedSrcDoc recovers the content of a just-deleted document by
// searching Elasticsearch for its _id across the configured delete index
// pattern. The oplog delete event carries only the _id, so this is the
// only way to obtain the fields relate processing joins on. Returns nil
// (with an error logged) unless exactly one hit with a source is found.
func findDeletedSrcDoc(config *configOptions, client *elastic.Client, op *gtm.Op) map[string]interface{} {
	objectID := opIDToString(op)
	termQuery := elastic.NewTermQuery("_id", objectID)
	search := client.Search()
	search.Size(1)
	search.Index(config.DeleteIndexPattern)
	search.Query(termQuery)
	searchResult, err := search.Do(context.Background())
	if err != nil {
		errorLog.Printf("Unable to find deleted document %s: %s", objectID, err)
		return nil
	}
	if searchResult.Hits == nil {
		errorLog.Printf("Unable to find deleted document %s", objectID)
		return nil
	}
	if searchResult.TotalHits() == 0 {
		errorLog.Printf("Found no hits for deleted document %s", objectID)
		return nil
	}
	// More than one hit means the id is ambiguous across the pattern; a
	// wrong pick could corrupt related documents, so give up instead.
	if searchResult.TotalHits() > 1 {
		errorLog.Printf("Found multiple hits for deleted document %s", objectID)
		return nil
	}
	hit := searchResult.Hits.Hits[0]
	if hit.Source == nil {
		errorLog.Printf("Source unavailable for deleted document %s", objectID)
		return nil
	}
	var src map[string]interface{}
	if err = json.Unmarshal(hit.Source, &src); err == nil {
		// Use the original (typed) id rather than its string form.
		src["_id"] = op.Id
		return src
	}
	errorLog.Printf("Unable to unmarshal deleted document %s: %s", objectID, err)
	return nil
}
// tsVersion packs a MongoDB oplog timestamp into a single int64 suitable for
// use as an external Elasticsearch document version: the seconds component
// occupies the high 32 bits and the ordinal the low 32 bits, so versions
// order the same way timestamps do.
func tsVersion(ts primitive.Timestamp) int64 {
	return (int64(ts.T) << 32) | int64(ts.I)
}
// doDelete enqueues a bulk delete request for the document referenced by op,
// resolving the target index/routing/parent according to the configured
// delete strategy. With the stateful strategy the metadata comes from the
// stored index meta; with the stateless strategy it is recovered by searching
// Elasticsearch for the document; the ignore strategy is a no-op.
func (ic *indexClient) doDelete(op *gtm.Op) {
	// Check the cheap no-op case before allocating the bulk request.
	if ic.config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	objectID, indexType, meta := opIDToString(op), ic.mapIndex(op), &indexingMeta{}
	req.Id(objectID)
	if !ic.config.IndexAsUpdate {
		// External versioning from the oplog timestamp lets Elasticsearch
		// discard out-of-order deletes.
		req.Version(tsVersion(op.Timestamp))
		req.VersionType("external")
	}
	switch ic.config.DeleteStrategy {
	case statefulDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			meta = ic.getIndexMeta(op.Namespace, objectID)
		}
		// Default to the mapped index; stored meta overrides it when present.
		req.Index(indexType.Index)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	case statelessDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Custom routing was used at index time, so the document's
			// location must be recovered by searching for its id.
			termQuery := elastic.NewTermQuery("_id", objectID)
			search := ic.client.Search()
			search.FetchSource(false)
			search.Size(1)
			search.Index(ic.config.DeleteIndexPattern)
			search.Query(termQuery)
			searchResult, err := search.Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s",
					objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.TotalHits() == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s",
					objectID, ic.config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
		}
	default:
		// Unknown strategy value: do nothing rather than guess.
		return
	}
	ic.bulk.Add(req)
}
// gtmDefaultSettings returns the baseline gtm tuning values used when the
// configuration file does not override them.
func gtmDefaultSettings() gtmSettings {
	var defaults gtmSettings
	defaults.ChannelSize = gtmChannelSizeDefault
	defaults.BufferSize = 32
	defaults.BufferDuration = "75ms"
	return defaults
}
// notifySdFailed reports the outcome of an unsent systemd READY notification.
// A nil error means systemd is simply not present (NOTIFY_SOCKET unset),
// which is only worth mentioning in verbose mode.
func (ic *indexClient) notifySdFailed(err error) {
	if err == nil {
		if ic.config.Verbose {
			warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
		}
		return
	}
	errorLog.Printf("Systemd notification failed: %s", err)
}
// watchdogSdFailed reports why the systemd watchdog loop was not started.
// A nil error means the watchdog is simply not enabled, which is only noted
// in verbose mode.
func (ic *indexClient) watchdogSdFailed(err error) {
	if err == nil {
		if ic.config.Verbose {
			warnLog.Println("Systemd WATCHDOG not enabled")
		}
		return
	}
	errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
}
// serveHttp runs the monitoring HTTP server until it is shut down.
// ListenAndServe always returns a non-nil error; it is fatal only when the
// server was not deliberately shut down (ctx.shutdown false).
func (ctx *httpServerCtx) serveHttp() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	// Recorded so the /started endpoint can report uptime.
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		errorLog.Fatalf("Unable to serve http at address %s: %s", s.Addr, err)
	}
}
// buildServer constructs the monitoring HTTP server and its mux, exposing
// uptime (/started), liveness (/healthz), and optionally bulk statistics
// (/stats) and pprof endpoints. The server is stored on ctx; it is started
// separately by serveHttp.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		// time.Since is the idiomatic form of time.Now().Sub(t).
		data := time.Since(ctx.started).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write(stats)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// startNotify launches the systemd READY/WATCHDOG notification loop in the
// background; notifySd exits on its own if systemd is not present.
func (ic *indexClient) startNotify() {
	go ic.notifySd()
}
// notifySd sends READY=1 to systemd and, if the systemd watchdog is enabled,
// keeps sending WATCHDOG=1 at half the watchdog interval forever. It returns
// early (ending the goroutine) when systemd is absent or the watchdog is
// disabled.
func (ic *indexClient) notifySd() {
	var interval time.Duration
	config := ic.config
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		ic.notifySdFailed(err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		ic.watchdogSdFailed(err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			ic.notifySdFailed(err)
			return
		}
		// Ping at twice the required rate so a single delayed send does not
		// trip the watchdog.
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns the callback gtm invokes when a new shard
// appears in the cluster; it dials the shard so tailing can begin.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mongo.Client, error) {
		url := shardInfo.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(url))
		return config.dialMongo(url)
	}
}
// buildPipe returns the aggregation-pipeline builder to hand to gtm: the
// compiled pipeline plugin if one is loaded, else a wrapper around any
// configured JavaScript pipeline environments, else nil (no pipeline).
// The returned function is called per namespace and must yield an array of
// pipeline stages.
func buildPipe(config *configOptions) func(string, bool) ([]interface{}, error) {
	if pipePlugin != nil {
		return pipePlugin
	} else if len(pipeEnvs) > 0 {
		return func(ns string, changeEvent bool) ([]interface{}, error) {
			// Global mux guards the shared pipeEnvs map.
			mux.Lock()
			defer mux.Unlock()
			// Try the catch-all env ("") first, then the namespace-specific one.
			nss := []string{"", ns}
			// NOTE: the loop variable shadows the ns parameter; the shadowed
			// value is what gets passed to the script below.
			for _, ns := range nss {
				if env := pipeEnvs[ns]; env != nil {
					// Deferred unlock is safe here because every path inside
					// this branch returns before the next iteration.
					env.lock.Lock()
					defer env.lock.Unlock()
					val, err := env.VM.Call("module.exports", ns, ns, changeEvent)
					if err != nil {
						return nil, err
					}
					if strings.ToLower(val.Class()) == "array" {
						data, err := val.Export()
						if err != nil {
							return nil, err
						} else if data == val {
							// otto returns the value itself when it cannot
							// export — treat that as a non-array result.
							return nil, errors.New("Exported pipeline function must return an array")
						} else {
							switch data.(type) {
							case []map[string]interface{}:
								ds := data.([]map[string]interface{})
								var is []interface{} = make([]interface{}, len(ds))
								for i, d := range ds {
									is[i] = deepExportValue(d)
								}
								return is, nil
							case []interface{}:
								// Mixed/non-object arrays are only accepted
								// when empty (treated as "no pipeline").
								ds := data.([]interface{})
								if len(ds) > 0 {
									errorLog.Fatalln("Pipeline function must return an array of objects")
								}
								return nil, nil
							default:
								errorLog.Fatalln("Pipeline function must return an array of objects")
							}
						}
					} else {
						return nil, errors.New("Exported pipeline function must return an array")
					}
				}
			}
			return nil, nil
		}
	}
	return nil
}
// sigListen installs a handler that begins a graceful shutdown (with a 10s
// deadline) on the first SIGINT or SIGTERM. SIGKILL is intentionally not
// registered: per the os/signal documentation it can never be caught, so
// listing it was a no-op.
func (ic *indexClient) sigListen() {
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		<-sigs
		ic.shutdown(10)
	}()
}
// startHttpServer builds and launches the monitoring HTTP server when it is
// enabled in the configuration; otherwise it does nothing.
func (ic *indexClient) startHttpServer() {
	if !ic.config.EnableHTTPServer {
		return
	}
	serverCtx := &httpServerCtx{
		bulk:   ic.bulk,
		config: ic.config,
	}
	ic.hsc = serverCtx
	serverCtx.buildServer()
	go serverCtx.serveHttp()
}
// setupFileIndexing validates the file-indexing configuration and installs
// the attachment mapping. It is fatal to enable file indexing without any
// file namespaces, or to fail creating the mapping.
func (ic *indexClient) setupFileIndexing() {
	if !ic.config.IndexFiles {
		return
	}
	if len(ic.config.FileNamespaces) == 0 {
		errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
	}
	if err := ic.ensureFileMapping(); err != nil {
		errorLog.Fatalf("Unable to setup file indexing: %s", err)
	}
}
// setupBulk creates the main bulk processor and, when statistics indexing is
// enabled, a second dedicated stats bulk processor. Failure to start either
// is fatal.
func (ic *indexClient) setupBulk() {
	conf := ic.config
	var err error
	if ic.bulk, err = conf.newBulkProcessor(ic.client); err != nil {
		errorLog.Fatalf("Unable to start bulk processor: %s", err)
	}
	// bulkStats stays nil unless stats indexing is turned on.
	ic.bulkStats = nil
	if conf.IndexStats {
		if ic.bulkStats, err = conf.newStatsBulkProcessor(ic.client); err != nil {
			errorLog.Fatalf("Unable to start stats bulk processor: %s", err)
		}
	}
}
// run wires up and starts every subsystem in dependency order and then
// blocks in the event loop. The order matters: workers (relate, index,
// download, post-process) must exist before listening starts feeding them,
// and clusterWait may pause before any events are consumed.
func (ic *indexClient) run() {
	ic.startNotify()
	ic.setupFileIndexing()
	ic.setupBulk()
	ic.startHttpServer()
	ic.sigListen()
	ic.startCluster()
	ic.startRelate()
	ic.startIndex()
	ic.startDownload()
	ic.startPostProcess()
	ic.startReadWait()
	ic.clusterWait()
	ic.startListen()
	ic.eventLoop()
}
// startDownload spins up the configured number of file-download workers.
// Each worker attaches GridFS file content to operations from fileC and then
// forwards them to the indexing channel — even when the download fails, so
// the document itself is still indexed.
func (ic *indexClient) startDownload() {
	workers := ic.config.FileDownloaders
	for n := 0; n < workers; n++ {
		ic.fileWg.Add(1)
		go func() {
			defer ic.fileWg.Done()
			for download := range ic.fileC {
				if err := ic.addFileContent(download); err != nil {
					ic.processErr(err)
				}
				ic.indexC <- download
			}
		}()
	}
}
// startPostProcess spins up the configured number of post-processing workers
// consuming from processC; processing errors are routed through processErr.
func (ic *indexClient) startPostProcess() {
	workers := ic.config.PostProcessors
	for n := 0; n < workers; n++ {
		ic.processWg.Add(1)
		go func() {
			defer ic.processWg.Done()
			for item := range ic.processC {
				if err := ic.runProcessor(item); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startReadWait, when direct reads are configured, launches a goroutine that
// waits for all direct reads to finish, optionally saves the resume
// timestamp, and — if exit-after-direct-reads is set — drains and closes the
// worker pipeline in order (relate → file → index → process) before signaling
// the event loop to shut down with a 30 second deadline.
func (ic *indexClient) startReadWait() {
	if len(ic.config.DirectReadNs) > 0 {
		go func() {
			ic.gtmCtx.DirectReadWg.Wait()
			infoLog.Println("Direct reads completed")
			if ic.config.Resume {
				ic.saveTimestampFromReplStatus()
			}
			if ic.config.ExitAfterDirectReads {
				infoLog.Println("Stopping all workers")
				ic.gtmCtx.Stop()
				// Wait for the event loop to confirm it has drained gtm's
				// channels before closing the pipeline stages.
				<-ic.opsConsumed
				// Close each stage and wait for its workers before closing
				// the next, so no send ever hits a closed channel.
				close(ic.relateC)
				ic.relateWg.Wait()
				close(ic.fileC)
				ic.fileWg.Wait()
				close(ic.indexC)
				ic.indexWg.Wait()
				close(ic.processC)
				ic.processWg.Wait()
				// 30 is the shutdown timeout in seconds.
				ic.doneC <- 30
			}
		}()
	}
}
// dialShards discovers the shard servers from the config database and dials
// each one, returning the resulting clients. Finding no shards, or failing
// to connect to any shard, is fatal.
func (ic *indexClient) dialShards() []*mongo.Client {
	shardInfos := gtm.GetShards(ic.mongoConfig)
	if len(shardInfos) == 0 {
		errorLog.Fatalln("Shards enabled but none found in config.shards collection")
	}
	clients := make([]*mongo.Client, 0, len(shardInfos))
	for _, info := range shardInfos {
		url := info.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(url))
		client, err := ic.config.dialMongo(url)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb shard using URL %s: %s", cleanMongoURL(url), err)
		}
		clients = append(clients, client)
	}
	return clients
}
// buildTimestampGen returns the gtm timestamp generator that decides where
// tailing starts: the zero timestamp for replay mode, a fixed point for
// resume-from-timestamp, a stored (or latest) point for resume mode, and nil
// (gtm's default) otherwise.
func (ic *indexClient) buildTimestampGen() gtm.TimestampGenerator {
	var after gtm.TimestampGenerator
	config := ic.config
	if config.Replay {
		// Replay: start from the beginning of the oplog.
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			return primitive.Timestamp{}, nil
		}
	} else if config.ResumeFromTimestamp != 0 {
		// Fixed resume point packed as seconds<<32 | ordinal.
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			return primitive.Timestamp{
				T: uint32(config.ResumeFromTimestamp >> 32),
				I: uint32(config.ResumeFromTimestamp),
			}, nil
		}
	} else if config.Resume {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			var ts primitive.Timestamp
			var err error
			// Look up the timestamp saved under the resume name.
			col := client.Database(config.ConfigDatabaseName).Collection("monstache")
			result := col.FindOne(context.Background(), bson.M{
				"_id": config.ResumeName,
			})
			if err = result.Err(); err == nil {
				doc := make(map[string]interface{})
				if err = result.Decode(&doc); err == nil {
					if doc["ts"] != nil {
						ts = doc["ts"].(primitive.Timestamp)
						// Advance past the already-processed op.
						ts.I += 1
					}
				}
			}
			// No saved timestamp: fall back to the newest oplog entry
			// (lookup errors are deliberately ignored here).
			if ts.T == 0 {
				ts, _ = gtm.LastOpTimestamp(client, options)
			}
			infoLog.Printf("Resuming from timestamp %+v", ts)
			return ts, nil
		}
	}
	return after
}
// buildConnections returns the set of MongoDB clients gtm should tail: every
// shard when running against a sharded cluster (config server URL set),
// otherwise just the primary connection.
func (ic *indexClient) buildConnections() []*mongo.Client {
	conf := ic.config
	if !conf.readShards() {
		return []*mongo.Client{ic.mongo}
	}
	// A config server URL means this is a sharded cluster.
	configClient, err := conf.dialMongo(conf.MongoConfigURL)
	if err != nil {
		errorLog.Fatalf("Unable to connect to mongodb config server using URL %s: %s",
			cleanMongoURL(conf.MongoConfigURL), err)
	}
	ic.mongoConfig = configClient
	return ic.dialShards()
}
// buildFilterChain assembles the namespace-level op filters: the built-in
// exclusions (monstache's own collections, system collections, GridFS
// chunks, and — for sharded clusters — the config database) plus any
// configured include/exclude regex filters.
func (ic *indexClient) buildFilterChain() []gtm.OpFilter {
	conf := ic.config
	chain := []gtm.OpFilter{notMonstache(conf), notSystem, notChunks}
	if conf.readShards() {
		chain = append(chain, notConfig)
	}
	if pattern := conf.NsRegex; pattern != "" {
		chain = append(chain, filterWithRegex(pattern))
	}
	if pattern := conf.NsDropRegex; pattern != "" {
		chain = append(chain, filterDropWithRegex(pattern))
	}
	if pattern := conf.NsExcludeRegex; pattern != "" {
		chain = append(chain, filterInverseWithRegex(pattern))
	}
	if pattern := conf.NsDropExcludeRegex; pattern != "" {
		chain = append(chain, filterDropInverseWithRegex(pattern))
	}
	return chain
}
// buildFilterArray assembles the document-level op filters: the consistent-
// hash worker filter (when running as one of several workers) and the
// plugin- or script-based filter. The plugin/script filter is also remembered
// on the client for use outside the chain.
func (ic *indexClient) buildFilterArray() []gtm.OpFilter {
	conf := ic.config
	filters := []gtm.OpFilter{}
	if conf.Worker != "" {
		workerFilter, err := consistent.ConsistentHashFilter(conf.Worker, conf.Workers)
		if err != nil {
			errorLog.Fatalln(err)
		}
		filters = append(filters, workerFilter)
	} else if conf.Workers != nil {
		errorLog.Fatalln("Workers configured but this worker is undefined. worker must be set to one of the workers.")
	}
	var pluginFilter gtm.OpFilter
	switch {
	case filterPlugin != nil:
		pluginFilter = filterWithPlugin()
	case len(filterEnvs) > 0:
		pluginFilter = filterWithScript()
	}
	if pluginFilter != nil {
		filters = append(filters, pluginFilter)
		ic.filter = pluginFilter
	}
	return filters
}
// buildGtmOptions translates the monstache configuration into the gtm.Options
// used to start tailing: filters, buffering, direct-read settings, resume
// timestamp generator, and pipeline builder.
func (ic *indexClient) buildGtmOptions() *gtm.Options {
	var nsFilter, filter, directReadFilter gtm.OpFilter
	config := ic.config
	filterChain := ic.buildFilterChain()
	filterArray := ic.buildFilterArray()
	// Namespace-level filters gate which collections are tailed; the
	// document-level filters apply to both the oplog and direct reads.
	nsFilter = gtm.ChainOpFilters(filterChain...)
	filter = gtm.ChainOpFilters(filterArray...)
	directReadFilter = gtm.ChainOpFilters(filterArray...)
	gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
	if err != nil {
		errorLog.Fatalf("Unable to parse gtm buffer duration %s: %s",
			config.GtmSettings.BufferDuration, err)
	}
	after := ic.buildTimestampGen()
	gtmOpts := &gtm.Options{
		After:               after,
		Filter:              filter,
		NamespaceFilter:     nsFilter,
		OpLogDisabled:       config.EnableOplog == false,
		OpLogDatabaseName:   config.MongoOpLogDatabaseName,
		OpLogCollectionName: config.MongoOpLogCollectionName,
		ChannelSize:         config.GtmSettings.ChannelSize,
		Ordering:            gtm.AnyOrder,
		WorkerCount:         10,
		BufferDuration:      gtmBufferDuration,
		BufferSize:          config.GtmSettings.BufferSize,
		DirectReadNs:        config.DirectReadNs,
		DirectReadSplitMax:  int32(config.DirectReadSplitMax),
		DirectReadConcur:    config.DirectReadConcur,
		DirectReadFilter:    directReadFilter,
		Log:                 infoLog,
		Pipe:                buildPipe(config),
		ChangeStreamNs:      config.ChangeStreamNs,
	}
	return gtmOpts
}
// startListen starts gtm tailing across all connections and, on sharded
// clusters without change events, registers a listener that picks up shards
// added while running.
func (ic *indexClient) startListen() {
	conf := ic.config
	opts := ic.buildGtmOptions()
	ic.gtmCtx = gtm.StartMulti(ic.buildConnections(), opts)
	if conf.readShards() && !conf.DisableChangeEvents {
		ic.gtmCtx.AddShardListener(ic.mongoConfig, opts, conf.makeShardInsertHandler())
	}
}
// clusterWait, in cluster mode, blocks until this process becomes the active
// cluster member. If it is not already enabled it stops the bulk processor
// and retries enableProcess every 10 seconds until it wins, then restarts
// bulk processing. Outside cluster mode it returns immediately.
func (ic *indexClient) clusterWait() {
	if ic.config.ClusterName != "" {
		if ic.enabled {
			infoLog.Printf("Starting work for cluster %s", ic.config.ClusterName)
		} else {
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			// Stop the bulk processor while passive so nothing is flushed.
			ic.bulk.Stop()
			for range heartBeat.C {
				var err error
				ic.enabled, err = ic.enableProcess()
				if err != nil {
					errorLog.Printf("Error attempting to become active cluster process: %s", err)
					continue
				}
				if ic.enabled {
					infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
					ic.bulk.Start(context.Background())
					break
				}
			}
		}
	}
}
// nextTimestamp persists the resume timestamp if it has advanced since the
// last save. The bulk processor is flushed first so the saved timestamp never
// runs ahead of the data actually indexed.
func (ic *indexClient) nextTimestamp() {
	current, saved := ic.lastTs, ic.lastTsSaved
	advanced := current.T > saved.T ||
		(current.T == saved.T && current.I > saved.I)
	if !advanced {
		return
	}
	ic.bulk.Flush()
	if err := ic.saveTimestamp(); err != nil {
		ic.processErr(err)
		return
	}
	ic.lastTsSaved = ic.lastTs
}
// nextStats emits bulk-processor statistics on the stats interval: indexed
// into Elasticsearch when index-stats is enabled, otherwise written as JSON
// to the stats log.
func (ic *indexClient) nextStats() {
	if ic.config.IndexStats {
		if err := ic.doIndexStats(); err != nil {
			errorLog.Printf("Error indexing statistics: %s", err)
		}
		return
	}
	encoded, err := json.Marshal(ic.bulk.Stats())
	if err != nil {
		errorLog.Printf("Unable to log statistics: %s", err)
		return
	}
	statsLog.Println(string(encoded))
}
// nextHeartbeat runs on each cluster heartbeat tick. An enabled process
// re-verifies its claim and, if it lost it, pauses gtm and bulk processing
// and blocks here retrying every 10s until it becomes active again. A
// disabled process simply attempts to become active.
func (ic *indexClient) nextHeartbeat() {
	var err error
	if ic.enabled {
		ic.enabled, err = ic.ensureEnabled()
		if err != nil {
			ic.processErr(err)
		}
		if !ic.enabled {
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.gtmCtx.Pause()
			ic.bulk.Stop()
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			// NOTE(review): this loop blocks the event loop until the
			// process becomes active again, and the enableProcess error is
			// only surfaced after the loop exits — apparently intentional,
			// matching clusterWait's retry behavior; confirm.
			for range heartBeat.C {
				ic.enabled, err = ic.enableProcess()
				if ic.enabled {
					infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
					ic.bulk.Start(context.Background())
					ic.resumeWork()
					break
				}
			}
		}
	} else {
		ic.enabled, err = ic.enableProcess()
		if ic.enabled {
			infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
			ic.bulk.Start(context.Background())
			ic.resumeWork()
		}
	}
	if err != nil {
		ic.processErr(err)
	}
}
// eventLoop is the main select loop: it routes incoming gtm operations,
// saves resume timestamps, emits stats, drives cluster heartbeats, and
// handles shutdown. Tickers for features that are disabled are stopped
// immediately so their cases never fire.
func (ic *indexClient) eventLoop() {
	var err error
	var allOpsVisited bool
	timestampTicker := time.NewTicker(10 * time.Second)
	if ic.config.Resume == false {
		timestampTicker.Stop()
	}
	heartBeat := time.NewTicker(10 * time.Second)
	if ic.config.ClusterName == "" {
		heartBeat.Stop()
	}
	statsTimeout := time.Duration(30) * time.Second
	if ic.config.StatsDuration != "" {
		// Parse error deliberately ignored; falls back to the 30s default.
		statsTimeout, _ = time.ParseDuration(ic.config.StatsDuration)
	}
	printStats := time.NewTicker(statsTimeout)
	if ic.config.Stats == false {
		printStats.Stop()
	}
	infoLog.Println("Listening for events")
	for {
		select {
		case timeout := <-ic.doneC:
			ic.enabled = false
			ic.shutdown(timeout)
			return
		case <-timestampTicker.C:
			if !ic.enabled {
				break
			}
			ic.nextTimestamp()
		case <-heartBeat.C:
			if ic.config.ClusterName == "" {
				break
			}
			ic.nextHeartbeat()
		case <-printStats.C:
			if !ic.enabled {
				break
			}
			ic.nextStats()
		case err = <-ic.gtmCtx.ErrC:
			if err == nil {
				break
			}
			ic.processErr(err)
		case op, open := <-ic.gtmCtx.OpC:
			if !ic.enabled {
				break
			}
			if op == nil {
				// A nil op with a closed channel means gtm has drained;
				// signal startReadWait exactly once via opsConsumed.
				if !open && !allOpsVisited {
					allOpsVisited = true
					ic.opsConsumed <- true
				}
				break
			}
			// Only oplog-sourced ops advance the resume timestamp; direct
			// reads carry no meaningful oplog position.
			if op.IsSourceOplog() {
				ic.lastTs = op.Timestamp
			}
			if err = ic.routeOp(op); err != nil {
				ic.processErr(err)
			}
		}
	}
}
// startIndex launches a fixed pool of five indexing workers consuming from
// indexC; indexing errors are routed through processErr.
func (ic *indexClient) startIndex() {
	const indexWorkers = 5
	for n := 0; n < indexWorkers; n++ {
		ic.indexWg.Add(1)
		go func() {
			defer ic.indexWg.Done()
			for item := range ic.indexC {
				if err := ic.doIndex(item); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startRelate launches the relation-processing workers, but only when at
// least one relation is configured; otherwise relateC is left unconsumed by
// design (nothing is ever routed to it).
func (ic *indexClient) startRelate() {
	if len(ic.config.Relate) == 0 {
		return
	}
	for n := 0; n < ic.config.RelateThreads; n++ {
		ic.relateWg.Add(1)
		go func() {
			defer ic.relateWg.Done()
			for item := range ic.relateC {
				if err := ic.processRelated(item); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startCluster joins the configured cluster (setting up the TTL collection
// that backs membership) and determines whether this process starts out as
// the active member. Failures here are fatal. No-op outside cluster mode.
func (ic *indexClient) startCluster() {
	if ic.config.ClusterName == "" {
		return
	}
	if err := ic.ensureClusterTTL(); err != nil {
		errorLog.Fatalf("Unable to enable cluster mode: %s", err)
	}
	infoLog.Printf("Joined cluster %s", ic.config.ClusterName)
	var err error
	if ic.enabled, err = ic.enableProcess(); err != nil {
		errorLog.Fatalf("Unable to determine enabled cluster process: %s", err)
	}
}
// closeClient releases resources during shutdown: cluster state, the HTTP
// server, and any buffered bulk data, then signals completion by closing
// closeC (which shutdown waits on).
func (ic *indexClient) closeClient() {
	if ic.mongo != nil && ic.config.ClusterName != "" {
		ic.resetClusterState()
	}
	if ic.hsc != nil {
		// Mark the shutdown as deliberate so serveHttp does not treat the
		// resulting ListenAndServe error as fatal.
		ic.hsc.shutdown = true
		ic.hsc.httpServer.Shutdown(context.Background())
	}
	// Flush pending bulk requests so indexed data is not lost on exit.
	if ic.bulk != nil {
		ic.bulk.Flush()
	}
	if ic.bulkStats != nil {
		ic.bulkStats.Flush()
	}
	close(ic.closeC)
}
// shutdown performs a bounded graceful exit: it starts closeClient in the
// background and waits until either the clean-close signal (closeC) arrives
// or the timeout (in seconds) elapses, then exits with the recorded status.
// The original for/select with a done flag had two identical cases and a
// Ticker where a one-shot Timer is correct; a single select expresses the
// same behavior directly.
func (ic *indexClient) shutdown(timeout int) {
	infoLog.Println("Shutting down")
	go ic.closeClient()
	deadline := time.NewTimer(time.Duration(timeout) * time.Second)
	defer deadline.Stop()
	select {
	case <-ic.closeC:
		// Clean close completed in time.
	case <-deadline.C:
		// Close took too long; exit anyway.
	}
	os.Exit(exitStatus)
}
// handlePanic is installed via defer in main: it logs any panic, pauses
// briefly so log sinks can flush, and exits with status 1 instead of
// printing a raw stack trace. Note recover only works because this function
// is itself the deferred call.
func handlePanic() {
	if r := recover(); r != nil {
		errorLog.Println(r)
		infoLog.Println("Shutting down with exit status 1 after panic.")
		// Give asynchronous log writers a moment to drain before exiting.
		time.Sleep(3 * time.Second)
		os.Exit(1)
	}
}
// getBuildInfo runs the admin buildInfo command against the server and
// decodes the result, returning the server's build information (notably its
// version string).
func getBuildInfo(client *mongo.Client) (bi *buildInfo, err error) {
	cmd := bson.M{"buildInfo": 1}
	result := client.Database("admin").RunCommand(context.Background(), cmd)
	if err = result.Err(); err != nil {
		return
	}
	bi = &buildInfo{}
	err = result.Decode(bi)
	return
}
// saveTimestampFromReplStatus reads the replica set's last committed
// timestamp, records it as the client's last timestamp, and persists it as
// the resume point. Any failure along the way is routed through processErr.
func (ic *indexClient) saveTimestampFromReplStatus() {
	rs, err := gtm.GetReplStatus(ic.mongo)
	if err == nil {
		if ic.lastTs, err = rs.GetLastCommitted(); err == nil {
			err = ic.saveTimestamp()
		}
	}
	if err != nil {
		ic.processErr(err)
	}
}
// mustConfig parses flags and the config file into a validated configOptions,
// exiting early for --version and --print-config. Any validation failure
// terminates the process (hence "must").
func mustConfig() *configOptions {
	config := &configOptions{
		GtmSettings: gtmDefaultSettings(),
	}
	config.parseCommandLineFlags()
	if config.Version {
		fmt.Println(version)
		os.Exit(0)
	}
	// build() merges the config file with command-line flags.
	config.build()
	if config.Print {
		config.dump()
		os.Exit(0)
	}
	config.setupLogging()
	config.validate()
	return config
}
// buildMongoClient connects to the primary MongoDB URL (fatal on failure)
// and logs the server version when it can be determined; the buildInfo
// lookup is best-effort only.
func buildMongoClient(config *configOptions) *mongo.Client {
	client, dialErr := config.dialMongo(config.MongoURL)
	if dialErr != nil {
		errorLog.Fatalf("Unable to connect to MongoDB using URL %s: %s",
			cleanMongoURL(config.MongoURL), dialErr)
	}
	infoLog.Printf("Started monstache version %s", version)
	info, infoErr := getBuildInfo(client)
	if infoErr == nil {
		infoLog.Printf("Successfully connected to MongoDB version %s", info.Version)
	} else {
		infoLog.Println("Successfully connected to MongoDB")
	}
	return client
}
// buildElasticClient creates the Elasticsearch client (fatal on failure) and
// establishes the server version: either probed from the live connection or,
// when elasticsearch-version is pinned in the config, parsed from that value.
func buildElasticClient(config *configOptions) *elastic.Client {
	client, err := config.newElasticClient()
	if err != nil {
		errorLog.Fatalf("Unable to create Elasticsearch client: %s", err)
	}
	if config.ElasticVersion != "" {
		// A pinned version skips the connection probe entirely.
		if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
			errorLog.Fatalf("Elasticsearch version must conform to major.minor.fix: %s", err)
		}
		return client
	}
	if err := config.testElasticsearchConn(client); err != nil {
		errorLog.Fatalf("Unable to validate connection to Elasticsearch: %s", err)
	}
	return client
}
// main wires together configuration, the MongoDB and Elasticsearch clients,
// and the worker channels, then hands control to indexClient.run, which
// blocks for the life of the process.
func main() {
	defer handlePanic()
	config := mustConfig()
	mongoClient := buildMongoClient(config)
	// Scripting helpers (find/findId/etc.) need the mongo client at load time.
	loadBuiltinFunctions(mongoClient, config)
	elasticClient := buildElasticClient(config)
	ic := &indexClient{
		config:      config,
		mongo:       mongoClient,
		client:      elasticClient,
		fileWg:      &sync.WaitGroup{},
		indexWg:     &sync.WaitGroup{},
		processWg:   &sync.WaitGroup{},
		relateWg:    &sync.WaitGroup{},
		opsConsumed: make(chan bool),
		closeC:      make(chan bool),
		doneC:       make(chan int),
		enabled:     true,
		indexC:      make(chan *gtm.Op),
		processC:    make(chan *gtm.Op),
		fileC:       make(chan *gtm.Op),
		// relateC is the only buffered stage; RelateBuffer absorbs fan-out.
		relateC: make(chan *gtm.Op, config.RelateBuffer),
	}
	ic.run()
}
// refactor
// package main provides the monstache binary
package main
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/olivere/elastic/v7"
aws "github.com/olivere/elastic/v7/aws/v4"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/gridfs"
"go.mongodb.org/mongo-driver/mongo/options"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io/ioutil"
"log"
"math"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"text/template"
"time"
)
// Package-level loggers, one per severity/stream.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())

// Optional compiled-plugin entry points; nil when no plugin is loaded.
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)
var processPlugin func(*monstachemap.ProcessPluginInput) error
var pipePlugin func(string, bool) ([]interface{}, error)

// JavaScript execution environments keyed by namespace ("" = all namespaces).
var mapEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var filterEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var pipeEnvs map[string]*executionEnv = make(map[string]*executionEnv)

// Per-namespace configuration lookups built from the config file.
var mapIndexTypes map[string]*indexMapping = make(map[string]*indexMapping)
var relates map[string][]*relation = make(map[string][]*relation)
var fileNamespaces map[string]bool = make(map[string]bool)
var patchNamespaces map[string]bool = make(map[string]bool)
var tmNamespaces map[string]bool = make(map[string]bool)
var routingNamespaces map[string]bool = make(map[string]bool)

// mux guards the shared script environment maps above.
var mux sync.Mutex

// Namespace classification patterns for GridFS chunks and system collections.
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")

// exitStatus is the process exit code used by shutdown.
var exitStatus = 0
// Build version and default configuration values.
const version = "6.0.6"
const mongoURLDefault string = "mongodb://localhost:27017"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 0
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512
const fileDownloadersDefault = 10
const relateThreadsDefault = 10
const relateBufferDefault = 1000
const postProcessorsDefault = 10
// redact replaces credentials when URLs are logged or dumped.
const redact = "REDACTED"
const configDatabaseNameDefault = "monstache"
// relateQueueOverloadMsg is logged when relateC is full and a relate is skipped.
const relateQueueOverloadMsg = "Relate queue is full. Skipping relate for %v.(%v) to keep pipeline healthy."
// deleteStrategy selects how document deletions are applied to Elasticsearch.
type deleteStrategy int

const (
	// statelessDeleteStrategy locates the document by searching Elasticsearch.
	statelessDeleteStrategy deleteStrategy = iota
	// statefulDeleteStrategy uses index metadata stored at index time.
	statefulDeleteStrategy
	// ignoreDeleteStrategy skips deletions entirely.
	ignoreDeleteStrategy
)
// buildInfo holds the decoded result of MongoDB's buildInfo command.
type buildInfo struct {
	Version string
}

// stringargs is a repeatable string command-line flag (implements flag.Value).
type stringargs []string
// indexClient is the top-level application object: it owns the MongoDB and
// Elasticsearch connections, the gtm context, the bulk processors, and the
// worker pipeline channels (relateC → fileC → indexC, plus processC).
type indexClient struct {
	gtmCtx      *gtm.OpCtxMulti
	config      *configOptions
	mongo       *mongo.Client
	mongoConfig *mongo.Client // config server connection (sharded clusters only)
	bulk        *elastic.BulkProcessor
	bulkStats   *elastic.BulkProcessor // separate processor for stats indexing
	client      *elastic.Client
	hsc         *httpServerCtx
	fileWg      *sync.WaitGroup
	indexWg     *sync.WaitGroup
	processWg   *sync.WaitGroup
	relateWg    *sync.WaitGroup
	opsConsumed chan bool // signaled once when gtm's op channel is drained
	closeC      chan bool // closed by closeClient when cleanup completes
	doneC       chan int  // carries the shutdown timeout in seconds
	enabled     bool      // whether this process is the active cluster member
	lastTs      primitive.Timestamp // newest oplog timestamp seen
	lastTsSaved primitive.Timestamp // newest timestamp persisted for resume
	indexC      chan *gtm.Op
	processC    chan *gtm.Op
	fileC       chan *gtm.Op
	relateC     chan *gtm.Op
	filter      gtm.OpFilter // plugin/script filter, if configured
}
// awsConnect holds AWS signing credentials for Amazon Elasticsearch Service.
type awsConnect struct {
	AccessKey string `toml:"access-key"`
	SecretKey string `toml:"secret-key"`
	Region    string
}

// executionEnv is a JavaScript VM plus its source; lock serializes calls
// into the (non-thread-safe) otto VM.
type executionEnv struct {
	VM     *otto.Otto
	Script string
	lock   *sync.Mutex
}
// javascript describes a configured mapping/filter/pipeline script for a
// namespace, given inline (Script) or by file (Path).
type javascript struct {
	Namespace string
	Script    string
	Path      string
	Routing   bool
}

// relation describes how changes in one namespace trigger re-indexing of
// related documents in another namespace.
type relation struct {
	Namespace     string
	WithNamespace string `toml:"with-namespace"`
	SrcField      string `toml:"src-field"`
	MatchField    string `toml:"match-field"`
	KeepSrc       bool   `toml:"keep-src"`
	MaxDepth      int    `toml:"max-depth"`
	// db and col are derived from WithNamespace at load time.
	db  string
	col string
}
// indexMapping maps a MongoDB namespace to an Elasticsearch index name.
type indexMapping struct {
	Namespace string
	Index     string
}

// findConf configures one of the JavaScript find helpers (find, findId,
// findOne, pipe) exposed to mapping scripts.
type findConf struct {
	vm            *otto.Otto
	ns            string
	name          string
	client        *mongo.Client
	byId          bool
	multi         bool
	pipe          bool
	pipeAllowDisk bool
}

// findCall is the per-invocation state of a find helper call: the resolved
// target database/collection, the query, and any options from the script.
type findCall struct {
	config *findConf
	client *mongo.Client
	query  interface{}
	db     string
	col    string
	limit  int
	sort   map[string]int
	sel    map[string]int
}
// logFiles holds optional file paths for redirecting each log stream.
type logFiles struct {
	Info  string
	Warn  string
	Error string
	Trace string
	Stats string
}

// indexingMeta carries per-document Elasticsearch metadata (index, routing,
// parent, versioning, pipeline) resolved at index time and reused on delete.
type indexingMeta struct {
	Routing         string
	Index           string
	Type            string
	Parent          string
	Version         int64
	VersionType     string
	Pipeline        string
	RetryOnConflict int
	Skip            bool
	ID              string
}

// gtmSettings tunes gtm's channel and buffering behavior.
type gtmSettings struct {
	ChannelSize    int    `toml:"channel-size"`
	BufferSize     int    `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
}

// httpServerCtx bundles the monitoring HTTP server with the state its
// handlers need; shutdown marks a deliberate stop so serveHttp is not fatal.
type httpServerCtx struct {
	httpServer *http.Server
	bulk       *elastic.BulkProcessor
	config     *configOptions
	shutdown   bool
	started    time.Time
}
// configOptions is the full monstache configuration, populated from the TOML
// config file and command-line flags (toml tags give the file keys). Fields
// without tags are flag/derived values.
type configOptions struct {
	EnableTemplate           bool
	EnvDelimiter             string
	MongoURL                 string         `toml:"mongo-url"`
	MongoConfigURL           string         `toml:"mongo-config-url"`
	MongoOpLogDatabaseName   string         `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string         `toml:"mongo-oplog-collection-name"`
	GtmSettings              gtmSettings    `toml:"gtm-settings"`
	AWSConnect               awsConnect     `toml:"aws-connect"`
	Logs                     logFiles       `toml:"logs"`
	GraylogAddr              string         `toml:"graylog-addr"`
	ElasticUrls              stringargs     `toml:"elasticsearch-urls"`
	ElasticUser              string         `toml:"elasticsearch-user"`
	ElasticPassword          string         `toml:"elasticsearch-password"`
	ElasticPemFile           string         `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile   bool           `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion           string         `toml:"elasticsearch-version"`
	ElasticHealth0           int            `toml:"elasticsearch-healthcheck-timeout-startup"`
	ElasticHealth1           int            `toml:"elasticsearch-healthcheck-timeout"`
	ResumeName               string         `toml:"resume-name"`
	NsRegex                  string         `toml:"namespace-regex"`
	NsDropRegex              string         `toml:"namespace-drop-regex"`
	NsExcludeRegex           string         `toml:"namespace-exclude-regex"`
	NsDropExcludeRegex       string         `toml:"namespace-drop-exclude-regex"`
	ClusterName              string         `toml:"cluster-name"`
	Print                    bool           `toml:"print-config"`
	Version                  bool
	Pprof                    bool
	EnableOplog              bool           `toml:"enable-oplog"`
	DisableChangeEvents      bool           `toml:"disable-change-events"`
	EnableEasyJSON           bool           `toml:"enable-easy-json"`
	Stats                    bool
	IndexStats               bool           `toml:"index-stats"`
	StatsDuration            string         `toml:"stats-duration"`
	StatsIndexFormat         string         `toml:"stats-index-format"`
	Gzip                     bool
	Verbose                  bool
	Resume                   bool
	ResumeWriteUnsafe        bool           `toml:"resume-write-unsafe"`
	ResumeFromTimestamp      int64          `toml:"resume-from-timestamp"`
	Replay                   bool
	DroppedDatabases         bool           `toml:"dropped-databases"`
	DroppedCollections       bool           `toml:"dropped-collections"`
	IndexFiles               bool           `toml:"index-files"`
	IndexAsUpdate            bool           `toml:"index-as-update"`
	FileHighlighting         bool           `toml:"file-highlighting"`
	EnablePatches            bool           `toml:"enable-patches"`
	FailFast                 bool           `toml:"fail-fast"`
	IndexOplogTime           bool           `toml:"index-oplog-time"`
	OplogTsFieldName         string         `toml:"oplog-ts-field-name"`
	OplogDateFieldName       string         `toml:"oplog-date-field-name"`
	OplogDateFieldFormat     string         `toml:"oplog-date-field-format"`
	ExitAfterDirectReads     bool           `toml:"exit-after-direct-reads"`
	MergePatchAttr           string         `toml:"merge-patch-attribute"`
	ElasticMaxConns          int            `toml:"elasticsearch-max-conns"`
	ElasticRetry             bool           `toml:"elasticsearch-retry"`
	ElasticMaxDocs           int            `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes          int            `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds        int            `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout     int            `toml:"elasticsearch-client-timeout"`
	ElasticMajorVersion      int
	ElasticMinorVersion      int
	MaxFileSize              int64          `toml:"max-file-size"`
	ConfigFile               string
	Script                   []javascript
	Filter                   []javascript
	Pipeline                 []javascript
	Mapping                  []indexMapping
	Relate                   []relation
	FileNamespaces           stringargs     `toml:"file-namespaces"`
	PatchNamespaces          stringargs     `toml:"patch-namespaces"`
	Workers                  stringargs
	Worker                   string
	ChangeStreamNs           stringargs     `toml:"change-stream-namespaces"`
	DirectReadNs             stringargs     `toml:"direct-read-namespaces"`
	DirectReadSplitMax       int            `toml:"direct-read-split-max"`
	DirectReadConcur         int            `toml:"direct-read-concur"`
	MapperPluginPath         string         `toml:"mapper-plugin-path"`
	EnableHTTPServer         bool           `toml:"enable-http-server"`
	HTTPServerAddr           string         `toml:"http-server-addr"`
	TimeMachineNamespaces    stringargs     `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix   string         `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix   string         `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads   bool           `toml:"time-machine-direct-reads"`
	PipeAllowDisk            bool           `toml:"pipe-allow-disk"`
	RoutingNamespaces        stringargs     `toml:"routing-namespaces"`
	DeleteStrategy           deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern       string         `toml:"delete-index-pattern"`
	ConfigDatabaseName       string         `toml:"config-database-name"`
	FileDownloaders          int            `toml:"file-downloaders"`
	RelateThreads            int            `toml:"relate-threads"`
	RelateBuffer             int            `toml:"relate-buffer"`
	PostProcessors           int            `toml:"post-processors"`
	PruneInvalidJSON         bool           `toml:"prune-invalid-json"`
	Debug                    bool
	mongoClientOptions       *options.ClientOptions
}
// IsIdentity reports whether the relation joins on _id on both sides, i.e.
// the related document is keyed directly by the source document's id.
// (Returning the boolean expression directly replaces the redundant
// if/else true/false form.)
func (rel *relation) IsIdentity() bool {
	return rel.SrcField == "_id" && rel.MatchField == "_id"
}
// enabled reports whether any log stream has been redirected to a file.
func (l *logFiles) enabled() bool {
	return l.Info != "" || l.Warn != "" || l.Error != "" || l.Trace != "" || l.Stats != ""
}
// validate checks that the AWS credentials are either fully specified or
// fully omitted; supplying only one half of the key pair is an error.
func (ac *awsConnect) validate() error {
	haveAccess := ac.AccessKey != ""
	haveSecret := ac.SecretKey != ""
	if haveAccess != haveSecret {
		return errors.New("AWS connect settings must include both access-key and secret-key")
	}
	return nil
}
// enabled reports whether any AWS credential was supplied (validate ensures
// both halves are present when one is).
func (ac *awsConnect) enabled() bool {
	return ac.AccessKey != "" || ac.SecretKey != ""
}
// String renders the strategy as its numeric value (flag.Value interface).
func (arg *deleteStrategy) String() string {
	return fmt.Sprintf("%d", *arg)
}
// Set parses a numeric command-line value into a deleteStrategy, satisfying
// the flag.Value interface. Non-numeric input yields the Atoi error.
func (arg *deleteStrategy) Set(value string) (err error) {
	parsed, convErr := strconv.Atoi(value)
	if convErr != nil {
		return convErr
	}
	*arg = deleteStrategy(parsed)
	return nil
}
// String renders the accumulated values (flag.Value interface).
func (args *stringargs) String() string {
	return fmt.Sprintf("%s", *args)
}
// Set appends one more value, letting the flag be repeated on the command
// line (flag.Value interface).
func (args *stringargs) Set(value string) error {
	*args = append(*args, value)
	return nil
}
// readShards reports whether monstache should discover and tail individual
// shards: a config server URL is set and change streams are not in use.
func (config *configOptions) readShards() bool {
	return len(config.ChangeStreamNs) == 0 && config.MongoConfigURL != ""
}
// afterBulk is the bulk processor's post-commit callback; it logs every
// item that Elasticsearch reported as failed in the bulk response.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if response == nil || !response.Errors {
		return
	}
	// Ranging over a nil Failed() slice is a no-op, so no nil check needed.
	for _, item := range response.Failed() {
		// Fix: the local was previously named "json", shadowing the
		// encoding/json package within this loop.
		encoded, merr := json.Marshal(item)
		if merr != nil {
			errorLog.Printf("Unable to marshal bulk response item: %s", merr)
		} else {
			errorLog.Printf("Bulk response item: %s", string(encoded))
		}
	}
}
// parseElasticsearchVersion extracts the major and minor version numbers
// from a dotted Elasticsearch version string (e.g. "7.10.2") and records
// them on the config. It returns an error for a blank string, a
// non-numeric major/minor component, or major version 0.
func (config *configOptions) parseElasticsearchVersion(number string) (err error) {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	var majorVersion int
	if majorVersion, err = strconv.Atoi(versionParts[0]); err != nil {
		// Bug fix: previously a major-version parse failure (or the
		// major-version-0 error) could be clobbered by a successful
		// minor-version parse below, returning nil for invalid input.
		return
	}
	config.ElasticMajorVersion = majorVersion
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	if len(versionParts) > 1 {
		var minorVersion int
		if minorVersion, err = strconv.Atoi(versionParts[1]); err != nil {
			return
		}
		config.ElasticMinorVersion = minorVersion
	}
	return
}
// newBulkProcessor builds the primary Elasticsearch bulk processor used for
// indexing, configured from the monstache settings (worker count, batch
// size/bytes, flush interval). When retries are disabled, StopBackoff makes
// failed requests fail immediately instead of backing off.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	bulkService.BulkActions(config.ElasticMaxDocs)
	bulkService.BulkSize(config.ElasticMaxBytes)
	// Idiom fix: was "config.ElasticRetry == false".
	if !config.ElasticRetry {
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds a small secondary bulk processor dedicated
// to writing monstache's own stats documents: a single worker, no batching
// thresholds, flushing every 5 seconds.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		After(afterBulk).
		FlushInterval(5 * time.Second)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https, in which case the client must be created with the https scheme.
func (config *configOptions) needsSecureScheme() bool {
	// Ranging over an empty slice is a no-op, so no length guard is needed.
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient constructs the Elasticsearch client from the monstache
// configuration: scheme, URLs, optional trace/error logging, basic auth,
// retry backoff, the custom HTTP client, and health check timeouts.
// Sniffing is always disabled.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	opts := []elastic.ClientOptionFunc{elastic.SetSniff(false)}
	if config.needsSecureScheme() {
		opts = append(opts, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		opts = append(opts, elastic.SetURL(config.ElasticUrls...))
	} else {
		// Record the default so later code (e.g. the version probe) can
		// reference config.ElasticUrls[0].
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		opts = append(opts, elastic.SetTraceLog(traceLog), elastic.SetErrorLog(errorLog))
	}
	if config.ElasticUser != "" {
		opts = append(opts, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		backoff := elastic.NewExponentialBackoff(50*time.Millisecond, 20*time.Second)
		opts = append(opts, elastic.SetRetrier(elastic.NewBackoffRetrier(backoff)))
	}
	httpClient, err := config.NewHTTPClient()
	if err != nil {
		return nil, err
	}
	opts = append(opts,
		elastic.SetHttpClient(httpClient),
		elastic.SetHealthcheckTimeoutStartup(time.Duration(config.ElasticHealth0)*time.Second),
		elastic.SetHealthcheckTimeout(time.Duration(config.ElasticHealth1)*time.Second))
	return elastic.NewClient(opts...)
}
// testElasticsearchConn probes the first configured Elasticsearch URL,
// logs the reported server version, and records it on the config.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) (err error) {
	var number string
	if number, err = client.ElasticsearchVersion(config.ElasticUrls[0]); err != nil {
		return
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes the Elasticsearch indexes associated with a dropped
// MongoDB database. The default pattern is "<db>*"; if a custom index
// mapping exists for any namespace in that database, the first such
// mapping's index pattern is used instead.
func (ic *indexClient) deleteIndexes(db string) (err error) {
	pattern := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		parts := strings.SplitN(ns, ".", 2)
		if parts[0] != db {
			continue
		}
		if m.Index != "" {
			pattern = strings.ToLower(m.Index + "*")
		}
		break
	}
	_, err = ic.client.DeleteIndex(pattern).Do(context.Background())
	return
}
// deleteIndex removes the Elasticsearch index for a single namespace,
// honoring any custom index name configured for that namespace.
func (ic *indexClient) deleteIndex(namespace string) (err error) {
	index := namespace
	if m := mapIndexTypes[namespace]; m != nil && m.Index != "" {
		index = m.Index
	}
	_, err = ic.client.DeleteIndex(strings.ToLower(index)).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used to
// extract content from GridFS files stored under the "file" field.
func (ic *indexClient) ensureFileMapping() (err error) {
	attachment := map[string]interface{}{
		"attachment": map[string]interface{}{"field": "file"},
	}
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors":  []map[string]interface{}{attachment},
	}
	_, err = ic.client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
	return err
}
// defaultIndexMapping derives the default mapping for an operation: the
// index is simply the lower-cased MongoDB namespace.
func (ic *indexClient) defaultIndexMapping(op *gtm.Op) *indexMapping {
	ns := op.Namespace
	return &indexMapping{Namespace: ns, Index: strings.ToLower(ns)}
}
// mapIndex resolves the index mapping for an op, applying any configured
// index-name override for its namespace on top of the default.
func (ic *indexClient) mapIndex(op *gtm.Op) *indexMapping {
	mapping := ic.defaultIndexMapping(op)
	if m := mapIndexTypes[op.Namespace]; m != nil && m.Index != "" {
		mapping.Index = m.Index
	}
	return mapping
}
// opIDToString renders a document _id as a string suitable for use as an
// Elasticsearch document id: ObjectIDs become hex, float ids holding an
// integral value are printed without a decimal point, and everything else
// falls back to default %v formatting.
func opIDToString(op *gtm.Op) string {
	// Idiom fix: bind the type-switch value instead of re-asserting op.Id
	// in every case.
	switch id := op.Id.(type) {
	case primitive.ObjectID:
		return id.Hex()
	case float64:
		if intID := int(id); id == float64(intID) {
			return fmt.Sprintf("%v", intID)
		}
	case float32:
		if intID := int(id); id == float32(intID) {
			return fmt.Sprintf("%v", intID)
		}
	}
	return fmt.Sprintf("%v", op.Id)
}
// convertSliceJavascript recursively prepares a BSON-decoded slice for the
// otto JavaScript VM, converting ObjectIDs to their hex string form.
func convertSliceJavascript(a []interface{}) []interface{} {
	var converted []interface{}
	for _, element := range a {
		switch v := element.(type) {
		case map[string]interface{}:
			converted = append(converted, convertMapJavascript(v))
		case []interface{}:
			converted = append(converted, convertSliceJavascript(v))
		case primitive.ObjectID:
			converted = append(converted, v.Hex())
		default:
			converted = append(converted, element)
		}
	}
	return converted
}
// convertMapJavascript recursively prepares a BSON-decoded document for the
// otto JavaScript VM, converting ObjectIDs to their hex string form.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(e))
	for key, value := range e {
		switch v := value.(type) {
		case map[string]interface{}:
			out[key] = convertMapJavascript(v)
		case []interface{}:
			out[key] = convertSliceJavascript(v)
		case primitive.ObjectID:
			out[key] = v.Hex()
		default:
			out[key] = value
		}
	}
	return out
}
// fixSlicePruneInvalidJSON recursively removes slice elements that cannot
// be serialized to JSON: time.Time values with a year outside 0-9999 and
// float64 NaN/Inf values. Dropped elements are logged with the document id.
func fixSlicePruneInvalidJSON(id string, key string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		var avc interface{}
		switch achild := av.(type) {
		case map[string]interface{}:
			avc = fixPruneInvalidJSON(id, achild)
		case []interface{}:
			avc = fixSlicePruneInvalidJSON(id, key, achild)
		case time.Time:
			if year := achild.Year(); year < 0 || year > 9999 {
				// The JSON encoder rejects dates outside this range.
				warnLog.Printf("Dropping key %s element: invalid time.Time value: %s for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		case float64:
			// Merged duplicate branches: NaN and +/-Inf are handled
			// identically (both break the JSON serializer).
			if math.IsNaN(achild) || math.IsInf(achild, 0) {
				warnLog.Printf("Dropping key %s element: invalid float64 value: %v for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		default:
			avc = av
		}
		avs = append(avs, avc)
	}
	return avs
}
// fixPruneInvalidJSON recursively removes map entries that cannot be
// serialized to JSON: time.Time values with a year outside 0-9999 and
// float64 NaN/Inf values. Dropped keys are logged with the document id.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, k, child)
		case time.Time:
			if year := child.Year(); year < 0 || year > 9999 {
				// The JSON encoder rejects dates outside this range.
				warnLog.Printf("Dropping key %s: invalid time.Time value: %s for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		case float64:
			// Merged duplicate branches: NaN and +/-Inf are handled
			// identically (both break the JSON serializer).
			if math.IsNaN(child) || math.IsInf(child, 0) {
				warnLog.Printf("Dropping key %s: invalid float64 value: %v for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts a value coming back from the otto JavaScript VM
// into plain Go types, recursing into maps and slices. otto Date objects
// are re-parsed into time.Time because Export does not produce one.
// On export/parse failure the error is logged and b remains nil.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			// otto renders Date objects via String(); parse that fixed
			// layout back into a time.Time (overwriting ex and err).
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []map[string]interface{}:
		b = deepExportMapSlice(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		// Already a plain Go value; pass through unchanged.
		b = a
	}
	return
}
// deepExportMapSlice applies deepExportMap to each element of a slice of maps.
func deepExportMapSlice(a []map[string]interface{}) []interface{} {
	var out []interface{}
	for _, m := range a {
		out = append(out, deepExportMap(m))
	}
	return out
}
// deepExportSlice applies deepExportValue to each element of a slice.
func deepExportSlice(a []interface{}) []interface{} {
	var out []interface{}
	for _, element := range a {
		out = append(out, deepExportValue(element))
	}
	return out
}
// deepExportMap applies deepExportValue to every value of a map.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	exported := make(map[string]interface{}, len(e))
	for key, value := range e {
		exported[key] = deepExportValue(value)
	}
	return exported
}
// mapDataJavascript runs the JavaScript mapping environments registered for
// the catch-all ("") namespace and the op's namespace against op.Data.
// A script returning an object replaces op.Data; a falsy return clears
// op.Data (dropping the document) and stops further mapping.
func mapDataJavascript(op *gtm.Op) error {
	// applyEnv runs one mapping environment; done=true means the script
	// signalled the document should be dropped. Extracted so the deferred
	// unlock releases each env's lock as soon as that env finishes —
	// previously the defer sat in the loop and held the first env's lock
	// while the second env ran.
	applyEnv := func(env *executionEnv) (done bool, err error) {
		env.lock.Lock()
		defer env.lock.Unlock()
		arg := convertMapJavascript(op.Data)
		arg2 := op.Namespace
		arg3 := convertMapJavascript(op.UpdateDescription)
		val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
		if err != nil {
			return false, err
		}
		if strings.ToLower(val.Class()) == "object" {
			data, err := val.Export()
			if err != nil {
				return false, err
			}
			if data == val {
				return false, errors.New("Exported function must return an object")
			}
			dm := data.(map[string]interface{})
			op.Data = deepExportMap(dm)
			return false, nil
		}
		indexed, err := val.ToBoolean()
		if err != nil {
			return false, err
		}
		if !indexed {
			op.Data = nil
			return true, nil
		}
		return false, nil
	}
	for _, name := range []string{"", op.Namespace} {
		env := mapEnvs[name]
		if env == nil {
			continue
		}
		done, err := applyEnv(env)
		if err != nil {
			return err
		}
		if done {
			break
		}
	}
	return nil
}
// mapDataGolang runs the loaded Golang mapper plugin against an operation
// and applies its directives: Drop clears the document; Skip empties it;
// otherwise (unless Passthrough) the plugin's document replaces op.Data.
// Per-document indexing overrides (index, id, type, routing, parent,
// version, pipeline, ...) are stashed under "_meta_monstache" for the
// indexing stage to consume.
func mapDataGolang(client *mongo.Client, op *gtm.Op) error {
	input := &monstachemap.MapperPluginInput{
		Document:          op.Data,
		Namespace:         op.Namespace,
		Database:          op.GetDatabase(),
		Collection:        op.GetCollection(),
		Operation:         op.Operation,
		MongoClient:       client,
		UpdateDescription: op.UpdateDescription,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		// A nil output means the plugin made no changes.
		return nil
	}
	if output.Drop {
		op.Data = nil
		return nil
	}
	if output.Skip {
		op.Data = map[string]interface{}{}
	} else if !output.Passthrough { // idiom fix: was "== false"
		if output.Document == nil {
			return errors.New("Map function must return a non-nil document")
		}
		op.Data = output.Document
	}
	meta := make(map[string]interface{})
	if output.Skip {
		meta["skip"] = true
	}
	if output.Index != "" {
		meta["index"] = output.Index
	}
	if output.ID != "" {
		meta["id"] = output.ID
	}
	if output.Type != "" {
		meta["type"] = output.Type
	}
	if output.Routing != "" {
		meta["routing"] = output.Routing
	}
	if output.Parent != "" {
		meta["parent"] = output.Parent
	}
	if output.Version != 0 {
		meta["version"] = output.Version
	}
	if output.VersionType != "" {
		meta["versionType"] = output.VersionType
	}
	if output.Pipeline != "" {
		meta["pipeline"] = output.Pipeline
	}
	if output.RetryOnConflict != 0 {
		meta["retryOnConflict"] = output.RetryOnConflict
	}
	if len(meta) > 0 {
		op.Data["_meta_monstache"] = meta
	}
	return nil
}
// mapData dispatches to the Golang mapper plugin when one is loaded and
// falls back to the JavaScript mapping environments otherwise.
func mapData(client *mongo.Client, config *configOptions, op *gtm.Op) error {
	if mapperPlugin == nil {
		return mapDataJavascript(op)
	}
	return mapDataGolang(client, op)
}
// extractData walks a dotted field path (e.g. "user.address.city") through
// a nested document and returns the value at the leaf. If the path does not
// resolve, the returned error includes the JSON-encoded document to aid
// debugging.
func extractData(srcField string, data map[string]interface{}) (result interface{}, err error) {
	var cur map[string]interface{} = data
	fields := strings.Split(srcField, ".")
	flen := len(fields)
	for i, field := range fields {
		if i+1 == flen {
			result = cur[field]
		} else {
			if next, ok := cur[field].(map[string]interface{}); ok {
				cur = next
			} else {
				// Intermediate segment is missing or not a document.
				break
			}
		}
	}
	if result == nil {
		var detail interface{}
		b, e := json.Marshal(data)
		if e == nil {
			detail = string(b)
		} else {
			// Bug fix: previously assigned the (still nil) named return
			// err here instead of the marshalling error e.
			detail = e
		}
		err = fmt.Errorf("Source field %s not found in document: %s", srcField, detail)
	}
	return
}
// buildSelector constructs a nested MongoDB query document matching the
// dotted matchField path against the given value.
func buildSelector(matchField string, data interface{}) bson.M {
	sel := bson.M{}
	cur := sel
	fields := strings.Split(matchField, ".")
	last := len(fields) - 1
	for i, field := range fields {
		if i == last {
			cur[field] = data
			break
		}
		child := bson.M{}
		cur[field] = child
		cur = child
	}
	return sel
}
func (ic *indexClient) processRelated(root *gtm.Op) (err error) {
var q []*gtm.Op
batch := []*gtm.Op{root}
depth := 1
for len(batch) > 0 {
for _, e := range batch {
op := e
if op.Data == nil {
continue
}
rs := relates[op.Namespace]
if len(rs) == 0 {
continue
}
for _, r := range rs {
if r.MaxDepth > 0 && r.MaxDepth < depth {
continue
}
if op.IsDelete() && r.IsIdentity() {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: r.WithNamespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: op.Data,
}
ic.doDelete(rop)
q = append(q, rop)
continue
}
var srcData interface{}
if srcData, err = extractData(r.SrcField, op.Data); err != nil {
ic.processErr(err)
continue
}
opts := &options.FindOptions{}
col := ic.mongo.Database(r.db).Collection(r.col)
sel := buildSelector(r.MatchField, srcData)
cursor, err := col.Find(context.Background(), sel, opts)
doc := make(map[string]interface{})
for cursor.Next(context.Background()) {
if err = cursor.Decode(&doc); err != nil {
ic.processErr(err)
continue
}
now := time.Now().UTC()
tstamp := primitive.Timestamp{
T: uint32(now.Unix()),
I: uint32(now.Nanosecond()),
}
rop := >m.Op{
Id: doc["_id"],
Data: doc,
Operation: root.Operation,
Namespace: r.WithNamespace,
Source: gtm.DirectQuerySource,
Timestamp: tstamp,
UpdateDescription: root.UpdateDescription,
}
doc = make(map[string]interface{})
if ic.filter != nil && !ic.filter(rop) {
continue
}
if processPlugin != nil {
pop := >m.Op{
Id: rop.Id,
Operation: rop.Operation,
Namespace: rop.Namespace,
Source: rop.Source,
Timestamp: rop.Timestamp,
UpdateDescription: rop.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(rop.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
pop.Data = m
}
}
ic.processC <- pop
}
skip := false
if rs2 := relates[rop.Namespace]; len(rs2) != 0 {
skip = true
visit := false
for _, r2 := range rs2 {
if r2.KeepSrc {
skip = false
}
if r2.MaxDepth < 1 || r2.MaxDepth >= (depth+1) {
visit = true
}
}
if visit {
q = append(q, rop)
}
}
if !skip {
if ic.hasFileContent(rop) {
ic.fileC <- rop
} else {
ic.indexC <- rop
}
}
}
cursor.Close(context.Background())
}
}
depth++
batch = q
q = nil
}
return
}
// prepareDataForIndexing mutates op.Data ahead of indexing: optionally adds
// oplog timestamp/date fields, strips the _id and monstache metadata keys,
// optionally prunes values that cannot serialize to JSON, and finally
// converts the map into a JSON-friendly form.
func (ic *indexClient) prepareDataForIndexing(op *gtm.Op) {
	config := ic.config
	data := op.Data
	if config.IndexOplogTime {
		seconds := int64(op.Timestamp.T)
		when := time.Unix(seconds, 0).UTC()
		data[config.OplogTsFieldName] = op.Timestamp
		data[config.OplogDateFieldName] = when.Format(config.OplogDateFieldFormat)
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if config.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
	op.Data = monstachemap.ConvertMapForJSON(op.Data)
}
// parseIndexMeta builds the indexing metadata for an operation, seeding the
// version from the oplog timestamp (external versioning) and merging any
// overrides the mapping stage stored under "_meta_monstache".
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     tsVersion(op.Timestamp),
		VersionType: "external",
	}
	m, ok := op.Data["_meta_monstache"]
	if !ok {
		return meta
	}
	// Idiom fix: bind the type-switch value instead of re-asserting m.
	switch attrs := m.(type) {
	case map[string]interface{}:
		meta.load(attrs)
	case otto.Value:
		ex, err := attrs.Export()
		if err == nil && ex != m {
			if exported, isMap := ex.(map[string]interface{}); isMap {
				meta.load(exported)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent downloads the GridFS file identified by op.Id and stores
// its base64-encoded content under op.Data["file"]. Files larger than
// MaxFileSize (when set) have their content omitted with a warning.
func (ic *indexClient) addFileContent(op *gtm.Op) (err error) {
	// Default to empty so the document always carries a "file" field even
	// when the download fails or the file is too large.
	op.Data["file"] = ""
	var gridByteBuffer bytes.Buffer
	db, bucketName :=
		ic.mongo.Database(op.GetDatabase()),
		strings.SplitN(op.GetCollection(), ".", 2)[0]
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	opts := &options.BucketOptions{}
	opts.SetName(bucketName)
	var bucket *gridfs.Bucket
	bucket, err = gridfs.NewBucket(db, opts)
	if err != nil {
		return
	}
	var size int64
	if size, err = bucket.DownloadToStream(op.Id, encoder); err != nil {
		return
	}
	if ic.config.MaxFileSize > 0 {
		if size > ic.config.MaxFileSize {
			// NOTE(review): size is only known after the stream completes,
			// so oversized files are still downloaded, just not indexed.
			warnLog.Printf("File size %d exceeds max file size. file content omitted.", size)
			return
		}
	}
	// Close flushes any remaining base64 padding into the buffer.
	if err = encoder.Close(); err != nil {
		return
	}
	op.Data["file"] = string(gridByteBuffer.Bytes())
	return
}
// notMonstache builds a filter that excludes operations on monstache's own
// metadata database.
func notMonstache(config *configOptions) gtm.OpFilter {
	configDB := config.ConfigDatabaseName
	return func(op *gtm.Op) bool {
		return op.GetDatabase() != configDB
	}
}
// notChunks excludes operations on GridFS chunks collections.
func notChunks(op *gtm.Op) bool {
	isChunks := chunksRegex.MatchString(op.GetCollection())
	return !isChunks
}
// notConfig excludes operations on MongoDB's internal config database.
func notConfig(op *gtm.Op) bool {
	isConfigDB := op.GetDatabase() == "config"
	return !isConfigDB
}
// notSystem excludes operations on system collections.
func notSystem(op *gtm.Op) bool {
	isSystem := systemsRegex.MatchString(op.GetCollection())
	return !isSystem
}
// filterWithRegex builds an op filter that always admits drop operations
// and otherwise keeps only ops whose namespace matches the regex.
func filterWithRegex(regex string) gtm.OpFilter {
	validNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || validNameSpace.MatchString(op.Namespace)
	}
}
// filterDropWithRegex builds an op filter that admits non-drop operations
// unconditionally and keeps drops only when the namespace matches.
func filterDropWithRegex(regex string) gtm.OpFilter {
	validNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return validNameSpace.MatchString(op.Namespace)
	}
}
// filterWithPlugin builds an op filter backed by the loaded Golang filter
// plugin. Only inserts and updates carrying data are consulted; a plugin
// error is logged and the op is dropped.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:          op.Data,
			Namespace:         op.Namespace,
			Database:          op.GetDatabase(),
			Collection:        op.GetCollection(),
			Operation:         op.Operation,
			UpdateDescription: op.UpdateDescription,
		}
		keep, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return keep
	}
}
// filterWithScript builds an op filter that consults the JavaScript filter
// environments for the catch-all ("") namespace and the op's namespace.
// Only inserts and updates carrying data are filtered; a script error is
// logged and the op is dropped (matching the original semantics where keep
// stayed false on error).
func filterWithScript() gtm.OpFilter {
	// runEnv evaluates one filter environment while holding its lock only
	// for the duration of that call. Previously the unlock was deferred
	// inside the loop, so the first env's lock was held while the second
	// env executed.
	runEnv := func(env *executionEnv, op *gtm.Op) bool {
		env.lock.Lock()
		defer env.lock.Unlock()
		arg := convertMapJavascript(op.Data)
		arg2 := op.Namespace
		arg3 := convertMapJavascript(op.UpdateDescription)
		val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		ok, err := val.ToBoolean()
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return ok
	}
	return func(op *gtm.Op) bool {
		keep := true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			for _, ns := range []string{"", op.Namespace} {
				if env := filterEnvs[ns]; env != nil {
					keep = runEnv(env, op)
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex builds an op filter that always admits drops and
// otherwise keeps only ops whose namespace does NOT match the regex.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	invalidNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || !invalidNameSpace.MatchString(op.Namespace)
	}
}
// filterDropInverseWithRegex builds an op filter that admits non-drop
// operations unconditionally and keeps drops only when the namespace does
// NOT match the regex.
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	invalidNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return !invalidNameSpace.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates the TTL index on the cluster collection so a
// worker's claim document expires 30 seconds after its last heartbeat.
func (ic *indexClient) ensureClusterTTL() error {
	io := options.Index().
		SetName("expireAt").
		SetBackground(true).
		SetExpireAfterSeconds(30)
	model := mongo.IndexModel{
		Keys:    bson.M{"expireAt": 1},
		Options: io,
	}
	indexes := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster").Indexes()
	_, err := indexes.CreateOne(context.Background(), model)
	return err
}
// enableProcess attempts to claim cluster leadership by inserting a claim
// document keyed by the resume name. It returns true when the claim
// succeeds and false without error when another process already holds it
// (duplicate key).
func (ic *indexClient) enableProcess() (bool, error) {
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	doc := bson.M{
		"_id":      ic.config.ResumeName,
		"expireAt": time.Now().UTC(),
		"pid":      os.Getpid(),
		"host":     host,
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	if _, err := col.InsertOne(context.Background(), doc); err != nil {
		if isDup(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// isDup reports whether a MongoDB error represents a duplicate key
// violation, inspecting both write errors and the write concern error for
// the well-known duplicate-key codes or the E11000 message marker.
func isDup(err error) bool {
	dup := func(code int, message string) bool {
		switch code {
		case 11000, 11001, 12582:
			return true
		}
		return strings.Contains(message, "E11000")
	}
	we, ok := err.(mongo.WriteException)
	if !ok {
		return false
	}
	if wce := we.WriteConcernError; wce != nil {
		if dup(wce.Code, wce.Message) {
			return true
		}
	}
	for _, e := range we.WriteErrors {
		if dup(e.Code, e.Message) {
			return true
		}
	}
	return false
}
// resetClusterState releases this process's cluster claim document.
func (ic *indexClient) resetClusterState() error {
	filter := bson.M{"_id": ic.config.ResumeName}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	_, err := col.DeleteOne(context.Background(), filter)
	return err
}
// ensureEnabled checks whether this process still holds the cluster claim
// (the stored pid and hostname match this process) and, if so, refreshes
// the claim's expireAt heartbeat.
func (ic *indexClient) ensureEnabled() (enabled bool, err error) {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err = result.Err(); err != nil {
		return
	}
	doc := make(map[string]interface{})
	if err = result.Decode(&doc); err != nil {
		return
	}
	// Robustness fix: use checked assertions — a malformed claim document
	// previously panicked here instead of reporting "not enabled".
	pid, pidOK := doc["pid"].(int32)
	host, hostOK := doc["host"].(string)
	if !pidOK || !hostOK {
		return
	}
	var hostname string
	if hostname, err = os.Hostname(); err != nil {
		return
	}
	enabled = int(pid) == os.Getpid() && host == hostname
	if enabled {
		_, err = col.UpdateOne(context.Background(), bson.M{
			"_id": ic.config.ResumeName,
		}, bson.M{
			"$set": bson.M{"expireAt": time.Now().UTC()},
		})
	}
	return
}
// resumeWork loads the saved resume timestamp (if present), points the gtm
// context at it, drains any operations already buffered on the op channel,
// and resumes tailing.
func (ic *indexClient) resumeWork() {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if result.Err() == nil {
		doc := make(map[string]interface{})
		if result.Decode(&doc) == nil {
			if doc["ts"] != nil {
				ts := doc["ts"].(primitive.Timestamp)
				ic.gtmCtx.Since(ts)
			}
		}
	}
	// Discard buffered ops observed before the resume point.
drain:
	for {
		select {
		case _, open := <-ic.gtmCtx.OpC:
			if !open {
				break drain
			}
		default:
			break drain
		}
	}
	ic.gtmCtx.Resume()
}
// saveTimestamp upserts the most recently processed oplog timestamp under
// the resume name so a later run can continue where this one left off.
func (ic *indexClient) saveTimestamp() error {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	filter := bson.M{"_id": ic.config.ResumeName}
	update := bson.M{"$set": map[string]interface{}{"ts": ic.lastTs}}
	_, err := col.UpdateOne(context.Background(), filter, update,
		options.Update().SetUpsert(true))
	return err
}
// parseCommandLineFlags registers every monstache command line flag against
// the config struct and parses os.Args. It returns the receiver for
// chaining. Fixes several typos in the user-visible help text ("givne",
// "preprend", "Stategy", "restric") and the time-machine-namespace
// description, which was copy-pasted from direct-read-namespace.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.BoolVar(&config.EnableTemplate, "tpl", false, "True to interpret the config file as a template")
	flag.StringVar(&config.EnvDelimiter, "env-delimiter", ",", "A delimiter to use when splitting environment variable values")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.PostProcessors, "post-processors", 0, "Number of post-processing go routines")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.IntVar(&config.RelateThreads, "relate-threads", 0, "Number of threads dedicated to processing relationships")
	flag.IntVar(&config.RelateBuffer, "relate-buffer", 0, "Number of relates to queue before skipping and reporting an error")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to enable gzip for requests to Elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableOplog, "enable-oplog", false, "True to enable direct tailing of the oplog")
	flag.BoolVar(&config.DisableChangeEvents, "disable-change-events", false, "True to disable listening for changes. You must provide direct-reads in this case")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include an json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	flag.Var(&config.ChangeStreamNs, "change-stream-namespace", "A list of change stream namespaces")
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	flag.IntVar(&config.DirectReadConcur, "direct-read-concur", 0, "Max number of direct-read-namespaces to read concurrently. By default all given are read concurrently")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of time machine namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to prepend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into any time machine indexes")
	flag.BoolVar(&config.PipeAllowDisk, "pipe-allow-disk", false, "True to allow MongoDB to use the disk for pipeline options with lots of results")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Strategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restrict the scope of stateless deletes")
	flag.StringVar(&config.ConfigDatabaseName, "config-database-name", "", "The MongoDB database name that monstache uses to store metadata")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.BoolVar(&config.Debug, "debug", false, "True to enable verbose debug information")
	flag.Parse()
	return config
}
// loadReplacements registers the relations declared in the configuration.
// Each relation must name both a source namespace and a related namespace
// of the form "db.collection"; SrcField and MatchField default to "_id".
//
// Fix: the original guard used ||, so a relation with only one of the two
// namespaces set was accepted (e.g. an empty Namespace was registered
// under the "" key), contradicting the fatal error message. Both fields
// are now required, matching the message.
func (config *configOptions) loadReplacements() {
	for _, r := range config.Relate {
		if r.Namespace == "" || r.WithNamespace == "" {
			errorLog.Fatalln("Relates must specify namespace and with-namespace")
		}
		// The related namespace must split into database and collection.
		dbCol := strings.SplitN(r.WithNamespace, ".", 2)
		if len(dbCol) != 2 {
			errorLog.Fatalf("Replacement namespace is invalid: %s", r.WithNamespace)
		}
		database, collection := dbCol[0], dbCol[1]
		// Use a distinct name for the registered relation to avoid
		// shadowing the loop variable.
		rel := &relation{
			Namespace:     r.Namespace,
			WithNamespace: r.WithNamespace,
			SrcField:      r.SrcField,
			MatchField:    r.MatchField,
			KeepSrc:       r.KeepSrc,
			MaxDepth:      r.MaxDepth,
			db:            database,
			col:           collection,
		}
		// Join fields default to the document id.
		if rel.SrcField == "" {
			rel.SrcField = "_id"
		}
		if rel.MatchField == "" {
			rel.MatchField = "_id"
		}
		relates[rel.Namespace] = append(relates[rel.Namespace], rel)
	}
}
// loadIndexTypes registers the namespace-to-index mappings declared in
// the configuration. Index names are lowercased, since Elasticsearch
// index names must be lowercase. Both fields are required.
func (config *configOptions) loadIndexTypes() {
	for _, m := range config.Mapping {
		if m.Namespace == "" || m.Index == "" {
			errorLog.Fatalln("Mappings must specify namespace and index")
		}
		mapIndexTypes[m.Namespace] = &indexMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
		}
	}
}
// loadPipelines compiles the javascript pipeline definitions from the
// configuration into execution environments, one per namespace. A
// pipeline supplies either an inline script or a path to one, but not
// both; its module.exports must evaluate to a function.
//
// Fix: the duplicate-namespace check previously consulted filterEnvs
// (copy-paste from loadFilters), so duplicate pipelines were never
// detected. It now checks pipeEnvs, the map this function populates.
func (config *configOptions) loadPipelines() {
	for _, s := range config.Pipeline {
		if s.Path == "" && s.Script == "" {
			errorLog.Fatalln("Pipelines must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Pipelines must specify path or script but not both")
		}
		if s.Path != "" {
			if script, err := ioutil.ReadFile(s.Path); err == nil {
				s.Script = string(script[:])
			} else {
				errorLog.Fatalf("Unable to load pipeline at path %s: %s", s.Path, err)
			}
		}
		if _, exists := pipeEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple pipelines with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a module object so scripts can assign module.exports.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		pipeEnvs[s.Namespace] = env
	}
}
// loadFilters compiles the javascript filter definitions from the
// configuration into execution environments, one per namespace. A filter
// supplies either an inline script or a path to one, but not both; its
// module.exports must evaluate to a function.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Filters must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Filters must specify path or script but not both")
		}
		if s.Path != "" {
			content, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load filter at path %s: %s", s.Path, err)
			}
			s.Script = string(content)
		}
		if _, exists := filterEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple filters with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a module object so scripts can assign module.exports.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !exports.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		filterEnvs[s.Namespace] = env
	}
}
// loadScripts compiles the javascript mapping definitions from the
// configuration into execution environments, one per namespace, and
// records which namespaces opt into script-based routing. A script
// supplies either inline source or a path to it, but not both; its
// module.exports must evaluate to a function.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Scripts must specify path or script")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Scripts must specify path or script but not both")
		}
		if s.Path != "" {
			content, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load script at path %s: %s", s.Path, err)
			}
			s.Script = string(content)
		}
		if _, exists := mapEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple scripts with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a module object so scripts can assign module.exports.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !exports.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		mapEnvs[s.Namespace] = env
		if s.Routing {
			routingNamespaces[s.Namespace] = true
		}
	}
}
// loadPlugins opens the Go plugin named by mapper-plugin-path, when one
// is configured, and wires up any exported Map, Filter, Process and
// Pipeline symbols. Each symbol, when present, must match the expected
// function signature; a plugin exporting none of them only draws a
// warning.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Fatalf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	funcDefined := false
	if mapper, lookupErr := p.Lookup("Map"); lookupErr == nil {
		funcDefined = true
		switch f := mapper.(type) {
		case func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error):
			mapperPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Map' function must be typed %T", mapperPlugin)
		}
	}
	if filter, lookupErr := p.Lookup("Filter"); lookupErr == nil {
		funcDefined = true
		switch f := filter.(type) {
		case func(*monstachemap.MapperPluginInput) (bool, error):
			filterPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
	}
	if process, lookupErr := p.Lookup("Process"); lookupErr == nil {
		funcDefined = true
		switch f := process.(type) {
		case func(*monstachemap.ProcessPluginInput) error:
			processPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Process' function must be typed %T", processPlugin)
		}
	}
	if pipe, lookupErr := p.Lookup("Pipeline"); lookupErr == nil {
		funcDefined = true
		switch f := pipe.(type) {
		case func(string, bool) ([]interface{}, error):
			pipePlugin = f
		default:
			errorLog.Fatalf("Plugin 'Pipeline' function must be typed %T", pipePlugin)
		}
	}
	if !funcDefined {
		warnLog.Println("Plugin loaded but did not find a Map, Filter, Process or Pipeline function")
	}
	return config
}
// decodeAsTemplate renders the configuration file as a Go text template,
// exposing the process environment variables as template data, then
// decodes the rendered TOML into the receiver. Unknown keys in the file
// are fatal.
func (config *configOptions) decodeAsTemplate() *configOptions {
	envMap := make(map[string]string)
	for _, entry := range os.Environ() {
		if parts := strings.SplitN(entry, "=", 2); len(parts) == 2 {
			envMap[parts[0]] = parts[1]
		}
	}
	source, err := ioutil.ReadFile(config.ConfigFile)
	if err != nil {
		errorLog.Fatalln(err)
	}
	tmpl := template.Must(template.New("config").Parse(string(source)))
	var rendered bytes.Buffer
	if err = tmpl.Execute(&rendered, envMap); err != nil {
		errorLog.Fatalln(err)
	}
	md, decodeErr := toml.Decode(rendered.String(), config)
	if decodeErr != nil {
		errorLog.Fatalln(decodeErr)
	}
	if undecoded := md.Undecoded(); len(undecoded) != 0 {
		errorLog.Fatalf("Config file contains undecoded keys: %q", undecoded)
	}
	return config
}
// loadConfigFile merges settings from the TOML configuration file, when
// one was given, into the receiver. Values already present on the
// receiver (typically set by flags or environment variables) win; the
// file only fills in values still at their zero value. Boolean options
// are merged so that either source can turn them on — or, for the two
// options that default to true, off. Finally the scripts, filters,
// pipelines, mappings and relations declared in the file are compiled
// and registered.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Decode into a fresh struct so receiver values are not
		// clobbered. DroppedDatabases/DroppedCollections default to
		// true, and GtmSettings get their defaults up front.
		var tomlConfig = configOptions{
			ConfigFile:         config.ConfigFile,
			DroppedDatabases:   true,
			DroppedCollections: true,
			GtmSettings:        gtmDefaultSettings(),
		}
		if config.EnableTemplate {
			// Render environment variables into the file before decoding.
			tomlConfig.decodeAsTemplate()
		} else {
			if md, err := toml.DecodeFile(tomlConfig.ConfigFile, &tomlConfig); err != nil {
				errorLog.Fatalln(err)
			} else if ud := md.Undecoded(); len(ud) != 0 {
				errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
			}
		}
		// Strings and numbers: file value applies only when the
		// receiver's value is still zero/empty.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		// ElasticValidatePemFile defaults to on; the file can only
		// switch it off.
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.ElasticHealth0 == 0 {
			config.ElasticHealth0 = tomlConfig.ElasticHealth0
		}
		if config.ElasticHealth1 == 0 {
			config.ElasticHealth1 = tomlConfig.ElasticHealth1
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		if config.DirectReadConcur == 0 {
			config.DirectReadConcur = tomlConfig.DirectReadConcur
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if !config.IndexFiles {
			config.IndexFiles = tomlConfig.IndexFiles
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.RelateThreads == 0 {
			config.RelateThreads = tomlConfig.RelateThreads
		}
		if config.RelateBuffer == 0 {
			config.RelateBuffer = tomlConfig.RelateBuffer
		}
		if config.PostProcessors == 0 {
			config.PostProcessors = tomlConfig.PostProcessors
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		// DroppedDatabases/DroppedCollections also default to on; the
		// file can only switch them off.
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		// Remaining booleans default to off; either source can enable.
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableOplog && tomlConfig.EnableOplog {
			config.EnableOplog = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.DisableChangeEvents && tomlConfig.DisableChangeEvents {
			config.DisableChangeEvents = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Debug && tomlConfig.Debug {
			config.Debug = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if config.ConfigDatabaseName == "" {
			config.ConfigDatabaseName = tomlConfig.ConfigDatabaseName
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		if config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Namespace lists from the file also need their side-effect
		// registration (global maps) re-run when adopted.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
				config.loadGridFsConfig()
			}
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
				config.loadPatchNamespaces()
			}
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if !config.PipeAllowDisk {
			config.PipeAllowDisk = tomlConfig.PipeAllowDisk
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ChangeStreamNs) == 0 {
			config.ChangeStreamNs = tomlConfig.ChangeStreamNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		if !config.AWSConnect.enabled() {
			config.AWSConnect = tomlConfig.AWSConnect
		}
		if !config.Logs.enabled() {
			config.Logs = tomlConfig.Logs
		}
		// These sections are file-only: always taken from the file.
		config.GtmSettings = tomlConfig.GtmSettings
		config.Relate = tomlConfig.Relate
		// Compile/register everything declared in the file.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadPipelines()
		tomlConfig.loadIndexTypes()
		tomlConfig.loadReplacements()
	}
	return config
}
// newLogger builds a size-rotated file logger writing to the given path.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	logger := &lumberjack.Logger{Filename: path}
	logger.MaxSize = 500  // megabytes per file before rotation
	logger.MaxBackups = 5 // rotated files to retain
	logger.MaxAge = 28    // days to retain rotated files
	return logger
}
// setupLogging directs the log streams either to a Graylog GELF endpoint
// (when graylog-addr is set) or to individually configured rotating log
// files. Streams with no configured destination keep their defaults.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr == "" {
		logs := config.Logs
		if logs.Info != "" {
			infoLog.SetOutput(config.newLogger(logs.Info))
		}
		if logs.Warn != "" {
			warnLog.SetOutput(config.newLogger(logs.Warn))
		}
		if logs.Error != "" {
			errorLog.SetOutput(config.newLogger(logs.Error))
		}
		if logs.Trace != "" {
			traceLog.SetOutput(config.newLogger(logs.Trace))
		}
		if logs.Stats != "" {
			statsLog.SetOutput(config.newLogger(logs.Stats))
		}
		return config
	}
	// Graylog takes precedence: every stream goes to the GELF writer.
	gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
	if err != nil {
		errorLog.Fatalf("Error creating gelf writer: %s", err)
	}
	infoLog.SetOutput(gelfWriter)
	warnLog.SetOutput(gelfWriter)
	errorLog.SetOutput(gelfWriter)
	traceLog.SetOutput(gelfWriter)
	statsLog.SetOutput(gelfWriter)
	return config
}
// build assembles the final configuration. The call order matters:
// environment variables and flag-supplied namespace lists are registered
// first, then loadConfigFile fills in only values still unset (earlier
// sources win), plugins are loaded, and setDefaults supplies fallbacks
// for anything that remains empty.
func (config *configOptions) build() *configOptions {
	config.loadEnvironment()
	config.loadTimeMachineNamespaces()
	config.loadRoutingNamespaces()
	config.loadPatchNamespaces()
	config.loadGridFsConfig()
	config.loadConfigFile()
	config.loadPlugins()
	config.setDefaults()
	return config
}
// loadEnvironment applies MONSTACHE_* environment variables to any
// setting not already provided (e.g. by a command line flag). List-valued
// variables are split on env-delimiter (default ","). The AWS and log
// directory variables are applied unconditionally.
//
// Fix: removed the redundant `break` after every switch case and the
// redundant `default: continue` — Go switch cases do not fall through,
// so these were no-ops flagged by style checkers.
func (config *configOptions) loadEnvironment() *configOptions {
	del := config.EnvDelimiter
	if del == "" {
		del = ","
	}
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if len(pair) < 2 {
			continue
		}
		name, val := pair[0], pair[1]
		// Empty values never override anything.
		if val == "" {
			continue
		}
		switch name {
		case "MONSTACHE_MONGO_URL":
			if config.MongoURL == "" {
				config.MongoURL = val
			}
		case "MONSTACHE_MONGO_CONFIG_URL":
			if config.MongoConfigURL == "" {
				config.MongoConfigURL = val
			}
		case "MONSTACHE_MONGO_OPLOG_DB":
			if config.MongoOpLogDatabaseName == "" {
				config.MongoOpLogDatabaseName = val
			}
		case "MONSTACHE_MONGO_OPLOG_COL":
			if config.MongoOpLogCollectionName == "" {
				config.MongoOpLogCollectionName = val
			}
		case "MONSTACHE_ES_URLS":
			if len(config.ElasticUrls) == 0 {
				config.ElasticUrls = strings.Split(val, del)
			}
		case "MONSTACHE_ES_USER":
			if config.ElasticUser == "" {
				config.ElasticUser = val
			}
		case "MONSTACHE_ES_PASS":
			if config.ElasticPassword == "" {
				config.ElasticPassword = val
			}
		case "MONSTACHE_ES_PEM":
			if config.ElasticPemFile == "" {
				config.ElasticPemFile = val
			}
		case "MONSTACHE_WORKER":
			if config.Worker == "" {
				config.Worker = val
			}
		case "MONSTACHE_CLUSTER":
			if config.ClusterName == "" {
				config.ClusterName = val
			}
		case "MONSTACHE_DIRECT_READ_NS":
			if len(config.DirectReadNs) == 0 {
				config.DirectReadNs = strings.Split(val, del)
			}
		case "MONSTACHE_CHANGE_STREAM_NS":
			if len(config.ChangeStreamNs) == 0 {
				config.ChangeStreamNs = strings.Split(val, del)
			}
		case "MONSTACHE_NS_REGEX":
			if config.NsRegex == "" {
				config.NsRegex = val
			}
		case "MONSTACHE_NS_EXCLUDE_REGEX":
			if config.NsExcludeRegex == "" {
				config.NsExcludeRegex = val
			}
		case "MONSTACHE_NS_DROP_REGEX":
			if config.NsDropRegex == "" {
				config.NsDropRegex = val
			}
		case "MONSTACHE_NS_DROP_EXCLUDE_REGEX":
			if config.NsDropExcludeRegex == "" {
				config.NsDropExcludeRegex = val
			}
		case "MONSTACHE_GRAYLOG_ADDR":
			if config.GraylogAddr == "" {
				config.GraylogAddr = val
			}
		case "MONSTACHE_AWS_ACCESS_KEY":
			config.AWSConnect.AccessKey = val
		case "MONSTACHE_AWS_SECRET_KEY":
			config.AWSConnect.SecretKey = val
		case "MONSTACHE_AWS_REGION":
			config.AWSConnect.Region = val
		case "MONSTACHE_LOG_DIR":
			// One variable configures all five log streams.
			config.Logs.Info = val + "/info.log"
			config.Logs.Warn = val + "/warn.log"
			config.Logs.Error = val + "/error.log"
			config.Logs.Trace = val + "/trace.log"
			config.Logs.Stats = val + "/stats.log"
		case "MONSTACHE_HTTP_ADDR":
			if config.HTTPServerAddr == "" {
				config.HTTPServerAddr = val
			}
		case "MONSTACHE_FILE_NS":
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_PATCH_NS":
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_TIME_MACHINE_NS":
			if len(config.TimeMachineNamespaces) == 0 {
				config.TimeMachineNamespaces = strings.Split(val, del)
			}
		}
	}
	return config
}
// loadRoutingNamespaces marks each configured namespace as using custom
// routing in the global routingNamespaces set.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces records each configured namespace in the
// global tmNamespaces set used for time machine indexing.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces records each configured namespace in the global
// patchNamespaces set used for JSON merge-patch tracking.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig records each configured namespace in the global
// fileNamespaces set used for GridFS file indexing.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump logs the effective configuration as indented JSON with connection
// strings cleaned and all credentials redacted. The receiver is a value
// copy, so redacting here never touches the live configuration.
//
// Fix: the marshaled output was stored in a local named `json`, which
// shadowed the encoding/json package; renamed to `out`.
func (config configOptions) dump() {
	if config.MongoURL != "" {
		config.MongoURL = cleanMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = cleanMongoURL(config.MongoConfigURL)
	}
	if config.ElasticUser != "" {
		config.ElasticUser = redact
	}
	if config.ElasticPassword != "" {
		config.ElasticPassword = redact
	}
	if config.AWSConnect.AccessKey != "" {
		config.AWSConnect.AccessKey = redact
	}
	if config.AWSConnect.SecretKey != "" {
		config.AWSConnect.SecretKey = redact
	}
	if config.AWSConnect.Region != "" {
		config.AWSConnect.Region = redact
	}
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
// validate performs sanity checks on the assembled configuration,
// terminating the process on fatal misconfiguration and warning about
// settings that merely hurt performance.
func (config *configOptions) validate() {
	if config.DisableChangeEvents && len(config.DirectReadNs) == 0 {
		errorLog.Fatalln("Direct read namespaces must be specified if change events are disabled")
	}
	if config.AWSConnect.enabled() {
		if err := config.AWSConnect.validate(); err != nil {
			errorLog.Fatalln(err)
		}
	}
	if len(config.DirectReadNs) > 0 {
		if config.ElasticMaxSeconds < 5 {
			warnLog.Println("Direct read performance degrades with small values for elasticsearch-max-seconds. Set to 5s or greater to remove this warning.")
		}
		if config.ElasticMaxDocs > 0 {
			warnLog.Println("For performance reasons it is recommended to use elasticsearch-max-bytes instead of elasticsearch-max-docs since doc size may vary")
		}
	}
	if config.StatsDuration != "" {
		if _, err := time.ParseDuration(config.StatsDuration); err != nil {
			errorLog.Fatalf("Unable to parse stats duration: %s", err)
		}
	}
}
// setDefaults fills in fallback values for every option still unset
// after flags, environment variables, and the config file have been
// applied, and derives a few settings (resume name, change stream
// defaults) from others.
func (config *configOptions) setDefaults() *configOptions {
	// Without an explicit oplog or change stream configuration, watch
	// all namespaces via a single change stream.
	if !config.EnableOplog && len(config.ChangeStreamNs) == 0 {
		config.ChangeStreamNs = []string{""}
	}
	// Disabling change events turns off both event sources.
	if config.DisableChangeEvents {
		config.ChangeStreamNs = []string{}
		config.EnableOplog = false
	}
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	// Resume name precedence: cluster (optionally qualified by worker),
	// then worker, then the default. A cluster name also forces resume.
	if config.ClusterName != "" {
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.Worker != "" {
		config.ResumeName = config.Worker
	} else if config.ResumeName == "" {
		config.ResumeName = resumeNameDefault
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	// Direct reads prefer a larger bulk flush interval (see validate's
	// warning for values below 5 seconds).
	if config.ElasticMaxSeconds == 0 {
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	if config.ElasticHealth0 == 0 {
		config.ElasticHealth0 = 15
	}
	if config.ElasticHealth1 == 0 {
		config.ElasticHealth1 = 5
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	// Index name patterns below use Go reference-time layouts.
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 && config.IndexFiles {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.RelateThreads == 0 {
		config.RelateThreads = relateThreadsDefault
	}
	if config.RelateBuffer == 0 {
		config.RelateBuffer = relateBufferDefault
	}
	if config.PostProcessors == 0 && processPlugin != nil {
		config.PostProcessors = postProcessorsDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	if config.ConfigDatabaseName == "" {
		config.ConfigDatabaseName = configDatabaseNameDefault
	}
	// Values that fit in 32 bits are shifted into the high word —
	// presumably converting a plain seconds value into MongoDB's
	// (seconds << 32 | ordinal) timestamp layout; confirm against the
	// consumer of ResumeFromTimestamp.
	if config.ResumeFromTimestamp > 0 {
		if config.ResumeFromTimestamp <= math.MaxInt32 {
			config.ResumeFromTimestamp = config.ResumeFromTimestamp << 32
		}
	}
	return config
}
// cleanMongoURL returns a copy of the given MongoDB connection string
// with any userinfo before the host replaced by a redaction placeholder.
// A mongodb:// or mongodb+srv:// scheme, when present, is preserved.
//
// Fix: strings.IndexAny with a single-character cutset is just
// strings.Index — use the simpler, clearer call.
func cleanMongoURL(URL string) string {
	const (
		scheme    = "mongodb://"
		schemeSrv = "mongodb+srv://"
	)
	url := URL
	hasScheme := strings.HasPrefix(url, scheme)
	hasSchemeSrv := strings.HasPrefix(url, schemeSrv)
	url = strings.TrimPrefix(url, scheme)
	url = strings.TrimPrefix(url, schemeSrv)
	// Redact everything before the '@' separating userinfo from host.
	if userEnd := strings.Index(url, "@"); userEnd != -1 {
		url = redact + "@" + url[userEnd+1:]
	}
	if hasScheme {
		url = scheme + url
	} else if hasSchemeSrv {
		url = schemeSrv + url
	}
	return url
}
// cancelConnection exits the process if an interrupt or termination
// signal arrives while the initial MongoDB connection is still being
// established; a receive (or close) on mongoOk cancels the watch.
//
// Fix: SIGKILL was registered with signal.Notify, but per the os/signal
// documentation SIGKILL can never be caught, so registering it was a
// no-op and has been removed.
func (config *configOptions) cancelConnection(mongoOk chan bool) {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigs)
	select {
	case <-mongoOk:
		return
	case <-sigs:
		os.Exit(exitStatus)
	}
}
// dialMongo creates, connects, and pings a MongoDB client for the given
// URL. The first call builds the client options (BSON registry mapping
// datetimes to time.Time, app name) and caches them; subsequent calls —
// used when connecting to individual shards, where only the hostname and
// replica set differ — reapply the new URL on top of the cached options.
//
// Fix: the signal-watching goroutine started via cancelConnection was
// only released on the success path; when Connect or Ping failed,
// mongoOk was never closed and the goroutine leaked. Closing it in a
// defer covers every return path.
func (config *configOptions) dialMongo(URL string) (*mongo.Client, error) {
	var clientOptions *options.ClientOptions
	if config.mongoClientOptions == nil {
		// Decode BSON datetimes as time.Time values.
		rb := bson.NewRegistryBuilder()
		rb.RegisterTypeMapEntry(bsontype.DateTime, reflect.TypeOf(time.Time{}))
		reg := rb.Build()
		clientOptions = options.Client()
		clientOptions.ApplyURI(URL)
		clientOptions.SetAppName("monstache")
		clientOptions.SetRegistry(reg)
		config.mongoClientOptions = clientOptions
	} else {
		clientOptions = config.mongoClientOptions
		clientOptions.ApplyURI(URL)
	}
	client, err := mongo.NewClient(clientOptions)
	if err != nil {
		return nil, err
	}
	// Allow SIGINT/SIGTERM to abort a hung connection attempt; closing
	// mongoOk stops the watcher on all paths.
	mongoOk := make(chan bool)
	go config.cancelConnection(mongoOk)
	defer close(mongoOk)
	if err = client.Connect(context.Background()); err != nil {
		return nil, err
	}
	if err = client.Ping(context.Background(), nil); err != nil {
		return nil, err
	}
	return client, nil
}
// NewHTTPClient builds the HTTP client used to talk to Elasticsearch,
// applying the optional PEM trust root, the TLS validation toggle, the
// gzip setting, the client timeout, and — when AWS connectivity is
// configured — SigV4 request signing.
//
// Fixes: replaced the `== false` comparison with the idiomatic negation,
// simplified `time.Duration(30) * time.Second` to `30 * time.Second`,
// and flattened the PEM-loading branch with an early error return.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err != nil {
			return client, err
		}
		// A parse failure is logged but not fatal; the pool may still
		// be empty, in which case TLS verification will fail later.
		if ok := certs.AppendCertsFromPEM(ca); !ok {
			errorLog.Printf("No certs parsed successfully from %s", config.ElasticPemFile)
		}
		tlsConfig.RootCAs = certs
	}
	if !config.ElasticValidatePemFile {
		// Certificate validation disabled by configuration.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	if config.AWSConnect.enabled() {
		// Wrap the client so each request is signed with AWS SigV4.
		client = aws.NewV4SigningClientWithHTTPClient(credentials.NewStaticCredentials(
			config.AWSConnect.AccessKey,
			config.AWSConnect.SecretKey,
			"",
		), config.AWSConnect.Region, client)
	}
	return client, err
}
// doDrop reacts to database and collection drop operations by deleting
// the corresponding Elasticsearch indexes and their stored metadata,
// honoring the dropped-databases / dropped-collections toggles. Metadata
// deletion failures are logged but do not fail the drop.
func (ic *indexClient) doDrop(op *gtm.Op) (err error) {
	if db, dropped := op.IsDropDatabase(); dropped {
		if !ic.config.DroppedDatabases {
			return
		}
		if err = ic.deleteIndexes(db); err != nil {
			return
		}
		if e := ic.dropDBMeta(db); e != nil {
			errorLog.Printf("Unable to delete metadata for db: %s", e)
		}
		return
	}
	if col, dropped := op.IsDropCollection(); dropped {
		if !ic.config.DroppedCollections {
			return
		}
		ns := op.GetDatabase() + "." + col
		if err = ic.deleteIndex(ns); err != nil {
			return
		}
		if e := ic.dropCollectionMeta(ns); e != nil {
			errorLog.Printf("Unable to delete metadata for collection: %s", e)
		}
	}
	return
}
// hasFileContent reports whether file indexing is enabled and the
// operation's namespace is registered for GridFS file content.
func (ic *indexClient) hasFileContent(op *gtm.Op) bool {
	return ic.config.IndexFiles && fileNamespaces[op.Namespace]
}
// addPatch maintains a JSON merge-patch history on the indexed document
// under the attribute named by config.MergePatchAttr. For updates it
// fetches the previously indexed document, computes a merge patch
// between it and the new data, and appends the patch (with timestamp
// and version number) to the existing history; for other operations it
// stores the full document as version 1, unless a history already
// exists. Direct reads and operations without a timestamp are skipped.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp.T == 0 {
		return nil
	}
	if op.IsUpdate() {
		// Fetch the last indexed revision of this document.
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		// Per-document metadata overrides the computed id/index/routing.
		if meta.ID != "" {
			service.Id(meta.ID)
		}
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; restore the
						// ts and v entries to ints before re-indexing.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff only the document body, not the history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								// Append the new patch to the history and
								// attach it to the outgoing document.
								merge := make(map[string]interface{})
								merge["ts"] = op.Timestamp.T
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// First version: store the whole document as the initial patch,
		// unless a history attribute is already present.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp.T
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing converts an operation into Elasticsearch bulk requests: an
// update-with-upsert or index request for the document itself, plus an
// optional time machine log entry, applying any indexing metadata (id,
// index, routing, parent, version, pipeline) parsed from the document.
//
// Fixes: removed a dead, empty `if meta.Type != "" { }` block (leftover
// with no body), replaced `== false` comparisons with idiomatic
// negation, and renamed an inner `t` that shadowed the outer timestamp.
func (ic *indexClient) doIndexing(op *gtm.Op) (err error) {
	meta := parseIndexMeta(op)
	if meta.Skip {
		return
	}
	ic.prepareDataForIndexing(op)
	objectID, indexType := opIDToString(op), ic.mapIndex(op)
	if ic.config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			// Patch history failures are logged, not fatal.
			if e := addPatch(ic.config, ic.client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	ingestAttachment := false
	if ic.hasFileContent(op) {
		ingestAttachment = op.Data["file"] != nil
	}
	if ic.config.IndexAsUpdate && meta.Pipeline == "" && !ingestAttachment {
		// Update requests cannot carry a pipeline or attachment
		// ingestion; those cases fall through to an index request.
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		// Metadata overrides take precedence over computed values.
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// Only enqueue requests that serialize cleanly.
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	} else {
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		if ingestAttachment {
			// File content goes through the attachment ingest pipeline.
			req.Pipeline("attachment")
		}
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	}
	if meta.shouldSave(ic.config) {
		if e := ic.setIndexMeta(op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || ic.config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// Time machine index name: prefix.index.suffix, with the
			// suffix interpreted as a Go time layout, lowercased.
			tmIndex := func(idx string) string {
				pre, suf := ic.config.TimeMachineIndexPrefix, ic.config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			// Copy the document so the log entry's extra fields do not
			// leak into the primary index request above.
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			if !ic.config.IndexOplogTime {
				secs := int64(op.Timestamp.T)
				oplogTime := time.Unix(secs, 0).UTC()
				data[ic.config.OplogTsFieldName] = op.Timestamp
				data[ic.config.OplogDateFieldName] = oplogTime.Format(ic.config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(ic.config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			if _, err = req.Source(); err == nil {
				ic.bulk.Add(req)
			}
		}
	}
	return
}
// doIndex applies the configured document mapping to op and then either
// indexes the mapped document or, when the mapping produced no data for an
// update, deletes the document from Elasticsearch instead.
func (ic *indexClient) doIndex(op *gtm.Op) (err error) {
	if err = mapData(ic.mongo, ic.config, op); err == nil {
		if op.Data != nil {
			err = ic.doIndexing(op)
		} else if op.IsUpdate() {
			// Mapping dropped the document on an update: remove it from the index.
			ic.doDelete(op)
		}
	}
	return
}
// runProcessor packages op into a ProcessPluginInput and hands it to the
// external processPlugin. For deletes, which carry no document body, a
// minimal document containing only the _id is supplied instead.
func (ic *indexClient) runProcessor(op *gtm.Op) (err error) {
	input := &monstachemap.ProcessPluginInput{
		ElasticClient:        ic.client,
		ElasticBulkProcessor: ic.bulk,
		Timestamp:            op.Timestamp,
	}
	input.Document = op.Data
	if op.IsDelete() {
		// Deletes have no body; expose just the id to the plugin.
		input.Document = map[string]interface{}{
			"_id": op.Id,
		}
	}
	input.Namespace = op.Namespace
	input.Database = op.GetDatabase()
	input.Collection = op.GetCollection()
	input.Operation = op.Operation
	input.MongoClient = ic.mongo
	input.UpdateDescription = op.UpdateDescription
	err = processPlugin(input)
	return
}
func (ic *indexClient) routeProcess(op *gtm.Op) (err error) {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
UpdateDescription: op.UpdateDescription,
}
if op.Data != nil {
var data []byte
data, err = bson.Marshal(op.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
rop.Data = m
}
}
}
ic.processC <- rop
return
}
// routeDrop flushes any buffered bulk requests before handling a database or
// collection drop, so pending index operations are not applied after the
// corresponding indexes are removed.
func (ic *indexClient) routeDrop(op *gtm.Op) (err error) {
	ic.bulk.Flush()
	err = ic.doDrop(op)
	return
}
func (ic *indexClient) routeDeleteRelate(op *gtm.Op) (err error) {
if rs := relates[op.Namespace]; len(rs) != 0 {
var delData map[string]interface{}
useFind := false
for _, r := range rs {
if r.SrcField != "_id" {
useFind = true
break
}
}
if useFind {
delData = findDeletedSrcDoc(ic.config, ic.client, op)
} else {
delData = map[string]interface{}{
"_id": op.Id,
}
}
if delData != nil {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: delData,
}
select {
case ic.relateC <- rop:
default:
errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
}
}
}
return
}
// routeDelete handles a delete op: when relations are configured it first
// routes the delete to related namespaces, then removes the document itself.
func (ic *indexClient) routeDelete(op *gtm.Op) (err error) {
	if len(ic.config.Relate) > 0 {
		err = ic.routeDeleteRelate(op)
	}
	ic.doDelete(op)
	return
}
func (ic *indexClient) routeDataRelate(op *gtm.Op) (skip bool, err error) {
rs := relates[op.Namespace]
if len(rs) == 0 {
return
}
skip = true
for _, r := range rs {
if r.KeepSrc {
skip = false
break
}
}
if skip {
select {
case ic.relateC <- op:
default:
errorLog.Printf(relateQueueOverloadMsg, op.Namespace, op.Id)
}
} else {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
UpdateDescription: op.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(op.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
rop.Data = m
}
}
select {
case ic.relateC <- rop:
default:
errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
}
}
return
}
// routeData dispatches an op carrying a document body: oplog ops may first be
// diverted to relation handling (which can suppress direct indexing), then the
// op goes to the file-download queue when it has file content, otherwise
// straight to the indexing queue.
func (ic *indexClient) routeData(op *gtm.Op) (err error) {
	skip := false
	if op.IsSourceOplog() && len(ic.config.Relate) > 0 {
		skip, err = ic.routeDataRelate(op)
	}
	if !skip {
		if ic.hasFileContent(op) {
			ic.fileC <- op
		} else {
			ic.indexC <- op
		}
	}
	return
}
// routeOp is the top-level dispatcher for a gtm op: it first feeds the op to
// the process plugin (if one is loaded) and then routes drops, deletes, and
// data-bearing ops to their handlers.
// NOTE(review): an error from routeProcess is overwritten when a later route
// also assigns err — confirm whether plugin errors are intentionally
// superseded here.
func (ic *indexClient) routeOp(op *gtm.Op) (err error) {
	if processPlugin != nil {
		err = ic.routeProcess(op)
	}
	if op.IsDrop() {
		err = ic.routeDrop(op)
	} else if op.IsDelete() {
		err = ic.routeDelete(op)
	} else if op.Data != nil {
		err = ic.routeData(op)
	}
	return
}
// processErr records a processing error: it marks the process exit status as
// failed, logs the error, and terminates immediately when fail-fast mode is
// enabled. The mutex guards the shared exitStatus.
// NOTE(review): os.Exit bypasses the deferred unlock, which is harmless at
// process exit.
func (ic *indexClient) processErr(err error) {
	config := ic.config
	mux.Lock()
	defer mux.Unlock()
	exitStatus = 1
	errorLog.Println(err)
	if config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats builds a stats document (timestamp, host, pid, bulk processor
// statistics) and queues it on the dedicated stats bulk processor. The target
// index name is derived from the configured time-based format. A hostname
// lookup failure simply omits the Host field.
func (ic *indexClient) doIndexStats() (err error) {
	var hostname string
	doc := make(map[string]interface{})
	t := time.Now().UTC()
	doc["Timestamp"] = t.Format("2006-01-02T15:04:05")
	hostname, err = os.Hostname()
	if err == nil {
		doc["Host"] = hostname
	}
	doc["Pid"] = os.Getpid()
	doc["Stats"] = ic.bulk.Stats()
	// Index name rotates with time per StatsIndexFormat; ES requires lowercase.
	index := strings.ToLower(t.Format(ic.config.StatsIndexFormat))
	req := elastic.NewBulkIndexRequest().Index(index)
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	req.Doc(doc)
	ic.bulkStats.Add(req)
	return
}
// dropDBMeta deletes all saved indexing metadata belonging to a dropped
// database. Only relevant under the stateful delete strategy, which persists
// per-document metadata in the config database's "meta" collection.
func (ic *indexClient) dropDBMeta(db string) (err error) {
	if ic.config.DeleteStrategy == statefulDeleteStrategy {
		col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
		q := bson.M{"db": db}
		_, err = col.DeleteMany(context.Background(), q)
	}
	return
}
// dropCollectionMeta deletes all saved indexing metadata belonging to a
// dropped collection (namespace). Counterpart of dropDBMeta at collection
// granularity; a no-op unless the stateful delete strategy is active.
func (ic *indexClient) dropCollectionMeta(namespace string) (err error) {
	if ic.config.DeleteStrategy == statefulDeleteStrategy {
		col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
		q := bson.M{"namespace": namespace}
		_, err = col.DeleteMany(context.Background(), q)
	}
	return
}
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
var v interface{}
var ok bool
var s string
if _, ok = metaAttrs["skip"]; ok {
meta.Skip = true
}
if v, ok = metaAttrs["routing"]; ok {
meta.Routing = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["index"]; ok {
meta.Index = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["id"]; ok {
op := >m.Op{
Id: v,
}
meta.ID = opIDToString(op)
}
if v, ok = metaAttrs["type"]; ok {
meta.Type = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["parent"]; ok {
meta.Parent = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["version"]; ok {
s = fmt.Sprintf("%v", v)
if version, err := strconv.ParseInt(s, 10, 64); err == nil {
meta.Version = version
}
}
if v, ok = metaAttrs["versionType"]; ok {
meta.VersionType = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["pipeline"]; ok {
meta.Pipeline = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["retryOnConflict"]; ok {
s = fmt.Sprintf("%v", v)
if roc, err := strconv.Atoi(s); err == nil {
meta.RetryOnConflict = roc
}
}
}
// shouldSave reports whether this metadata must be persisted for a later
// stateful delete: true only when the stateful delete strategy is configured
// and at least one routing-affecting attribute is set.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts per-document indexing metadata (routing, index, type,
// parent, pipeline) into the config database's "meta" collection, keyed by
// "namespace.id". This record is later consumed by getIndexMeta to delete
// documents that were custom-routed at index time.
func (ic *indexClient) setIndexMeta(namespace, id string, meta *indexingMeta) error {
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"id":       meta.ID,
		"routing":  meta.Routing,
		"index":    meta.Index,
		"type":     meta.Type,
		"parent":   meta.Parent,
		"pipeline": meta.Pipeline,
		// db is the namespace's database prefix, used by dropDBMeta.
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	opts := options.Update()
	opts.SetUpsert(true)
	_, err := col.UpdateOne(context.Background(), bson.M{
		"_id": metaID,
	}, bson.M{
		"$set": doc,
	}, opts)
	return err
}
// getIndexMeta retrieves previously saved indexing metadata for a document,
// used by the stateful delete strategy to target deletes at custom-routed
// documents. The metadata record is removed once read, since it is only
// needed at deletion time. Always returns a non-nil meta; fields stay empty
// when no metadata exists.
// Fix: replaced bare type assertions with comma-ok form so a malformed
// metadata document cannot panic the indexer.
func (ic *indexClient) getIndexMeta(namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	result := col.FindOne(context.Background(), bson.M{
		"_id": metaID,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			if s, ok := doc["id"].(string); ok {
				meta.ID = s
			}
			if s, ok := doc["routing"].(string); ok {
				meta.Routing = s
			}
			if s, ok := doc["index"].(string); ok {
				// Elasticsearch index names are lowercase.
				meta.Index = strings.ToLower(s)
			}
			if s, ok := doc["type"].(string); ok {
				meta.Type = s
			}
			if s, ok := doc["parent"].(string); ok {
				meta.Parent = s
			}
			if s, ok := doc["pipeline"].(string); ok {
				meta.Pipeline = s
			}
			// Metadata is single-use; discard it after a successful read.
			col.DeleteOne(context.Background(), bson.M{"_id": metaID})
		}
	}
	return
}
// loadBuiltinFunctions registers the built-in MongoDB query helpers (findId,
// findOne, find, pipe) on every mapping-script VM. Each helper shares the
// same client, VM, and namespace but differs in lookup semantics.
// Fix: collapsed four copy-pasted registration stanzas into one data-driven
// loop; registration order and behavior are unchanged.
func loadBuiltinFunctions(client *mongo.Client, config *configOptions) {
	for ns, env := range mapEnvs {
		// Per-function settings; shared fields are filled in below.
		confs := []*findConf{
			{name: "findId", byId: true},
			{name: "findOne"},
			{name: "find", multi: true},
			{name: "pipe", multi: true, pipe: true, pipeAllowDisk: config.PipeAllowDisk},
		}
		for _, fa := range confs {
			fa.client = client
			fa.vm = env.VM
			fa.ns = ns
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				errorLog.Fatalln(err)
			}
		}
	}
}
// setDatabase applies the optional "database" option, which must be a string
// naming the database to query.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	raw, present := topts["database"]
	if !present {
		return
	}
	name, isString := raw.(string)
	if !isString {
		return errors.New("Invalid database option value")
	}
	fc.db = name
	return
}
// setCollection applies the optional "collection" option, which must be a
// string naming the collection to query.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	raw, present := topts["collection"]
	if !present {
		return
	}
	name, isString := raw.(string)
	if !isString {
		return errors.New("Invalid collection option value")
	}
	fc.col = name
	return
}
// setSelect applies the optional "select" projection option: a map of field
// names to int64 inclusion/exclusion flags. Non-integer values are ignored.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	raw, present := topts["select"]
	if !present {
		return
	}
	projection, isMap := raw.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid select option value")
	}
	for field, flag := range projection {
		if n, isInt := flag.(int64); isInt {
			fc.sel[field] = int(n)
		}
	}
	return
}
// setSort applies the optional "sort" option: a map of field names to int64
// sort directions. Non-integer values are ignored.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	raw, present := topts["sort"]
	if !present {
		return
	}
	ordering, isMap := raw.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid sort option value")
	}
	for field, direction := range ordering {
		if n, isInt := direction.(int64); isInt {
			fc.sort[field] = int(n)
		}
	}
	return
}
// setLimit applies the optional "limit" option, which must be an int64
// maximum result count.
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	raw, present := topts["limit"]
	if !present {
		return
	}
	n, isInt := raw.(int64)
	if !isInt {
		return errors.New("Invalid limit option value")
	}
	fc.limit = int(n)
	return
}
// setQuery exports the JavaScript query argument to a Go value, converting
// hex-encoded id strings back to ObjectIDs via restoreIds.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	var q interface{}
	if q, err = v.Export(); err == nil {
		fc.query = fc.restoreIds(deepExportValue(q))
	}
	return
}
// setOptions parses the optional second JavaScript argument: an object that
// may set database, collection, and projection, plus sort and limit for
// multi-result calls (find/pipe). Any non-object argument is rejected.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	var opts interface{}
	if opts, err = v.Export(); err == nil {
		switch topts := opts.(type) {
		case map[string]interface{}:
			if err = fc.setDatabase(topts); err != nil {
				return
			}
			if err = fc.setCollection(topts); err != nil {
				return
			}
			if err = fc.setSelect(topts); err != nil {
				return
			}
			// sort and limit only make sense for multi-result queries.
			if fc.isMulti() {
				if err = fc.setSort(topts); err != nil {
					return
				}
				if err = fc.setLimit(topts); err != nil {
					return
				}
			}
		default:
			err = errors.New("Invalid options argument")
			return
		}
	} else {
		err = errors.New("Invalid options argument")
	}
	return
}
// setDefaults seeds the call's database and collection from the script's
// namespace ("db.collection") when one is configured; options may override
// these later.
func (fc *findCall) setDefaults() {
	if fc.config.ns != "" {
		ns := strings.SplitN(fc.config.ns, ".", 2)
		fc.db = ns[0]
		fc.col = ns[1]
	}
}
// getCollection returns the target collection handle for this call.
func (fc *findCall) getCollection() *mongo.Collection {
	return fc.client.Database(fc.db).Collection(fc.col)
}

// getVM returns the JavaScript VM this call belongs to.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}

// getFunctionName returns the registered name of the built-in (e.g. "find").
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}

// isMulti reports whether the call returns multiple documents.
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}

// isPipe reports whether the call runs an aggregation pipeline.
func (fc *findCall) isPipe() bool {
	return fc.config.pipe
}

// pipeAllowDisk reports whether aggregations may spill to disk.
func (fc *findCall) pipeAllowDisk() bool {
	return fc.config.pipeAllowDisk
}

// logError logs an error prefixed with the built-in function's name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts every string
// that parses as a 24-char hex ObjectID back into a primitive.ObjectID, so
// queries written in JavaScript can match BSON ids. Maps and slices are
// rebuilt recursively; all other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch vt := v.(type) {
	case string:
		// Strings that look like hex ObjectIDs become real ObjectIDs.
		if oi, err := primitive.ObjectIDFromHex(vt); err == nil {
			r = oi
		} else {
			r = v
		}
	case []map[string]interface{}:
		var avs []interface{}
		for _, av := range vt {
			mvs := make(map[string]interface{})
			for k, v := range av {
				mvs[k] = fc.restoreIds(v)
			}
			avs = append(avs, mvs)
		}
		r = avs
	case []interface{}:
		var avs []interface{}
		for _, av := range vt {
			avs = append(avs, fc.restoreIds(av))
		}
		r = avs
	case map[string]interface{}:
		mvs := make(map[string]interface{})
		for k, v := range vt {
			mvs[k] = fc.restoreIds(v)
		}
		r = mvs
	default:
		r = v
	}
	return
}
// execute runs the prepared query against MongoDB and converts the result to
// a JavaScript value. Multi-result calls run either an aggregation (pipe)
// or a find with optional limit/sort/projection; single-result calls run a
// FindOne, wrapping the query as {_id: query} for findId.
func (fc *findCall) execute() (r otto.Value, err error) {
	var cursor *mongo.Cursor
	col := fc.getCollection()
	query := fc.query
	if fc.isMulti() {
		if fc.isPipe() {
			ao := options.Aggregate()
			ao.SetAllowDiskUse(fc.pipeAllowDisk())
			cursor, err = col.Aggregate(context.Background(), query, ao)
			if err != nil {
				return
			}
		} else {
			fo := options.Find()
			if fc.limit > 0 {
				fo.SetLimit(int64(fc.limit))
			}
			if len(fc.sort) > 0 {
				fo.SetSort(fc.sort)
			}
			if len(fc.sel) > 0 {
				fo.SetProjection(fc.sel)
			}
			cursor, err = col.Find(context.Background(), query, fo)
			if err != nil {
				return
			}
		}
		// Collect and convert every document for the script.
		var rdocs []map[string]interface{}
		for cursor.Next(context.Background()) {
			doc := make(map[string]interface{})
			if err = cursor.Decode(&doc); err != nil {
				return
			}
			rdocs = append(rdocs, convertMapJavascript(doc))
		}
		r, err = fc.getVM().ToValue(rdocs)
	} else {
		fo := options.FindOne()
		if fc.config.byId {
			// findId: treat the argument as the document's _id.
			query = bson.M{"_id": query}
		}
		if len(fc.sel) > 0 {
			fo.SetProjection(fc.sel)
		}
		result := col.FindOne(context.Background(), query, fo)
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				rdoc := convertMapJavascript(doc)
				r, err = fc.getVM().ToValue(rdoc)
			}
		}
	}
	return
}
// makeFind builds the JavaScript-callable implementation for one built-in
// query helper. The returned closure validates arguments (query required,
// options optional), executes the query, and returns the result — or the
// JavaScript null value after logging any error.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		fc := &findCall{
			config: fa,
			client: fa.client,
			sort:   make(map[string]int),
			sel:    make(map[string]int),
		}
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		// Errors are reported to the log; scripts always get a value back.
		r = otto.NullValue()
		if argLen >= 1 {
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// findDeletedSrcDoc recovers the body of a just-deleted MongoDB document by
// searching Elasticsearch for its _id across the delete index pattern. It
// requires exactly one hit with a source; any other outcome is logged and
// returns nil. The returned map has the original op id re-attached as _id.
func findDeletedSrcDoc(config *configOptions, client *elastic.Client, op *gtm.Op) map[string]interface{} {
	objectID := opIDToString(op)
	termQuery := elastic.NewTermQuery("_id", objectID)
	search := client.Search()
	search.Size(1)
	search.Index(config.DeleteIndexPattern)
	search.Query(termQuery)
	searchResult, err := search.Do(context.Background())
	if err != nil {
		errorLog.Printf("Unable to find deleted document %s: %s", objectID, err)
		return nil
	}
	if searchResult.Hits == nil {
		errorLog.Printf("Unable to find deleted document %s", objectID)
		return nil
	}
	if searchResult.TotalHits() == 0 {
		errorLog.Printf("Found no hits for deleted document %s", objectID)
		return nil
	}
	if searchResult.TotalHits() > 1 {
		// Ambiguous match: refuse rather than pick arbitrarily.
		errorLog.Printf("Found multiple hits for deleted document %s", objectID)
		return nil
	}
	hit := searchResult.Hits.Hits[0]
	if hit.Source == nil {
		errorLog.Printf("Source unavailable for deleted document %s", objectID)
		return nil
	}
	var src map[string]interface{}
	if err = json.Unmarshal(hit.Source, &src); err == nil {
		src["_id"] = op.Id
		return src
	}
	errorLog.Printf("Unable to unmarshal deleted document %s: %s", objectID, err)
	return nil
}
// tsVersion packs a MongoDB oplog timestamp into one int64 for use as an
// external Elasticsearch version: seconds in the high 32 bits, the ordinal
// increment in the low 32 bits, so later oplog entries always compare higher.
func tsVersion(ts primitive.Timestamp) int64 {
	return (int64(ts.T) << 32) | int64(ts.I)
}
// doDelete queues a bulk delete for op's document according to the configured
// delete strategy:
//   - ignore: do nothing.
//   - stateful: look up saved routing metadata (when routing namespaces
//     match) and target the delete with it.
//   - stateless: search Elasticsearch for the document's current index /
//     routing / parent (when routing namespaces match) and target that;
//     without routing, delete from the mapped index.
//
// Unless IndexAsUpdate is set, the delete carries an external version derived
// from the oplog timestamp so stale deletes cannot clobber newer writes.
// Fixes: moved the ignore-strategy check ahead of request construction (no
// wasted allocation), replaced the strategy if/else chain with a switch,
// used !flag instead of == false, and dropped the redundant trailing return.
func (ic *indexClient) doDelete(op *gtm.Op) {
	if ic.config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	objectID, indexType, meta := opIDToString(op), ic.mapIndex(op), &indexingMeta{}
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	req.Id(objectID)
	if !ic.config.IndexAsUpdate {
		// External versioning guards against out-of-order delete/index.
		req.Version(tsVersion(op.Timestamp))
		req.VersionType("external")
	}
	switch ic.config.DeleteStrategy {
	case statefulDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			meta = ic.getIndexMeta(op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	case statelessDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Locate the document's current home before deleting it.
			termQuery := elastic.NewTermQuery("_id", objectID)
			search := ic.client.Search()
			search.FetchSource(false)
			search.Size(1)
			search.Index(ic.config.DeleteIndexPattern)
			search.Query(termQuery)
			searchResult, err := search.Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s",
					objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.TotalHits() == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s",
					objectID, ic.config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
		}
	default:
		// Unknown strategy: nothing to do.
		return
	}
	ic.bulk.Add(req)
}
// gtmDefaultSettings returns the default gtm tuning: channel size from the
// package default, a 32-op buffer, and a 75ms buffer flush interval.
func gtmDefaultSettings() gtmSettings {
	return gtmSettings{
		ChannelSize:    gtmChannelSizeDefault,
		BufferSize:     32,
		BufferDuration: "75ms",
	}
}
// notifySdFailed reports a failed systemd notification: a real error is
// always logged, while an unsupported environment (no NOTIFY_SOCKET) is only
// mentioned in verbose mode.
func (ic *indexClient) notifySdFailed(err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed reports a failed systemd watchdog probe: a real error is
// always logged, while a simply-disabled watchdog is only mentioned in
// verbose mode.
func (ic *indexClient) watchdogSdFailed(err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHttp runs the embedded HTTP server until it stops. ListenAndServe
// always returns a non-nil error; it is fatal only when the server was not
// shut down deliberately (ctx.shutdown unset).
func (ctx *httpServerCtx) serveHttp() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		errorLog.Fatalf("Unable to serve http at address %s: %s", s.Addr, err)
	}
}
// buildServer constructs the diagnostic HTTP server: /started returns uptime,
// /healthz returns "ok", /stats (when enabled) returns bulk processor
// statistics as JSON, and /debug/pprof/* (when enabled) exposes profiling.
// Fixes: time.Now().Sub(x) → time.Since(x) (gocritic timeSub idiom) and
// numeric status codes → net/http status constants. Behavior is identical.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		data := time.Since(ctx.started).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write(stats)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// startNotify launches the systemd readiness/watchdog notifier in the
// background.
func (ic *indexClient) startNotify() {
	go ic.notifySd()
}
// notifySd sends READY=1 to systemd, then — if a watchdog is configured —
// pings WATCHDOG=1 every half interval forever. Returns early (disabling
// notifications) when systemd is unreachable or the watchdog is disabled.
func (ic *indexClient) notifySd() {
	var interval time.Duration
	config := ic.config
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		ic.notifySdFailed(err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		ic.watchdogSdFailed(err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			ic.notifySdFailed(err)
			return
		}
		// Ping at twice the required rate to stay well within the deadline.
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns a callback that dials a newly discovered
// shard by its URL so gtm can begin tailing it.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mongo.Client, error) {
		shardURL := shardInfo.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(shardURL))
		return config.dialMongo(shardURL)
	}
}
// buildPipe selects the aggregation-pipeline source: the compiled pipe
// plugin if loaded, otherwise a wrapper around configured JavaScript pipeline
// environments, otherwise nil (no pipelines). The wrapper consults the
// catch-all "" environment first and then the specific namespace, returning
// the first match's exported array.
func buildPipe(config *configOptions) func(string, bool) ([]interface{}, error) {
	if pipePlugin != nil {
		return pipePlugin
	} else if len(pipeEnvs) > 0 {
		return func(ns string, changeEvent bool) ([]interface{}, error) {
			mux.Lock()
			defer mux.Unlock()
			nss := []string{"", ns}
			for _, ns := range nss {
				if env := pipeEnvs[ns]; env != nil {
					// NOTE(review): defer inside the loop holds this env's
					// lock until the function returns; with both "" and a
					// namespace env present, the first lock is held while
					// the second runs — confirm this ordering is intended.
					env.lock.Lock()
					defer env.lock.Unlock()
					val, err := env.VM.Call("module.exports", ns, ns, changeEvent)
					if err != nil {
						return nil, err
					}
					if strings.ToLower(val.Class()) == "array" {
						data, err := val.Export()
						if err != nil {
							return nil, err
						} else if data == val {
							// otto returns the value itself when export fails.
							return nil, errors.New("Exported pipeline function must return an array")
						} else {
							switch data.(type) {
							case []map[string]interface{}:
								ds := data.([]map[string]interface{})
								var is []interface{} = make([]interface{}, len(ds))
								for i, d := range ds {
									is[i] = deepExportValue(d)
								}
								return is, nil
							case []interface{}:
								ds := data.([]interface{})
								if len(ds) > 0 {
									errorLog.Fatalln("Pipeline function must return an array of objects")
								}
								return nil, nil
							default:
								errorLog.Fatalln("Pipeline function must return an array of objects")
							}
						}
					} else {
						return nil, errors.New("Exported pipeline function must return an array")
					}
				}
			}
			return nil, nil
		}
	}
	return nil
}
// sigListen installs a background handler that triggers a graceful shutdown
// (10s grace period) on the first SIGINT or SIGTERM.
// Fix: removed SIGKILL from signal.Notify — per os/signal and POSIX, SIGKILL
// cannot be caught, so registering it was dead code that suggested a
// guarantee the program cannot provide.
func (ic *indexClient) sigListen() {
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		<-sigs
		ic.shutdown(10)
	}()
}
// startHttpServer builds and launches the diagnostic HTTP server in the
// background when enabled by configuration.
func (ic *indexClient) startHttpServer() {
	config := ic.config
	if config.EnableHTTPServer {
		ic.hsc = &httpServerCtx{
			bulk:   ic.bulk,
			config: ic.config,
		}
		ic.hsc.buildServer()
		go ic.hsc.serveHttp()
	}
}
// setupFileIndexing validates file-indexing configuration and installs the
// required Elasticsearch mapping. Fatal if file indexing is enabled without
// any file namespaces or the mapping cannot be created.
func (ic *indexClient) setupFileIndexing() {
	config := ic.config
	if config.IndexFiles {
		if len(config.FileNamespaces) == 0 {
			errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
		}
		if err := ic.ensureFileMapping(); err != nil {
			errorLog.Fatalf("Unable to setup file indexing: %s", err)
		}
	}
}
// setupBulk creates the main bulk processor and, when stats indexing is
// enabled, a separate bulk processor for statistics documents. Either
// failure is fatal.
func (ic *indexClient) setupBulk() {
	config := ic.config
	bulk, err := config.newBulkProcessor(ic.client)
	if err != nil {
		errorLog.Fatalf("Unable to start bulk processor: %s", err)
	}
	var bulkStats *elastic.BulkProcessor
	if config.IndexStats {
		bulkStats, err = config.newStatsBulkProcessor(ic.client)
		if err != nil {
			errorLog.Fatalf("Unable to start stats bulk processor: %s", err)
		}
	}
	ic.bulk = bulk
	ic.bulkStats = bulkStats
}
// run wires up and starts every subsystem in dependency order — notification,
// file indexing, bulk processors, HTTP server, signal handling, cluster
// coordination, worker pools, listeners — and then blocks in the event loop.
func (ic *indexClient) run() {
	ic.startNotify()
	ic.setupFileIndexing()
	ic.setupBulk()
	ic.startHttpServer()
	ic.sigListen()
	ic.startCluster()
	ic.startRelate()
	ic.startIndex()
	ic.startDownload()
	ic.startPostProcess()
	ic.startReadWait()
	ic.clusterWait()
	ic.startListen()
	ic.eventLoop()
}
// startDownload launches the configured number of file-download workers.
// Each worker attaches file content to ops from fileC and forwards them to
// the indexing queue — note the op is forwarded even if attaching content
// failed (the error is recorded via processErr).
func (ic *indexClient) startDownload() {
	for i := 0; i < ic.config.FileDownloaders; i++ {
		ic.fileWg.Add(1)
		go func() {
			defer ic.fileWg.Done()
			for op := range ic.fileC {
				if err := ic.addFileContent(op); err != nil {
					ic.processErr(err)
				}
				ic.indexC <- op
			}
		}()
	}
}
// startPostProcess launches the configured number of post-processing workers,
// each draining processC through the process plugin.
func (ic *indexClient) startPostProcess() {
	for i := 0; i < ic.config.PostProcessors; i++ {
		ic.processWg.Add(1)
		go func() {
			defer ic.processWg.Done()
			for op := range ic.processC {
				if err := ic.runProcessor(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startReadWait, when direct reads are configured, spawns a goroutine that
// waits for them to finish, optionally saves a resume timestamp, and — when
// exit-after-direct-reads is set — drains and closes each worker stage in
// pipeline order (relate → file → index → process) before signaling the event
// loop to shut down with a 30s grace period.
func (ic *indexClient) startReadWait() {
	if len(ic.config.DirectReadNs) > 0 {
		go func() {
			ic.gtmCtx.DirectReadWg.Wait()
			infoLog.Println("Direct reads completed")
			if ic.config.Resume {
				ic.saveTimestampFromReplStatus()
			}
			if ic.config.ExitAfterDirectReads {
				infoLog.Println("Stopping all workers")
				ic.gtmCtx.Stop()
				// Wait for the event loop to stop consuming ops before
				// closing the stage channels in order.
				<-ic.opsConsumed
				close(ic.relateC)
				ic.relateWg.Wait()
				close(ic.fileC)
				ic.fileWg.Wait()
				close(ic.indexC)
				ic.indexWg.Wait()
				close(ic.processC)
				ic.processWg.Wait()
				ic.doneC <- 30
			}
		}()
	}
}
// dialShards discovers shard servers via the config server connection and
// dials each one, returning the clients. Fatal when no shards exist or any
// shard cannot be reached.
func (ic *indexClient) dialShards() []*mongo.Client {
	var mongos []*mongo.Client
	// get the list of shard servers
	shardInfos := gtm.GetShards(ic.mongoConfig)
	if len(shardInfos) == 0 {
		errorLog.Fatalln("Shards enabled but none found in config.shards collection")
	}
	// add each shard server to the sync list
	for _, shardInfo := range shardInfos {
		shardURL := shardInfo.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(shardURL))
		shard, err := ic.config.dialMongo(shardURL)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb shard using URL %s: %s", cleanMongoURL(shardURL), err)
		}
		mongos = append(mongos, shard)
	}
	return mongos
}
// buildTimestampGen chooses the oplog starting-point generator: zero
// timestamp for replay mode, a fixed user-provided timestamp, or — in resume
// mode — the timestamp saved in the config database (bumped by one increment
// to avoid reprocessing), falling back to the latest oplog entry. Returns
// nil when none of those modes are configured.
func (ic *indexClient) buildTimestampGen() gtm.TimestampGenerator {
	var after gtm.TimestampGenerator
	config := ic.config
	if config.Replay {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			// Zero value: replay from the start of the oplog.
			return primitive.Timestamp{}, nil
		}
	} else if config.ResumeFromTimestamp != 0 {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			// The configured int64 packs seconds in the high 32 bits and the
			// ordinal in the low 32 bits.
			return primitive.Timestamp{
				T: uint32(config.ResumeFromTimestamp >> 32),
				I: uint32(config.ResumeFromTimestamp),
			}, nil
		}
	} else if config.Resume {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			var ts primitive.Timestamp
			var err error
			col := client.Database(config.ConfigDatabaseName).Collection("monstache")
			result := col.FindOne(context.Background(), bson.M{
				"_id": config.ResumeName,
			})
			if err = result.Err(); err == nil {
				doc := make(map[string]interface{})
				if err = result.Decode(&doc); err == nil {
					if doc["ts"] != nil {
						ts = doc["ts"].(primitive.Timestamp)
						// Resume just after the last processed entry.
						ts.I += 1
					}
				}
			}
			if ts.T == 0 {
				// Nothing saved: start from the newest oplog entry.
				ts, _ = gtm.LastOpTimestamp(client, options)
			}
			infoLog.Printf("Resuming from timestamp %+v", ts)
			return ts, nil
		}
	}
	return after
}
// buildConnections returns the MongoDB clients to tail: every shard (dialed
// via the config server) in sharded mode, otherwise just the primary
// connection. Fatal when the config server cannot be reached.
func (ic *indexClient) buildConnections() []*mongo.Client {
	var mongos []*mongo.Client
	var err error
	config := ic.config
	if config.readShards() {
		// if we have a config server URL then we are running in a sharded cluster
		ic.mongoConfig, err = config.dialMongo(config.MongoConfigURL)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb config server using URL %s: %s",
				cleanMongoURL(config.MongoConfigURL), err)
		}
		mongos = ic.dialShards()
	} else {
		mongos = append(mongos, ic.mongo)
	}
	return mongos
}
// buildFilterChain assembles the namespace-level op filters: always exclude
// monstache's own collections, system collections, and GridFS chunks; in
// sharded mode also the config database; then apply the configured
// include/exclude regexes for ops and drops.
func (ic *indexClient) buildFilterChain() []gtm.OpFilter {
	config := ic.config
	filterChain := []gtm.OpFilter{notMonstache(config), notSystem, notChunks}
	if config.readShards() {
		filterChain = append(filterChain, notConfig)
	}
	if config.NsRegex != "" {
		filterChain = append(filterChain, filterWithRegex(config.NsRegex))
	}
	if config.NsDropRegex != "" {
		filterChain = append(filterChain, filterDropWithRegex(config.NsDropRegex))
	}
	if config.NsExcludeRegex != "" {
		filterChain = append(filterChain, filterInverseWithRegex(config.NsExcludeRegex))
	}
	if config.NsDropExcludeRegex != "" {
		filterChain = append(filterChain, filterDropInverseWithRegex(config.NsDropExcludeRegex))
	}
	return filterChain
}
// buildFilterArray assembles the document-level op filters: a consistent-hash
// worker filter when this process is one of several named workers, plus the
// plugin or script filter if configured (which is also remembered on ic for
// later use). Fatal when workers are configured but this process is unnamed.
func (ic *indexClient) buildFilterArray() []gtm.OpFilter {
	config := ic.config
	filterArray := []gtm.OpFilter{}
	var pluginFilter gtm.OpFilter
	if config.Worker != "" {
		workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
		if err != nil {
			errorLog.Fatalln(err)
		}
		filterArray = append(filterArray, workerFilter)
	} else if config.Workers != nil {
		errorLog.Fatalln("Workers configured but this worker is undefined. worker must be set to one of the workers.")
	}
	// Compiled plugin takes precedence over script filters.
	if filterPlugin != nil {
		pluginFilter = filterWithPlugin()
		filterArray = append(filterArray, pluginFilter)
	} else if len(filterEnvs) > 0 {
		pluginFilter = filterWithScript()
		filterArray = append(filterArray, pluginFilter)
	}
	if pluginFilter != nil {
		ic.filter = pluginFilter
	}
	return filterArray
}
func (ic *indexClient) buildGtmOptions() *gtm.Options {
var nsFilter, filter, directReadFilter gtm.OpFilter
config := ic.config
filterChain := ic.buildFilterChain()
filterArray := ic.buildFilterArray()
nsFilter = gtm.ChainOpFilters(filterChain...)
filter = gtm.ChainOpFilters(filterArray...)
directReadFilter = gtm.ChainOpFilters(filterArray...)
gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
if err != nil {
errorLog.Fatalf("Unable to parse gtm buffer duration %s: %s",
config.GtmSettings.BufferDuration, err)
}
after := ic.buildTimestampGen()
gtmOpts := >m.Options{
After: after,
Filter: filter,
NamespaceFilter: nsFilter,
OpLogDisabled: config.EnableOplog == false,
OpLogDatabaseName: config.MongoOpLogDatabaseName,
OpLogCollectionName: config.MongoOpLogCollectionName,
ChannelSize: config.GtmSettings.ChannelSize,
Ordering: gtm.AnyOrder,
WorkerCount: 10,
BufferDuration: gtmBufferDuration,
BufferSize: config.GtmSettings.BufferSize,
DirectReadNs: config.DirectReadNs,
DirectReadSplitMax: int32(config.DirectReadSplitMax),
DirectReadConcur: config.DirectReadConcur,
DirectReadFilter: directReadFilter,
Log: infoLog,
Pipe: buildPipe(config),
ChangeStreamNs: config.ChangeStreamNs,
}
return gtmOpts
}
// startListen begins tailing all MongoDB connections through gtm and, in
// sharded mode with change events enabled, watches for newly added shards.
func (ic *indexClient) startListen() {
	config := ic.config
	gtmOpts := ic.buildGtmOptions()
	ic.gtmCtx = gtm.StartMulti(ic.buildConnections(), gtmOpts)
	if config.readShards() && !config.DisableChangeEvents {
		ic.gtmCtx.AddShardListener(ic.mongoConfig, gtmOpts, config.makeShardInsertHandler())
	}
}
// clusterWait, in cluster mode, either proceeds immediately when this process
// holds the active role, or parks the bulk processor and polls every 10s
// until it can take over, resuming bulk processing once it does.
func (ic *indexClient) clusterWait() {
	if ic.config.ClusterName != "" {
		if ic.enabled {
			infoLog.Printf("Starting work for cluster %s", ic.config.ClusterName)
		} else {
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.bulk.Stop()
			for range heartBeat.C {
				var err error
				ic.enabled, err = ic.enableProcess()
				if err != nil {
					errorLog.Printf("Error attempting to become active cluster process: %s", err)
					continue
				}
				if ic.enabled {
					infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
					ic.bulk.Start(context.Background())
					break
				}
			}
		}
	}
}
// nextTimestamp persists the resume timestamp when progress has been made
// since the last save, flushing pending bulk requests first so the saved
// point never runs ahead of what Elasticsearch has received.
func (ic *indexClient) nextTimestamp() {
	if ic.lastTs.T > ic.lastTsSaved.T ||
		(ic.lastTs.T == ic.lastTsSaved.T && ic.lastTs.I > ic.lastTsSaved.I) {
		ic.bulk.Flush()
		if err := ic.saveTimestamp(); err == nil {
			ic.lastTsSaved = ic.lastTs
		} else {
			ic.processErr(err)
		}
	}
}
// nextStats emits bulk-processor statistics on the stats tick: into
// Elasticsearch when stats indexing is enabled, otherwise as a JSON line to
// the stats log.
func (ic *indexClient) nextStats() {
	if ic.config.IndexStats {
		if err := ic.doIndexStats(); err != nil {
			errorLog.Printf("Error indexing statistics: %s", err)
		}
	} else {
		stats, err := json.Marshal(ic.bulk.Stats())
		if err != nil {
			errorLog.Printf("Unable to log statistics: %s", err)
		} else {
			statsLog.Println(string(stats))
		}
	}
}
// nextHeartbeat maintains this process's cluster role on each heartbeat tick.
// An active process re-validates its claim and, on losing it, pauses gtm and
// bulk processing and blocks polling every 10s until it can reclaim the role.
// An inactive process simply attempts to become active. Errors from the
// enable/ensure calls are reported via processErr.
func (ic *indexClient) nextHeartbeat() {
	var err error
	if ic.enabled {
		ic.enabled, err = ic.ensureEnabled()
		if err != nil {
			ic.processErr(err)
		}
		if !ic.enabled {
			// Lost the active role: pause intake and output, then poll to
			// reclaim it.
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.gtmCtx.Pause()
			ic.bulk.Stop()
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			for range heartBeat.C {
				ic.enabled, err = ic.enableProcess()
				if ic.enabled {
					infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
					ic.bulk.Start(context.Background())
					ic.resumeWork()
					break
				}
			}
		}
	} else {
		ic.enabled, err = ic.enableProcess()
		if ic.enabled {
			infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
			ic.bulk.Start(context.Background())
			ic.resumeWork()
		}
	}
	if err != nil {
		ic.processErr(err)
	}
}
// eventLoop is the main select loop: it reacts to shutdown requests (doneC),
// periodic resume-timestamp saves, cluster heartbeats, periodic statistics,
// gtm errors, and incoming ops. Tickers for disabled features are stopped up
// front so their cases never fire. A nil op on a closed OpC signals, once,
// that all ops have been consumed (used by the direct-read shutdown path).
func (ic *indexClient) eventLoop() {
	var err error
	var allOpsVisited bool
	timestampTicker := time.NewTicker(10 * time.Second)
	if ic.config.Resume == false {
		timestampTicker.Stop()
	}
	heartBeat := time.NewTicker(10 * time.Second)
	if ic.config.ClusterName == "" {
		heartBeat.Stop()
	}
	statsTimeout := time.Duration(30) * time.Second
	if ic.config.StatsDuration != "" {
		statsTimeout, _ = time.ParseDuration(ic.config.StatsDuration)
	}
	printStats := time.NewTicker(statsTimeout)
	if ic.config.Stats == false {
		printStats.Stop()
	}
	infoLog.Println("Listening for events")
	for {
		select {
		case timeout := <-ic.doneC:
			ic.enabled = false
			ic.shutdown(timeout)
			return
		case <-timestampTicker.C:
			if !ic.enabled {
				break
			}
			ic.nextTimestamp()
		case <-heartBeat.C:
			if ic.config.ClusterName == "" {
				break
			}
			ic.nextHeartbeat()
		case <-printStats.C:
			if !ic.enabled {
				break
			}
			ic.nextStats()
		case err = <-ic.gtmCtx.ErrC:
			if err == nil {
				break
			}
			ic.processErr(err)
		case op, open := <-ic.gtmCtx.OpC:
			if !ic.enabled {
				break
			}
			if op == nil {
				// Closed channel: signal op consumption exactly once.
				if !open && !allOpsVisited {
					allOpsVisited = true
					ic.opsConsumed <- true
				}
				break
			}
			if op.IsSourceOplog() {
				// Track progress for resume-timestamp saving.
				ic.lastTs = op.Timestamp
			}
			if err = ic.routeOp(op); err != nil {
				ic.processErr(err)
			}
		}
	}
}
// startIndex launches the indexing workers (a fixed pool of 5) that drain
// indexC through doIndex.
func (ic *indexClient) startIndex() {
	for i := 0; i < 5; i++ {
		ic.indexWg.Add(1)
		go func() {
			defer ic.indexWg.Done()
			for op := range ic.indexC {
				if err := ic.doIndex(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startRelate launches RelateThreads worker goroutines that drain
// ic.relateC, but only when at least one [[relate]] section is configured.
// Workers exit when relateC is closed; relateWg tracks their completion.
func (ic *indexClient) startRelate() {
	if len(ic.config.Relate) == 0 {
		return
	}
	for w := 0; w < ic.config.RelateThreads; w++ {
		ic.relateWg.Add(1)
		go func() {
			defer ic.relateWg.Done()
			for op := range ic.relateC {
				err := ic.processRelated(op)
				if err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startCluster joins the configured cluster (no-op when cluster-name is
// unset) and determines whether this process is the enabled worker.
// Any failure is fatal.
func (ic *indexClient) startCluster() {
	if ic.config.ClusterName == "" {
		return
	}
	err := ic.ensureClusterTTL()
	if err != nil {
		errorLog.Fatalf("Unable to enable cluster mode: %s", err)
	}
	infoLog.Printf("Joined cluster %s", ic.config.ClusterName)
	if ic.enabled, err = ic.enableProcess(); err != nil {
		errorLog.Fatalf("Unable to determine enabled cluster process: %s", err)
	}
}
// closeClient releases external resources before exit: leaves the cluster
// (when clustered), shuts down the embedded HTTP server, flushes both
// bulk processors, and finally signals completion by closing closeC.
// The order matters: closeC must close last so shutdown() only proceeds
// after the flushes have run.
func (ic *indexClient) closeClient() {
	if ic.mongo != nil && ic.config.ClusterName != "" {
		ic.resetClusterState()
	}
	if ic.hsc != nil {
		// Mark shutdown first so handlers can distinguish a deliberate
		// stop from a server failure.
		ic.hsc.shutdown = true
		ic.hsc.httpServer.Shutdown(context.Background())
	}
	if ic.bulk != nil {
		ic.bulk.Flush()
	}
	if ic.bulkStats != nil {
		ic.bulkStats.Flush()
	}
	close(ic.closeC)
}
// shutdown starts an orderly close of the client and exits the process
// with exitStatus once cleanup completes or after timeout seconds,
// whichever comes first.
//
// The original spawned a goroutine looping over a select and signaled a
// second channel; a direct select on a one-shot timer is equivalent.
// time.NewTimer replaces time.NewTicker since only a single firing is
// ever consumed.
func (ic *indexClient) shutdown(timeout int) {
	infoLog.Println("Shutting down")
	go ic.closeClient()
	limit := time.NewTimer(time.Duration(timeout) * time.Second)
	defer limit.Stop()
	select {
	case <-ic.closeC:
		// cleanup finished in time
	case <-limit.C:
		// cleanup exceeded the timeout; exit anyway
	}
	os.Exit(exitStatus)
}
// handlePanic is deferred from main: it logs any recovered panic value,
// waits briefly so logs can be delivered, and exits with status 1.
// recover is called directly in this deferred function, as required.
func handlePanic() {
	r := recover()
	if r == nil {
		return
	}
	errorLog.Println(r)
	infoLog.Println("Shutting down with exit status 1 after panic.")
	time.Sleep(3 * time.Second)
	os.Exit(1)
}
// getBuildInfo runs the MongoDB buildInfo admin command and decodes the
// result, yielding the server version information.
func getBuildInfo(client *mongo.Client) (bi *buildInfo, err error) {
	cmd := bson.M{"buildInfo": 1}
	result := client.Database("admin").RunCommand(context.Background(), cmd)
	if err = result.Err(); err != nil {
		return nil, err
	}
	bi = &buildInfo{}
	err = result.Decode(bi)
	return bi, err
}
// saveTimestampFromReplStatus reads the replica set's last committed
// optime, records it as lastTs, and persists it. Each step reports its
// error via processErr and stops the sequence.
func (ic *indexClient) saveTimestampFromReplStatus() {
	rs, err := gtm.GetReplStatus(ic.mongo)
	if err != nil {
		ic.processErr(err)
		return
	}
	if ic.lastTs, err = rs.GetLastCommitted(); err != nil {
		ic.processErr(err)
		return
	}
	if err = ic.saveTimestamp(); err != nil {
		ic.processErr(err)
	}
}
// mustConfig builds the effective configuration from defaults, flags, and
// the config file. --version and --print-config short-circuit with exit 0.
// Validation failures terminate the process inside validate().
func mustConfig() *configOptions {
	cfg := &configOptions{
		GtmSettings: gtmDefaultSettings(),
	}
	cfg.parseCommandLineFlags()
	if cfg.Version {
		fmt.Println(version)
		os.Exit(0)
	}
	cfg.build()
	if cfg.Print {
		cfg.dump()
		os.Exit(0)
	}
	cfg.setupLogging()
	cfg.validate()
	return cfg
}
// buildMongoClient dials MongoDB, exiting fatally on failure, and logs
// the server version when the buildInfo command succeeds.
func buildMongoClient(config *configOptions) *mongo.Client {
	client, err := config.dialMongo(config.MongoURL)
	if err != nil {
		errorLog.Fatalf("Unable to connect to MongoDB using URL %s: %s",
			cleanMongoURL(config.MongoURL), err)
	}
	infoLog.Printf("Started monstache version %s", version)
	info, infoErr := getBuildInfo(client)
	if infoErr == nil {
		infoLog.Printf("Successfully connected to MongoDB version %s", info.Version)
	} else {
		infoLog.Println("Successfully connected to MongoDB")
	}
	return client
}
// buildElasticClient creates the Elasticsearch client, exiting fatally on
// failure. The server version is taken from config when pinned, otherwise
// probed from the cluster.
func buildElasticClient(config *configOptions) *elastic.Client {
	client, err := config.newElasticClient()
	if err != nil {
		errorLog.Fatalf("Unable to create Elasticsearch client: %s", err)
	}
	if config.ElasticVersion != "" {
		if verr := config.parseElasticsearchVersion(config.ElasticVersion); verr != nil {
			errorLog.Fatalf("Elasticsearch version must conform to major.minor.fix: %s", verr)
		}
	} else if cerr := config.testElasticsearchConn(client); cerr != nil {
		errorLog.Fatalf("Unable to validate connection to Elasticsearch: %s", cerr)
	}
	return client
}
// main wires together configuration, the MongoDB and Elasticsearch
// clients, the worker wait groups, and the shared channels, then hands
// control to ic.run().
func main() {
	// handlePanic logs any panic and exits with status 1.
	defer handlePanic()
	config := mustConfig()
	mongoClient := buildMongoClient(config)
	loadBuiltinFunctions(mongoClient, config)
	elasticClient := buildElasticClient(config)
	ic := &indexClient{
		config:      config,
		mongo:       mongoClient,
		client:      elasticClient,
		fileWg:      &sync.WaitGroup{},
		indexWg:     &sync.WaitGroup{},
		processWg:   &sync.WaitGroup{},
		relateWg:    &sync.WaitGroup{},
		opsConsumed: make(chan bool),
		closeC:      make(chan bool),
		doneC:       make(chan int),
		enabled:     true,
		indexC:      make(chan *gtm.Op),
		processC:    make(chan *gtm.Op),
		fileC:       make(chan *gtm.Op),
		// relateC is the only buffered op channel; see relateQueueOverloadMsg.
		relateC: make(chan *gtm.Op, config.RelateBuffer),
	}
	ic.run()
}
|
// package main provides the monstache binary
package main
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"text/template"
"time"
"github.com/BurntSushi/toml"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/coreos/go-systemd/daemon"
jsonpatch "github.com/evanphx/json-patch"
"github.com/olivere/elastic/v7"
aws "github.com/olivere/elastic/v7/aws/v4"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/gridfs"
"go.mongodb.org/mongo-driver/mongo/options"
mongoversion "go.mongodb.org/mongo-driver/version"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
)
// Severity loggers; errors go to stderr, everything else to stdout.
// Destinations may be rerouted later by setupLogging.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())
// Optional hooks loaded from a Go plugin (mapper-plugin-path).
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)
var processPlugin func(*monstachemap.ProcessPluginInput) error
var pipePlugin func(string, bool) ([]interface{}, error)
// Per-namespace javascript environments and mapping tables, populated at
// startup from config. Keyed by MongoDB namespace ("db.collection").
var mapEnvs = make(map[string]*executionEnv)
var filterEnvs = make(map[string]*executionEnv)
var pipeEnvs = make(map[string]*executionEnv)
var mapIndexTypes = make(map[string]*indexMapping)
var relates = make(map[string][]*relation)
var fileNamespaces = make(map[string]bool)
var patchNamespaces = make(map[string]bool)
var tmNamespaces = make(map[string]bool)
var routingNamespaces = make(map[string]bool)
var mux sync.Mutex
// Namespaces matching these are filtered out (GridFS chunks, system collections).
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")
// exitStatus is the process exit code used by shutdown.
var exitStatus = 0
// version is the monstache release reported by --version and at startup.
const version = "6.4.0"
// Defaults applied when the corresponding options are not configured.
const mongoURLDefault string = "mongodb://localhost:27017"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 0
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512
const fileDownloadersDefault = 10
const relateThreadsDefault = 10
const relateBufferDefault = 1000
const postProcessorsDefault = 10
// redact replaces credentials when the configuration is printed.
const redact = "REDACTED"
const configDatabaseNameDefault = "monstache"
// relateQueueOverloadMsg is logged when relateC is full (args: db, collection).
const relateQueueOverloadMsg = "Relate queue is full. Skipping relate for %v.(%v) to keep pipeline healthy."
// deleteStrategy enumerates the delete-strategy config values controlling
// how MongoDB deletes are propagated to Elasticsearch.
type deleteStrategy int
const (
	statelessDeleteStrategy deleteStrategy = iota
	statefulDeleteStrategy
	ignoreDeleteStrategy
)
// resumeStrategy enumerates the resume-strategy config values controlling
// how progress is persisted and resumed.
type resumeStrategy int
const (
	timestampResumeStrategy resumeStrategy = iota
	tokenResumeStrategy
)
// buildInfo holds the result of the MongoDB buildInfo admin command.
type buildInfo struct {
	Version string
}
// stringargs is a repeatable string command-line flag (implements flag.Value).
type stringargs []string
// indexClient is the top-level coordinator: it owns the MongoDB and
// Elasticsearch connections, the worker channels, and lifecycle state.
type indexClient struct {
	gtmCtx      *gtm.OpCtxMulti
	config      *configOptions
	mongo       *mongo.Client
	mongoConfig *mongo.Client
	bulk        *elastic.BulkProcessor
	bulkStats   *elastic.BulkProcessor
	client      *elastic.Client
	hsc         *httpServerCtx
	fileWg      *sync.WaitGroup
	indexWg     *sync.WaitGroup
	processWg   *sync.WaitGroup
	relateWg    *sync.WaitGroup
	opsConsumed chan bool // signaled once when all direct-read ops are consumed
	closeC      chan bool // closed by closeClient when cleanup is finished
	doneC       chan int  // receives the shutdown timeout in seconds
	enabled     bool      // gates event handling; toggled via enableProcess in cluster mode
	lastTs      primitive.Timestamp
	lastTsSaved primitive.Timestamp
	tokens      bson.M
	indexC      chan *gtm.Op
	processC    chan *gtm.Op
	fileC       chan *gtm.Op
	relateC     chan *gtm.Op
	filter      gtm.OpFilter
	statusReqC  chan *statusRequest
	sigH        *sigHandler
}
// sigHandler delivers the started client to the signal-handling goroutine.
type sigHandler struct {
	clientStartedC chan *indexClient
}
// awsConnect holds optional AWS credentials for signed requests.
type awsConnect struct {
	AccessKey string `toml:"access-key"`
	SecretKey string `toml:"secret-key"`
	Region    string
}
// executionEnv pairs one javascript VM with its script; lock serializes
// access to the VM.
type executionEnv struct {
	VM     *otto.Otto
	Script string
	lock   *sync.Mutex
}
// javascript is a [[script]]/[[filter]]/[[pipeline]] config entry.
type javascript struct {
	Namespace string
	Script    string
	Path      string
	Routing   bool
}
// relation describes how documents in Namespace relate to documents in
// WithNamespace: SrcField of the source is matched against MatchField.
type relation struct {
	Namespace     string
	WithNamespace string `toml:"with-namespace"`
	SrcField      string `toml:"src-field"`
	MatchField    string `toml:"match-field"`
	KeepSrc       bool   `toml:"keep-src"`
	MaxDepth      int    `toml:"max-depth"`
	db            string
	col           string
}
// indexMapping overrides the target index for a namespace.
type indexMapping struct {
	Namespace string
	Index     string
}
// findConf configures a MongoDB query helper exposed to javascript.
type findConf struct {
	vm            *otto.Otto
	ns            string
	name          string
	client        *mongo.Client
	byID          bool
	multi         bool
	pipe          bool
	pipeAllowDisk bool
}
// findCall is one invocation of a query helper with its parsed arguments.
type findCall struct {
	config *findConf
	client *mongo.Client
	query  interface{}
	db     string
	col    string
	limit  int
	sort   map[string]int
	sel    map[string]int
}
// logRotate mirrors lumberjack log-rotation settings from config.
type logRotate struct {
	MaxSize    int  `toml:"max-size"`
	MaxAge     int  `toml:"max-age"`
	MaxBackups int  `toml:"max-backups"`
	LocalTime  bool `toml:"localtime"`
	Compress   bool `toml:"compress"`
}
// logFiles holds optional per-severity log file paths.
type logFiles struct {
	Info  string
	Warn  string
	Error string
	Trace string
	Stats string
}
// indexingMeta carries per-document indexing overrides extracted from the
// _meta_monstache attribute of a mapped document.
type indexingMeta struct {
	Routing         string
	Index           string
	Type            string
	Parent          string
	Version         int64
	VersionType     string
	Pipeline        string
	RetryOnConflict int
	Skip            bool
	ID              string
}
// gtmSettings tunes the gtm listener's channel and buffer behavior.
type gtmSettings struct {
	ChannelSize    int    `toml:"channel-size"`
	BufferSize     int    `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
	MaxAwaitTime   string `toml:"max-await-time"`
}
// httpServerCtx is the optional embedded HTTP server and its state.
type httpServerCtx struct {
	httpServer *http.Server
	bulk       *elastic.BulkProcessor
	config     *configOptions
	shutdown   bool // set before Shutdown to mark a deliberate stop
	started    time.Time
	statusReqC chan *statusRequest
}
// instanceStatus is the JSON document served for status requests.
type instanceStatus struct {
	Enabled      bool                `json:"enabled"`
	Pid          int                 `json:"pid"`
	Hostname     string              `json:"hostname"`
	ClusterName  string              `json:"cluster"`
	ResumeName   string              `json:"resumeName"`
	LastTs       primitive.Timestamp `json:"lastTs"`
	LastTsFormat string              `json:"lastTsFormat,omitempty"`
}
// statusResponse is the internal reply to a statusRequest.
type statusResponse struct {
	enabled bool
	lastTs  primitive.Timestamp
}
// statusRequest asks the event loop for current status via responseC.
type statusRequest struct {
	responseC chan *statusResponse
}
// configOptions is the full monstache configuration, merged from defaults,
// the TOML config file (see the toml tags), and command-line flags.
type configOptions struct {
	EnableTemplate bool
	EnvDelimiter   string
	// MongoDB connection and oplog settings.
	MongoURL                 string     `toml:"mongo-url"`
	MongoConfigURL           string     `toml:"mongo-config-url"`
	MongoOpLogDatabaseName   string     `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string     `toml:"mongo-oplog-collection-name"`
	GtmSettings              gtmSettings `toml:"gtm-settings"`
	AWSConnect               awsConnect  `toml:"aws-connect"`
	LogRotate                logRotate   `toml:"log-rotate"`
	Logs                     logFiles    `toml:"logs"`
	GraylogAddr              string      `toml:"graylog-addr"`
	// Elasticsearch connection settings.
	ElasticUrls            stringargs `toml:"elasticsearch-urls"`
	ElasticUser            string     `toml:"elasticsearch-user"`
	ElasticPassword        string     `toml:"elasticsearch-password"`
	ElasticPemFile         string     `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile bool       `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion         string     `toml:"elasticsearch-version"`
	ElasticHealth0         int        `toml:"elasticsearch-healthcheck-timeout-startup"`
	ElasticHealth1         int        `toml:"elasticsearch-healthcheck-timeout"`
	ResumeName             string     `toml:"resume-name"`
	// Namespace include/exclude regular expressions.
	NsRegex            string `toml:"namespace-regex"`
	NsDropRegex        string `toml:"namespace-drop-regex"`
	NsExcludeRegex     string `toml:"namespace-exclude-regex"`
	NsDropExcludeRegex string `toml:"namespace-drop-exclude-regex"`
	ClusterName        string `toml:"cluster-name"`
	Print              bool   `toml:"print-config"`
	Version            bool
	Pprof              bool
	EnableOplog         bool `toml:"enable-oplog"`
	DisableChangeEvents bool `toml:"disable-change-events"`
	EnableEasyJSON      bool `toml:"enable-easy-json"`
	Stats               bool
	IndexStats          bool   `toml:"index-stats"`
	StatsDuration       string `toml:"stats-duration"`
	StatsIndexFormat    string `toml:"stats-index-format"`
	Gzip                bool
	Verbose             bool
	// Resume/replay behavior.
	Resume              bool
	ResumeStrategy      resumeStrategy `toml:"resume-strategy"`
	ResumeWriteUnsafe   bool           `toml:"resume-write-unsafe"`
	ResumeFromTimestamp int64          `toml:"resume-from-timestamp"`
	Replay              bool
	DroppedDatabases    bool `toml:"dropped-databases"`
	DroppedCollections  bool `toml:"dropped-collections"`
	// GridFS file indexing.
	IndexFiles             bool `toml:"index-files"`
	IndexAsUpdate          bool `toml:"index-as-update"`
	FileHighlighting       bool `toml:"file-highlighting"`
	DisableFilePipelinePut bool `toml:"disable-file-pipeline-put"`
	EnablePatches          bool `toml:"enable-patches"`
	FailFast               bool `toml:"fail-fast"`
	IndexOplogTime         bool   `toml:"index-oplog-time"`
	OplogTsFieldName       string `toml:"oplog-ts-field-name"`
	OplogDateFieldName     string `toml:"oplog-date-field-name"`
	OplogDateFieldFormat   string `toml:"oplog-date-field-format"`
	ExitAfterDirectReads   bool   `toml:"exit-after-direct-reads"`
	MergePatchAttr         string `toml:"merge-patch-attribute"`
	// Bulk indexing tuning.
	ElasticMaxConns      int  `toml:"elasticsearch-max-conns"`
	ElasticRetry         bool `toml:"elasticsearch-retry"`
	ElasticMaxDocs       int  `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes      int  `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds    int  `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout int  `toml:"elasticsearch-client-timeout"`
	ElasticMajorVersion  int
	ElasticMinorVersion  int
	MaxFileSize          int64 `toml:"max-file-size"`
	ConfigFile           string
	Script               []javascript
	Filter               []javascript
	Pipeline             []javascript
	Mapping              []indexMapping
	Relate               []relation
	FileNamespaces       stringargs `toml:"file-namespaces"`
	PatchNamespaces      stringargs `toml:"patch-namespaces"`
	Workers              stringargs
	Worker               string
	ChangeStreamNs       stringargs `toml:"change-stream-namespaces"`
	DirectReadNs         stringargs `toml:"direct-read-namespaces"`
	DirectReadSplitMax   int        `toml:"direct-read-split-max"`
	DirectReadConcur     int        `toml:"direct-read-concur"`
	DirectReadNoTimeout  bool       `toml:"direct-read-no-timeout"`
	DirectReadBounded    bool       `toml:"direct-read-bounded"`
	DirectReadExcludeRegex string   `toml:"direct-read-dynamic-exclude-regex"`
	MapperPluginPath     string     `toml:"mapper-plugin-path"`
	EnableHTTPServer     bool       `toml:"enable-http-server"`
	HTTPServerAddr       string     `toml:"http-server-addr"`
	// Time machine (historical copies of documents).
	TimeMachineNamespaces  stringargs `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix string     `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix string     `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads bool       `toml:"time-machine-direct-reads"`
	PipeAllowDisk          bool       `toml:"pipe-allow-disk"`
	RoutingNamespaces      stringargs `toml:"routing-namespaces"`
	DeleteStrategy         deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern     string         `toml:"delete-index-pattern"`
	ConfigDatabaseName     string         `toml:"config-database-name"`
	FileDownloaders        int            `toml:"file-downloaders"`
	RelateThreads          int            `toml:"relate-threads"`
	RelateBuffer           int            `toml:"relate-buffer"`
	PostProcessors         int            `toml:"post-processors"`
	PruneInvalidJSON       bool           `toml:"prune-invalid-json"`
	Debug                  bool
	mongoClientOptions     *options.ClientOptions
}
// IsIdentity reports whether the relation joins documents by _id on both
// sides, i.e. the related document shares the source document's _id.
// Simplified from if/return-true/return-false to a direct boolean return.
func (rel *relation) IsIdentity() bool {
	return rel.SrcField == "_id" && rel.MatchField == "_id"
}
// enabled reports whether any per-severity log file path is configured.
func (l *logFiles) enabled() bool {
	for _, path := range []string{l.Info, l.Warn, l.Error, l.Trace, l.Stats} {
		if path != "" {
			return true
		}
	}
	return false
}
// validate requires the AWS credentials to be either both present or both
// absent; a lone access-key or secret-key is an error.
func (ac *awsConnect) validate() error {
	hasAccess := ac.AccessKey != ""
	hasSecret := ac.SecretKey != ""
	if hasAccess != hasSecret {
		return errors.New("AWS connect settings must include both access-key and secret-key")
	}
	return nil
}
// enabled reports whether any AWS credential has been configured.
func (ac *awsConnect) enabled() bool {
	return !(ac.AccessKey == "" && ac.SecretKey == "")
}
// String renders the strategy's numeric value (implements flag.Value).
func (arg *deleteStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses value as an integer delete strategy (implements flag.Value).
func (arg *deleteStrategy) Set(value string) (err error) {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = deleteStrategy(i)
	return nil
}
// String renders the strategy's numeric value (implements flag.Value).
func (arg *resumeStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses value as an integer resume strategy (implements flag.Value).
func (arg *resumeStrategy) Set(value string) (err error) {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = resumeStrategy(i)
	return nil
}
// String renders the accumulated values (implements flag.Value).
func (args *stringargs) String() string {
	return fmt.Sprintf("%v", *args)
}
// Set appends one occurrence of a repeatable flag (implements flag.Value).
func (args *stringargs) Set(value string) error {
	updated := append(*args, value)
	*args = updated
	return nil
}
// readShards reports whether shard discovery via the config server should
// be used: a mongo-config-url is set and no change streams are configured.
func (config *configOptions) readShards() bool {
	if config.MongoConfigURL == "" {
		return false
	}
	return len(config.ChangeStreamNs) == 0
}
// dynamicDirectReadList reports whether direct reads were requested with a
// single empty namespace, meaning the list should be discovered dynamically.
func (config *configOptions) dynamicDirectReadList() bool {
	ns := config.DirectReadNs
	return len(ns) == 1 && ns[0] == ""
}
// ignoreDatabaseForDirectReads reports whether db is internal (local,
// admin, config, or monstache's own config database) and must be skipped
// when building the dynamic direct-read list.
func (config *configOptions) ignoreDatabaseForDirectReads(db string) bool {
	switch db {
	case "local", "admin", "config", config.ConfigDatabaseName:
		return true
	}
	return false
}
// ignoreCollectionForDirectReads reports whether col is a MongoDB system
// collection and must be skipped for direct reads.
func (config *configOptions) ignoreCollectionForDirectReads(col string) bool {
	const systemPrefix = "system."
	return strings.HasPrefix(col, systemPrefix)
}
// afterBulk is the elastic.BulkProcessor after-callback. It logs every
// failed item in the bulk response except 409 version conflicts, which
// simply mean the document is already present in the index.
//
// Fix: the marshaled bytes were previously assigned to a variable named
// "json", shadowing the encoding/json package inside the loop.
func afterBulk(executionID int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if response == nil || !response.Errors {
		return
	}
	// Failed() may return nil; ranging over nil is a no-op.
	for _, item := range response.Failed() {
		if item.Status == 409 {
			// ignore version conflict since this simply means the doc
			// is already in the index
			continue
		}
		encoded, merr := json.Marshal(item)
		if merr != nil {
			errorLog.Printf("Unable to marshal bulk response item: %s", merr)
		} else {
			errorLog.Printf("Bulk response item: %s", string(encoded))
		}
	}
}
// parseElasticsearchVersion parses "major.minor[.fix]" into
// ElasticMajorVersion/ElasticMinorVersion. The major version is required
// and must be non-zero; the minor version is parsed when present.
//
// Fix: previously a successful minor-version parse overwrote the shared
// err variable, masking a major-version failure (e.g. "0.5" and "x.2"
// returned nil). Each step now returns its error immediately.
func (config *configOptions) parseElasticsearchVersion(number string) error {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return err
	}
	config.ElasticMajorVersion = majorVersion
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	if len(versionParts) > 1 {
		minorVersion, err := strconv.Atoi(versionParts[1])
		if err != nil {
			return err
		}
		config.ElasticMinorVersion = minorVersion
	}
	return nil
}
// newBulkProcessor builds the primary bulk processor used for indexing,
// sized and tuned from config. When elasticsearch-retry is off, retries
// are disabled via StopBackoff. (Replaced `== false` with `!` per Go idiom.)
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	bulkService.BulkActions(config.ElasticMaxDocs)
	bulkService.BulkSize(config.ElasticMaxBytes)
	if !config.ElasticRetry {
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	// afterBulk logs failed items from each bulk response.
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds a small, single-worker bulk processor used
// only for writing index stats documents; it flushes every 5 seconds with
// no action/size thresholds.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		After(afterBulk).
		FlushInterval(5 * time.Second)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https, in which case the client must default to the https scheme.
// (Ranging over an empty slice is a no-op, so no length guard is needed.)
func (config *configOptions) needsSecureScheme() bool {
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient assembles the elastic.Client from config: sniffing is
// always disabled, the scheme/URLs/auth/retry/trace options are applied
// conditionally, and a custom HTTP client (TLS, timeouts, AWS signing)
// is attached.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	var clientOptions []elastic.ClientOptionFunc
	var httpClient *http.Client
	clientOptions = append(clientOptions, elastic.SetSniff(false))
	if config.needsSecureScheme() {
		clientOptions = append(clientOptions, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		clientOptions = append(clientOptions, elastic.SetURL(config.ElasticUrls...))
	} else {
		// No URLs configured: record the default so later code (e.g.
		// testElasticsearchConn) can read ElasticUrls[0].
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		clientOptions = append(clientOptions, elastic.SetTraceLog(traceLog))
		clientOptions = append(clientOptions, elastic.SetErrorLog(errorLog))
	}
	if config.ElasticUser != "" {
		clientOptions = append(clientOptions, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		// Exponential backoff from 50ms up to 20s between retries.
		d1, d2 := time.Duration(50)*time.Millisecond, time.Duration(20)*time.Second
		retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(d1, d2))
		clientOptions = append(clientOptions, elastic.SetRetrier(retrier))
	}
	httpClient, err = config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	clientOptions = append(clientOptions, elastic.SetHttpClient(httpClient))
	clientOptions = append(clientOptions,
		elastic.SetHealthcheckTimeoutStartup(time.Duration(config.ElasticHealth0)*time.Second))
	clientOptions = append(clientOptions,
		elastic.SetHealthcheckTimeout(time.Duration(config.ElasticHealth1)*time.Second))
	return elastic.NewClient(clientOptions...)
}
// testElasticsearchConn probes the first configured Elasticsearch URL for
// its version, logs it, and records the parsed major/minor numbers.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) (err error) {
	var number string
	number, err = client.ElasticsearchVersion(config.ElasticUrls[0])
	if err != nil {
		return
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all indexes for a dropped database. By default the
// pattern is "<db>*"; if a namespace mapping for the database overrides the
// index name, the first match's "<index>*" pattern is used instead.
func (ic *indexClient) deleteIndexes(db string) (err error) {
	pattern := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		parts := strings.SplitN(ns, ".", 2)
		if parts[0] != db {
			continue
		}
		if m.Index != "" {
			pattern = strings.ToLower(m.Index + "*")
		}
		break
	}
	_, err = ic.client.DeleteIndex(pattern).Do(context.Background())
	return
}
// deleteIndex removes the index for a dropped collection, honoring any
// per-namespace index name override.
func (ic *indexClient) deleteIndex(namespace string) (err error) {
	target := strings.ToLower(namespace)
	if m := mapIndexTypes[namespace]; m != nil && m.Index != "" {
		target = strings.ToLower(m.Index)
	}
	_, err = ic.client.DeleteIndex(target).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used to
// extract content from the base64 "file" field, unless pipeline creation
// has been disabled via disable-file-pipeline-put.
func (ic *indexClient) ensureFileMapping() (err error) {
	if ic.config.DisableFilePipelinePut {
		return nil
	}
	body := map[string]interface{}{
		"description": "Extract file information",
		"processors": [1]map[string]interface{}{
			{
				"attachment": map[string]interface{}{
					"field": "file",
				},
			},
		},
	}
	_, err = ic.client.IngestPutPipeline("attachment").BodyJson(body).Do(context.Background())
	return err
}
// defaultIndexMapping maps an op to the lowercased namespace as its index.
func (ic *indexClient) defaultIndexMapping(op *gtm.Op) *indexMapping {
	ns := op.Namespace
	return &indexMapping{
		Namespace: ns,
		Index:     strings.ToLower(ns),
	}
}
// mapIndex resolves the target index for an op: the configured override
// for its namespace when present, otherwise the default mapping.
func (ic *indexClient) mapIndex(op *gtm.Op) *indexMapping {
	mapping := ic.defaultIndexMapping(op)
	override := mapIndexTypes[op.Namespace]
	if override != nil && override.Index != "" {
		mapping.Index = override.Index
	}
	return mapping
}
// opIDToString renders an op's MongoDB _id as a string suitable for use as
// an Elasticsearch document id. ObjectIDs become hex, binary ids are
// encoded via monstachemap, and whole-valued floats drop their fraction.
func opIDToString(op *gtm.Op) string {
	switch id := op.Id.(type) {
	case primitive.ObjectID:
		return id.Hex()
	case primitive.Binary:
		return monstachemap.EncodeBinData(monstachemap.Binary{id})
	case float64:
		if whole := int(id); id == float64(whole) {
			return fmt.Sprintf("%v", whole)
		}
		return fmt.Sprintf("%v", op.Id)
	case float32:
		if whole := int(id); id == float32(whole) {
			return fmt.Sprintf("%v", whole)
		}
		return fmt.Sprintf("%v", op.Id)
	default:
		return fmt.Sprintf("%v", op.Id)
	}
}
// convertSliceJavascript prepares a slice for the javascript VM: nested
// maps and slices are converted recursively and ObjectIDs become hex
// strings; other values pass through unchanged.
func convertSliceJavascript(a []interface{}) []interface{} {
	var out []interface{}
	for _, element := range a {
		var converted interface{}
		switch typed := element.(type) {
		case map[string]interface{}:
			converted = convertMapJavascript(typed)
		case []interface{}:
			converted = convertSliceJavascript(typed)
		case primitive.ObjectID:
			converted = typed.Hex()
		default:
			converted = element
		}
		out = append(out, converted)
	}
	return out
}
// convertMapJavascript prepares a document for the javascript VM: nested
// maps and slices are converted recursively and ObjectIDs become hex
// strings; other values pass through unchanged.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for key, val := range e {
		switch typed := val.(type) {
		case map[string]interface{}:
			out[key] = convertMapJavascript(typed)
		case []interface{}:
			out[key] = convertSliceJavascript(typed)
		case primitive.ObjectID:
			out[key] = typed.Hex()
		default:
			out[key] = val
		}
	}
	return out
}
// fixSlicePruneInvalidJSON returns a copy of the slice with elements that
// encoding/json cannot serialize removed: NaN/Inf float64 values and
// time.Time values whose year is outside 0-9999. Nested maps and slices
// are pruned recursively. key and id provide log context only.
//
// Cleanup: the NaN and Inf branches logged the exact same message and
// both continued, so they are merged into one condition.
func fixSlicePruneInvalidJSON(id string, key string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		var avc interface{}
		switch achild := av.(type) {
		case map[string]interface{}:
			avc = fixPruneInvalidJSON(id, achild)
		case []interface{}:
			avc = fixSlicePruneInvalidJSON(id, key, achild)
		case time.Time:
			if year := achild.Year(); year < 0 || year > 9999 {
				// year outside of valid range
				warnLog.Printf("Dropping key %s element: invalid time.Time value: %s for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		case float64:
			if math.IsNaN(achild) || math.IsInf(achild, 0) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s element: invalid float64 value: %v for document _id: %s", key, achild, id)
				continue
			}
			avc = av
		default:
			avc = av
		}
		avs = append(avs, avc)
	}
	return avs
}
// fixPruneInvalidJSON returns a copy of the document with values that
// encoding/json cannot serialize removed: NaN/Inf float64 values and
// time.Time values whose year is outside 0-9999. Nested maps and slices
// are pruned recursively. id provides log context only.
//
// Cleanup: the NaN and Inf branches logged the exact same message and
// both continued, so they are merged into one condition.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, k, child)
		case time.Time:
			if year := child.Year(); year < 0 || year > 9999 {
				// year outside of valid range
				warnLog.Printf("Dropping key %s: invalid time.Time value: %s for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		case float64:
			if math.IsNaN(child) || math.IsInf(child, 0) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s: invalid float64 value: %v for document _id: %s", k, child, id)
				continue
			}
			o[k] = v
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts a value coming back from the otto VM into plain
// Go data, recursing through maps and slices. Javascript Date objects are
// parsed from their string form instead of using Export's result.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			// For Dates, both ex and err are replaced by the parse of the
			// value's string representation.
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			// On export/parse failure b stays nil and the error is logged.
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []map[string]interface{}:
		b = deepExportMapSlice(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportMapSlice deep-exports each map element of the slice.
func deepExportMapSlice(a []map[string]interface{}) []interface{} {
	var out []interface{}
	for _, m := range a {
		out = append(out, deepExportMap(m))
	}
	return out
}
// deepExportSlice deep-exports each element of the slice.
func deepExportSlice(a []interface{}) []interface{} {
	var out []interface{}
	for _, element := range a {
		out = append(out, deepExportValue(element))
	}
	return out
}
// deepExportMap deep-exports every value of the map into plain Go data.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(e))
	for key, val := range e {
		out[key] = deepExportValue(val)
	}
	return out
}
// mapDataJavascript runs op through the global ("") and then the
// namespace-specific javascript mapping environment, when configured.
// A mapping may replace op.Data with the returned object, or return a
// falsy value to clear op.Data and stop further mapping.
//
// Fix: the original used `defer env.lock.Unlock()` inside the loop, so the
// global env's lock stayed held while the namespace env executed. The
// per-env work is extracted into a helper so each lock is released as
// soon as that env's call finishes.
func (ic *indexClient) mapDataJavascript(op *gtm.Op) error {
	names := []string{"", op.Namespace}
	for _, name := range names {
		env := mapEnvs[name]
		if env == nil {
			continue
		}
		stop, err := ic.applyJavascriptMapEnv(env, op)
		if err != nil {
			return err
		}
		if stop {
			break
		}
	}
	return nil
}

// applyJavascriptMapEnv invokes one mapping environment against op while
// holding the env's lock only for the duration of the call. It reports
// stop=true when the mapping returned a falsy value (op.Data cleared).
func (ic *indexClient) applyJavascriptMapEnv(env *executionEnv, op *gtm.Op) (stop bool, err error) {
	env.lock.Lock()
	defer env.lock.Unlock()
	arg := convertMapJavascript(op.Data)
	arg2 := op.Namespace
	arg3 := convertMapJavascript(op.UpdateDescription)
	val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
	if err != nil {
		return false, err
	}
	if strings.ToLower(val.Class()) == "object" {
		data, err := val.Export()
		if err != nil {
			return false, err
		} else if data == val {
			return false, errors.New("Exported function must return an object")
		}
		dm := data.(map[string]interface{})
		op.Data = deepExportMap(dm)
		return false, nil
	}
	indexed, err := val.ToBoolean()
	if err != nil {
		return false, err
	}
	if !indexed {
		op.Data = nil
		return true, nil
	}
	return false, nil
}
// mapDataGolang runs op through the Go plugin mapper. A nil output leaves
// the op untouched; Drop clears op.Data; Skip empties it; otherwise the
// returned document replaces op.Data unless Passthrough is set. Any
// indexing overrides from the plugin are attached under _meta_monstache.
// (Replaced `== false` with `!` per Go idiom.)
func (ic *indexClient) mapDataGolang(op *gtm.Op) error {
	input := &monstachemap.MapperPluginInput{
		Document:          op.Data,
		Namespace:         op.Namespace,
		Database:          op.GetDatabase(),
		Collection:        op.GetCollection(),
		Operation:         op.Operation,
		MongoClient:       ic.mongo,
		UpdateDescription: op.UpdateDescription,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		return nil
	}
	if output.Drop {
		op.Data = nil
	} else {
		if output.Skip {
			op.Data = map[string]interface{}{}
		} else if !output.Passthrough {
			if output.Document == nil {
				return errors.New("Map function must return a non-nil document")
			}
			op.Data = output.Document
		}
		// Collect any indexing overrides supplied by the plugin.
		meta := make(map[string]interface{})
		if output.Skip {
			meta["skip"] = true
		}
		if output.Index != "" {
			meta["index"] = output.Index
		}
		if output.ID != "" {
			meta["id"] = output.ID
		}
		if output.Type != "" {
			meta["type"] = output.Type
		}
		if output.Routing != "" {
			meta["routing"] = output.Routing
		}
		if output.Parent != "" {
			meta["parent"] = output.Parent
		}
		if output.Version != 0 {
			meta["version"] = output.Version
		}
		if output.VersionType != "" {
			meta["versionType"] = output.VersionType
		}
		if output.Pipeline != "" {
			meta["pipeline"] = output.Pipeline
		}
		if output.RetryOnConflict != 0 {
			meta["retryOnConflict"] = output.RetryOnConflict
		}
		if len(meta) > 0 {
			op.Data["_meta_monstache"] = meta
		}
	}
	return nil
}
// mapData dispatches op to the Go plugin mapper when one is loaded,
// otherwise to the javascript mappers.
func (ic *indexClient) mapData(op *gtm.Op) error {
	if mapperPlugin == nil {
		return ic.mapDataJavascript(op)
	}
	return ic.mapDataGolang(op)
}
// extractData walks a dotted field path (e.g. "a.b.c") into a nested
// document and returns the value at the leaf. If the path cannot be
// resolved (or resolves to nil), an error including the marshaled
// document is returned.
//
// Fix: on a json.Marshal failure the original assigned the still-nil
// named return err to detail instead of the marshal error e.
func extractData(srcField string, data map[string]interface{}) (result interface{}, err error) {
	cur := data
	fields := strings.Split(srcField, ".")
	last := len(fields) - 1
	for i, field := range fields {
		if i == last {
			result = cur[field]
			break
		}
		next, ok := cur[field].(map[string]interface{})
		if !ok {
			// Intermediate path element missing or not a sub-document.
			break
		}
		cur = next
	}
	if result == nil {
		var detail interface{}
		b, e := json.Marshal(data)
		if e == nil {
			detail = string(b)
		} else {
			detail = e
		}
		err = fmt.Errorf("Source field %s not found in document: %s", srcField, detail)
	}
	return
}
// buildSelector expands a dotted match field ("a.b.c") into a nested
// bson.M query document: {"a": {"b": {"c": data}}}.
func buildSelector(matchField string, data interface{}) bson.M {
	sel := bson.M{}
	cur := sel
	fields := strings.Split(matchField, ".")
	last := len(fields) - 1
	for i, field := range fields {
		if i == last {
			cur[field] = data
			continue
		}
		child := bson.M{}
		cur[field] = child
		cur = child
	}
	return sel
}
func (ic *indexClient) processRelated(root *gtm.Op) (err error) {
var q []*gtm.Op
batch := []*gtm.Op{root}
depth := 1
for len(batch) > 0 {
for _, e := range batch {
op := e
if op.Data == nil {
continue
}
rs := relates[op.Namespace]
if len(rs) == 0 {
continue
}
for _, r := range rs {
if r.MaxDepth > 0 && r.MaxDepth < depth {
continue
}
if op.IsDelete() && r.IsIdentity() {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: r.WithNamespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: op.Data,
}
ic.doDelete(rop)
q = append(q, rop)
continue
}
var srcData interface{}
if srcData, err = extractData(r.SrcField, op.Data); err != nil {
ic.processErr(err)
continue
}
opts := &options.FindOptions{}
if ic.config.DirectReadNoTimeout {
opts.SetNoCursorTimeout(true)
}
col := ic.mongo.Database(r.db).Collection(r.col)
sel := buildSelector(r.MatchField, srcData)
cursor, err := col.Find(context.Background(), sel, opts)
doc := make(map[string]interface{})
for cursor.Next(context.Background()) {
if err = cursor.Decode(&doc); err != nil {
ic.processErr(err)
continue
}
now := time.Now().UTC()
tstamp := primitive.Timestamp{
T: uint32(now.Unix()),
I: uint32(now.Nanosecond()),
}
rop := >m.Op{
Id: doc["_id"],
Data: doc,
Operation: root.Operation,
Namespace: r.WithNamespace,
Source: gtm.DirectQuerySource,
Timestamp: tstamp,
UpdateDescription: root.UpdateDescription,
}
doc = make(map[string]interface{})
if ic.filter != nil && !ic.filter(rop) {
continue
}
if processPlugin != nil {
pop := >m.Op{
Id: rop.Id,
Operation: rop.Operation,
Namespace: rop.Namespace,
Source: rop.Source,
Timestamp: rop.Timestamp,
UpdateDescription: rop.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(rop.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
pop.Data = m
}
}
ic.processC <- pop
}
skip := false
if rs2 := relates[rop.Namespace]; len(rs2) != 0 {
skip = true
visit := false
for _, r2 := range rs2 {
if r2.KeepSrc {
skip = false
}
if r2.MaxDepth < 1 || r2.MaxDepth >= (depth+1) {
visit = true
}
}
if visit {
q = append(q, rop)
}
}
if !skip {
if ic.hasFileContent(rop) {
ic.fileC <- rop
} else {
ic.indexC <- rop
}
}
}
cursor.Close(context.Background())
}
}
depth++
batch = q
q = nil
}
return
}
// prepareDataForIndexing mutates op.Data into its final indexable form:
// optionally stamps oplog time fields, strips _id and _meta_monstache,
// prunes JSON-invalid values when configured, and normalizes types for
// JSON serialization.
func (ic *indexClient) prepareDataForIndexing(op *gtm.Op) {
	cfg := ic.config
	data := op.Data
	if cfg.IndexOplogTime {
		ts := time.Unix(int64(op.Timestamp.T), 0).UTC()
		data[cfg.OplogTsFieldName] = op.Timestamp
		data[cfg.OplogDateFieldName] = ts.Format(cfg.OplogDateFieldFormat)
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if cfg.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
	op.Data = monstachemap.ConvertMapForJSON(op.Data)
}
// parseIndexMeta extracts indexing metadata for an op. The version defaults to
// one derived from the oplog timestamp with "external" version type; if the
// document carries a "_meta_monstache" attribute (either a plain map or an
// otto.Value produced by a mapping script) its fields are loaded on top.
// Invalid metadata shapes are logged and otherwise ignored.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     tsVersion(op.Timestamp),
		VersionType: "external",
	}
	m, found := op.Data["_meta_monstache"]
	if !found {
		return meta
	}
	// Use type-switch binding rather than re-asserting the value in each case.
	switch v := m.(type) {
	case map[string]interface{}:
		meta.load(v)
	case otto.Value:
		// Export converts the JavaScript value to a native Go value; the
		// ex != m guard skips the case where Export returns the value unchanged.
		ex, err := v.Export()
		if err == nil && ex != m {
			if attrs, ok := ex.(map[string]interface{}); ok {
				meta.load(attrs)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent downloads the GridFS file identified by op.Id and stores its
// base64-encoded content under op.Data["file"]. If the download or bucket
// setup fails the error is returned; if the file exceeds the configured
// MaxFileSize the content is omitted (op.Data["file"] stays "") without error.
func (ic *indexClient) addFileContent(op *gtm.Op) (err error) {
	op.Data["file"] = ""
	db := ic.mongo.Database(op.GetDatabase())
	// The GridFS bucket name is the collection prefix before ".files"/".chunks".
	bucketName := strings.SplitN(op.GetCollection(), ".", 2)[0]
	var content bytes.Buffer
	encoder := base64.NewEncoder(base64.StdEncoding, &content)
	bucketOpts := &options.BucketOptions{}
	bucketOpts.SetName(bucketName)
	bucket, err := gridfs.NewBucket(db, bucketOpts)
	if err != nil {
		return
	}
	var size int64
	if size, err = bucket.DownloadToStream(op.Id, encoder); err != nil {
		return
	}
	// Size is only known after the download completes, so the limit check
	// happens here; oversized files are skipped rather than treated as errors.
	if ic.config.MaxFileSize > 0 && size > ic.config.MaxFileSize {
		warnLog.Printf("File size %d exceeds max file size. file content omitted.", size)
		return
	}
	if err = encoder.Close(); err != nil {
		return
	}
	op.Data["file"] = content.String()
	return
}
// notMonstache returns a filter that drops any operation on monstache's own
// metadata database so the tool never indexes its internal state.
func notMonstache(config *configOptions) gtm.OpFilter {
	configDB := config.ConfigDatabaseName
	return func(op *gtm.Op) bool {
		return op.GetDatabase() != configDB
	}
}
// notChunks reports whether the operation targets something other than a
// GridFS chunks collection (matched by the package-level chunksRegex).
func notChunks(op *gtm.Op) bool {
	isChunkCol := chunksRegex.MatchString(op.GetCollection())
	return !isChunkCol
}
// notConfig reports whether the operation is outside MongoDB's internal
// "config" database (used by sharded clusters).
func notConfig(op *gtm.Op) bool {
	db := op.GetDatabase()
	return db != "config"
}
// notSystem reports whether the operation targets something other than a
// system collection (matched by the package-level systemsRegex).
func notSystem(op *gtm.Op) bool {
	isSystemCol := systemsRegex.MatchString(op.GetCollection())
	return !isSystemCol
}
// filterWithRegex returns a filter that keeps only operations whose namespace
// (<database>.<collection>) matches regex. Drop operations always pass here;
// they are handled separately by filterDropWithRegex.
func filterWithRegex(regex string) gtm.OpFilter {
	allowed := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if op.IsDrop() {
			return true
		}
		return allowed.MatchString(op.Namespace)
	}
}
// filterDropWithRegex returns a filter that applies regex only to drop
// operations: a drop passes when its namespace matches; every non-drop
// operation passes unconditionally.
func filterDropWithRegex(regex string) gtm.OpFilter {
	allowed := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return allowed.MatchString(op.Namespace)
	}
}
// filterWithPlugin returns a filter backed by the loaded plugin's Filter
// function. Only inserts and updates carrying data are consulted; everything
// else passes. A plugin error is logged and the op is dropped in that case.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:          op.Data,
			Namespace:         op.Namespace,
			Database:          op.GetDatabase(),
			Collection:        op.GetCollection(),
			Operation:         op.Operation,
			UpdateDescription: op.UpdateDescription,
		}
		ok, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return ok
	}
}
// filterWithScript returns a filter that evaluates insert/update documents
// against the JavaScript filter environments in filterEnvs. Both the
// catch-all namespace ("") and the op's own namespace are consulted; the
// boolean result of the script's module.exports decides whether the op is
// kept. Script errors are logged and the op is dropped for that environment.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			nss := []string{"", op.Namespace}
			for _, ns := range nss {
				env := filterEnvs[ns]
				if env == nil {
					continue
				}
				keep = false
				arg := convertMapJavascript(op.Data)
				arg2 := op.Namespace
				arg3 := convertMapJavascript(op.UpdateDescription)
				// Hold the VM lock only for the duration of this single call.
				// The previous code deferred the unlock inside the loop, which
				// kept the lock until the closure returned and would deadlock
				// if the same env were visited twice (op.Namespace == "").
				val, err := func() (otto.Value, error) {
					env.lock.Lock()
					defer env.lock.Unlock()
					return env.VM.Call("module.exports", arg, arg, arg2, arg3)
				}()
				if err != nil {
					errorLog.Println(err)
				} else if ok, err := val.ToBoolean(); err == nil {
					keep = ok
				} else {
					errorLog.Println(err)
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex returns a filter that rejects operations whose
// namespace matches regex. Drop operations always pass here; they are handled
// separately by filterDropInverseWithRegex.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	excluded := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if op.IsDrop() {
			return true
		}
		return !excluded.MatchString(op.Namespace)
	}
}
// filterDropInverseWithRegex returns a filter that applies regex only to drop
// operations: a drop is rejected when its namespace matches; every non-drop
// operation passes unconditionally.
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	excluded := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return !excluded.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates (idempotently) a TTL index on the cluster
// collection's expireAt field so that a cluster member's enablement record
// expires 30 seconds after its last heartbeat.
func (ic *indexClient) ensureClusterTTL() error {
	idxOpts := options.Index()
	idxOpts.SetName("expireAt")
	idxOpts.SetBackground(true)
	idxOpts.SetExpireAfterSeconds(30)
	model := mongo.IndexModel{
		Keys:    bson.M{"expireAt": 1},
		Options: idxOpts,
	}
	clusterCol := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	_, err := clusterCol.Indexes().CreateOne(context.Background(), model)
	return err
}
// enableProcess attempts to claim the active role for this cluster's resume
// name by inserting a record keyed on it. Returns (true, nil) when the claim
// succeeds, (false, nil) when another process already holds it (duplicate
// key), and (false, err) on any other failure.
func (ic *indexClient) enableProcess() (bool, error) {
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	doc := bson.M{
		"_id":      ic.config.ResumeName,
		"pid":      os.Getpid(),
		"host":     host,
		"expireAt": time.Now().UTC(),
	}
	if _, err := col.InsertOne(context.Background(), doc); err != nil {
		if isDup(err) {
			// Another member of the cluster is currently enabled.
			return false, nil
		}
		return false, err
	}
	// Refresh expireAt via $currentDate so the TTL clock starts server-side.
	ic.ensureEnabled()
	return true, nil
}
// isDup reports whether err represents a MongoDB duplicate-key failure,
// checking both the write concern error and the individual write errors of a
// mongo.WriteException for the known duplicate-key codes (11000, 11001,
// 12582) or an "E11000" message.
func isDup(err error) bool {
	isDupCode := func(code int, message string) bool {
		return code == 11000 ||
			code == 11001 ||
			code == 12582 ||
			strings.Contains(message, "E11000")
	}
	we, ok := err.(mongo.WriteException)
	if !ok {
		return false
	}
	if wce := we.WriteConcernError; wce != nil {
		if isDupCode(wce.Code, wce.Message) {
			return true
		}
	}
	// Ranging over a nil WriteErrors slice is a no-op, so no nil check needed.
	// (The original shadowed `we` with `we := we.WriteErrors` here.)
	for _, e := range we.WriteErrors {
		if isDupCode(e.Code, e.Message) {
			return true
		}
	}
	return false
}
// resetClusterState releases this process's cluster enablement claim by
// deleting its record from the cluster collection.
func (ic *indexClient) resetClusterState() error {
	clusterCol := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	filter := bson.M{"_id": ic.config.ResumeName}
	_, err := clusterCol.DeleteOne(context.Background(), filter)
	return err
}
// ensureEnabled checks whether this process currently holds the cluster
// enablement record (matching pid and hostname) and, if so, refreshes the
// record's expireAt heartbeat via $currentDate. A missing record is not an
// error; it simply means the process is not enabled.
func (ic *indexClient) ensureEnabled() (enabled bool, err error) {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err = result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			// Use comma-ok assertions so a malformed or differently-encoded
			// document (e.g. pid stored as a non-int32) cannot panic; it is
			// simply treated as "not enabled".
			pid, pidOK := doc["pid"].(int32)
			host, hostOK := doc["host"].(string)
			if pidOK && hostOK {
				var hostname string
				if hostname, err = os.Hostname(); err == nil {
					enabled = int(pid) == os.Getpid() && host == hostname
					if enabled {
						// Heartbeat: bump expireAt server-side so the TTL
						// index does not expire this claim while we are alive.
						_, err = col.UpdateOne(context.Background(), bson.M{
							"_id": ic.config.ResumeName,
						}, bson.M{
							"$currentDate": bson.M{"expireAt": true},
						})
					}
				}
			}
		}
	}
	if err == mongo.ErrNoDocuments {
		err = nil
	}
	return
}
// resumeWork restores the last saved oplog timestamp (if any) from the
// monstache collection, drains any ops already buffered on the gtm channel so
// the resumed stream starts clean, and then resumes the gtm context.
func (ic *indexClient) resumeWork() {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			// Comma-ok assertion: a malformed resume document should not
			// panic (the original asserted the type unconditionally).
			if ts, ok := doc["ts"].(primitive.Timestamp); ok {
				ic.gtmCtx.Since(ts)
			}
		}
	}
	// Non-blocking drain of any queued ops; stops when the channel is empty
	// or closed.
	drained := false
	for !drained {
		select {
		case _, open := <-ic.gtmCtx.OpC:
			if !open {
				drained = true
			}
		default:
			drained = true
		}
	}
	ic.gtmCtx.Resume()
}
// saveTokens upserts all buffered change stream resume tokens into the tokens
// collection in a single unordered bulk write, keyed by resume name and
// stream ID, and clears the in-memory buffer on success.
func (ic *indexClient) saveTokens() error {
	if len(ic.tokens) == 0 {
		return nil
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("tokens")
	models := make([]mongo.WriteModel, 0, len(ic.tokens))
	for streamID, token := range ic.tokens {
		filter := bson.M{
			"resumeName": ic.config.ResumeName,
			"streamID":   streamID,
		}
		replacement := bson.M{
			"resumeName": ic.config.ResumeName,
			"streamID":   streamID,
			"token":      token,
		}
		model := mongo.NewReplaceOneModel()
		model.SetUpsert(true)
		model.SetFilter(filter)
		model.SetReplacement(replacement)
		models = append(models, model)
	}
	bulkOpts := options.BulkWrite().SetOrdered(false)
	_, err := col.BulkWrite(context.Background(), models, bulkOpts)
	if err == nil {
		// All tokens persisted; start a fresh buffer.
		ic.tokens = bson.M{}
	}
	return err
}
// saveTimestamp upserts the most recently processed oplog timestamp under the
// resume name so a later run can continue where this one left off.
func (ic *indexClient) saveTimestamp() error {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	filter := bson.M{"_id": ic.config.ResumeName}
	update := bson.M{
		"$set": map[string]interface{}{
			"ts": ic.lastTs,
		},
	}
	upsert := options.Update().SetUpsert(true)
	_, err := col.UpdateOne(context.Background(), filter, update, upsert)
	return err
}
// parseCommandLineFlags registers every supported command line flag against
// the global flag set, parses os.Args, and returns the receiver for chaining.
// Flag defaults are zero values; loadConfigFile later fills in any option the
// user did not set on the command line. Only help-text typos are corrected
// here relative to the original; flag names and defaults are unchanged.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.BoolVar(&config.EnableTemplate, "tpl", false, "True to interpret the config file as a template")
	flag.StringVar(&config.EnvDelimiter, "env-delimiter", ",", "A delimiter to use when splitting environment variable values")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.PostProcessors, "post-processors", 0, "Number of post-processing go routines")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.IntVar(&config.RelateThreads, "relate-threads", 0, "Number of threads dedicated to processing relationships")
	flag.IntVar(&config.RelateBuffer, "relate-buffer", 0, "Number of relates to queue before skipping and reporting an error")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to enable gzip for requests to Elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableOplog, "enable-oplog", false, "True to enable direct tailing of the oplog")
	flag.BoolVar(&config.DisableChangeEvents, "disable-change-events", false, "True to disable listening for changes. You must provide direct-reads in this case")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Var(&config.ResumeStrategy, "resume-strategy", "Strategy to use for resuming. 0=timestamp,1=token")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speed up writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.DisableFilePipelinePut, "disable-file-pipeline-put", false, "True to disable auto-creation of the ingest plugin pipeline")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include a json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.DirectReadExcludeRegex, "direct-read-dynamic-exclude-regex", "", "A regex to use for excluding namespaces when using dynamic direct reads")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	flag.Var(&config.ChangeStreamNs, "change-stream-namespace", "A list of change stream namespaces")
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	flag.IntVar(&config.DirectReadConcur, "direct-read-concur", 0, "Max number of direct-read-namespaces to read concurrently. By default all given are read concurrently")
	flag.BoolVar(&config.DirectReadNoTimeout, "direct-read-no-timeout", false, "True to set the no cursor timeout flag for direct reads")
	flag.BoolVar(&config.DirectReadBounded, "direct-read-bounded", false, "True to limit direct reads to the docs present at query start time")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of time machine namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to prepend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into any time machine indexes")
	flag.BoolVar(&config.PipeAllowDisk, "pipe-allow-disk", false, "True to allow MongoDB to use the disk for pipeline options with lots of results")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Strategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restrict the scope of stateless deletes")
	flag.StringVar(&config.ConfigDatabaseName, "config-database-name", "", "The MongoDB database name that monstache uses to store metadata")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.BoolVar(&config.Debug, "debug", false, "True to enable verbose debug information")
	flag.Parse()
	return config
}
// loadReplacements converts the [[relate]] entries from the config into
// relation records registered in the package-level relates map, keyed by
// source namespace. SrcField and MatchField default to "_id". Invalid
// entries terminate the process via the fatal logger.
func (config *configOptions) loadReplacements() {
	if config.Relate == nil {
		return
	}
	for _, entry := range config.Relate {
		if entry.Namespace == "" && entry.WithNamespace == "" {
			errorLog.Fatalln("Relates must specify namespace and with-namespace")
		}
		dbCol := strings.SplitN(entry.WithNamespace, ".", 2)
		if len(dbCol) != 2 {
			errorLog.Fatalf("Replacement namespace is invalid: %s", entry.WithNamespace)
		}
		rel := &relation{
			Namespace:     entry.Namespace,
			WithNamespace: entry.WithNamespace,
			SrcField:      entry.SrcField,
			MatchField:    entry.MatchField,
			KeepSrc:       entry.KeepSrc,
			MaxDepth:      entry.MaxDepth,
			db:            dbCol[0],
			col:           dbCol[1],
		}
		if rel.SrcField == "" {
			rel.SrcField = "_id"
		}
		if rel.MatchField == "" {
			rel.MatchField = "_id"
		}
		relates[rel.Namespace] = append(relates[rel.Namespace], rel)
	}
}
// loadIndexTypes registers the configured namespace-to-index mappings in the
// package-level mapIndexTypes map. Index names are lowercased to satisfy
// Elasticsearch naming rules; incomplete entries are fatal.
func (config *configOptions) loadIndexTypes() {
	if config.Mapping == nil {
		return
	}
	for _, m := range config.Mapping {
		if m.Namespace == "" || m.Index == "" {
			errorLog.Fatalln("Mappings must specify namespace and index")
		}
		mapIndexTypes[m.Namespace] = &indexMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
		}
	}
}
// loadPipelines compiles the configured pipeline scripts into JavaScript
// execution environments registered in pipeEnvs, keyed by namespace. Each
// pipeline must provide exactly one of path or script, and its module.exports
// must evaluate to a function; any violation is fatal.
func (config *configOptions) loadPipelines() {
	for _, s := range config.Pipeline {
		if s.Path == "" && s.Script == "" {
			errorLog.Fatalln("Pipelines must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Pipelines must specify path or script but not both")
		}
		if s.Path != "" {
			if script, err := ioutil.ReadFile(s.Path); err == nil {
				s.Script = string(script[:])
			} else {
				errorLog.Fatalf("Unable to load pipeline at path %s: %s", s.Path, err)
			}
		}
		// Check pipeEnvs (not filterEnvs) so duplicate pipeline namespaces are
		// actually detected; the original consulted the filter map by mistake,
		// which never caught two pipelines sharing a namespace.
		if _, exists := pipeEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple pipelines with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		pipeEnvs[s.Namespace] = env
	}
}
// loadFilters compiles the configured filter scripts into JavaScript
// execution environments registered in filterEnvs, keyed by namespace. Each
// filter must provide exactly one of path or script, and its module.exports
// must evaluate to a function; any violation is fatal.
func (config *configOptions) loadFilters() {
	for _, f := range config.Filter {
		if f.Script == "" && f.Path == "" {
			errorLog.Fatalln("Filters must specify path or script attributes")
		}
		if f.Path != "" && f.Script != "" {
			errorLog.Fatalln("Filters must specify path or script but not both")
		}
		if f.Path != "" {
			contents, err := ioutil.ReadFile(f.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load filter at path %s: %s", f.Path, err)
			}
			f.Script = string(contents)
		}
		if _, exists := filterEnvs[f.Namespace]; exists {
			errorLog.Fatalf("Multiple filters with namespace: %s", f.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: f.Script,
			lock:   &sync.Mutex{},
		}
		// Seed a CommonJS-style module object, run the script, and require
		// that module.exports ends up being callable.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !exports.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		filterEnvs[f.Namespace] = env
	}
}
// loadScripts compiles the configured mapping scripts into JavaScript
// execution environments registered in mapEnvs, keyed by namespace. A script
// flagged with Routing also marks its namespace in routingNamespaces. Each
// script must provide exactly one of path or script, and its module.exports
// must evaluate to a function; any violation is fatal.
func (config *configOptions) loadScripts() {
	for _, sc := range config.Script {
		if sc.Script == "" && sc.Path == "" {
			errorLog.Fatalln("Scripts must specify path or script")
		}
		if sc.Path != "" && sc.Script != "" {
			errorLog.Fatalln("Scripts must specify path or script but not both")
		}
		if sc.Path != "" {
			contents, err := ioutil.ReadFile(sc.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load script at path %s: %s", sc.Path, err)
			}
			sc.Script = string(contents)
		}
		if _, exists := mapEnvs[sc.Namespace]; exists {
			errorLog.Fatalf("Multiple scripts with namespace: %s", sc.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: sc.Script,
			lock:   &sync.Mutex{},
		}
		// Seed a CommonJS-style module object, run the script, and require
		// that module.exports ends up being callable.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !exports.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		mapEnvs[sc.Namespace] = env
		if sc.Routing {
			routingNamespaces[sc.Namespace] = true
		}
	}
}
// loadPlugins opens the .so file at MapperPluginPath (when configured) and
// wires up any of the optional exported symbols Map, Filter, Process, and
// Pipeline into the corresponding package-level plugin hooks. A symbol that
// exists but has the wrong signature is fatal; a plugin exporting none of the
// four only produces a warning. Returns the receiver for chaining.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Fatalf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	funcDefined := false
	// Type-switch with value binding avoids the duplicated assertion the
	// original performed in each case arm.
	if mapper, err := p.Lookup("Map"); err == nil {
		funcDefined = true
		switch f := mapper.(type) {
		case func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error):
			mapperPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Map' function must be typed %T", mapperPlugin)
		}
	}
	if filter, err := p.Lookup("Filter"); err == nil {
		funcDefined = true
		switch f := filter.(type) {
		case func(*monstachemap.MapperPluginInput) (bool, error):
			filterPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
	}
	if process, err := p.Lookup("Process"); err == nil {
		funcDefined = true
		switch f := process.(type) {
		case func(*monstachemap.ProcessPluginInput) error:
			processPlugin = f
		default:
			errorLog.Fatalf("Plugin 'Process' function must be typed %T", processPlugin)
		}
	}
	if pipe, err := p.Lookup("Pipeline"); err == nil {
		funcDefined = true
		switch f := pipe.(type) {
		case func(string, bool) ([]interface{}, error):
			pipePlugin = f
		default:
			errorLog.Fatalf("Plugin 'Pipeline' function must be typed %T", pipePlugin)
		}
	}
	if !funcDefined {
		warnLog.Println("Plugin loaded but did not find a Map, Filter, Process or Pipeline function")
	}
	return config
}
// decodeAsTemplate renders the config file as a Go text/template with the
// process environment (NAME -> VALUE) as template data, then decodes the
// rendered output as TOML into config. Unknown keys in the file are fatal.
// Returns the receiver for chaining.
func (config *configOptions) decodeAsTemplate() *configOptions {
	env := make(map[string]string)
	for _, kv := range os.Environ() {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) < 2 {
			continue
		}
		env[parts[0]] = parts[1]
	}
	raw, err := ioutil.ReadFile(config.ConfigFile)
	if err != nil {
		errorLog.Fatalln(err)
	}
	tmpl := template.Must(template.New("config").Parse(string(raw)))
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, env); err != nil {
		errorLog.Fatalln(err)
	}
	md, err := toml.Decode(rendered.String(), config)
	if err != nil {
		errorLog.Fatalln(err)
	}
	if ud := md.Undecoded(); len(ud) != 0 {
		// Fail loudly on typo'd keys instead of silently ignoring them.
		errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
	}
	return config
}
// loadConfigFile merges options from the TOML config file (or template) into
// config. Precedence: values already set on the command line win; a file
// value is only copied in when the corresponding config field still holds its
// zero value (or, for booleans that default to true, when the file turned
// them off). Finally the file's script/filter/pipeline/mapping/relate
// sections are compiled. Returns the receiver for chaining.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Seed the file-decoded struct with the same non-zero defaults the
		// command line uses so "absent from file" is distinguishable.
		var tomlConfig = configOptions{
			ConfigFile:             config.ConfigFile,
			LogRotate:              config.LogRotate,
			DroppedDatabases:       true,
			DroppedCollections:     true,
			ElasticValidatePemFile: true,
			GtmSettings:            gtmDefaultSettings(),
		}
		if config.EnableTemplate {
			tomlConfig.decodeAsTemplate()
		} else {
			if md, err := toml.DecodeFile(tomlConfig.ConfigFile, &tomlConfig); err != nil {
				errorLog.Fatalln(err)
			} else if ud := md.Undecoded(); len(ud) != 0 {
				errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
			}
		}
		// Field-by-field merge: file values fill in only what the command
		// line left unset. Order and conditions below are intentional.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		// Defaults-to-true booleans: the file can only switch them off.
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.ElasticHealth0 == 0 {
			config.ElasticHealth0 = tomlConfig.ElasticHealth0
		}
		if config.ElasticHealth1 == 0 {
			config.ElasticHealth1 = tomlConfig.ElasticHealth1
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		if config.DirectReadConcur == 0 {
			config.DirectReadConcur = tomlConfig.DirectReadConcur
		}
		// Defaults-to-false booleans: the file can only switch them on.
		if !config.DirectReadNoTimeout && tomlConfig.DirectReadNoTimeout {
			config.DirectReadNoTimeout = true
		}
		if !config.DirectReadBounded && tomlConfig.DirectReadBounded {
			config.DirectReadBounded = true
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if !config.IndexFiles {
			config.IndexFiles = tomlConfig.IndexFiles
		}
		if !config.DisableFilePipelinePut {
			config.DisableFilePipelinePut = tomlConfig.DisableFilePipelinePut
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.RelateThreads == 0 {
			config.RelateThreads = tomlConfig.RelateThreads
		}
		if config.RelateBuffer == 0 {
			config.RelateBuffer = tomlConfig.RelateBuffer
		}
		if config.PostProcessors == 0 {
			config.PostProcessors = tomlConfig.PostProcessors
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableOplog && tomlConfig.EnableOplog {
			config.EnableOplog = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.DisableChangeEvents && tomlConfig.DisableChangeEvents {
			config.DisableChangeEvents = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Debug && tomlConfig.Debug {
			config.Debug = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if config.ConfigDatabaseName == "" {
			config.ConfigDatabaseName = tomlConfig.ConfigDatabaseName
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		if config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.ResumeStrategy == 0 {
			config.ResumeStrategy = tomlConfig.ResumeStrategy
		}
		if config.DirectReadExcludeRegex == "" {
			config.DirectReadExcludeRegex = tomlConfig.DirectReadExcludeRegex
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Namespace lists: when taken from the file, re-run the matching
		// loader so derived lookup tables reflect the file's values.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
				config.loadGridFsConfig()
			}
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
				config.loadPatchNamespaces()
			}
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if !config.PipeAllowDisk {
			config.PipeAllowDisk = tomlConfig.PipeAllowDisk
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ChangeStreamNs) == 0 {
			config.ChangeStreamNs = tomlConfig.ChangeStreamNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		if !config.AWSConnect.enabled() {
			config.AWSConnect = tomlConfig.AWSConnect
		}
		if !config.Logs.enabled() {
			config.Logs = tomlConfig.Logs
		}
		// These sections exist only in the file, so copy unconditionally.
		config.GtmSettings = tomlConfig.GtmSettings
		config.Relate = tomlConfig.Relate
		config.LogRotate = tomlConfig.LogRotate
		// Compile the file's script-like sections into their runtime maps.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadPipelines()
		tomlConfig.loadIndexTypes()
		tomlConfig.loadReplacements()
	}
	return config
}
// newLogger builds a rotating file logger for the given path using the
// configured log rotation settings (size, backups, age, compression).
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	rotate := config.LogRotate
	return &lumberjack.Logger{
		Filename:   path,
		MaxSize:    rotate.MaxSize,
		MaxBackups: rotate.MaxBackups,
		MaxAge:     rotate.MaxAge,
		LocalTime:  rotate.LocalTime,
		Compress:   rotate.Compress,
	}
}
// setupLogging points the log streams either at a Graylog GELF UDP
// endpoint (when graylog-addr is set) or at per-stream rotating log files.
// Streams with no configured file keep their current output.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr == "" {
		logs := config.Logs
		if logs.Info != "" {
			infoLog.SetOutput(config.newLogger(logs.Info))
		}
		if logs.Warn != "" {
			warnLog.SetOutput(config.newLogger(logs.Warn))
		}
		if logs.Error != "" {
			errorLog.SetOutput(config.newLogger(logs.Error))
		}
		if logs.Trace != "" {
			traceLog.SetOutput(config.newLogger(logs.Trace))
		}
		if logs.Stats != "" {
			statsLog.SetOutput(config.newLogger(logs.Stats))
		}
		return config
	}
	gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
	if err != nil {
		errorLog.Fatalf("Error creating gelf writer: %s", err)
	}
	// All five streams share the single GELF writer.
	infoLog.SetOutput(gelfWriter)
	warnLog.SetOutput(gelfWriter)
	errorLog.SetOutput(gelfWriter)
	traceLog.SetOutput(gelfWriter)
	statsLog.SetOutput(gelfWriter)
	return config
}
// build assembles the effective configuration in a fixed order:
// environment variables first, then the namespace lookup tables derived
// from the values set so far, then the TOML config file, plugins, and
// finally defaults for anything still unset. The order matters: each
// later source only fills values the earlier ones left empty.
func (config *configOptions) build() *configOptions {
	config.loadEnvironment()
	config.loadTimeMachineNamespaces()
	config.loadRoutingNamespaces()
	config.loadPatchNamespaces()
	config.loadGridFsConfig()
	config.loadConfigFile()
	config.loadPlugins()
	config.setDefaults()
	return config
}
// loadEnvironment applies MONSTACHE_* environment variables to the
// configuration. Values already set (e.g. from command-line flags) win:
// most options are only overwritten when the current value is empty.
// The AWS credential and log-rotation variables are applied
// unconditionally. List-valued variables are split on EnvDelimiter
// (default ","). Unparseable numeric values are fatal.
func (config *configOptions) loadEnvironment() *configOptions {
	del := config.EnvDelimiter
	if del == "" {
		del = ","
	}
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if len(pair) < 2 {
			continue
		}
		name, val := pair[0], pair[1]
		if val == "" {
			continue
		}
		// Go switch cases do not fall through, so no break statements
		// are needed (the originals were redundant and removed).
		switch name {
		case "MONSTACHE_MONGO_URL":
			if config.MongoURL == "" {
				config.MongoURL = val
			}
		case "MONSTACHE_MONGO_CONFIG_URL":
			if config.MongoConfigURL == "" {
				config.MongoConfigURL = val
			}
		case "MONSTACHE_MONGO_OPLOG_DB":
			if config.MongoOpLogDatabaseName == "" {
				config.MongoOpLogDatabaseName = val
			}
		case "MONSTACHE_MONGO_OPLOG_COL":
			if config.MongoOpLogCollectionName == "" {
				config.MongoOpLogCollectionName = val
			}
		case "MONSTACHE_ES_URLS":
			if len(config.ElasticUrls) == 0 {
				config.ElasticUrls = strings.Split(val, del)
			}
		case "MONSTACHE_ES_USER":
			if config.ElasticUser == "" {
				config.ElasticUser = val
			}
		case "MONSTACHE_ES_PASS":
			if config.ElasticPassword == "" {
				config.ElasticPassword = val
			}
		case "MONSTACHE_ES_PEM":
			if config.ElasticPemFile == "" {
				config.ElasticPemFile = val
			}
		case "MONSTACHE_WORKER":
			if config.Worker == "" {
				config.Worker = val
			}
		case "MONSTACHE_CLUSTER":
			if config.ClusterName == "" {
				config.ClusterName = val
			}
		case "MONSTACHE_DIRECT_READ_NS":
			if len(config.DirectReadNs) == 0 {
				config.DirectReadNs = strings.Split(val, del)
			}
		case "MONSTACHE_CHANGE_STREAM_NS":
			if len(config.ChangeStreamNs) == 0 {
				config.ChangeStreamNs = strings.Split(val, del)
			}
		case "MONSTACHE_DIRECT_READ_NS_DYNAMIC_EXCLUDE_REGEX":
			if config.DirectReadExcludeRegex == "" {
				config.DirectReadExcludeRegex = val
			}
		case "MONSTACHE_NS_REGEX":
			if config.NsRegex == "" {
				config.NsRegex = val
			}
		case "MONSTACHE_NS_EXCLUDE_REGEX":
			if config.NsExcludeRegex == "" {
				config.NsExcludeRegex = val
			}
		case "MONSTACHE_NS_DROP_REGEX":
			if config.NsDropRegex == "" {
				config.NsDropRegex = val
			}
		case "MONSTACHE_NS_DROP_EXCLUDE_REGEX":
			if config.NsDropExcludeRegex == "" {
				config.NsDropExcludeRegex = val
			}
		case "MONSTACHE_GRAYLOG_ADDR":
			if config.GraylogAddr == "" {
				config.GraylogAddr = val
			}
		// AWS credentials and region always take the environment value.
		case "MONSTACHE_AWS_ACCESS_KEY":
			config.AWSConnect.AccessKey = val
		case "MONSTACHE_AWS_SECRET_KEY":
			config.AWSConnect.SecretKey = val
		case "MONSTACHE_AWS_REGION":
			config.AWSConnect.Region = val
		case "MONSTACHE_LOG_DIR":
			// One directory fans out into all five log files.
			config.Logs.Info = val + "/info.log"
			config.Logs.Warn = val + "/warn.log"
			config.Logs.Error = val + "/error.log"
			config.Logs.Trace = val + "/trace.log"
			config.Logs.Stats = val + "/stats.log"
		case "MONSTACHE_LOG_MAX_SIZE":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_SIZE: %s", err)
			}
			config.LogRotate.MaxSize = int(i)
		case "MONSTACHE_LOG_MAX_BACKUPS":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_BACKUPS: %s", err)
			}
			config.LogRotate.MaxBackups = int(i)
		case "MONSTACHE_LOG_MAX_AGE":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_AGE: %s", err)
			}
			config.LogRotate.MaxAge = int(i)
		case "MONSTACHE_HTTP_ADDR":
			if config.HTTPServerAddr == "" {
				config.HTTPServerAddr = val
			}
		case "MONSTACHE_FILE_NS":
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_PATCH_NS":
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_TIME_MACHINE_NS":
			if len(config.TimeMachineNamespaces) == 0 {
				config.TimeMachineNamespaces = strings.Split(val, del)
			}
		}
	}
	return config
}
// loadRoutingNamespaces registers each configured routing namespace in the
// package-level routingNamespaces lookup set.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces registers each configured time-machine
// namespace in the package-level tmNamespaces lookup set.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces registers each configured patch namespace in the
// package-level patchNamespaces lookup set.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig registers each configured file (GridFS) namespace in
// the package-level fileNamespaces lookup set.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump logs the effective configuration as indented JSON with all
// credentials replaced by the redact placeholder. The value receiver is
// deliberate: the redactions mutate only this local copy, never the live
// configuration.
func (config configOptions) dump() {
	if config.MongoURL != "" {
		config.MongoURL = cleanMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = cleanMongoURL(config.MongoConfigURL)
	}
	if config.ElasticUser != "" {
		config.ElasticUser = redact
	}
	if config.ElasticPassword != "" {
		config.ElasticPassword = redact
	}
	if config.AWSConnect.AccessKey != "" {
		config.AWSConnect.AccessKey = redact
	}
	if config.AWSConnect.SecretKey != "" {
		config.AWSConnect.SecretKey = redact
	}
	if config.AWSConnect.Region != "" {
		config.AWSConnect.Region = redact
	}
	// Renamed from "json" — the original local shadowed the encoding/json
	// package for the rest of the function.
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
// validate terminates the process (errorLog.Fatal*) when the configuration
// is internally inconsistent, and logs non-fatal warnings for direct-read
// settings known to hurt performance.
func (config *configOptions) validate() {
	// With change events disabled, direct reads are the only work source.
	if config.DisableChangeEvents && len(config.DirectReadNs) == 0 {
		errorLog.Fatalln("Direct read namespaces must be specified if change events are disabled")
	}
	if config.AWSConnect.enabled() {
		if err := config.AWSConnect.validate(); err != nil {
			errorLog.Fatalln(err)
		}
	}
	if len(config.DirectReadNs) > 0 {
		if config.ElasticMaxSeconds < 5 {
			warnLog.Println("Direct read performance degrades with small values for elasticsearch-max-seconds. Set to 5s or greater to remove this warning.")
		}
		if config.ElasticMaxDocs > 0 {
			warnLog.Println("For performance reasons it is recommended to use elasticsearch-max-bytes instead of elasticsearch-max-docs since doc size may vary")
		}
	}
	// Only checks that the duration string parses; the value is unused here.
	if config.StatsDuration != "" {
		_, err := time.ParseDuration(config.StatsDuration)
		if err != nil {
			errorLog.Fatalf("Unable to parse stats duration: %s", err)
		}
	}
}
// setDefaults fills in a default for every option still at its zero value
// after flags, environment, and config file have been applied, and derives
// a few options (ResumeName, Resume) from others.
func (config *configOptions) setDefaults() *configOptions {
	// With the oplog disabled and no explicit change streams, fall back to
	// a single empty namespace — presumably meaning "watch everything";
	// TODO confirm against the change-stream setup code.
	if !config.EnableOplog && len(config.ChangeStreamNs) == 0 {
		config.ChangeStreamNs = []string{""}
	}
	// Disabling change events trumps both event sources.
	if config.DisableChangeEvents {
		config.ChangeStreamNs = []string{}
		config.EnableOplog = false
	}
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	// A cluster name implies resume mode; the resume name combines the
	// cluster and worker names when both are present.
	if config.ClusterName != "" {
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.Worker != "" {
		config.ResumeName = config.Worker
	} else if config.ResumeName == "" {
		config.ResumeName = resumeNameDefault
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	// Direct reads benefit from a larger bulk flush interval (see the
	// warning in validate()).
	if config.ElasticMaxSeconds == 0 {
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	if config.ElasticHealth0 == 0 {
		config.ElasticHealth0 = 15
	}
	if config.ElasticHealth1 == 0 {
		config.ElasticHealth1 = 5
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	// Index-name formats below are Go reference-time layouts.
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 && config.IndexFiles {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.RelateThreads == 0 {
		config.RelateThreads = relateThreadsDefault
	}
	if config.RelateBuffer == 0 {
		config.RelateBuffer = relateBufferDefault
	}
	if config.PostProcessors == 0 && processPlugin != nil {
		config.PostProcessors = postProcessorsDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	if config.ConfigDatabaseName == "" {
		config.ConfigDatabaseName = configDatabaseNameDefault
	}
	// A value that fits in 32 bits is treated as plain Unix seconds and
	// shifted into the high half of a MongoDB-style timestamp (seconds in
	// the upper 32 bits, ordinal zero in the lower).
	if config.ResumeFromTimestamp > 0 {
		if config.ResumeFromTimestamp <= math.MaxInt32 {
			config.ResumeFromTimestamp = config.ResumeFromTimestamp << 32
		}
	}
	return config
}
// cleanMongoURL returns a copy of a MongoDB connection string with any
// user:password credentials replaced by the redact placeholder so the URL
// can be logged safely. The scheme (mongodb:// or mongodb+srv://), host,
// and everything after the credentials are preserved.
func cleanMongoURL(URL string) string {
	const (
		scheme    = "mongodb://"
		schemeSrv = "mongodb+srv://"
	)
	url := URL
	hasScheme := strings.HasPrefix(url, scheme)
	hasSchemeSrv := strings.HasPrefix(url, schemeSrv)
	url = strings.TrimPrefix(url, scheme)
	url = strings.TrimPrefix(url, schemeSrv)
	// strings.Index suffices for a single-character separator; the
	// original used strings.IndexAny, which is meant for cutsets.
	if userEnd := strings.Index(url, "@"); userEnd != -1 {
		url = redact + "@" + url[userEnd+1:]
	}
	if hasScheme {
		url = scheme + url
	} else if hasSchemeSrv {
		url = schemeSrv + url
	}
	return url
}
// dialMongo connects to MongoDB at the given URL and verifies the
// connection with a ping. The first call builds and caches the client
// options (including a registry that decodes BSON datetimes as
// time.Time); later calls — used for adding shards — reuse the cached
// options and only re-apply the new URL.
func (config *configOptions) dialMongo(URL string) (*mongo.Client, error) {
	var clientOptions *options.ClientOptions
	if config.mongoClientOptions == nil {
		// use the initial URL to create most of the client options
		// save the client options for potential use later with shards
		rb := bson.NewRegistryBuilder()
		rb.RegisterTypeMapEntry(bsontype.DateTime, reflect.TypeOf(time.Time{}))
		reg := rb.Build()
		clientOptions = options.Client()
		clientOptions.ApplyURI(URL)
		clientOptions.SetAppName("monstache")
		clientOptions.SetRegistry(reg)
		config.mongoClientOptions = clientOptions
	} else {
		// subsequent client connections will only be for adding shards
		// for shards we only have the hostname and replica set
		// apply the hostname to the previously saved client options
		clientOptions = config.mongoClientOptions
		clientOptions.ApplyURI(URL)
	}
	client, err := mongo.NewClient(clientOptions)
	if err != nil {
		return nil, err
	}
	err = client.Connect(context.Background())
	if err != nil {
		return nil, err
	}
	// Ping forces an immediate round trip so a bad URL fails here rather
	// than on first use.
	err = client.Ping(context.Background(), nil)
	if err != nil {
		return nil, err
	}
	return client, nil
}
// NewHTTPClient builds the HTTP client used to talk to Elasticsearch:
// optional custom CA from a PEM file, optional TLS verification bypass,
// gzip per configuration, and — when AWS connect is enabled — a wrapper
// that signs requests with AWS SigV4.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err != nil {
			return client, err
		}
		// A PEM that yields no usable certs is logged but not fatal.
		if ok := certs.AppendCertsFromPEM(ca); !ok {
			errorLog.Printf("No certs parsed successfully from %s", config.ElasticPemFile)
		}
		tlsConfig.RootCAs = certs
	}
	if !config.ElasticValidatePemFile {
		// Turn off validation
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	if config.AWSConnect.enabled() {
		client = aws.NewV4SigningClientWithHTTPClient(credentials.NewStaticCredentials(
			config.AWSConnect.AccessKey,
			config.AWSConnect.SecretKey,
			"",
		), config.AWSConnect.Region, client)
	}
	return client, err
}
// doDrop mirrors MongoDB drop-database / drop-collection ops into
// Elasticsearch, honoring the dropped-databases / dropped-collections
// config switches. Metadata cleanup failures are logged, not returned.
func (ic *indexClient) doDrop(op *gtm.Op) (err error) {
	if db, dropped := op.IsDropDatabase(); dropped {
		if !ic.config.DroppedDatabases {
			return
		}
		if err = ic.deleteIndexes(db); err != nil {
			return
		}
		if e := ic.dropDBMeta(db); e != nil {
			errorLog.Printf("Unable to delete metadata for db: %s", e)
		}
		return
	}
	if col, dropped := op.IsDropCollection(); dropped {
		if !ic.config.DroppedCollections {
			return
		}
		ns := op.GetDatabase() + "." + col
		if err = ic.deleteIndex(ns); err != nil {
			return
		}
		if e := ic.dropCollectionMeta(ns); e != nil {
			errorLog.Printf("Unable to delete metadata for collection: %s", e)
		}
	}
	return
}
// hasFileContent reports whether this op's namespace is configured for
// file (GridFS) content ingestion; always false when file indexing is off.
func (ic *indexClient) hasFileContent(op *gtm.Op) (ingest bool) {
	if ic.config.IndexFiles {
		ingest = fileNamespaces[op.Namespace]
	}
	return
}
// addPatch maintains a json-merge-patch history on the document under the
// configured MergePatchAttr. For updates it fetches the current document
// from Elasticsearch, diffs it against the incoming data, and appends the
// resulting merge patch (with timestamp and version counter); for inserts
// it records the full document as version 1. Direct-read ops and ops with
// a zero timestamp are skipped.
func (ic *indexClient) addPatch(op *gtm.Op, objectID string,
	indexType *indexMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp.T == 0 {
		return nil
	}
	client, config := ic.client, ic.config
	if op.IsUpdate() {
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		// Metadata overrides win over the mapped defaults.
		if meta.ID != "" {
			service.Id(meta.ID)
		}
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; normalize the
						// ts/v fields back to ints.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff is computed against the document without its
					// own patch history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								merge["ts"] = op.Timestamp.T
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: seed the history with the full document as version 1,
		// unless a history attribute is already present.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp.T
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing converts one gtm op into Elasticsearch bulk requests: the
// primary index (or update-as-upsert) request, optional json-merge-patch
// bookkeeping, optional routing-metadata persistence, and an optional
// time-machine (history) index request. Requests are only queued when
// their source serializes cleanly.
func (ic *indexClient) doIndexing(op *gtm.Op) (err error) {
	meta := parseIndexMeta(op)
	if meta.Skip {
		return
	}
	ic.prepareDataForIndexing(op)
	objectID, indexType := opIDToString(op), ic.mapIndex(op)
	if ic.config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			if e := ic.addPatch(op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	ingestAttachment := false
	if ic.hasFileContent(op) {
		ingestAttachment = op.Data["file"] != nil
	}
	if ic.config.IndexAsUpdate && meta.Pipeline == "" && !ingestAttachment {
		// Update-as-upsert path: pipelines and attachments are not
		// supported on bulk update requests.
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		// Metadata overrides win over the mapped defaults.
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		// NOTE: a dead empty `if meta.Type != "" {}` block was removed
		// here; meta.Type is no longer applied to requests.
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// Source() validates that the request serializes before queueing.
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	} else {
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// File content routes through the attachment ingest pipeline,
		// overriding any metadata pipeline.
		if ingestAttachment {
			req.Pipeline("attachment")
		}
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	}
	if meta.shouldSave(ic.config) {
		if e := ic.setIndexMeta(op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || ic.config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// Time-machine index names are date-stamped:
			// prefix.index.suffix run through time.Format.
			tmIndex := func(idx string) string {
				pre, suf := ic.config.TimeMachineIndexPrefix, ic.config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			if !ic.config.IndexOplogTime {
				secs := int64(op.Timestamp.T)
				t := time.Unix(secs, 0).UTC()
				data[ic.config.OplogTsFieldName] = op.Timestamp
				data[ic.config.OplogDateFieldName] = t.Format(ic.config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(ic.config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			// Route history entries by source id so a document's history
			// stays on one shard.
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			if _, err = req.Source(); err == nil {
				ic.bulk.Add(req)
			}
		}
	}
	return
}
// doIndex maps the op's data and either indexes the result or — when an
// update maps to nil data — deletes the document instead.
func (ic *indexClient) doIndex(op *gtm.Op) (err error) {
	if err = ic.mapData(op); err != nil {
		return
	}
	if op.Data != nil {
		return ic.doIndexing(op)
	}
	if op.IsUpdate() {
		ic.doDelete(op)
	}
	return
}
// runProcessor invokes the process plugin with a fully populated input.
// Deletes carry only the document id, since the data is gone.
func (ic *indexClient) runProcessor(op *gtm.Op) (err error) {
	input := &monstachemap.ProcessPluginInput{
		ElasticClient:        ic.client,
		ElasticBulkProcessor: ic.bulk,
		Timestamp:            op.Timestamp,
		Document:             op.Data,
		Namespace:            op.Namespace,
		Database:             op.GetDatabase(),
		Collection:           op.GetCollection(),
		Operation:            op.Operation,
		MongoClient:          ic.mongo,
		UpdateDescription:    op.UpdateDescription,
	}
	if op.IsDelete() {
		input.Document = map[string]interface{}{
			"_id": op.Id,
		}
	}
	return processPlugin(input)
}
func (ic *indexClient) routeProcess(op *gtm.Op) (err error) {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
UpdateDescription: op.UpdateDescription,
}
if op.Data != nil {
var data []byte
data, err = bson.Marshal(op.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
rop.Data = m
}
}
}
ic.processC <- rop
return
}
// routeDrop flushes any queued bulk requests before mirroring the drop,
// so pending writes for the dropped namespace are not applied afterwards.
func (ic *indexClient) routeDrop(op *gtm.Op) (err error) {
	ic.bulk.Flush()
	return ic.doDrop(op)
}
// routeDeleteRelate forwards a delete to the relate pipeline when the
// namespace has relations configured. If any relation keys off a field
// other than _id, the deleted document must first be recovered from
// Elasticsearch (findDeletedSrcDoc); otherwise the id alone suffices.
// The send is non-blocking: when the relate channel is full the op is
// dropped with an error log rather than stalling the main loop.
func (ic *indexClient) routeDeleteRelate(op *gtm.Op) (err error) {
	if rs := relates[op.Namespace]; len(rs) != 0 {
		var delData map[string]interface{}
		useFind := false
		for _, r := range rs {
			if r.SrcField != "_id" {
				useFind = true
				break
			}
		}
		if useFind {
			delData = ic.findDeletedSrcDoc(op)
		} else {
			delData = map[string]interface{}{
				"_id": op.Id,
			}
		}
		// delData is nil when the deleted source doc could not be
		// recovered; nothing to relate in that case.
		if delData != nil {
			rop := &gtm.Op{
				Id:        op.Id,
				Operation: op.Operation,
				Namespace: op.Namespace,
				Source:    op.Source,
				Timestamp: op.Timestamp,
				Data:      delData,
			}
			select {
			case ic.relateC <- rop:
			default:
				errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
			}
		}
	}
	return
}
// routeDelete handles a delete op: the relate pipeline gets a chance to
// process the deletion first (when relations are configured), then the
// Elasticsearch delete is issued regardless.
func (ic *indexClient) routeDelete(op *gtm.Op) (err error) {
	if len(ic.config.Relate) > 0 {
		err = ic.routeDeleteRelate(op)
	}
	ic.doDelete(op)
	return
}
// routeDataRelate forwards an op with data to the relate pipeline. When
// every relation for the namespace discards the source (no KeepSrc), the
// original op is sent as-is and skip=true tells the caller not to index
// it; otherwise a copy with a BSON-round-trip deep copy of the data is
// sent so the relate goroutine cannot race with indexing, and skip=false
// lets the original proceed to the index. Sends are non-blocking: a full
// relate channel drops the op with an error log.
func (ic *indexClient) routeDataRelate(op *gtm.Op) (skip bool, err error) {
	rs := relates[op.Namespace]
	if len(rs) == 0 {
		return
	}
	skip = true
	for _, r := range rs {
		if r.KeepSrc {
			skip = false
			break
		}
	}
	if skip {
		select {
		case ic.relateC <- op:
		default:
			errorLog.Printf(relateQueueOverloadMsg, op.Namespace, op.Id)
		}
	} else {
		rop := &gtm.Op{
			Id:                op.Id,
			Operation:         op.Operation,
			Namespace:         op.Namespace,
			Source:            op.Source,
			Timestamp:         op.Timestamp,
			UpdateDescription: op.UpdateDescription,
		}
		var data []byte
		data, err = bson.Marshal(op.Data)
		if err == nil {
			var m map[string]interface{}
			err = bson.Unmarshal(data, &m)
			if err == nil {
				rop.Data = m
			}
		}
		select {
		case ic.relateC <- rop:
		default:
			errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
		}
	}
	return
}
// routeData dispatches an op carrying data: oplog-sourced ops may be
// claimed entirely by the relate pipeline; otherwise the op goes to the
// file-ingestion channel (for GridFS namespaces) or the index channel.
func (ic *indexClient) routeData(op *gtm.Op) (err error) {
	skip := false
	if op.IsSourceOplog() && len(ic.config.Relate) > 0 {
		skip, err = ic.routeDataRelate(op)
	}
	if skip {
		return
	}
	if ic.hasFileContent(op) {
		ic.fileC <- op
	} else {
		ic.indexC <- op
	}
	return
}
// routeOp is the top-level dispatcher for an incoming gtm op: the process
// plugin (if any) always sees the op first, then exactly one of the
// drop/delete/data routes handles it.
func (ic *indexClient) routeOp(op *gtm.Op) (err error) {
	if processPlugin != nil {
		err = ic.routeProcess(op)
	}
	switch {
	case op.IsDrop():
		err = ic.routeDrop(op)
	case op.IsDelete():
		err = ic.routeDelete(op)
	case op.Data != nil:
		err = ic.routeData(op)
	}
	return
}
// processErr records a processing error: marks the process exit status as
// failed, logs the error, and — in fail-fast mode — exits immediately.
// The package mutex guards the shared exitStatus.
func (ic *indexClient) processErr(err error) {
	config := ic.config
	mux.Lock()
	defer mux.Unlock()
	exitStatus = 1
	errorLog.Println(err)
	if config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats queues a snapshot of the bulk processor's statistics —
// plus timestamp, host, and pid — onto the dedicated stats bulk
// processor, targeting a date-stamped index derived from StatsIndexFormat.
// A hostname lookup failure is returned but does not stop the snapshot.
func (ic *indexClient) doIndexStats() (err error) {
	now := time.Now().UTC()
	doc := make(map[string]interface{})
	doc["Timestamp"] = now.Format("2006-01-02T15:04:05")
	var host string
	if host, err = os.Hostname(); err == nil {
		doc["Host"] = host
	}
	doc["Pid"] = os.Getpid()
	doc["Stats"] = ic.bulk.Stats()
	statsIndex := strings.ToLower(now.Format(ic.config.StatsIndexFormat))
	req := elastic.NewBulkIndexRequest().Index(statsIndex)
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	req.Doc(doc)
	ic.bulkStats.Add(req)
	return
}
// dropDBMeta removes all stored routing metadata for a dropped database.
// Only relevant under the stateful delete strategy, where metadata is
// persisted in the config database's "meta" collection.
func (ic *indexClient) dropDBMeta(db string) (err error) {
	if ic.config.DeleteStrategy == statefulDeleteStrategy {
		col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
		q := bson.M{"db": db}
		_, err = col.DeleteMany(context.Background(), q)
	}
	return
}
// dropCollectionMeta removes all stored routing metadata for a dropped
// collection (by full namespace). Only relevant under the stateful delete
// strategy, where metadata is persisted in the config database's "meta"
// collection.
func (ic *indexClient) dropCollectionMeta(namespace string) (err error) {
	if ic.config.DeleteStrategy == statefulDeleteStrategy {
		col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
		q := bson.M{"namespace": namespace}
		_, err = col.DeleteMany(context.Background(), q)
	}
	return
}
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
var v interface{}
var ok bool
var s string
if _, ok = metaAttrs["skip"]; ok {
meta.Skip = true
}
if v, ok = metaAttrs["routing"]; ok {
meta.Routing = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["index"]; ok {
meta.Index = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["id"]; ok {
op := >m.Op{
Id: v,
}
meta.ID = opIDToString(op)
}
if v, ok = metaAttrs["type"]; ok {
meta.Type = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["parent"]; ok {
meta.Parent = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["version"]; ok {
s = fmt.Sprintf("%v", v)
if version, err := strconv.ParseInt(s, 10, 64); err == nil {
meta.Version = version
}
}
if v, ok = metaAttrs["versionType"]; ok {
meta.VersionType = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["pipeline"]; ok {
meta.Pipeline = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["retryOnConflict"]; ok {
s = fmt.Sprintf("%v", v)
if roc, err := strconv.Atoi(s); err == nil {
meta.RetryOnConflict = roc
}
}
}
// shouldSave reports whether this metadata needs to be persisted: only
// under the stateful delete strategy, and only when at least one routing-
// relevant field is set.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing metadata for one document into the
// config database's "meta" collection, keyed by "namespace.id". The "db"
// and "namespace" fields allow bulk cleanup when a database or collection
// is dropped (see dropDBMeta / dropCollectionMeta).
func (ic *indexClient) setIndexMeta(namespace, id string, meta *indexingMeta) error {
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"id":        meta.ID,
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	opts := options.Update()
	opts.SetUpsert(true)
	_, err := col.UpdateOne(context.Background(), bson.M{
		"_id": metaID,
	}, bson.M{
		"$set": doc,
	}, opts)
	return err
}
// getIndexMeta fetches the stored routing metadata for one document from
// the config database's "meta" collection and deletes it after a
// successful read (single-use: the metadata exists to route the final
// delete). Returns a zero-value meta when nothing is stored or on error.
func (ic *indexClient) getIndexMeta(namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	result := col.FindOne(context.Background(), bson.M{
		"_id": metaID,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			// NOTE(review): these unchecked assertions assume the values
			// were stored as strings by setIndexMeta; a foreign document
			// in the meta collection would panic here.
			if doc["id"] != nil {
				meta.ID = doc["id"].(string)
			}
			if doc["routing"] != nil {
				meta.Routing = doc["routing"].(string)
			}
			if doc["index"] != nil {
				meta.Index = strings.ToLower(doc["index"].(string))
			}
			if doc["type"] != nil {
				meta.Type = doc["type"].(string)
			}
			if doc["parent"] != nil {
				meta.Parent = doc["parent"].(string)
			}
			if doc["pipeline"] != nil {
				meta.Pipeline = doc["pipeline"].(string)
			}
			col.DeleteOne(context.Background(), bson.M{"_id": metaID})
		}
	}
	return
}
// loadBuiltinFunctions registers the MongoDB query builtins (findId,
// findOne, find, pipe) on every mapping-script JavaScript VM. A failure
// to register any function is fatal.
func loadBuiltinFunctions(client *mongo.Client, config *configOptions) {
	for ns, env := range mapEnvs {
		confs := []*findConf{
			{client: client, name: "findId", vm: env.VM, ns: ns, byID: true},
			{client: client, name: "findOne", vm: env.VM, ns: ns},
			{client: client, name: "find", vm: env.VM, ns: ns, multi: true},
			{client: client, name: "pipe", vm: env.VM, ns: ns, multi: true,
				pipe: true, pipeAllowDisk: config.PipeAllowDisk},
		}
		for _, fa := range confs {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				errorLog.Fatalln(err)
			}
		}
	}
}
// setDatabase applies an optional "database" option; a present but
// non-string value is an error.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	ov, ok := topts["database"]
	if !ok {
		return
	}
	if ovs, ok := ov.(string); ok {
		fc.db = ovs
		return
	}
	return errors.New("Invalid database option value")
}
// setCollection applies an optional "collection" option; a present but
// non-string value is an error.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	ov, ok := topts["collection"]
	if !ok {
		return
	}
	if ovs, ok := ov.(string); ok {
		fc.col = ovs
		return
	}
	return errors.New("Invalid collection option value")
}
// setSelect copies integer-valued entries of an optional "select" option
// into the projection map; non-integer entries are skipped silently, and
// a present but non-map value is an error.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	ov, ok := topts["select"]
	if !ok {
		return
	}
	ovsel, ok := ov.(map[string]interface{})
	if !ok {
		return errors.New("Invalid select option value")
	}
	for k, v := range ovsel {
		if vi, ok := v.(int64); ok {
			fc.sel[k] = int(vi)
		}
	}
	return
}
// setSort copies integer-valued entries of an optional "sort" option into
// the sort specification; non-integer entries are skipped silently, and a
// present but non-map value is an error.
//
// A stray recursive self-call with a bogus literal map
// ({"joe": "rick"}) was removed here: it matched no option key and had no
// effect beyond a wasted allocation per call.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	if ov, ok := topts["sort"]; ok {
		if ovsort, ok := ov.(map[string]interface{}); ok {
			for k, v := range ovsort {
				if vi, ok := v.(int64); ok {
					fc.sort[k] = int(vi)
				}
			}
		} else {
			err = errors.New("Invalid sort option value")
		}
	}
	return
}
// setLimit applies an optional integer "limit" option; a present but
// non-integer value is an error.
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	ov, ok := topts["limit"]
	if !ok {
		return
	}
	if ovl, ok := ov.(int64); ok {
		fc.limit = int(ovl)
		return
	}
	return errors.New("Invalid limit option value")
}
// setQuery exports the JavaScript query value into Go data, restoring hex
// strings to ObjectIDs along the way.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	var exported interface{}
	exported, err = v.Export()
	if err != nil {
		return
	}
	fc.query = fc.restoreIds(deepExportValue(exported))
	return
}
// setOptions exports the JavaScript options argument and applies each
// recognized option. database/collection/select apply to all call kinds;
// sort and limit only apply to multi-document calls (find, pipe). Any
// non-object options value is an error.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	var opts interface{}
	if opts, err = v.Export(); err == nil {
		switch topts := opts.(type) {
		case map[string]interface{}:
			if err = fc.setDatabase(topts); err != nil {
				return
			}
			if err = fc.setCollection(topts); err != nil {
				return
			}
			if err = fc.setSelect(topts); err != nil {
				return
			}
			if fc.isMulti() {
				if err = fc.setSort(topts); err != nil {
					return
				}
				if err = fc.setLimit(topts); err != nil {
					return
				}
			}
		default:
			err = errors.New("Invalid options argument")
			return
		}
	} else {
		err = errors.New("Invalid options argument")
	}
	return
}
// setDefaults seeds the call's db and collection from the mapping
// script's namespace ("db.collection"), when one is configured.
func (fc *findCall) setDefaults() {
	if fc.config.ns == "" {
		return
	}
	parts := strings.SplitN(fc.config.ns, ".", 2)
	fc.db, fc.col = parts[0], parts[1]
}
// getCollection resolves the target collection from the call's current
// db and collection names.
func (fc *findCall) getCollection() *mongo.Collection {
	return fc.client.Database(fc.db).Collection(fc.col)
}

// getVM returns the otto JavaScript VM this call executes in.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}

// getFunctionName returns the name this builtin was registered under
// (findId, findOne, find, or pipe).
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}

// isMulti reports whether the call returns multiple documents.
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}

// isPipe reports whether the call runs an aggregation pipeline.
func (fc *findCall) isPipe() bool {
	return fc.config.pipe
}

// pipeAllowDisk reports whether aggregations may spill to disk.
func (fc *findCall) pipeAllowDisk() bool {
	return fc.config.pipeAllowDisk
}

// logError logs an error tagged with the builtin function's name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts every string
// that parses as a 24-character hex ObjectID back into a
// primitive.ObjectID, recursing through maps and slices. All other values
// pass through unchanged. Note: any hex string of ObjectID shape is
// converted, whether or not it was originally an id.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch vt := v.(type) {
	case string:
		if oi, err := primitive.ObjectIDFromHex(vt); err == nil {
			r = oi
		} else {
			r = v
		}
	case []map[string]interface{}:
		var avs []interface{}
		for _, av := range vt {
			mvs := make(map[string]interface{})
			for k, v := range av {
				mvs[k] = fc.restoreIds(v)
			}
			avs = append(avs, mvs)
		}
		r = avs
	case []interface{}:
		var avs []interface{}
		for _, av := range vt {
			avs = append(avs, fc.restoreIds(av))
		}
		r = avs
	case map[string]interface{}:
		mvs := make(map[string]interface{})
		for k, v := range vt {
			mvs[k] = fc.restoreIds(v)
		}
		r = mvs
	default:
		r = v
	}
	return
}
// execute runs the prepared query against MongoDB and converts the result
// into an otto value for the calling script. Multi-document calls run
// either an aggregation (pipe) or a find with sort/limit/projection;
// single-document calls run FindOne, wrapping the query as {_id: query}
// for findId.
func (fc *findCall) execute() (r otto.Value, err error) {
	var cursor *mongo.Cursor
	col := fc.getCollection()
	query := fc.query
	if fc.isMulti() {
		if fc.isPipe() {
			ao := options.Aggregate()
			ao.SetAllowDiskUse(fc.pipeAllowDisk())
			cursor, err = col.Aggregate(context.Background(), query, ao)
			if err != nil {
				return
			}
		} else {
			fo := options.Find()
			if fc.limit > 0 {
				fo.SetLimit(int64(fc.limit))
			}
			if len(fc.sort) > 0 {
				fo.SetSort(fc.sort)
			}
			if len(fc.sel) > 0 {
				fo.SetProjection(fc.sel)
			}
			cursor, err = col.Find(context.Background(), query, fo)
			if err != nil {
				return
			}
		}
		// NOTE(review): the cursor is never closed and cursor.Err() is
		// not checked after the loop — a truncated result set would be
		// returned silently. Worth confirming upstream.
		var rdocs []map[string]interface{}
		for cursor.Next(context.Background()) {
			doc := make(map[string]interface{})
			if err = cursor.Decode(&doc); err != nil {
				return
			}
			rdocs = append(rdocs, convertMapJavascript(doc))
		}
		r, err = fc.getVM().ToValue(rdocs)
	} else {
		fo := options.FindOne()
		if fc.config.byID {
			query = bson.M{"_id": query}
		}
		if len(fc.sel) > 0 {
			fo.SetProjection(fc.sel)
		}
		result := col.FindOne(context.Background(), query, fo)
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				rdoc := convertMapJavascript(doc)
				r, err = fc.getVM().ToValue(rdoc)
			}
		}
	}
	return
}
// makeFind builds the otto-callable wrapper for one query builtin. The
// returned closure validates arguments (query required, options optional),
// executes the query, and returns the result to the script — or
// otto.NullValue() after logging on any error. Errors never propagate
// into the JavaScript VM as exceptions.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		// Each invocation gets a fresh findCall so per-call state (sort,
		// projection, limit) never leaks between script calls.
		fc := &findCall{
			config: fa,
			client: fa.client,
			sort:   make(map[string]int),
			sel:    make(map[string]int),
		}
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// findDeletedSrcDoc looks up the previously indexed Elasticsearch source
// for a document that was deleted in MongoDB, searching by _id across the
// configured delete index pattern. Returns nil (after logging) unless
// exactly one hit with a source is found; on success the source map is
// returned with the original MongoDB _id restored.
func (ic *indexClient) findDeletedSrcDoc(op *gtm.Op) map[string]interface{} {
	objectID := opIDToString(op)
	termQuery := elastic.NewTermQuery("_id", objectID)
	search := ic.client.Search()
	search.Size(1)
	search.Index(ic.config.DeleteIndexPattern)
	search.Query(termQuery)
	searchResult, err := search.Do(context.Background())
	if err != nil {
		errorLog.Printf("Unable to find deleted document %s: %s", objectID, err)
		return nil
	}
	if searchResult.Hits == nil {
		errorLog.Printf("Unable to find deleted document %s", objectID)
		return nil
	}
	if searchResult.TotalHits() == 0 {
		errorLog.Printf("Found no hits for deleted document %s", objectID)
		return nil
	}
	// more than one hit means the _id is ambiguous across indices; bail out
	if searchResult.TotalHits() > 1 {
		errorLog.Printf("Found multiple hits for deleted document %s", objectID)
		return nil
	}
	hit := searchResult.Hits.Hits[0]
	if hit.Source == nil {
		errorLog.Printf("Source unavailable for deleted document %s", objectID)
		return nil
	}
	var src map[string]interface{}
	if err = json.Unmarshal(hit.Source, &src); err == nil {
		src["_id"] = op.Id
		return src
	}
	errorLog.Printf("Unable to unmarshal deleted document %s: %s", objectID, err)
	return nil
}
// tsVersion packs a MongoDB oplog timestamp into a single int64 suitable
// for use as an external document version: seconds in the high 32 bits,
// ordinal in the low 32 bits.
func tsVersion(ts primitive.Timestamp) int64 {
	return (int64(ts.T) << 32) | int64(ts.I)
}
// doDelete queues an Elasticsearch bulk delete for a MongoDB delete op,
// honoring the configured delete strategy:
//   - ignoreDeleteStrategy: do nothing
//   - statefulDeleteStrategy: resolve index/routing/parent from stored metadata
//   - statelessDeleteStrategy: when routing may apply, search the delete
//     index pattern to locate the one document to remove
func (ic *indexClient) doDelete(op *gtm.Op) {
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	if ic.config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	objectID, indexType, meta := opIDToString(op), ic.mapIndex(op), &indexingMeta{}
	req.Id(objectID)
	if ic.config.IndexAsUpdate == false {
		// external versioning from the oplog timestamp guards against
		// out-of-order deletes overwriting newer writes
		req.Version(tsVersion(op.Timestamp))
		req.VersionType("external")
	}
	if ic.config.DeleteStrategy == statefulDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			meta = ic.getIndexMeta(op.Namespace, objectID)
		}
		// default to the mapped index; stored metadata overrides it
		req.Index(indexType.Index)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	} else if ic.config.DeleteStrategy == statelessDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// custom routing possible: search (without source) for the single
			// document matching this _id to discover its index/routing/parent
			termQuery := elastic.NewTermQuery("_id", objectID)
			search := ic.client.Search()
			search.FetchSource(false)
			search.Size(1)
			search.Index(ic.config.DeleteIndexPattern)
			search.Query(termQuery)
			searchResult, err := search.Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s",
					objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.TotalHits() == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s",
					objectID, ic.config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
		}
	} else {
		// unknown strategy value: do nothing rather than guess
		return
	}
	ic.bulk.Add(req)
	return
}
// logRotateDefaults returns the default log rotation settings:
// 500 MB max file size, 28 day retention, 5 backups, UTC timestamps,
// and no compression.
func logRotateDefaults() logRotate {
	var lr logRotate
	lr.MaxSize = 500 // megabytes
	lr.MaxAge = 28   // days
	lr.MaxBackups = 5
	lr.LocalTime = false
	lr.Compress = false
	return lr
}
// gtmDefaultSettings returns the default gtm tuning values: the package
// default channel size, a 32-op buffer flushed every 75ms, and no
// change-stream max await time.
func gtmDefaultSettings() gtmSettings {
	var gs gtmSettings
	gs.ChannelSize = gtmChannelSizeDefault
	gs.BufferSize = 32
	gs.BufferDuration = "75ms"
	gs.MaxAwaitTime = ""
	return gs
}
// notifySdFailed reports the outcome of a failed systemd READY/WATCHDOG
// notification. A non-nil err is always logged as an error; a nil err
// means systemd notification is simply unsupported (NOTIFY_SOCKET unset),
// which is only worth a warning in verbose mode.
func (ic *indexClient) notifySdFailed(err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed reports why the systemd watchdog loop is not running.
// A non-nil err is logged as an error; a nil err means the watchdog is
// simply not enabled, which is only worth a warning in verbose mode.
func (ic *indexClient) watchdogSdFailed(err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHTTP records the server start time and blocks in ListenAndServe.
// ListenAndServe always returns a non-nil error; it is fatal unless the
// shutdown flag indicates an intentional server stop.
func (ctx *httpServerCtx) serveHTTP() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		errorLog.Fatalf("Unable to serve http at address %s: %s", s.Addr, err)
	}
}
// buildServer constructs the monitoring HTTP server and stores it on ctx
// (serveHTTP starts it). Endpoints:
//   - /started: uptime of the server
//   - /healthz: liveness probe, always "ok"
//   - /stats: bulk processor statistics (only when config.Stats)
//   - /instance: process/cluster status fetched from the event loop
//   - /debug/pprof/*: profiling handlers (only when config.Pprof)
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		// time.Since(t) is the idiomatic equivalent of time.Now().Sub(t)
		data := time.Since(ctx.started).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(200)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(200)
				w.Write(stats)
				fmt.Fprintln(w)
			} else {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	mux.HandleFunc("/instance", func(w http.ResponseWriter, req *http.Request) {
		hostname, err := os.Hostname()
		if err != nil {
			w.WriteHeader(500)
			fmt.Fprintf(w, "Unable to get hostname for instance info: %s", err)
			return
		}
		status := instanceStatus{
			Pid:         os.Getpid(),
			Hostname:    hostname,
			ResumeName:  ctx.config.ResumeName,
			ClusterName: ctx.config.ClusterName,
		}
		// ask the event loop for its current enabled state and last timestamp,
		// giving up after 5 seconds if the loop is not draining requests
		respC := make(chan *statusResponse)
		statusReq := &statusRequest{
			responseC: respC,
		}
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case ctx.statusReqC <- statusReq:
			srsp := <-respC
			// a nil response means the process is paused (cluster standby)
			if srsp != nil {
				status.Enabled = srsp.enabled
				status.LastTs = srsp.lastTs
				if srsp.lastTs.T != 0 {
					status.LastTsFormat = time.Unix(int64(srsp.lastTs.T), 0).Format("2006-01-02T15:04:05")
				}
			}
			data, err := json.Marshal(status)
			if err != nil {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print instance info: %s", err)
				break
			}
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(200)
			w.Write(data)
			fmt.Fprintln(w)
		case <-timer.C:
			w.WriteHeader(500)
			fmt.Fprintf(w, "Timeout getting instance info")
		}
	})
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// startNotify launches the systemd READY/WATCHDOG notifier in a goroutine.
func (ic *indexClient) startNotify() {
	go ic.notifySd()
}
// notifySd sends READY=1 to systemd and, if the service watchdog is
// enabled, keeps sending WATCHDOG=1 at half the configured interval.
// Returns early (after logging via notifySdFailed/watchdogSdFailed) when
// systemd notification or the watchdog is unavailable.
func (ic *indexClient) notifySd() {
	var interval time.Duration
	config := ic.config
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		ic.notifySdFailed(err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		ic.watchdogSdFailed(err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			ic.notifySdFailed(err)
			return
		}
		// ping at half the watchdog interval so a single missed beat
		// does not trip the watchdog
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns a gtm callback that dials a MongoDB
// connection for each newly discovered shard, logging the (credential
// redacted) shard URL as it is added.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mongo.Client, error) {
		url := shardInfo.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(url))
		return config.dialMongo(url)
	}
}
// buildPipe returns the aggregation-pipeline builder for gtm: the compiled
// pipeline plugin if one is loaded, else a closure that invokes the
// configured JavaScript pipeline environments, else nil (no pipelines).
// The closure runs the env for the "" (global) key and then the namespace
// key, returning the first pipeline produced.
func buildPipe(config *configOptions) func(string, bool) ([]interface{}, error) {
	if pipePlugin != nil {
		return pipePlugin
	} else if len(pipeEnvs) > 0 {
		return func(ns string, changeEvent bool) ([]interface{}, error) {
			mux.Lock()
			defer mux.Unlock()
			nss := []string{"", ns}
			for _, ns := range nss {
				if env := pipeEnvs[ns]; env != nil {
					// deferred unlocks are safe here: every path through the
					// loop body that matters returns from the closure
					env.lock.Lock()
					defer env.lock.Unlock()
					val, err := env.VM.Call("module.exports", ns, ns, changeEvent)
					if err != nil {
						return nil, err
					}
					if strings.ToLower(val.Class()) == "array" {
						data, err := val.Export()
						if err != nil {
							return nil, err
						} else if data == val {
							// otto returns the value itself when it cannot export
							return nil, errors.New("Exported pipeline function must return an array")
						} else {
							switch data.(type) {
							case []map[string]interface{}:
								ds := data.([]map[string]interface{})
								var is []interface{} = make([]interface{}, len(ds))
								for i, d := range ds {
									is[i] = deepExportValue(d)
								}
								return is, nil
							case []interface{}:
								// otto exports an empty JS array as []interface{};
								// a non-empty one here means non-object elements
								ds := data.([]interface{})
								if len(ds) > 0 {
									errorLog.Fatalln("Pipeline function must return an array of objects")
								}
								return nil, nil
							default:
								errorLog.Fatalln("Pipeline function must return an array of objects")
							}
						}
					} else {
						return nil, errors.New("Exported pipeline function must return an array")
					}
				}
			}
			return nil, nil
		}
	}
	return nil
}
// start installs the shutdown signal handler. Before the index client has
// started, a signal exits immediately; afterwards the first signal triggers
// a clean shutdown and a second signal forces an immediate exit.
func (sh *sigHandler) start() {
	go func() {
		sigs := make(chan os.Signal, 1)
		// SIGKILL cannot be caught or handled (see os/signal docs), so
		// registering it was a no-op; trap only SIGINT and SIGTERM.
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		select {
		case <-sigs:
			// we never got started so simply exit
			os.Exit(0)
		case ic := <-sh.clientStartedC:
			<-sigs
			go func() {
				// forced shutdown on 2nd signal
				<-sigs
				infoLog.Println("Forcing shutdown, bye bye...")
				os.Exit(1)
			}()
			// we started processing events so do a clean shutdown
			ic.stopAllWorkers()
			ic.doneC <- 10
		}
	}()
}
// startHTTPServer builds and starts the monitoring HTTP server in a
// goroutine when EnableHTTPServer is configured; otherwise does nothing.
func (ic *indexClient) startHTTPServer() {
	config := ic.config
	if config.EnableHTTPServer {
		ic.hsc = &httpServerCtx{
			bulk:       ic.bulk,
			config:     ic.config,
			statusReqC: ic.statusReqC,
		}
		ic.hsc.buildServer()
		go ic.hsc.serveHTTP()
	}
}
// setupFileIndexing prepares Elasticsearch for file content indexing when
// IndexFiles is enabled. Exits the process if no file namespaces are
// configured or the file mapping cannot be established.
func (ic *indexClient) setupFileIndexing() {
	if !ic.config.IndexFiles {
		return
	}
	if len(ic.config.FileNamespaces) == 0 {
		errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
	}
	if err := ic.ensureFileMapping(); err != nil {
		errorLog.Fatalf("Unable to setup file indexing: %s", err)
	}
}
// setupBulk creates the main Elasticsearch bulk processor and, when
// IndexStats is enabled, a separate processor for statistics documents.
// Any failure to start a processor is fatal.
func (ic *indexClient) setupBulk() {
	config := ic.config
	bulk, err := config.newBulkProcessor(ic.client)
	if err != nil {
		errorLog.Fatalf("Unable to start bulk processor: %s", err)
	}
	var bulkStats *elastic.BulkProcessor
	if config.IndexStats {
		bulkStats, err = config.newStatsBulkProcessor(ic.client)
		if err != nil {
			errorLog.Fatalf("Unable to start stats bulk processor: %s", err)
		}
	}
	ic.bulk = bulk
	ic.bulkStats = bulkStats
}
// run boots every subsystem in dependency order — notifier, file indexing,
// bulk processors, HTTP server, cluster membership, worker pools, change
// listening — and then blocks in the event loop until shutdown.
func (ic *indexClient) run() {
	ic.startNotify()
	ic.setupFileIndexing()
	ic.setupBulk()
	ic.startHTTPServer()
	ic.startCluster()
	ic.startRelate()
	ic.startIndex()
	ic.startDownload()
	ic.startPostProcess()
	ic.clusterWait()
	ic.startListen()
	ic.startReadWait()
	ic.eventLoop()
}
// startDownload spawns FileDownloaders workers that attach file content to
// ops from fileC and then forward each op to the indexing stage, even when
// the download failed (the error is reported separately).
func (ic *indexClient) startDownload() {
	for i := 0; i < ic.config.FileDownloaders; i++ {
		ic.fileWg.Add(1)
		go func() {
			defer ic.fileWg.Done()
			for op := range ic.fileC {
				if err := ic.addFileContent(op); err != nil {
					ic.processErr(err)
				}
				// forward to indexing regardless of download outcome
				ic.indexC <- op
			}
		}()
	}
}
// startPostProcess spawns PostProcessors workers that run the configured
// post-processing plugin/script over ops from processC.
func (ic *indexClient) startPostProcess() {
	for i := 0; i < ic.config.PostProcessors; i++ {
		ic.processWg.Add(1)
		go func() {
			defer ic.processWg.Done()
			for op := range ic.processC {
				if err := ic.runProcessor(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// stopAllWorkers shuts down the pipeline in stage order: stop gtm, wait
// for the event loop to drain remaining ops, then close and wait on each
// worker pool (relate → file → index → post-process) so no stage closes
// a channel a previous stage still writes to.
func (ic *indexClient) stopAllWorkers() {
	infoLog.Println("Stopping all workers")
	ic.gtmCtx.Stop()
	<-ic.opsConsumed
	close(ic.relateC)
	ic.relateWg.Wait()
	close(ic.fileC)
	ic.fileWg.Wait()
	close(ic.indexC)
	ic.indexWg.Wait()
	close(ic.processC)
	ic.processWg.Wait()
}
// startReadWait, when direct reads are configured, waits in a goroutine
// for them to finish, optionally saves a resume timestamp, and optionally
// performs a full shutdown if ExitAfterDirectReads is set.
func (ic *indexClient) startReadWait() {
	if len(ic.config.DirectReadNs) > 0 {
		go func() {
			ic.gtmCtx.DirectReadWg.Wait()
			infoLog.Println("Direct reads completed")
			if ic.config.Resume {
				ic.saveTimestampFromReplStatus()
			}
			if ic.config.ExitAfterDirectReads {
				ic.stopAllWorkers()
				ic.doneC <- 30
			}
		}()
	}
}
// dialShards connects to every shard listed in the cluster's
// config.shards collection and returns the clients. Finding no shards or
// failing to dial any one of them is fatal.
func (ic *indexClient) dialShards() []*mongo.Client {
	var mongos []*mongo.Client
	// get the list of shard servers
	shardInfos := gtm.GetShards(ic.mongoConfig)
	if len(shardInfos) == 0 {
		errorLog.Fatalln("Shards enabled but none found in config.shards collection")
	}
	// add each shard server to the sync list
	for _, shardInfo := range shardInfos {
		shardURL := shardInfo.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(shardURL))
		shard, err := ic.config.dialMongo(shardURL)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb shard using URL %s: %s", cleanMongoURL(shardURL), err)
		}
		mongos = append(mongos, shard)
	}
	return mongos
}
// buildTokenGen returns a gtm resume-token generator that loads the saved
// change stream token for (ResumeName, streamID) from the tokens
// collection. Returns a nil generator unless resume is enabled with the
// token resume strategy.
func (ic *indexClient) buildTokenGen() gtm.ResumeTokenGenenerator {
	config := ic.config
	var token gtm.ResumeTokenGenenerator
	if !config.Resume || (config.ResumeStrategy != tokenResumeStrategy) {
		return token
	}
	token = func(client *mongo.Client, streamID string, options *gtm.Options) (interface{}, error) {
		var t interface{} = nil
		var err error
		col := client.Database(config.ConfigDatabaseName).Collection("tokens")
		result := col.FindOne(context.Background(), bson.M{
			"resumeName": config.ResumeName,
			"streamID":   streamID,
		})
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				t = doc["token"]
				if t != nil {
					infoLog.Printf("Resuming stream '%s' from collection %s.tokens using resume name '%s'",
						streamID, config.ConfigDatabaseName, config.ResumeName)
				}
			}
		}
		return t, err
	}
	return token
}
// buildTimestampGen returns a gtm timestamp generator for the timestamp
// resume strategy, or nil otherwise. Precedence: Replay (start near the
// oldest oplog entry), then an explicit ResumeFromTimestamp, then the
// saved timestamp from the monstache collection (falling back to the
// newest oplog entry when none is saved).
func (ic *indexClient) buildTimestampGen() gtm.TimestampGenerator {
	var after gtm.TimestampGenerator
	config := ic.config
	if config.ResumeStrategy != timestampResumeStrategy {
		return after
	}
	if config.Replay {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			ts, _ := gtm.FirstOpTimestamp(client, options)
			// add ten seconds as oldest items often fall off the oplog
			ts.T += 10
			ts.I = 0
			infoLog.Printf("Replaying from timestamp %+v", ts)
			return ts, nil
		}
	} else if config.ResumeFromTimestamp != 0 {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			// the int64 packs seconds in the high 32 bits, ordinal in the low
			return primitive.Timestamp{
				T: uint32(config.ResumeFromTimestamp >> 32),
				I: uint32(config.ResumeFromTimestamp),
			}, nil
		}
	} else if config.Resume {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			var ts primitive.Timestamp
			var err error
			col := client.Database(config.ConfigDatabaseName).Collection("monstache")
			result := col.FindOne(context.Background(), bson.M{
				"_id": config.ResumeName,
			})
			if err = result.Err(); err == nil {
				doc := make(map[string]interface{})
				if err = result.Decode(&doc); err == nil {
					if doc["ts"] != nil {
						ts = doc["ts"].(primitive.Timestamp)
						// advance past the already-processed op
						ts.I++
					}
				}
			}
			if ts.T == 0 {
				ts, _ = gtm.LastOpTimestamp(client, options)
			}
			infoLog.Printf("Resuming from timestamp %+v", ts)
			return ts, nil
		}
	}
	return after
}
// buildConnections returns the MongoDB clients to listen on: each shard
// (plus a config-server connection stored on ic) in sharded mode, or just
// the primary client otherwise. Config-server dial failure is fatal.
func (ic *indexClient) buildConnections() []*mongo.Client {
	var mongos []*mongo.Client
	var err error
	config := ic.config
	if config.readShards() {
		// if we have a config server URL then we are running in a sharded cluster
		ic.mongoConfig, err = config.dialMongo(config.MongoConfigURL)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb config server using URL %s: %s",
				cleanMongoURL(config.MongoConfigURL), err)
		}
		mongos = ic.dialShards()
	} else {
		mongos = append(mongos, ic.mongo)
	}
	return mongos
}
// buildFilterChain assembles the namespace filters applied to every op:
// the always-on exclusions (monstache metadata, system and GridFS chunk
// collections), the config database in sharded mode, and then each
// configured namespace regex (include, drop-include, exclude,
// drop-exclude) in that order.
func (ic *indexClient) buildFilterChain() []gtm.OpFilter {
	cfg := ic.config
	chain := []gtm.OpFilter{notMonstache(cfg), notSystem, notChunks}
	if cfg.readShards() {
		chain = append(chain, notConfig)
	}
	// each optional regex contributes one filter, in a fixed order
	regexFilters := []struct {
		pattern string
		build   func(string) gtm.OpFilter
	}{
		{cfg.NsRegex, filterWithRegex},
		{cfg.NsDropRegex, filterDropWithRegex},
		{cfg.NsExcludeRegex, filterInverseWithRegex},
		{cfg.NsDropExcludeRegex, filterDropInverseWithRegex},
	}
	for _, rf := range regexFilters {
		if rf.pattern != "" {
			chain = append(chain, rf.build(rf.pattern))
		}
	}
	return chain
}
// buildFilterArray assembles the op filters that partition and select
// work: the consistent-hash worker filter (when this process is one of a
// named worker set) and the plugin or script filter. The plugin/script
// filter is also stored on ic for reuse by direct reads.
func (ic *indexClient) buildFilterArray() []gtm.OpFilter {
	config := ic.config
	filterArray := []gtm.OpFilter{}
	var pluginFilter gtm.OpFilter
	if config.Worker != "" {
		workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
		if err != nil {
			errorLog.Fatalln(err)
		}
		filterArray = append(filterArray, workerFilter)
	} else if config.Workers != nil {
		errorLog.Fatalln("Workers configured but this worker is undefined. worker must be set to one of the workers.")
	}
	// a compiled plugin filter takes precedence over script filters
	if filterPlugin != nil {
		pluginFilter = filterWithPlugin()
		filterArray = append(filterArray, pluginFilter)
	} else if len(filterEnvs) > 0 {
		pluginFilter = filterWithScript()
		filterArray = append(filterArray, pluginFilter)
	}
	if pluginFilter != nil {
		ic.filter = pluginFilter
	}
	return filterArray
}
// buildDynamicDirectReadNs discovers the namespaces to direct-read by
// listing every database and collection, skipping ignored ones, and
// keeping only those accepted by the given namespace filter (optionally
// tightened by DirectReadExcludeRegex). Listing failures are fatal at the
// database level and abort discovery at the collection level.
func (ic *indexClient) buildDynamicDirectReadNs(filter gtm.OpFilter) (names []string) {
	client, config := ic.mongo, ic.config
	if config.DirectReadExcludeRegex != "" {
		filter = gtm.ChainOpFilters(filterInverseWithRegex(config.DirectReadExcludeRegex), filter)
	}
	dbs, err := client.ListDatabaseNames(context.Background(), bson.M{})
	if err != nil {
		errorLog.Fatalf("Failed to read database names for dynamic direct reads: %s", err)
	}
	for _, d := range dbs {
		if config.ignoreDatabaseForDirectReads(d) {
			continue
		}
		db := client.Database(d)
		cols, err := db.ListCollectionNames(context.Background(), bson.M{})
		if err != nil {
			errorLog.Fatalf("Failed to read db %s collection names for dynamic direct reads: %s", d, err)
			return
		}
		for _, c := range cols {
			if config.ignoreCollectionForDirectReads(c) {
				continue
			}
			ns := strings.Join([]string{d, c}, ".")
			// probe the filter with a synthetic op for this namespace
			if filter(&gtm.Op{Namespace: ns}) {
				names = append(names, ns)
			} else {
				infoLog.Printf("Excluding collection [%s] for dynamic direct reads", ns)
			}
		}
	}
	if len(names) == 0 {
		warnLog.Println("Dynamic direct read candidates: NONE")
	} else {
		infoLog.Printf("Dynamic direct read candidates: %v", names)
	}
	return
}
// parseBufferDuration parses the configured gtm buffer duration string
// (e.g. "75ms") into a time.Duration; an unparsable value is fatal.
func (ic *indexClient) parseBufferDuration() time.Duration {
	raw := ic.config.GtmSettings.BufferDuration
	d, err := time.ParseDuration(raw)
	if err != nil {
		errorLog.Fatalf("Unable to parse gtm buffer duration %s: %s",
			raw, err)
	}
	return d
}
// parseMaxAwaitTime parses the configured gtm max await time, returning
// the zero duration when unset; an unparsable value is fatal.
func (ic *indexClient) parseMaxAwaitTime() time.Duration {
	raw := ic.config.GtmSettings.MaxAwaitTime
	if raw == "" {
		return 0
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		errorLog.Fatalf("Unable to parse gtm max await time %s: %s",
			raw, err)
	}
	return d
}
// buildGtmOptions assembles the full gtm.Options from configuration:
// namespace and worker/plugin filters, resume generators, oplog/change
// stream settings, buffering, and the direct read namespace list
// (discovered dynamically when so configured).
func (ic *indexClient) buildGtmOptions() *gtm.Options {
	var nsFilter, filter, directReadFilter gtm.OpFilter
	config := ic.config
	filterChain := ic.buildFilterChain()
	filterArray := ic.buildFilterArray()
	nsFilter = gtm.ChainOpFilters(filterChain...)
	filter = gtm.ChainOpFilters(filterArray...)
	directReadFilter = gtm.ChainOpFilters(filterArray...)
	after := ic.buildTimestampGen()
	token := ic.buildTokenGen()
	if config.dynamicDirectReadList() {
		config.DirectReadNs = ic.buildDynamicDirectReadNs(nsFilter)
	}
	gtmOpts := &gtm.Options{
		After:               after,
		Token:               token,
		Filter:              filter,
		NamespaceFilter:     nsFilter,
		OpLogDisabled:       config.EnableOplog == false,
		OpLogDatabaseName:   config.MongoOpLogDatabaseName,
		OpLogCollectionName: config.MongoOpLogCollectionName,
		ChannelSize:         config.GtmSettings.ChannelSize,
		Ordering:            gtm.AnyOrder,
		WorkerCount:         10,
		BufferDuration:      ic.parseBufferDuration(),
		BufferSize:          config.GtmSettings.BufferSize,
		DirectReadNs:        config.DirectReadNs,
		DirectReadSplitMax:  int32(config.DirectReadSplitMax),
		DirectReadConcur:    config.DirectReadConcur,
		DirectReadNoTimeout: config.DirectReadNoTimeout,
		DirectReadFilter:    directReadFilter,
		Log:                 infoLog,
		Pipe:                buildPipe(config),
		ChangeStreamNs:      config.ChangeStreamNs,
		DirectReadBounded:   config.DirectReadBounded,
		MaxAwaitTime:        ic.parseMaxAwaitTime(),
	}
	return gtmOpts
}
// startListen starts gtm change listening on all MongoDB connections and,
// in sharded mode with change events enabled, registers a listener that
// dials newly added shards.
func (ic *indexClient) startListen() {
	config := ic.config
	gtmOpts := ic.buildGtmOptions()
	ic.gtmCtx = gtm.StartMulti(ic.buildConnections(), gtmOpts)
	if config.readShards() && !config.DisableChangeEvents {
		ic.gtmCtx.AddShardListener(ic.mongoConfig, gtmOpts, config.makeShardInsertHandler())
	}
}
// clusterWait blocks a standby cluster member until it can become the
// active process. While paused it stops the bulk processor, answers status
// requests with nil (meaning "paused"), and retries enableProcess on a
// 10-second heartbeat. No-op outside cluster mode or when already enabled.
func (ic *indexClient) clusterWait() {
	if ic.config.ClusterName != "" {
		if ic.enabled {
			infoLog.Printf("Starting work for cluster %s", ic.config.ClusterName)
		} else {
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.bulk.Stop()
			wait := true
			for wait {
				select {
				case req := <-ic.statusReqC:
					// nil response signals the paused state to /instance
					req.responseC <- nil
				case <-heartBeat.C:
					var err error
					ic.enabled, err = ic.enableProcess()
					if err != nil {
						errorLog.Printf("Error attempting to become active cluster process: %s", err)
						break
					}
					if ic.enabled {
						infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
						ic.bulk.Start(context.Background())
						wait = false
					}
				}
			}
		}
	}
}
// hasNewEvents reports whether the most recently observed oplog timestamp
// is strictly newer than the last timestamp persisted, comparing seconds
// first and ordinal within the same second.
func (ic *indexClient) hasNewEvents() bool {
	// return the comparison directly instead of if-true/false
	return ic.lastTs.T > ic.lastTsSaved.T ||
		(ic.lastTs.T == ic.lastTsSaved.T && ic.lastTs.I > ic.lastTsSaved.I)
}
// nextTokens flushes pending bulk work and persists the collected resume
// tokens when new events have arrived since the last save; lastTsSaved
// only advances on a successful save.
func (ic *indexClient) nextTokens() {
	if ic.hasNewEvents() {
		ic.bulk.Flush()
		if err := ic.saveTokens(); err == nil {
			ic.lastTsSaved = ic.lastTs
		} else {
			ic.processErr(err)
		}
	}
}
// nextTimestamp flushes pending bulk work and persists the last oplog
// timestamp when new events have arrived since the last save; lastTsSaved
// only advances on a successful save.
func (ic *indexClient) nextTimestamp() {
	if ic.hasNewEvents() {
		ic.bulk.Flush()
		if err := ic.saveTimestamp(); err == nil {
			ic.lastTsSaved = ic.lastTs
		} else {
			ic.processErr(err)
		}
	}
}
// nextStats emits bulk processor statistics on the stats interval: indexed
// into Elasticsearch when IndexStats is enabled, otherwise written to the
// stats log as JSON.
func (ic *indexClient) nextStats() {
	if ic.config.IndexStats {
		if err := ic.doIndexStats(); err != nil {
			errorLog.Printf("Error indexing statistics: %s", err)
		}
	} else {
		stats, err := json.Marshal(ic.bulk.Stats())
		if err != nil {
			errorLog.Printf("Unable to log statistics: %s", err)
		} else {
			statsLog.Println(string(stats))
		}
	}
}
// nextHeartbeat maintains cluster membership on each heartbeat tick. An
// enabled process re-validates its lease; on losing it the process pauses
// gtm and bulk work and blocks until it re-acquires the lease. A disabled
// process simply attempts to become active and resumes work on success.
func (ic *indexClient) nextHeartbeat() {
	var err error
	if ic.enabled {
		ic.enabled, err = ic.ensureEnabled()
		if err != nil {
			ic.processErr(err)
		}
		if !ic.enabled {
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.gtmCtx.Pause()
			ic.bulk.Stop()
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			wait := true
			for wait {
				select {
				case req := <-ic.statusReqC:
					// nil response signals the paused state to /instance
					req.responseC <- nil
				case <-heartBeat.C:
					ic.enabled, err = ic.enableProcess()
					if ic.enabled {
						wait = false
						infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
						ic.bulk.Start(context.Background())
						ic.resumeWork()
						break
					}
				}
			}
		}
	} else {
		ic.enabled, err = ic.enableProcess()
		if ic.enabled {
			infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
			ic.bulk.Start(context.Background())
			ic.resumeWork()
		}
	}
	if err != nil {
		ic.processErr(err)
	}
}
// eventLoop is the main select loop of the process. It multiplexes:
// shutdown requests (doneC), periodic resume-point saves (disabled unless
// Resume), cluster heartbeats (disabled unless clustered), periodic stats,
// /instance status requests, gtm errors, and the op stream itself —
// tracking the latest oplog timestamp and resume tokens before routing
// each op into the worker pipeline. Runs until shutdown.
func (ic *indexClient) eventLoop() {
	var err error
	var allOpsVisited bool
	timestampTicker := time.NewTicker(10 * time.Second)
	if ic.config.Resume == false {
		timestampTicker.Stop()
	}
	heartBeat := time.NewTicker(10 * time.Second)
	if ic.config.ClusterName == "" {
		heartBeat.Stop()
	}
	statsTimeout := time.Duration(30) * time.Second
	if ic.config.StatsDuration != "" {
		statsTimeout, _ = time.ParseDuration(ic.config.StatsDuration)
	}
	printStats := time.NewTicker(statsTimeout)
	if ic.config.Stats == false {
		printStats.Stop()
	}
	infoLog.Println("Listening for events")
	// hand ourselves to the signal handler so signals trigger clean shutdown
	ic.sigH.clientStartedC <- ic
	for {
		select {
		case timeout := <-ic.doneC:
			ic.enabled = false
			ic.shutdown(timeout)
			return
		case <-timestampTicker.C:
			if !ic.enabled {
				break
			}
			if ic.config.ResumeStrategy == tokenResumeStrategy {
				ic.nextTokens()
			} else {
				ic.nextTimestamp()
			}
		case <-heartBeat.C:
			if ic.config.ClusterName == "" {
				break
			}
			ic.nextHeartbeat()
		case <-printStats.C:
			if !ic.enabled {
				break
			}
			ic.nextStats()
		case req := <-ic.statusReqC:
			enabled, lastTs := ic.enabled, ic.lastTs
			statusResp := &statusResponse{
				enabled: enabled,
				lastTs:  lastTs,
			}
			req.responseC <- statusResp
		case err = <-ic.gtmCtx.ErrC:
			if err == nil {
				break
			}
			ic.processErr(err)
		case op, open := <-ic.gtmCtx.OpC:
			if !ic.enabled {
				break
			}
			if op == nil {
				// channel drained and closed: signal stopAllWorkers exactly once
				if !open && !allOpsVisited {
					allOpsVisited = true
					ic.opsConsumed <- true
				}
				break
			}
			if op.IsSourceOplog() {
				ic.lastTs = op.Timestamp
				if ic.config.ResumeStrategy == tokenResumeStrategy {
					ic.tokens[op.ResumeToken.StreamID] = op.ResumeToken.ResumeToken
				}
			}
			if err = ic.routeOp(op); err != nil {
				ic.processErr(err)
			}
		}
	}
}
// startIndex spawns the workers that index ops from indexC into
// Elasticsearch. NOTE(review): the worker count is hard-coded to 5 here,
// unlike the other pools which are sized from config — confirm intentional.
func (ic *indexClient) startIndex() {
	for i := 0; i < 5; i++ {
		ic.indexWg.Add(1)
		go func() {
			defer ic.indexWg.Done()
			for op := range ic.indexC {
				if err := ic.doIndex(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startRelate spawns RelateThreads workers that process related-document
// updates from relateC; no workers are started when no relations are
// configured.
func (ic *indexClient) startRelate() {
	if len(ic.config.Relate) > 0 {
		for i := 0; i < ic.config.RelateThreads; i++ {
			ic.relateWg.Add(1)
			go func() {
				defer ic.relateWg.Done()
				for op := range ic.relateC {
					if err := ic.processRelated(op); err != nil {
						ic.processErr(err)
					}
				}
			}()
		}
	}
}
// startCluster joins the configured cluster (ensuring the TTL index used
// for leases) and attempts to become the active process; failures are
// fatal. No-op when no cluster name is configured.
func (ic *indexClient) startCluster() {
	if ic.config.ClusterName != "" {
		var err error
		if err = ic.ensureClusterTTL(); err == nil {
			infoLog.Printf("Joined cluster %s", ic.config.ClusterName)
		} else {
			errorLog.Fatalf("Unable to enable cluster mode: %s", err)
		}
		ic.enabled, err = ic.enableProcess()
		if err != nil {
			errorLog.Fatalf("Unable to determine enabled cluster process: %s", err)
		}
	}
}
// closeClient tears down process resources: cluster state, the monitoring
// HTTP server, and both bulk processors, then closes closeC to signal
// completion to shutdown.
func (ic *indexClient) closeClient() {
	if ic.mongo != nil && ic.config.ClusterName != "" {
		ic.resetClusterState()
	}
	if ic.hsc != nil {
		// mark intentional so serveHTTP does not treat the close as fatal
		ic.hsc.shutdown = true
		ic.hsc.httpServer.Shutdown(context.Background())
	}
	if ic.bulk != nil {
		ic.bulk.Close()
	}
	if ic.bulkStats != nil {
		ic.bulkStats.Close()
	}
	close(ic.closeC)
}
// shutdown closes the client asynchronously and waits up to timeout
// seconds for it to finish, then exits the process with exitStatus.
// The original goroutine + done-flag loop was equivalent to a single
// select, since both cases only signaled completion once.
func (ic *indexClient) shutdown(timeout int) {
	infoLog.Println("Shutting down")
	go ic.closeClient()
	closeT := time.NewTimer(time.Duration(timeout) * time.Second)
	defer closeT.Stop()
	// wait for closeClient (closeC) or give up when the timer fires
	select {
	case <-ic.closeC:
	case <-closeT.C:
	}
	os.Exit(exitStatus)
}
// getBuildInfo runs the MongoDB buildInfo admin command and decodes the
// server version information.
func getBuildInfo(client *mongo.Client) (bi *buildInfo, err error) {
	db := client.Database("admin")
	result := db.RunCommand(context.Background(), bson.M{
		"buildInfo": 1,
	})
	if err = result.Err(); err == nil {
		bi = &buildInfo{}
		err = result.Decode(bi)
	}
	return
}
// saveTimestampFromReplStatus reads the replica set's last committed
// optime, records it as lastTs, and persists it; any failure along the
// way is reported via processErr.
func (ic *indexClient) saveTimestampFromReplStatus() {
	if rs, err := gtm.GetReplStatus(ic.mongo); err == nil {
		if ic.lastTs, err = rs.GetLastCommitted(); err == nil {
			if err = ic.saveTimestamp(); err != nil {
				ic.processErr(err)
			}
		} else {
			ic.processErr(err)
		}
	} else {
		ic.processErr(err)
	}
}
// mustConfig parses flags and builds the validated configuration, exiting
// early for the --version and --print modes. Any validation failure
// terminates the process inside config.validate.
func mustConfig() *configOptions {
	config := &configOptions{
		GtmSettings: gtmDefaultSettings(),
		LogRotate:   logRotateDefaults(),
	}
	config.parseCommandLineFlags()
	if config.Version {
		fmt.Println(version)
		os.Exit(0)
	}
	config.build()
	if config.Print {
		config.dump()
		os.Exit(0)
	}
	config.setupLogging()
	config.validate()
	return config
}
// buildMongoClient dials the primary MongoDB URL (fatal on failure) and
// logs the monstache, driver, and server versions.
func buildMongoClient(config *configOptions) *mongo.Client {
	mongoClient, err := config.dialMongo(config.MongoURL)
	if err != nil {
		errorLog.Fatalf("Unable to connect to MongoDB using URL %s: %s",
			cleanMongoURL(config.MongoURL), err)
	}
	infoLog.Printf("Started monstache version %s", version)
	infoLog.Printf("MongoDB go driver %s", mongoversion.Driver)
	infoLog.Printf("Elasticsearch go driver %s", elastic.Version)
	// server version is informational only; connection is already established
	if mongoInfo, err := getBuildInfo(mongoClient); err == nil {
		infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
	} else {
		infoLog.Println("Successfully connected to MongoDB")
	}
	return mongoClient
}
// buildElasticClient creates the Elasticsearch client (fatal on failure)
// and either probes the server for its version or validates the version
// pinned in configuration.
func buildElasticClient(config *configOptions) *elastic.Client {
	elasticClient, err := config.newElasticClient()
	if err != nil {
		errorLog.Fatalf("Unable to create Elasticsearch client: %s", err)
	}
	if config.ElasticVersion == "" {
		if err := config.testElasticsearchConn(elasticClient); err != nil {
			errorLog.Fatalf("Unable to validate connection to Elasticsearch: %s", err)
		}
	} else {
		if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
			errorLog.Fatalf("Elasticsearch version must conform to major.minor.fix: %s", err)
		}
	}
	return elasticClient
}
// main wires everything together: configuration, signal handling, MongoDB
// and Elasticsearch clients, the index client with its channels and
// worker wait groups, then runs the event loop until shutdown.
func main() {
	config := mustConfig()
	sh := &sigHandler{
		clientStartedC: make(chan *indexClient),
	}
	sh.start()
	mongoClient := buildMongoClient(config)
	loadBuiltinFunctions(mongoClient, config)
	elasticClient := buildElasticClient(config)
	ic := &indexClient{
		config:      config,
		mongo:       mongoClient,
		client:      elasticClient,
		fileWg:      &sync.WaitGroup{},
		indexWg:     &sync.WaitGroup{},
		processWg:   &sync.WaitGroup{},
		relateWg:    &sync.WaitGroup{},
		opsConsumed: make(chan bool),
		closeC:      make(chan bool),
		doneC:       make(chan int),
		enabled:     true,
		indexC:      make(chan *gtm.Op),
		processC:    make(chan *gtm.Op),
		fileC:       make(chan *gtm.Op),
		relateC:     make(chan *gtm.Op, config.RelateBuffer),
		statusReqC:  make(chan *statusRequest),
		sigH:        sh,
		tokens:      bson.M{},
	}
	ic.run()
}
// TODO: validate resume strategy
// Package main provides the monstache binary, which syncs MongoDB to Elasticsearch.
package main
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"text/template"
"time"
"github.com/BurntSushi/toml"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/coreos/go-systemd/daemon"
jsonpatch "github.com/evanphx/json-patch"
"github.com/olivere/elastic/v7"
aws "github.com/olivere/elastic/v7/aws/v4"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/gridfs"
"go.mongodb.org/mongo-driver/mongo/options"
mongoversion "go.mongodb.org/mongo-driver/version"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
)
// Leveled loggers; errors go to stderr, everything else to stdout.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())

// Optional compiled plugin hooks, populated when a plugin is loaded.
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)
var processPlugin func(*monstachemap.ProcessPluginInput) error
var pipePlugin func(string, bool) ([]interface{}, error)

// Per-namespace JavaScript environments and feature toggles, keyed by
// MongoDB namespace ("" acts as the global key where consulted).
var mapEnvs = make(map[string]*executionEnv)
var filterEnvs = make(map[string]*executionEnv)
var pipeEnvs = make(map[string]*executionEnv)
var mapIndexTypes = make(map[string]*indexMapping)
var relates = make(map[string][]*relation)
var fileNamespaces = make(map[string]bool)
var patchNamespaces = make(map[string]bool)
var tmNamespaces = make(map[string]bool)
var routingNamespaces = make(map[string]bool)

// mux guards the script environments during pipeline evaluation.
var mux sync.Mutex

// Namespace classification patterns and the process exit code.
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")
var exitStatus = 0
// Build metadata and tunable defaults.
const version = "6.4.0"
const mongoURLDefault string = "mongodb://localhost:27017"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 0
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512
const fileDownloadersDefault = 10
const relateThreadsDefault = 10
const relateBufferDefault = 1000
const postProcessorsDefault = 10
const redact = "REDACTED"
const configDatabaseNameDefault = "monstache"
const relateQueueOverloadMsg = "Relate queue is full. Skipping relate for %v.(%v) to keep pipeline healthy."

// Fixed typo "resume-stategy" so the message names the real config key.
const resumeStrategyInvalid = "resume-strategy 0 is incompatible with MongoDB API < 4. Set resume-strategy = 1"
// deleteStrategy selects how MongoDB deletes are mirrored to Elasticsearch.
type deleteStrategy int

const (
	statelessDeleteStrategy deleteStrategy = iota // search by _id to locate the doc
	statefulDeleteStrategy                        // use stored indexing metadata
	ignoreDeleteStrategy                          // do not delete at all
)

// resumeStrategy selects how progress is persisted and resumed.
type resumeStrategy int

const (
	timestampResumeStrategy resumeStrategy = iota // save oplog timestamps
	tokenResumeStrategy                           // save change stream resume tokens
)

// buildInfo holds the MongoDB buildInfo command response fields we use.
type buildInfo struct {
	Version      string
	VersionArray []int `bson:"versionArray"`
}

// stringargs is a repeatable string command-line flag value.
type stringargs []string

// indexClient is the top-level process state: connections, bulk
// processors, worker pools and their channels, resume bookkeeping, and
// coordination channels for shutdown and status reporting.
type indexClient struct {
	gtmCtx      *gtm.OpCtxMulti
	config      *configOptions
	mongo       *mongo.Client
	mongoConfig *mongo.Client // config-server connection in sharded mode
	bulk        *elastic.BulkProcessor
	bulkStats   *elastic.BulkProcessor
	client      *elastic.Client
	hsc         *httpServerCtx
	fileWg      *sync.WaitGroup
	indexWg     *sync.WaitGroup
	processWg   *sync.WaitGroup
	relateWg    *sync.WaitGroup
	opsConsumed chan bool // event loop -> stopAllWorkers: ops drained
	closeC      chan bool // closeClient -> shutdown: resources closed
	doneC       chan int  // shutdown request carrying a timeout in seconds
	enabled     bool      // whether this process is the active cluster member
	lastTs      primitive.Timestamp
	lastTsSaved primitive.Timestamp
	tokens      bson.M
	indexC      chan *gtm.Op
	processC    chan *gtm.Op
	fileC       chan *gtm.Op
	relateC     chan *gtm.Op
	filter      gtm.OpFilter
	statusReqC  chan *statusRequest
	sigH        *sigHandler
}
type sigHandler struct {
clientStartedC chan *indexClient
}
type awsConnect struct {
AccessKey string `toml:"access-key"`
SecretKey string `toml:"secret-key"`
Region string
}
// executionEnv is one otto javascript VM plus its script; lock serializes
// access since otto VMs are not safe for concurrent use.
type executionEnv struct {
	VM *otto.Otto
	Script string
	lock *sync.Mutex
}

// javascript describes a user-supplied mapping/filter/pipeline script bound
// to a namespace (empty namespace applies globally).
type javascript struct {
	Namespace string
	Script string
	Path string
	Routing bool
}

// relation links documents in Namespace to documents in WithNamespace by
// matching SrcField against MatchField, up to MaxDepth levels of fan-out.
type relation struct {
	Namespace string
	WithNamespace string `toml:"with-namespace"`
	SrcField string `toml:"src-field"`
	MatchField string `toml:"match-field"`
	KeepSrc bool `toml:"keep-src"`
	MaxDepth int `toml:"max-depth"`
	db string // parsed database portion of WithNamespace
	col string // parsed collection portion of WithNamespace
}

// indexMapping overrides the Elasticsearch index name for a namespace.
type indexMapping struct {
	Namespace string
	Index string
}
// findConf configures a MongoDB lookup helper exposed to javascript
// environments: target namespace and query mode (byID/multi/pipe).
type findConf struct {
	vm *otto.Otto
	ns string
	name string
	client *mongo.Client
	byID bool // lookup a single doc by _id
	multi bool // return multiple results
	pipe bool // run an aggregation pipeline
	pipeAllowDisk bool // allow the aggregation to spill to disk
}

// findCall is one invocation of a findConf helper, carrying the query and
// its options as supplied from javascript.
type findCall struct {
	config *findConf
	client *mongo.Client
	query interface{}
	db string
	col string
	limit int
	sort map[string]int
	sel map[string]int // field selection (projection)
}

// logRotate mirrors lumberjack-style log rotation settings.
type logRotate struct {
	MaxSize int `toml:"max-size"`
	MaxAge int `toml:"max-age"`
	MaxBackups int `toml:"max-backups"`
	LocalTime bool `toml:"localtime"`
	Compress bool `toml:"compress"`
}

// logFiles holds optional file paths for each log level/stream.
type logFiles struct {
	Info string
	Warn string
	Error string
	Trace string
	Stats string
}
// indexingMeta carries per-document indexing overrides extracted from the
// reserved _meta_monstache attribute of a mapped document.
type indexingMeta struct {
	Routing string
	Index string
	Type string
	Parent string
	Version int64
	VersionType string
	Pipeline string
	RetryOnConflict int
	Skip bool // true to skip indexing this document
	ID string
}

// gtmSettings tunes the gtm tailing library's channel and buffer behavior.
type gtmSettings struct {
	ChannelSize int `toml:"channel-size"`
	BufferSize int `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
	MaxAwaitTime string `toml:"max-await-time"`
}

// httpServerCtx is the embedded HTTP status/healthcheck server state.
type httpServerCtx struct {
	httpServer *http.Server
	bulk *elastic.BulkProcessor
	config *configOptions
	shutdown bool
	started time.Time
	statusReqC chan *statusRequest
}

// instanceStatus is the JSON document served by the /instance endpoint.
type instanceStatus struct {
	Enabled bool `json:"enabled"`
	Pid int `json:"pid"`
	Hostname string `json:"hostname"`
	ClusterName string `json:"cluster"`
	ResumeName string `json:"resumeName"`
	LastTs primitive.Timestamp `json:"lastTs"`
	LastTsFormat string `json:"lastTsFormat,omitempty"`
}

// statusResponse answers a statusRequest with current worker state.
type statusResponse struct {
	enabled bool
	lastTs primitive.Timestamp
}

// statusRequest asks the main loop for status; the answer arrives on responseC.
type statusRequest struct {
	responseC chan *statusResponse
}
// configOptions is the complete monstache configuration, populated from the
// TOML config file, command line flags and environment variables. Field tags
// give the TOML key; untagged fields use the lower-cased field name.
type configOptions struct {
	// Config file templating and environment handling.
	EnableTemplate bool
	EnvDelimiter string
	// MongoDB connection settings.
	MongoURL string `toml:"mongo-url"`
	MongoConfigURL string `toml:"mongo-config-url"`
	MongoOpLogDatabaseName string `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string `toml:"mongo-oplog-collection-name"`
	GtmSettings gtmSettings `toml:"gtm-settings"`
	AWSConnect awsConnect `toml:"aws-connect"`
	// Logging.
	LogRotate logRotate `toml:"log-rotate"`
	Logs logFiles `toml:"logs"`
	GraylogAddr string `toml:"graylog-addr"`
	// Elasticsearch connection settings.
	ElasticUrls stringargs `toml:"elasticsearch-urls"`
	ElasticUser string `toml:"elasticsearch-user"`
	ElasticPassword string `toml:"elasticsearch-password"`
	ElasticPemFile string `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile bool `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion string `toml:"elasticsearch-version"`
	ElasticHealth0 int `toml:"elasticsearch-healthcheck-timeout-startup"`
	ElasticHealth1 int `toml:"elasticsearch-healthcheck-timeout"`
	// Resume and namespace filtering.
	ResumeName string `toml:"resume-name"`
	NsRegex string `toml:"namespace-regex"`
	NsDropRegex string `toml:"namespace-drop-regex"`
	NsExcludeRegex string `toml:"namespace-exclude-regex"`
	NsDropExcludeRegex string `toml:"namespace-drop-exclude-regex"`
	ClusterName string `toml:"cluster-name"`
	Print bool `toml:"print-config"`
	Version bool
	Pprof bool
	EnableOplog bool `toml:"enable-oplog"`
	DisableChangeEvents bool `toml:"disable-change-events"`
	EnableEasyJSON bool `toml:"enable-easy-json"`
	// Stats reporting.
	Stats bool
	IndexStats bool `toml:"index-stats"`
	StatsDuration string `toml:"stats-duration"`
	StatsIndexFormat string `toml:"stats-index-format"`
	Gzip bool
	Verbose bool
	Resume bool
	ResumeStrategy resumeStrategy `toml:"resume-strategy"`
	ResumeWriteUnsafe bool `toml:"resume-write-unsafe"`
	ResumeFromTimestamp int64 `toml:"resume-from-timestamp"`
	Replay bool
	DroppedDatabases bool `toml:"dropped-databases"`
	DroppedCollections bool `toml:"dropped-collections"`
	// File (GridFS) indexing.
	IndexFiles bool `toml:"index-files"`
	IndexAsUpdate bool `toml:"index-as-update"`
	FileHighlighting bool `toml:"file-highlighting"`
	DisableFilePipelinePut bool `toml:"disable-file-pipeline-put"`
	EnablePatches bool `toml:"enable-patches"`
	FailFast bool `toml:"fail-fast"`
	IndexOplogTime bool `toml:"index-oplog-time"`
	OplogTsFieldName string `toml:"oplog-ts-field-name"`
	OplogDateFieldName string `toml:"oplog-date-field-name"`
	OplogDateFieldFormat string `toml:"oplog-date-field-format"`
	ExitAfterDirectReads bool `toml:"exit-after-direct-reads"`
	MergePatchAttr string `toml:"merge-patch-attribute"`
	// Elasticsearch bulk tuning.
	ElasticMaxConns int `toml:"elasticsearch-max-conns"`
	ElasticRetry bool `toml:"elasticsearch-retry"`
	ElasticMaxDocs int `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes int `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds int `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout int `toml:"elasticsearch-client-timeout"`
	ElasticMajorVersion int
	ElasticMinorVersion int
	MaxFileSize int64 `toml:"max-file-size"`
	ConfigFile string
	// Scripting and mapping.
	Script []javascript
	Filter []javascript
	Pipeline []javascript
	Mapping []indexMapping
	Relate []relation
	FileNamespaces stringargs `toml:"file-namespaces"`
	PatchNamespaces stringargs `toml:"patch-namespaces"`
	Workers stringargs
	Worker string
	// Change streams and direct reads.
	ChangeStreamNs stringargs `toml:"change-stream-namespaces"`
	DirectReadNs stringargs `toml:"direct-read-namespaces"`
	DirectReadSplitMax int `toml:"direct-read-split-max"`
	DirectReadConcur int `toml:"direct-read-concur"`
	DirectReadNoTimeout bool `toml:"direct-read-no-timeout"`
	DirectReadBounded bool `toml:"direct-read-bounded"`
	DirectReadExcludeRegex string `toml:"direct-read-dynamic-exclude-regex"`
	MapperPluginPath string `toml:"mapper-plugin-path"`
	// Embedded HTTP server and misc features.
	EnableHTTPServer bool `toml:"enable-http-server"`
	HTTPServerAddr string `toml:"http-server-addr"`
	TimeMachineNamespaces stringargs `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix string `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix string `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads bool `toml:"time-machine-direct-reads"`
	PipeAllowDisk bool `toml:"pipe-allow-disk"`
	RoutingNamespaces stringargs `toml:"routing-namespaces"`
	DeleteStrategy deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern string `toml:"delete-index-pattern"`
	ConfigDatabaseName string `toml:"config-database-name"`
	FileDownloaders int `toml:"file-downloaders"`
	RelateThreads int `toml:"relate-threads"`
	RelateBuffer int `toml:"relate-buffer"`
	PostProcessors int `toml:"post-processors"`
	PruneInvalidJSON bool `toml:"prune-invalid-json"`
	Debug bool
	mongoClientOptions *options.ClientOptions
}
// IsIdentity reports whether this relation joins documents by their _id on
// both sides, i.e. both the source and match fields are "_id".
func (rel *relation) IsIdentity() bool {
	return rel.SrcField == "_id" && rel.MatchField == "_id"
}
// enabled reports whether any log output file has been configured.
func (l *logFiles) enabled() bool {
	for _, path := range []string{l.Info, l.Warn, l.Error, l.Trace, l.Stats} {
		if path != "" {
			return true
		}
	}
	return false
}
// validate checks that the AWS credentials are either fully specified or
// fully omitted; supplying only one of the two keys is an error.
func (ac *awsConnect) validate() error {
	bothEmpty := ac.AccessKey == "" && ac.SecretKey == ""
	bothSet := ac.AccessKey != "" && ac.SecretKey != ""
	if bothEmpty || bothSet {
		return nil
	}
	return errors.New("AWS connect settings must include both access-key and secret-key")
}
// enabled reports whether any AWS credential has been provided.
func (ac *awsConnect) enabled() bool {
	return !(ac.AccessKey == "" && ac.SecretKey == "")
}
// String renders the delete strategy as its numeric value (flag.Value).
func (arg *deleteStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses a numeric command line value into a deleteStrategy (flag.Value).
func (arg *deleteStrategy) Set(value string) error {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = deleteStrategy(i)
	return nil
}
// String renders the resume strategy as its numeric value (flag.Value).
func (arg *resumeStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses a numeric command line value into a resumeStrategy (flag.Value).
func (arg *resumeStrategy) Set(value string) error {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = resumeStrategy(i)
	return nil
}
// String renders the accumulated values for flag help output (flag.Value).
func (args *stringargs) String() string {
	return fmt.Sprint(*args)
}
// Set appends one occurrence of a repeatable command line flag (flag.Value).
func (args *stringargs) Set(value string) error {
	*args = append(*args, value)
	return nil
}
// readShards reports whether monstache should discover shards from the
// config server: only when a config server URL is set and no change stream
// namespaces are configured.
func (config *configOptions) readShards() bool {
	return config.MongoConfigURL != "" && len(config.ChangeStreamNs) == 0
}
// dynamicDirectReadList reports whether direct read namespaces should be
// discovered dynamically, signaled by a single empty namespace entry.
func (config *configOptions) dynamicDirectReadList() bool {
	if len(config.DirectReadNs) != 1 {
		return false
	}
	return config.DirectReadNs[0] == ""
}
// ignoreDatabaseForDirectReads reports whether db is an internal database
// (MongoDB system databases or monstache's own config database) that direct
// reads must skip.
func (config *configOptions) ignoreDatabaseForDirectReads(db string) bool {
	switch db {
	case "local", "admin", "config", config.ConfigDatabaseName:
		return true
	}
	return false
}
// ignoreCollectionForDirectReads reports whether col is a system collection
// (system.*) that direct reads must skip.
func (config *configOptions) ignoreCollectionForDirectReads(col string) bool {
	return strings.HasPrefix(col, "system.")
}
// afterBulk is invoked by the bulk processor after each bulk commit. It logs
// every failed item in the response, ignoring version conflicts (HTTP 409)
// since those simply mean the document is already present in the index.
func afterBulk(executionID int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if response == nil || !response.Errors {
		return
	}
	// ranging over a nil slice is a no-op, so no explicit nil check needed
	for _, item := range response.Failed() {
		if item.Status == 409 {
			// ignore version conflict since this simply means the doc
			// is already in the index
			continue
		}
		// fix: the original named this variable "json", shadowing the
		// encoding/json package inside the loop
		encoded, merr := json.Marshal(item)
		if merr != nil {
			errorLog.Printf("Unable to marshal bulk response item: %s", merr)
		} else {
			errorLog.Printf("Bulk response item: %s", string(encoded))
		}
	}
}
// parseElasticsearchVersion parses a dotted version string such as "7.10.2"
// and records the major and minor version numbers on the config. An empty
// string or a major version of 0 is rejected.
//
// Bug fix: previously a failure to parse the major version could be masked
// by a successful minor version parse (e.g. "x.3"), returning a nil error
// while leaving ElasticMajorVersion at 0.
func (config *configOptions) parseElasticsearchVersion(number string) error {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return err
	}
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	config.ElasticMajorVersion = majorVersion
	if len(versionParts) > 1 {
		minorVersion, merr := strconv.Atoi(versionParts[1])
		if merr != nil {
			return merr
		}
		config.ElasticMinorVersion = minorVersion
	}
	return nil
}
// newBulkProcessor builds the primary bulk processor used for indexing,
// configured from the elasticsearch-* options. When retries are disabled a
// stop backoff prevents any retry attempts.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (*elastic.BulkProcessor, error) {
	svc := client.BulkProcessor().
		Name("monstache").
		Workers(config.ElasticMaxConns).
		Stats(config.Stats).
		BulkActions(config.ElasticMaxDocs).
		BulkSize(config.ElasticMaxBytes).
		After(afterBulk).
		FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	if !config.ElasticRetry {
		svc.Backoff(&elastic.StopBackoff{})
	}
	return svc.Do(context.Background())
}
// newStatsBulkProcessor builds a secondary bulk processor dedicated to index
// stats documents: a single worker, no action/size limits, flushing every
// five seconds.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (*elastic.BulkProcessor, error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		After(afterBulk).
		FlushInterval(5 * time.Second)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https, in which case the client must default to the https scheme.
func (config *configOptions) needsSecureScheme() bool {
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient builds the Elasticsearch client from the config: sniffing
// is disabled, scheme/URLs/auth/retry/logging are applied as configured, and
// a custom HTTP client (TLS, timeouts) is installed.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	var clientOptions []elastic.ClientOptionFunc
	var httpClient *http.Client
	// sniffing is disabled so the client only talks to the configured URLs
	clientOptions = append(clientOptions, elastic.SetSniff(false))
	if config.needsSecureScheme() {
		clientOptions = append(clientOptions, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		clientOptions = append(clientOptions, elastic.SetURL(config.ElasticUrls...))
	} else {
		// record the default URL so later code (e.g. version checks) can
		// reference ElasticUrls[0]
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		clientOptions = append(clientOptions, elastic.SetTraceLog(traceLog))
		clientOptions = append(clientOptions, elastic.SetErrorLog(errorLog))
	}
	if config.ElasticUser != "" {
		clientOptions = append(clientOptions, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		// exponential backoff between 50ms and 20s
		d1, d2 := time.Duration(50)*time.Millisecond, time.Duration(20)*time.Second
		retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(d1, d2))
		clientOptions = append(clientOptions, elastic.SetRetrier(retrier))
	}
	httpClient, err = config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	clientOptions = append(clientOptions, elastic.SetHttpClient(httpClient))
	clientOptions = append(clientOptions,
		elastic.SetHealthcheckTimeoutStartup(time.Duration(config.ElasticHealth0)*time.Second))
	clientOptions = append(clientOptions,
		elastic.SetHealthcheckTimeout(time.Duration(config.ElasticHealth1)*time.Second))
	return elastic.NewClient(clientOptions...)
}
// testElasticsearchConn verifies connectivity by asking the first configured
// Elasticsearch URL for its version, then records the parsed major/minor
// version on the config.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) error {
	url := config.ElasticUrls[0]
	number, err := client.ElasticsearchVersion(url)
	if err != nil {
		return err
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all Elasticsearch indexes belonging to a dropped
// database. The delete pattern defaults to "<db>*"; if a configured mapping
// for a namespace in that database overrides the index name, that override
// (plus "*") is used instead.
func (ic *indexClient) deleteIndexes(db string) (err error) {
	index := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		dbCol := strings.SplitN(ns, ".", 2)
		if dbCol[0] == db {
			if m.Index != "" {
				index = strings.ToLower(m.Index + "*")
			}
			// NOTE(review): stops at the first namespace in this db; with
			// multiple overridden mappings the chosen pattern depends on
			// (random) map iteration order — confirm this is intended
			break
		}
	}
	_, err = ic.client.DeleteIndex(index).Do(context.Background())
	return
}
// deleteIndex removes the index backing the given namespace, honoring any
// index name override from the mapping configuration.
func (ic *indexClient) deleteIndex(namespace string) error {
	index := strings.ToLower(namespace)
	if m := mapIndexTypes[namespace]; m != nil && m.Index != "" {
		index = strings.ToLower(m.Index)
	}
	_, err := ic.client.DeleteIndex(index).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used to
// extract file content, unless pipeline creation has been disabled.
func (ic *indexClient) ensureFileMapping() error {
	if ic.config.DisableFilePipelinePut {
		return nil
	}
	attachment := map[string]interface{}{
		"attachment": map[string]interface{}{
			"field": "file",
		},
	}
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors":  [1]map[string]interface{}{attachment},
	}
	_, err := ic.client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
	return err
}
// defaultIndexMapping maps an operation to an index named after its
// lower-cased namespace.
func (ic *indexClient) defaultIndexMapping(op *gtm.Op) *indexMapping {
	m := indexMapping{
		Namespace: op.Namespace,
		Index:     strings.ToLower(op.Namespace),
	}
	return &m
}
// mapIndex resolves the target index for an operation, preferring a
// configured mapping override and falling back to the default naming.
func (ic *indexClient) mapIndex(op *gtm.Op) *indexMapping {
	mapping := ic.defaultIndexMapping(op)
	if m := mapIndexTypes[op.Namespace]; m != nil && m.Index != "" {
		mapping.Index = m.Index
	}
	return mapping
}
// opIDToString renders a document _id as the string used for the
// Elasticsearch document ID. ObjectIDs become hex, binary IDs are encoded,
// and whole-valued floats are printed without a fractional part so that
// e.g. 5.0 and 5 map to the same document.
func opIDToString(op *gtm.Op) string {
	switch id := op.Id.(type) {
	case primitive.ObjectID:
		return id.Hex()
	case primitive.Binary:
		return monstachemap.EncodeBinData(monstachemap.Binary{id})
	case float64:
		if whole := int(id); id == float64(whole) {
			return fmt.Sprintf("%v", whole)
		}
	case float32:
		if whole := int(id); id == float32(whole) {
			return fmt.Sprintf("%v", whole)
		}
	}
	return fmt.Sprintf("%v", op.Id)
}
// convertSliceJavascript prepares a BSON array for the otto javascript VM:
// nested maps and slices are converted recursively and ObjectIDs are
// rendered as hex strings.
func convertSliceJavascript(a []interface{}) []interface{} {
	var converted []interface{}
	for _, item := range a {
		var out interface{}
		switch v := item.(type) {
		case map[string]interface{}:
			out = convertMapJavascript(v)
		case []interface{}:
			out = convertSliceJavascript(v)
		case primitive.ObjectID:
			out = v.Hex()
		default:
			out = item
		}
		converted = append(converted, out)
	}
	return converted
}
// convertMapJavascript prepares a BSON document for the otto javascript VM:
// nested maps and slices are converted recursively and ObjectIDs are
// rendered as hex strings.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	converted := make(map[string]interface{})
	for key, val := range e {
		switch v := val.(type) {
		case map[string]interface{}:
			converted[key] = convertMapJavascript(v)
		case []interface{}:
			converted[key] = convertSliceJavascript(v)
		case primitive.ObjectID:
			converted[key] = v.Hex()
		default:
			converted[key] = val
		}
	}
	return converted
}
// fixSlicePruneInvalidJSON returns a copy of the array with elements that
// cannot be serialized to JSON removed: time.Time values outside years
// 0-9999 and NaN/Inf floats. Nested maps and slices are pruned recursively.
// id and key are used only for the warning log messages.
func fixSlicePruneInvalidJSON(id string, key string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		var avc interface{}
		switch achild := av.(type) {
		case map[string]interface{}:
			avc = fixPruneInvalidJSON(id, achild)
		case []interface{}:
			avc = fixSlicePruneInvalidJSON(id, key, achild)
		case time.Time:
			year := achild.Year()
			if year < 0 || year > 9999 {
				// year outside of valid range
				warnLog.Printf("Dropping key %s element: invalid time.Time value: %s for document _id: %s", key, achild, id)
				continue
			} else {
				avc = av
			}
		case float64:
			if math.IsNaN(achild) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s element: invalid float64 value: %v for document _id: %s", key, achild, id)
				continue
			} else if math.IsInf(achild, 0) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s element: invalid float64 value: %v for document _id: %s", key, achild, id)
				continue
			} else {
				avc = av
			}
		default:
			avc = av
		}
		avs = append(avs, avc)
	}
	return avs
}
// fixPruneInvalidJSON returns a copy of the document with keys whose values
// cannot be serialized to JSON removed: time.Time values outside years
// 0-9999 and NaN/Inf floats. Nested maps and slices are pruned recursively.
// id is used only for the warning log messages.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, k, child)
		case time.Time:
			year := child.Year()
			if year < 0 || year > 9999 {
				// year outside of valid range
				warnLog.Printf("Dropping key %s: invalid time.Time value: %s for document _id: %s", k, child, id)
				continue
			} else {
				o[k] = v
			}
		case float64:
			if math.IsNaN(child) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s: invalid float64 value: %v for document _id: %s", k, child, id)
				continue
			} else if math.IsInf(child, 0) {
				// causes an error in the json serializer
				warnLog.Printf("Dropping key %s: invalid float64 value: %v for document _id: %s", k, child, id)
				continue
			} else {
				o[k] = v
			}
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts a value coming back from the otto javascript VM
// into plain Go types. otto.Value instances are exported (javascript Date
// objects are re-parsed from their string form since otto's Export does not
// yield a time.Time); maps and slices are converted recursively. Export
// failures are logged and yield a nil result.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			// otto date string format, e.g. "Mon, 2 Jan 2006 15:04:05 MST"
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []map[string]interface{}:
		b = deepExportMapSlice(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportMapSlice exports each map element, returning a generic slice.
func deepExportMapSlice(a []map[string]interface{}) []interface{} {
	var out []interface{}
	for _, m := range a {
		out = append(out, deepExportMap(m))
	}
	return out
}
// deepExportSlice exports each element of a generic slice recursively.
func deepExportSlice(a []interface{}) []interface{} {
	var out []interface{}
	for _, item := range a {
		out = append(out, deepExportValue(item))
	}
	return out
}
// deepExportMap exports every value of a map recursively.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(e))
	for key, val := range e {
		out[key] = deepExportValue(val)
	}
	return out
}
// mapDataJavascript runs the global ("") and namespace-specific javascript
// mapping environments against the op's document, in that order. An object
// result replaces op.Data; a falsy boolean result clears op.Data (drop) and
// stops further mapping.
//
// Fix: the original deferred every env's unlock until function return, so
// the global env's lock stayed held while the namespace env executed
// (defer-in-loop). Each env is now handled in its own helper call, releasing
// its lock as soon as that env is done.
func (ic *indexClient) mapDataJavascript(op *gtm.Op) error {
	names := []string{"", op.Namespace}
	for _, name := range names {
		env := mapEnvs[name]
		if env == nil {
			continue
		}
		dropped, err := ic.applyMapEnv(env, op)
		if err != nil {
			return err
		}
		if dropped {
			break
		}
	}
	return nil
}

// applyMapEnv runs a single javascript mapping environment against op while
// holding the env's lock. It returns dropped=true when the script returned a
// falsy value, meaning the op should not be indexed and mapping should stop.
func (ic *indexClient) applyMapEnv(env *executionEnv, op *gtm.Op) (dropped bool, err error) {
	env.lock.Lock()
	defer env.lock.Unlock()
	arg := convertMapJavascript(op.Data)
	arg2 := op.Namespace
	arg3 := convertMapJavascript(op.UpdateDescription)
	val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
	if err != nil {
		return false, err
	}
	if strings.ToLower(val.Class()) == "object" {
		data, err := val.Export()
		if err != nil {
			return false, err
		}
		if data == val {
			return false, errors.New("Exported function must return an object")
		}
		dm := data.(map[string]interface{})
		op.Data = deepExportMap(dm)
		return false, nil
	}
	indexed, err := val.ToBoolean()
	if err != nil {
		return false, err
	}
	if !indexed {
		op.Data = nil
		return true, nil
	}
	return false, nil
}
// mapDataGolang runs the compiled Golang mapper plugin against the op. The
// plugin may drop the doc (Data set to nil), skip indexing while keeping
// metadata, replace the document, or pass it through unchanged; any indexing
// overrides it returns are stored under the reserved _meta_monstache key.
func (ic *indexClient) mapDataGolang(op *gtm.Op) error {
	input := &monstachemap.MapperPluginInput{
		Document:          op.Data,
		Namespace:         op.Namespace,
		Database:          op.GetDatabase(),
		Collection:        op.GetCollection(),
		Operation:         op.Operation,
		MongoClient:       ic.mongo,
		UpdateDescription: op.UpdateDescription,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		// nil output means pass through unchanged
		return nil
	}
	if output.Drop {
		op.Data = nil
	} else {
		if output.Skip {
			// keep an empty doc so metadata below can still mark the skip
			op.Data = map[string]interface{}{}
		} else if output.Passthrough == false {
			if output.Document == nil {
				return errors.New("Map function must return a non-nil document")
			}
			op.Data = output.Document
		}
		// collect any indexing overrides the plugin returned
		meta := make(map[string]interface{})
		if output.Skip {
			meta["skip"] = true
		}
		if output.Index != "" {
			meta["index"] = output.Index
		}
		if output.ID != "" {
			meta["id"] = output.ID
		}
		if output.Type != "" {
			meta["type"] = output.Type
		}
		if output.Routing != "" {
			meta["routing"] = output.Routing
		}
		if output.Parent != "" {
			meta["parent"] = output.Parent
		}
		if output.Version != 0 {
			meta["version"] = output.Version
		}
		if output.VersionType != "" {
			meta["versionType"] = output.VersionType
		}
		if output.Pipeline != "" {
			meta["pipeline"] = output.Pipeline
		}
		if output.RetryOnConflict != 0 {
			meta["retryOnConflict"] = output.RetryOnConflict
		}
		if len(meta) > 0 {
			op.Data["_meta_monstache"] = meta
		}
	}
	return nil
}
// mapData applies the configured mapping to an op, preferring a Golang
// mapper plugin over the javascript mappers.
func (ic *indexClient) mapData(op *gtm.Op) error {
	if mapperPlugin == nil {
		return ic.mapDataJavascript(op)
	}
	return ic.mapDataGolang(op)
}
// extractData walks a dotted field path (e.g. "user.address.city") through a
// document and returns the value found at the leaf. When the path cannot be
// resolved (missing key or an intermediate value that is not a document) an
// error describing the document is returned.
//
// Bug fix: when serializing the document for the error message failed, the
// original assigned the nil named return (err) as the detail instead of the
// marshal error (e), producing "...: %!s(<nil>)".
func extractData(srcField string, data map[string]interface{}) (result interface{}, err error) {
	cur := data
	fields := strings.Split(srcField, ".")
	flen := len(fields)
	for i, field := range fields {
		if i+1 == flen {
			result = cur[field]
		} else {
			next, ok := cur[field].(map[string]interface{})
			if !ok {
				break
			}
			cur = next
		}
	}
	if result == nil {
		var detail interface{}
		b, e := json.Marshal(data)
		if e == nil {
			detail = string(b)
		} else {
			detail = e
		}
		err = fmt.Errorf("Source field %s not found in document: %s", srcField, detail)
	}
	return
}
// buildSelector turns a dotted match field (e.g. "a.b.c") and a value into a
// nested MongoDB query document: {"a": {"b": {"c": value}}}.
func buildSelector(matchField string, data interface{}) bson.M {
	fields := strings.Split(matchField, ".")
	last := len(fields) - 1
	sel := bson.M{}
	cur := sel
	for i, field := range fields {
		if i == last {
			cur[field] = data
		} else {
			child := bson.M{}
			cur[field] = child
			cur = child
		}
	}
	return sel
}
func (ic *indexClient) processRelated(root *gtm.Op) (err error) {
var q []*gtm.Op
batch := []*gtm.Op{root}
depth := 1
for len(batch) > 0 {
for _, e := range batch {
op := e
if op.Data == nil {
continue
}
rs := relates[op.Namespace]
if len(rs) == 0 {
continue
}
for _, r := range rs {
if r.MaxDepth > 0 && r.MaxDepth < depth {
continue
}
if op.IsDelete() && r.IsIdentity() {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: r.WithNamespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: op.Data,
}
ic.doDelete(rop)
q = append(q, rop)
continue
}
var srcData interface{}
if srcData, err = extractData(r.SrcField, op.Data); err != nil {
ic.processErr(err)
continue
}
opts := &options.FindOptions{}
if ic.config.DirectReadNoTimeout {
opts.SetNoCursorTimeout(true)
}
col := ic.mongo.Database(r.db).Collection(r.col)
sel := buildSelector(r.MatchField, srcData)
cursor, err := col.Find(context.Background(), sel, opts)
doc := make(map[string]interface{})
for cursor.Next(context.Background()) {
if err = cursor.Decode(&doc); err != nil {
ic.processErr(err)
continue
}
now := time.Now().UTC()
tstamp := primitive.Timestamp{
T: uint32(now.Unix()),
I: uint32(now.Nanosecond()),
}
rop := >m.Op{
Id: doc["_id"],
Data: doc,
Operation: root.Operation,
Namespace: r.WithNamespace,
Source: gtm.DirectQuerySource,
Timestamp: tstamp,
UpdateDescription: root.UpdateDescription,
}
doc = make(map[string]interface{})
if ic.filter != nil && !ic.filter(rop) {
continue
}
if processPlugin != nil {
pop := >m.Op{
Id: rop.Id,
Operation: rop.Operation,
Namespace: rop.Namespace,
Source: rop.Source,
Timestamp: rop.Timestamp,
UpdateDescription: rop.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(rop.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
pop.Data = m
}
}
ic.processC <- pop
}
skip := false
if rs2 := relates[rop.Namespace]; len(rs2) != 0 {
skip = true
visit := false
for _, r2 := range rs2 {
if r2.KeepSrc {
skip = false
}
if r2.MaxDepth < 1 || r2.MaxDepth >= (depth+1) {
visit = true
}
}
if visit {
q = append(q, rop)
}
}
if !skip {
if ic.hasFileContent(rop) {
ic.fileC <- rop
} else {
ic.indexC <- rop
}
}
}
cursor.Close(context.Background())
}
}
depth++
batch = q
q = nil
}
return
}
// prepareDataForIndexing finalizes op.Data before it is sent to
// Elasticsearch: optionally stamps the oplog time fields, strips the _id and
// reserved metadata keys, optionally prunes JSON-unserializable values, and
// converts remaining BSON types to JSON-friendly forms.
func (ic *indexClient) prepareDataForIndexing(op *gtm.Op) {
	config := ic.config
	data := op.Data
	if config.IndexOplogTime {
		secs := op.Timestamp.T
		t := time.Unix(int64(secs), 0).UTC()
		data[config.OplogTsFieldName] = op.Timestamp
		data[config.OplogDateFieldName] = t.Format(config.OplogDateFieldFormat)
	}
	// _id is carried in the ES document ID; _meta_monstache was consumed by
	// parseIndexMeta
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if config.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
	op.Data = monstachemap.ConvertMapForJSON(op.Data)
}
// parseIndexMeta extracts per-document indexing metadata from the reserved
// _meta_monstache attribute, which may be a plain map or an otto value
// produced by a javascript mapper. The version defaults to a value derived
// from the oplog timestamp with "external" version semantics.
func parseIndexMeta(op *gtm.Op) *indexingMeta {
	meta := &indexingMeta{
		Version:     tsVersion(op.Timestamp),
		VersionType: "external",
	}
	m, found := op.Data["_meta_monstache"]
	if !found {
		return meta
	}
	switch v := m.(type) {
	case map[string]interface{}:
		meta.load(v)
	case otto.Value:
		ex, err := v.Export()
		if err == nil && ex != m {
			if attrs, ok := ex.(map[string]interface{}); ok {
				meta.load(attrs)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent downloads the GridFS file identified by the op's _id and
// stores it base64-encoded under the "file" key of the document. Files
// larger than max-file-size keep an empty "file" value with a warning; the
// size is only known after the download completes.
func (ic *indexClient) addFileContent(op *gtm.Op) error {
	op.Data["file"] = ""
	db := ic.mongo.Database(op.GetDatabase())
	bucketName := strings.SplitN(op.GetCollection(), ".", 2)[0]
	var gridByteBuffer bytes.Buffer
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	opts := &options.BucketOptions{}
	opts.SetName(bucketName)
	bucket, err := gridfs.NewBucket(db, opts)
	if err != nil {
		return err
	}
	size, err := bucket.DownloadToStream(op.Id, encoder)
	if err != nil {
		return err
	}
	if max := ic.config.MaxFileSize; max > 0 && size > max {
		warnLog.Printf("File size %d exceeds max file size. file content omitted.", size)
		return nil
	}
	if err = encoder.Close(); err != nil {
		return err
	}
	op.Data["file"] = gridByteBuffer.String()
	return nil
}
// notMonstache returns a filter excluding operations on monstache's own
// config database.
func notMonstache(config *configOptions) gtm.OpFilter {
	configDB := config.ConfigDatabaseName
	return func(op *gtm.Op) bool {
		return op.GetDatabase() != configDB
	}
}
// notChunks reports whether the op does NOT target a GridFS *.chunks
// collection.
func notChunks(op *gtm.Op) bool {
	if chunksRegex.MatchString(op.GetCollection()) {
		return false
	}
	return true
}
// notConfig reports whether the op is outside the MongoDB "config" database.
func notConfig(op *gtm.Op) bool {
	db := op.GetDatabase()
	return db != "config"
}
// notSystem reports whether the op does NOT target a system.* collection.
func notSystem(op *gtm.Op) bool {
	if systemsRegex.MatchString(op.GetCollection()) {
		return false
	}
	return true
}
// filterWithRegex returns a filter that keeps only operations whose
// namespace matches the pattern. Drop operations always pass so index
// cleanup is never filtered out.
func filterWithRegex(regex string) gtm.OpFilter {
	validNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || validNameSpace.MatchString(op.Namespace)
	}
}
// filterDropWithRegex returns a filter that applies the pattern only to
// drop operations; everything else passes through.
func filterDropWithRegex(regex string) gtm.OpFilter {
	validNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return validNameSpace.MatchString(op.Namespace)
	}
}
// filterWithPlugin returns a filter that delegates keep/drop decisions for
// document-bearing inserts and updates to the Golang filter plugin. A plugin
// error is logged and drops the op; other operation types always pass.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		hasDoc := (op.IsInsert() || op.IsUpdate()) && op.Data != nil
		if !hasDoc {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:          op.Data,
			Namespace:         op.Namespace,
			Database:          op.GetDatabase(),
			Collection:        op.GetCollection(),
			Operation:         op.Operation,
			UpdateDescription: op.UpdateDescription,
		}
		ok, err := filterPlugin(input)
		if err != nil {
			// matches original behavior: an erroring plugin drops the op
			errorLog.Println(err)
			return false
		}
		return ok
	}
}
// filterWithScript returns a filter that runs the global ("") and
// namespace-specific javascript filter environments against document-bearing
// inserts and updates. The first environment that rejects the op wins;
// script errors are logged and drop the op (matching prior behavior).
//
// Fix: the original deferred every env's unlock until the closure returned,
// so the global env's lock stayed held while the namespace env executed
// (defer-in-loop). Each env is now evaluated in its own helper call,
// releasing its lock as soon as that env is done.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		keep := true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			for _, ns := range []string{"", op.Namespace} {
				if env := filterEnvs[ns]; env != nil {
					keep = evalFilterEnv(env, op)
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}

// evalFilterEnv runs one javascript filter environment against op while
// holding the env's lock, returning the script's boolean verdict. Call or
// conversion errors are logged and count as a rejection.
func evalFilterEnv(env *executionEnv, op *gtm.Op) bool {
	keep := false
	env.lock.Lock()
	defer env.lock.Unlock()
	arg := convertMapJavascript(op.Data)
	arg2 := op.Namespace
	arg3 := convertMapJavascript(op.UpdateDescription)
	val, err := env.VM.Call("module.exports", arg, arg, arg2, arg3)
	if err != nil {
		errorLog.Println(err)
		return keep
	}
	if ok, err := val.ToBoolean(); err == nil {
		keep = ok
	} else {
		errorLog.Println(err)
	}
	return keep
}
// filterInverseWithRegex returns a filter that excludes namespaces matching
// the pattern; drop operations always pass.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	invalidNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if op.IsDrop() {
			return true
		}
		matched := invalidNameSpace.MatchString(op.Namespace)
		return !matched
	}
}
// filterDropInverseWithRegex returns a filter that excludes drop operations
// whose namespace matches the pattern; everything else passes through.
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	invalidNameSpace := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		matched := invalidNameSpace.MatchString(op.Namespace)
		return !matched
	}
}
// ensureClusterTTL creates a TTL index on the cluster collection so a
// worker's enablement record expires roughly 30 seconds after its last
// heartbeat, letting another worker take over.
func (ic *indexClient) ensureClusterTTL() error {
	io := options.Index().
		SetName("expireAt").
		SetBackground(true).
		SetExpireAfterSeconds(30)
	im := mongo.IndexModel{
		Keys:    bson.M{"expireAt": 1},
		Options: io,
	}
	iv := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster").Indexes()
	_, err := iv.CreateOne(context.Background(), im)
	return err
}
// enableProcess attempts to become the active worker for this resume name by
// inserting an enablement record into the cluster collection. A duplicate
// key error means another process is already active (not an error); success
// immediately refreshes the record's expiration via ensureEnabled.
func (ic *indexClient) enableProcess() (bool, error) {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	doc := bson.M{}
	doc["_id"] = ic.config.ResumeName
	doc["pid"] = os.Getpid()
	if host, err := os.Hostname(); err == nil {
		doc["host"] = host
	} else {
		return false, err
	}
	// expireAt is refreshed with $currentDate on each heartbeat; the TTL
	// index from ensureClusterTTL reaps stale records
	doc["expireAt"] = time.Now().UTC()
	_, err := col.InsertOne(context.Background(), doc)
	if err == nil {
		// update using $currentDate
		ic.ensureEnabled()
		return true, nil
	}
	if isDup(err) {
		// another process holds the enablement record
		return false, nil
	}
	return false, err
}
// isDup reports whether err represents a MongoDB duplicate key error,
// checking both the write concern error and the individual write errors.
func isDup(err error) bool {
	isDupCode := func(code int, message string) bool {
		switch code {
		case 11000, 11001, 12582:
			return true
		}
		return strings.Contains(message, "E11000")
	}
	we, ok := err.(mongo.WriteException)
	if !ok {
		return false
	}
	if wce := we.WriteConcernError; wce != nil {
		if isDupCode(wce.Code, wce.Message) {
			return true
		}
	}
	for _, e := range we.WriteErrors {
		if isDupCode(e.Code, e.Message) {
			return true
		}
	}
	return false
}
// resetClusterState removes this worker's enablement record from the cluster
// collection, allowing another process to take over immediately.
func (ic *indexClient) resetClusterState() error {
	filter := bson.M{"_id": ic.config.ResumeName}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	_, err := col.DeleteOne(context.Background(), filter)
	return err
}
// ensureEnabled checks whether this process still owns the cluster
// enablement record (same pid and hostname) and, if so, refreshes the
// record's expireAt heartbeat. A missing record is not an error — it simply
// means this worker is not enabled.
func (ic *indexClient) ensureEnabled() (enabled bool, err error) {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("cluster")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err = result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			if doc["pid"] != nil && doc["host"] != nil {
				var hostname string
				// BSON int32 for pid; compare against our own identity
				pid := doc["pid"].(int32)
				host := doc["host"].(string)
				if hostname, err = os.Hostname(); err == nil {
					enabled = (int(pid) == os.Getpid() && host == hostname)
					if enabled {
						// heartbeat: push expireAt forward so the TTL
						// index does not reap our record
						_, err = col.UpdateOne(context.Background(), bson.M{
							"_id": ic.config.ResumeName,
						}, bson.M{
							"$currentDate": bson.M{"expireAt": true},
						})
					}
				}
			}
		}
	}
	if err == mongo.ErrNoDocuments {
		err = nil
	}
	return
}
// resumeWork restarts tailing after this worker becomes enabled: it restores
// the last saved timestamp (if any) as the starting point, drains any stale
// ops buffered on the gtm channel, and resumes the gtm context.
func (ic *indexClient) resumeWork() {
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	result := col.FindOne(context.Background(), bson.M{
		"_id": ic.config.ResumeName,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			if doc["ts"] != nil {
				ts := doc["ts"].(primitive.Timestamp)
				ic.gtmCtx.Since(ts)
			}
		}
	}
	// non-blocking drain of ops buffered while this worker was paused so
	// they are not replayed out of order
	drained := false
	for !drained {
		select {
		case _, open := <-ic.gtmCtx.OpC:
			if !open {
				drained = true
			}
		default:
			drained = true
		}
	}
	ic.gtmCtx.Resume()
}
// saveTokens upserts all buffered change stream resume tokens in a single
// unordered bulk write, clearing the buffer on success.
func (ic *indexClient) saveTokens() error {
	if len(ic.tokens) == 0 {
		return nil
	}
	models := make([]mongo.WriteModel, 0, len(ic.tokens))
	for streamID, token := range ic.tokens {
		model := mongo.NewReplaceOneModel().
			SetUpsert(true).
			SetFilter(bson.M{
				"resumeName": ic.config.ResumeName,
				"streamID":   streamID,
			}).
			SetReplacement(bson.M{
				"resumeName": ic.config.ResumeName,
				"streamID":   streamID,
				"token":      token,
			})
		models = append(models, model)
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("tokens")
	bwo := options.BulkWrite().SetOrdered(false)
	_, err := col.BulkWrite(context.Background(), models, bwo)
	if err == nil {
		ic.tokens = bson.M{}
	}
	return err
}
// saveTimestamp persists the most recently processed oplog timestamp under
// the resume name so a restart can continue from that point.
func (ic *indexClient) saveTimestamp() error {
	filter := bson.M{"_id": ic.config.ResumeName}
	update := bson.M{
		"$set": map[string]interface{}{"ts": ic.lastTs},
	}
	opts := options.Update().SetUpsert(true)
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("monstache")
	_, err := col.UpdateOne(context.Background(), filter, update, opts)
	return err
}
// parseCommandLineFlags registers every supported command line flag against
// the receiver's fields and parses os.Args. Flags left at their zero value
// are later overridden by environment variables and the config file (see
// loadEnvironment and loadConfigFile). Returns the receiver for chaining.
// This revision fixes several typos in user-visible usage strings.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.BoolVar(&config.EnableTemplate, "tpl", false, "True to interpret the config file as a template")
	flag.StringVar(&config.EnvDelimiter, "env-delimiter", ",", "A delimiter to use when splitting environment variable values")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.PostProcessors, "post-processors", 0, "Number of post-processing go routines")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.IntVar(&config.RelateThreads, "relate-threads", 0, "Number of threads dedicated to processing relationships")
	flag.IntVar(&config.RelateBuffer, "relate-buffer", 0, "Number of relates to queue before skipping and reporting an error")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to enable gzip for requests to Elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableOplog, "enable-oplog", false, "True to enable direct tailing of the oplog")
	flag.BoolVar(&config.DisableChangeEvents, "disable-change-events", false, "True to disable listening for changes. You must provide direct-reads in this case")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Var(&config.ResumeStrategy, "resume-strategy", "Strategy to use for resuming. 0=timestamp,1=token")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.DisableFilePipelinePut, "disable-file-pipeline-put", false, "True to disable auto-creation of the ingest plugin pipeline")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	// typo fix: "an json-patch" -> "a json-patch"
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include a json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.DirectReadExcludeRegex, "direct-read-dynamic-exclude-regex", "", "A regex to use for excluding namespaces when using dynamic direct reads")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	flag.Var(&config.ChangeStreamNs, "change-stream-namespace", "A list of change stream namespaces")
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	// typo fix: "givne" -> "given"
	flag.IntVar(&config.DirectReadConcur, "direct-read-concur", 0, "Max number of direct-read-namespaces to read concurrently. By default all given are read concurrently")
	flag.BoolVar(&config.DirectReadNoTimeout, "direct-read-no-timeout", false, "True to set the no cursor timeout flag for direct reads")
	flag.BoolVar(&config.DirectReadBounded, "direct-read-bounded", false, "True to limit direct reads to the docs present at query start time")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of direct read namespaces")
	// typo fix: "preprend" -> "prepend"
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to prepend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into the any time machine indexes")
	flag.BoolVar(&config.PipeAllowDisk, "pipe-allow-disk", false, "True to allow MongoDB to use the disk for pipeline options with lots of results")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	// typo fix: "Stategy" -> "Strategy"
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Strategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	// typo fix: "restric" -> "restrict"
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restrict the scope of stateless deletes")
	flag.StringVar(&config.ConfigDatabaseName, "config-database-name", "", "The MongoDB database name that monstache uses to store metadata")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.BoolVar(&config.Debug, "debug", false, "True to enable verbose debug information")
	flag.Parse()
	return config
}
// loadReplacements validates the configured relations and registers each one
// in the relates lookup table keyed by its source namespace. Missing
// SrcField/MatchField values default to "_id". Exits the process via
// errorLog.Fatal* on invalid configuration.
func (config *configOptions) loadReplacements() {
	if config.Relate == nil {
		return
	}
	for _, r := range config.Relate {
		// BUG FIX: the original condition used ||, so an entry with only
		// with-namespace set slipped past validation and was registered under
		// the empty namespace. Both fields are required, per the error below.
		if r.Namespace == "" || r.WithNamespace == "" {
			errorLog.Fatalln("Relates must specify namespace and with-namespace")
		}
		// with-namespace must be of the form <database>.<collection>.
		dbCol := strings.SplitN(r.WithNamespace, ".", 2)
		if len(dbCol) != 2 {
			errorLog.Fatalf("Replacement namespace is invalid: %s", r.WithNamespace)
		}
		database, collection := dbCol[0], dbCol[1]
		rel := &relation{
			Namespace:     r.Namespace,
			WithNamespace: r.WithNamespace,
			SrcField:      r.SrcField,
			MatchField:    r.MatchField,
			KeepSrc:       r.KeepSrc,
			MaxDepth:      r.MaxDepth,
			db:            database,
			col:           collection,
		}
		if rel.SrcField == "" {
			rel.SrcField = "_id"
		}
		if rel.MatchField == "" {
			rel.MatchField = "_id"
		}
		relates[rel.Namespace] = append(relates[rel.Namespace], rel)
	}
}
// loadIndexTypes registers each configured namespace-to-index mapping in the
// mapIndexTypes table. Index names are lowercased to satisfy Elasticsearch
// naming rules. Exits via errorLog.Fatalln when a mapping is incomplete.
func (config *configOptions) loadIndexTypes() {
	if config.Mapping == nil {
		return
	}
	for _, m := range config.Mapping {
		// Both namespace and index are mandatory for a mapping entry.
		if m.Namespace == "" || m.Index == "" {
			errorLog.Fatalln("Mappings must specify namespace and index")
		}
		mapIndexTypes[m.Namespace] = &indexMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
		}
	}
}
// loadPipelines compiles each configured aggregation pipeline script into an
// otto JavaScript environment and registers it in pipeEnvs keyed by
// namespace. A pipeline must supply exactly one of path or script; the
// script's module.exports must evaluate to a function. Exits via
// errorLog.Fatal* on any invalid configuration or script error.
func (config *configOptions) loadPipelines() {
	for _, s := range config.Pipeline {
		if s.Path == "" && s.Script == "" {
			errorLog.Fatalln("Pipelines must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Pipelines must specify path or script but not both")
		}
		if s.Path != "" {
			if script, err := ioutil.ReadFile(s.Path); err == nil {
				s.Script = string(script)
			} else {
				errorLog.Fatalf("Unable to load pipeline at path %s: %s", s.Path, err)
			}
		}
		// BUG FIX: duplicate detection previously consulted filterEnvs (a
		// copy-paste from loadFilters), so two pipelines sharing a namespace
		// silently overwrote each other. Check the pipeline registry instead.
		if _, exists := pipeEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple pipelines with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a CommonJS-style module object for the script to export into.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		} else if !val.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		pipeEnvs[s.Namespace] = env
	}
}
// loadFilters compiles each configured filter script into an otto JavaScript
// environment and registers it in filterEnvs keyed by namespace. A filter
// must supply exactly one of path or script; the script's module.exports must
// evaluate to a function. Exits via errorLog.Fatal* on invalid configuration.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		// Exactly one of script/path is required (Fatalln terminates below).
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Filters must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Filters must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load filter at path %s: %s", s.Path, err)
			}
			s.Script = string(script[:])
		}
		// Reject duplicate filter registrations for the same namespace.
		if _, exists := filterEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple filters with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a CommonJS-style module object for the script to export into.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exported, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !exported.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		filterEnvs[s.Namespace] = env
	}
}
// loadScripts compiles each configured mapping script into an otto JavaScript
// environment and registers it in mapEnvs keyed by namespace. A script must
// supply exactly one of path or script; module.exports must evaluate to a
// function. Scripts flagged with Routing also mark their namespace in
// routingNamespaces. Exits via errorLog.Fatal* on invalid configuration.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		// Exactly one of script/path is required (Fatalln terminates below).
		if s.Script == "" && s.Path == "" {
			errorLog.Fatalln("Scripts must specify path or script")
		}
		if s.Path != "" && s.Script != "" {
			errorLog.Fatalln("Scripts must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Fatalf("Unable to load script at path %s: %s", s.Path, err)
			}
			s.Script = string(script[:])
		}
		// Reject duplicate script registrations for the same namespace.
		if _, exists := mapEnvs[s.Namespace]; exists {
			errorLog.Fatalf("Multiple scripts with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
			lock:   &sync.Mutex{},
		}
		// Provide a CommonJS-style module object for the script to export into.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			errorLog.Fatalln(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			errorLog.Fatalln(err)
		}
		exported, err := env.VM.Run("module.exports")
		if err != nil {
			errorLog.Fatalln(err)
		}
		if !exported.IsFunction() {
			errorLog.Fatalln("module.exports must be a function")
		}
		mapEnvs[s.Namespace] = env
		if s.Routing {
			routingNamespaces[s.Namespace] = true
		}
	}
}
// loadPlugins opens the configured mapper plugin (.so) and wires up any of
// the recognized symbols it exports: Map, Filter, Process, and Pipeline.
// Each symbol must have the exact expected function type; a mismatch is
// fatal. A warning is logged if none of the four symbols is present.
// Returns the receiver for chaining.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Fatalf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	funcDefined := false
	if sym, lerr := p.Lookup("Map"); lerr == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Map' function must be typed %T", mapperPlugin)
		}
		mapperPlugin = fn
	}
	if sym, lerr := p.Lookup("Filter"); lerr == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.MapperPluginInput) (bool, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
		filterPlugin = fn
	}
	if sym, lerr := p.Lookup("Process"); lerr == nil {
		funcDefined = true
		fn, ok := sym.(func(*monstachemap.ProcessPluginInput) error)
		if !ok {
			errorLog.Fatalf("Plugin 'Process' function must be typed %T", processPlugin)
		}
		processPlugin = fn
	}
	if sym, lerr := p.Lookup("Pipeline"); lerr == nil {
		funcDefined = true
		fn, ok := sym.(func(string, bool) ([]interface{}, error))
		if !ok {
			errorLog.Fatalf("Plugin 'Pipeline' function must be typed %T", pipePlugin)
		}
		pipePlugin = fn
	}
	if !funcDefined {
		warnLog.Println("Plugin loaded but did not find a Map, Filter, Process or Pipeline function")
	}
	return config
}
// decodeAsTemplate reads the config file, renders it as a Go text/template
// with the process environment (a map of variable name to value) as template
// data, and then TOML-decodes the rendered output into the receiver. Unknown
// keys in the rendered config are fatal. Returns the receiver for chaining.
func (config *configOptions) decodeAsTemplate() *configOptions {
	// Expose every environment variable to the template by name.
	env := make(map[string]string)
	for _, kv := range os.Environ() {
		if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 {
			env[parts[0]] = parts[1]
		}
	}
	raw, err := ioutil.ReadFile(config.ConfigFile)
	if err != nil {
		errorLog.Fatalln(err)
	}
	tmpl := template.Must(template.New("config").Parse(string(raw)))
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, env); err != nil {
		errorLog.Fatalln(err)
	}
	md, err := toml.Decode(rendered.String(), config)
	if err != nil {
		errorLog.Fatalln(err)
	}
	// Reject config keys that did not map to any known option.
	if ud := md.Undecoded(); len(ud) != 0 {
		errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
	}
	return config
}
// loadConfigFile merges options from the TOML config file (if one was given
// with -f) into the receiver. Values already set on the command line or via
// the environment take precedence: for most scalars the file value is adopted
// only when the receiver still holds its zero value, and for booleans the
// file can only turn an option on (or, for the true-by-default dropped-*
// options, off). Scripts, filters, pipelines, mappings, and relations are
// loaded from the file config at the end. Returns the receiver for chaining.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Start the file config with the same defaults the flag parser uses
		// so an absent key does not accidentally flip a true-by-default flag.
		var tomlConfig = configOptions{
			ConfigFile:             config.ConfigFile,
			LogRotate:              config.LogRotate,
			DroppedDatabases:       true,
			DroppedCollections:     true,
			ElasticValidatePemFile: true,
			GtmSettings:            gtmDefaultSettings(),
		}
		// Decode either as a template (env-substituted) or as plain TOML;
		// unknown keys are fatal in both paths.
		if config.EnableTemplate {
			tomlConfig.decodeAsTemplate()
		} else {
			if md, err := toml.DecodeFile(tomlConfig.ConfigFile, &tomlConfig); err != nil {
				errorLog.Fatalln(err)
			} else if ud := md.Undecoded(); len(ud) != 0 {
				errorLog.Fatalf("Config file contains undecoded keys: %q", ud)
			}
		}
		// String/number options: keep the command-line value when set,
		// otherwise fall back to the file value.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		// True by default; the file may only disable it.
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.ElasticHealth0 == 0 {
			config.ElasticHealth0 = tomlConfig.ElasticHealth0
		}
		if config.ElasticHealth1 == 0 {
			config.ElasticHealth1 = tomlConfig.ElasticHealth1
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		if config.DirectReadConcur == 0 {
			config.DirectReadConcur = tomlConfig.DirectReadConcur
		}
		// Boolean options (false by default): the file may only enable them.
		if !config.DirectReadNoTimeout && tomlConfig.DirectReadNoTimeout {
			config.DirectReadNoTimeout = true
		}
		if !config.DirectReadBounded && tomlConfig.DirectReadBounded {
			config.DirectReadBounded = true
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if !config.IndexFiles {
			config.IndexFiles = tomlConfig.IndexFiles
		}
		if !config.DisableFilePipelinePut {
			config.DisableFilePipelinePut = tomlConfig.DisableFilePipelinePut
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.RelateThreads == 0 {
			config.RelateThreads = tomlConfig.RelateThreads
		}
		if config.RelateBuffer == 0 {
			config.RelateBuffer = tomlConfig.RelateBuffer
		}
		if config.PostProcessors == 0 {
			config.PostProcessors = tomlConfig.PostProcessors
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		// True by default; the file may only disable these two.
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableOplog && tomlConfig.EnableOplog {
			config.EnableOplog = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.DisableChangeEvents && tomlConfig.DisableChangeEvents {
			config.DisableChangeEvents = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Debug && tomlConfig.Debug {
			config.Debug = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if config.ConfigDatabaseName == "" {
			config.ConfigDatabaseName = tomlConfig.ConfigDatabaseName
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		if config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.ResumeStrategy == 0 {
			config.ResumeStrategy = tomlConfig.ResumeStrategy
		}
		if config.DirectReadExcludeRegex == "" {
			config.DirectReadExcludeRegex = tomlConfig.DirectReadExcludeRegex
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Namespace lists adopted from the file also trigger a reload of
		// the corresponding lookup tables.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
				config.loadGridFsConfig()
			}
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
				config.loadPatchNamespaces()
			}
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if !config.PipeAllowDisk {
			config.PipeAllowDisk = tomlConfig.PipeAllowDisk
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ChangeStreamNs) == 0 {
			config.ChangeStreamNs = tomlConfig.ChangeStreamNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		if !config.AWSConnect.enabled() {
			config.AWSConnect = tomlConfig.AWSConnect
		}
		if !config.Logs.enabled() {
			config.Logs = tomlConfig.Logs
		}
		// These sections can only come from the file; adopt unconditionally.
		config.GtmSettings = tomlConfig.GtmSettings
		config.Relate = tomlConfig.Relate
		config.LogRotate = tomlConfig.LogRotate
		// Compile/register file-defined scripts, filters, pipelines,
		// mappings, and relations into the package-level tables.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadPipelines()
		tomlConfig.loadIndexTypes()
		tomlConfig.loadReplacements()
	}
	return config
}
// newLogger builds a rotating file logger for the given path using the
// configured log-rotation settings (size/backup/age limits, local time,
// and compression).
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	rotate := config.LogRotate
	logger := &lumberjack.Logger{Filename: path}
	logger.MaxSize = rotate.MaxSize
	logger.MaxBackups = rotate.MaxBackups
	logger.MaxAge = rotate.MaxAge
	logger.LocalTime = rotate.LocalTime
	logger.Compress = rotate.Compress
	return logger
}
// setupLogging redirects the package loggers. When a Graylog address is
// configured, all loggers send GELF messages there; otherwise any logger
// with a configured file path is given its own rotating file writer.
// Returns the receiver for chaining.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr != "" {
		writer, err := gelf.NewUDPWriter(config.GraylogAddr)
		if err != nil {
			errorLog.Fatalf("Error creating gelf writer: %s", err)
		}
		// Graylog takes precedence over any file-based log settings.
		infoLog.SetOutput(writer)
		warnLog.SetOutput(writer)
		errorLog.SetOutput(writer)
		traceLog.SetOutput(writer)
		statsLog.SetOutput(writer)
		return config
	}
	if path := config.Logs.Info; path != "" {
		infoLog.SetOutput(config.newLogger(path))
	}
	if path := config.Logs.Warn; path != "" {
		warnLog.SetOutput(config.newLogger(path))
	}
	if path := config.Logs.Error; path != "" {
		errorLog.SetOutput(config.newLogger(path))
	}
	if path := config.Logs.Trace; path != "" {
		traceLog.SetOutput(config.newLogger(path))
	}
	if path := config.Logs.Stats; path != "" {
		statsLog.SetOutput(config.newLogger(path))
	}
	return config
}
// build runs the full configuration pipeline in precedence order:
// environment variables first, then the namespace lookup tables they may
// have populated, then the config file (which only fills still-unset
// options), plugins, and finally defaults for anything still empty.
// The call order matters; do not reorder.
func (config *configOptions) build() *configOptions {
	config.loadEnvironment()
	// Register namespaces gathered so far (flags + environment) before the
	// config file merges in its own lists.
	config.loadTimeMachineNamespaces()
	config.loadRoutingNamespaces()
	config.loadPatchNamespaces()
	config.loadGridFsConfig()
	config.loadConfigFile()
	config.loadPlugins()
	config.setDefaults()
	return config
}
// loadEnvironment fills options from MONSTACHE_* environment variables.
// A variable only applies when the corresponding option was not already set
// on the command line (AWS credentials and log settings are applied
// unconditionally). List-valued variables are split on EnvDelimiter
// (default ","). Returns the receiver for chaining.
//
// Idiom cleanup: the redundant `break` after each case and the
// `default: continue` were removed — Go switch cases do not fall through.
func (config *configOptions) loadEnvironment() *configOptions {
	del := config.EnvDelimiter
	if del == "" {
		del = ","
	}
	for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2)
		if len(pair) < 2 {
			continue
		}
		name, val := pair[0], pair[1]
		// Empty values are ignored entirely.
		if val == "" {
			continue
		}
		switch name {
		case "MONSTACHE_MONGO_URL":
			if config.MongoURL == "" {
				config.MongoURL = val
			}
		case "MONSTACHE_MONGO_CONFIG_URL":
			if config.MongoConfigURL == "" {
				config.MongoConfigURL = val
			}
		case "MONSTACHE_MONGO_OPLOG_DB":
			if config.MongoOpLogDatabaseName == "" {
				config.MongoOpLogDatabaseName = val
			}
		case "MONSTACHE_MONGO_OPLOG_COL":
			if config.MongoOpLogCollectionName == "" {
				config.MongoOpLogCollectionName = val
			}
		case "MONSTACHE_ES_URLS":
			if len(config.ElasticUrls) == 0 {
				config.ElasticUrls = strings.Split(val, del)
			}
		case "MONSTACHE_ES_USER":
			if config.ElasticUser == "" {
				config.ElasticUser = val
			}
		case "MONSTACHE_ES_PASS":
			if config.ElasticPassword == "" {
				config.ElasticPassword = val
			}
		case "MONSTACHE_ES_PEM":
			if config.ElasticPemFile == "" {
				config.ElasticPemFile = val
			}
		case "MONSTACHE_WORKER":
			if config.Worker == "" {
				config.Worker = val
			}
		case "MONSTACHE_CLUSTER":
			if config.ClusterName == "" {
				config.ClusterName = val
			}
		case "MONSTACHE_DIRECT_READ_NS":
			if len(config.DirectReadNs) == 0 {
				config.DirectReadNs = strings.Split(val, del)
			}
		case "MONSTACHE_CHANGE_STREAM_NS":
			if len(config.ChangeStreamNs) == 0 {
				config.ChangeStreamNs = strings.Split(val, del)
			}
		case "MONSTACHE_DIRECT_READ_NS_DYNAMIC_EXCLUDE_REGEX":
			if config.DirectReadExcludeRegex == "" {
				config.DirectReadExcludeRegex = val
			}
		case "MONSTACHE_NS_REGEX":
			if config.NsRegex == "" {
				config.NsRegex = val
			}
		case "MONSTACHE_NS_EXCLUDE_REGEX":
			if config.NsExcludeRegex == "" {
				config.NsExcludeRegex = val
			}
		case "MONSTACHE_NS_DROP_REGEX":
			if config.NsDropRegex == "" {
				config.NsDropRegex = val
			}
		case "MONSTACHE_NS_DROP_EXCLUDE_REGEX":
			if config.NsDropExcludeRegex == "" {
				config.NsDropExcludeRegex = val
			}
		case "MONSTACHE_GRAYLOG_ADDR":
			if config.GraylogAddr == "" {
				config.GraylogAddr = val
			}
		// AWS credentials and log settings always apply, even if set earlier.
		case "MONSTACHE_AWS_ACCESS_KEY":
			config.AWSConnect.AccessKey = val
		case "MONSTACHE_AWS_SECRET_KEY":
			config.AWSConnect.SecretKey = val
		case "MONSTACHE_AWS_REGION":
			config.AWSConnect.Region = val
		case "MONSTACHE_LOG_DIR":
			config.Logs.Info = val + "/info.log"
			config.Logs.Warn = val + "/warn.log"
			config.Logs.Error = val + "/error.log"
			config.Logs.Trace = val + "/trace.log"
			config.Logs.Stats = val + "/stats.log"
		case "MONSTACHE_LOG_MAX_SIZE":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_SIZE: %s", err)
			}
			config.LogRotate.MaxSize = int(i)
		case "MONSTACHE_LOG_MAX_BACKUPS":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_BACKUPS: %s", err)
			}
			config.LogRotate.MaxBackups = int(i)
		case "MONSTACHE_LOG_MAX_AGE":
			i, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				errorLog.Fatalf("Failed to load MONSTACHE_LOG_MAX_AGE: %s", err)
			}
			config.LogRotate.MaxAge = int(i)
		case "MONSTACHE_HTTP_ADDR":
			if config.HTTPServerAddr == "" {
				config.HTTPServerAddr = val
			}
		case "MONSTACHE_FILE_NS":
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_PATCH_NS":
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = strings.Split(val, del)
			}
		case "MONSTACHE_TIME_MACHINE_NS":
			if len(config.TimeMachineNamespaces) == 0 {
				config.TimeMachineNamespaces = strings.Split(val, del)
			}
		}
	}
	return config
}
// loadRoutingNamespaces records each configured routing namespace in the
// package-level routingNamespaces set. Returns the receiver for chaining.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces records each configured time machine namespace
// in the package-level tmNamespaces set. Returns the receiver for chaining.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces records each configured patch namespace in the
// package-level patchNamespaces set. Returns the receiver for chaining.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig records each configured file (GridFS) namespace in the
// package-level fileNamespaces set. Returns the receiver for chaining.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump logs the effective configuration as indented JSON with all sensitive
// values (URL credentials, Elasticsearch user/password, AWS keys and region)
// redacted. The value receiver is deliberate: the redaction mutates only a
// copy of the configuration.
func (config configOptions) dump() {
	if config.MongoURL != "" {
		config.MongoURL = cleanMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = cleanMongoURL(config.MongoConfigURL)
	}
	if config.ElasticUser != "" {
		config.ElasticUser = redact
	}
	if config.ElasticPassword != "" {
		config.ElasticPassword = redact
	}
	if config.AWSConnect.AccessKey != "" {
		config.AWSConnect.AccessKey = redact
	}
	if config.AWSConnect.SecretKey != "" {
		config.AWSConnect.SecretKey = redact
	}
	if config.AWSConnect.Region != "" {
		config.AWSConnect.Region = redact
	}
	json, err := json.MarshalIndent(config, "", "  ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(json))
	}
}
// validate terminates the process (via Fatal logging) when the configuration
// is inconsistent, and emits warnings for settings known to degrade direct
// read performance.
func (config *configOptions) validate() {
	// With change events disabled there is nothing to sync unless direct
	// reads were requested.
	if config.DisableChangeEvents && len(config.DirectReadNs) == 0 {
		errorLog.Fatalln("Direct read namespaces must be specified if change events are disabled")
	}
	if config.AWSConnect.enabled() {
		if err := config.AWSConnect.validate(); err != nil {
			errorLog.Fatalln(err)
		}
	}
	if len(config.DirectReadNs) > 0 {
		if config.ElasticMaxSeconds < 5 {
			warnLog.Println("Direct read performance degrades with small values for elasticsearch-max-seconds. Set to 5s or greater to remove this warning.")
		}
		if config.ElasticMaxDocs > 0 {
			warnLog.Println("For performance reasons it is recommended to use elasticsearch-max-bytes instead of elasticsearch-max-docs since doc size may vary")
		}
	}
	if config.StatsDuration != "" {
		_, err := time.ParseDuration(config.StatsDuration)
		if err != nil {
			errorLog.Fatalf("Unable to parse stats duration: %s", err)
		}
	}
}
// setDefaults fills in default values for options left unset via flags,
// TOML, or environment, derives the resume name from cluster/worker names,
// and normalizes a seconds-only resume timestamp into MongoDB's packed
// (seconds<<32 | ordinal) timestamp format. Returns config for chaining.
func (config *configOptions) setDefaults() *configOptions {
	// With the oplog disabled and no change stream configured, default to a
	// single blank change-stream namespace (watch the whole deployment).
	if !config.EnableOplog && len(config.ChangeStreamNs) == 0 {
		config.ChangeStreamNs = []string{""}
	}
	if config.DisableChangeEvents {
		config.ChangeStreamNs = []string{}
		config.EnableOplog = false
	}
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	// Cluster mode implies resume; the resume name scopes saved progress to
	// the cluster (and to the worker, when one is named).
	if config.ClusterName != "" {
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.Worker != "" {
		config.ResumeName = config.Worker
	} else if config.ResumeName == "" {
		config.ResumeName = resumeNameDefault
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	// Direct reads benefit from larger bulk flush intervals.
	if config.ElasticMaxSeconds == 0 {
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	if config.ElasticHealth0 == 0 {
		config.ElasticHealth0 = 15
	}
	if config.ElasticHealth1 == 0 {
		config.ElasticHealth1 = 5
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	// Index-name formats below use Go reference-time layout strings.
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 && config.IndexFiles {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.RelateThreads == 0 {
		config.RelateThreads = relateThreadsDefault
	}
	if config.RelateBuffer == 0 {
		config.RelateBuffer = relateBufferDefault
	}
	if config.PostProcessors == 0 && processPlugin != nil {
		config.PostProcessors = postProcessorsDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	if config.ConfigDatabaseName == "" {
		config.ConfigDatabaseName = configDatabaseNameDefault
	}
	// A value that fits in 32 bits is a bare unix-seconds timestamp; shift
	// it into the high word to match MongoDB's timestamp representation.
	if config.ResumeFromTimestamp > 0 {
		if config.ResumeFromTimestamp <= math.MaxInt32 {
			config.ResumeFromTimestamp = config.ResumeFromTimestamp << 32
		}
	}
	return config
}
// cleanMongoURL returns a copy of URL that is safe to log: any credentials
// before the host are replaced with a redaction placeholder while the
// original scheme (mongodb:// or mongodb+srv://) is preserved.
func cleanMongoURL(URL string) string {
	const (
		scheme    = "mongodb://"
		schemeSrv = "mongodb+srv://"
	)
	u := URL
	plain := strings.HasPrefix(u, scheme)
	srv := strings.HasPrefix(u, schemeSrv)
	u = strings.TrimPrefix(u, scheme)
	u = strings.TrimPrefix(u, schemeSrv)
	// Everything before the first '@' is userinfo; hide it.
	if at := strings.IndexAny(u, "@"); at >= 0 {
		u = redact + "@" + u[at+1:]
	}
	switch {
	case plain:
		u = scheme + u
	case srv:
		u = schemeSrv + u
	}
	return u
}
// dialMongo connects to MongoDB at URL and verifies the connection with a
// ping. The first call builds and caches the client options (including a
// BSON registry that decodes DateTime values as time.Time); subsequent
// calls reuse those cached options with only the URL re-applied, which is
// how shard hosts discovered at runtime are dialed.
func (config *configOptions) dialMongo(URL string) (*mongo.Client, error) {
	var clientOptions *options.ClientOptions
	if config.mongoClientOptions == nil {
		// use the initial URL to create most of the client options
		// save the client options for potential use later with shards
		rb := bson.NewRegistryBuilder()
		rb.RegisterTypeMapEntry(bsontype.DateTime, reflect.TypeOf(time.Time{}))
		reg := rb.Build()
		clientOptions = options.Client()
		clientOptions.ApplyURI(URL)
		clientOptions.SetAppName("monstache")
		clientOptions.SetRegistry(reg)
		config.mongoClientOptions = clientOptions
	} else {
		// subsequent client connections will only be for adding shards
		// for shards we only have the hostname and replica set
		// apply the hostname to the previously saved client options
		clientOptions = config.mongoClientOptions
		clientOptions.ApplyURI(URL)
	}
	client, err := mongo.NewClient(clientOptions)
	if err != nil {
		return nil, err
	}
	err = client.Connect(context.Background())
	if err != nil {
		return nil, err
	}
	// Ping to fail fast on unreachable hosts rather than at first query.
	err = client.Ping(context.Background(), nil)
	if err != nil {
		return nil, err
	}
	return client, nil
}
// NewHTTPClient builds the HTTP client used to talk to Elasticsearch. It
// wires in an optional custom CA bundle (elasticsearch-pem-file), optional
// disabling of TLS certificate validation, gzip support, the configured
// client timeout, and AWS V4 request signing when AWS credentials are set.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err == nil {
			if ok := certs.AppendCertsFromPEM(ca); !ok {
				errorLog.Printf("No certs parsed successfully from %s", config.ElasticPemFile)
			}
			tlsConfig.RootCAs = certs
		} else {
			return client, err
		}
	}
	if !config.ElasticValidatePemFile {
		// Turn off certificate validation entirely.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	// Wrap the client so every request is signed with AWS V4 credentials.
	if config.AWSConnect.enabled() {
		client = aws.NewV4SigningClientWithHTTPClient(credentials.NewStaticCredentials(
			config.AWSConnect.AccessKey,
			config.AWSConnect.SecretKey,
			"",
		), config.AWSConnect.Region, client)
	}
	return client, err
}
// doDrop reacts to database or collection drop events: when configured to
// propagate drops, it deletes the corresponding Elasticsearch indexes and
// then removes any stored routing metadata for the dropped scope. Metadata
// cleanup failures are logged but do not fail the drop.
func (ic *indexClient) doDrop(op *gtm.Op) (err error) {
	if db, drop := op.IsDropDatabase(); drop {
		if ic.config.DroppedDatabases {
			if err = ic.deleteIndexes(db); err == nil {
				if e := ic.dropDBMeta(db); e != nil {
					errorLog.Printf("Unable to delete metadata for db: %s", e)
				}
			}
		}
	} else if col, drop := op.IsDropCollection(); drop {
		if ic.config.DroppedCollections {
			if err = ic.deleteIndex(op.GetDatabase() + "." + col); err == nil {
				if e := ic.dropCollectionMeta(op.GetDatabase() + "." + col); e != nil {
					errorLog.Printf("Unable to delete metadata for collection: %s", e)
				}
			}
		}
	}
	return
}
// hasFileContent reports whether op belongs to a namespace configured for
// GridFS file indexing; always false when file indexing is disabled.
func (ic *indexClient) hasFileContent(op *gtm.Op) (ingest bool) {
	if ic.config.IndexFiles {
		ingest = fileNamespaces[op.Namespace]
	}
	return
}
// addPatch maintains a trail of json-merge-patches on the document under
// config.MergePatchAttr. For inserts/replaces it stores the whole document
// as patch version 1; for updates it fetches the previously indexed copy,
// computes a merge patch from old to new, and appends it with the next
// version number. Direct reads and ops without a timestamp are skipped.
func (ic *indexClient) addPatch(op *gtm.Op, objectID string,
	indexType *indexMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp.T == 0 {
		return nil
	}
	client, config := ic.client, ic.config
	if op.IsUpdate() {
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		// Per-document metadata overrides the mapped defaults.
		if meta.ID != "" {
			service.Id(meta.ID)
		}
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; normalize the
						// stored ts/v entries back to ints.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff the documents without the patch trail itself.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								merge["ts"] = op.Timestamp.T
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// First sighting of the document: store it whole as version 1.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp.T
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing queues an index (or upsert) request for the op's document on
// the bulk processor, honoring per-document metadata emitted by mapping
// functions (id, index, routing, parent, version, pipeline, retries). It
// also records json-merge patches when enabled, persists routing metadata
// for stateful deletes, and writes a time machine copy when configured.
func (ic *indexClient) doIndexing(op *gtm.Op) (err error) {
	meta := parseIndexMeta(op)
	if meta.Skip {
		return
	}
	ic.prepareDataForIndexing(op)
	objectID, indexType := opIDToString(op), ic.mapIndex(op)
	if ic.config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			if e := ic.addPatch(op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	ingestAttachment := false
	if ic.hasFileContent(op) {
		ingestAttachment = op.Data["file"] != nil
	}
	// Updates cannot carry a pipeline or attachment ingestion; those cases
	// fall through to a plain index request below.
	if ic.config.IndexAsUpdate && meta.Pipeline == "" && !ingestAttachment {
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// Source() validates the request; only well-formed ops are queued.
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	} else {
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(ic.config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Doc(op.Data)
		if meta.ID != "" {
			req.Id(meta.ID)
		}
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		// Attachment ingestion always goes through the attachment pipeline,
		// overriding any pipeline set via metadata.
		if ingestAttachment {
			req.Pipeline("attachment")
		}
		if _, err = req.Source(); err == nil {
			ic.bulk.Add(req)
		}
	}
	if meta.shouldSave(ic.config) {
		if e := ic.setIndexMeta(op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || ic.config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// Time machine index names embed the current date via the
			// configured prefix/suffix layout strings.
			tmIndex := func(idx string) string {
				pre, suf := ic.config.TimeMachineIndexPrefix, ic.config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			// When oplog time fields are not added globally, add them to
			// the time machine copy so each revision stays timestamped.
			if !ic.config.IndexOplogTime {
				secs := int64(op.Timestamp.T)
				t := time.Unix(secs, 0).UTC()
				data[ic.config.OplogTsFieldName] = op.Timestamp
				data[ic.config.OplogDateFieldName] = t.Format(ic.config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(ic.config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			if _, err = req.Source(); err == nil {
				ic.bulk.Add(req)
			}
		}
	}
	return
}
// doIndex maps the op through any configured mapping function and then
// either indexes the resulting document or, for an update whose mapping
// produced no data, deletes the document.
func (ic *indexClient) doIndex(op *gtm.Op) (err error) {
	if err = ic.mapData(op); err != nil {
		return
	}
	if op.Data != nil {
		return ic.doIndexing(op)
	}
	if op.IsUpdate() {
		ic.doDelete(op)
	}
	return
}
// runProcessor invokes the external process plugin with the full context of
// the op; deletes pass a minimal document containing only the _id.
func (ic *indexClient) runProcessor(op *gtm.Op) (err error) {
	doc := op.Data
	if op.IsDelete() {
		doc = map[string]interface{}{
			"_id": op.Id,
		}
	}
	input := &monstachemap.ProcessPluginInput{
		ElasticClient:        ic.client,
		ElasticBulkProcessor: ic.bulk,
		Timestamp:            op.Timestamp,
	}
	input.Document = doc
	input.Namespace = op.Namespace
	input.Database = op.GetDatabase()
	input.Collection = op.GetCollection()
	input.Operation = op.Operation
	input.MongoClient = ic.mongo
	input.UpdateDescription = op.UpdateDescription
	return processPlugin(input)
}
func (ic *indexClient) routeProcess(op *gtm.Op) (err error) {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
UpdateDescription: op.UpdateDescription,
}
if op.Data != nil {
var data []byte
data, err = bson.Marshal(op.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
rop.Data = m
}
}
}
ic.processC <- rop
return
}
// routeDrop flushes pending bulk operations before handling a database or
// collection drop so no queued writes land in a freshly deleted index.
func (ic *indexClient) routeDrop(op *gtm.Op) (err error) {
	ic.bulk.Flush()
	return ic.doDrop(op)
}
func (ic *indexClient) routeDeleteRelate(op *gtm.Op) (err error) {
if rs := relates[op.Namespace]; len(rs) != 0 {
var delData map[string]interface{}
useFind := false
for _, r := range rs {
if r.SrcField != "_id" {
useFind = true
break
}
}
if useFind {
delData = ic.findDeletedSrcDoc(op)
} else {
delData = map[string]interface{}{
"_id": op.Id,
}
}
if delData != nil {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
Data: delData,
}
select {
case ic.relateC <- rop:
default:
errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
}
}
}
return
}
// routeDelete handles a delete op: when relationships are configured the
// delete is first forwarded for relational processing, then the document
// itself is removed from Elasticsearch.
func (ic *indexClient) routeDelete(op *gtm.Op) (err error) {
	if len(ic.config.Relate) > 0 {
		err = ic.routeDeleteRelate(op)
	}
	ic.doDelete(op)
	return
}
func (ic *indexClient) routeDataRelate(op *gtm.Op) (skip bool, err error) {
rs := relates[op.Namespace]
if len(rs) == 0 {
return
}
skip = true
for _, r := range rs {
if r.KeepSrc {
skip = false
break
}
}
if skip {
select {
case ic.relateC <- op:
default:
errorLog.Printf(relateQueueOverloadMsg, op.Namespace, op.Id)
}
} else {
rop := >m.Op{
Id: op.Id,
Operation: op.Operation,
Namespace: op.Namespace,
Source: op.Source,
Timestamp: op.Timestamp,
UpdateDescription: op.UpdateDescription,
}
var data []byte
data, err = bson.Marshal(op.Data)
if err == nil {
var m map[string]interface{}
err = bson.Unmarshal(data, &m)
if err == nil {
rop.Data = m
}
}
select {
case ic.relateC <- rop:
default:
errorLog.Printf(relateQueueOverloadMsg, rop.Namespace, rop.Id)
}
}
return
}
// routeData dispatches an op carrying data to the appropriate worker: the
// relate pipeline may claim it entirely; otherwise it goes to the file
// channel (GridFS namespaces) or the plain index channel.
func (ic *indexClient) routeData(op *gtm.Op) (err error) {
	skip := false
	if len(ic.config.Relate) > 0 && op.IsSourceOplog() {
		skip, err = ic.routeDataRelate(op)
	}
	if skip {
		return
	}
	if ic.hasFileContent(op) {
		ic.fileC <- op
	} else {
		ic.indexC <- op
	}
	return
}
// routeOp is the top-level dispatcher for a change event: it always feeds
// the process plugin (when loaded) and then routes drops, deletes, and
// data-bearing ops to their handlers.
func (ic *indexClient) routeOp(op *gtm.Op) (err error) {
	if processPlugin != nil {
		err = ic.routeProcess(op)
	}
	switch {
	case op.IsDrop():
		err = ic.routeDrop(op)
	case op.IsDelete():
		err = ic.routeDelete(op)
	case op.Data != nil:
		err = ic.routeData(op)
	}
	return
}
// processErr records an indexing error: it marks a non-zero exit status,
// logs the error, and terminates the process immediately when fail-fast is
// enabled. The package mutex guards the shared exitStatus.
func (ic *indexClient) processErr(err error) {
	config := ic.config
	mux.Lock()
	defer mux.Unlock()
	exitStatus = 1
	errorLog.Println(err)
	if config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats queues a document with the current bulk-processor statistics
// (plus host, pid, and a UTC timestamp) on the dedicated stats indexer. The
// target index name is derived from the configured date layout.
func (ic *indexClient) doIndexStats() (err error) {
	now := time.Now().UTC()
	doc := map[string]interface{}{
		"Timestamp": now.Format("2006-01-02T15:04:05"),
		"Pid":       os.Getpid(),
		"Stats":     ic.bulk.Stats(),
	}
	var hostname string
	if hostname, err = os.Hostname(); err == nil {
		doc["Host"] = hostname
	}
	index := strings.ToLower(now.Format(ic.config.StatsIndexFormat))
	req := elastic.NewBulkIndexRequest().Index(index)
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	req.Doc(doc)
	ic.bulkStats.Add(req)
	return
}
// dropDBMeta removes stored routing metadata for every namespace in db.
// It only applies under the stateful delete strategy.
func (ic *indexClient) dropDBMeta(db string) (err error) {
	if ic.config.DeleteStrategy != statefulDeleteStrategy {
		return
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
	_, err = col.DeleteMany(context.Background(), bson.M{"db": db})
	return
}
// dropCollectionMeta removes stored routing metadata for a single
// namespace. It only applies under the stateful delete strategy.
func (ic *indexClient) dropCollectionMeta(namespace string) (err error) {
	if ic.config.DeleteStrategy != statefulDeleteStrategy {
		return
	}
	col := ic.mongo.Database(ic.config.ConfigDatabaseName).Collection("meta")
	_, err = col.DeleteMany(context.Background(), bson.M{"namespace": namespace})
	return
}
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
var v interface{}
var ok bool
var s string
if _, ok = metaAttrs["skip"]; ok {
meta.Skip = true
}
if v, ok = metaAttrs["routing"]; ok {
meta.Routing = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["index"]; ok {
meta.Index = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["id"]; ok {
op := >m.Op{
Id: v,
}
meta.ID = opIDToString(op)
}
if v, ok = metaAttrs["type"]; ok {
meta.Type = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["parent"]; ok {
meta.Parent = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["version"]; ok {
s = fmt.Sprintf("%v", v)
if version, err := strconv.ParseInt(s, 10, 64); err == nil {
meta.Version = version
}
}
if v, ok = metaAttrs["versionType"]; ok {
meta.VersionType = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["pipeline"]; ok {
meta.Pipeline = fmt.Sprintf("%v", v)
}
if v, ok = metaAttrs["retryOnConflict"]; ok {
s = fmt.Sprintf("%v", v)
if roc, err := strconv.Atoi(s); err == nil {
meta.RetryOnConflict = roc
}
}
}
// shouldSave reports whether this metadata must be persisted so a later
// delete can locate the document; only relevant under the stateful delete
// strategy and only when some addressing field was customized.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing metadata for a document into the config
// database's "meta" collection, keyed by "namespace.id". This is consumed
// later by getIndexMeta when a stateful delete needs to address the doc.
func (ic *indexClient) setIndexMeta(namespace, id string, meta *indexingMeta) error {
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"id":        meta.ID,
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	opts := options.Update()
	opts.SetUpsert(true)
	_, err := col.UpdateOne(context.Background(), bson.M{
		"_id": metaID,
	}, bson.M{
		"$set": doc,
	}, opts)
	return err
}
// getIndexMeta fetches previously saved routing metadata for a document
// (keyed "namespace.id") from the config database. The record is deleted
// after a successful read: the metadata is single-use, consumed by the
// delete that follows. Returns a zero-value meta when nothing is found.
func (ic *indexClient) getIndexMeta(namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	config := ic.config
	col := ic.mongo.Database(config.ConfigDatabaseName).Collection("meta")
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	result := col.FindOne(context.Background(), bson.M{
		"_id": metaID,
	})
	if err := result.Err(); err == nil {
		doc := make(map[string]interface{})
		if err = result.Decode(&doc); err == nil {
			if doc["id"] != nil {
				meta.ID = doc["id"].(string)
			}
			if doc["routing"] != nil {
				meta.Routing = doc["routing"].(string)
			}
			if doc["index"] != nil {
				// Elasticsearch index names are always lowercase.
				meta.Index = strings.ToLower(doc["index"].(string))
			}
			if doc["type"] != nil {
				meta.Type = doc["type"].(string)
			}
			if doc["parent"] != nil {
				meta.Parent = doc["parent"].(string)
			}
			if doc["pipeline"] != nil {
				meta.Pipeline = doc["pipeline"].(string)
			}
			// One-shot: remove the record now that it has been consumed.
			col.DeleteOne(context.Background(), bson.M{"_id": metaID})
		}
	}
	return
}
// loadBuiltinFunctions registers the builtin MongoDB query helpers (findId,
// findOne, find, pipe) in every JavaScript mapping environment. A failure
// to bind any helper is fatal.
func loadBuiltinFunctions(client *mongo.Client, config *configOptions) {
	for ns, env := range mapEnvs {
		confs := []*findConf{
			{client: client, name: "findId", vm: env.VM, ns: ns, byID: true},
			{client: client, name: "findOne", vm: env.VM, ns: ns},
			{client: client, name: "find", vm: env.VM, ns: ns, multi: true},
			{client: client, name: "pipe", vm: env.VM, ns: ns, multi: true,
				pipe: true, pipeAllowDisk: config.PipeAllowDisk},
		}
		for _, fa := range confs {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				errorLog.Fatalln(err)
			}
		}
	}
}
// setDatabase applies the optional "database" option; it must be a string.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	ov, ok := topts["database"]
	if !ok {
		return
	}
	if name, ok := ov.(string); ok {
		fc.db = name
	} else {
		err = errors.New("Invalid database option value")
	}
	return
}
// setCollection applies the optional "collection" option; it must be a
// string.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	ov, ok := topts["collection"]
	if !ok {
		return
	}
	if name, ok := ov.(string); ok {
		fc.col = name
	} else {
		err = errors.New("Invalid collection option value")
	}
	return
}
// setSelect applies the optional "select" projection option. Integer-valued
// entries become projection flags; entries of other types are skipped. A
// non-map value for "select" is an error.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	if ov, ok := topts["select"]; ok {
		ovsel, ok := ov.(map[string]interface{})
		if !ok {
			return errors.New("Invalid select option value")
		}
		for field, v := range ovsel {
			if flag, ok := v.(int64); ok {
				fc.sel[field] = int(flag)
			}
		}
	}
	return
}
// setSort applies the optional "sort" option. Each key must map to an
// integer direction (1 ascending, -1 descending); entries of other types
// are skipped. A non-map value for "sort" is an error.
//
// Note: a stray recursive self-call with throwaway arguments was removed
// here — it had no effect on fc (the inner call saw no "sort" key) and
// only allocated garbage on every invocation.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	if ov, ok := topts["sort"]; ok {
		if ovsort, ok := ov.(map[string]interface{}); ok {
			for k, v := range ovsort {
				if vi, ok := v.(int64); ok {
					fc.sort[k] = int(vi)
				}
			}
		} else {
			err = errors.New("Invalid sort option value")
		}
	}
	return
}
// setLimit applies the optional "limit" option; it must be an integer.
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	ov, ok := topts["limit"]
	if !ok {
		return
	}
	if n, ok := ov.(int64); ok {
		fc.limit = int(n)
	} else {
		err = errors.New("Invalid limit option value")
	}
	return
}
// setQuery exports the JavaScript query argument into Go values, restoring
// hex strings to ObjectIDs so id-based lookups behave as expected.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	var q interface{}
	if q, err = v.Export(); err == nil {
		fc.query = fc.restoreIds(deepExportValue(q))
	}
	return
}
// setOptions parses the optional second JavaScript argument (an options
// object) and applies database/collection/select settings. Sort and limit
// are honored only for multi-result helpers (find, pipe). Any non-object
// argument is rejected.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	var opts interface{}
	if opts, err = v.Export(); err == nil {
		switch topts := opts.(type) {
		case map[string]interface{}:
			if err = fc.setDatabase(topts); err != nil {
				return
			}
			if err = fc.setCollection(topts); err != nil {
				return
			}
			if err = fc.setSelect(topts); err != nil {
				return
			}
			if fc.isMulti() {
				if err = fc.setSort(topts); err != nil {
					return
				}
				if err = fc.setLimit(topts); err != nil {
					return
				}
			}
		default:
			err = errors.New("Invalid options argument")
			return
		}
	} else {
		err = errors.New("Invalid options argument")
	}
	return
}
// setDefaults seeds the call's database and collection from the namespace
// ("db.collection") bound to the helper's configuration, if any. A
// namespace without a '.' separator is ignored instead of panicking on the
// missing collection segment (the original indexed ns[1] unconditionally).
func (fc *findCall) setDefaults() {
	if fc.config.ns != "" {
		ns := strings.SplitN(fc.config.ns, ".", 2)
		if len(ns) == 2 {
			fc.db = ns[0]
			fc.col = ns[1]
		}
	}
}
// getCollection returns the MongoDB collection targeted by this call.
func (fc *findCall) getCollection() *mongo.Collection {
	return fc.client.Database(fc.db).Collection(fc.col)
}
// getVM returns the JavaScript VM this call executes within.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the JavaScript-facing name of this helper.
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether this helper returns multiple documents.
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// isPipe reports whether this helper runs an aggregation pipeline.
func (fc *findCall) isPipe() bool {
	return fc.config.pipe
}
// pipeAllowDisk reports whether aggregations may spill to disk.
func (fc *findCall) pipeAllowDisk() bool {
	return fc.config.pipeAllowDisk
}
// logError records an error raised by this helper, tagged with its name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts any string
// that parses as a hex ObjectID back into a primitive.ObjectID, recursing
// through maps and slices; all other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch vt := v.(type) {
	case string:
		oi, err := primitive.ObjectIDFromHex(vt)
		if err != nil {
			return v
		}
		r = oi
	case []map[string]interface{}:
		var out []interface{}
		for _, item := range vt {
			restored := make(map[string]interface{})
			for key, val := range item {
				restored[key] = fc.restoreIds(val)
			}
			out = append(out, restored)
		}
		r = out
	case []interface{}:
		var out []interface{}
		for _, item := range vt {
			out = append(out, fc.restoreIds(item))
		}
		r = out
	case map[string]interface{}:
		restored := make(map[string]interface{})
		for key, val := range vt {
			restored[key] = fc.restoreIds(val)
		}
		r = restored
	default:
		r = v
	}
	return
}
// execute runs the prepared MongoDB operation and converts the result into
// a JavaScript value. Multi-mode runs either an aggregation pipeline or a
// find with optional sort/limit/projection and returns an array; single
// mode runs a FindOne (wrapping the raw query as an _id lookup for findId)
// and returns one document.
func (fc *findCall) execute() (r otto.Value, err error) {
	var cursor *mongo.Cursor
	col := fc.getCollection()
	query := fc.query
	if fc.isMulti() {
		if fc.isPipe() {
			ao := options.Aggregate()
			ao.SetAllowDiskUse(fc.pipeAllowDisk())
			cursor, err = col.Aggregate(context.Background(), query, ao)
			if err != nil {
				return
			}
		} else {
			fo := options.Find()
			if fc.limit > 0 {
				fo.SetLimit(int64(fc.limit))
			}
			if len(fc.sort) > 0 {
				fo.SetSort(fc.sort)
			}
			if len(fc.sel) > 0 {
				fo.SetProjection(fc.sel)
			}
			cursor, err = col.Find(context.Background(), query, fo)
			if err != nil {
				return
			}
		}
		// Drain the cursor, converting each document for JS consumption.
		var rdocs []map[string]interface{}
		for cursor.Next(context.Background()) {
			doc := make(map[string]interface{})
			if err = cursor.Decode(&doc); err != nil {
				return
			}
			rdocs = append(rdocs, convertMapJavascript(doc))
		}
		r, err = fc.getVM().ToValue(rdocs)
	} else {
		fo := options.FindOne()
		if fc.config.byID {
			// findId treats the raw argument as the document _id.
			query = bson.M{"_id": query}
		}
		if len(fc.sel) > 0 {
			fo.SetProjection(fc.sel)
		}
		result := col.FindOne(context.Background(), query, fo)
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				rdoc := convertMapJavascript(doc)
				r, err = fc.getVM().ToValue(rdoc)
			}
		}
	}
	return
}
// makeFind builds the JavaScript-callable wrapper for the query helper
// described by fa. The returned function accepts (query[, options]),
// resolves db/collection from namespace defaults and options, executes the
// query, and returns the result — or JS null on error, with the error
// logged rather than thrown into the script.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		fc := &findCall{
			config: fa,
			client: fa.client,
			sort:   make(map[string]int),
			sel:    make(map[string]int),
		}
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			// Options (argument 2) are applied before the query so they can
			// redirect the call to another db/collection.
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// findDeletedSrcDoc recovers the last indexed copy of a now-deleted source
// document by searching Elasticsearch for its _id across the configured
// delete index pattern. Exactly one hit with a source is required; any
// other outcome is logged and nil is returned.
func (ic *indexClient) findDeletedSrcDoc(op *gtm.Op) map[string]interface{} {
	objectID := opIDToString(op)
	termQuery := elastic.NewTermQuery("_id", objectID)
	search := ic.client.Search()
	search.Size(1)
	search.Index(ic.config.DeleteIndexPattern)
	search.Query(termQuery)
	searchResult, err := search.Do(context.Background())
	if err != nil {
		errorLog.Printf("Unable to find deleted document %s: %s", objectID, err)
		return nil
	}
	if searchResult.Hits == nil {
		errorLog.Printf("Unable to find deleted document %s", objectID)
		return nil
	}
	if searchResult.TotalHits() == 0 {
		errorLog.Printf("Found no hits for deleted document %s", objectID)
		return nil
	}
	// Multiple hits means the pattern is ambiguous; refuse to guess.
	if searchResult.TotalHits() > 1 {
		errorLog.Printf("Found multiple hits for deleted document %s", objectID)
		return nil
	}
	hit := searchResult.Hits.Hits[0]
	if hit.Source == nil {
		errorLog.Printf("Source unavailable for deleted document %s", objectID)
		return nil
	}
	var src map[string]interface{}
	if err = json.Unmarshal(hit.Source, &src); err == nil {
		// Restore the original (typed) id in place of the string form.
		src["_id"] = op.Id
		return src
	}
	errorLog.Printf("Unable to unmarshal deleted document %s: %s", objectID, err)
	return nil
}
// tsVersion packs a MongoDB timestamp into a monotonically increasing
// int64 for use as an external Elasticsearch document version: seconds in
// the high 32 bits, ordinal in the low 32 bits.
func tsVersion(ts primitive.Timestamp) int64 {
	return int64(ts.T)<<32 | int64(ts.I)
}
// doDelete queues a bulk delete for the document referenced by op. The
// delete strategy controls how the target index and routing are resolved:
//
//   - ignoreDeleteStrategy: deletes are dropped entirely.
//   - statefulDeleteStrategy: routing metadata previously saved in MongoDB
//     is looked up (and consumed) to address the document.
//   - statelessDeleteStrategy: routed namespaces locate the document via an
//     Elasticsearch _id search, which must resolve to exactly one hit.
//
// Unless indexing as updates, the delete carries an external version derived
// from the oplog timestamp so out-of-order deletes cannot resurrect docs.
func (ic *indexClient) doDelete(op *gtm.Op) {
	// Check the strategy before allocating the request (the original built
	// the request first, wasting it when deletes are ignored).
	if ic.config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(ic.config.EnableEasyJSON)
	objectID, indexType, meta := opIDToString(op), ic.mapIndex(op), &indexingMeta{}
	req.Id(objectID)
	if !ic.config.IndexAsUpdate {
		req.Version(tsVersion(op.Timestamp))
		req.VersionType("external")
	}
	switch ic.config.DeleteStrategy {
	case statefulDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			meta = ic.getIndexMeta(op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	case statelessDeleteStrategy:
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			termQuery := elastic.NewTermQuery("_id", objectID)
			search := ic.client.Search()
			search.FetchSource(false)
			search.Size(1)
			search.Index(ic.config.DeleteIndexPattern)
			search.Query(termQuery)
			searchResult, err := search.Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s",
					objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.TotalHits() == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s",
					objectID, ic.config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
		}
	default:
		// Unknown strategy: do nothing (matches the original's final else).
		return
	}
	ic.bulk.Add(req)
}
// logRotateDefaults returns the default log rotation policy: 500 MB files,
// kept at most 28 days with up to 5 backups, UTC timestamps, uncompressed.
func logRotateDefaults() logRotate {
	var lr logRotate
	lr.MaxSize = 500 // megabytes
	lr.MaxAge = 28   // days
	lr.MaxBackups = 5
	lr.LocalTime = false
	lr.Compress = false
	return lr
}
// gtmDefaultSettings returns the default gtm tuning: the standard channel
// size, a 32-op buffer flushed every 75ms, and no max await time.
func gtmDefaultSettings() gtmSettings {
	var gs gtmSettings
	gs.ChannelSize = gtmChannelSizeDefault
	gs.BufferSize = 32
	gs.BufferDuration = "75ms"
	gs.MaxAwaitTime = ""
	return gs
}
// notifySdFailed logs the outcome of a failed systemd notification. A nil
// err means notification is simply unsupported in this environment, which
// is only worth mentioning in verbose mode.
func (ic *indexClient) notifySdFailed(err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed logs the outcome of a failed systemd watchdog query. A
// nil err means the watchdog simply is not enabled, which is only worth
// mentioning in verbose mode.
func (ic *indexClient) watchdogSdFailed(err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if ic.config.Verbose {
		warnLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHTTP runs the embedded HTTP server until it stops, recording the
// start time used by the /started endpoint. A listen failure is fatal
// unless a deliberate shutdown is already in progress.
func (ctx *httpServerCtx) serveHTTP() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		errorLog.Fatalf("Unable to serve http at address %s: %s", s.Addr, err)
	}
}
// buildServer assembles the embedded HTTP server and its endpoints:
// /started (uptime), /healthz (liveness), /stats (bulk processor stats,
// when enabled), /instance (process and resume status via a request to the
// main loop), and optionally the pprof debug handlers.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		data := (time.Now().Sub(ctx.started)).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(200)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(200)
				w.Write(stats)
				fmt.Fprintln(w)
			} else {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	mux.HandleFunc("/instance", func(w http.ResponseWriter, req *http.Request) {
		hostname, err := os.Hostname()
		if err != nil {
			w.WriteHeader(500)
			fmt.Fprintf(w, "Unable to get hostname for instance info: %s", err)
			return
		}
		status := instanceStatus{
			Pid:         os.Getpid(),
			Hostname:    hostname,
			ResumeName:  ctx.config.ResumeName,
			ClusterName: ctx.config.ClusterName,
		}
		// Ask the main loop for its current enabled/timestamp state; give
		// up after 5 seconds if it is not consuming status requests.
		respC := make(chan *statusResponse)
		statusReq := &statusRequest{
			responseC: respC,
		}
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case ctx.statusReqC <- statusReq:
			srsp := <-respC
			if srsp != nil {
				status.Enabled = srsp.enabled
				status.LastTs = srsp.lastTs
				if srsp.lastTs.T != 0 {
					status.LastTsFormat = time.Unix(int64(srsp.lastTs.T), 0).Format("2006-01-02T15:04:05")
				}
			}
			data, err := json.Marshal(status)
			if err != nil {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print instance info: %s", err)
				break
			}
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(200)
			w.Write(data)
			fmt.Fprintln(w)
			break
		case <-timer.C:
			w.WriteHeader(500)
			fmt.Fprintf(w, "Timeout getting instance info")
			break
		}
	})
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// startNotify launches the systemd notification loop (READY=1 followed by
// periodic WATCHDOG=1 messages) on its own goroutine.
func (ic *indexClient) startNotify() {
	go ic.notifySd()
}
// notifySd implements the systemd readiness/watchdog protocol: it sends
// READY=1 once, then, if a watchdog is configured for this unit, sends
// WATCHDOG=1 at half the watchdog interval forever. It returns early
// (via the failure helpers) when systemd notification is unavailable.
func (ic *indexClient) notifySd() {
	var interval time.Duration
	config := ic.config
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		// Not sent: either not running under systemd or the notify
		// socket is unusable; report and stop.
		ic.notifySdFailed(err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		// Watchdog not configured for this service; nothing more to do.
		ic.watchdogSdFailed(err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			ic.notifySdFailed(err)
			return
		}
		// Ping at twice the required rate so a single missed beat does
		// not trip the watchdog.
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns a callback used by gtm when a new shard
// joins the cluster: it logs the shard and dials a client connection to it.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(info *gtm.ShardInfo) (*mongo.Client, error) {
		url := info.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(url))
		return config.dialMongo(url)
	}
}
// buildPipe returns the aggregation-pipeline builder handed to gtm, or nil
// when no pipeline is configured. A compiled Go plugin (pipePlugin) takes
// precedence over JavaScript pipeline environments (pipeEnvs).
func buildPipe(config *configOptions) func(string, bool) ([]interface{}, error) {
	if pipePlugin != nil {
		return pipePlugin
	} else if len(pipeEnvs) > 0 {
		return func(ns string, changeEvent bool) ([]interface{}, error) {
			// mux guards the shared JS environments map; presumably a
			// package-level mutex (declared outside this view) — confirm.
			mux.Lock()
			defer mux.Unlock()
			// Check the catch-all environment ("") first, then the
			// namespace-specific one.
			nss := []string{"", ns}
			for _, ns := range nss {
				if env := pipeEnvs[ns]; env != nil {
					// Per-environment lock; safe to defer in this loop
					// because every branch below returns (or exits via
					// Fatalln) once a matching env has been invoked.
					env.lock.Lock()
					defer env.lock.Unlock()
					val, err := env.VM.Call("module.exports", ns, ns, changeEvent)
					if err != nil {
						return nil, err
					}
					if strings.ToLower(val.Class()) == "array" {
						data, err := val.Export()
						if err != nil {
							return nil, err
						} else if data == val {
							// otto returns the value itself when it cannot
							// export it to a native Go value.
							return nil, errors.New("Exported pipeline function must return an array")
						} else {
							switch data.(type) {
							case []map[string]interface{}:
								// Expected shape: array of pipeline stages.
								ds := data.([]map[string]interface{})
								var is []interface{} = make([]interface{}, len(ds))
								for i, d := range ds {
									is[i] = deepExportValue(d)
								}
								return is, nil
							case []interface{}:
								// Only an empty generic array is tolerated;
								// anything else is a fatal misconfiguration.
								ds := data.([]interface{})
								if len(ds) > 0 {
									errorLog.Fatalln("Pipeline function must return an array of objects")
								}
								return nil, nil
							default:
								errorLog.Fatalln("Pipeline function must return an array of objects")
							}
						}
					} else {
						return nil, errors.New("Exported pipeline function must return an array")
					}
				}
			}
			// No environment matched this namespace: no pipeline.
			return nil, nil
		}
	}
	return nil
}
// start installs the process signal handler on its own goroutine.
// Before the index client has started, any signal simply exits. Once the
// client announces itself on clientStartedC, the first SIGINT/SIGTERM
// triggers a clean shutdown of all workers; a second signal forces exit.
//
// Fix: SIGKILL was previously included in signal.Notify — it can never be
// caught, blocked, or ignored, so registering it was misleading dead code.
func (sh *sigHandler) start() {
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		select {
		case <-sigs:
			// we never got started so simply exit
			os.Exit(0)
		case ic := <-sh.clientStartedC:
			<-sigs
			go func() {
				// forced shutdown on 2nd signal
				<-sigs
				infoLog.Println("Forcing shutdown, bye bye...")
				os.Exit(1)
			}()
			// we started processing events so do a clean shutdown
			ic.stopAllWorkers()
			ic.doneC <- 10
		}
	}()
}
// startHTTPServer builds and serves the optional admin HTTP endpoint when
// enabled in the configuration; otherwise it does nothing.
func (ic *indexClient) startHTTPServer() {
	if !ic.config.EnableHTTPServer {
		return
	}
	ic.hsc = &httpServerCtx{
		bulk:       ic.bulk,
		config:     ic.config,
		statusReqC: ic.statusReqC,
	}
	ic.hsc.buildServer()
	go ic.hsc.serveHTTP()
}
// setupFileIndexing validates the file-indexing configuration and creates
// the required index mapping. Misconfiguration is fatal at startup.
func (ic *indexClient) setupFileIndexing() {
	if !ic.config.IndexFiles {
		return
	}
	if len(ic.config.FileNamespaces) == 0 {
		errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
	}
	if err := ic.ensureFileMapping(); err != nil {
		errorLog.Fatalf("Unable to setup file indexing: %s", err)
	}
}
// setupBulk creates the main bulk processor and, when statistics indexing
// is enabled, a separate stats bulk processor. Failure to start either is
// fatal at startup.
func (ic *indexClient) setupBulk() {
	cfg := ic.config
	processor, err := cfg.newBulkProcessor(ic.client)
	if err != nil {
		errorLog.Fatalf("Unable to start bulk processor: %s", err)
	}
	var statsProcessor *elastic.BulkProcessor
	if cfg.IndexStats {
		statsProcessor, err = cfg.newStatsBulkProcessor(ic.client)
		if err != nil {
			errorLog.Fatalf("Unable to start stats bulk processor: %s", err)
		}
	}
	ic.bulk = processor
	ic.bulkStats = statsProcessor
}
// run wires up and starts every subsystem of the index client in
// dependency order, then blocks inside eventLoop until shutdown.
// The order matters: workers must exist before gtm starts producing ops,
// and clusterWait may pause before listening begins.
func (ic *indexClient) run() {
	ic.startNotify()       // systemd READY/WATCHDOG notifications
	ic.setupFileIndexing() // validate file indexing config and mapping
	ic.setupBulk()         // create bulk processors
	ic.startHTTPServer()   // optional admin/stats HTTP endpoint
	ic.startCluster()      // join cluster, determine enabled state
	ic.startRelate()       // workers draining relateC
	ic.startIndex()        // workers draining indexC
	ic.startDownload()     // workers draining fileC into indexC
	ic.startPostProcess()  // workers draining processC
	ic.clusterWait()       // block here if another cluster member is active
	ic.startListen()       // start tailing MongoDB via gtm
	ic.startReadWait()     // watch for completion of direct reads
	ic.eventLoop()         // main select loop; returns only on shutdown
}
// startDownload spins up config.FileDownloaders workers that attach file
// content to each op (addFileContent) before forwarding it to indexC.
// An error is reported via processErr but does not stop the op from being
// forwarded for indexing.
func (ic *indexClient) startDownload() {
	for i := 0; i < ic.config.FileDownloaders; i++ {
		ic.fileWg.Add(1)
		go func() {
			defer ic.fileWg.Done()
			// Loop ends when fileC is closed by stopAllWorkers.
			for op := range ic.fileC {
				if err := ic.addFileContent(op); err != nil {
					ic.processErr(err)
				}
				ic.indexC <- op
			}
		}()
	}
}
// startPostProcess spins up config.PostProcessors workers that run the
// user-defined post processor (runProcessor) on each op from processC.
func (ic *indexClient) startPostProcess() {
	for i := 0; i < ic.config.PostProcessors; i++ {
		ic.processWg.Add(1)
		go func() {
			defer ic.processWg.Done()
			// Loop ends when processC is closed by stopAllWorkers.
			for op := range ic.processC {
				if err := ic.runProcessor(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// stopAllWorkers stops the gtm listener, then drains and closes each worker
// channel in upstream-to-downstream order (relate -> file -> index ->
// process), waiting for each pool to finish before closing the next channel
// so no worker ever sends on a closed channel.
func (ic *indexClient) stopAllWorkers() {
	infoLog.Println("Stopping all workers")
	ic.gtmCtx.Stop()
	// Wait for eventLoop to confirm it has seen the op channel close.
	<-ic.opsConsumed
	close(ic.relateC)
	ic.relateWg.Wait()
	close(ic.fileC)
	ic.fileWg.Wait()
	close(ic.indexC)
	ic.indexWg.Wait()
	close(ic.processC)
	ic.processWg.Wait()
}
// startReadWait, when direct reads are configured, watches for their
// completion in the background: it optionally saves a resume timestamp and,
// if ExitAfterDirectReads is set, initiates a clean shutdown (exit code 30).
func (ic *indexClient) startReadWait() {
	if len(ic.config.DirectReadNs) > 0 {
		go func() {
			ic.gtmCtx.DirectReadWg.Wait()
			infoLog.Println("Direct reads completed")
			if ic.config.Resume {
				ic.saveTimestampFromReplStatus()
			}
			if ic.config.ExitAfterDirectReads {
				ic.stopAllWorkers()
				ic.doneC <- 30
			}
		}()
	}
}
// dialShards looks up every shard registered in the cluster's config.shards
// collection and dials a client connection to each. Any connection failure
// is fatal at startup.
func (ic *indexClient) dialShards() []*mongo.Client {
	shardInfos := gtm.GetShards(ic.mongoConfig)
	if len(shardInfos) == 0 {
		errorLog.Fatalln("Shards enabled but none found in config.shards collection")
	}
	clients := make([]*mongo.Client, 0, len(shardInfos))
	for _, info := range shardInfos {
		url := info.GetURL()
		infoLog.Printf("Adding shard found at %s\n", cleanMongoURL(url))
		client, err := ic.config.dialMongo(url)
		if err != nil {
			errorLog.Fatalf("Unable to connect to mongodb shard using URL %s: %s", cleanMongoURL(url), err)
		}
		clients = append(clients, client)
	}
	return clients
}
// buildTokenGen returns the resume-token generator used by gtm, or a nil
// generator when token-based resume is not in effect. The generator loads
// the last saved change-stream token for (resumeName, streamID) from the
// <ConfigDatabaseName>.tokens collection.
func (ic *indexClient) buildTokenGen() gtm.ResumeTokenGenenerator {
	config := ic.config
	var token gtm.ResumeTokenGenenerator
	if !config.Resume || (config.ResumeStrategy != tokenResumeStrategy) {
		// Resume disabled or using the timestamp strategy: no generator.
		return token
	}
	token = func(client *mongo.Client, streamID string, options *gtm.Options) (interface{}, error) {
		var t interface{} = nil
		var err error
		col := client.Database(config.ConfigDatabaseName).Collection("tokens")
		result := col.FindOne(context.Background(), bson.M{
			"resumeName": config.ResumeName,
			"streamID":   streamID,
		})
		if err = result.Err(); err == nil {
			doc := make(map[string]interface{})
			if err = result.Decode(&doc); err == nil {
				t = doc["token"]
				if t != nil {
					infoLog.Printf("Resuming stream '%s' from collection %s.tokens using resume name '%s'",
						streamID, config.ConfigDatabaseName, config.ResumeName)
				}
			}
		}
		// A missing document surfaces as (nil, err); gtm decides how to
		// proceed — presumably by starting a fresh stream. TODO confirm.
		return t, err
	}
	return token
}
// buildTimestampGen returns the oplog start-timestamp generator used by gtm
// when the timestamp resume strategy is active. Three modes, in priority
// order: Replay (start from the oldest oplog entry), an explicit
// ResumeFromTimestamp, or Resume (last saved timestamp from the
// <ConfigDatabaseName>.monstache collection). Otherwise nil.
func (ic *indexClient) buildTimestampGen() gtm.TimestampGenerator {
	var after gtm.TimestampGenerator
	config := ic.config
	if config.ResumeStrategy != timestampResumeStrategy {
		return after
	}
	if config.Replay {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			ts, _ := gtm.FirstOpTimestamp(client, options)
			// add ten seconds as oldest items often fall off the oplog
			ts.T += 10
			ts.I = 0
			infoLog.Printf("Replaying from timestamp %+v", ts)
			return ts, nil
		}
	} else if config.ResumeFromTimestamp != 0 {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			// ResumeFromTimestamp packs seconds in the high 32 bits and
			// the ordinal in the low 32 bits.
			return primitive.Timestamp{
				T: uint32(config.ResumeFromTimestamp >> 32),
				I: uint32(config.ResumeFromTimestamp),
			}, nil
		}
	} else if config.Resume {
		after = func(client *mongo.Client, options *gtm.Options) (primitive.Timestamp, error) {
			var ts primitive.Timestamp
			var err error
			col := client.Database(config.ConfigDatabaseName).Collection("monstache")
			result := col.FindOne(context.Background(), bson.M{
				"_id": config.ResumeName,
			})
			if err = result.Err(); err == nil {
				doc := make(map[string]interface{})
				if err = result.Decode(&doc); err == nil {
					if doc["ts"] != nil {
						ts = doc["ts"].(primitive.Timestamp)
						// Advance past the already-processed event.
						ts.I++
					}
				}
			}
			if ts.T == 0 {
				// Nothing saved yet: start from the newest oplog entry.
				ts, _ = gtm.LastOpTimestamp(client, options)
			}
			infoLog.Printf("Resuming from timestamp %+v", ts)
			return ts, nil
		}
	}
	return after
}
// buildConnections returns the set of MongoDB clients gtm should listen to:
// one client per shard when sharding is configured, otherwise just the
// primary connection.
func (ic *indexClient) buildConnections() []*mongo.Client {
	cfg := ic.config
	if !cfg.readShards() {
		return []*mongo.Client{ic.mongo}
	}
	// A config server URL means we are watching a sharded cluster.
	configClient, err := cfg.dialMongo(cfg.MongoConfigURL)
	if err != nil {
		errorLog.Fatalf("Unable to connect to mongodb config server using URL %s: %s",
			cleanMongoURL(cfg.MongoConfigURL), err)
	}
	ic.mongoConfig = configClient
	return ic.dialShards()
}
// buildFilterChain assembles the namespace filter chain: the built-in
// exclusions (monstache metadata, system and chunk collections, and the
// config db when sharded) followed by any configured regex include/exclude
// filters, in their original precedence order.
func (ic *indexClient) buildFilterChain() []gtm.OpFilter {
	cfg := ic.config
	chain := []gtm.OpFilter{notMonstache(cfg), notSystem, notChunks}
	if cfg.readShards() {
		chain = append(chain, notConfig)
	}
	regexFilters := []struct {
		pattern string
		build   func(string) gtm.OpFilter
	}{
		{cfg.NsRegex, filterWithRegex},
		{cfg.NsDropRegex, filterDropWithRegex},
		{cfg.NsExcludeRegex, filterInverseWithRegex},
		{cfg.NsDropExcludeRegex, filterDropInverseWithRegex},
	}
	for _, rf := range regexFilters {
		if rf.pattern != "" {
			chain = append(chain, rf.build(rf.pattern))
		}
	}
	return chain
}
// buildFilterArray assembles the op filters: an optional consistent-hash
// worker filter (when running as one of several workers) plus an optional
// plugin or script filter. The plugin/script filter, if any, is also
// remembered on the client as ic.filter.
func (ic *indexClient) buildFilterArray() []gtm.OpFilter {
	cfg := ic.config
	filters := []gtm.OpFilter{}
	if cfg.Worker != "" {
		workerFilter, err := consistent.ConsistentHashFilter(cfg.Worker, cfg.Workers)
		if err != nil {
			errorLog.Fatalln(err)
		}
		filters = append(filters, workerFilter)
	} else if cfg.Workers != nil {
		errorLog.Fatalln("Workers configured but this worker is undefined. worker must be set to one of the workers.")
	}
	var pluginFilter gtm.OpFilter
	switch {
	case filterPlugin != nil:
		// A compiled Go plugin filter wins over script filters.
		pluginFilter = filterWithPlugin()
	case len(filterEnvs) > 0:
		pluginFilter = filterWithScript()
	}
	if pluginFilter != nil {
		filters = append(filters, pluginFilter)
		ic.filter = pluginFilter
	}
	return filters
}
func (ic *indexClient) buildDynamicDirectReadNs(filter gtm.OpFilter) (names []string) {
client, config := ic.mongo, ic.config
if config.DirectReadExcludeRegex != "" {
filter = gtm.ChainOpFilters(filterInverseWithRegex(config.DirectReadExcludeRegex), filter)
}
dbs, err := client.ListDatabaseNames(context.Background(), bson.M{})
if err != nil {
errorLog.Fatalf("Failed to read database names for dynamic direct reads: %s", err)
}
for _, d := range dbs {
if config.ignoreDatabaseForDirectReads(d) {
continue
}
db := client.Database(d)
cols, err := db.ListCollectionNames(context.Background(), bson.M{})
if err != nil {
errorLog.Fatalf("Failed to read db %s collection names for dynamic direct reads: %s", d, err)
return
}
for _, c := range cols {
if config.ignoreCollectionForDirectReads(c) {
continue
}
ns := strings.Join([]string{d, c}, ".")
if filter(>m.Op{Namespace: ns}) {
names = append(names, ns)
} else {
infoLog.Printf("Excluding collection [%s] for dynamic direct reads", ns)
}
}
}
if len(names) == 0 {
warnLog.Println("Dynamic direct read candidates: NONE")
} else {
infoLog.Printf("Dynamic direct read candidates: %v", names)
}
return
}
// parseBufferDuration parses the configured gtm buffer duration; an invalid
// value is fatal at startup.
func (ic *indexClient) parseBufferDuration() time.Duration {
	raw := ic.config.GtmSettings.BufferDuration
	d, err := time.ParseDuration(raw)
	if err != nil {
		errorLog.Fatalf("Unable to parse gtm buffer duration %s: %s", raw, err)
	}
	return d
}
// parseMaxAwaitTime parses the optional gtm max await time, returning the
// zero duration when unset; an invalid value is fatal at startup.
func (ic *indexClient) parseMaxAwaitTime() time.Duration {
	raw := ic.config.GtmSettings.MaxAwaitTime
	if raw == "" {
		return 0
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		errorLog.Fatalf("Unable to parse gtm max await time %s: %s", raw, err)
	}
	return d
}
func (ic *indexClient) buildGtmOptions() *gtm.Options {
var nsFilter, filter, directReadFilter gtm.OpFilter
config := ic.config
filterChain := ic.buildFilterChain()
filterArray := ic.buildFilterArray()
nsFilter = gtm.ChainOpFilters(filterChain...)
filter = gtm.ChainOpFilters(filterArray...)
directReadFilter = gtm.ChainOpFilters(filterArray...)
after := ic.buildTimestampGen()
token := ic.buildTokenGen()
if config.dynamicDirectReadList() {
config.DirectReadNs = ic.buildDynamicDirectReadNs(nsFilter)
}
gtmOpts := >m.Options{
After: after,
Token: token,
Filter: filter,
NamespaceFilter: nsFilter,
OpLogDisabled: config.EnableOplog == false,
OpLogDatabaseName: config.MongoOpLogDatabaseName,
OpLogCollectionName: config.MongoOpLogCollectionName,
ChannelSize: config.GtmSettings.ChannelSize,
Ordering: gtm.AnyOrder,
WorkerCount: 10,
BufferDuration: ic.parseBufferDuration(),
BufferSize: config.GtmSettings.BufferSize,
DirectReadNs: config.DirectReadNs,
DirectReadSplitMax: int32(config.DirectReadSplitMax),
DirectReadConcur: config.DirectReadConcur,
DirectReadNoTimeout: config.DirectReadNoTimeout,
DirectReadFilter: directReadFilter,
Log: infoLog,
Pipe: buildPipe(config),
ChangeStreamNs: config.ChangeStreamNs,
DirectReadBounded: config.DirectReadBounded,
MaxAwaitTime: ic.parseMaxAwaitTime(),
}
return gtmOpts
}
// startListen begins tailing MongoDB: it starts a multi-connection gtm
// context over all configured connections and, for sharded clusters with
// change events enabled, registers a handler for newly added shards.
func (ic *indexClient) startListen() {
	opts := ic.buildGtmOptions()
	ic.gtmCtx = gtm.StartMulti(ic.buildConnections(), opts)
	cfg := ic.config
	if cfg.readShards() && !cfg.DisableChangeEvents {
		ic.gtmCtx.AddShardListener(ic.mongoConfig, opts, cfg.makeShardInsertHandler())
	}
}
// clusterWait blocks startup when running in cluster mode and another
// process currently holds the active role: it stops the bulk processor and
// polls every 10s (via enableProcess) until this process becomes active,
// answering status requests with nil while paused.
func (ic *indexClient) clusterWait() {
	if ic.config.ClusterName != "" {
		if ic.enabled {
			infoLog.Printf("Starting work for cluster %s", ic.config.ClusterName)
		} else {
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.bulk.Stop()
			wait := true
			for wait {
				select {
				case req := <-ic.statusReqC:
					// nil response signals "paused" to the HTTP handler.
					req.responseC <- nil
				case <-heartBeat.C:
					var err error
					ic.enabled, err = ic.enableProcess()
					if err != nil {
						errorLog.Printf("Error attempting to become active cluster process: %s", err)
						// break leaves the select only; keep polling.
						break
					}
					if ic.enabled {
						infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
						ic.bulk.Start(context.Background())
						wait = false
					}
				}
			}
		}
	}
}
// hasNewEvents reports whether the last observed oplog timestamp is
// strictly newer than the last saved one (seconds first, ordinal as the
// tie-breaker).
func (ic *indexClient) hasNewEvents() bool {
	last, saved := ic.lastTs, ic.lastTsSaved
	if last.T != saved.T {
		return last.T > saved.T
	}
	return last.I > saved.I
}
// nextTokens, when new events have been seen since the last save, flushes
// the bulk processor and persists the current resume tokens, recording the
// checkpoint on success.
func (ic *indexClient) nextTokens() {
	if !ic.hasNewEvents() {
		return
	}
	ic.bulk.Flush()
	if err := ic.saveTokens(); err != nil {
		ic.processErr(err)
		return
	}
	ic.lastTsSaved = ic.lastTs
}
// nextTimestamp, when new events have been seen since the last save,
// flushes the bulk processor and persists the current resume timestamp,
// recording the checkpoint on success.
func (ic *indexClient) nextTimestamp() {
	if !ic.hasNewEvents() {
		return
	}
	ic.bulk.Flush()
	if err := ic.saveTimestamp(); err != nil {
		ic.processErr(err)
		return
	}
	ic.lastTsSaved = ic.lastTs
}
// nextStats publishes bulk-processor statistics: indexed into Elasticsearch
// when IndexStats is set, otherwise written to the stats log as JSON.
func (ic *indexClient) nextStats() {
	if ic.config.IndexStats {
		if err := ic.doIndexStats(); err != nil {
			errorLog.Printf("Error indexing statistics: %s", err)
		}
		return
	}
	data, err := json.Marshal(ic.bulk.Stats())
	if err != nil {
		errorLog.Printf("Unable to log statistics: %s", err)
		return
	}
	statsLog.Println(string(data))
}
// nextHeartbeat runs on each cluster heartbeat tick. If currently enabled
// it re-validates the active role; on losing it, it pauses gtm and the
// bulk processor and blocks, polling every 10s until the role is regained.
// If currently disabled it attempts to become active and resumes work on
// success. Any error encountered is reported at the end via processErr.
func (ic *indexClient) nextHeartbeat() {
	var err error
	if ic.enabled {
		ic.enabled, err = ic.ensureEnabled()
		if err != nil {
			ic.processErr(err)
		}
		if !ic.enabled {
			// Lost the active role: pause everything and wait to regain it.
			infoLog.Printf("Pausing work for cluster %s", ic.config.ClusterName)
			ic.gtmCtx.Pause()
			ic.bulk.Stop()
			heartBeat := time.NewTicker(10 * time.Second)
			defer heartBeat.Stop()
			wait := true
			for wait {
				select {
				case req := <-ic.statusReqC:
					// nil response signals "paused" to the HTTP handler.
					req.responseC <- nil
				case <-heartBeat.C:
					ic.enabled, err = ic.enableProcess()
					if ic.enabled {
						wait = false
						infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
						ic.bulk.Start(context.Background())
						ic.resumeWork()
						break
					}
				}
			}
		}
	} else {
		ic.enabled, err = ic.enableProcess()
		if ic.enabled {
			infoLog.Printf("Resuming work for cluster %s", ic.config.ClusterName)
			ic.bulk.Start(context.Background())
			ic.resumeWork()
		}
	}
	if err != nil {
		ic.processErr(err)
	}
}
// eventLoop is the main select loop of the process. It multiplexes:
// shutdown requests (doneC), periodic resume-point saves, cluster
// heartbeats, stats reporting, status queries from the HTTP server, gtm
// errors, and the gtm op stream itself. It returns only on shutdown.
//
// Fixes: "x == false" comparisons replaced with !x; an invalid
// StatsDuration previously yielded a zero duration via an ignored
// ParseDuration error, making time.NewTicker(0) panic — it now logs and
// keeps the 30s default.
func (ic *indexClient) eventLoop() {
	var err error
	var allOpsVisited bool
	// Tickers for disabled features are stopped immediately; a stopped
	// ticker simply never fires in the select below.
	timestampTicker := time.NewTicker(10 * time.Second)
	if !ic.config.Resume {
		timestampTicker.Stop()
	}
	heartBeat := time.NewTicker(10 * time.Second)
	if ic.config.ClusterName == "" {
		heartBeat.Stop()
	}
	statsTimeout := 30 * time.Second
	if ic.config.StatsDuration != "" {
		if d, derr := time.ParseDuration(ic.config.StatsDuration); derr == nil {
			statsTimeout = d
		} else {
			errorLog.Printf("Unable to parse stats duration %s: %s", ic.config.StatsDuration, derr)
		}
	}
	printStats := time.NewTicker(statsTimeout)
	if !ic.config.Stats {
		printStats.Stop()
	}
	infoLog.Println("Listening for events")
	// Hand ourselves to the signal handler so signals trigger clean shutdown.
	ic.sigH.clientStartedC <- ic
	for {
		select {
		case timeout := <-ic.doneC:
			ic.enabled = false
			ic.shutdown(timeout)
			return
		case <-timestampTicker.C:
			if !ic.enabled {
				break
			}
			if ic.config.ResumeStrategy == tokenResumeStrategy {
				ic.nextTokens()
			} else {
				ic.nextTimestamp()
			}
		case <-heartBeat.C:
			if ic.config.ClusterName == "" {
				break
			}
			ic.nextHeartbeat()
		case <-printStats.C:
			if !ic.enabled {
				break
			}
			ic.nextStats()
		case req := <-ic.statusReqC:
			enabled, lastTs := ic.enabled, ic.lastTs
			statusResp := &statusResponse{
				enabled: enabled,
				lastTs:  lastTs,
			}
			req.responseC <- statusResp
		case err = <-ic.gtmCtx.ErrC:
			if err == nil {
				break
			}
			ic.processErr(err)
		case op, open := <-ic.gtmCtx.OpC:
			if !ic.enabled {
				break
			}
			if op == nil {
				// Channel drained; tell stopAllWorkers exactly once.
				if !open && !allOpsVisited {
					allOpsVisited = true
					ic.opsConsumed <- true
				}
				break
			}
			if op.IsSourceOplog() {
				// Track the resume point for the periodic save above.
				ic.lastTs = op.Timestamp
				if ic.config.ResumeStrategy == tokenResumeStrategy {
					ic.tokens[op.ResumeToken.StreamID] = op.ResumeToken.ResumeToken
				}
			}
			if err = ic.routeOp(op); err != nil {
				ic.processErr(err)
			}
		}
	}
}
// startIndex spins up a fixed pool of workers that drain indexC through
// doIndex, reporting failures via processErr.
func (ic *indexClient) startIndex() {
	const indexWorkers = 5
	for n := 0; n < indexWorkers; n++ {
		ic.indexWg.Add(1)
		go func() {
			defer ic.indexWg.Done()
			// Loop ends when indexC is closed by stopAllWorkers.
			for op := range ic.indexC {
				if err := ic.doIndex(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startRelate, when relations are configured, spins up RelateThreads
// workers that drain relateC through processRelated, reporting failures
// via processErr.
func (ic *indexClient) startRelate() {
	if len(ic.config.Relate) == 0 {
		return
	}
	for n := 0; n < ic.config.RelateThreads; n++ {
		ic.relateWg.Add(1)
		go func() {
			defer ic.relateWg.Done()
			// Loop ends when relateC is closed by stopAllWorkers.
			for op := range ic.relateC {
				if err := ic.processRelated(op); err != nil {
					ic.processErr(err)
				}
			}
		}()
	}
}
// startCluster, when a cluster name is configured, joins the cluster
// (ensuring the TTL collection exists) and determines whether this process
// starts out as the active member. Either failure is fatal at startup.
func (ic *indexClient) startCluster() {
	if ic.config.ClusterName == "" {
		return
	}
	if err := ic.ensureClusterTTL(); err != nil {
		errorLog.Fatalf("Unable to enable cluster mode: %s", err)
	}
	infoLog.Printf("Joined cluster %s", ic.config.ClusterName)
	var err error
	ic.enabled, err = ic.enableProcess()
	if err != nil {
		errorLog.Fatalf("Unable to determine enabled cluster process: %s", err)
	}
}
// closeClient tears down shared resources during shutdown — cluster state,
// the admin HTTP server, and both bulk processors — then signals completion
// by closing closeC (observed by shutdown).
func (ic *indexClient) closeClient() {
	if ic.mongo != nil && ic.config.ClusterName != "" {
		ic.resetClusterState()
	}
	if ic.hsc != nil {
		ic.hsc.shutdown = true
		ic.hsc.httpServer.Shutdown(context.Background())
	}
	if ic.bulk != nil {
		ic.bulk.Close()
	}
	if ic.bulkStats != nil {
		ic.bulkStats.Close()
	}
	close(ic.closeC)
}
// shutdown closes shared resources in the background and exits the process
// once cleanup finishes or the given timeout (seconds) elapses, whichever
// comes first.
//
// Simplification: the previous version spawned a goroutine running a
// for-loop over a select whose two cases both set a flag and closed a relay
// channel; a single select on the two events is equivalent.
func (ic *indexClient) shutdown(timeout int) {
	infoLog.Println("Shutting down")
	go ic.closeClient()
	closeT := time.NewTimer(time.Duration(timeout) * time.Second)
	defer closeT.Stop()
	select {
	case <-ic.closeC:
		// Clean close completed.
	case <-closeT.C:
		// Gave up waiting; exit anyway.
	}
	os.Exit(exitStatus)
}
// getBuildInfo runs the MongoDB buildInfo admin command and decodes the
// result. On a command error the decode step is skipped and the error is
// returned.
func getBuildInfo(client *mongo.Client) (bi *buildInfo, err error) {
	result := client.Database("admin").RunCommand(context.Background(), bson.M{
		"buildInfo": 1,
	})
	if err = result.Err(); err != nil {
		return
	}
	bi = &buildInfo{}
	err = result.Decode(bi)
	return
}
// saveTimestampFromReplStatus reads the replica set's last committed
// timestamp, records it as the client's lastTs, and persists it. Any
// failure along the way is reported via processErr.
func (ic *indexClient) saveTimestampFromReplStatus() {
	rs, err := gtm.GetReplStatus(ic.mongo)
	if err != nil {
		ic.processErr(err)
		return
	}
	if ic.lastTs, err = rs.GetLastCommitted(); err != nil {
		ic.processErr(err)
		return
	}
	if err = ic.saveTimestamp(); err != nil {
		ic.processErr(err)
	}
}
// mustConfig parses command-line flags and configuration, handling the
// short-circuit -version and -print modes, then sets up logging and
// validates the result. It never returns an invalid configuration
// (validate is expected to exit on failure).
func mustConfig() *configOptions {
	cfg := &configOptions{
		GtmSettings: gtmDefaultSettings(),
		LogRotate:   logRotateDefaults(),
	}
	cfg.parseCommandLineFlags()
	if cfg.Version {
		fmt.Println(version)
		os.Exit(0)
	}
	cfg.build()
	if cfg.Print {
		cfg.dump()
		os.Exit(0)
	}
	cfg.setupLogging()
	cfg.validate()
	return cfg
}
// validateResumeStrategy warns when the timestamp resume strategy is used
// with a resume/replay option against a MongoDB server older than the
// required major version. It only logs; it does not abort.
func validateResumeStrategy(config *configOptions, mongoInfo *buildInfo) {
	if len(mongoInfo.VersionArray) == 0 {
		return
	}
	if config.ResumeStrategy != timestampResumeStrategy {
		return
	}
	if !config.Resume && !config.Replay && config.ResumeFromTimestamp <= 0 {
		return
	}
	const requiredMajorVersion = 4
	if mongoInfo.VersionArray[0] < requiredMajorVersion {
		errorLog.Println(resumeStrategyInvalid)
	}
}
// buildMongoClient dials MongoDB (fatal on failure), logs the relevant
// component versions, and — when the server's build info is available —
// validates the configured resume strategy against the server version.
func buildMongoClient(config *configOptions) *mongo.Client {
	client, err := config.dialMongo(config.MongoURL)
	if err != nil {
		errorLog.Fatalf("Unable to connect to MongoDB using URL %s: %s",
			cleanMongoURL(config.MongoURL), err)
	}
	infoLog.Printf("Started monstache version %s", version)
	infoLog.Printf("MongoDB go driver %s", mongoversion.Driver)
	infoLog.Printf("Elasticsearch go driver %s", elastic.Version)
	mongoInfo, err := getBuildInfo(client)
	if err != nil {
		// Build info unavailable; connection itself succeeded.
		infoLog.Println("Successfully connected to MongoDB")
		return client
	}
	infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
	validateResumeStrategy(config, mongoInfo)
	return client
}
// buildElasticClient creates the Elasticsearch client (fatal on failure).
// With an explicitly configured version string it parses that instead of
// probing the server; otherwise it validates the live connection.
func buildElasticClient(config *configOptions) *elastic.Client {
	client, err := config.newElasticClient()
	if err != nil {
		errorLog.Fatalf("Unable to create Elasticsearch client: %s", err)
	}
	if config.ElasticVersion != "" {
		if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
			errorLog.Fatalf("Elasticsearch version must conform to major.minor.fix: %s", err)
		}
		return client
	}
	if err := config.testElasticsearchConn(client); err != nil {
		errorLog.Fatalf("Unable to validate connection to Elasticsearch: %s", err)
	}
	return client
}
// main is the monstache entry point: parse and validate configuration,
// install the signal handler, dial MongoDB and Elasticsearch, assemble the
// index client with its channels and worker wait groups, and run it.
func main() {
	config := mustConfig()
	sh := &sigHandler{
		clientStartedC: make(chan *indexClient),
	}
	sh.start()
	mongoClient := buildMongoClient(config)
	loadBuiltinFunctions(mongoClient, config)
	elasticClient := buildElasticClient(config)
	ic := &indexClient{
		config:      config,
		mongo:       mongoClient,
		client:      elasticClient,
		fileWg:      &sync.WaitGroup{},
		indexWg:     &sync.WaitGroup{},
		processWg:   &sync.WaitGroup{},
		relateWg:    &sync.WaitGroup{},
		opsConsumed: make(chan bool),
		closeC:      make(chan bool),
		doneC:       make(chan int),
		enabled:     true, // assume active until cluster mode decides otherwise
		indexC:      make(chan *gtm.Op),
		processC:    make(chan *gtm.Op),
		fileC:       make(chan *gtm.Op),
		relateC:     make(chan *gtm.Op, config.RelateBuffer),
		statusReqC:  make(chan *statusRequest),
		sigH:        sh,
		tokens:      bson.M{},
	}
	ic.run()
}
|
package moldova
import (
"bytes"
"errors"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
)
// TestComparator validates a rendered template result, returning a non-nil
// error when the output does not satisfy the case's expectations.
type TestComparator func(string) error

// TestCase pairs a moldova template with how its rendering should be
// judged: an optional output Comparator, and flags marking whether parsing
// (ParseFailure) or rendering (WriteFailure) is expected to fail.
type TestCase struct {
	Template     string
	Comparator   TestComparator
	ParseFailure bool
	WriteFailure bool
}
// GUIDCases exercises {guid}: format of a fresh GUID, ordinal back-reference
// to a previous value, and an out-of-range ordinal (expected write failure).
var GUIDCases = []TestCase{
	{
		Template: "{guid}",
		Comparator: func(s string) error {
			// Expect the 8-4-4-4-12 hyphenated layout.
			p := strings.Split(s, "-")
			if len(p) == 5 &&
				len(p[0]) == 8 &&
				len(p[1]) == len(p[2]) && len(p[2]) == len(p[3]) && len(p[3]) == 4 &&
				len(p[4]) == 12 {
				return nil
			}
			return errors.New("Guid not in correct format: " + s)
		},
	},
	{
		Template: "{guid}@{guid:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Guid at position 1 not equal to guid at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one guid has been generated, so ordinal 1 must fail.
		Template:     "{guid}@{guid:ordinal:1}",
		WriteFailure: true,
	},
}
// NowCases exercises {now}: basic rendering, ordinal back-reference, and an
// out-of-range ordinal (expected write failure).
var NowCases = []TestCase{
	{
		// There is no proper deterministic way to test what the value of now is, without
		// something like rubys timecop (but the go-equivalent is not viable) or relying
		// on luck, which will run out if tests are run at just the wrong moment.
		// Therefore, for the basic test, i'm just asserting nothing went wrong for now.
		Template: "{now}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("Now not in correct format: " + s)
		},
	},
	{
		Template: "{now}@{now:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Now at position 1 not equal to now at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one now value has been generated, so ordinal 1 must fail.
		Template:     "{now}@{now:ordinal:1}",
		WriteFailure: true,
	},
}
// TimeCases exercises {time}: pinned min==max values render a deterministic
// instant (1 second after the epoch, shown in EST) across the built-in and
// custom formats, plus ordinal behavior.
var TimeCases = []TestCase{
	{
		Template: "{time:min:1|max:1|format:simple|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969-12-31 19:00:01" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		Template: "{time:min:1|max:1|format:simpletz|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969-12-31 19:00:01 -0500" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		// Custom layout using Go's reference-time format syntax.
		Template: "{time:min:1|max:1|format:2006//01//02@@15_04_05|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969//12//31@@19_00_01" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		Template: "{time}@{time:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Time at position 1 not equal to time at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one time value has been generated, so ordinal 1 must fail.
		Template:     "{time}@{time:ordinal:1}",
		WriteFailure: true,
	},
}
// CountryCases exercises {country}: two-letter code shape, upper/lower case
// options, and ordinal behavior.
var CountryCases = []TestCase{
	{
		Template: "{country}",
		Comparator: func(s string) error {
			// TODO better check here in case we ever support different types of country codes
			if len(s) == 2 {
				return nil
			}
			return errors.New("Invalid country code generated somehow")
		},
	},
	{
		Template: "{country:case:up}",
		Comparator: func(s string) error {
			// Since I can't know which country comes out, i'll invert the result
			// If the ToLowered result is not the same as the original result, we know
			// that the original was successfully output in upper case
			if strings.ToLower(s) != s {
				return nil
			}
			return errors.New("Country was returned in lowercase, but was requested in uppercase")
		},
	},
	{
		Template: "{country:case:down}",
		Comparator: func(s string) error {
			// Inverse of the check above: if upper-casing changes the string,
			// the original must have been lowercase.
			if strings.ToUpper(s) != s {
				return nil
			}
			return errors.New("Country was returned in uppercase, but was requested in lowercase")
		},
	},
	{
		Template: "{country}@{country:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Country at position 1 not equal to country at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one country has been generated, so ordinal 1 must fail.
		Template:     "{country}@{country:ordinal:1}",
		WriteFailure: true,
	},
}
// FloatCases exercises {float}: default range [0,100], custom positive and
// negative min/max ranges, and ordinal behavior.
var FloatCases = []TestCase{
	{
		Template: "{float}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			if i >= 0.0 && i <= 100.0 {
				return nil
			}
			return errors.New("Float out of range for default min/max values")
		},
	},
	{
		Template: "{float:max:5000.0|min:4999.0}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			if i >= 4999.0 && i <= 5000.0 {
				return nil
			}
			return errors.New("Float out of range for custom min/max values")
		},
	},
	{
		Template: "{float:max:-5000.0|min:-5001.0}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			if i >= -5001.0 && i <= -5000.0 {
				return nil
			}
			return errors.New("Float out of range for custom min/max values")
		},
	},
	{
		Template: "{float}@{float:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Float at position 1 not equal to Float at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one float has been generated, so ordinal 1 must fail.
		Template:     "{float}@{float:ordinal:1}",
		WriteFailure: true,
	},
}
// IntegerCases exercises {int}: default range [0,100], custom positive and
// negative min/max ranges, and ordinal behavior.
var IntegerCases = []TestCase{
	{
		Template: "{int}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			if i >= 0 && i <= 100 {
				return nil
			}
			return errors.New("Int out of range for default min/max values")
		},
	},
	{
		Template: "{int:max:5000|min:4999}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			if i >= 4999 && i <= 5000 {
				return nil
			}
			return errors.New("Int out of range for custom min/max values")
		},
	},
	{
		Template: "{int:max:-5000|min:-5001}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			if i >= -5001 && i <= -5000 {
				return nil
			}
			return errors.New("Int out of range for custom min/max values")
		},
	},
	{
		Template: "{int}@{int:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Int at position 1 not equal to int at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one int has been generated, so ordinal 1 must fail.
		Template:     "{int}@{int:ordinal:1}",
		WriteFailure: true,
	},
}
// UnicodeCases exercises {unicode}: default length (2 runes), custom
// length, the case option, and ordinal behavior.
var UnicodeCases = []TestCase{
	{
		Template: "{unicode}",
		Comparator: func(s string) error {
			// Length is measured in runes, not bytes.
			if len([]rune(s)) == 2 {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		Template: "{unicode:length:10}",
		Comparator: func(s string) error {
			if len([]rune(s)) == 10 {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		Template: "{unicode:length:10|case:up}",
		Comparator: func(s string) error {
			// NOTE(review): "||" makes this pass on length alone, and the
			// right-hand side only checks the string contains cased runes,
			// not that it is uppercase. "&&" plus s == ToUpper(s) looks
			// intended, but random runes may be caseless, so changing it
			// could make the test flaky — confirm intent before fixing.
			if len([]rune(s)) == 10 || strings.ToLower(s) != strings.ToUpper(s) {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		Template: "{unicode}@{unicode:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Unicode at position 1 not equal to Unicode at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one unicode value has been generated, so ordinal 1 must fail.
		Template:     "{unicode}@{unicode:ordinal:1}",
		WriteFailure: true,
	},
}
// FirstNameCases exercises {firstname}: non-empty output, ordinal
// back-reference, and an out-of-range ordinal (expected write failure).
//
// Fix: the ordinal-mismatch message said "not equal to Unicode" — a
// copy-paste leftover from UnicodeCases.
var FirstNameCases = []TestCase{
	{
		Template: "{firstname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("First Name string not the correct length")
		},
	},
	{
		Template: "{firstname}@{firstname:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("First Name at position 1 not equal to First Name at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one first name has been generated, so ordinal 1 must fail.
		Template:     "{firstname}@{firstname:ordinal:1}",
		WriteFailure: true,
	},
}
// LastNameCases exercises {lastname}: non-empty output, ordinal
// back-reference, and an out-of-range ordinal (expected write failure).
//
// Fix: both error messages said "First Name" (and one "Unicode") — a
// copy-paste leftover from FirstNameCases/UnicodeCases.
var LastNameCases = []TestCase{
	{
		Template: "{lastname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("Last Name string not the correct length")
		},
	},
	{
		Template: "{lastname}@{lastname:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Last Name at position 1 not equal to Last Name at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one last name has been generated, so ordinal 1 must fail.
		Template:     "{lastname}@{lastname:ordinal:1}",
		WriteFailure: true,
	},
}
// FullNameCases exercises combining {firstname} and {lastname} in one
// template; only non-empty output is asserted.
var FullNameCases = []TestCase{
	{
		Template: "{firstname} {lastname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("First / Last Name string not the correct length")
		},
	},
}
// InvalidTokenCases uses misspelled token names; rendering is expected to
// fail. NOTE(review): the flag set here is WriteFailure, implying unknown
// tokens are detected at write time rather than parse time — confirm.
var InvalidTokenCases = []TestCase{
	{
		Template:     "{firstnmae} {plastname}",
		WriteFailure: true,
	},
}
// AllCases aggregates every test-case table for the table-driven
// TestAllCases run.
var AllCases = [][]TestCase{
	GUIDCases,
	NowCases,
	TimeCases,
	CountryCases,
	FloatCases,
	IntegerCases,
	UnicodeCases,
	FirstNameCases,
	LastNameCases,
	FullNameCases,
	InvalidTokenCases,
}
// TODO Test each random function individually, under a number of inputs to make supported
// all the options behave as expected.

// TestMain seeds the global RNG (the generators rely on math/rand) and
// runs the test suite.
func TestMain(m *testing.M) {
	rand.Seed(time.Now().Unix())
	os.Exit(m.Run())
}
// TestAllCases runs every template in AllCases through parse and render,
// checking expected parse/write failures and, where provided, the case's
// Comparator against the rendered output.
//
// Fixes: the write-failure check tested c.ParseFailure instead of
// c.WriteFailure (so a WriteFailure case that unexpectedly succeeded went
// unreported); the inner callstack variable shadowed the outer range
// variable `cs`; and a nil callstack after a parse failure would have
// panicked on Write — now skipped.
func TestAllCases(t *testing.T) {
	// TODO The library should be threadsafe, I should go wide here to run all specs
	// in parallel, like the natural tests would be. Channel + waitgroup to collect
	// and report on errors once they all finish
	for _, group := range AllCases {
		for _, c := range group {
			stack, err := BuildCallstack(c.Template)
			// If we get an error and weren't expecting it
			// Or, if we didn't get one but were expecting it
			if err != nil && !c.ParseFailure {
				t.Error(err)
			} else if err == nil && c.ParseFailure {
				t.Error("Expected to encounter Parse Failure, but did not for Test Case ", c.Template)
			}
			if stack == nil {
				// Parsing produced no callstack; nothing to render.
				continue
			}
			result := &bytes.Buffer{}
			err = stack.Write(result)
			// If we get an error and weren't expecting it
			// Or, if we didn't get one but were expecting it
			if err != nil && !c.WriteFailure {
				t.Error(err)
			} else if err == nil && c.WriteFailure {
				t.Error("Expected to encounter Write Failure, but did not for Test Case ", c.Template)
			}
			if c.Comparator != nil {
				if err := c.Comparator(result.String()); err != nil {
					t.Error(err)
				}
			}
		}
	}
}
// TestGeneratedStringLength verifies that a rendered template has the same
// rune length as a known sample (the int token here always renders one digit).
func TestGeneratedStringLength(t *testing.T) {
	template := "Hey I'm {int:min:1|max:9} years old"
	sampleresult := "Hey I'm 1 years old"
	cs, err := BuildCallstack(template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		t.Fatal(err)
	}
	result := &bytes.Buffer{}
	if err = cs.Write(result); err != nil {
		t.Fatal(err)
	}
	// Compare rune counts, not bytes, so multi-byte output cannot skew the check.
	if len([]rune(sampleresult)) != len([]rune(result.String())) {
		// BUG FIX: log via the test runner instead of fmt.Println, and fix the
		// "templtate" typo in the failure message.
		t.Log(result.String())
		t.Error("Missing parts of the rendered template")
	}
}
// BenchmarkGUID measures rendering throughput for the basic {guid} template.
func BenchmarkGUID(b *testing.B) {
	c := GUIDCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkNow measures rendering throughput for the basic {now} template.
func BenchmarkNow(b *testing.B) {
	c := NowCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkTime measures rendering throughput for the first {time} template.
func BenchmarkTime(b *testing.B) {
	c := TimeCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkInteger measures rendering throughput for the basic {int} template.
func BenchmarkInteger(b *testing.B) {
	c := IntegerCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkFloat measures rendering throughput for the basic {float} template.
func BenchmarkFloat(b *testing.B) {
	c := FloatCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkCountry measures rendering throughput for the basic {country} template.
func BenchmarkCountry(b *testing.B) {
	c := CountryCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkUnicode measures rendering throughput for the basic {unicode} template.
func BenchmarkUnicode(b *testing.B) {
	c := UnicodeCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkFirstName measures rendering throughput for the basic {firstname} template.
func BenchmarkFirstName(b *testing.B) {
	c := FirstNameCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
func BenchmarkLastName(b *testing.B) {
c := FirstNameCases[0]
var cs *Callstack
var err error
if cs, err = BuildCallstack(c.Template); err != nil {
b.Error(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
result := &bytes.Buffer{}
err = cs.Write(result)
if err != nil {
b.Error(err)
}
}
}
// BenchmarkFullName measures rendering throughput for the combined name template.
func BenchmarkFullName(b *testing.B) {
	c := FullNameCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
Fix test harness, and add tests for bad language
package moldova
import (
"bytes"
"errors"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
)
// TestComparator validates one rendered template string, returning a non-nil
// error when the output does not match expectations.
type TestComparator func(string) error

// TestCase describes a single template to render and the expected outcome.
type TestCase struct {
	Template     string         // template handed to BuildCallstack
	Comparator   TestComparator // optional check applied to the rendered output
	ParseFailure bool           // true when BuildCallstack is expected to fail
	WriteFailure bool           // true when Callstack.Write is expected to fail
}
// GUIDCases covers the {guid} token: shape of a fresh GUID, an ordinal
// back-reference, and an out-of-range ordinal that must fail at write time.
var GUIDCases = []TestCase{
	{
		Template: "{guid}",
		Comparator: func(s string) error {
			// Expect five dash-separated groups sized 8-4-4-4-12.
			p := strings.Split(s, "-")
			if len(p) == 5 &&
				len(p[0]) == 8 &&
				len(p[1]) == len(p[2]) && len(p[2]) == len(p[3]) && len(p[3]) == 4 &&
				len(p[4]) == 12 {
				return nil
			}
			return errors.New("Guid not in correct format: " + s)
		},
	},
	{
		// ordinal:0 must reproduce the first generated GUID verbatim.
		Template: "{guid}@{guid:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Guid at position 1 not equal to guid at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one GUID has been generated, so ordinal:1 must fail.
		Template:     "{guid}@{guid:ordinal:1}",
		WriteFailure: true,
	},
}
// NowCases covers the {now} token: basic output, ordinal back-reference,
// and an out-of-range ordinal.
var NowCases = []TestCase{
	{
		// There is no proper deterministic way to test what the value of now is, without
		// something like rubys timecop (but the go-equivalent is not viable) or relying
		// on luck, which will run out if tests are run at just the wrong moment.
		// Therefore, for the basic test, i'm just asserting nothing went wrong for now.
		Template: "{now}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("Now not in correct format: " + s)
		},
	},
	{
		// ordinal:0 must reproduce the first generated timestamp verbatim.
		Template: "{now}@{now:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Now at position 1 not equal to now at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one {now} value exists, so ordinal:1 must fail at write time.
		Template:     "{now}@{now:ordinal:1}",
		WriteFailure: true,
	},
}
// TimeCases covers the {time} token. Pinning min and max to the same epoch
// second makes the rendered value deterministic, so exact-string checks work.
var TimeCases = []TestCase{
	{
		// Epoch second 1 rendered in the "simple" layout, EST zone.
		Template: "{time:min:1|max:1|format:simple|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969-12-31 19:00:01" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		// Same instant, "simpletz" layout appends the numeric zone offset.
		Template: "{time:min:1|max:1|format:simpletz|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969-12-31 19:00:01 -0500" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		// Custom Go reference-time layout passed straight through.
		Template: "{time:min:1|max:1|format:2006//01//02@@15_04_05|zone:EST}",
		Comparator: func(s string) error {
			if s == "1969//12//31@@19_00_01" {
				return nil
			}
			return errors.New("Time value was not the expected value")
		},
	},
	{
		// ordinal:0 must reproduce the first generated time verbatim.
		Template: "{time}@{time:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Time at position 1 not equal to time at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one time value exists, so ordinal:1 must fail at write time.
		Template:     "{time}@{time:ordinal:1}",
		WriteFailure: true,
	},
}
// CountryCases covers the {country} token: code shape, case options,
// ordinal back-reference, and an out-of-range ordinal.
var CountryCases = []TestCase{
	{
		Template: "{country}",
		Comparator: func(s string) error {
			// TODO better check here in case we ever support different types of country codes
			if len(s) == 2 {
				return nil
			}
			return errors.New("Invalid country code generated somehow")
		},
	},
	{
		Template: "{country:case:up}",
		Comparator: func(s string) error {
			// Since I can't know which country comes out, i'll invert the result
			// If the ToLowered result is not the same as the original result, we know
			// that the original was successfully output in upper case
			if strings.ToLower(s) != s {
				return nil
			}
			return errors.New("Country was returned in lowercase, but was requested in uppercase")
		},
	},
	{
		Template: "{country:case:down}",
		Comparator: func(s string) error {
			// Mirror of the case above: if uppercasing changes the string, the
			// original must have been output in lower case as requested.
			if strings.ToUpper(s) != s {
				return nil
			}
			return errors.New("Country was returned in uppercase, but was requested in lowercase")
		},
	},
	{
		// ordinal:0 must reproduce the first generated country verbatim.
		Template: "{country}@{country:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Country at position 1 not equal to country at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one country exists, so ordinal:1 must fail at write time.
		Template:     "{country}@{country:ordinal:1}",
		WriteFailure: true,
	},
}
// FloatCases covers the {float} token: default range, custom positive and
// negative ranges, ordinal back-reference, and an out-of-range ordinal.
var FloatCases = []TestCase{
	{
		Template: "{float}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			// Default range is [0.0, 100.0].
			if i >= 0.0 && i <= 100.0 {
				return nil
			}
			return errors.New("Float out of range for default min/max values")
		},
	},
	{
		Template: "{float:max:5000.0|min:4999.0}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			if i >= 4999.0 && i <= 5000.0 {
				return nil
			}
			return errors.New("Float out of range for custom min/max values")
		},
	},
	{
		// Fully negative range must also be honored.
		Template: "{float:max:-5000.0|min:-5001.0}",
		Comparator: func(s string) error {
			i, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return err
			}
			if i >= -5001.0 && i <= -5000.0 {
				return nil
			}
			return errors.New("Float out of range for custom min/max values")
		},
	},
	{
		// ordinal:0 must reproduce the first generated float verbatim.
		Template: "{float}@{float:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Float at position 1 not equal to Float at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one float exists, so ordinal:1 must fail at write time.
		Template:     "{float}@{float:ordinal:1}",
		WriteFailure: true,
	},
}
// IntegerCases covers the {int} token: default range, custom positive and
// negative ranges, ordinal back-reference, and an out-of-range ordinal.
var IntegerCases = []TestCase{
	{
		Template: "{int}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			// Default range is [0, 100].
			if i >= 0 && i <= 100 {
				return nil
			}
			return errors.New("Int out of range for default min/max values")
		},
	},
	{
		Template: "{int:max:5000|min:4999}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			if i >= 4999 && i <= 5000 {
				return nil
			}
			return errors.New("Int out of range for custom min/max values")
		},
	},
	{
		// Fully negative range must also be honored.
		Template: "{int:max:-5000|min:-5001}",
		Comparator: func(s string) error {
			i, err := strconv.Atoi(s)
			if err != nil {
				return err
			}
			if i >= -5001 && i <= -5000 {
				return nil
			}
			return errors.New("Int out of range for custom min/max values")
		},
	},
	{
		// ordinal:0 must reproduce the first generated int verbatim.
		Template: "{int}@{int:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Int at position 1 not equal to int at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one int exists, so ordinal:1 must fail at write time.
		Template:     "{int}@{int:ordinal:1}",
		WriteFailure: true,
	},
}
// UnicodeCases covers the {unicode} token: default and explicit lengths,
// case option, ordinal back-reference, and an out-of-range ordinal.
var UnicodeCases = []TestCase{
	{
		Template: "{unicode}",
		Comparator: func(s string) error {
			// Default length is 2 runes (not bytes).
			if len([]rune(s)) == 2 {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		Template: "{unicode:length:10}",
		Comparator: func(s string) error {
			if len([]rune(s)) == 10 {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		Template: "{unicode:length:10|case:up}",
		Comparator: func(s string) error {
			// NOTE(review): "||" means the case check only runs when the length is
			// wrong — possibly "&&" was intended; confirm against the token's spec.
			if len([]rune(s)) == 10 || strings.ToLower(s) != strings.ToUpper(s) {
				return nil
			}
			return errors.New("Unicode string not the correct length")
		},
	},
	{
		// ordinal:0 must reproduce the first generated string verbatim.
		Template: "{unicode}@{unicode:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			return errors.New("Unicode at position 1 not equal to Unicode at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one unicode value exists, so ordinal:1 must fail at write time.
		Template:     "{unicode}@{unicode:ordinal:1}",
		WriteFailure: true,
	},
}
// FirstNameCases covers the {firstname} token: basic output, ordinal
// back-reference, and an out-of-range ordinal that must fail at write time.
var FirstNameCases = []TestCase{
	{
		Template: "{firstname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("First Name string not the correct length")
		},
	},
	{
		Template: "{firstname}@{firstname:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			// BUG FIX: message previously said "Unicode" for the referenced token.
			return errors.New("First Name at position 1 not equal to First Name at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one first name exists, so ordinal:1 must fail at write time.
		Template:     "{firstname}@{firstname:ordinal:1}",
		WriteFailure: true,
	},
}
// LastNameCases covers the {lastname} token: basic output, ordinal
// back-reference, and an out-of-range ordinal that must fail at write time.
var LastNameCases = []TestCase{
	{
		Template: "{lastname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			// BUG FIX: message previously said "First Name" in a last-name case.
			return errors.New("Last Name string not the correct length")
		},
	},
	{
		Template: "{lastname}@{lastname:ordinal:0}",
		Comparator: func(s string) error {
			p := strings.Split(s, "@")
			if p[0] == p[1] {
				return nil
			}
			// BUG FIX: message previously said "First Name ... Unicode".
			return errors.New("Last Name at position 1 not equal to Last Name at position 0: " + p[0] + " " + p[1])
		},
	},
	{
		// Only one last name exists, so ordinal:1 must fail at write time.
		Template:     "{lastname}@{lastname:ordinal:1}",
		WriteFailure: true,
	},
}
// FullNameCases renders {firstname} and {lastname} together; the output is
// nondeterministic, so the check is only that something was produced.
var FullNameCases = []TestCase{
	{
		Template: "{firstname} {lastname}",
		Comparator: func(s string) error {
			if len(s) > 0 {
				return nil
			}
			return errors.New("First / Last Name string not the correct length")
		},
	},
}
// InvalidTokenCases exercises bad input: an unknown token name and an
// unsupported language option. Both must fail at write time.
var InvalidTokenCases = []TestCase{
	{
		Template:     "{firstname} {plastname}",
		WriteFailure: true,
	},
	{
		Template:     "{firstname:language:onglish}",
		WriteFailure: true,
	},
}
// AllCases aggregates every token's case group so TestAllCases can run them all.
var AllCases = [][]TestCase{
	GUIDCases,
	NowCases,
	TimeCases,
	CountryCases,
	FloatCases,
	IntegerCases,
	UnicodeCases,
	FirstNameCases,
	LastNameCases,
	FullNameCases,
	InvalidTokenCases,
}
// TODO Test each random function individually, under a number of inputs, to make sure
// all the options behave as expected.
// TestMain seeds the global RNG once so generated values vary between runs.
func TestMain(m *testing.M) {
	rand.Seed(time.Now().Unix())
	os.Exit(m.Run())
}
// TestAllCases runs every registered case group: parses each template,
// renders it, and checks the outcome against the case's expectations.
func TestAllCases(t *testing.T) {
	// TODO The library should be threadsafe, I should go wide here to run all specs
	// in parallel, like the natural tests would be. Channel + waitgroup to collect
	// and report on errors once they all finish
	for _, group := range AllCases {
		for _, c := range group {
			// Renamed from "cs" to avoid shadowing the outer loop variable.
			stack, err := BuildCallstack(c.Template)
			// If we get an error and weren't expecting it
			// Or, if we didn't get one but were expecting it
			if err != nil && !c.ParseFailure {
				t.Error(err)
			} else if err == nil && c.ParseFailure {
				t.Error("Expected to encounter Parse Failure, but did not for Test Case ", c.Template)
			}
			if err != nil {
				// ROBUSTNESS FIX: parsing failed, so calling Write on the (nil)
				// callstack would panic; skip to the next case instead.
				continue
			}
			result := &bytes.Buffer{}
			err = stack.Write(result)
			// If we get an error and weren't expecting it
			// Or, if we didn't get one but were expecting it
			if err != nil && !c.WriteFailure {
				t.Error(err)
			} else if err == nil && c.WriteFailure {
				t.Error("Expected to encounter Write Failure, but did not for Test Case ", c.Template)
			}
			if c.Comparator != nil {
				if err := c.Comparator(result.String()); err != nil {
					t.Error(err)
				}
			}
		}
	}
}
// TestGeneratedStringLength verifies that a rendered template has the same
// rune length as a known sample (the int token here always renders one digit).
func TestGeneratedStringLength(t *testing.T) {
	template := "Hey I'm {int:min:1|max:9} years old"
	sampleresult := "Hey I'm 1 years old"
	cs, err := BuildCallstack(template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		t.Fatal(err)
	}
	result := &bytes.Buffer{}
	if err = cs.Write(result); err != nil {
		t.Fatal(err)
	}
	// Compare rune counts, not bytes, so multi-byte output cannot skew the check.
	if len([]rune(sampleresult)) != len([]rune(result.String())) {
		// BUG FIX: log via the test runner instead of fmt.Println, and fix the
		// "templtate" typo in the failure message.
		t.Log(result.String())
		t.Error("Missing parts of the rendered template")
	}
}
// BenchmarkGUID measures rendering throughput for the basic {guid} template.
func BenchmarkGUID(b *testing.B) {
	c := GUIDCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkNow measures rendering throughput for the basic {now} template.
func BenchmarkNow(b *testing.B) {
	c := NowCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkTime measures rendering throughput for the first {time} template.
func BenchmarkTime(b *testing.B) {
	c := TimeCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkInteger measures rendering throughput for the basic {int} template.
func BenchmarkInteger(b *testing.B) {
	c := IntegerCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkFloat measures rendering throughput for the basic {float} template.
func BenchmarkFloat(b *testing.B) {
	c := FloatCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkCountry measures rendering throughput for the basic {country} template.
func BenchmarkCountry(b *testing.B) {
	c := CountryCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkUnicode measures rendering throughput for the basic {unicode} template.
func BenchmarkUnicode(b *testing.B) {
	c := UnicodeCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
// BenchmarkFirstName measures rendering throughput for the basic {firstname} template.
func BenchmarkFirstName(b *testing.B) {
	c := FirstNameCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
func BenchmarkLastName(b *testing.B) {
c := FirstNameCases[0]
var cs *Callstack
var err error
if cs, err = BuildCallstack(c.Template); err != nil {
b.Error(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
result := &bytes.Buffer{}
err = cs.Write(result)
if err != nil {
b.Error(err)
}
}
}
// BenchmarkFullName measures rendering throughput for the combined name template.
func BenchmarkFullName(b *testing.B) {
	c := FullNameCases[0]
	cs, err := BuildCallstack(c.Template)
	if err != nil {
		// Fatal, not Error: continuing with a nil callstack would panic below.
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		result := &bytes.Buffer{}
		if err := cs.Write(result); err != nil {
			b.Error(err)
		}
	}
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"bytes"
"fmt"
"io"
"math/big"
"os"
"testing"
)
// TestBuiltinDelAttr verifies the delattr builtin: it removes an attribute,
// raises AttributeError for a missing one, and rejects wrong arity.
func TestBuiltinDelAttr(t *testing.T) {
	f := NewRootFrame()
	delattr := mustNotRaise(Builtins.GetItemString(f, "delattr"))
	fooType := newTestClass("Foo", []*Type{ObjectType}, NewDict())
	fooForDelAttr := newObject(fooType)
	fooValue := newObject(ObjectType)
	// Pre-populate "bar" so the happy-path delete has something to remove.
	mustNotRaise(nil, SetAttr(f, fooForDelAttr, NewStr("bar"), fooValue))
	// Wrapper calls delattr, then reports whether the attribute still
	// resolves to the original value (False means it was deleted).
	fun := wrapFuncForTest(func(f *Frame, args ...*Object) (*Object, *BaseException) {
		result, raised := delattr.Call(f, args, nil)
		if raised != nil {
			return nil, raised
		}
		val, raised := GetAttr(f, args[0], toStrUnsafe(args[1]), None)
		return newTestTuple(result, val == fooValue).ToObject(), nil
	})
	cases := []invokeTestCase{
		{args: wrapArgs(fooForDelAttr, "bar"), want: newTestTuple(None, False.ToObject()).ToObject()},
		{args: wrapArgs(fooForDelAttr, "baz"), wantExc: mustCreateException(AttributeErrorType, "'Foo' object has no attribute 'baz'")},
		{args: wrapArgs(fooForDelAttr), wantExc: mustCreateException(TypeErrorType, "'delattr' requires 2 arguments")},
		{args: wrapArgs(fooForDelAttr, "foo", "bar"), wantExc: mustCreateException(TypeErrorType, "'delattr' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
func TestBuiltinFuncs(t *testing.T) {
f := NewRootFrame()
objectDir := ObjectType.dict.Keys(f)
objectDir.Sort(f)
fooType := newTestClass("Foo", []*Type{ObjectType}, newStringDict(map[string]*Object{"bar": None}))
fooTypeDir := NewList(objectDir.elems...)
fooTypeDir.Append(NewStr("bar").ToObject())
fooTypeDir.Sort(f)
foo := newObject(fooType)
SetAttr(f, foo, NewStr("baz"), None)
fooDir := NewList(fooTypeDir.elems...)
fooDir.Append(NewStr("baz").ToObject())
fooDir.Sort(f)
iter := mustNotRaise(Iter(f, mustNotRaise(xrangeType.Call(f, wrapArgs(5), nil))))
neg := wrapFuncForTest(func(f *Frame, i int) int { return -i })
raiseKey := wrapFuncForTest(func(f *Frame, o *Object) *BaseException { return f.RaiseType(RuntimeErrorType, "foo") })
hexOctType := newTestClass("HexOct", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__hex__": newBuiltinFunction("__hex__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewStr("0xhexadecimal").ToObject(), nil
}).ToObject(),
"__oct__": newBuiltinFunction("__hex__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewStr("0octal").ToObject(), nil
}).ToObject(),
}))
badNonZeroType := newTestClass("BadNonZeroType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__nonzero__": newBuiltinFunction("__nonzero__", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return nil, f.RaiseType(RuntimeErrorType, "foo")
}).ToObject(),
}))
badNextType := newTestClass("BadNextType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"next": newBuiltinFunction("next", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return nil, f.RaiseType(RuntimeErrorType, "foo")
}).ToObject(),
}))
badIterType := newTestClass("BadIterType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__iter__": newBuiltinFunction("__iter__", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return newObject(badNextType), nil
}).ToObject(),
}))
addType := newTestClass("Add", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__add__": newBuiltinFunction("__add__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewInt(1).ToObject(), nil
}).ToObject(),
}))
fooBuiltinFunc := newBuiltinFunction("foo", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return newTestTuple(NewTuple(args.makeCopy()...), kwargs.makeDict()).ToObject(), nil
}).ToObject()
fooFunc := NewFunction(NewCode("foo", "foo.py", nil, CodeFlagVarArg, func(f *Frame, args []*Object) (*Object, *BaseException) {
return args[0], nil
}), nil)
cases := []struct {
f string
args Args
kwargs KWArgs
want *Object
wantExc *BaseException
}{
{f: "abs", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'abs' requires 1 arguments")},
{f: "abs", args: wrapArgs(1), want: NewInt(1).ToObject()},
{f: "abs", args: wrapArgs(-1), want: NewInt(1).ToObject()},
{f: "abs", args: wrapArgs(big.NewInt(2)), want: NewLong(big.NewInt(2)).ToObject()},
{f: "abs", args: wrapArgs(big.NewInt(-2)), want: NewLong(big.NewInt(2)).ToObject()},
{f: "abs", args: wrapArgs(NewFloat(3.4)), want: NewFloat(3.4).ToObject()},
{f: "abs", args: wrapArgs(NewFloat(-3.4)), want: NewFloat(3.4).ToObject()},
{f: "abs", args: wrapArgs(MinInt), want: NewLong(big.NewInt(MinInt).Neg(minIntBig)).ToObject()},
{f: "abs", args: wrapArgs(NewStr("a")), wantExc: mustCreateException(TypeErrorType, "bad operand type for abs(): 'str'")},
{f: "all", args: wrapArgs(newTestList()), want: True.ToObject()},
{f: "all", args: wrapArgs(newTestList(1, 2, 3)), want: True.ToObject()},
{f: "all", args: wrapArgs(newTestList(1, 0, 1)), want: False.ToObject()},
{f: "all", args: wrapArgs(13), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "all", args: wrapArgs(newTestList(newObject(badNonZeroType))), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "all", args: wrapArgs(newObject(badIterType)), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "any", args: wrapArgs(newTestList()), want: False.ToObject()},
{f: "any", args: wrapArgs(newTestList(1, 2, 3)), want: True.ToObject()},
{f: "any", args: wrapArgs(newTestList(1, 0, 1)), want: True.ToObject()},
{f: "any", args: wrapArgs(newTestList(0, 0, 0)), want: False.ToObject()},
{f: "any", args: wrapArgs(newTestList(False.ToObject(), False.ToObject())), want: False.ToObject()},
{f: "any", args: wrapArgs(13), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "any", args: wrapArgs(newTestList(newObject(badNonZeroType))), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "any", args: wrapArgs(newObject(badIterType)), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "bin", args: wrapArgs(64 + 8 + 1), want: NewStr("0b1001001").ToObject()},
{f: "bin", args: wrapArgs(MinInt), want: NewStr(fmt.Sprintf("-0b%b0", -(MinInt >> 1))).ToObject()},
{f: "bin", args: wrapArgs(0), want: NewStr("0b0").ToObject()},
{f: "bin", args: wrapArgs(1), want: NewStr("0b1").ToObject()},
{f: "bin", args: wrapArgs(-1), want: NewStr("-0b1").ToObject()},
{f: "bin", args: wrapArgs(big.NewInt(-1)), want: NewStr("-0b1").ToObject()},
{f: "bin", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "str object cannot be interpreted as an index")},
{f: "bin", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "float object cannot be interpreted as an index")},
{f: "bin", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'bin' requires 1 arguments")},
{f: "bin", args: wrapArgs(newTestIndexObject(123)), want: NewStr("0b1111011").ToObject()},
{f: "callable", args: wrapArgs(fooBuiltinFunc), want: True.ToObject()},
{f: "callable", args: wrapArgs(fooFunc), want: True.ToObject()},
{f: "callable", args: wrapArgs(0), want: False.ToObject()},
{f: "callable", args: wrapArgs(0.1), want: False.ToObject()},
{f: "callable", args: wrapArgs("foo"), want: False.ToObject()},
{f: "callable", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: False.ToObject()},
{f: "callable", args: wrapArgs(newTestList(1, 2, 3)), want: False.ToObject()},
{f: "callable", args: wrapArgs(iter), want: False.ToObject()},
{f: "callable", args: wrapArgs(1, 2), wantExc: mustCreateException(TypeErrorType, "'callable' requires 1 arguments")},
{f: "chr", args: wrapArgs(0), want: NewStr("\x00").ToObject()},
{f: "chr", args: wrapArgs(65), want: NewStr("A").ToObject()},
{f: "chr", args: wrapArgs(300), wantExc: mustCreateException(ValueErrorType, "chr() arg not in range(256)")},
{f: "chr", args: wrapArgs(-1), wantExc: mustCreateException(ValueErrorType, "chr() arg not in range(256)")},
{f: "chr", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'chr' requires 1 arguments")},
{f: "dir", args: wrapArgs(newObject(ObjectType)), want: objectDir.ToObject()},
{f: "dir", args: wrapArgs(newObject(fooType)), want: fooTypeDir.ToObject()},
{f: "dir", args: wrapArgs(foo), want: fooDir.ToObject()},
{f: "dir", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'dir' requires 1 arguments")},
{f: "divmod", args: wrapArgs(12, 7), want: NewTuple2(NewInt(1).ToObject(), NewInt(5).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-12, 7), want: NewTuple2(NewInt(-2).ToObject(), NewInt(2).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(12, -7), want: NewTuple2(NewInt(-2).ToObject(), NewInt(-2).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-12, -7), want: NewTuple2(NewInt(1).ToObject(), NewInt(-5).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MaxInt, MinInt), want: NewTuple2(NewInt(-1).ToObject(), NewInt(-1).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MinInt, MaxInt), want: NewTuple2(NewInt(-2).ToObject(), NewInt(MaxInt-1).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MinInt, -1), want: NewTuple2(NewLong(new(big.Int).Neg(minIntBig)).ToObject(), NewLong(big.NewInt(0)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(12), big.NewInt(7)), want: NewTuple2(NewLong(big.NewInt(1)).ToObject(), NewLong(big.NewInt(5)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(-12), big.NewInt(7)), want: NewTuple2(NewLong(big.NewInt(-2)).ToObject(), NewLong(big.NewInt(2)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(12), big.NewInt(-7)), want: NewTuple2(NewLong(big.NewInt(-2)).ToObject(), NewLong(big.NewInt(-2)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(-12), big.NewInt(-7)), want: NewTuple2(NewLong(big.NewInt(1)).ToObject(), NewLong(big.NewInt(-5)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(3.25, 1.0), want: NewTuple2(NewFloat(3.0).ToObject(), NewFloat(0.25).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-3.25, 1.0), want: NewTuple2(NewFloat(-4.0).ToObject(), NewFloat(0.75).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(3.25, -1.0), want: NewTuple2(NewFloat(-4.0).ToObject(), NewFloat(-0.75).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-3.25, -1.0), want: NewTuple2(NewFloat(3.0).ToObject(), NewFloat(-0.25).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(NewStr("a"), NewStr("b")), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for divmod(): 'str' and 'str'")},
{f: "divmod", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'divmod' requires 2 arguments")},
{f: "getattr", args: wrapArgs(None, NewStr("foo").ToObject(), NewStr("bar").ToObject()), want: NewStr("bar").ToObject()},
{f: "getattr", args: wrapArgs(None, NewStr("foo").ToObject()), wantExc: mustCreateException(AttributeErrorType, "'NoneType' object has no attribute 'foo'")},
{f: "hasattr", args: wrapArgs(newObject(ObjectType), NewStr("foo").ToObject()), want: False.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("bar").ToObject()), want: True.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("baz").ToObject()), want: True.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("qux").ToObject()), want: False.ToObject()},
{f: "hash", args: wrapArgs(123), want: NewInt(123).ToObject()},
{f: "hash", args: wrapArgs("foo"), want: hashFoo},
{f: "hash", args: wrapArgs(NewList()), wantExc: mustCreateException(TypeErrorType, "unhashable type: 'list'")},
{f: "hex", args: wrapArgs(0x63adbeef), want: NewStr("0x63adbeef").ToObject()},
{f: "hex", args: wrapArgs(0), want: NewStr("0x0").ToObject()},
{f: "hex", args: wrapArgs(1), want: NewStr("0x1").ToObject()},
{f: "hex", args: wrapArgs(-1), want: NewStr("-0x1").ToObject()},
{f: "hex", args: wrapArgs(big.NewInt(-1)), want: NewStr("-0x1L").ToObject()},
{f: "hex", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "hex() argument can't be converted to hex")},
{f: "hex", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "hex() argument can't be converted to hex")},
{f: "hex", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'hex' requires 1 arguments")},
{f: "hex", args: wrapArgs(newObject(hexOctType)), want: NewStr("0xhexadecimal").ToObject()},
{f: "id", args: wrapArgs(foo), want: NewInt(int(uintptr(foo.toPointer()))).ToObject()},
{f: "id", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'id' requires 1 arguments")},
{f: "isinstance", args: wrapArgs(NewInt(42).ToObject(), IntType.ToObject()), want: True.ToObject()},
{f: "isinstance", args: wrapArgs(NewStr("foo").ToObject(), TupleType.ToObject()), want: False.ToObject()},
{f: "isinstance", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'isinstance' requires 2 arguments")},
{f: "issubclass", args: wrapArgs(IntType, IntType), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(fooType, IntType), want: False.ToObject()},
{f: "issubclass", args: wrapArgs(fooType, ObjectType), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(FloatType, newTestTuple(IntType, StrType)), want: False.ToObject()},
{f: "issubclass", args: wrapArgs(FloatType, newTestTuple(IntType, FloatType)), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'issubclass' requires 2 arguments")},
{f: "iter", args: wrapArgs(iter), want: iter},
{f: "iter", args: wrapArgs(42), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "len", args: wrapArgs(newTestList(1, 2, 3)), want: NewInt(3).ToObject()},
{f: "len", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'len' requires 1 arguments")},
{f: "map", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "map() requires at least two args")},
{f: "map", args: wrapArgs(StrType), wantExc: mustCreateException(TypeErrorType, "map() requires at least two args")},
{f: "map", args: wrapArgs(None, newTestList()), want: newTestList().ToObject()},
{f: "map", args: wrapArgs(None, newTestList(1, 2, 3)), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(None, newTestDict("foo", 1, "bar", 3)), want: newTestList("foo", "bar").ToObject()},
{f: "map", args: wrapArgs(None, None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(StrType, None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(StrType, newTestList(), None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(newTestList(), newTestList(1, 2, 3)), wantExc: mustCreateException(TypeErrorType, "'list' object is not callable")},
{f: "map", args: wrapArgs(StrType, newTestList()), want: newTestList().ToObject()},
{f: "map", args: wrapArgs(StrType, newTestList(1, 2, 3)), want: newTestList("1", "2", "3").ToObject()},
{f: "map", args: wrapArgs(StrType, newTestList(-1, -2, -3)), want: newTestList("-1", "-2", "-3").ToObject()},
{f: "map", args: wrapArgs(IntType, newTestList("1", "2", "3")), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(IntType, newTestList("-1", "-2", "-3")), want: newTestList(-1, -2, -3).ToObject()},
{f: "map", args: wrapArgs(IntType, "123"), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(IntType, newTestDict("1", "11", "2", "22")), want: newTestList(1, 2).ToObject()},
{f: "map", args: wrapArgs(IntType, 1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "map", args: wrapArgs(1, newTestList(1, 2, 3)), wantExc: mustCreateException(TypeErrorType, "'int' object is not callable")},
{f: "map", args: wrapArgs(StrType, newTestList(), 1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "max", args: wrapArgs(2, 3, 1), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs("bar", "foo"), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs("bar", "foo"), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList("foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "max", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'max' requires 1 arguments")},
{f: "max", args: wrapArgs(newTestList()), wantExc: mustCreateException(ValueErrorType, "max() arg is an empty sequence")},
{f: "max", args: wrapArgs(1, 2), kwargs: wrapKWArgs("key", raiseKey), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "min", args: wrapArgs(2, 3, 1), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs("bar", "foo"), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs("bar", "foo"), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList("foo")), want: NewStr("foo").ToObject()},
{f: "min", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "min", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'min' requires 1 arguments")},
{f: "min", args: wrapArgs(newTestList()), wantExc: mustCreateException(ValueErrorType, "min() arg is an empty sequence")},
{f: "min", args: wrapArgs(1, 2), kwargs: wrapKWArgs("key", raiseKey), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "oct", args: wrapArgs(077), want: NewStr("077").ToObject()},
{f: "oct", args: wrapArgs(0), want: NewStr("0").ToObject()},
{f: "oct", args: wrapArgs(1), want: NewStr("01").ToObject()},
{f: "oct", args: wrapArgs(-1), want: NewStr("-01").ToObject()},
{f: "oct", args: wrapArgs(big.NewInt(-1)), want: NewStr("-01L").ToObject()},
{f: "oct", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "oct() argument can't be converted to oct")},
{f: "oct", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "oct() argument can't be converted to oct")},
{f: "oct", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'oct' requires 1 arguments")},
{f: "oct", args: wrapArgs(newObject(hexOctType)), want: NewStr("0octal").ToObject()},
{f: "ord", args: wrapArgs("a"), want: NewInt(97).ToObject()},
{f: "ord", args: wrapArgs(NewUnicode("樂")), want: NewInt(63764).ToObject()},
{f: "ord", args: wrapArgs("foo"), wantExc: mustCreateException(ValueErrorType, "ord() expected a character, but string of length 3 found")},
{f: "ord", args: wrapArgs(NewUnicode("волн")), wantExc: mustCreateException(ValueErrorType, "ord() expected a character, but string of length 4 found")},
{f: "ord", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'ord' requires 1 arguments")},
{f: "range", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'__new__' of 'int' requires 3 arguments")},
{f: "range", args: wrapArgs(3), want: newTestList(0, 1, 2).ToObject()},
{f: "range", args: wrapArgs(10, 0), want: NewList().ToObject()},
{f: "range", args: wrapArgs(-12, -23, -5), want: newTestList(-12, -17, -22).ToObject()},
{f: "repr", args: wrapArgs(123), want: NewStr("123").ToObject()},
{f: "repr", args: wrapArgs(NewUnicode("abc")), want: NewStr("u'abc'").ToObject()},
{f: "repr", args: wrapArgs(newTestTuple("foo", "bar")), want: NewStr("('foo', 'bar')").ToObject()},
{f: "repr", args: wrapArgs("a", "b", "c"), wantExc: mustCreateException(TypeErrorType, "'repr' requires 1 arguments")},
{f: "round", args: wrapArgs(1234.567), want: NewFloat(1235).ToObject()},
{f: "round", args: wrapArgs(1234.111), want: NewFloat(1234).ToObject()},
{f: "round", args: wrapArgs(-1234.567), want: NewFloat(-1235).ToObject()},
{f: "round", args: wrapArgs(-1234.111), want: NewFloat(-1234).ToObject()},
{f: "round", args: wrapArgs(1234.567, newTestIndexObject(0)), want: NewFloat(1235).ToObject()},
{f: "round", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "a float is required")},
{f: "round", args: wrapArgs(12.5, 0), want: NewFloat(13.0).ToObject()},
{f: "round", args: wrapArgs(-12.5, 0), want: NewFloat(-13.0).ToObject()},
{f: "round", args: wrapArgs(12.5, 3), want: NewFloat(12.5).ToObject()},
{f: "round", args: wrapArgs(1234.5, 1), want: NewFloat(1234.5).ToObject()},
{f: "round", args: wrapArgs(1234.5, 1), want: NewFloat(1234.5).ToObject()},
{f: "round", args: wrapArgs(1234.56, 1), want: NewFloat(1234.6).ToObject()},
{f: "round", args: wrapArgs(-1234.56, 1), want: NewFloat(-1234.6).ToObject()},
{f: "round", args: wrapArgs(-1234.56, -2), want: NewFloat(-1200.0).ToObject()},
{f: "round", args: wrapArgs(-1234.56, -8), want: NewFloat(0.0).ToObject()},
{f: "round", args: wrapArgs(63.4, -3), want: NewFloat(0.0).ToObject()},
{f: "round", args: wrapArgs(63.4, -2), want: NewFloat(100.0).ToObject()},
{f: "sorted", args: wrapArgs(NewList()), want: NewList().ToObject()},
{f: "sorted", args: wrapArgs(newTestList("foo", "bar")), want: newTestList("bar", "foo").ToObject()},
{f: "sorted", args: wrapArgs(newTestList(true, false)), want: newTestList(false, true).ToObject()},
{f: "sorted", args: wrapArgs(newTestList(1, 2, 0, 3)), want: newTestRange(4).ToObject()},
{f: "sorted", args: wrapArgs(newTestRange(100)), want: newTestRange(100).ToObject()},
{f: "sorted", args: wrapArgs(newTestTuple(1, 2, 0, 3)), want: newTestRange(4).ToObject()},
{f: "sorted", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: newTestList("bar", "foo").ToObject()},
{f: "sorted", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "sorted", args: wrapArgs(newTestList("foo", "bar"), 2), wantExc: mustCreateException(TypeErrorType, "'sorted' requires 1 arguments")},
{f: "sum", args: wrapArgs(newTestList(1, 2, 3, 4)), want: NewInt(10).ToObject()},
{f: "sum", args: wrapArgs(newTestList(1, 2), 3), want: NewFloat(6).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1)), want: NewFloat(3.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1, 2)), want: NewFloat(5.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1, 2.0)), want: NewFloat(5.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(1), newObject(addType)), want: NewInt(1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(newObject(addType)), newObject(addType)), want: NewInt(1).ToObject()},
{f: "unichr", args: wrapArgs(0), want: NewUnicode("\x00").ToObject()},
{f: "unichr", args: wrapArgs(65), want: NewStr("A").ToObject()},
{f: "unichr", args: wrapArgs(0x120000), wantExc: mustCreateException(ValueErrorType, "unichr() arg not in range(0x10ffff)")},
{f: "unichr", args: wrapArgs(-1), wantExc: mustCreateException(ValueErrorType, "unichr() arg not in range(0x10ffff)")},
{f: "unichr", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'unichr' requires 1 arguments")},
{f: "zip", args: wrapArgs(), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestTuple()), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestList()), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestList(1)), want: newTestList(newTestTuple(1).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestList(1, 2, 3)), want: newTestList(newTestTuple(1).ToObject(), newTestTuple(2).ToObject(), newTestTuple(3).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestRange(3)), want: newTestList(newTestTuple(0).ToObject(), newTestTuple(1).ToObject(), newTestTuple(2).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2, 3), newTestTuple(4, 5, 6)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject(), newTestTuple(3, 6).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2, 3), newTestTuple(4, 5)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2), newTestTuple(4, 5, 5)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "zip", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: newTestList(newTestTuple("foo").ToObject(), newTestTuple("bar").ToObject()).ToObject()},
}
for _, cas := range cases {
fun := mustNotRaise(Builtins.GetItemString(NewRootFrame(), cas.f))
if fun == nil {
t.Fatalf("%s not found in builtins: %v", cas.f, Builtins)
}
testCase := invokeTestCase{args: cas.args, kwargs: cas.kwargs, want: cas.want, wantExc: cas.wantExc}
if err := runInvokeTestCase(fun, &testCase); err != "" {
t.Error(err)
}
}
}
// TestBuiltinGlobals verifies that the builtin globals() returns a dict equal
// to the calling frame's globals.
func TestBuiltinGlobals(t *testing.T) {
	f := NewRootFrame()
	f.globals = newTestDict("foo", 1, "bar", 2, 42, None)
	globals := mustNotRaise(Builtins.GetItemString(f, "globals"))
	got, raised := globals.Call(f, nil, nil)
	want := newTestDict("foo", 1, "bar", 2, 42, None).ToObject()
	// checkResult distinguishes a wrong return value from an unexpected
	// exception. The two cases previously printed each other's message
	// (a value mismatch reported "raised", and vice versa); they are now
	// paired with the matching diagnostic.
	switch checkResult(got, want, raised, nil) {
	case checkInvokeResultReturnValueMismatch:
		t.Errorf("globals() = %v, want %v", got, want)
	case checkInvokeResultExceptionMismatch:
		t.Errorf("globals() raised %v, want nil", raised)
	}
}
// TestEllipsisRepr verifies that Ellipsis.__repr__() yields "Ellipsis".
func TestEllipsisRepr(t *testing.T) {
	testCase := invokeTestCase{
		args: wrapArgs(Ellipsis),
		want: NewStr("Ellipsis").ToObject(),
	}
	if errMsg := runInvokeMethodTestCase(EllipsisType, "__repr__", &testCase); errMsg != "" {
		t.Error(errMsg)
	}
}
// TestNoneRepr verifies that None.__repr__() yields "None".
func TestNoneRepr(t *testing.T) {
	testCase := invokeTestCase{
		args: wrapArgs(None),
		want: NewStr("None").ToObject(),
	}
	if errMsg := runInvokeMethodTestCase(NoneType, "__repr__", &testCase); errMsg != "" {
		t.Error(errMsg)
	}
}
// TestNotImplementedRepr verifies that NotImplemented.__repr__() yields
// "NotImplemented".
func TestNotImplementedRepr(t *testing.T) {
	testCase := invokeTestCase{
		args: wrapArgs(NotImplemented),
		want: NewStr("NotImplemented").ToObject(),
	}
	if errMsg := runInvokeMethodTestCase(NotImplementedType, "__repr__", &testCase); errMsg != "" {
		t.Error(errMsg)
	}
}
// captureStdout invokes a function closure which writes to stdout and captures
// its output as a string.
//
// Stdout is temporarily replaced by the write end of an os.Pipe and restored
// on return. fn runs in its own goroutine so that this function can drain the
// read end concurrently, avoiding a deadlock when fn writes more than the
// pipe buffer holds. If fn raises, the captured output is discarded and the
// exception is propagated.
func captureStdout(f *Frame, fn func() *BaseException) (string, *BaseException) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to open pipe: %v", err))
	}
	// Close the read end when done. Previously r was never closed, leaking a
	// file descriptor on every call. The write end is closed by the goroutine
	// below once fn returns, which delivers EOF to io.Copy.
	defer r.Close()
	oldStdout := Stdout
	Stdout = NewFileFromFD(w.Fd())
	defer func() {
		Stdout = oldStdout
	}()
	done := make(chan struct{})
	var raised *BaseException
	go func() {
		defer close(done)
		defer w.Close()
		raised = fn()
	}()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		return "", f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to copy buffer: %v", err))
	}
	// Wait for fn to finish so reading raised below is not a data race.
	<-done
	if raised != nil {
		return "", raised
	}
	return buf.String(), nil
}
// TestBuiltinPrint checks that the builtin print() writes the expected text
// to stdout for various combinations of positional args and sep/end keywords.
func TestBuiltinPrint(t *testing.T) {
	printAndCapture := wrapFuncForTest(func(f *Frame, args *Tuple, kwargs KWArgs) (string, *BaseException) {
		// Run print under captureStdout so the emitted text becomes the
		// wrapper's return value for comparison against want.
		return captureStdout(f, func() *BaseException {
			_, raised := builtinPrint(NewRootFrame(), args.elems, kwargs)
			return raised
		})
	})
	for _, cas := range []invokeTestCase{
		{args: wrapArgs(NewTuple(), wrapKWArgs()), want: NewStr("\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc"), wrapKWArgs()), want: NewStr("abc\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs()), want: NewStr("abc 123\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("sep", "")), want: NewStr("abc123\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("end", "")), want: NewStr("abc 123").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("sep", "XX", "end", "--")), want: NewStr("abcXX123--").ToObject()},
	} {
		if errMsg := runInvokeTestCase(printAndCapture, &cas); errMsg != "" {
			t.Error(errMsg)
		}
	}
}
// TestBuiltinSetAttr checks the builtin setattr(): successful assignments are
// verified by reading the attribute back, and wrong arities or a non-str
// attribute name produce TypeErrors.
func TestBuiltinSetAttr(t *testing.T) {
	setattr := mustNotRaise(Builtins.GetItemString(NewRootFrame(), "setattr"))
	fooType := newTestClass("Foo", []*Type{ObjectType}, newStringDict(map[string]*Object{}))
	foo := newObject(fooType)
	setAndGet := wrapFuncForTest(func(f *Frame, args ...*Object) (*Object, *BaseException) {
		ret, raised := setattr.Call(f, args, nil)
		if raised != nil {
			return nil, raised
		}
		// Read the attribute back so the test can confirm what was stored.
		stored, raised := GetAttr(f, args[0], toStrUnsafe(args[1]), nil)
		if raised != nil {
			return nil, raised
		}
		return newTestTuple(ret, stored).ToObject(), nil
	})
	for _, cas := range []invokeTestCase{
		{args: wrapArgs(foo), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(newObject(fooType), "foo", "bar"), want: newTestTuple(None, "bar").ToObject()},
		{args: wrapArgs(newObject(fooType), "foo", 123), want: newTestTuple(None, 123).ToObject()},
		{args: wrapArgs(foo, "foo"), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(foo, "foo", 123, None), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(foo, 123, 123), wantExc: mustCreateException(TypeErrorType, "'setattr' requires a 'str' object but received a \"int\"")},
	} {
		if errMsg := runInvokeTestCase(setAndGet, &cas); errMsg != "" {
			t.Error(errMsg)
		}
	}
}
// TestRawInput exercises builtinRawInput by feeding it a fake Stdin (the read
// end of an os.Pipe) and capturing both the value it returns and whatever it
// echoes to stdout (the prompt, when one is given).
func TestRawInput(t *testing.T) {
	// The wrapper's first argument s is the simulated user input; remaining
	// args are forwarded to raw_input itself. It returns the pair
	// (raw_input's result, captured stdout).
	fun := wrapFuncForTest(func(f *Frame, s string, args ...*Object) (*Object, *BaseException) {
		// Create a fake Stdin for input test.
		stdinFile, w, err := os.Pipe()
		if err != nil {
			return nil, f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to open pipe: %v", err))
		}
		// Write the simulated input asynchronously and close the write end so
		// reads observe EOF once s is consumed (the empty-string cases rely
		// on this to trigger EOFError).
		go func() {
			w.Write([]byte(s))
			w.Close()
		}()
		oldStdin := Stdin
		Stdin = NewFileFromFD(stdinFile.Fd())
		// Restore the real Stdin and release the pipe's read end, whether or
		// not raw_input raised.
		defer func() {
			Stdin = oldStdin
			stdinFile.Close()
		}()
		var input *Object
		// Run raw_input under captureStdout so any prompt it prints is
		// captured for comparison; the returned line is smuggled out via
		// the input variable.
		output, raised := captureStdout(f, func() *BaseException {
			in, raised := builtinRawInput(f, args, nil)
			input = in
			return raised
		})
		if raised != nil {
			return nil, raised
		}
		return newTestTuple(input, output).ToObject(), nil
	})
	// Each case is (stdin contents, raw_input args...); want is the pair
	// (line returned, prompt echoed). NOTE(review): "expcted" below mirrors
	// the exact (misspelled) message the implementation emits — keep them in
	// sync if the typo is ever fixed.
	cases := []invokeTestCase{
		{args: wrapArgs("HelloGrumpy\n", ""), want: newTestTuple("HelloGrumpy", "").ToObject()},
		{args: wrapArgs("HelloGrumpy\n", "ShouldBeShown\nShouldBeShown\t"), want: newTestTuple("HelloGrumpy", "ShouldBeShown\nShouldBeShown\t").ToObject()},
		{args: wrapArgs("HelloGrumpy\n", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
		{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", ""), want: newTestTuple("HelloGrumpy", "").ToObject()},
		{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", "ShouldBeShown\nShouldBeShown\t"), want: newTestTuple("HelloGrumpy", "ShouldBeShown\nShouldBeShown\t").ToObject()},
		{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
		{args: wrapArgs("", ""), wantExc: mustCreateException(EOFErrorType, "EOF when reading a line")},
		{args: wrapArgs("", "ShouldBeShown\nShouldBeShown\t"), wantExc: mustCreateException(EOFErrorType, "EOF when reading a line")},
		{args: wrapArgs("", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// newTestIndexObject returns an instance of a fresh "Index" class whose
// __index__ method always reports the given value.
func newTestIndexObject(index int) *Object {
	indexFunc := newBuiltinFunction("__index__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
		return NewInt(index).ToObject(), nil
	})
	cls := newTestClass("Index", []*Type{ObjectType}, newStringDict(map[string]*Object{
		"__index__": indexFunc.ToObject(),
	}))
	return newObject(cls)
}
Comment out TestRawInput.
This test is causing too many Travis CI failures; it will be re-enabled once https://github.com/google/grumpy/issues/282 is addressed.
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"bytes"
"fmt"
"io"
"math/big"
"os"
"testing"
)
// TestBuiltinDelAttr exercises the builtin delattr(): deleting an existing
// attribute, deleting a missing one, and passing the wrong number of
// arguments.
func TestBuiltinDelAttr(t *testing.T) {
	f := NewRootFrame()
	delattr := mustNotRaise(Builtins.GetItemString(f, "delattr"))
	fooType := newTestClass("Foo", []*Type{ObjectType}, NewDict())
	fooForDelAttr := newObject(fooType)
	fooValue := newObject(ObjectType)
	mustNotRaise(nil, SetAttr(f, fooForDelAttr, NewStr("bar"), fooValue))
	// The wrapper deletes the attribute then looks it back up (with a None
	// default) to confirm the original value is gone. It returns the pair
	// (delattr's result, whether the original value is still present).
	fun := wrapFuncForTest(func(f *Frame, args ...*Object) (*Object, *BaseException) {
		result, raised := delattr.Call(f, args, nil)
		if raised != nil {
			return nil, raised
		}
		val, raised := GetAttr(f, args[0], toStrUnsafe(args[1]), None)
		if raised != nil {
			// Previously this error was silently dropped, which would have
			// left val nil and built the tuple from a nil object. Propagate
			// it instead.
			return nil, raised
		}
		return newTestTuple(result, val == fooValue).ToObject(), nil
	})
	cases := []invokeTestCase{
		{args: wrapArgs(fooForDelAttr, "bar"), want: newTestTuple(None, False.ToObject()).ToObject()},
		{args: wrapArgs(fooForDelAttr, "baz"), wantExc: mustCreateException(AttributeErrorType, "'Foo' object has no attribute 'baz'")},
		{args: wrapArgs(fooForDelAttr), wantExc: mustCreateException(TypeErrorType, "'delattr' requires 2 arguments")},
		{args: wrapArgs(fooForDelAttr, "foo", "bar"), wantExc: mustCreateException(TypeErrorType, "'delattr' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestBuiltinFuncs table-tests a broad set of builtin functions (abs, all,
// any, bin, callable, chr, dir, divmod, getattr, hasattr, hash, hex, id,
// isinstance, issubclass, iter, len, map, max, min, oct, ord, range, repr,
// round, sorted, sum, unichr, zip). Each case names the builtin, gives its
// args/kwargs, and states either the expected result or the expected
// exception.
func TestBuiltinFuncs(t *testing.T) {
f := NewRootFrame()
// Expected dir() results: objectDir for a plain object, fooTypeDir adds
// the class attribute "bar", fooDir additionally adds the instance
// attribute "baz". All are kept sorted to match dir()'s output order.
objectDir := ObjectType.dict.Keys(f)
objectDir.Sort(f)
fooType := newTestClass("Foo", []*Type{ObjectType}, newStringDict(map[string]*Object{"bar": None}))
fooTypeDir := NewList(objectDir.elems...)
fooTypeDir.Append(NewStr("bar").ToObject())
fooTypeDir.Sort(f)
foo := newObject(fooType)
// NOTE(review): SetAttr's exception result is ignored here — presumably
// it cannot fail for a plain object; confirm.
SetAttr(f, foo, NewStr("baz"), None)
fooDir := NewList(fooTypeDir.elems...)
fooDir.Append(NewStr("baz").ToObject())
fooDir.Sort(f)
iter := mustNotRaise(Iter(f, mustNotRaise(xrangeType.Call(f, wrapArgs(5), nil))))
// Helper callables used as key= functions in the max/min cases.
neg := wrapFuncForTest(func(f *Frame, i int) int { return -i })
raiseKey := wrapFuncForTest(func(f *Frame, o *Object) *BaseException { return f.RaiseType(RuntimeErrorType, "foo") })
// hexOctType customizes __hex__/__oct__ so hex()/oct() dispatch is covered.
// NOTE(review): the __oct__ builtin is constructed with the name "__hex__";
// looks like a copy-paste slip, though the name is not asserted on here.
hexOctType := newTestClass("HexOct", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__hex__": newBuiltinFunction("__hex__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewStr("0xhexadecimal").ToObject(), nil
}).ToObject(),
"__oct__": newBuiltinFunction("__hex__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewStr("0octal").ToObject(), nil
}).ToObject(),
}))
// Types whose protocol methods raise, to exercise error propagation.
badNonZeroType := newTestClass("BadNonZeroType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__nonzero__": newBuiltinFunction("__nonzero__", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return nil, f.RaiseType(RuntimeErrorType, "foo")
}).ToObject(),
}))
badNextType := newTestClass("BadNextType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"next": newBuiltinFunction("next", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return nil, f.RaiseType(RuntimeErrorType, "foo")
}).ToObject(),
}))
badIterType := newTestClass("BadIterType", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__iter__": newBuiltinFunction("__iter__", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return newObject(badNextType), nil
}).ToObject(),
}))
// addType always returns 1 from __add__, used by the sum() cases.
addType := newTestClass("Add", []*Type{ObjectType}, newStringDict(map[string]*Object{
"__add__": newBuiltinFunction("__add__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
return NewInt(1).ToObject(), nil
}).ToObject(),
}))
// Sample callables for the callable() cases.
fooBuiltinFunc := newBuiltinFunction("foo", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
return newTestTuple(NewTuple(args.makeCopy()...), kwargs.makeDict()).ToObject(), nil
}).ToObject()
fooFunc := NewFunction(NewCode("foo", "foo.py", nil, CodeFlagVarArg, func(f *Frame, args []*Object) (*Object, *BaseException) {
return args[0], nil
}), nil)
cases := []struct {
f string          // builtin name looked up in Builtins
args Args         // positional arguments
kwargs KWArgs     // keyword arguments
want *Object      // expected result (nil when an exception is expected)
wantExc *BaseException // expected exception (nil when a result is expected)
}{
{f: "abs", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'abs' requires 1 arguments")},
{f: "abs", args: wrapArgs(1), want: NewInt(1).ToObject()},
{f: "abs", args: wrapArgs(-1), want: NewInt(1).ToObject()},
{f: "abs", args: wrapArgs(big.NewInt(2)), want: NewLong(big.NewInt(2)).ToObject()},
{f: "abs", args: wrapArgs(big.NewInt(-2)), want: NewLong(big.NewInt(2)).ToObject()},
{f: "abs", args: wrapArgs(NewFloat(3.4)), want: NewFloat(3.4).ToObject()},
{f: "abs", args: wrapArgs(NewFloat(-3.4)), want: NewFloat(3.4).ToObject()},
{f: "abs", args: wrapArgs(MinInt), want: NewLong(big.NewInt(MinInt).Neg(minIntBig)).ToObject()},
{f: "abs", args: wrapArgs(NewStr("a")), wantExc: mustCreateException(TypeErrorType, "bad operand type for abs(): 'str'")},
{f: "all", args: wrapArgs(newTestList()), want: True.ToObject()},
{f: "all", args: wrapArgs(newTestList(1, 2, 3)), want: True.ToObject()},
{f: "all", args: wrapArgs(newTestList(1, 0, 1)), want: False.ToObject()},
{f: "all", args: wrapArgs(13), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "all", args: wrapArgs(newTestList(newObject(badNonZeroType))), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "all", args: wrapArgs(newObject(badIterType)), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "any", args: wrapArgs(newTestList()), want: False.ToObject()},
{f: "any", args: wrapArgs(newTestList(1, 2, 3)), want: True.ToObject()},
{f: "any", args: wrapArgs(newTestList(1, 0, 1)), want: True.ToObject()},
{f: "any", args: wrapArgs(newTestList(0, 0, 0)), want: False.ToObject()},
{f: "any", args: wrapArgs(newTestList(False.ToObject(), False.ToObject())), want: False.ToObject()},
{f: "any", args: wrapArgs(13), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "any", args: wrapArgs(newTestList(newObject(badNonZeroType))), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "any", args: wrapArgs(newObject(badIterType)), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "bin", args: wrapArgs(64 + 8 + 1), want: NewStr("0b1001001").ToObject()},
{f: "bin", args: wrapArgs(MinInt), want: NewStr(fmt.Sprintf("-0b%b0", -(MinInt >> 1))).ToObject()},
{f: "bin", args: wrapArgs(0), want: NewStr("0b0").ToObject()},
{f: "bin", args: wrapArgs(1), want: NewStr("0b1").ToObject()},
{f: "bin", args: wrapArgs(-1), want: NewStr("-0b1").ToObject()},
{f: "bin", args: wrapArgs(big.NewInt(-1)), want: NewStr("-0b1").ToObject()},
{f: "bin", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "str object cannot be interpreted as an index")},
{f: "bin", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "float object cannot be interpreted as an index")},
{f: "bin", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'bin' requires 1 arguments")},
{f: "bin", args: wrapArgs(newTestIndexObject(123)), want: NewStr("0b1111011").ToObject()},
{f: "callable", args: wrapArgs(fooBuiltinFunc), want: True.ToObject()},
{f: "callable", args: wrapArgs(fooFunc), want: True.ToObject()},
{f: "callable", args: wrapArgs(0), want: False.ToObject()},
{f: "callable", args: wrapArgs(0.1), want: False.ToObject()},
{f: "callable", args: wrapArgs("foo"), want: False.ToObject()},
{f: "callable", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: False.ToObject()},
{f: "callable", args: wrapArgs(newTestList(1, 2, 3)), want: False.ToObject()},
{f: "callable", args: wrapArgs(iter), want: False.ToObject()},
{f: "callable", args: wrapArgs(1, 2), wantExc: mustCreateException(TypeErrorType, "'callable' requires 1 arguments")},
{f: "chr", args: wrapArgs(0), want: NewStr("\x00").ToObject()},
{f: "chr", args: wrapArgs(65), want: NewStr("A").ToObject()},
{f: "chr", args: wrapArgs(300), wantExc: mustCreateException(ValueErrorType, "chr() arg not in range(256)")},
{f: "chr", args: wrapArgs(-1), wantExc: mustCreateException(ValueErrorType, "chr() arg not in range(256)")},
{f: "chr", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'chr' requires 1 arguments")},
{f: "dir", args: wrapArgs(newObject(ObjectType)), want: objectDir.ToObject()},
{f: "dir", args: wrapArgs(newObject(fooType)), want: fooTypeDir.ToObject()},
{f: "dir", args: wrapArgs(foo), want: fooDir.ToObject()},
{f: "dir", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'dir' requires 1 arguments")},
{f: "divmod", args: wrapArgs(12, 7), want: NewTuple2(NewInt(1).ToObject(), NewInt(5).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-12, 7), want: NewTuple2(NewInt(-2).ToObject(), NewInt(2).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(12, -7), want: NewTuple2(NewInt(-2).ToObject(), NewInt(-2).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-12, -7), want: NewTuple2(NewInt(1).ToObject(), NewInt(-5).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MaxInt, MinInt), want: NewTuple2(NewInt(-1).ToObject(), NewInt(-1).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MinInt, MaxInt), want: NewTuple2(NewInt(-2).ToObject(), NewInt(MaxInt-1).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(MinInt, -1), want: NewTuple2(NewLong(new(big.Int).Neg(minIntBig)).ToObject(), NewLong(big.NewInt(0)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(12), big.NewInt(7)), want: NewTuple2(NewLong(big.NewInt(1)).ToObject(), NewLong(big.NewInt(5)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(-12), big.NewInt(7)), want: NewTuple2(NewLong(big.NewInt(-2)).ToObject(), NewLong(big.NewInt(2)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(12), big.NewInt(-7)), want: NewTuple2(NewLong(big.NewInt(-2)).ToObject(), NewLong(big.NewInt(-2)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(big.NewInt(-12), big.NewInt(-7)), want: NewTuple2(NewLong(big.NewInt(1)).ToObject(), NewLong(big.NewInt(-5)).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(3.25, 1.0), want: NewTuple2(NewFloat(3.0).ToObject(), NewFloat(0.25).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-3.25, 1.0), want: NewTuple2(NewFloat(-4.0).ToObject(), NewFloat(0.75).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(3.25, -1.0), want: NewTuple2(NewFloat(-4.0).ToObject(), NewFloat(-0.75).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(-3.25, -1.0), want: NewTuple2(NewFloat(3.0).ToObject(), NewFloat(-0.25).ToObject()).ToObject()},
{f: "divmod", args: wrapArgs(NewStr("a"), NewStr("b")), wantExc: mustCreateException(TypeErrorType, "unsupported operand type(s) for divmod(): 'str' and 'str'")},
{f: "divmod", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'divmod' requires 2 arguments")},
{f: "getattr", args: wrapArgs(None, NewStr("foo").ToObject(), NewStr("bar").ToObject()), want: NewStr("bar").ToObject()},
{f: "getattr", args: wrapArgs(None, NewStr("foo").ToObject()), wantExc: mustCreateException(AttributeErrorType, "'NoneType' object has no attribute 'foo'")},
{f: "hasattr", args: wrapArgs(newObject(ObjectType), NewStr("foo").ToObject()), want: False.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("bar").ToObject()), want: True.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("baz").ToObject()), want: True.ToObject()},
{f: "hasattr", args: wrapArgs(foo, NewStr("qux").ToObject()), want: False.ToObject()},
{f: "hash", args: wrapArgs(123), want: NewInt(123).ToObject()},
{f: "hash", args: wrapArgs("foo"), want: hashFoo},
{f: "hash", args: wrapArgs(NewList()), wantExc: mustCreateException(TypeErrorType, "unhashable type: 'list'")},
{f: "hex", args: wrapArgs(0x63adbeef), want: NewStr("0x63adbeef").ToObject()},
{f: "hex", args: wrapArgs(0), want: NewStr("0x0").ToObject()},
{f: "hex", args: wrapArgs(1), want: NewStr("0x1").ToObject()},
{f: "hex", args: wrapArgs(-1), want: NewStr("-0x1").ToObject()},
{f: "hex", args: wrapArgs(big.NewInt(-1)), want: NewStr("-0x1L").ToObject()},
{f: "hex", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "hex() argument can't be converted to hex")},
{f: "hex", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "hex() argument can't be converted to hex")},
{f: "hex", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'hex' requires 1 arguments")},
{f: "hex", args: wrapArgs(newObject(hexOctType)), want: NewStr("0xhexadecimal").ToObject()},
{f: "id", args: wrapArgs(foo), want: NewInt(int(uintptr(foo.toPointer()))).ToObject()},
{f: "id", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'id' requires 1 arguments")},
{f: "isinstance", args: wrapArgs(NewInt(42).ToObject(), IntType.ToObject()), want: True.ToObject()},
{f: "isinstance", args: wrapArgs(NewStr("foo").ToObject(), TupleType.ToObject()), want: False.ToObject()},
{f: "isinstance", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'isinstance' requires 2 arguments")},
{f: "issubclass", args: wrapArgs(IntType, IntType), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(fooType, IntType), want: False.ToObject()},
{f: "issubclass", args: wrapArgs(fooType, ObjectType), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(FloatType, newTestTuple(IntType, StrType)), want: False.ToObject()},
{f: "issubclass", args: wrapArgs(FloatType, newTestTuple(IntType, FloatType)), want: True.ToObject()},
{f: "issubclass", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'issubclass' requires 2 arguments")},
{f: "iter", args: wrapArgs(iter), want: iter},
{f: "iter", args: wrapArgs(42), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "len", args: wrapArgs(newTestList(1, 2, 3)), want: NewInt(3).ToObject()},
{f: "len", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'len' requires 1 arguments")},
{f: "map", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "map() requires at least two args")},
{f: "map", args: wrapArgs(StrType), wantExc: mustCreateException(TypeErrorType, "map() requires at least two args")},
{f: "map", args: wrapArgs(None, newTestList()), want: newTestList().ToObject()},
{f: "map", args: wrapArgs(None, newTestList(1, 2, 3)), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(None, newTestDict("foo", 1, "bar", 3)), want: newTestList("foo", "bar").ToObject()},
{f: "map", args: wrapArgs(None, None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(StrType, None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(StrType, newTestList(), None), wantExc: mustCreateException(TypeErrorType, "'NoneType' object is not iterable")},
{f: "map", args: wrapArgs(newTestList(), newTestList(1, 2, 3)), wantExc: mustCreateException(TypeErrorType, "'list' object is not callable")},
{f: "map", args: wrapArgs(StrType, newTestList()), want: newTestList().ToObject()},
{f: "map", args: wrapArgs(StrType, newTestList(1, 2, 3)), want: newTestList("1", "2", "3").ToObject()},
{f: "map", args: wrapArgs(StrType, newTestList(-1, -2, -3)), want: newTestList("-1", "-2", "-3").ToObject()},
{f: "map", args: wrapArgs(IntType, newTestList("1", "2", "3")), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(IntType, newTestList("-1", "-2", "-3")), want: newTestList(-1, -2, -3).ToObject()},
{f: "map", args: wrapArgs(IntType, "123"), want: newTestList(1, 2, 3).ToObject()},
{f: "map", args: wrapArgs(IntType, newTestDict("1", "11", "2", "22")), want: newTestList(1, 2).ToObject()},
{f: "map", args: wrapArgs(IntType, 1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "map", args: wrapArgs(1, newTestList(1, 2, 3)), wantExc: mustCreateException(TypeErrorType, "'int' object is not callable")},
{f: "map", args: wrapArgs(StrType, newTestList(), 1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "max", args: wrapArgs(2, 3, 1), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs("bar", "foo"), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs("bar", "foo"), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(3).ToObject()},
{f: "max", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(1).ToObject()},
{f: "max", args: wrapArgs(newTestList("foo")), want: NewStr("foo").ToObject()},
{f: "max", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "max", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'max' requires 1 arguments")},
{f: "max", args: wrapArgs(newTestList()), wantExc: mustCreateException(ValueErrorType, "max() arg is an empty sequence")},
{f: "max", args: wrapArgs(1, 2), kwargs: wrapKWArgs("key", raiseKey), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "min", args: wrapArgs(2, 3, 1), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs("bar", "foo"), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs("bar", "foo"), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), want: NewInt(1).ToObject()},
{f: "min", args: wrapArgs(newTestList("bar", "foo")), want: NewStr("bar").ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(2, 3, 1), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(1, 2, 3), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(2, 3, 1)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList(1, 2, 3)), kwargs: wrapKWArgs("key", neg), want: NewInt(3).ToObject()},
{f: "min", args: wrapArgs(newTestList("foo")), want: NewStr("foo").ToObject()},
{f: "min", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "min", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'min' requires 1 arguments")},
{f: "min", args: wrapArgs(newTestList()), wantExc: mustCreateException(ValueErrorType, "min() arg is an empty sequence")},
{f: "min", args: wrapArgs(1, 2), kwargs: wrapKWArgs("key", raiseKey), wantExc: mustCreateException(RuntimeErrorType, "foo")},
{f: "oct", args: wrapArgs(077), want: NewStr("077").ToObject()},
{f: "oct", args: wrapArgs(0), want: NewStr("0").ToObject()},
{f: "oct", args: wrapArgs(1), want: NewStr("01").ToObject()},
{f: "oct", args: wrapArgs(-1), want: NewStr("-01").ToObject()},
{f: "oct", args: wrapArgs(big.NewInt(-1)), want: NewStr("-01L").ToObject()},
{f: "oct", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "oct() argument can't be converted to oct")},
{f: "oct", args: wrapArgs(0.1), wantExc: mustCreateException(TypeErrorType, "oct() argument can't be converted to oct")},
{f: "oct", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'oct' requires 1 arguments")},
{f: "oct", args: wrapArgs(newObject(hexOctType)), want: NewStr("0octal").ToObject()},
{f: "ord", args: wrapArgs("a"), want: NewInt(97).ToObject()},
{f: "ord", args: wrapArgs(NewUnicode("樂")), want: NewInt(63764).ToObject()},
{f: "ord", args: wrapArgs("foo"), wantExc: mustCreateException(ValueErrorType, "ord() expected a character, but string of length 3 found")},
{f: "ord", args: wrapArgs(NewUnicode("волн")), wantExc: mustCreateException(ValueErrorType, "ord() expected a character, but string of length 4 found")},
{f: "ord", args: wrapArgs(1, 2, 3), wantExc: mustCreateException(TypeErrorType, "'ord' requires 1 arguments")},
{f: "range", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'__new__' of 'int' requires 3 arguments")},
{f: "range", args: wrapArgs(3), want: newTestList(0, 1, 2).ToObject()},
{f: "range", args: wrapArgs(10, 0), want: NewList().ToObject()},
{f: "range", args: wrapArgs(-12, -23, -5), want: newTestList(-12, -17, -22).ToObject()},
{f: "repr", args: wrapArgs(123), want: NewStr("123").ToObject()},
{f: "repr", args: wrapArgs(NewUnicode("abc")), want: NewStr("u'abc'").ToObject()},
{f: "repr", args: wrapArgs(newTestTuple("foo", "bar")), want: NewStr("('foo', 'bar')").ToObject()},
{f: "repr", args: wrapArgs("a", "b", "c"), wantExc: mustCreateException(TypeErrorType, "'repr' requires 1 arguments")},
{f: "round", args: wrapArgs(1234.567), want: NewFloat(1235).ToObject()},
{f: "round", args: wrapArgs(1234.111), want: NewFloat(1234).ToObject()},
{f: "round", args: wrapArgs(-1234.567), want: NewFloat(-1235).ToObject()},
{f: "round", args: wrapArgs(-1234.111), want: NewFloat(-1234).ToObject()},
{f: "round", args: wrapArgs(1234.567, newTestIndexObject(0)), want: NewFloat(1235).ToObject()},
{f: "round", args: wrapArgs("foo"), wantExc: mustCreateException(TypeErrorType, "a float is required")},
{f: "round", args: wrapArgs(12.5, 0), want: NewFloat(13.0).ToObject()},
{f: "round", args: wrapArgs(-12.5, 0), want: NewFloat(-13.0).ToObject()},
{f: "round", args: wrapArgs(12.5, 3), want: NewFloat(12.5).ToObject()},
{f: "round", args: wrapArgs(1234.5, 1), want: NewFloat(1234.5).ToObject()},
{f: "round", args: wrapArgs(1234.5, 1), want: NewFloat(1234.5).ToObject()},
{f: "round", args: wrapArgs(1234.56, 1), want: NewFloat(1234.6).ToObject()},
{f: "round", args: wrapArgs(-1234.56, 1), want: NewFloat(-1234.6).ToObject()},
{f: "round", args: wrapArgs(-1234.56, -2), want: NewFloat(-1200.0).ToObject()},
{f: "round", args: wrapArgs(-1234.56, -8), want: NewFloat(0.0).ToObject()},
{f: "round", args: wrapArgs(63.4, -3), want: NewFloat(0.0).ToObject()},
{f: "round", args: wrapArgs(63.4, -2), want: NewFloat(100.0).ToObject()},
{f: "sorted", args: wrapArgs(NewList()), want: NewList().ToObject()},
{f: "sorted", args: wrapArgs(newTestList("foo", "bar")), want: newTestList("bar", "foo").ToObject()},
{f: "sorted", args: wrapArgs(newTestList(true, false)), want: newTestList(false, true).ToObject()},
{f: "sorted", args: wrapArgs(newTestList(1, 2, 0, 3)), want: newTestRange(4).ToObject()},
{f: "sorted", args: wrapArgs(newTestRange(100)), want: newTestRange(100).ToObject()},
{f: "sorted", args: wrapArgs(newTestTuple(1, 2, 0, 3)), want: newTestRange(4).ToObject()},
{f: "sorted", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: newTestList("bar", "foo").ToObject()},
{f: "sorted", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "sorted", args: wrapArgs(newTestList("foo", "bar"), 2), wantExc: mustCreateException(TypeErrorType, "'sorted' requires 1 arguments")},
{f: "sum", args: wrapArgs(newTestList(1, 2, 3, 4)), want: NewInt(10).ToObject()},
{f: "sum", args: wrapArgs(newTestList(1, 2), 3), want: NewFloat(6).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1)), want: NewFloat(3.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1, 2)), want: NewFloat(5.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(2, 1.1, 2.0)), want: NewFloat(5.1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(1), newObject(addType)), want: NewInt(1).ToObject()},
{f: "sum", args: wrapArgs(newTestList(newObject(addType)), newObject(addType)), want: NewInt(1).ToObject()},
{f: "unichr", args: wrapArgs(0), want: NewUnicode("\x00").ToObject()},
{f: "unichr", args: wrapArgs(65), want: NewStr("A").ToObject()},
{f: "unichr", args: wrapArgs(0x120000), wantExc: mustCreateException(ValueErrorType, "unichr() arg not in range(0x10ffff)")},
{f: "unichr", args: wrapArgs(-1), wantExc: mustCreateException(ValueErrorType, "unichr() arg not in range(0x10ffff)")},
{f: "unichr", args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "'unichr' requires 1 arguments")},
{f: "zip", args: wrapArgs(), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestTuple()), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestList()), want: newTestList().ToObject()},
{f: "zip", args: wrapArgs(newTestList(1)), want: newTestList(newTestTuple(1).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestList(1, 2, 3)), want: newTestList(newTestTuple(1).ToObject(), newTestTuple(2).ToObject(), newTestTuple(3).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestRange(3)), want: newTestList(newTestTuple(0).ToObject(), newTestTuple(1).ToObject(), newTestTuple(2).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2, 3), newTestTuple(4, 5, 6)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject(), newTestTuple(3, 6).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2, 3), newTestTuple(4, 5)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(newTestTuple(1, 2), newTestTuple(4, 5, 5)), want: NewList(newTestTuple(1, 4).ToObject(), newTestTuple(2, 5).ToObject()).ToObject()},
{f: "zip", args: wrapArgs(1), wantExc: mustCreateException(TypeErrorType, "'int' object is not iterable")},
{f: "zip", args: wrapArgs(newTestDict("foo", 1, "bar", 2)), want: newTestList(newTestTuple("foo").ToObject(), newTestTuple("bar").ToObject()).ToObject()},
}
// Look up each builtin by name and run the case through the shared
// invoke-test harness; a non-empty string return is a failure message.
for _, cas := range cases {
fun := mustNotRaise(Builtins.GetItemString(NewRootFrame(), cas.f))
if fun == nil {
t.Fatalf("%s not found in builtins: %v", cas.f, Builtins)
}
testCase := invokeTestCase{args: cas.args, kwargs: cas.kwargs, want: cas.want, wantExc: cas.wantExc}
if err := runInvokeTestCase(fun, &testCase); err != "" {
t.Error(err)
}
}
}
// TestBuiltinGlobals checks that the builtin globals() returns the frame's
// globals dict unchanged.
//
// Fix: the two mismatch cases previously printed each other's diagnostics —
// a return-value mismatch reported "raised %v" and an exception mismatch
// reported "globals() = %v". The messages are now attached to the matching
// checkResult outcomes.
func TestBuiltinGlobals(t *testing.T) {
	f := NewRootFrame()
	f.globals = newTestDict("foo", 1, "bar", 2, 42, None)
	globals := mustNotRaise(Builtins.GetItemString(f, "globals"))
	got, raised := globals.Call(f, nil, nil)
	want := newTestDict("foo", 1, "bar", 2, 42, None).ToObject()
	switch checkResult(got, want, raised, nil) {
	case checkInvokeResultReturnValueMismatch:
		t.Errorf("globals() = %v, want %v", got, want)
	case checkInvokeResultExceptionMismatch:
		t.Errorf("globals() raised %v, want nil", raised)
	}
}
// TestEllipsisRepr asserts that repr() of the Ellipsis singleton is the
// literal string "Ellipsis".
func TestEllipsisRepr(t *testing.T) {
	tc := invokeTestCase{args: wrapArgs(Ellipsis), want: NewStr("Ellipsis").ToObject()}
	if msg := runInvokeMethodTestCase(EllipsisType, "__repr__", &tc); msg != "" {
		t.Error(msg)
	}
}
// TestNoneRepr asserts that repr() of the None singleton is the literal
// string "None".
func TestNoneRepr(t *testing.T) {
	tc := invokeTestCase{args: wrapArgs(None), want: NewStr("None").ToObject()}
	if msg := runInvokeMethodTestCase(NoneType, "__repr__", &tc); msg != "" {
		t.Error(msg)
	}
}
// TestNotImplementedRepr asserts that repr() of the NotImplemented singleton
// is the literal string "NotImplemented".
func TestNotImplementedRepr(t *testing.T) {
	tc := invokeTestCase{args: wrapArgs(NotImplemented), want: NewStr("NotImplemented").ToObject()}
	if msg := runInvokeMethodTestCase(NotImplementedType, "__repr__", &tc); msg != "" {
		t.Error(msg)
	}
}
// captureStdout invokes a function closure which writes to stdout and captures
// its output as string.
//
// It swaps the interpreter-level Stdout for the write end of an os.Pipe,
// runs fn in a goroutine (closing the write end when fn returns so the
// reader sees EOF), and drains the read end into a buffer on the calling
// goroutine. An exception raised by fn takes precedence over the captured
// output.
//
// NOTE(review): Stdout is package-level state, so this helper is not safe
// for concurrent use by parallel tests — confirm callers run serially.
func captureStdout(f *Frame, fn func() *BaseException) (string, *BaseException) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to open pipe: %v", err))
	}
	// Temporarily redirect the interpreter's stdout to the pipe's write end;
	// the deferred restore runs after the copy below has finished.
	oldStdout := Stdout
	Stdout = NewFileFromFD(w.Fd())
	defer func() {
		Stdout = oldStdout
	}()
	done := make(chan struct{})
	var raised *BaseException
	go func() {
		defer close(done)
		defer w.Close() // closing the write end delivers EOF to io.Copy below
		raised = fn()
	}()
	// Drain the pipe before waiting on done: copying here prevents a
	// deadlock if fn writes more than the pipe buffer can hold.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		return "", f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to copy buffer: %v", err))
	}
	<-done // fn has returned; raised is now safe to read
	if raised != nil {
		return "", raised
	}
	return buf.String(), nil
}
// TestBuiltinPrint verifies print()'s output formatting, including the
// "sep" and "end" keyword arguments, by asserting on the exact bytes
// written to stdout.
func TestBuiltinPrint(t *testing.T) {
	printer := wrapFuncForTest(func(f *Frame, args *Tuple, kwargs KWArgs) (string, *BaseException) {
		return captureStdout(f, func() *BaseException {
			_, raised := builtinPrint(NewRootFrame(), args.elems, kwargs)
			return raised
		})
	})
	cases := []invokeTestCase{
		{args: wrapArgs(NewTuple(), wrapKWArgs()), want: NewStr("\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc"), wrapKWArgs()), want: NewStr("abc\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs()), want: NewStr("abc 123\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("sep", "")), want: NewStr("abc123\n").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("end", "")), want: NewStr("abc 123").ToObject()},
		{args: wrapArgs(newTestTuple("abc", 123), wrapKWArgs("sep", "XX", "end", "--")), want: NewStr("abcXX123--").ToObject()},
	}
	for i := range cases {
		if err := runInvokeTestCase(printer, &cases[i]); err != "" {
			t.Error(err)
		}
	}
}
// TestBuiltinSetAttr verifies the builtin setattr(): each success case is
// checked by reading the attribute back; arity and argument-type errors are
// also covered.
func TestBuiltinSetAttr(t *testing.T) {
	setattr := mustNotRaise(Builtins.GetItemString(NewRootFrame(), "setattr"))
	fooType := newTestClass("Foo", []*Type{ObjectType}, newStringDict(map[string]*Object{}))
	foo := newObject(fooType)
	// Invoke setattr, then fetch the attribute so both the return value and
	// the stored value can be asserted together.
	check := wrapFuncForTest(func(f *Frame, args ...*Object) (*Object, *BaseException) {
		result, raised := setattr.Call(f, args, nil)
		if raised != nil {
			return nil, raised
		}
		val, raised := GetAttr(f, args[0], toStrUnsafe(args[1]), nil)
		if raised != nil {
			return nil, raised
		}
		return newTestTuple(result, val).ToObject(), nil
	})
	cases := []invokeTestCase{
		{args: wrapArgs(foo), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(newObject(fooType), "foo", "bar"), want: newTestTuple(None, "bar").ToObject()},
		{args: wrapArgs(newObject(fooType), "foo", 123), want: newTestTuple(None, 123).ToObject()},
		{args: wrapArgs(foo, "foo"), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(foo, "foo", 123, None), wantExc: mustCreateException(TypeErrorType, "'setattr' requires 3 arguments")},
		{args: wrapArgs(foo, 123, 123), wantExc: mustCreateException(TypeErrorType, "'setattr' requires a 'str' object but received a \"int\"")},
	}
	for i := range cases {
		if err := runInvokeTestCase(check, &cases[i]); err != "" {
			t.Error(err)
		}
	}
}
// TODO(corona10): Re-enable once #282 is addressed.
/*func TestRawInput(t *testing.T) {
fun := wrapFuncForTest(func(f *Frame, s string, args ...*Object) (*Object, *BaseException) {
// Create a fake Stdin for input test.
stdinFile, w, err := os.Pipe()
if err != nil {
return nil, f.RaiseType(RuntimeErrorType, fmt.Sprintf("failed to open pipe: %v", err))
}
go func() {
w.Write([]byte(s))
w.Close()
}()
oldStdin := Stdin
Stdin = NewFileFromFD(stdinFile.Fd())
defer func() {
Stdin = oldStdin
stdinFile.Close()
}()
var input *Object
output, raised := captureStdout(f, func() *BaseException {
in, raised := builtinRawInput(f, args, nil)
input = in
return raised
})
if raised != nil {
return nil, raised
}
return newTestTuple(input, output).ToObject(), nil
})
cases := []invokeTestCase{
{args: wrapArgs("HelloGrumpy\n", ""), want: newTestTuple("HelloGrumpy", "").ToObject()},
{args: wrapArgs("HelloGrumpy\n", "ShouldBeShown\nShouldBeShown\t"), want: newTestTuple("HelloGrumpy", "ShouldBeShown\nShouldBeShown\t").ToObject()},
{args: wrapArgs("HelloGrumpy\n", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", ""), want: newTestTuple("HelloGrumpy", "").ToObject()},
{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", "ShouldBeShown\nShouldBeShown\t"), want: newTestTuple("HelloGrumpy", "ShouldBeShown\nShouldBeShown\t").ToObject()},
{args: wrapArgs("HelloGrumpy\nHelloGrumpy\n", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
{args: wrapArgs("", ""), wantExc: mustCreateException(EOFErrorType, "EOF when reading a line")},
{args: wrapArgs("", "ShouldBeShown\nShouldBeShown\t"), wantExc: mustCreateException(EOFErrorType, "EOF when reading a line")},
{args: wrapArgs("", 5, 4), wantExc: mustCreateException(TypeErrorType, "[raw_]input expcted at most 1 arguments, got 2")},
}
for _, cas := range cases {
if err := runInvokeTestCase(fun, &cas); err != "" {
t.Error(err)
}
}
}*/
// newTestIndexObject builds a test object whose __index__ method always
// yields the given integer, for exercising code paths that accept any
// index-like object.
func newTestIndexObject(index int) *Object {
	indexMethod := newBuiltinFunction("__index__", func(f *Frame, _ Args, _ KWArgs) (*Object, *BaseException) {
		return NewInt(index).ToObject(), nil
	}).ToObject()
	cls := newTestClass("Index", []*Type{ObjectType}, newStringDict(map[string]*Object{
		"__index__": indexMethod,
	}))
	return newObject(cls)
}
|
package modd
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/cortesi/modd/conf"
"github.com/cortesi/modd/utils"
"github.com/cortesi/moddwatch"
"github.com/cortesi/termlog"
)
const timeout = 2 * time.Second
func touch(p string) {
p = filepath.FromSlash(p)
d := filepath.Dir(p)
err := os.MkdirAll(d, 0777)
if err != nil {
panic(err)
}
f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0777)
if err != nil {
panic(err)
}
if _, err := f.Write([]byte("teststring")); err != nil {
panic(err)
}
if err := f.Close(); err != nil {
panic(err)
}
ioutil.ReadFile(p)
}
func events(p string) []string {
parts := []string{}
for _, p := range strings.Split(p, "\n") {
if strings.HasPrefix(p, ":") {
p = strings.TrimSpace(p)
if !strings.HasSuffix(p, ":") {
parts = append(parts, strings.TrimSpace(p))
}
}
}
return parts
}
// _testWatch is the shared driver for the watch tests. It builds a temp
// directory tree with a fixed modd config, starts a ModRunner, calls modfunc
// to produce filesystem changes, waits until trigger appears in the log (or
// timeout elapses), and finally compares the ":"-tagged events captured in
// the log with expected.
func _testWatch(t *testing.T, modfunc func(), trigger string, expected []string) {
	defer utils.WithTempDir(t)()
	err := os.MkdirAll("a/inner", 0777)
	if err != nil {
		t.Fatal(err)
	}
	err = os.MkdirAll("b", 0777)
	if err != nil {
		t.Fatal(err)
	}
	touch("a/initial")
	// There's some race condition in rjeczalik/notify. If we don't wait a bit
	// here, we sometimes receive notifications for the change above even
	// though we haven't started the watcher.
	time.Sleep(200 * time.Millisecond)
	confTxt := `
@shell = bash
** {
prep +onchange: echo ":skipit:" @mods
prep: echo ":all:" @mods
}
a/* {
prep: echo ":a:" @mods
}
b/* {
prep: echo ":b:" @mods
}
a/**/*.xxx {
prep: echo ":c:" @mods
}
a/direct {
prep: echo ":d:" @mods
}
direct {
prep: echo ":e:" @mods
}
`
	cnf, err := conf.Parse("test", confTxt)
	if err != nil {
		t.Fatal(err)
	}
	lt := termlog.NewLogTest()
	modchan := make(chan *moddwatch.Mod, 1024)
	// cback runs once the watcher is live: it makes the test's modifications,
	// polls the log until the trigger string shows up (bounded by timeout),
	// then sends nil on modchan to stop runOnChan.
	cback := func() {
		start := time.Now()
		modfunc()
		for {
			if strings.Contains(lt.String(), trigger) {
				break
			}
			// time.Since is the idiomatic form of time.Now().Sub(start).
			if time.Since(start) > timeout {
				break
			}
			time.Sleep(50 * time.Millisecond)
		}
		modchan <- nil
	}
	mr := ModRunner{
		Log:    lt.Log,
		Config: cnf,
	}
	err = mr.runOnChan(modchan, cback)
	if err != nil {
		t.Fatalf("runOnChan: %s", err)
	}
	ret := events(lt.String())
	if !reflect.DeepEqual(ret, expected) {
		t.Errorf("Expected\n%#v\nGot\n%#v", expected, ret)
	}
}
// TestWatch exercises the watcher end to end over several match patterns.
//
// Fix: the last two cases touch files named "direct", so the log never
// contains the string "touched"; waiting on trigger "touched" made each of
// those cases spin in _testWatch's poll loop until the full 2s timeout.
// Wait for "direct" instead, which the modification actually produces.
func TestWatch(t *testing.T) {
	_testWatch(
		t,
		func() { touch("a/touched") },
		"touched",
		[]string{
			":all: ./a/initial",
			":a: ./a/initial",
			":skipit: ./a/touched",
			":all: ./a/touched",
			":a: ./a/touched",
		},
	)
	_testWatch(
		t,
		func() {
			touch("a/touched")
			touch("b/touched")
		},
		"touched",
		[]string{
			":all: ./a/initial",
			":a: ./a/initial",
			":skipit: ./a/touched ./b/touched",
			":all: ./a/touched ./b/touched",
			":a: ./a/touched",
			":b: ./b/touched",
		},
	)
	_testWatch(
		t,
		func() {
			touch("a/inner/touched.xxx")
		},
		"touched",
		[]string{
			":all: ./a/initial",
			":a: ./a/initial",
			":skipit: ./a/inner/touched.xxx",
			":all: ./a/inner/touched.xxx",
			":c: ./a/inner/touched.xxx",
		},
	)
	_testWatch(
		t,
		func() {
			touch("a/direct")
		},
		"direct",
		[]string{
			":all: ./a/initial",
			":a: ./a/initial",
			":skipit: ./a/direct",
			":all: ./a/direct",
			":a: ./a/direct",
			":d: ./a/direct",
		},
	)
	_testWatch(
		t,
		func() {
			touch("direct")
		},
		"direct",
		[]string{
			":all: ./a/initial",
			":a: ./a/initial",
			":skipit: ./direct",
			":all: ./direct",
			":e: ./direct",
		},
	)
}
Tease out watch tests to give better visibility
package modd
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/cortesi/modd/conf"
"github.com/cortesi/modd/utils"
"github.com/cortesi/moddwatch"
"github.com/cortesi/termlog"
)
const timeout = 2 * time.Second
func touch(p string) {
p = filepath.FromSlash(p)
d := filepath.Dir(p)
err := os.MkdirAll(d, 0777)
if err != nil {
panic(err)
}
f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0777)
if err != nil {
panic(err)
}
if _, err := f.Write([]byte("teststring")); err != nil {
panic(err)
}
if err := f.Close(); err != nil {
panic(err)
}
ioutil.ReadFile(p)
}
// events extracts the ":"-prefixed event lines from captured log output p.
// A line is kept only if it starts with ":" and, after trimming whitespace,
// does not end with ":" (i.e. the event actually carries arguments).
func events(p string) []string {
	parts := []string{}
	for _, p := range strings.Split(p, "\n") {
		if strings.HasPrefix(p, ":") {
			p = strings.TrimSpace(p)
			if !strings.HasSuffix(p, ":") {
				parts = append(parts, strings.TrimSpace(p))
			}
		}
	}
	return parts
}
// _testWatch is the shared driver for the watch tests. It builds a temp
// directory tree with a fixed modd config, starts a ModRunner, calls modfunc
// to produce filesystem changes, waits until trigger appears in the log (or
// timeout elapses), and finally compares the ":"-tagged events captured in
// the log with expected.
func _testWatch(t *testing.T, modfunc func(), trigger string, expected []string) {
	defer utils.WithTempDir(t)()
	err := os.MkdirAll("a/inner", 0777)
	if err != nil {
		t.Fatal(err)
	}
	err = os.MkdirAll("b", 0777)
	if err != nil {
		t.Fatal(err)
	}
	touch("a/initial")
	// There's some race condition in rjeczalik/notify. If we don't wait a bit
	// here, we sometimes receive notifications for the change above even
	// though we haven't started the watcher.
	time.Sleep(200 * time.Millisecond)
	confTxt := `
@shell = bash
** {
prep +onchange: echo ":skipit:" @mods
prep: echo ":all:" @mods
}
a/* {
prep: echo ":a:" @mods
}
b/* {
prep: echo ":b:" @mods
}
a/**/*.xxx {
prep: echo ":c:" @mods
}
a/direct {
prep: echo ":d:" @mods
}
direct {
prep: echo ":e:" @mods
}
`
	cnf, err := conf.Parse("test", confTxt)
	if err != nil {
		t.Fatal(err)
	}
	lt := termlog.NewLogTest()
	modchan := make(chan *moddwatch.Mod, 1024)
	// cback runs once the watcher is live: it makes the test's modifications,
	// polls the log until the trigger string shows up (bounded by timeout),
	// then sends nil on modchan to stop runOnChan.
	cback := func() {
		start := time.Now()
		modfunc()
		for {
			if strings.Contains(lt.String(), trigger) {
				break
			}
			// time.Since is the idiomatic form of time.Now().Sub(start).
			if time.Since(start) > timeout {
				break
			}
			time.Sleep(50 * time.Millisecond)
		}
		modchan <- nil
	}
	mr := ModRunner{
		Log:    lt.Log,
		Config: cnf,
	}
	err = mr.runOnChan(modchan, cback)
	if err != nil {
		t.Fatalf("runOnChan: %s", err)
	}
	ret := events(lt.String())
	if !reflect.DeepEqual(ret, expected) {
		t.Errorf("Expected\n%#v\nGot\n%#v", expected, ret)
	}
}
func TestWatch(t *testing.T) {
t.Run(
"single",
func(t *testing.T) {
_testWatch(
t,
func() { touch("a/touched") },
"touched",
[]string{
":all: ./a/initial",
":a: ./a/initial",
":skipit: ./a/touched",
":all: ./a/touched",
":a: ./a/touched",
},
)
},
)
t.Run(
"double",
func(t *testing.T) {
_testWatch(
t,
func() {
touch("a/touched")
touch("b/touched")
},
"touched",
[]string{
":all: ./a/initial",
":a: ./a/initial",
":skipit: ./a/touched ./b/touched",
":all: ./a/touched ./b/touched",
":a: ./a/touched",
":b: ./b/touched",
},
)
},
)
t.Run(
"inner",
func(t *testing.T) {
_testWatch(
t,
func() {
touch("a/inner/touched.xxx")
},
"touched",
[]string{
":all: ./a/initial",
":a: ./a/initial",
":skipit: ./a/inner/touched.xxx",
":all: ./a/inner/touched.xxx",
":c: ./a/inner/touched.xxx",
},
)
},
)
t.Run(
"direct",
func(t *testing.T) {
_testWatch(
t,
func() {
touch("a/direct")
},
"direct",
[]string{
":all: ./a/initial",
":a: ./a/initial",
":skipit: ./a/direct",
":all: ./a/direct",
":a: ./a/direct",
":d: ./a/direct",
},
)
},
)
t.Run(
"rootdirect",
func(t *testing.T) {
_testWatch(
t,
func() {
touch("direct")
},
"direct",
[]string{
":all: ./a/initial",
":a: ./a/initial",
":skipit: ./direct",
":all: ./direct",
":e: ./direct",
},
)
},
)
}
|
package msg
import (
"time"
)
type Service struct {
UUID string
Name string
Version string
Environment string
Region string
Host string
Port uint16
TTL uint32 // Seconds
Expires time.Time
Callback map[string]*Callback `json:"-"` // Callbacks are found by UUID
}
// Returns the amount of time remaining before expiration
func (s *Service) RemainingTTL() uint32 {
d := s.Expires.Sub(time.Now())
ttl := uint32(d.Seconds())
if ttl < 1 {
return 0
}
return ttl
}
// Updates TTL property to the RemainingTTL
func (s *Service) UpdateTTL() {
s.TTL = s.RemainingTTL()
}
type Callback struct {
UUID string
// Name of the service
Name string
Version string
Environment string
Region string
Host string
Reply string
Port uint16
}
remove newline
package msg
import (
"time"
)
// Service describes a registered service instance and its expiry metadata.
type Service struct {
	UUID        string
	Name        string
	Version     string
	Environment string
	Region      string
	Host        string
	Port        uint16
	TTL         uint32 // Seconds
	Expires     time.Time
	Callback    map[string]*Callback `json:"-"` // Callbacks are found by UUID
}

// Returns the amount of time remaining before expiration
func (s *Service) RemainingTTL() uint32 {
	// NOTE(review): if Expires is in the past, d.Seconds() is negative and
	// converting a negative float64 to uint32 is implementation-defined in
	// Go — ttl may wrap to a huge value and bypass the < 1 guard below.
	// Confirm whether expired services are ever passed through here.
	d := s.Expires.Sub(time.Now())
	ttl := uint32(d.Seconds())
	if ttl < 1 {
		return 0
	}
	return ttl
}

// Updates TTL property to the RemainingTTL
func (s *Service) UpdateTTL() {
	s.TTL = s.RemainingTTL()
}

// Callback identifies a reply endpoint registered for a service; entries are
// keyed by UUID in Service.Callback.
type Callback struct {
	UUID string
	// Name of the service
	Name        string
	Version     string
	Environment string
	Region      string
	Host        string
	Reply       string
	Port        uint16
}
|
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"context"
"fmt"
"testing"
"time"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
// TestDeriv pins the behavior of deriv() on a series with two identical
// sample values: the derivative must be exactly 0.0.
func TestDeriv(t *testing.T) {
	// https://github.com/prometheus/prometheus/issues/2674#issuecomment-315439393
	// This requires more precision than the usual test system offers,
	// so we test it by hand.
	storage := teststorage.New(t)
	defer storage.Close()
	opts := EngineOpts{
		Logger:     nil,
		Reg:        nil,
		MaxSamples: 10000,
		Timeout:    10 * time.Second,
	}
	engine := NewEngine(opts)
	a := storage.Appender()
	metric := labels.FromStrings("__name__", "foo")
	// Two samples 30s apart, both 1.0 — a perfectly flat series.
	a.Add(metric, 1493712816939, 1.0)
	a.Add(metric, 1493712846939, 1.0)
	testutil.Ok(t, a.Commit())
	// Evaluate at the timestamp of the second sample.
	query, err := engine.NewInstantQuery(storage, "deriv(foo[30m])", timestamp.Time(1493712846939))
	testutil.Ok(t, err)
	result := query.Exec(context.Background())
	testutil.Ok(t, result.Err)
	vec, _ := result.Vector()
	testutil.Assert(t, len(vec) == 1, "Expected 1 result, got %d", len(vec))
	testutil.Assert(t, vec[0].V == 0.0, "Expected 0.0 as value, got %f", vec[0].V)
}
// TestFunctionList verifies that the promql package's FunctionCalls map and
// the parser package's Functions map declare exactly the same set of
// function names, in both directions.
func TestFunctionList(t *testing.T) {
	for name := range FunctionCalls {
		if _, ok := parser.Functions[name]; !ok {
			panic(fmt.Sprintf("function %s exists in promql package, but not in parser package", name))
		}
	}
	for name := range parser.Functions {
		if _, ok := FunctionCalls[name]; !ok {
			// Fix: this message was copy-pasted from the loop above and
			// claimed the reverse direction.
			panic(fmt.Sprintf("function %s exists in parser package, but not in promql package", name))
		}
	}
}
Julien's suggestion
Signed-off-by: Tobias Guggenmos <tguggenm@redhat.com>
Co-Authored-By: Julien Pivotto <df1ed88a312a603dfee9f157a90fa8f7f195ab59@gmail.com>
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"context"
"fmt"
"testing"
"time"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
// TestDeriv pins the behavior of deriv() on a series with two identical
// sample values: the derivative must be exactly 0.0.
func TestDeriv(t *testing.T) {
	// https://github.com/prometheus/prometheus/issues/2674#issuecomment-315439393
	// This requires more precision than the usual test system offers,
	// so we test it by hand.
	storage := teststorage.New(t)
	defer storage.Close()
	opts := EngineOpts{
		Logger:     nil,
		Reg:        nil,
		MaxSamples: 10000,
		Timeout:    10 * time.Second,
	}
	engine := NewEngine(opts)
	a := storage.Appender()
	metric := labels.FromStrings("__name__", "foo")
	// Two samples 30s apart, both 1.0 — a perfectly flat series.
	a.Add(metric, 1493712816939, 1.0)
	a.Add(metric, 1493712846939, 1.0)
	testutil.Ok(t, a.Commit())
	// Evaluate at the timestamp of the second sample.
	query, err := engine.NewInstantQuery(storage, "deriv(foo[30m])", timestamp.Time(1493712846939))
	testutil.Ok(t, err)
	result := query.Exec(context.Background())
	testutil.Ok(t, result.Err)
	vec, _ := result.Vector()
	testutil.Assert(t, len(vec) == 1, "Expected 1 result, got %d", len(vec))
	testutil.Assert(t, vec[0].V == 0.0, "Expected 0.0 as value, got %f", vec[0].V)
}
// TestFunctionList verifies that the promql package's FunctionCalls map and
// the parser package's Functions map declare the same set of function names.
func TestFunctionList(t *testing.T) {
	// Every function known to promql must also be known to the parser...
	for name := range FunctionCalls {
		if _, known := parser.Functions[name]; !known {
			panic(fmt.Sprintf("function %s exists in promql package, but not in parser package", name))
		}
	}
	// ...and vice versa.
	for name := range parser.Functions {
		if _, known := FunctionCalls[name]; !known {
			panic(fmt.Sprintf("function %s exists in parser package, but not in promql package", name))
		}
	}
}
|
package msgp
// size of every object on the wire,
// plus type information. gives us
// constant-time type information
// for traversing composite objects.
//
// sizes maps every possible leading byte to the prefix size and type of the
// object it introduces. Entries for the "fix" families (fixint, nfixint,
// fixstr, fixmap, fixarray) are filled in by init below; only the multi-byte
// prefixes are listed here.
var sizes = [256]bytespec{
	mnil:      {size: 1, extra: constsize, typ: NilType},
	mfalse:    {size: 1, extra: constsize, typ: BoolType},
	mtrue:     {size: 1, extra: constsize, typ: BoolType},
	mbin8:     {size: 2, extra: extra8, typ: BinType},
	mbin16:    {size: 3, extra: extra16, typ: BinType},
	mbin32:    {size: 5, extra: extra32, typ: BinType},
	mext8:     {size: 3, extra: extra8, typ: ExtensionType},
	mext16:    {size: 4, extra: extra16, typ: ExtensionType},
	mext32:    {size: 6, extra: extra32, typ: ExtensionType},
	mfloat32:  {size: 5, extra: constsize, typ: Float32Type},
	mfloat64:  {size: 9, extra: constsize, typ: Float64Type},
	muint8:    {size: 2, extra: constsize, typ: UintType},
	muint16:   {size: 3, extra: constsize, typ: UintType},
	muint32:   {size: 5, extra: constsize, typ: UintType},
	muint64:   {size: 9, extra: constsize, typ: UintType},
	mint8:     {size: 2, extra: constsize, typ: IntType},
	mint16:    {size: 3, extra: constsize, typ: IntType},
	mint32:    {size: 5, extra: constsize, typ: IntType},
	mint64:    {size: 9, extra: constsize, typ: IntType},
	mfixext1:  {size: 3, extra: constsize, typ: ExtensionType},
	mfixext2:  {size: 4, extra: constsize, typ: ExtensionType},
	mfixext4:  {size: 6, extra: constsize, typ: ExtensionType},
	mfixext8:  {size: 10, extra: constsize, typ: ExtensionType},
	mfixext16: {size: 18, extra: constsize, typ: ExtensionType},
	mstr8:     {size: 2, extra: extra8, typ: StrType},
	mstr16:    {size: 3, extra: extra16, typ: StrType},
	mstr32:    {size: 5, extra: extra32, typ: StrType},
	marray16:  {size: 3, extra: array16v, typ: ArrayType},
	marray32:  {size: 5, extra: array32v, typ: ArrayType},
	mmap16:    {size: 3, extra: map16v, typ: MapType},
	mmap32:    {size: 5, extra: map32v, typ: MapType},
}
// init fills in the sizes entries for the "fix" format families, whose
// prefix byte encodes the value (or length) directly.
func init() {
	// set up fixed fields

	// fixint: 0x00-0x7f
	for i := mfixint; i < 0x80; i++ {
		sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType}
	}

	// nfixint: 0xe0-0xff.
	// Fix: the loop previously ran `i < 0xff` and so never initialized the
	// entry for 0xff (negative fixint -1). Widening the counter to uint16
	// lets the loop include 0xff without the counter wrapping around.
	for i := uint16(mnfixint); i < 0x100; i++ {
		sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType}
	}

	// fixstr gets constsize,
	// since the prefix yields the size
	for i := mfixstr; i < 0xc0; i++ {
		sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType}
	}

	// fixmap
	for i := mfixmap; i < 0x90; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType}
	}

	// fixarray
	for i := mfixarray; i < 0xa0; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType}
	}
}
// a valid bytespec has
// non-zero 'size' and
// non-zero 'typ'
type bytespec struct {
	size  uint8   // prefix size information
	extra varmode // extra size information
	typ   Type    // type
	_     byte    // makes bytespec 4 bytes (yes, this matters)
}

// varmode is the size mode of a prefix byte:
// if positive, # elements for composites;
// zero means constant size; negative values select how extra length bytes
// are encoded (see the constants below).
type varmode int8

const (
	constsize varmode = 0  // constant size (size bytes + uint8(varmode) objects)
	extra8            = -1 // has uint8(p[1]) extra bytes
	extra16           = -2 // has be16(p[1:]) extra bytes
	extra32           = -3 // has be32(p[1:]) extra bytes
	map16v            = -4 // use map16
	map32v            = -5 // use map32
	array16v          = -6 // use array16
	array32v          = -7 // use array32
)

// getType returns the Type encoded by the leading byte v of an object.
func getType(v byte) Type {
	return sizes[v].typ
}
handle 0xff (negative fixed int -1) when setting up sizes
package msgp
// size of every object on the wire,
// plus type information. gives us
// constant-time type information
// for traversing composite objects.
//
// sizes maps every possible leading byte to the prefix size and type of the
// object it introduces. Entries for the "fix" families (fixint, nfixint,
// fixstr, fixmap, fixarray) are filled in by init below; only the multi-byte
// prefixes are listed here.
var sizes = [256]bytespec{
	mnil:      {size: 1, extra: constsize, typ: NilType},
	mfalse:    {size: 1, extra: constsize, typ: BoolType},
	mtrue:     {size: 1, extra: constsize, typ: BoolType},
	mbin8:     {size: 2, extra: extra8, typ: BinType},
	mbin16:    {size: 3, extra: extra16, typ: BinType},
	mbin32:    {size: 5, extra: extra32, typ: BinType},
	mext8:     {size: 3, extra: extra8, typ: ExtensionType},
	mext16:    {size: 4, extra: extra16, typ: ExtensionType},
	mext32:    {size: 6, extra: extra32, typ: ExtensionType},
	mfloat32:  {size: 5, extra: constsize, typ: Float32Type},
	mfloat64:  {size: 9, extra: constsize, typ: Float64Type},
	muint8:    {size: 2, extra: constsize, typ: UintType},
	muint16:   {size: 3, extra: constsize, typ: UintType},
	muint32:   {size: 5, extra: constsize, typ: UintType},
	muint64:   {size: 9, extra: constsize, typ: UintType},
	mint8:     {size: 2, extra: constsize, typ: IntType},
	mint16:    {size: 3, extra: constsize, typ: IntType},
	mint32:    {size: 5, extra: constsize, typ: IntType},
	mint64:    {size: 9, extra: constsize, typ: IntType},
	mfixext1:  {size: 3, extra: constsize, typ: ExtensionType},
	mfixext2:  {size: 4, extra: constsize, typ: ExtensionType},
	mfixext4:  {size: 6, extra: constsize, typ: ExtensionType},
	mfixext8:  {size: 10, extra: constsize, typ: ExtensionType},
	mfixext16: {size: 18, extra: constsize, typ: ExtensionType},
	mstr8:     {size: 2, extra: extra8, typ: StrType},
	mstr16:    {size: 3, extra: extra16, typ: StrType},
	mstr32:    {size: 5, extra: extra32, typ: StrType},
	marray16:  {size: 3, extra: array16v, typ: ArrayType},
	marray32:  {size: 5, extra: array32v, typ: ArrayType},
	mmap16:    {size: 3, extra: map16v, typ: MapType},
	mmap32:    {size: 5, extra: map32v, typ: MapType},
}
// init fills in the sizes entries for the "fix" format families, whose
// prefix byte encodes the value (or length) directly.
func init() {
	// set up fixed fields

	// fixint
	for i := mfixint; i < 0x80; i++ {
		sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType}
	}

	// nfixint
	// The counter is a uint16 so the final byte value 0xff (negative
	// fixint -1) is included without the counter wrapping around.
	for i := uint16(mnfixint); i < 0x100; i++ {
		sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType}
	}

	// fixstr gets constsize,
	// since the prefix yields the size
	for i := mfixstr; i < 0xc0; i++ {
		sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType}
	}

	// fixmap
	for i := mfixmap; i < 0x90; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType}
	}

	// fixarray
	for i := mfixarray; i < 0xa0; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType}
	}
}
// a valid bytespec has
// non-zero 'size' and
// non-zero 'typ'
type bytespec struct {
	size  uint8   // prefix size information
	extra varmode // extra size information
	typ   Type    // type
	_     byte    // makes bytespec 4 bytes (yes, this matters)
}

// varmode is the size mode of a prefix byte:
// if positive, # elements for composites;
// zero means constant size; negative values select how extra length bytes
// are encoded (see the constants below).
type varmode int8

const (
	constsize varmode = 0  // constant size (size bytes + uint8(varmode) objects)
	extra8            = -1 // has uint8(p[1]) extra bytes
	extra16           = -2 // has be16(p[1:]) extra bytes
	extra32           = -3 // has be32(p[1:]) extra bytes
	map16v            = -4 // use map16
	map32v            = -5 // use map32
	array16v          = -6 // use array16
	array32v          = -7 // use array32
)

// getType returns the Type encoded by the leading byte v of an object.
func getType(v byte) Type {
	return sizes[v].typ
}
|
package hatchery
import (
"fmt"
"os/exec"
"sync"
"time"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
// Interface describe an interface for each hatchery mode (mesos, local)
type Interface interface {
	Init() error
	KillWorker(worker sdk.Worker) error
	// SpawnWorker starts a worker of the given model; job may be nil when
	// spawning for provisioning (see provisioning below).
	SpawnWorker(model *sdk.Model, job *sdk.PipelineBuildJob) error
	// CanSpawn reports whether this hatchery can start a worker of the
	// given model for the given job; it is the final check in canRunJob.
	CanSpawn(model *sdk.Model, job *sdk.PipelineBuildJob) bool
	WorkersStartedByModel(model *sdk.Model) int
	WorkersStarted() int
	Hatchery() *sdk.Hatchery
	ModelType() string
	ID() int64
}

var (
	// Client is a CDS Client
	Client sdk.HTTPClient
)
// CheckRequirement checks binary requirement in path
func CheckRequirement(r sdk.Requirement) (bool, error) {
	// Only binary requirements can be verified here; anything else is
	// reported as unmet.
	if r.Type != sdk.BinaryRequirement {
		return false, nil
	}
	_, err := exec.LookPath(r.Value)
	// Return nil because the error contains 'Exit status X', that's what we wanted
	return err == nil, nil
}
// routine performs one scheduling pass for hatchery h: it fetches the build
// queue, matches queued jobs against the available worker models, books
// matching jobs and spawns workers for them (bounded by maxWorkers). It
// returns the IDs of the jobs it spawned workers for so the next pass can
// skip them (lastSpawnedIDs). timestamp only tags log lines.
func routine(h Interface, maxWorkers, provision int, hostname string, timestamp int64, lastSpawnedIDs []int64, warningSeconds, criticalSeconds, graceSeconds int) ([]int64, error) {
	defer logTime(fmt.Sprintf("routine> %d", timestamp), time.Now(), warningSeconds, criticalSeconds)
	log.Debug("routine> %d enter", timestamp)

	// Hatchery not registered yet: nothing to do this pass.
	if h.Hatchery() == nil || h.Hatchery().ID == 0 {
		log.Debug("Create> continue")
		return nil, nil
	}

	workersStarted := h.WorkersStarted()
	if workersStarted > maxWorkers {
		log.Notice("routine> %d max workers reached. current:%d max:%d", timestamp, workersStarted, maxWorkers)
		return nil, nil
	}
	log.Debug("routine> %d - workers already started:%d", timestamp, workersStarted)

	jobs, errbq := sdk.GetBuildQueue()
	if errbq != nil {
		// Fix: %e is a floating-point verb; %v renders errors correctly.
		log.Critical("routine> %d error on GetBuildQueue:%v", timestamp, errbq)
		return nil, errbq
	}

	if len(jobs) == 0 {
		log.Debug("routine> %d - Job queue is empty", timestamp)
		return nil, nil
	}
	log.Debug("routine> %d - Job queue size:%d", timestamp, len(jobs))

	models, errwm := sdk.GetWorkerModels()
	if errwm != nil {
		log.Debug("routine> %d - error on GetWorkerModels:%v", timestamp, errwm)
		return nil, errwm
	}
	if len(models) == 0 {
		return nil, fmt.Errorf("routine> %d - No model returned by GetWorkerModels", timestamp)
	}
	log.Debug("routine> %d - models received: %d", timestamp, len(models))

	spawnedIDs := []int64{}
	// Fix: spawnedIDs is appended to from the goroutines below; the
	// unsynchronized appends were a data race.
	var spawnedMu sync.Mutex
	wg := &sync.WaitGroup{}

	// Cap the jobs examined this pass by the remaining worker budget.
	nToRun := len(jobs)
	if len(jobs) > maxWorkers-workersStarted {
		nToRun = maxWorkers - workersStarted
		if nToRun < 0 { // should never occur, just to be sure
			nToRun = 1
		}
		log.Info("routine> %d - work only on %d jobs from queue. queue size:%d workersStarted:%d maxWorkers:%d", timestamp, nToRun, len(jobs), workersStarted, maxWorkers)
	}

	for i := range jobs[:nToRun] {
		wg.Add(1)
		go func(job *sdk.PipelineBuildJob) {
			// Fix: one deferred Done replaces the Done call every
			// early-return path previously had to remember.
			defer wg.Done()
			defer logTime(fmt.Sprintf("routine> %d - job %d>", timestamp, job.ID), time.Now(), warningSeconds, criticalSeconds)
			if sdk.IsInArray(job.ID, lastSpawnedIDs) {
				log.Debug("routine> %d - job %d already spawned in previous routine", timestamp, job.ID)
				return
			}
			if job.QueuedSeconds < int64(graceSeconds) {
				log.Debug("routine> %d - job %d is too fresh, queued since %d seconds, let existing waiting worker check it", timestamp, job.ID, job.QueuedSeconds)
				return
			}
			log.Debug("routine> %d - work on job %d queued since %d seconds", timestamp, job.ID, job.QueuedSeconds)
			if job.BookedBy.ID != 0 {
				t := "current hatchery"
				if job.BookedBy.ID != h.Hatchery().ID {
					t = "another hatchery"
				}
				log.Debug("routine> %d - job %d already booked by %s %s (%d)", timestamp, job.ID, t, job.BookedBy.Name, job.BookedBy.ID)
				return
			}
			for _, model := range models {
				if canRunJob(h, job, &model, hostname) {
					if err := sdk.BookPipelineBuildJob(job.ID); err != nil {
						// perhaps already booked by another hatchery
						log.Debug("routine> %d - cannot book job %d %s: %s", timestamp, job.ID, model.Name, err)
						break // go to next job
					}
					log.Debug("routine> %d - send book job %d %s by hatchery %d", timestamp, job.ID, model.Name, h.Hatchery().ID)
					start := time.Now()
					infos := []sdk.SpawnInfo{
						{
							RemoteTime: start,
							Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryStarts.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), model.Name}},
						},
					}
					if err := h.SpawnWorker(&model, job); err != nil {
						log.Warning("routine> %d - cannot spawn worker %s for job %d: %s", timestamp, model.Name, job.ID, err)
						infos = append(infos, sdk.SpawnInfo{
							RemoteTime: time.Now(),
							Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryErrorSpawn.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), model.Name, sdk.Round(time.Since(start), time.Second).String(), err.Error()}},
						})
						if err := sdk.AddSpawnInfosPipelineBuildJob(job.ID, infos); err != nil {
							log.Warning("routine> %d - cannot record AddSpawnInfosPipelineBuildJob for job (err spawn)%d: %s", timestamp, job.ID, err)
						}
						continue // try another model
					}
					spawnedMu.Lock()
					spawnedIDs = append(spawnedIDs, job.ID)
					spawnedMu.Unlock()
					infos = append(infos, sdk.SpawnInfo{
						RemoteTime: time.Now(),
						Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryStartsSuccessfully.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), sdk.Round(time.Since(start), time.Second).String()}},
					})
					if err := sdk.AddSpawnInfosPipelineBuildJob(job.ID, infos); err != nil {
						log.Warning("routine> %d - cannot record AddSpawnInfosPipelineBuildJob for job %d: %s", timestamp, job.ID, err)
					}
					break // ok for this job
				}
			}
		}(&jobs[i])
	}
	wg.Wait()
	return spawnedIDs, nil
}
// provisioning asynchronously spawns idle workers so that each model of this
// hatchery's type has at least `provision` workers started.
func provisioning(h Interface, provision int) {
	if provision == 0 {
		log.Debug("provisioning> no provisioning to do")
		return
	}

	models, errwm := sdk.GetWorkerModels()
	if errwm != nil {
		// Fix: %e is a floating-point verb; %v renders errors correctly.
		log.Debug("provisioning> error on GetWorkerModels:%v", errwm)
		return
	}

	for k := range models {
		if h.WorkersStartedByModel(&models[k]) < provision {
			if models[k].Type == h.ModelType() {
				go func(m sdk.Model) {
					if err := h.SpawnWorker(&m, nil); err != nil {
						// Fix: the format string had a single %s for two
						// arguments (m.Name, err), mangling the message.
						log.Warning("provisioning> cannot spawn worker %s for provisioning: %s", m.Name, err)
					}
				}(models[k])
			}
		}
	}
}
// canRunJob reports whether hatchery h, using the given worker model on this
// host, is able to run job. Each of the job's requirements is checked
// against the model's type and capabilities; the final decision is deferred
// to h.CanSpawn.
func canRunJob(h Interface, job *sdk.PipelineBuildJob, model *sdk.Model, hostname string) bool {
	if model.Type != h.ModelType() {
		return false
	}

	// Common check
	for _, r := range job.Job.Action.Requirements {
		// If requirement is a Model requirement, it's easy. It's either can or can't run
		if r.Type == sdk.ModelRequirement && r.Value != model.Name {
			return false
		}

		// If requirement is an hostname requirement, it's for a specific worker
		if r.Type == sdk.HostnameRequirement && r.Value != hostname {
			return false
		}

		// service and memory requirements are only supported by docker model
		if model.Type != sdk.Docker && (r.Type == sdk.ServiceRequirement || r.Type == sdk.MemoryRequirement) {
			return false
		}

		found := false

		// Skip network access requirement as we can't check it
		if r.Type == sdk.NetworkAccessRequirement || r.Type == sdk.PluginRequirement || r.Type == sdk.ServiceRequirement || r.Type == sdk.MemoryRequirement {
			continue
		}

		// Remaining requirement types must match one of the model's
		// declared capabilities (by value or by name).
		for _, c := range model.Capabilities {
			if r.Value == c.Value || r.Value == c.Name {
				found = true
				break
			}
		}

		if !found {
			return false
		}
	}

	return h.CanSpawn(model, job)
}
// logTime logs the elapsed time since `then` under the given name,
// escalating the log level when the duration crosses the warning or
// critical thresholds (given in seconds).
func logTime(name string, then time.Time, warningSeconds, criticalSeconds int) {
	elapsed := time.Since(then)
	switch {
	case elapsed > time.Duration(criticalSeconds)*time.Second:
		log.Critical("%s took %s to execute", name, elapsed)
	case elapsed > time.Duration(warningSeconds)*time.Second:
		log.Warning("%s took %s to execute", name, elapsed)
	default:
		log.Debug("%s took %s to execute", name, elapsed)
	}
}
fix (hatchery): compute canSpawn Binary (#418)
Signed-off-by: Yvonnick Esnault <05f95ceb78e0d41f782014c15bff0a420d6ac71a@corp.ovh.com>
package hatchery
import (
"fmt"
"os/exec"
"sync"
"time"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
// Interface describe an interface for each hatchery mode (mesos, local)
type Interface interface {
	Init() error
	KillWorker(worker sdk.Worker) error
	// SpawnWorker starts a worker of the given model; job may be nil when
	// spawning for provisioning (see provisioning below).
	SpawnWorker(model *sdk.Model, job *sdk.PipelineBuildJob) error
	// CanSpawn reports whether this hatchery can start a worker of the
	// given model for the given job; it is the final check in canRunJob.
	CanSpawn(model *sdk.Model, job *sdk.PipelineBuildJob) bool
	WorkersStartedByModel(model *sdk.Model) int
	WorkersStarted() int
	Hatchery() *sdk.Hatchery
	ModelType() string
	ID() int64
}

var (
	// Client is a CDS Client
	Client sdk.HTTPClient
)
// CheckRequirement checks binary requirement in path
func CheckRequirement(r sdk.Requirement) (bool, error) {
	switch r.Type {
	case sdk.BinaryRequirement:
		// exec.LookPath searches PATH for the binary named by r.Value.
		if _, err := exec.LookPath(r.Value); err != nil {
			// Return nil because the error contains 'Exit status X', that's what we wanted
			return false, nil
		}
		return true, nil
	default:
		// Non-binary requirements cannot be checked here.
		return false, nil
	}
}
// routine performs one scheduling pass for hatchery h: it fetches the build
// queue, matches queued jobs against the available worker models, books
// matching jobs and spawns workers for them (bounded by maxWorkers). It
// returns the IDs of the jobs it spawned workers for so the next pass can
// skip them (lastSpawnedIDs). timestamp only tags log lines.
func routine(h Interface, maxWorkers, provision int, hostname string, timestamp int64, lastSpawnedIDs []int64, warningSeconds, criticalSeconds, graceSeconds int) ([]int64, error) {
	defer logTime(fmt.Sprintf("routine> %d", timestamp), time.Now(), warningSeconds, criticalSeconds)
	log.Debug("routine> %d enter", timestamp)

	// Hatchery not registered yet: nothing to do this pass.
	if h.Hatchery() == nil || h.Hatchery().ID == 0 {
		log.Debug("Create> continue")
		return nil, nil
	}

	workersStarted := h.WorkersStarted()
	if workersStarted > maxWorkers {
		log.Notice("routine> %d max workers reached. current:%d max:%d", timestamp, workersStarted, maxWorkers)
		return nil, nil
	}
	log.Debug("routine> %d - workers already started:%d", timestamp, workersStarted)

	jobs, errbq := sdk.GetBuildQueue()
	if errbq != nil {
		// Fix: %e is a floating-point verb; %v renders errors correctly.
		log.Critical("routine> %d error on GetBuildQueue:%v", timestamp, errbq)
		return nil, errbq
	}

	if len(jobs) == 0 {
		log.Debug("routine> %d - Job queue is empty", timestamp)
		return nil, nil
	}
	log.Debug("routine> %d - Job queue size:%d", timestamp, len(jobs))

	models, errwm := sdk.GetWorkerModels()
	if errwm != nil {
		log.Debug("routine> %d - error on GetWorkerModels:%v", timestamp, errwm)
		return nil, errwm
	}
	if len(models) == 0 {
		return nil, fmt.Errorf("routine> %d - No model returned by GetWorkerModels", timestamp)
	}
	log.Debug("routine> %d - models received: %d", timestamp, len(models))

	spawnedIDs := []int64{}
	// Fix: spawnedIDs is appended to from the goroutines below; the
	// unsynchronized appends were a data race.
	var spawnedMu sync.Mutex
	wg := &sync.WaitGroup{}

	// Cap the jobs examined this pass by the remaining worker budget.
	nToRun := len(jobs)
	if len(jobs) > maxWorkers-workersStarted {
		nToRun = maxWorkers - workersStarted
		if nToRun < 0 { // should never occur, just to be sure
			nToRun = 1
		}
		log.Info("routine> %d - work only on %d jobs from queue. queue size:%d workersStarted:%d maxWorkers:%d", timestamp, nToRun, len(jobs), workersStarted, maxWorkers)
	}

	for i := range jobs[:nToRun] {
		wg.Add(1)
		go func(job *sdk.PipelineBuildJob) {
			// Fix: one deferred Done replaces the Done call every
			// early-return path previously had to remember.
			defer wg.Done()
			defer logTime(fmt.Sprintf("routine> %d - job %d>", timestamp, job.ID), time.Now(), warningSeconds, criticalSeconds)
			if sdk.IsInArray(job.ID, lastSpawnedIDs) {
				log.Debug("routine> %d - job %d already spawned in previous routine", timestamp, job.ID)
				return
			}
			if job.QueuedSeconds < int64(graceSeconds) {
				log.Debug("routine> %d - job %d is too fresh, queued since %d seconds, let existing waiting worker check it", timestamp, job.ID, job.QueuedSeconds)
				return
			}
			log.Debug("routine> %d - work on job %d queued since %d seconds", timestamp, job.ID, job.QueuedSeconds)
			if job.BookedBy.ID != 0 {
				t := "current hatchery"
				if job.BookedBy.ID != h.Hatchery().ID {
					t = "another hatchery"
				}
				log.Debug("routine> %d - job %d already booked by %s %s (%d)", timestamp, job.ID, t, job.BookedBy.Name, job.BookedBy.ID)
				return
			}
			for _, model := range models {
				if canRunJob(h, timestamp, job, &model, hostname) {
					if err := sdk.BookPipelineBuildJob(job.ID); err != nil {
						// perhaps already booked by another hatchery
						log.Debug("routine> %d - cannot book job %d %s: %s", timestamp, job.ID, model.Name, err)
						break // go to next job
					}
					log.Debug("routine> %d - send book job %d %s by hatchery %d", timestamp, job.ID, model.Name, h.Hatchery().ID)
					start := time.Now()
					infos := []sdk.SpawnInfo{
						{
							RemoteTime: start,
							Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryStarts.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), model.Name}},
						},
					}
					if err := h.SpawnWorker(&model, job); err != nil {
						log.Warning("routine> %d - cannot spawn worker %s for job %d: %s", timestamp, model.Name, job.ID, err)
						infos = append(infos, sdk.SpawnInfo{
							RemoteTime: time.Now(),
							Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryErrorSpawn.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), model.Name, sdk.Round(time.Since(start), time.Second).String(), err.Error()}},
						})
						if err := sdk.AddSpawnInfosPipelineBuildJob(job.ID, infos); err != nil {
							log.Warning("routine> %d - cannot record AddSpawnInfosPipelineBuildJob for job (err spawn)%d: %s", timestamp, job.ID, err)
						}
						continue // try another model
					}
					spawnedMu.Lock()
					spawnedIDs = append(spawnedIDs, job.ID)
					spawnedMu.Unlock()
					infos = append(infos, sdk.SpawnInfo{
						RemoteTime: time.Now(),
						Message:    sdk.SpawnMsg{ID: sdk.MsgSpawnInfoHatcheryStartsSuccessfully.ID, Args: []interface{}{fmt.Sprintf("%d", h.Hatchery().ID), sdk.Round(time.Since(start), time.Second).String()}},
					})
					if err := sdk.AddSpawnInfosPipelineBuildJob(job.ID, infos); err != nil {
						log.Warning("routine> %d - cannot record AddSpawnInfosPipelineBuildJob for job %d: %s", timestamp, job.ID, err)
					}
					break // ok for this job
				}
			}
		}(&jobs[i])
	}
	wg.Wait()
	return spawnedIDs, nil
}
// provisioning ensures that at least `provision` workers are started for
// every worker model matching this hatchery's model type. Each missing
// worker is spawned asynchronously; spawn failures are only logged.
func provisioning(h Interface, provision int) {
	if provision == 0 {
		log.Debug("provisioning> no provisioning to do")
		return
	}
	models, errwm := sdk.GetWorkerModels()
	if errwm != nil {
		// %s, not %e: %e is a floating-point verb and mangled the error text.
		log.Debug("provisioning> error on GetWorkerModels: %s", errwm)
		return
	}
	for k := range models {
		if h.WorkersStartedByModel(&models[k]) < provision {
			if models[k].Type == h.ModelType() {
				go func(m sdk.Model) {
					if err := h.SpawnWorker(&m, nil); err != nil {
						// The format previously had one %s for two arguments
						// (m.Name and err); include both explicitly.
						log.Warning("provisioning> cannot spawn worker %s for provisioning: %s", m.Name, err)
					}
				}(models[k])
			}
		}
	}
}
// canRunJob reports whether a worker of the given model, spawned by this
// hatchery on this host, could run the given job. It checks the job's
// requirements against the model's type, the hostname, and the model's
// capabilities, then delegates the final decision to h.CanSpawn.
func canRunJob(h Interface, timestamp int64, job *sdk.PipelineBuildJob, model *sdk.Model, hostname string) bool {
	if model.Type != h.ModelType() {
		return false
	}
	// Common check
	for _, r := range job.Job.Action.Requirements {
		// If requirement is a Model requirement, it's easy. It's either can or can't run
		if r.Type == sdk.ModelRequirement && r.Value != model.Name {
			log.Debug("canRunJob> %d - job %d - model requirement r.Value(%s) != model.Name(%s)", timestamp, job.ID, r.Value, model.Name)
			return false
		}
		// If requirement is an hostname requirement, it's for a specific worker
		if r.Type == sdk.HostnameRequirement && r.Value != hostname {
			log.Debug("canRunJob> %d - job %d - hostname requirement r.Value(%s) != hostname(%s)", timestamp, job.ID, r.Value, hostname)
			return false
		}
		// service and memory requirements are only supported by docker model
		if model.Type != sdk.Docker && (r.Type == sdk.ServiceRequirement || r.Type == sdk.MemoryRequirement) {
			log.Debug("canRunJob> %d - job %d - job with service requirement or memory requirement: only for model docker. current model:%s", timestamp, job.ID, model.Type)
			return false
		}
		// Requirements the hatchery cannot verify here (network access,
		// plugins) or that were already validated above are skipped.
		if r.Type == sdk.NetworkAccessRequirement || r.Type == sdk.PluginRequirement || r.Type == sdk.ServiceRequirement || r.Type == sdk.MemoryRequirement {
			// The previous message was copy-pasted from the docker-only check
			// above and was misleading for network/plugin requirements.
			log.Debug("canRunJob> %d - job %d - requirement type %s not checked by the hatchery, skipping", timestamp, job.ID, r.Type)
			continue
		}
		if r.Type == sdk.BinaryRequirement {
			found := false
			// Check binary requirement against worker model capabilities
			for _, c := range model.Capabilities {
				if r.Value == c.Value || r.Value == c.Name {
					found = true
					break
				}
			}
			if !found {
				log.Debug("canRunJob> %d - job %d - model(%s) does not have binary %s(%s) for this job.", timestamp, job.ID, model.Name, r.Name, r.Value)
				return false
			}
		}
	}
	// Hatchery-specific final check (capacity, quotas, ...).
	return h.CanSpawn(model, job)
}
// logTime logs how long the step identified by name has been running since
// then, choosing the severity from the warning/critical thresholds (seconds).
func logTime(name string, then time.Time, warningSeconds, criticalSeconds int) {
	elapsed := time.Since(then)
	warn := time.Duration(warningSeconds) * time.Second
	crit := time.Duration(criticalSeconds) * time.Second
	switch {
	case elapsed > crit:
		log.Critical("%s took %s to execute", name, elapsed)
	case elapsed > warn:
		log.Warning("%s took %s to execute", name, elapsed)
	default:
		log.Debug("%s took %s to execute", name, elapsed)
	}
}
|
package rt_test
import (
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"regexp"
"testing"
"time"
"veyron.io/veyron/veyron2/options"
"veyron.io/veyron/veyron2/rt"
"veyron.io/veyron/veyron2/security"
"veyron.io/veyron/veyron2/vlog"
"veyron.io/veyron/veyron/lib/expect"
"veyron.io/veyron/veyron/lib/flags/consts"
"veyron.io/veyron/veyron/lib/modules"
"veyron.io/veyron/veyron/lib/testutil"
vsecurity "veyron.io/veyron/veyron/security"
)
// init seeds the test utilities and registers the subprocess entry points
// ("child", "principal", "runner") that the modules framework re-executes
// through TestHelperProcess below.
func init() {
	testutil.Init()
	modules.RegisterChild("child", "", child)
	modules.RegisterChild("principal", "", principal)
	modules.RegisterChild("runner", "", runner)
}

// TestHelperProcess dispatches to a registered child command when this test
// binary is re-executed as a subprocess by the modules framework.
func TestHelperProcess(t *testing.T) {
	modules.DispatchInTest()
}
// TestInit checks that rt.New configures the default logger with the
// expected flag values and creates a usable principal with a default
// blessing and blessing store.
func TestInit(t *testing.T) {
	r, err := rt.New(profileOpt)
	if err != nil {
		t.Fatalf("error: %s", err)
	}
	l := r.Logger()
	args := fmt.Sprintf("%s", l)
	// The boolean alternatives must be grouped: an ungrouped a|b splits the
	// whole pattern at top level, so the previous regexp matched any string
	// containing "false alsologtostderr=false" or "true max_stack...".
	expected := regexp.MustCompile("name=veyron logdirs=\\[/tmp\\] logtostderr=(?:true|false) alsologtostderr=(?:false|true) max_stack_buf_size=4292608 v=[0-9] stderrthreshold=2 vmodule= log_backtrace_at=:0")
	if !expected.MatchString(args) {
		t.Errorf("unexpected default args: %q", args)
	}
	p := r.Principal()
	if p == nil {
		t.Fatalf("A new principal should have been created")
	}
	if p.BlessingStore() == nil {
		t.Fatalf("The principal must have a BlessingStore")
	}
	if p.BlessingStore().Default() == nil {
		t.Errorf("Principal().BlessingStore().Default() should not be nil")
	}
	if p.BlessingStore().ForPeer() == nil {
		t.Errorf("Principal().BlessingStore().ForPeer() should not be nil")
	}
}
// child is a subprocess entry point (registered in init). It prints the
// runtime logger's configuration to stdout, waits for stdin to close, then
// prints "done" so the parent test can synchronize on its output.
func child(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	vlog.Infof("%s\n", r.Logger())
	fmt.Fprintf(stdout, "%s\n", r.Logger())
	modules.WaitForEOF(stdin)
	fmt.Fprintf(stdout, "done\n")
	return nil
}
// TestInitArgs starts the "child" subprocess with explicit logging flags and
// verifies that the child's runtime logger reports exactly those settings.
func TestInitArgs(t *testing.T) {
	sh := modules.NewShell()
	defer sh.Cleanup(os.Stderr, os.Stderr)
	// "--" separates flags consumed by the child's runtime from its own args.
	h, err := sh.Start("child", nil, "--logtostderr=true", "--vv=3", "--", "foobar")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	s := expect.NewSession(t, h.Stdout(), time.Minute)
	s.Expect(fmt.Sprintf("name=veyron "+
		"logdirs=[%s] "+
		"logtostderr=true "+
		"alsologtostderr=true "+
		"max_stack_buf_size=4292608 "+
		"v=3 "+
		"stderrthreshold=2 "+
		"vmodule= "+
		"log_backtrace_at=:0",
		os.TempDir()))
	// Closing stdin makes the child's WaitForEOF return and print "done".
	h.CloseStdin()
	s.Expect("done")
	s.ExpectEOF()
	h.Shutdown(os.Stderr, os.Stderr)
}
// validatePrincipal checks that p is non-nil, has a default blessing, and
// that exactly one blessing name is recognized by p itself for that default.
func validatePrincipal(p security.Principal) error {
	if p == nil {
		return fmt.Errorf("nil principal")
	}
	blessings := p.BlessingStore().Default()
	if blessings == nil {
		return fmt.Errorf("rt.Principal().BlessingStore().Default() returned nil")
	}
	ctx := security.NewContext(&security.ContextParams{LocalPrincipal: p})
	if n := len(blessings.ForContext(ctx)); n != 1 {
		// The error was previously constructed but never returned, so the
		// check silently passed.
		return fmt.Errorf("rt.Principal().BlessingStore().Default() returned Blessing %v with %d recognized blessings, want exactly one recognized blessing", blessings, n)
	}
	return nil
}
// defaultBlessing returns the first name of p's default blessing that p
// itself recognizes.
// NOTE(review): indexes [0] without a length check — assumes the principal
// recognizes at least one of its own default blessings (validatePrincipal
// enforces exactly one in the code paths that use this).
func defaultBlessing(p security.Principal) string {
	return p.BlessingStore().Default().ForContext(security.NewContext(&security.ContextParams{LocalPrincipal: p}))[0]
}

// tmpDir creates a fresh temporary directory for a test; the caller is
// responsible for removing it.
func tmpDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "rt_test_dir")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	return dir
}
// principal is a subprocess entry point: it initializes the runtime,
// validates its principal, and reports the default blessing on stdout.
func principal(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	if err := validatePrincipal(r.Principal()); err != nil {
		return err
	}
	fmt.Fprintf(stdout, "DEFAULT_BLESSING=%s\n", defaultBlessing(r.Principal()))
	return nil
}

// runner runs a "principal" subprocess and reports back with its own
// security info and its child's.
func runner(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	if err := validatePrincipal(r.Principal()); err != nil {
		return err
	}
	fmt.Fprintf(stdout, "RUNNER_DEFAULT_BLESSING=%v\n", defaultBlessing(r.Principal()))
	sh := modules.NewShell()
	if _, err := sh.Start("principal", nil, args[1:]...); err != nil {
		return err
	}
	// Cleanup copies the output of sh to these Writers.
	sh.Cleanup(stdout, stderr)
	return nil
}

// createCredentialsInDir creates a persistent principal in dir and gives it
// the supplied name as its default blessing.
func createCredentialsInDir(t *testing.T, dir string, blessing string) {
	principal, err := vsecurity.CreatePersistentPrincipal(dir, nil)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if err := vsecurity.InitDefaultBlessings(principal, blessing); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
}
// TestPrincipalInheritance checks that a child process inherits credentials
// from its parent: the runner picks up the "test" blessing from the supplied
// credentials directory and its child principal is blessed as "test/child".
func TestPrincipalInheritance(t *testing.T) {
	sh := modules.NewShell()
	defer func() {
		sh.Cleanup(os.Stdout, os.Stderr)
	}()
	// Test that the child inherits from the parent's credentials correctly.
	// The running test process may or may not have a credentials directory set
	// up so we have to use a 'runner' process to ensure the correct setup.
	cdir := tmpDir(t)
	defer os.RemoveAll(cdir)
	createCredentialsInDir(t, cdir, "test")
	// directory supplied by the environment.
	credEnv := []string{consts.VeyronCredentials + "=" + cdir}
	h, err := sh.Start("runner", credEnv)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	// Use a generous timeout: the previous 2-second budget was too short for
	// starting two subprocesses on slow machines and made the test flaky.
	s := expect.NewSession(t, h.Stdout(), time.Minute)
	runnerBlessing := s.ExpectVar("RUNNER_DEFAULT_BLESSING")
	principalBlessing := s.ExpectVar("DEFAULT_BLESSING")
	if err := s.Error(); err != nil {
		t.Fatalf("failed to read input from children: %s", err)
	}
	h.Shutdown(os.Stdout, os.Stderr)
	wantRunnerBlessing := "test"
	wantPrincipalBlessing := "test/child"
	if runnerBlessing != wantRunnerBlessing || principalBlessing != wantPrincipalBlessing {
		t.Fatalf("unexpected default blessing: got runner %s, principal %s, want runner %s, principal %s", runnerBlessing, principalBlessing, wantRunnerBlessing, wantPrincipalBlessing)
	}
}
// TestPrincipalInit checks the three ways a child's credentials can be
// established: freshly generated (no configuration), via the
// VEYRON_CREDENTIALS environment variable, and via the --veyron.credentials
// flag (which overrides the environment).
func TestPrincipalInit(t *testing.T) {
	// Collect the child's default blessing (empty string on failure).
	collect := func(sh *modules.Shell, env []string, args ...string) string {
		h, err := sh.Start("principal", env, args...)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		s := expect.NewSession(t, h.Stdout(), time.Minute)
		s.SetVerbosity(testing.Verbose())
		return s.ExpectVar("DEFAULT_BLESSING")
	}
	// A credentials directory may or may not have been already specified.
	// Either way, we want to use our own, so we set it aside and use our own.
	origCredentialsDir := os.Getenv(consts.VeyronCredentials)
	defer os.Setenv(consts.VeyronCredentials, origCredentialsDir)
	// Test that with VEYRON_CREDENTIALS unset the runtime's Principal
	// is correctly initialized.
	if err := os.Setenv(consts.VeyronCredentials, ""); err != nil {
		t.Fatal(err)
	}
	sh := modules.NewShell()
	defer sh.Cleanup(os.Stderr, os.Stderr)
	blessing := collect(sh, nil)
	if len(blessing) == 0 {
		t.Fatalf("child returned an empty default blessings set")
	}
	// Test specifying credentials via VEYRON_CREDENTIALS environment.
	cdir1 := tmpDir(t)
	defer os.RemoveAll(cdir1)
	createCredentialsInDir(t, cdir1, "test_env")
	credEnv := []string{consts.VeyronCredentials + "=" + cdir1}
	blessing = collect(sh, credEnv)
	if got, want := blessing, "test_env"; got != want {
		t.Errorf("got default blessings: %q, want %q", got, want)
	}
	// Test specifying credentials via the command line and that the
	// command line overrides the environment.
	cdir2 := tmpDir(t)
	defer os.RemoveAll(cdir2)
	createCredentialsInDir(t, cdir2, "test_cmd")
	blessing = collect(sh, credEnv, "--veyron.credentials="+cdir2)
	if got, want := blessing, "test_cmd"; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// TestInitPrincipalFromOption verifies that a principal supplied via
// options.RuntimePrincipal is adopted verbatim by the new runtime.
func TestInitPrincipalFromOption(t *testing.T) {
	p, err := vsecurity.NewPrincipal()
	if err != nil {
		t.Fatalf("NewPrincipal() failed: %v", err)
	}
	r, err := rt.New(profileOpt, options.RuntimePrincipal{p})
	if err != nil {
		t.Fatalf("rt.New failed: %v", err)
	}
	if got := r.Principal(); !reflect.DeepEqual(got, p) {
		t.Fatalf("r.Principal(): got %v, want %v", got, p)
	}
}
runtimes/google/rt: fix overly short timeout in a test.
Change-Id: Id80cd1e0f84fc8e22205081ca3fd84845d2ae87d
package rt_test
import (
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"regexp"
"testing"
"time"
"veyron.io/veyron/veyron2/options"
"veyron.io/veyron/veyron2/rt"
"veyron.io/veyron/veyron2/security"
"veyron.io/veyron/veyron2/vlog"
"veyron.io/veyron/veyron/lib/expect"
"veyron.io/veyron/veyron/lib/flags/consts"
"veyron.io/veyron/veyron/lib/modules"
"veyron.io/veyron/veyron/lib/testutil"
vsecurity "veyron.io/veyron/veyron/security"
)
// init seeds the test utilities and registers the subprocess entry points
// ("child", "principal", "runner") that the modules framework re-executes
// through TestHelperProcess below.
func init() {
	testutil.Init()
	modules.RegisterChild("child", "", child)
	modules.RegisterChild("principal", "", principal)
	modules.RegisterChild("runner", "", runner)
}

// TestHelperProcess dispatches to a registered child command when this test
// binary is re-executed as a subprocess by the modules framework.
func TestHelperProcess(t *testing.T) {
	modules.DispatchInTest()
}
// TestInit checks that rt.New configures the default logger with the
// expected flag values and creates a usable principal with a default
// blessing and blessing store.
func TestInit(t *testing.T) {
	r, err := rt.New(profileOpt)
	if err != nil {
		t.Fatalf("error: %s", err)
	}
	l := r.Logger()
	args := fmt.Sprintf("%s", l)
	// The boolean alternatives must be grouped: an ungrouped a|b splits the
	// whole pattern at top level, so the previous regexp matched any string
	// containing "false alsologtostderr=false" or "true max_stack...".
	expected := regexp.MustCompile("name=veyron logdirs=\\[/tmp\\] logtostderr=(?:true|false) alsologtostderr=(?:false|true) max_stack_buf_size=4292608 v=[0-9] stderrthreshold=2 vmodule= log_backtrace_at=:0")
	if !expected.MatchString(args) {
		t.Errorf("unexpected default args: %q", args)
	}
	p := r.Principal()
	if p == nil {
		t.Fatalf("A new principal should have been created")
	}
	if p.BlessingStore() == nil {
		t.Fatalf("The principal must have a BlessingStore")
	}
	if p.BlessingStore().Default() == nil {
		t.Errorf("Principal().BlessingStore().Default() should not be nil")
	}
	if p.BlessingStore().ForPeer() == nil {
		t.Errorf("Principal().BlessingStore().ForPeer() should not be nil")
	}
}
// child is a subprocess entry point (registered in init). It prints the
// runtime logger's configuration to stdout, waits for stdin to close, then
// prints "done" so the parent test can synchronize on its output.
func child(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	vlog.Infof("%s\n", r.Logger())
	fmt.Fprintf(stdout, "%s\n", r.Logger())
	modules.WaitForEOF(stdin)
	fmt.Fprintf(stdout, "done\n")
	return nil
}
// TestInitArgs starts the "child" subprocess with explicit logging flags and
// verifies that the child's runtime logger reports exactly those settings.
func TestInitArgs(t *testing.T) {
	sh := modules.NewShell()
	defer sh.Cleanup(os.Stderr, os.Stderr)
	// "--" separates flags consumed by the child's runtime from its own args.
	h, err := sh.Start("child", nil, "--logtostderr=true", "--vv=3", "--", "foobar")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	s := expect.NewSession(t, h.Stdout(), time.Minute)
	s.Expect(fmt.Sprintf("name=veyron "+
		"logdirs=[%s] "+
		"logtostderr=true "+
		"alsologtostderr=true "+
		"max_stack_buf_size=4292608 "+
		"v=3 "+
		"stderrthreshold=2 "+
		"vmodule= "+
		"log_backtrace_at=:0",
		os.TempDir()))
	// Closing stdin makes the child's WaitForEOF return and print "done".
	h.CloseStdin()
	s.Expect("done")
	s.ExpectEOF()
	h.Shutdown(os.Stderr, os.Stderr)
}
// validatePrincipal checks that p is non-nil, has a default blessing, and
// that exactly one blessing name is recognized by p itself for that default.
func validatePrincipal(p security.Principal) error {
	if p == nil {
		return fmt.Errorf("nil principal")
	}
	blessings := p.BlessingStore().Default()
	if blessings == nil {
		return fmt.Errorf("rt.Principal().BlessingStore().Default() returned nil")
	}
	ctx := security.NewContext(&security.ContextParams{LocalPrincipal: p})
	if n := len(blessings.ForContext(ctx)); n != 1 {
		// The error was previously constructed but never returned, so the
		// check silently passed.
		return fmt.Errorf("rt.Principal().BlessingStore().Default() returned Blessing %v with %d recognized blessings, want exactly one recognized blessing", blessings, n)
	}
	return nil
}
// defaultBlessing returns the first name of p's default blessing that p
// itself recognizes.
// NOTE(review): indexes [0] without a length check — assumes the principal
// recognizes at least one of its own default blessings (validatePrincipal
// enforces exactly one in the code paths that use this).
func defaultBlessing(p security.Principal) string {
	return p.BlessingStore().Default().ForContext(security.NewContext(&security.ContextParams{LocalPrincipal: p}))[0]
}

// tmpDir creates a fresh temporary directory for a test; the caller is
// responsible for removing it.
func tmpDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "rt_test_dir")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	return dir
}
// principal is a subprocess entry point: it initializes the runtime,
// validates its principal, and reports the default blessing on stdout.
func principal(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	if err := validatePrincipal(r.Principal()); err != nil {
		return err
	}
	fmt.Fprintf(stdout, "DEFAULT_BLESSING=%s\n", defaultBlessing(r.Principal()))
	return nil
}

// runner runs a "principal" subprocess and reports back with its own
// security info and its child's.
func runner(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
	r := rt.Init()
	if err := validatePrincipal(r.Principal()); err != nil {
		return err
	}
	fmt.Fprintf(stdout, "RUNNER_DEFAULT_BLESSING=%v\n", defaultBlessing(r.Principal()))
	sh := modules.NewShell()
	if _, err := sh.Start("principal", nil, args[1:]...); err != nil {
		return err
	}
	// Cleanup copies the output of sh to these Writers.
	sh.Cleanup(stdout, stderr)
	return nil
}

// createCredentialsInDir creates a persistent principal in dir and gives it
// the supplied name as its default blessing.
func createCredentialsInDir(t *testing.T, dir string, blessing string) {
	principal, err := vsecurity.CreatePersistentPrincipal(dir, nil)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if err := vsecurity.InitDefaultBlessings(principal, blessing); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
}
// TestPrincipalInheritance checks that a child process inherits credentials
// from its parent: the runner picks up the "test" blessing from the supplied
// credentials directory and its child principal is blessed as "test/child".
func TestPrincipalInheritance(t *testing.T) {
	sh := modules.NewShell()
	defer func() {
		sh.Cleanup(os.Stdout, os.Stderr)
	}()
	// Test that the child inherits from the parent's credentials correctly.
	// The running test process may or may not have a credentials directory set
	// up so we have to use a 'runner' process to ensure the correct setup.
	cdir := tmpDir(t)
	defer os.RemoveAll(cdir)
	createCredentialsInDir(t, cdir, "test")
	// directory supplied by the environment.
	credEnv := []string{consts.VeyronCredentials + "=" + cdir}
	h, err := sh.Start("runner", credEnv)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	// Generous timeout: two subprocesses must start before output appears.
	s := expect.NewSession(t, h.Stdout(), time.Minute)
	runnerBlessing := s.ExpectVar("RUNNER_DEFAULT_BLESSING")
	principalBlessing := s.ExpectVar("DEFAULT_BLESSING")
	if err := s.Error(); err != nil {
		t.Fatalf("failed to read input from children: %s", err)
	}
	h.Shutdown(os.Stdout, os.Stderr)
	wantRunnerBlessing := "test"
	wantPrincipalBlessing := "test/child"
	if runnerBlessing != wantRunnerBlessing || principalBlessing != wantPrincipalBlessing {
		t.Fatalf("unexpected default blessing: got runner %s, principal %s, want runner %s, principal %s", runnerBlessing, principalBlessing, wantRunnerBlessing, wantPrincipalBlessing)
	}
}
// TestPrincipalInit checks the three ways a child's credentials can be
// established: freshly generated (no configuration), via the
// VEYRON_CREDENTIALS environment variable, and via the --veyron.credentials
// flag (which overrides the environment).
func TestPrincipalInit(t *testing.T) {
	// Collect the child's default blessing (empty string on failure).
	collect := func(sh *modules.Shell, env []string, args ...string) string {
		h, err := sh.Start("principal", env, args...)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		s := expect.NewSession(t, h.Stdout(), time.Minute)
		s.SetVerbosity(testing.Verbose())
		return s.ExpectVar("DEFAULT_BLESSING")
	}
	// A credentials directory may or may not have been already specified.
	// Either way, we want to use our own, so we set it aside and use our own.
	origCredentialsDir := os.Getenv(consts.VeyronCredentials)
	defer os.Setenv(consts.VeyronCredentials, origCredentialsDir)
	// Test that with VEYRON_CREDENTIALS unset the runtime's Principal
	// is correctly initialized.
	if err := os.Setenv(consts.VeyronCredentials, ""); err != nil {
		t.Fatal(err)
	}
	sh := modules.NewShell()
	defer sh.Cleanup(os.Stderr, os.Stderr)
	blessing := collect(sh, nil)
	if len(blessing) == 0 {
		t.Fatalf("child returned an empty default blessings set")
	}
	// Test specifying credentials via VEYRON_CREDENTIALS environment.
	cdir1 := tmpDir(t)
	defer os.RemoveAll(cdir1)
	createCredentialsInDir(t, cdir1, "test_env")
	credEnv := []string{consts.VeyronCredentials + "=" + cdir1}
	blessing = collect(sh, credEnv)
	if got, want := blessing, "test_env"; got != want {
		t.Errorf("got default blessings: %q, want %q", got, want)
	}
	// Test specifying credentials via the command line and that the
	// command line overrides the environment.
	cdir2 := tmpDir(t)
	defer os.RemoveAll(cdir2)
	createCredentialsInDir(t, cdir2, "test_cmd")
	blessing = collect(sh, credEnv, "--veyron.credentials="+cdir2)
	if got, want := blessing, "test_cmd"; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// TestInitPrincipalFromOption verifies that a principal supplied via
// options.RuntimePrincipal is adopted verbatim by the new runtime.
func TestInitPrincipalFromOption(t *testing.T) {
	p, err := vsecurity.NewPrincipal()
	if err != nil {
		t.Fatalf("NewPrincipal() failed: %v", err)
	}
	r, err := rt.New(profileOpt, options.RuntimePrincipal{p})
	if err != nil {
		t.Fatalf("rt.New failed: %v", err)
	}
	if got := r.Principal(); !reflect.DeepEqual(got, p) {
		t.Fatalf("r.Principal(): got %v, want %v", got, p)
	}
}
|
package stripe
import (
"net/url"
"strconv"
)
// Subscription statuses, as reported in the Subscription.Status field.
// see https://stripe.com/docs/api#subscription_object
const (
	SubscriptionTrialing = "trialing"
	SubscriptionActive   = "active"
	SubscriptionPastDue  = "past_due"
	SubscriptionCanceled = "canceled"
	SubscriptionUnpaid   = "unpaid"
)
// Subscription represents a customer's subscription to a plan.
// see https://stripe.com/docs/api#subscription_object
type Subscription struct {
	// The struct tags previously used bare strings ("status", and "custoer" —
	// a typo for "customer"), which encoding/json does not read; use the
	// canonical `json:"..."` form so decoding matches the API field names.
	Customer string `json:"customer"`
	Status   string `json:"status"`
	Plan     *Plan  `json:"plan"`
	// Date the subscription started
	Start int64 `json:"start"`
	// If the subscription has ended (either because it was canceled or because
	// the customer was switched to a subscription to a new plan), the date the
	// subscription ended
	EndedAt Int64 `json:"ended_at"`
	// Start of the current period that the subscription has been invoiced for
	CurrentPeriodStart Int64 `json:"current_period_start"`
	// End of the current period that the subscription has been invoiced for.
	// At the end of this period, a new invoice will be created.
	CurrentPeriodEnd Int64 `json:"current_period_end"`
	// If the subscription has a trial, the beginning of that trial
	TrialStart Int64 `json:"trial_start"`
	// If the subscription has a trial, the end of that trial.
	TrialEnd Int64 `json:"trial_end"`
	// If the subscription has been canceled, the date of that cancellation. If
	// the subscription was canceled with cancel_at_period_end, canceled_at
	// will still reflect the date of the initial cancellation request, not the
	// end of the subscription period when the subscription is automatically
	// moved to a canceled state.
	CanceledAt Int64 `json:"canceled_at"`
	// If the subscription has been canceled with the at_period_end flag set to
	// true, cancel_at_period_end on the subscription will be true. You can use
	// this attribute to determine whether a subscription that has a status of
	// active is scheduled to be canceled at the end of the current period.
	CancelAtPeriodEnd bool `json:"cancel_at_period_end"`
}
// SubscriptionClient groups the subscription-related API calls.
type SubscriptionClient struct{}

// UpdateSubscriptionReq holds the parameters for SubscriptionClient.Update.
type UpdateSubscriptionReq struct {
	Plan     string // The identifier of the plan to subscribe the customer to.
	Coupon   string // The code of the coupon to apply to the customer if you would like to apply it at the same time as creating the subscription.
	Prorate  bool   // Flag telling us whether to prorate switching plans during a billing cycle
	TrialEnd int64  // UTC integer timestamp representing the end of the trial period the customer will get before being charged for the first time. If set, trial_end will override the default trial period of the plan the customer is being subscribed to.
	//Card *Card // A new card to attach to the customer. The card can either be a token, like the ones returned by our Stripe.js, or a Map containing a user's credit card details
}
// Update subscribes a customer to a plan, meaning the customer will be billed
// monthly starting from signup. If the customer already has an active
// subscription, it is switched to the new plan, optionally prorating the
// price charged next month to make up for any price changes.
// TODO unable to include the card parameter at this time
//
// see https://stripe.com/docs/api?lang=java#update_subscription
func (self *SubscriptionClient) Update(customerId string, req *UpdateSubscriptionReq) (*Subscription, error) {
	form := url.Values{}
	form.Add("plan", req.Plan)
	if req.Coupon != "" {
		form.Add("coupon", req.Coupon)
	}
	if req.Prorate {
		form.Add("prorate", "true")
	}
	if req.TrialEnd != 0 {
		form.Add("trial_end", strconv.FormatInt(req.TrialEnd, 10))
	}
	var sub Subscription
	endpoint := "/v1/customers/" + url.QueryEscape(customerId) + "/subscription"
	err := query("POST", endpoint, form, &sub)
	return &sub, err
}
// Cancels the subscription if it exists. If you set the atPeriodEnd parameter
// to true, the subscription will remain active until the end of the period, at
// which point it will be cancelled and not renewed.
// TODO at_period_end is not currently working
// NOTE(review): atPeriodEnd is accepted but never sent to the API (see the
// TODO above) — every call currently cancels immediately.
//
// see https://stripe.com/docs/api?lang=java#cancel_subscription
func (self *SubscriptionClient) Cancel(customerId string, atPeriodEnd bool) (*Subscription, error) {
	s := Subscription{}
	path := "/v1/customers/" + url.QueryEscape(customerId) + "/subscription"
	err := query("DELETE", path, nil, &s)
	return &s, err
}
Fixed JSON parsing metadata: converted the Subscription struct tags to canonical `json:"..."` form and corrected the "custoer" typo.
package stripe
import (
"net/url"
"strconv"
)
// Subscription statuses, as reported in the Subscription.Status field.
// see https://stripe.com/docs/api#subscription_object
const (
	SubscriptionTrialing = "trialing"
	SubscriptionActive   = "active"
	SubscriptionPastDue  = "past_due"
	SubscriptionCanceled = "canceled"
	SubscriptionUnpaid   = "unpaid"
)
// Subscription represents a customer's subscription to a plan.
// see https://stripe.com/docs/api#subscription_object
type Subscription struct {
	Customer string `json:"customer"`
	Status   string `json:"status"`
	Plan     *Plan  `json:"plan"`
	// Date the subscription started
	Start int64 `json:"start"`
	// If the subscription has ended (either because it was canceled or because
	// the customer was switched to a subscription to a new plan), the date the
	// subscription ended
	EndedAt Int64 `json:"ended_at"`
	// Start of the current period that the subscription has been invoiced for
	CurrentPeriodStart Int64 `json:"current_period_start"`
	// End of the current period that the subscription has been invoiced for.
	// At the end of this period, a new invoice will be created.
	CurrentPeriodEnd Int64 `json:"current_period_end"`
	// If the subscription has a trial, the beginning of that trial
	TrialStart Int64 `json:"trial_start"`
	// If the subscription has a trial, the end of that trial.
	TrialEnd Int64 `json:"trial_end"`
	// If the subscription has been canceled, the date of that cancellation. If
	// the subscription was canceled with cancel_at_period_end, canceled_at will
	// still reflect the date of the initial cancellation request, not the end
	// of the subscription period when the subscription is automatically moved
	// to a canceled state.
	CanceledAt Int64 `json:"canceled_at"`
	// If the subscription has been canceled with the at_period_end flag set to
	// true, cancel_at_period_end on the subscription will be true. You can use
	// this attribute to determine whether a subscription that has a status of
	// active is scheduled to be canceled at the end of the current period.
	CancelAtPeriodEnd bool `json:"cancel_at_period_end"`
}
// SubscriptionClient groups the subscription-related API calls.
type SubscriptionClient struct{}

// SubscriptionParams holds the parameters for SubscriptionClient.Update.
type SubscriptionParams struct {
	// The identifier of the plan to subscribe the customer to.
	Plan string
	// The code of the coupon to apply to the customer if you would like to
	// apply it at the same time as creating the subscription.
	Coupon string
	// Flag telling us whether to prorate switching plans during a billing cycle
	Prorate bool
	// UTC integer timestamp representing the end of the trial period the
	// customer will get before being charged for the first time. If set,
	// trial_end will override the default trial period of the plan the customer
	// is being subscribed to.
	TrialEnd int64
	// A new card to attach to the customer. The card can either be a token,
	// like the ones returned by our Stripe.js, or a Map containing a user's
	// credit card details
	//Card *Card
}
// Update subscribes a customer to a plan, meaning the customer will be billed
// monthly starting from signup. If the customer already has an active
// subscription, it is switched to the new plan, optionally prorating the
// price charged next month to make up for any price changes.
// TODO unable to include the card parameter at this time
//
// see https://stripe.com/docs/api#update_subscription
func (self *SubscriptionClient) Update(customerId string, params *SubscriptionParams) (*Subscription, error) {
	form := url.Values{}
	form.Add("plan", params.Plan)
	if params.Coupon != "" {
		form.Add("coupon", params.Coupon)
	}
	if params.Prorate {
		form.Add("prorate", "true")
	}
	if params.TrialEnd != 0 {
		form.Add("trial_end", strconv.FormatInt(params.TrialEnd, 10))
	}
	var sub Subscription
	endpoint := "/v1/customers/" + url.QueryEscape(customerId) + "/subscription"
	err := query("POST", endpoint, form, &sub)
	return &sub, err
}
// Cancels the subscription if it exists. The cancellation takes effect
// immediately; deferring it to the end of the billing period is not yet
// supported.
// TODO enable at_period_end parameter
//
// see https://stripe.com/docs/api#cancel_subscription
func (self *SubscriptionClient) Cancel(customerId string) (*Subscription, error) {
	s := Subscription{}
	path := "/v1/customers/" + url.QueryEscape(customerId) + "/subscription"
	err := query("DELETE", path, nil, &s)
	return &s, err
}
|
package jiffy
import (
"time"
)
var (
	// The maximum number of messages to buffer in a subscription.
	ResponseBufferSize = 100
)

// Subscription is a named, expiring binding of a consumer to a Topic;
// published messages are delivered on the Response channel.
type Subscription struct {
	Name     string
	Topic    *Topic
	Response chan *Message
	uuid     string    // identity token; distinguishes re-registrations under the same name
	expireAt time.Time // instant after which the subscription counts as expired
}
// NewSubscription creates a subscription to topic that expires ttl from now.
func NewSubscription(name string, topic *Topic, ttl time.Duration) *Subscription {
	return &Subscription{
		Name:     name,
		Topic:    topic,
		Response: make(chan *Message, ResponseBufferSize),
		uuid:     UUID(),
		expireAt: time.Now().Add(ttl),
	}
}
// Publishes a message to the subscription.
// NOTE(review): this send blocks indefinitely once the Response buffer
// (ResponseBufferSize) is full and nothing is draining it.
func (subscription *Subscription) Publish(message *Message) {
	subscription.Response <- message
}
// Expire marks the subscription as expired immediately. (It does not remove
// the subscription from the topic's map; see Active/Activate.)
func (subscription *Subscription) Expire() {
	subscription.expireAt = time.Now()
}

// Expired reports whether the subscription's expiration time has passed.
func (subscription *Subscription) Expired() bool {
	return time.Now().After(subscription.expireAt)
}

// Extends the subscription's expiration by the input TTL, re-registering it
// with its topic first.
func (subscription *Subscription) ExtendExpiration(ttl time.Duration) {
	subscription.Activate()
	subscription.expireAt = time.Now().Add(ttl)
}

// Returns true if the subscription is active on a topic: registered under its
// name, still the same instance (matching uuid), and not expired.
func (subscription *Subscription) Active() bool {
	if topicSubscription, ok := subscription.Topic.Subscriptions[subscription.Name]; ok {
		return (topicSubscription.uuid == subscription.uuid) && !subscription.Expired()
	}
	return false
}

// Resubscribes a subscription to its configured topic.
// NOTE(review): no locking around the Subscriptions map here or in Active —
// confirm single-goroutine access or external synchronization.
func (subscription *Subscription) Activate() {
	subscription.Topic.Subscriptions[subscription.Name] = subscription
}
Time out publishes: a send to a full subscription buffer no longer blocks forever; it gives up after PublishTimeout.
package jiffy
import (
"time"
)
var (
	// The maximum number of messages to buffer in a subscription.
	ResponseBufferSize = 100
	// The wait before timing out a publish.
	PublishTimeout = 10 * time.Minute
)

// Subscription is a named, expiring binding of a consumer to a Topic;
// published messages are delivered on the Response channel.
type Subscription struct {
	Name     string
	Topic    *Topic
	Response chan *Message
	uuid     string    // identity token; distinguishes re-registrations under the same name
	expireAt time.Time // instant after which the subscription counts as expired
}
// NewSubscription creates a subscription to topic that expires ttl from now.
func NewSubscription(name string, topic *Topic, ttl time.Duration) *Subscription {
	return &Subscription{
		Name:     name,
		Topic:    topic,
		Response: make(chan *Message, ResponseBufferSize),
		uuid:     UUID(),
		expireAt: time.Now().Add(ttl),
	}
}
// Publish delivers message on the subscription's Response channel. If the
// buffered channel stays full for PublishTimeout, the message is dropped.
func (subscription *Subscription) Publish(message *Message) {
	// A stopped Timer replaces the previous Ticker, which was never stopped
	// and therefore kept firing (and held its resources) after every call.
	timer := time.NewTimer(PublishTimeout)
	defer timer.Stop()
	select {
	case subscription.Response <- message:
	case <-timer.C:
	}
}
// Expire marks the subscription as expired immediately. (It does not remove
// the subscription from the topic's map; see Active/Activate.)
func (subscription *Subscription) Expire() {
	subscription.expireAt = time.Now()
}

// Expired reports whether the subscription's expiration time has passed.
func (subscription *Subscription) Expired() bool {
	return time.Now().After(subscription.expireAt)
}

// Extends the subscription's expiration by the input TTL, re-registering it
// with its topic first.
func (subscription *Subscription) ExtendExpiration(ttl time.Duration) {
	subscription.Activate()
	subscription.expireAt = time.Now().Add(ttl)
}

// Returns true if the subscription is active on a topic: registered under its
// name, still the same instance (matching uuid), and not expired.
func (subscription *Subscription) Active() bool {
	if topicSubscription, ok := subscription.Topic.Subscriptions[subscription.Name]; ok {
		return (topicSubscription.uuid == subscription.uuid) && !subscription.Expired()
	}
	return false
}

// Resubscribes a subscription to its configured topic.
// NOTE(review): no locking around the Subscriptions map here or in Active —
// confirm single-goroutine access or external synchronization.
func (subscription *Subscription) Activate() {
	subscription.Topic.Subscriptions[subscription.Name] = subscription
}
|
// Package s3 provides an interface to Amazon S3 oject storage
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
// Register with Fs
//
// init registers the "s3" backend and all of its configuration options
// with the rclone filesystem registry at program start-up.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "s3",
		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
			Help: "Choose your S3 provider.",
			Examples: []fs.OptionExample{{
				Value: "AWS",
				Help:  "Amazon Web Services (AWS) S3",
			}, {
				Value: "Alibaba",
				Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
			}, {
				Value: "Ceph",
				Help:  "Ceph Object Storage",
			}, {
				Value: "DigitalOcean",
				Help:  "Digital Ocean Spaces",
			}, {
				Value: "Dreamhost",
				Help:  "Dreamhost DreamObjects",
			}, {
				Value: "IBMCOS",
				Help:  "IBM COS S3",
			}, {
				Value: "Minio",
				Help:  "Minio Object Storage",
			}, {
				Value: "Netease",
				Help:  "Netease Object Storage (NOS)",
			}, {
				Value: "StackPath",
				Help:  "StackPath Object Storage",
			}, {
				Value: "Wasabi",
				Help:  "Wasabi Object Storage",
			}, {
				Value: "Other",
				Help:  "Any other S3 compatible provider",
			}},
		}, {
			Name: "env_auth",
			// NOTE: "is blank" corrected to "are blank" (two settings).
			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter AWS credentials in the next step",
			}, {
				Value: "true",
				Help:  "Get AWS credentials from the environment (env vars or IAM)",
			}},
		}, {
			Name: "access_key_id",
			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "secret_access_key",
			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name:     "region",
			Help:     "Region to connect to.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "us-east-1",
				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
			}, {
				Value: "us-east-2",
				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
			}, {
				Value: "us-west-2",
				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
			}, {
				Value: "us-west-1",
				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
			}, {
				Value: "ca-central-1",
				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
			}, {
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
			}, {
				Value: "eu-west-2",
				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
			}, {
				Value: "eu-north-1",
				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
			}, {
				Value: "eu-central-1",
				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
			}, {
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
			}, {
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
			}, {
				Value: "ap-northeast-2",
				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
			}, {
				Value: "ap-south-1",
				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
			}, {
				Value: "ap-east-1",
				// Typo fix: "Patific" -> "Pacific"
				Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
			}, {
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
			}},
		}, {
			Name:     "region",
			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
			Provider: "!AWS,Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
			}, {
				Value: "other-v2-signature",
				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
			Provider: "AWS",
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
			Provider: "IBMCOS",
			Examples: []fs.OptionExample{{
				Value: "s3-api.us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Endpoint",
			}, {
				Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Dallas Endpoint",
			}, {
				Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Washington DC Endpoint",
			}, {
				Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region San Jose Endpoint",
			}, {
				Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Private Endpoint",
			}, {
				Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Dallas Private Endpoint",
			}, {
				Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Washington DC Private Endpoint",
			}, {
				Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region San Jose Private Endpoint",
			}, {
				Value: "s3.us-east.objectstorage.softlayer.net",
				Help:  "US Region East Endpoint",
			}, {
				Value: "s3.us-east.objectstorage.service.networklayer.com",
				Help:  "US Region East Private Endpoint",
			}, {
				Value: "s3.us-south.objectstorage.softlayer.net",
				Help:  "US Region South Endpoint",
			}, {
				Value: "s3.us-south.objectstorage.service.networklayer.com",
				Help:  "US Region South Private Endpoint",
			}, {
				Value: "s3.eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Endpoint",
			}, {
				Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Frankfurt Endpoint",
			}, {
				Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Milan Endpoint",
			}, {
				Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Amsterdam Endpoint",
			}, {
				Value: "s3.eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Private Endpoint",
			}, {
				Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Frankfurt Private Endpoint",
			}, {
				Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Milan Private Endpoint",
			}, {
				Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Amsterdam Private Endpoint",
			}, {
				Value: "s3.eu-gb.objectstorage.softlayer.net",
				Help:  "Great Britain Endpoint",
			}, {
				Value: "s3.eu-gb.objectstorage.service.networklayer.com",
				Help:  "Great Britain Private Endpoint",
			}, {
				Value: "s3.ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Endpoint",
			}, {
				Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Tokyo Endpoint",
			}, {
				Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional HongKong Endpoint",
			}, {
				Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Seoul Endpoint",
			}, {
				Value: "s3.ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Private Endpoint",
			}, {
				Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Tokyo Private Endpoint",
			}, {
				Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional HongKong Private Endpoint",
			}, {
				Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Seoul Private Endpoint",
			}, {
				Value: "s3.mel01.objectstorage.softlayer.net",
				Help:  "Melbourne Single Site Endpoint",
			}, {
				Value: "s3.mel01.objectstorage.service.networklayer.com",
				Help:  "Melbourne Single Site Private Endpoint",
			}, {
				Value: "s3.tor01.objectstorage.softlayer.net",
				Help:  "Toronto Single Site Endpoint",
			}, {
				Value: "s3.tor01.objectstorage.service.networklayer.com",
				Help:  "Toronto Single Site Private Endpoint",
			}},
		}, {
			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
			Name:     "endpoint",
			Help:     "Endpoint for OSS API.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "oss-cn-hangzhou.aliyuncs.com",
				Help:  "East China 1 (Hangzhou)",
			}, {
				Value: "oss-cn-shanghai.aliyuncs.com",
				Help:  "East China 2 (Shanghai)",
			}, {
				Value: "oss-cn-qingdao.aliyuncs.com",
				Help:  "North China 1 (Qingdao)",
			}, {
				Value: "oss-cn-beijing.aliyuncs.com",
				Help:  "North China 2 (Beijing)",
			}, {
				Value: "oss-cn-zhangjiakou.aliyuncs.com",
				Help:  "North China 3 (Zhangjiakou)",
			}, {
				Value: "oss-cn-huhehaote.aliyuncs.com",
				Help:  "North China 5 (Huhehaote)",
			}, {
				Value: "oss-cn-shenzhen.aliyuncs.com",
				Help:  "South China 1 (Shenzhen)",
			}, {
				Value: "oss-cn-hongkong.aliyuncs.com",
				Help:  "Hong Kong (Hong Kong)",
			}, {
				Value: "oss-us-west-1.aliyuncs.com",
				Help:  "US West 1 (Silicon Valley)",
			}, {
				Value: "oss-us-east-1.aliyuncs.com",
				Help:  "US East 1 (Virginia)",
			}, {
				Value: "oss-ap-southeast-1.aliyuncs.com",
				Help:  "Southeast Asia Southeast 1 (Singapore)",
			}, {
				Value: "oss-ap-southeast-2.aliyuncs.com",
				Help:  "Asia Pacific Southeast 2 (Sydney)",
			}, {
				Value: "oss-ap-southeast-3.aliyuncs.com",
				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
			}, {
				Value: "oss-ap-southeast-5.aliyuncs.com",
				Help:  "Asia Pacific Southeast 5 (Jakarta)",
			}, {
				Value: "oss-ap-northeast-1.aliyuncs.com",
				Help:  "Asia Pacific Northeast 1 (Japan)",
			}, {
				Value: "oss-ap-south-1.aliyuncs.com",
				Help:  "Asia Pacific South 1 (Mumbai)",
			}, {
				Value: "oss-eu-central-1.aliyuncs.com",
				Help:  "Central Europe 1 (Frankfurt)",
			}, {
				Value: "oss-eu-west-1.aliyuncs.com",
				Help:  "West Europe (London)",
			}, {
				Value: "oss-me-east-1.aliyuncs.com",
				Help:  "Middle East 1 (Dubai)",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for StackPath Object Storage.",
			Provider: "StackPath",
			Examples: []fs.OptionExample{{
				Value: "s3.us-east-2.stackpathstorage.com",
				Help:  "US East Endpoint",
			}, {
				Value: "s3.us-west-1.stackpathstorage.com",
				Help:  "US West Endpoint",
			}, {
				Value: "s3.eu-central-1.stackpathstorage.com",
				Help:  "EU Endpoint",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
			Examples: []fs.OptionExample{{
				Value:    "objects-us-east-1.dream.io",
				Help:     "Dream Objects endpoint",
				Provider: "Dreamhost",
			}, {
				Value:    "nyc3.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces New York 3",
				Provider: "DigitalOcean",
			}, {
				Value:    "ams3.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces Amsterdam 3",
				Provider: "DigitalOcean",
			}, {
				Value:    "sgp1.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces Singapore 1",
				Provider: "DigitalOcean",
			}, {
				Value:    "s3.wasabisys.com",
				Help:     "Wasabi US East endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.us-west-1.wasabisys.com",
				Help:     "Wasabi US West endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.eu-central-1.wasabisys.com",
				Help:     "Wasabi EU Central endpoint",
				Provider: "Wasabi",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
			}, {
				Value: "us-east-2",
				Help:  "US East (Ohio) Region.",
			}, {
				Value: "us-west-2",
				Help:  "US West (Oregon) Region.",
			}, {
				Value: "us-west-1",
				Help:  "US West (Northern California) Region.",
			}, {
				Value: "ca-central-1",
				Help:  "Canada (Central) Region.",
			}, {
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region.",
			}, {
				Value: "eu-west-2",
				Help:  "EU (London) Region.",
			}, {
				Value: "eu-north-1",
				Help:  "EU (Stockholm) Region.",
			}, {
				Value: "EU",
				Help:  "EU Region.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region.",
			}, {
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region.",
			}, {
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region.",
			}, {
				Value: "ap-northeast-2",
				Help:  "Asia Pacific (Seoul)",
			}, {
				Value: "ap-south-1",
				Help:  "Asia Pacific (Mumbai)",
			}, {
				Value: "ap-east-1",
				Help:  "Asia Pacific (Hong Kong)",
			}, {
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region.",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
			Provider: "IBMCOS",
			Examples: []fs.OptionExample{{
				Value: "us-standard",
				Help:  "US Cross Region Standard",
			}, {
				Value: "us-vault",
				Help:  "US Cross Region Vault",
			}, {
				Value: "us-cold",
				Help:  "US Cross Region Cold",
			}, {
				Value: "us-flex",
				Help:  "US Cross Region Flex",
			}, {
				Value: "us-east-standard",
				Help:  "US East Region Standard",
			}, {
				Value: "us-east-vault",
				Help:  "US East Region Vault",
			}, {
				Value: "us-east-cold",
				Help:  "US East Region Cold",
			}, {
				Value: "us-east-flex",
				Help:  "US East Region Flex",
			}, {
				Value: "us-south-standard",
				Help:  "US South Region Standard",
			}, {
				Value: "us-south-vault",
				Help:  "US South Region Vault",
			}, {
				Value: "us-south-cold",
				Help:  "US South Region Cold",
			}, {
				Value: "us-south-flex",
				Help:  "US South Region Flex",
			}, {
				Value: "eu-standard",
				Help:  "EU Cross Region Standard",
			}, {
				Value: "eu-vault",
				Help:  "EU Cross Region Vault",
			}, {
				Value: "eu-cold",
				Help:  "EU Cross Region Cold",
			}, {
				Value: "eu-flex",
				Help:  "EU Cross Region Flex",
			}, {
				Value: "eu-gb-standard",
				Help:  "Great Britain Standard",
			}, {
				Value: "eu-gb-vault",
				Help:  "Great Britain Vault",
			}, {
				Value: "eu-gb-cold",
				Help:  "Great Britain Cold",
			}, {
				Value: "eu-gb-flex",
				Help:  "Great Britain Flex",
			}, {
				Value: "ap-standard",
				Help:  "APAC Standard",
			}, {
				Value: "ap-vault",
				Help:  "APAC Vault",
			}, {
				Value: "ap-cold",
				Help:  "APAC Cold",
			}, {
				Value: "ap-flex",
				Help:  "APAC Flex",
			}, {
				Value: "mel01-standard",
				Help:  "Melbourne Standard",
			}, {
				Value: "mel01-vault",
				Help:  "Melbourne Vault",
			}, {
				Value: "mel01-cold",
				Help:  "Melbourne Cold",
			}, {
				Value: "mel01-flex",
				Help:  "Melbourne Flex",
			}, {
				Value: "tor01-standard",
				Help:  "Toronto Standard",
			}, {
				Value: "tor01-vault",
				Help:  "Toronto Vault",
			}, {
				Value: "tor01-cold",
				Help:  "Toronto Cold",
			}, {
				Value: "tor01-flex",
				Help:  "Toronto Flex",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
		}, {
			Name: "acl",
			Help: `Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
			Examples: []fs.OptionExample{{
				Value:    "private",
				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
				Provider: "!IBMCOS",
			}, {
				Value:    "public-read",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
				Provider: "!IBMCOS",
			}, {
				Value:    "public-read-write",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
				Provider: "!IBMCOS",
			}, {
				Value:    "authenticated-read",
				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
				Provider: "!IBMCOS",
			}, {
				Value:    "bucket-owner-read",
				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
				Provider: "!IBMCOS",
			}, {
				Value:    "bucket-owner-full-control",
				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
				Provider: "!IBMCOS",
			}, {
				Value:    "private",
				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
				Provider: "IBMCOS",
			}, {
				Value:    "public-read",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
				Provider: "IBMCOS",
			}, {
				Value:    "public-read-write",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
				Provider: "IBMCOS",
			}, {
				Value:    "authenticated-read",
				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
				Provider: "IBMCOS",
			}},
		}, {
			Name: "bucket_acl",
			// Grammar fix: "applied when only when" -> "applied only when".
			Help: `Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
			}, {
				Value: "public-read",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
			}, {
				Value: "public-read-write",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
			}, {
				Value: "authenticated-read",
				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
			}},
		}, {
			Name:     "server_side_encryption",
			Help:     "The server-side encryption algorithm used when storing this object in S3.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "None",
			}, {
				Value: "AES256",
				Help:  "AES256",
			}, {
				Value: "aws:kms",
				Help:  "aws:kms",
			}},
		}, {
			Name:     "sse_kms_key_id",
			Help:     "If using KMS ID you must provide the ARN of Key.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "None",
			}, {
				Value: "arn:aws:kms:us-east-1:*",
				Help:  "arn:aws:kms:*",
			}},
		}, {
			Name:     "storage_class",
			Help:     "The storage class to use when storing new objects in S3.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "REDUCED_REDUNDANCY",
				Help:  "Reduced redundancy storage class",
			}, {
				Value: "STANDARD_IA",
				Help:  "Standard Infrequent Access storage class",
			}, {
				Value: "ONEZONE_IA",
				Help:  "One Zone Infrequent Access storage class",
			}, {
				Value: "GLACIER",
				Help:  "Glacier storage class",
			}, {
				Value: "DEEP_ARCHIVE",
				Help:  "Glacier Deep Archive storage class",
			}, {
				Value: "INTELLIGENT_TIERING",
				Help:  "Intelligent-Tiering storage class",
			}},
		}, {
			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
			Name:     "storage_class",
			Help:     "The storage class to use when storing new objects in OSS.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "GLACIER",
				Help:  "Archive storage mode.",
			}, {
				Value: "STANDARD_IA",
				Help:  "Infrequent access storage mode.",
			}},
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
			Default:  minChunkSize,
			Advanced: true,
		}, {
			Name: "copy_cutoff",
			Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  fs.SizeSuffix(maxSizeForCopy),
			Advanced: true,
		}, {
			Name:     "disable_checksum",
			Help:     "Don't store MD5 checksum with object metadata",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "session_token",
			Help:     "An AWS session token",
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  4,
			Advanced: true,
		}, {
			Name: "force_path_style",
			Help: `If true use path style access if false use virtual hosted style.
If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
			Default:  true,
			Advanced: true,
		}, {
			Name: "v2_auth",
			Help: `If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "use_accelerate_endpoint",
			Provider: "AWS",
			Help: `If true use the AWS S3 accelerated endpoint.
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "leave_parts_on_error",
			Provider: "AWS",
			Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "list_chunk",
			Help: `Size of listing chunk (response list for each ListObject S3 request).
This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
Most services truncate the response list to 1000 objects even if requested more than that.
In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
In Ceph, this can be increased with the "rgw list buckets max chunk" option.
`,
			Default:  1000,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Any UTF-8 character is valid in a key, however it can't handle
			// invalid UTF-8 and / have a special meaning.
			//
			// The SDK can't seem to handle uploading files called '.'
			//
			// FIXME would be nice to add
			// - initial / encoding
			// - doubled / encoding
			// - trailing / encoding
			// so that AWS keys are always valid file names
			Default: encoder.EncodeInvalidUtf8 |
				encoder.EncodeSlash |
				encoder.EncodeDot,
		}, {
			Name:     "memory_pool_flush_time",
			Default:  memoryPoolFlushTime,
			Advanced: true,
			// Grammar fix: "requires" -> "require".
			Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
		}, {
			Name:     "memory_pool_use_mmap",
			Default:  memoryPoolUseMmap,
			Advanced: true,
			Help:     `Whether to use mmap buffers in internal memory pool.`,
		},
		}})
}
// Constants
const (
	metaMtime           = "Mtime"                                // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash         = "Md5chksum"                            // the meta key to store md5hash in
	maxSizeForCopy      = 5 * 1024 * 1024 * 1024                 // The maximum size of object we can COPY
	maxUploadParts      = 10000                                  // maximum allowed number of parts in a multi-part upload
	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)         // smallest allowed multipart chunk size (5MB)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)       // default size above which uploads switch to multipart (200MB)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)  // maximum allowed upload_cutoff (5GB)
	minSleep            = 10 * time.Millisecond                  // In case of error, start at 10ms sleep.
	memoryPoolFlushTime = fs.Duration(time.Minute)               // flush the cached buffers after this long
	memoryPoolUseMmap   = false                                  // default for whether the memory pool uses mmap buffers
)
// Options defines the configuration for this backend.
//
// Each field mirrors one of the options registered in init() via its
// `config` tag; values are parsed from the remote's configuration by
// configstruct.
type Options struct {
	Provider              string               `config:"provider"`
	EnvAuth               bool                 `config:"env_auth"`
	AccessKeyID           string               `config:"access_key_id"`
	SecretAccessKey       string               `config:"secret_access_key"`
	Region                string               `config:"region"`
	Endpoint              string               `config:"endpoint"`
	LocationConstraint    string               `config:"location_constraint"`
	ACL                   string               `config:"acl"`
	BucketACL             string               `config:"bucket_acl"`
	ServerSideEncryption  string               `config:"server_side_encryption"`
	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
	StorageClass          string               `config:"storage_class"`
	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
	DisableChecksum       bool                 `config:"disable_checksum"`
	SessionToken          string               `config:"session_token"`
	UploadConcurrency     int                  `config:"upload_concurrency"`
	ForcePathStyle        bool                 `config:"force_path_style"`
	V2Auth                bool                 `config:"v2_auth"`
	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
	ListChunk             int64                `config:"list_chunk"`
	Enc                   encoder.MultiEncoder `config:"encoding"`
	MemoryPoolFlushTime   fs.Duration          `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap     bool                 `config:"memory_pool_use_mmap"`
}
// Fs represents a remote s3 server.
//
// It holds the parsed options, the AWS SDK connection/session, and
// per-size memory pools for upload buffers (pools is guarded by poolMu).
type Fs struct {
	name          string               // the name of the remote
	root          string               // root of the bucket - ignore all objects above this
	opt           Options              // parsed options
	features      *fs.Features         // optional features
	c             *s3.S3               // the connection to the s3 server
	ses           *session.Session     // the s3 session
	rootBucket    string               // bucket part of root (if any)
	rootDirectory string               // directory part of root (if any)
	cache         *bucket.Cache        // cache for bucket creation status
	pacer         *fs.Pacer            // To pace the API calls
	srv           *http.Client         // a plain http client
	poolMu        sync.Mutex           // mutex protecting memory pools map
	pools         map[int64]*pool.Pool // memory pools, keyed by buffer size
}
// Object describes a s3 object
//
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
type Object struct {
	fs           *Fs                // what this object is part of
	remote       string             // The remote path
	etag         string             // md5sum of the object
	bytes        int64              // size of the object
	lastModified time.Time          // Last modified
	meta         map[string]*string // The object metadata if known - may be nil
	mimeType     string             // MimeType of object - may be ""
	storageClass string             // eg GLACIER
}
// ------------------------------------------------------------
// Name returns the name of the remote (as passed into NewFs).
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root of the remote (as passed into NewFs).
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string, describing the bucket and path
// (if any) this remote points at.
func (f *Fs) String() string {
	// go vet: Sprintf with no format directives - use the literal directly
	if f.rootBucket == "" {
		return "S3 root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
}
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
// retryErrorCodes is a slice of HTTP status codes that we will retry.
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	500, // Internal Server Error - "We encountered an internal error. Please try again."
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
// S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
//
// shouldRetry reports whether err warrants retrying the request,
// returning the (unmodified) error alongside.  As a side effect it may
// update the region for the root bucket on a 301 response.
func (f *Fs) shouldRetry(err error) (bool, error) {
	// If this is an awserr object, try and extract more useful information to determine if we should retry
	if awsError, ok := err.(awserr.Error); ok {
		// Simple case, check the original embedded error in case it's generically retryable
		if fserrors.ShouldRetry(awsError.OrigErr()) {
			return true, err
		}
		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// 301 if wrong region for bucket - can only update if running from a bucket
			if f.rootBucket != "" {
				if reqErr.StatusCode() == http.StatusMovedPermanently {
					urfbErr := f.updateRegionForBucket(f.rootBucket)
					if urfbErr != nil {
						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
						return false, err
					}
					// Region updated successfully - retry against the new region
					return true, err
				}
			}
			// Retry on the known-retryable HTTP status codes
			for _, e := range retryErrorCodes {
				if reqErr.StatusCode() == e {
					return true, err
				}
			}
		}
	}
	// Ok, not an awserr, check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}
// parsePath parses a remote 'url' into its root by stripping any
// leading and trailing slashes.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root, with both parts run through the configured
// name/path encoder so they are safe to send to the S3 API.
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object's remote path,
// delegating to (*Fs).split.
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
// s3Connection makes a connection to s3.
//
// It builds the credential provider chain, normalizes the
// region/endpoint/path-style options (note: opt is mutated in place),
// constructs the AWS session and client, and optionally installs a v2
// signing handler.  Returns the client, the session, or an error.
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
	// Make the auth
	v := credentials.Value{
		AccessKeyID:     opt.AccessKeyID,
		SecretAccessKey: opt.SecretAccessKey,
		SessionToken:    opt.SessionToken,
	}
	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
	def := defaults.Get()
	def.Config.HTTPClient = lowTimeoutClient
	// first provider to supply a credential set "wins" - order matters
	providers := []credentials.Provider{
		// use static credentials if they're present (checked by provider)
		&credentials.StaticProvider{Value: v},
		// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
		&credentials.EnvProvider{},
		// A SharedCredentialsProvider retrieves credentials
		// from the current user's home directory. It checks
		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
		&credentials.SharedCredentialsProvider{},
		// Pick up IAM role if we're in an ECS task
		defaults.RemoteCredProvider(*def.Config, def.Handlers),
		// Pick up IAM role in case we're on EC2
		&ec2rolecreds.EC2RoleProvider{
			Client: ec2metadata.New(session.New(), &aws.Config{
				HTTPClient: lowTimeoutClient,
			}),
			ExpiryWindow: 3 * time.Minute,
		},
		// Pick up IAM role if we are in EKS
		&stscreds.WebIdentityRoleProvider{
			ExpiryWindow: 3 * time.Minute,
		},
	}
	cred := credentials.NewChainCredentials(providers)
	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case v.AccessKeyID == "" && v.SecretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
		cred = credentials.AnonymousCredentials
	case v.AccessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case v.SecretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	}
	// Neither region nor endpoint configured: use the global endpoint
	if opt.Region == "" && opt.Endpoint == "" {
		opt.Endpoint = "https://s3.amazonaws.com/"
	}
	if opt.Region == "" {
		opt.Region = "us-east-1"
	}
	// These providers (and the accelerate endpoint) need virtual hosted style access
	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
		opt.ForcePathStyle = false
	}
	awsConfig := aws.NewConfig().
		WithMaxRetries(fs.Config.LowLevelRetries).
		WithCredentials(cred).
		WithHTTPClient(fshttp.NewClient(fs.Config)).
		WithS3ForcePathStyle(opt.ForcePathStyle).
		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
	if opt.Region != "" {
		awsConfig.WithRegion(opt.Region)
	}
	if opt.Endpoint != "" {
		awsConfig.WithEndpoint(opt.Endpoint)
	}
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	awsSessionOpts := session.Options{
		Config: *awsConfig,
	}
	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
		awsSessionOpts.Config.Credentials = nil
	}
	ses, err := session.NewSessionWithOptions(awsSessionOpts)
	if err != nil {
		return nil, nil, err
	}
	c := s3.New(ses)
	if opt.V2Auth || opt.Region == "other-v2-signature" {
		fs.Debugf(nil, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
		}
		// Replace the default v4 signer with the v2 signer
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	return c, ses, nil
}
// checkUploadChunkSize validates that cs is no smaller than the
// minimum chunk size allowed for S3 multipart uploads.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs >= minChunkSize {
		return nil
	}
	return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
// setUploadChunkSize updates the chunk size after validating it,
// returning the previous value. On validation failure the chunk size
// is left unchanged.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return
}
// checkUploadCutoff validates that cs does not exceed the maximum
// allowed upload cutoff.
func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs <= maxUploadCutoff {
		return nil
	}
	return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
// setUploadCutoff updates the upload cutoff after validating it,
// returning the previous value. On validation failure the cutoff is
// left unchanged.
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadCutoff(cs); err != nil {
		return
	}
	old = f.opt.UploadCutoff
	f.opt.UploadCutoff = cs
	return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	// Split the root into bucket name and directory-within-bucket parts
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
//
// It validates the chunk size and upload cutoff options, defaults the
// ACLs, opens the S3 connection and, if the root points at an existing
// object rather than a directory, adjusts the root to the parent and
// returns fs.ErrorIsFile alongside the Fs.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "s3: chunk size")
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "s3: upload cutoff")
	}
	// Default the object ACL and let the bucket ACL inherit it
	if opt.ACL == "" {
		opt.ACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = opt.ACL
	}
	c, ses, err := s3Connection(opt)
	if err != nil {
		return nil, err
	}
	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
	// Set pacer retries to 0 because we are relying on SDK retry mechanism.
	// Setting it to 1 because in context of pacer it means 1 attempt.
	pc.SetRetries(1)
	f := &Fs{
		name:  name,
		opt:   *opt,
		c:     c,
		ses:   ses,
		pacer: pc,
		cache: bucket.NewCache(),
		srv:   fshttp.NewClient(fs.Config),
		pools: make(map[int64]*pool.Pool),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
	}).Fill(f)
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		req := s3.HeadObjectInput{
			Bucket: &f.rootBucket,
			Key:    &encodedDirectory,
		}
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.c.HeadObject(&req)
			return f.shouldRetry(err)
		})
		if err == nil {
			// The root is a file - point the Fs at its parent directory
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	// f.listMultipartUploads()
	return f, nil
}
// Return an Object from a path
//
// If info is non-nil it is used to populate the object (avoiding a
// HEAD request); otherwise the metadata is read from S3.
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		if info.LastModified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = *info.LastModified
		}
		o.etag = aws.StringValue(info.ETag)
		o.bytes = aws.Int64Value(info.Size)
		o.storageClass = aws.StringValue(info.StorageClass)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// nil info forces the metadata to be read with a HEAD request
	return f.newObjectWithInfo(ctx, remote, nil)
}
// getBucketLocation queries S3 for the region the given bucket lives
// in, returning the normalized region name.
func (f *Fs) getBucketLocation(bucket string) (string, error) {
	var resp *s3.GetBucketLocationOutput
	req := s3.GetBucketLocationInput{
		Bucket: &bucket,
	}
	err := f.pacer.Call(func() (bool, error) {
		var callErr error
		resp, callErr = f.c.GetBucketLocation(&req)
		return f.shouldRetry(callErr)
	})
	if err != nil {
		return "", err
	}
	return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
// updateRegionForBucket reads the region for the bucket and rebuilds
// the session so that subsequent calls use the correct region.
func (f *Fs) updateRegionForBucket(bucket string) error {
	region, err := f.getBucketLocation(bucket)
	if err != nil {
		return errors.Wrap(err, "reading bucket location failed")
	}
	switch {
	case aws.StringValue(f.c.Config.Endpoint) != "":
		// A fixed endpoint overrides the region, so don't change it
		return errors.Errorf("can't set region to %q as endpoint is set", region)
	case aws.StringValue(f.c.Config.Region) == region:
		return errors.Errorf("region is already %q - not updating", region)
	}
	// Rebuild the connection with the newly discovered region
	previous := f.opt.Region
	f.opt.Region = region
	c, ses, err := s3Connection(&f.opt)
	if err != nil {
		return errors.Wrap(err, "creating new session failed")
	}
	f.c, f.ses = c, ses
	fs.Logf(f, "Switched region to %q from %q", region, previous)
	return nil
}
// listFn is called from list to handle an object. isDirectory is set
// when the entry is a common prefix (directory) rather than an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list lists the objects into the function supplied from
// the bucket and directory supplied. The remote has prefix
// removed from it and if addBucket is set then it adds the
// bucket to the start.
//
// Set recurse to read sub directories
//
// Listings are paged using Marker/NextMarker; directory markers
// (zero-length objects ending in "/") are skipped.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	// An empty delimiter makes the listing recursive
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	var marker *string
	// URL encode the listings so we can use control characters in object names
	// See: https://github.com/aws/aws-sdk-go/issues/1914
	//
	// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
	// it doesn't encode CommonPrefixes.
	// See: https://tracker.ceph.com/issues/41870
	//
	// This does not work under IBM COS also: See https://github.com/rclone/rclone/issues/3345
	// though maybe it does on some versions.
	//
	// This does work with minio but was only added relatively recently
	// https://github.com/minio/minio/pull/7265
	//
	// So we enable only on providers we know supports it properly, all others can retry when a
	// XML Syntax error is detected.
	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
	for {
		// FIXME need to implement ALL loop
		req := s3.ListObjectsInput{
			Bucket:    &bucket,
			Delimiter: &delimiter,
			Prefix:    &directory,
			MaxKeys:   &f.opt.ListChunk,
			Marker:    marker,
		}
		if urlEncodeListings {
			req.EncodingType = aws.String(s3.EncodingTypeUrl)
		}
		var resp *s3.ListObjectsOutput
		var err error
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.c.ListObjectsWithContext(ctx, &req)
			if err != nil && !urlEncodeListings {
				if awsErr, ok := err.(awserr.RequestFailure); ok {
					if origErr := awsErr.OrigErr(); origErr != nil {
						if _, ok := origErr.(*xml.SyntaxError); ok {
							// Retry the listing with URL encoding as there were characters that XML can't encode
							urlEncodeListings = true
							req.EncodingType = aws.String(s3.EncodingTypeUrl)
							fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
							return true, err
						}
					}
				}
			}
			return f.shouldRetry(err)
		})
		if err != nil {
			if awsErr, ok := err.(awserr.RequestFailure); ok {
				if awsErr.StatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			if f.rootBucket == "" {
				// if listing from the root ignore wrong region requests returning
				// empty directory
				if reqErr, ok := err.(awserr.RequestFailure); ok {
					// 301 if wrong region for bucket
					if reqErr.StatusCode() == http.StatusMovedPermanently {
						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
						return nil
					}
				}
			}
			return err
		}
		if !recurse {
			// Emit the common prefixes as directory entries
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix.Prefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix.Prefix
				if urlEncodeListings {
					remote, err = url.QueryUnescape(remote)
					if err != nil {
						fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
						continue
					}
				}
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				if strings.HasSuffix(remote, "/") {
					remote = remote[:len(remote)-1]
				}
				err = fn(remote, &s3.Object{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range resp.Contents {
			remote := aws.StringValue(object.Key)
			if urlEncodeListings {
				remote, err = url.QueryUnescape(remote)
				if err != nil {
					fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
					continue
				}
			}
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			isDirectory := strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size != nil && *object.Size == 0 {
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if !aws.BoolValue(resp.IsTruncated) {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			if len(resp.Contents) == 0 {
				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
			}
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			marker = resp.NextMarker
		}
		if urlEncodeListings {
			// The marker was URL encoded by the server - decode before reuse
			*marker, err = url.QueryUnescape(*marker)
			if err != nil {
				return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
			}
		}
	}
	return nil
}
// itemToDirEntry converts an S3 list item into an fs.DirEntry,
// producing a directory entry when isDirectory is set.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
	if !isDirectory {
		return f.newObjectWithInfo(ctx, remote, object)
	}
	var size int64
	if object.Size != nil {
		size = *object.Size
	}
	return fs.NewDir(remote, time.Time{}).SetSize(size), nil
}
// listDir lists a single directory level, returning the entries found.
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// collect accumulates each listed item as a DirEntry
	collect := func(remote string, object *s3.Object, isDirectory bool) error {
		entry, entryErr := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if entryErr != nil {
			return entryErr
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	}
	if err = f.list(ctx, bucket, directory, prefix, addBucket, false, collect); err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}
// listBuckets lists all the buckets visible to this account as
// directory entries.
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	var resp *s3.ListBucketsOutput
	req := s3.ListBucketsInput{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.c.ListBucketsWithContext(ctx, &req)
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	for _, b := range resp.Buckets {
		name := f.opt.Enc.ToStandardName(aws.StringValue(b.Name))
		// Listing the buckets proves each one exists
		f.cache.MarkOK(name)
		entries = append(entries, fs.NewDir(name, aws.TimeValue(b.CreationDate)))
	}
	return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket != "" {
		return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
	}
	// No bucket, so this is the root: only a bucket listing makes sense
	if directory != "" {
		return nil, fs.ErrorListBucketRequired
	}
	return f.listBuckets(ctx)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists a single bucket, feeding entries to the helper
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		// Listing from the root: list all buckets, then recurse into each
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	// Flush sends any remaining buffered entries to the callback
	return list.Flush()
}
// Put the Object into the bucket
//
// The temporary Object is named o rather than fs so the local does not
// shadow the imported fs package (shadowing made the package
// inaccessible inside this function and confused readers).
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put handles a negative size by switching to a multipart upload
	return f.Put(ctx, in, src, options...)
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
	req := s3.HeadBucketInput{
		Bucket: &bucket,
	}
	headErr := f.pacer.Call(func() (bool, error) {
		_, err := f.c.HeadBucketWithContext(ctx, &req)
		return f.shouldRetry(err)
	})
	if headErr == nil {
		// HEAD succeeded so the bucket is there
		return true, nil
	}
	// A 404 means the bucket definitely doesn't exist
	if reqErr, ok := headErr.(awserr.RequestFailure); ok && reqErr.StatusCode() == http.StatusNotFound {
		return false, nil
	}
	// Any other failure: existence unknown, propagate the error
	return false, headErr
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// Only the bucket part matters - directories within a bucket
	// don't exist as separate entities in S3
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}
// makeBucket creates the bucket if it doesn't exist
//
// The creation is routed through f.cache.Create so concurrent callers
// don't race to create the same bucket; bucketExists is used to check
// whether creation is needed at all.
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		req := s3.CreateBucketInput{
			Bucket: &bucket,
			ACL:    &f.opt.BucketACL,
		}
		if f.opt.LocationConstraint != "" {
			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
				LocationConstraint: &f.opt.LocationConstraint,
			}
		}
		err := f.pacer.Call(func() (bool, error) {
			_, err := f.c.CreateBucketWithContext(ctx, &req)
			return f.shouldRetry(err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
		}
		// Racing to create an already-owned bucket is not an error
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "BucketAlreadyOwnedByYou" {
				err = nil
			}
		}
		// FIX: previously this returned nil unconditionally, silently
		// swallowing CreateBucket failures; propagate the error instead.
		return err
	}, func() (bool, error) {
		return f.bucketExists(ctx, bucket)
	})
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	// Only the bucket itself can be removed, and only from the root
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		req := s3.DeleteBucketInput{
			Bucket: &bucket,
		}
		delErr := f.pacer.Call(func() (bool, error) {
			_, err := f.c.DeleteBucketWithContext(ctx, &req)
			return f.shouldRetry(err)
		})
		if delErr == nil {
			fs.Infof(f, "Bucket %q deleted", bucket)
		}
		return delErr
	})
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	// Mod times are stored as floating point seconds in object
	// metadata, so nanosecond precision can be round-tripped
	return time.Nanosecond
}
// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
func pathEscape(s string) string {
	escaped := rest.URLPathEscape(s)
	return strings.Replace(escaped, "+", "%2B", -1)
}
// copy does a server side copy
//
// It adds the boiler plate to the req passed in and calls the s3
// method
//
// Copies at or above the CopyCutoff are delegated to copyMultipart.
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
	req.Bucket = &dstBucket
	req.ACL = &f.opt.ACL
	req.Key = &dstPath
	// CopySource must be URL path escaped, including '+'
	source := pathEscape(path.Join(srcBucket, srcPath))
	req.CopySource = &source
	if f.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &f.opt.ServerSideEncryption
	}
	if f.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
	}
	if req.StorageClass == nil && f.opt.StorageClass != "" {
		req.StorageClass = &f.opt.StorageClass
	}
	if srcSize >= int64(f.opt.CopyCutoff) {
		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
	}
	return f.pacer.Call(func() (bool, error) {
		_, err := f.c.CopyObjectWithContext(ctx, req)
		return f.shouldRetry(err)
	})
}
// calculateRange returns an HTTP Range header value ("bytes=start-end")
// covering part partIndex (0-based) of numParts parts, each partSize
// bytes, of an object totalSize bytes long. For the final part of a
// zero-length object the end is left empty, yielding "bytes=0-".
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	begin := partIndex * partSize
	end := ""
	if partIndex != numParts-1 {
		// Interior parts always span a full partSize
		end = strconv.FormatInt(begin+partSize-1, 10)
	} else if totalSize >= 1 {
		// Final part runs to the last byte of the object
		end = strconv.FormatInt(totalSize-1, 10)
	}
	return fmt.Sprintf("bytes=%v-%v", begin, end)
}
// copyMultipart does a server side copy in chunks of f.opt.CopyCutoff
// for objects too big for a single CopyObject call. On any error the
// multipart upload is aborted so parts don't accumulate on the server.
func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
	var cout *s3.CreateMultipartUploadOutput
	if err := f.pacer.Call(func() (bool, error) {
		var err error
		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
			Bucket: &dstBucket,
			Key:    &dstPath,
		})
		return f.shouldRetry(err)
	}); err != nil {
		return err
	}
	uid := cout.UploadId
	defer func() {
		// Abort the upload if anything failed - the error is checked via
		// the named return value err
		if err != nil {
			// We can try to abort the upload, but ignore the error.
			fs.Debugf(nil, "Cancelling multipart copy")
			_ = f.pacer.Call(func() (bool, error) {
				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
					Bucket:       &dstBucket,
					Key:          &dstPath,
					UploadId:     uid,
					RequestPayer: req.RequestPayer,
				})
				return f.shouldRetry(err)
			})
		}
	}()
	partSize := int64(f.opt.CopyCutoff)
	// Round up to get the number of parts covering srcSize
	numParts := (srcSize-1)/partSize + 1
	var parts []*s3.CompletedPart
	for partNum := int64(1); partNum <= numParts; partNum++ {
		if err := f.pacer.Call(func() (bool, error) {
			// Local copy so the retried closure uses a stable value
			partNum := partNum
			uploadPartReq := &s3.UploadPartCopyInput{
				Bucket:          &dstBucket,
				Key:             &dstPath,
				PartNumber:      &partNum,
				UploadId:        uid,
				CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
				// Args copy from req
				CopySource:                     req.CopySource,
				CopySourceIfMatch:              req.CopySourceIfMatch,
				CopySourceIfModifiedSince:      req.CopySourceIfModifiedSince,
				CopySourceIfNoneMatch:          req.CopySourceIfNoneMatch,
				CopySourceIfUnmodifiedSince:    req.CopySourceIfUnmodifiedSince,
				CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
				CopySourceSSECustomerKey:       req.CopySourceSSECustomerKey,
				CopySourceSSECustomerKeyMD5:    req.CopySourceSSECustomerKeyMD5,
				RequestPayer:                   req.RequestPayer,
				SSECustomerAlgorithm:           req.SSECustomerAlgorithm,
				SSECustomerKey:                 req.SSECustomerKey,
				SSECustomerKeyMD5:              req.SSECustomerKeyMD5,
			}
			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
			if err != nil {
				return f.shouldRetry(err)
			}
			parts = append(parts, &s3.CompletedPart{
				PartNumber: &partNum,
				ETag:       uout.CopyPartResult.ETag,
			})
			return false, nil
		}); err != nil {
			return err
		}
	}
	// All parts copied - complete the multipart upload
	return f.pacer.Call(func() (bool, error) {
		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
			Bucket: &dstBucket,
			Key:    &dstPath,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
			RequestPayer: req.RequestPayer,
			UploadId:     uid,
		})
		return f.shouldRetry(err)
	})
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	if err := f.makeBucket(ctx, dstBucket); err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()
	req := s3.CopyObjectInput{
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
	}
	if err := f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size()); err != nil {
		return nil, err
	}
	// Re-read the object so the returned Object has fresh metadata
	return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// S3 ETags are MD5 sums for non-multipart uploads
	return hash.Set(hash.MD5)
}
// getMemoryPool returns a buffer pool for buffers of the given size,
// creating it on first use. Pools are cached per size under poolMu.
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
	f.poolMu.Lock()
	defer f.poolMu.Unlock()
	_, ok := f.pools[size]
	if !ok {
		f.pools[size] = pool.New(
			time.Duration(f.opt.MemoryPoolFlushTime),
			// FIX: was int(f.opt.ChunkSize), which handed out wrong-sized
			// buffers whenever the requested size differed from the chunk
			// size (e.g. when uploadMultipart enlarges partSize for big
			// files) - the pool must allocate buffers of the requested size.
			int(size),
			f.opt.UploadConcurrency*fs.Config.Transfers,
			f.opt.MemoryPoolUseMmap,
		)
	}
	return f.pools[size]
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
// Return a string version
//
// Safe to call on a nil receiver, returning "<nil>".
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
// matchMd5 matches a lowercase hex MD5 sum (32 hex digits); used to
// detect whether an ETag is a plain MD5 (it isn't for multipart uploads)
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the Md5sum of an object returning a lowercase hex string
//
// If the ETag is not a plain MD5 (e.g. the object was uploaded
// multipart) the MD5 stored in the object metadata is used instead; ""
// is returned when no MD5 is available.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	// Named etag rather than "hash" so the local doesn't shadow the
	// imported hash package.
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
		err := o.readMetaData(ctx)
		if err != nil {
			return "", err
		}
		if md5sum, ok := o.meta[metaMD5Hash]; ok {
			// Metadata stores the MD5 base64 encoded - convert to hex
			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
			if err != nil {
				return "", err
			}
			etag = hex.EncodeToString(md5sumBytes)
		} else {
			etag = ""
		}
	}
	return etag, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// A non-nil o.meta marks the metadata as already loaded, so this is a
// no-op on subsequent calls.
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.meta != nil {
		return nil
	}
	bucket, bucketPath := o.split()
	req := s3.HeadObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	var resp *s3.HeadObjectOutput
	err = o.fs.pacer.Call(func() (bool, error) {
		var err error
		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		// Translate 404 into the canonical not-found error
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			if awsErr.StatusCode() == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	var size int64
	// Ignore missing Content-Length assuming it is 0
	// Some versions of ceph do this due their apache proxies
	if resp.ContentLength != nil {
		size = *resp.ContentLength
	}
	o.etag = aws.StringValue(resp.ETag)
	o.bytes = size
	o.meta = resp.Metadata
	if o.meta == nil {
		o.meta = map[string]*string{}
	}
	o.storageClass = aws.StringValue(resp.StorageClass)
	if resp.LastModified == nil {
		// NOTE(review): err is always nil at this point, so the %v prints
		// <nil> - the message is only informational
		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}
	o.mimeType = aws.StringValue(resp.ContentType)
	return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	if fs.Config.UseServerModTime {
		return o.lastModified
	}
	if err := o.readMetaData(ctx); err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	// Prefer the mtime stored in the metadata when present
	d, ok := o.meta[metaMtime]
	if !ok || d == nil {
		// fs.Debugf(o, "No metadata")
		return o.lastModified
	}
	modTime, err := swift.FloatStringToTime(*d)
	if err != nil {
		fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}
// SetModTime sets the modification time of the local fs object
//
// The mtime lives in the object metadata, so updating it requires a
// server side copy of the object onto itself with replaced metadata.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData(ctx)
	if err != nil {
		return err
	}
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
	// Can't update metadata here, so return this error to force a recopy
	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
		return fs.ErrorCantSetModTime
	}
	// Copy the object to itself to update the metadata
	bucket, bucketPath := o.split()
	req := s3.CopyObjectInput{
		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
		Metadata:          o.meta,
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
	}
	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}
// Open an object for read
//
// Range and seek options are translated into an HTTP Range header;
// other mandatory options are logged as unsupported.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bucket, bucketPath := o.split()
	req := s3.GetObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	// Clamp any range options to the object size
	fs.FixRangeOption(options, o.bytes)
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	var resp *s3.GetObjectOutput
	err = o.fs.pacer.Call(func() (bool, error) {
		var err error
		resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
		return o.fs.shouldRetry(err)
	})
	// Give a better error for objects archived in Glacier
	if err, ok := err.(awserr.RequestFailure); ok {
		if err.Code() == "InvalidObjectState" {
			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
		}
	}
	if err != nil {
		return nil, err
	}
	// Caller is responsible for closing the body
	return resp.Body, nil
}
// warnStreamUpload ensures the streaming-upload maximum-size warning is
// only logged once per process
var warnStreamUpload sync.Once
// uploadMultipart uploads in to the object as an S3 multipart upload.
//
// size may be -1 for streams of unknown length. Parts are uploaded
// concurrently (bounded by UploadConcurrency) using pooled buffers; on
// failure the multipart upload is aborted unless LeavePartsOnError is
// set.
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
	f := o.fs

	// make concurrency machinery
	concurrency := f.opt.UploadConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	tokens := pacer.NewTokenDispenser(concurrency)

	// calculate size of parts
	partSize := int(f.opt.ChunkSize)

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
	// 48GB which seems like a not too unreasonable limit.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
		})
	} else {
		// Adjust partSize until the number of parts is small enough.
		if size/int64(partSize) >= maxUploadParts {
			// Calculate partition size rounded up to the nearest MB
			partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
		}
	}

	memPool := f.getMemoryPool(int64(partSize))

	var cout *s3.CreateMultipartUploadOutput
	err = f.pacer.Call(func() (bool, error) {
		var err error
		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
			Bucket:               req.Bucket,
			ACL:                  req.ACL,
			Key:                  req.Key,
			ContentType:          req.ContentType,
			Metadata:             req.Metadata,
			ServerSideEncryption: req.ServerSideEncryption,
			SSEKMSKeyId:          req.SSEKMSKeyId,
			StorageClass:         req.StorageClass,
		})
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to initialise")
	}
	uid := cout.UploadId

	// Abort the upload on failure (checked via named return err) unless
	// the user asked to keep the parts for a later resume
	defer func() {
		if o.fs.opt.LeavePartsOnError {
			return
		}
		if err != nil {
			// We can try to abort the upload, but ignore the error.
			fs.Debugf(o, "Cancelling multipart upload")
			errCancel := f.pacer.Call(func() (bool, error) {
				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
					Bucket:       req.Bucket,
					Key:          req.Key,
					UploadId:     uid,
					RequestPayer: req.RequestPayer,
				})
				return f.shouldRetry(err)
			})
			if errCancel != nil {
				fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
			}
		}
	}()

	var (
		g, gCtx  = errgroup.WithContext(ctx)
		finished = false
		partsMu  sync.Mutex // to protect parts
		parts    []*s3.CompletedPart
		off      int64
	)

	for partNum := int64(1); !finished; partNum++ {
		// Get a block of memory from the pool and token which limits concurrency.
		tokens.Get()
		buf := memPool.Get()

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			break
		}

		// Read the chunk
		var n int
		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
		if err == io.EOF {
			if n == 0 && partNum != 1 { // end if no data and if not first chunk
				break
			}
			finished = true
		} else if err != nil {
			return errors.Wrap(err, "multipart upload failed to read source")
		}
		buf = buf[:n]

		// Local copy of the loop variable for the goroutine below
		partNum := partNum
		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
		off += int64(n)
		g.Go(func() (err error) {
			partLength := int64(len(buf))

			// create checksum of buffer for integrity checking
			md5sumBinary := md5.Sum(buf)
			md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

			err = f.pacer.Call(func() (bool, error) {
				uploadPartReq := &s3.UploadPartInput{
					Body:                 bytes.NewReader(buf),
					Bucket:               req.Bucket,
					Key:                  req.Key,
					PartNumber:           &partNum,
					UploadId:             uid,
					ContentMD5:           &md5sum,
					ContentLength:        &partLength,
					RequestPayer:         req.RequestPayer,
					SSECustomerAlgorithm: req.SSECustomerAlgorithm,
					SSECustomerKey:       req.SSECustomerKey,
					SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
				}
				uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
				if err != nil {
					if partNum <= int64(concurrency) {
						return f.shouldRetry(err)
					}
					// retry all chunks once have done the first batch
					return true, err
				}
				partsMu.Lock()
				parts = append(parts, &s3.CompletedPart{
					PartNumber: &partNum,
					ETag:       uout.ETag,
				})
				partsMu.Unlock()
				return false, nil
			})

			// return the memory and token
			memPool.Put(buf[:partSize])
			tokens.Put()

			if err != nil {
				return errors.Wrap(err, "multipart upload failed to upload part")
			}
			return nil
		})
	}
	err = g.Wait()
	if err != nil {
		return err
	}

	// sort the completed parts by part number
	sort.Slice(parts, func(i, j int) bool {
		return *parts[i].PartNumber < *parts[j].PartNumber
	})

	err = f.pacer.Call(func() (bool, error) {
		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
			Bucket: req.Bucket,
			Key:    req.Key,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
			RequestPayer: req.RequestPayer,
			UploadId:     uid,
		})
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalise")
	}
	return nil
}
// Update the Object from in with modTime and size
//
// Small objects (below upload_cutoff, with a known size) are uploaded
// with a single presigned PUT request; larger or unknown-sized objects
// go through the multipart upload path.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)
	size := src.Size()

	// Multipart is needed for streaming (unknown size) or large files
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	// read the md5sum if available
	// - for non multipart
	//    - so we can add a ContentMD5
	// - for multipart provided checksums aren't disabled
	//    - so we can add the md5sum in the metadata as metaMD5Hash
	var md5sum string
	if !multipart || !o.fs.opt.DisableChecksum {
		hash, err := src.Hash(ctx, hash.MD5)
		if err == nil && matchMd5.MatchString(hash) {
			hashBytes, err := hex.DecodeString(hash)
			if err == nil {
				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
				if multipart {
					metadata[metaMD5Hash] = &md5sum
				}
			}
		}
	}

	// Guess the content type
	mimeType := fs.MimeType(ctx, src)
	req := s3.PutObjectInput{
		Bucket:      &bucket,
		ACL:         &o.fs.opt.ACL,
		Key:         &bucketPath,
		ContentType: &mimeType,
		Metadata:    metadata,
	}
	if md5sum != "" {
		req.ContentMD5 = &md5sum
	}
	if o.fs.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
	}
	if o.fs.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
	}
	if o.fs.opt.StorageClass != "" {
		req.StorageClass = &o.fs.opt.StorageClass
	}
	if multipart {
		err = o.uploadMultipart(ctx, &req, size, in)
		if err != nil {
			return err
		}
	} else {
		// Create the request
		putObj, _ := o.fs.c.PutObjectRequest(&req)

		// Sign it so we can upload using a presigned request.
		//
		// Note the SDK doesn't currently support streaming to
		// PutObject so we'll use this work-around.
		url, headers, err := putObj.PresignRequest(15 * time.Minute)
		if err != nil {
			return errors.Wrap(err, "s3 upload: sign request")
		}

		if o.fs.opt.V2Auth && headers == nil {
			headers = putObj.HTTPRequest.Header
		}

		// Set request to nil if empty so as not to make chunked encoding
		if size == 0 {
			in = nil
		}

		// create the vanilla http request
		httpReq, err := http.NewRequest("PUT", url, in)
		if err != nil {
			return errors.Wrap(err, "s3 upload: new request")
		}
		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext

		// set the headers we signed and the length
		httpReq.Header = headers
		httpReq.ContentLength = size

		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err := o.fs.srv.Do(httpReq)
			if err != nil {
				return o.fs.shouldRetry(err)
			}
			body, err := rest.ReadBody(resp)
			if err != nil {
				return o.fs.shouldRetry(err)
			}
			// FIX: accept the whole 2xx class (200-299 inclusive) as
			// success - the previous check used "< 299" which wrongly
			// excluded status 299
			if resp.StatusCode >= 200 && resp.StatusCode < 300 {
				return false, nil
			}
			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
		})
		if err != nil {
			return err
		}
	}

	// Read the metadata from the newly created object
	o.meta = nil // wipe old metadata
	err = o.readMetaData(ctx)
	return err
}
// Remove deletes the object from the remote.
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	delReq := s3.DeleteObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	// Issue the delete through the pacer so rate-limit errors are retried
	return o.fs.pacer.Call(func() (bool, error) {
		_, delErr := o.fs.c.DeleteObjectWithContext(ctx, &delReq)
		return o.fs.shouldRetry(delErr)
	})
}
// MimeType returns the content type of the object if known, "" otherwise.
//
// Reading it may require a metadata fetch; failures are logged and
// reported as an empty string rather than an error.
func (o *Object) MimeType(ctx context.Context) string {
	if err := o.readMetaData(ctx); err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}
// SetTier changes the storage class of the object by doing a server
// side copy of the object onto itself with the new class.
func (o *Object) SetTier(tier string) (err error) {
	ctx := context.TODO()
	tier = strings.ToUpper(tier)
	bucket, bucketPath := o.split()
	copyReq := s3.CopyObjectInput{
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
		StorageClass:      aws.String(tier),
	}
	// Copy the object onto itself to rewrite its storage class
	if err = o.fs.copy(ctx, &copyReq, bucket, bucketPath, bucket, bucketPath, o.bytes); err != nil {
		return err
	}
	o.storageClass = tier
	return nil
}
// GetTier returns the storage class as a string, defaulting to
// "STANDARD" when none has been recorded for the object.
func (o *Object) GetTier() string {
	if tier := o.storageClass; tier != "" {
		return tier
	}
	return "STANDARD"
}
// Check the interfaces are satisfied
//
// These compile-time assertions make the build fail if *Fs or *Object
// ever stop implementing one of the optional interfaces listed here.
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
	_ fs.GetTierer   = &Object{}
	_ fs.SetTierer   = &Object{}
)
backend/s3: replace deprecated session.New() with session.NewSession()
// Package s3 provides an interface to Amazon S3 object storage
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
// Register with Fs
//
// This registers the "s3" backend with rclone together with the full
// set of configuration options (provider-specific options are filtered
// by their Provider field).
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "s3",
		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
			Help: "Choose your S3 provider.",
			Examples: []fs.OptionExample{{
				Value: "AWS",
				Help:  "Amazon Web Services (AWS) S3",
			}, {
				Value: "Alibaba",
				Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
			}, {
				Value: "Ceph",
				Help:  "Ceph Object Storage",
			}, {
				Value: "DigitalOcean",
				Help:  "Digital Ocean Spaces",
			}, {
				Value: "Dreamhost",
				Help:  "Dreamhost DreamObjects",
			}, {
				Value: "IBMCOS",
				Help:  "IBM COS S3",
			}, {
				Value: "Minio",
				Help:  "Minio Object Storage",
			}, {
				Value: "Netease",
				Help:  "Netease Object Storage (NOS)",
			}, {
				Value: "StackPath",
				Help:  "StackPath Object Storage",
			}, {
				Value: "Wasabi",
				Help:  "Wasabi Object Storage",
			}, {
				Value: "Other",
				Help:  "Any other S3 compatible provider",
			}},
		}, {
			Name:    "env_auth",
			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter AWS credentials in the next step",
			}, {
				Value: "true",
				Help:  "Get AWS credentials from the environment (env vars or IAM)",
			}},
		}, {
			Name: "access_key_id",
			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "secret_access_key",
			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name:     "region",
			Help:     "Region to connect to.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "us-east-1",
				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
			}, {
				Value: "us-east-2",
				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
			}, {
				Value: "us-west-2",
				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
			}, {
				Value: "us-west-1",
				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
			}, {
				Value: "ca-central-1",
				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
			}, {
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
			}, {
				Value: "eu-west-2",
				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
			}, {
				Value: "eu-north-1",
				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
			}, {
				Value: "eu-central-1",
				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
			}, {
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
			}, {
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
			}, {
				Value: "ap-northeast-2",
				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
			}, {
				Value: "ap-south-1",
				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
			}, {
				// FIX: was "Asia Patific" - typo in user-facing help text
				Value: "ap-east-1",
				Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
			}, {
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
			}},
		}, {
			Name:     "region",
			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
			Provider: "!AWS,Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
			}, {
				Value: "other-v2-signature",
				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
			Provider: "AWS",
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
			Provider: "IBMCOS",
			Examples: []fs.OptionExample{{
				Value: "s3-api.us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Endpoint",
			}, {
				Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Dallas Endpoint",
			}, {
				Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region Washington DC Endpoint",
			}, {
				Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
				Help:  "US Cross Region San Jose Endpoint",
			}, {
				Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Private Endpoint",
			}, {
				Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Dallas Private Endpoint",
			}, {
				Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region Washington DC Private Endpoint",
			}, {
				Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
				Help:  "US Cross Region San Jose Private Endpoint",
			}, {
				Value: "s3.us-east.objectstorage.softlayer.net",
				Help:  "US Region East Endpoint",
			}, {
				Value: "s3.us-east.objectstorage.service.networklayer.com",
				Help:  "US Region East Private Endpoint",
			}, {
				Value: "s3.us-south.objectstorage.softlayer.net",
				Help:  "US Region South Endpoint",
			}, {
				Value: "s3.us-south.objectstorage.service.networklayer.com",
				Help:  "US Region South Private Endpoint",
			}, {
				Value: "s3.eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Endpoint",
			}, {
				Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Frankfurt Endpoint",
			}, {
				Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Milan Endpoint",
			}, {
				Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
				Help:  "EU Cross Region Amsterdam Endpoint",
			}, {
				Value: "s3.eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Private Endpoint",
			}, {
				Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Frankfurt Private Endpoint",
			}, {
				Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Milan Private Endpoint",
			}, {
				Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
				Help:  "EU Cross Region Amsterdam Private Endpoint",
			}, {
				Value: "s3.eu-gb.objectstorage.softlayer.net",
				Help:  "Great Britain Endpoint",
			}, {
				Value: "s3.eu-gb.objectstorage.service.networklayer.com",
				Help:  "Great Britain Private Endpoint",
			}, {
				Value: "s3.ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Endpoint",
			}, {
				Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Tokyo Endpoint",
			}, {
				Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional HongKong Endpoint",
			}, {
				Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
				Help:  "APAC Cross Regional Seoul Endpoint",
			}, {
				Value: "s3.ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Private Endpoint",
			}, {
				Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Tokyo Private Endpoint",
			}, {
				Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional HongKong Private Endpoint",
			}, {
				Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
				Help:  "APAC Cross Regional Seoul Private Endpoint",
			}, {
				Value: "s3.mel01.objectstorage.softlayer.net",
				Help:  "Melbourne Single Site Endpoint",
			}, {
				Value: "s3.mel01.objectstorage.service.networklayer.com",
				Help:  "Melbourne Single Site Private Endpoint",
			}, {
				Value: "s3.tor01.objectstorage.softlayer.net",
				Help:  "Toronto Single Site Endpoint",
			}, {
				Value: "s3.tor01.objectstorage.service.networklayer.com",
				Help:  "Toronto Single Site Private Endpoint",
			}},
		}, {
			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
			Name:     "endpoint",
			Help:     "Endpoint for OSS API.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "oss-cn-hangzhou.aliyuncs.com",
				Help:  "East China 1 (Hangzhou)",
			}, {
				Value: "oss-cn-shanghai.aliyuncs.com",
				Help:  "East China 2 (Shanghai)",
			}, {
				Value: "oss-cn-qingdao.aliyuncs.com",
				Help:  "North China 1 (Qingdao)",
			}, {
				Value: "oss-cn-beijing.aliyuncs.com",
				Help:  "North China 2 (Beijing)",
			}, {
				Value: "oss-cn-zhangjiakou.aliyuncs.com",
				Help:  "North China 3 (Zhangjiakou)",
			}, {
				Value: "oss-cn-huhehaote.aliyuncs.com",
				Help:  "North China 5 (Huhehaote)",
			}, {
				Value: "oss-cn-shenzhen.aliyuncs.com",
				Help:  "South China 1 (Shenzhen)",
			}, {
				Value: "oss-cn-hongkong.aliyuncs.com",
				Help:  "Hong Kong (Hong Kong)",
			}, {
				Value: "oss-us-west-1.aliyuncs.com",
				Help:  "US West 1 (Silicon Valley)",
			}, {
				Value: "oss-us-east-1.aliyuncs.com",
				Help:  "US East 1 (Virginia)",
			}, {
				Value: "oss-ap-southeast-1.aliyuncs.com",
				Help:  "Southeast Asia Southeast 1 (Singapore)",
			}, {
				Value: "oss-ap-southeast-2.aliyuncs.com",
				Help:  "Asia Pacific Southeast 2 (Sydney)",
			}, {
				Value: "oss-ap-southeast-3.aliyuncs.com",
				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
			}, {
				Value: "oss-ap-southeast-5.aliyuncs.com",
				Help:  "Asia Pacific Southeast 5 (Jakarta)",
			}, {
				Value: "oss-ap-northeast-1.aliyuncs.com",
				Help:  "Asia Pacific Northeast 1 (Japan)",
			}, {
				Value: "oss-ap-south-1.aliyuncs.com",
				Help:  "Asia Pacific South 1 (Mumbai)",
			}, {
				Value: "oss-eu-central-1.aliyuncs.com",
				Help:  "Central Europe 1 (Frankfurt)",
			}, {
				Value: "oss-eu-west-1.aliyuncs.com",
				Help:  "West Europe (London)",
			}, {
				Value: "oss-me-east-1.aliyuncs.com",
				Help:  "Middle East 1 (Dubai)",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for StackPath Object Storage.",
			Provider: "StackPath",
			Examples: []fs.OptionExample{{
				Value: "s3.us-east-2.stackpathstorage.com",
				Help:  "US East Endpoint",
			}, {
				Value: "s3.us-west-1.stackpathstorage.com",
				Help:  "US West Endpoint",
			}, {
				Value: "s3.eu-central-1.stackpathstorage.com",
				Help:  "EU Endpoint",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
			Examples: []fs.OptionExample{{
				Value:    "objects-us-east-1.dream.io",
				Help:     "Dream Objects endpoint",
				Provider: "Dreamhost",
			}, {
				Value:    "nyc3.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces New York 3",
				Provider: "DigitalOcean",
			}, {
				Value:    "ams3.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces Amsterdam 3",
				Provider: "DigitalOcean",
			}, {
				Value:    "sgp1.digitaloceanspaces.com",
				Help:     "Digital Ocean Spaces Singapore 1",
				Provider: "DigitalOcean",
			}, {
				Value:    "s3.wasabisys.com",
				Help:     "Wasabi US East endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.us-west-1.wasabisys.com",
				Help:     "Wasabi US West endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.eu-central-1.wasabisys.com",
				Help:     "Wasabi EU Central endpoint",
				Provider: "Wasabi",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
			}, {
				Value: "us-east-2",
				Help:  "US East (Ohio) Region.",
			}, {
				Value: "us-west-2",
				Help:  "US West (Oregon) Region.",
			}, {
				Value: "us-west-1",
				Help:  "US West (Northern California) Region.",
			}, {
				Value: "ca-central-1",
				Help:  "Canada (Central) Region.",
			}, {
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region.",
			}, {
				Value: "eu-west-2",
				Help:  "EU (London) Region.",
			}, {
				Value: "eu-north-1",
				Help:  "EU (Stockholm) Region.",
			}, {
				Value: "EU",
				Help:  "EU Region.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region.",
			}, {
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region.",
			}, {
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region.",
			}, {
				Value: "ap-northeast-2",
				Help:  "Asia Pacific (Seoul)",
			}, {
				Value: "ap-south-1",
				Help:  "Asia Pacific (Mumbai)",
			}, {
				Value: "ap-east-1",
				Help:  "Asia Pacific (Hong Kong)",
			}, {
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region.",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
			Provider: "IBMCOS",
			Examples: []fs.OptionExample{{
				Value: "us-standard",
				Help:  "US Cross Region Standard",
			}, {
				Value: "us-vault",
				Help:  "US Cross Region Vault",
			}, {
				Value: "us-cold",
				Help:  "US Cross Region Cold",
			}, {
				Value: "us-flex",
				Help:  "US Cross Region Flex",
			}, {
				Value: "us-east-standard",
				Help:  "US East Region Standard",
			}, {
				Value: "us-east-vault",
				Help:  "US East Region Vault",
			}, {
				Value: "us-east-cold",
				Help:  "US East Region Cold",
			}, {
				Value: "us-east-flex",
				Help:  "US East Region Flex",
			}, {
				Value: "us-south-standard",
				Help:  "US South Region Standard",
			}, {
				Value: "us-south-vault",
				Help:  "US South Region Vault",
			}, {
				Value: "us-south-cold",
				Help:  "US South Region Cold",
			}, {
				Value: "us-south-flex",
				Help:  "US South Region Flex",
			}, {
				Value: "eu-standard",
				Help:  "EU Cross Region Standard",
			}, {
				Value: "eu-vault",
				Help:  "EU Cross Region Vault",
			}, {
				Value: "eu-cold",
				Help:  "EU Cross Region Cold",
			}, {
				Value: "eu-flex",
				Help:  "EU Cross Region Flex",
			}, {
				Value: "eu-gb-standard",
				Help:  "Great Britain Standard",
			}, {
				Value: "eu-gb-vault",
				Help:  "Great Britain Vault",
			}, {
				Value: "eu-gb-cold",
				Help:  "Great Britain Cold",
			}, {
				Value: "eu-gb-flex",
				Help:  "Great Britain Flex",
			}, {
				Value: "ap-standard",
				Help:  "APAC Standard",
			}, {
				Value: "ap-vault",
				Help:  "APAC Vault",
			}, {
				Value: "ap-cold",
				Help:  "APAC Cold",
			}, {
				Value: "ap-flex",
				Help:  "APAC Flex",
			}, {
				Value: "mel01-standard",
				Help:  "Melbourne Standard",
			}, {
				Value: "mel01-vault",
				Help:  "Melbourne Vault",
			}, {
				Value: "mel01-cold",
				Help:  "Melbourne Cold",
			}, {
				Value: "mel01-flex",
				Help:  "Melbourne Flex",
			}, {
				Value: "tor01-standard",
				Help:  "Toronto Standard",
			}, {
				Value: "tor01-vault",
				Help:  "Toronto Vault",
			}, {
				Value: "tor01-cold",
				Help:  "Toronto Cold",
			}, {
				Value: "tor01-flex",
				Help:  "Toronto Flex",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
		}, {
			Name: "acl",
			Help: `Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
			Examples: []fs.OptionExample{{
				Value:    "private",
				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
				Provider: "!IBMCOS",
			}, {
				Value:    "public-read",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
				Provider: "!IBMCOS",
			}, {
				Value:    "public-read-write",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
				Provider: "!IBMCOS",
			}, {
				Value:    "authenticated-read",
				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
				Provider: "!IBMCOS",
			}, {
				Value:    "bucket-owner-read",
				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
				Provider: "!IBMCOS",
			}, {
				Value:    "bucket-owner-full-control",
				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
				Provider: "!IBMCOS",
			}, {
				Value:    "private",
				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
				Provider: "IBMCOS",
			}, {
				Value:    "public-read",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
				Provider: "IBMCOS",
			}, {
				Value:    "public-read-write",
				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
				Provider: "IBMCOS",
			}, {
				Value:    "authenticated-read",
				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
				Provider: "IBMCOS",
			}},
		}, {
			// FIX: help text said "applied when only when creating buckets"
			Name: "bucket_acl",
			Help: `Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
			}, {
				Value: "public-read",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
			}, {
				Value: "public-read-write",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
			}, {
				Value: "authenticated-read",
				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
			}},
		}, {
			Name:     "server_side_encryption",
			Help:     "The server-side encryption algorithm used when storing this object in S3.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "None",
			}, {
				Value: "AES256",
				Help:  "AES256",
			}, {
				Value: "aws:kms",
				Help:  "aws:kms",
			}},
		}, {
			Name:     "sse_kms_key_id",
			Help:     "If using KMS ID you must provide the ARN of Key.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "None",
			}, {
				Value: "arn:aws:kms:us-east-1:*",
				Help:  "arn:aws:kms:*",
			}},
		}, {
			Name:     "storage_class",
			Help:     "The storage class to use when storing new objects in S3.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "REDUCED_REDUNDANCY",
				Help:  "Reduced redundancy storage class",
			}, {
				Value: "STANDARD_IA",
				Help:  "Standard Infrequent Access storage class",
			}, {
				Value: "ONEZONE_IA",
				Help:  "One Zone Infrequent Access storage class",
			}, {
				Value: "GLACIER",
				Help:  "Glacier storage class",
			}, {
				Value: "DEEP_ARCHIVE",
				Help:  "Glacier Deep Archive storage class",
			}, {
				Value: "INTELLIGENT_TIERING",
				Help:  "Intelligent-Tiering storage class",
			}},
		}, {
			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
			Name:     "storage_class",
			Help:     "The storage class to use when storing new objects in OSS.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "GLACIER",
				Help:  "Archive storage mode.",
			}, {
				Value: "STANDARD_IA",
				Help:  "Infrequent access storage mode.",
			}},
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
			Default:  minChunkSize,
			Advanced: true,
		}, {
			Name: "copy_cutoff",
			Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  fs.SizeSuffix(maxSizeForCopy),
			Advanced: true,
		}, {
			Name:     "disable_checksum",
			Help:     "Don't store MD5 checksum with object metadata",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "session_token",
			Help:     "An AWS session token",
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  4,
			Advanced: true,
		}, {
			Name: "force_path_style",
			Help: `If true use path style access if false use virtual hosted style.
If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
			Default:  true,
			Advanced: true,
		}, {
			Name: "v2_auth",
			Help: `If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "use_accelerate_endpoint",
			Provider: "AWS",
			Help: `If true use the AWS S3 accelerated endpoint.
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "leave_parts_on_error",
			Provider: "AWS",
			Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "list_chunk",
			Help: `Size of listing chunk (response list for each ListObject S3 request).
This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
Most services truncate the response list to 1000 objects even if requested more than that.
In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
In Ceph, this can be increased with the "rgw list buckets max chunk" option.
`,
			Default:  1000,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Any UTF-8 character is valid in a key, however it can't handle
			// invalid UTF-8 and / have a special meaning.
			//
			// The SDK can't seem to handle uploading files called '.'
			//
			// FIXME would be nice to add
			// - initial / encoding
			// - doubled / encoding
			// - trailing / encoding
			// so that AWS keys are always valid file names
			Default: encoder.EncodeInvalidUtf8 |
				encoder.EncodeSlash |
				encoder.EncodeDot,
		}, {
			// FIX: help text said "Uploads which requires ... (f.e multipart)"
			Name:     "memory_pool_flush_time",
			Default:  memoryPoolFlushTime,
			Advanced: true,
			Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
		}, {
			Name:     "memory_pool_use_mmap",
			Default:  memoryPoolUseMmap,
			Advanced: true,
			Help:     `Whether to use mmap buffers in internal memory pool.`,
		},
		}})
}
// Constants
const (
	metaMtime           = "Mtime"                                 // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash         = "Md5chksum"                             // the meta key to store md5hash in
	maxSizeForCopy      = 5 * 1024 * 1024 * 1024                  // The maximum size of object we can COPY
	maxUploadParts      = 10000                                   // maximum allowed number of parts in a multi-part upload
	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)          // 5 MiB - smallest chunk size used for multipart uploads
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)        // 200 MiB - default threshold for switching to multipart
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)   // 5 GiB - upper bound for the upload_cutoff option
	minSleep            = 10 * time.Millisecond                   // In case of error, start at 10ms sleep.
	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
	memoryPoolUseMmap   = false                    // default for the memory_pool_use_mmap option
)
// Options defines the configuration for this backend.
//
// Each field is populated from the config key named in its `config`
// struct tag; the help text for each key lives in the fs.Option list
// registered in init.
type Options struct {
	Provider              string               `config:"provider"`
	EnvAuth               bool                 `config:"env_auth"`
	AccessKeyID           string               `config:"access_key_id"`
	SecretAccessKey       string               `config:"secret_access_key"`
	Region                string               `config:"region"`
	Endpoint              string               `config:"endpoint"`
	LocationConstraint    string               `config:"location_constraint"`
	ACL                   string               `config:"acl"`
	BucketACL             string               `config:"bucket_acl"`
	ServerSideEncryption  string               `config:"server_side_encryption"`
	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
	StorageClass          string               `config:"storage_class"`
	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
	DisableChecksum       bool                 `config:"disable_checksum"`
	SessionToken          string               `config:"session_token"`
	UploadConcurrency     int                  `config:"upload_concurrency"`
	ForcePathStyle        bool                 `config:"force_path_style"`
	V2Auth                bool                 `config:"v2_auth"`
	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
	ListChunk             int64                `config:"list_chunk"`
	Enc                   encoder.MultiEncoder `config:"encoding"`
	MemoryPoolFlushTime   fs.Duration          `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap     bool                 `config:"memory_pool_use_mmap"`
}
// Fs represents a remote s3 server
type Fs struct {
	name          string               // the name of the remote
	root          string               // root of the bucket - ignore all objects above this
	opt           Options              // parsed options
	features      *fs.Features         // optional features
	c             *s3.S3               // the connection to the s3 server
	ses           *session.Session     // the s3 session
	rootBucket    string               // bucket part of root (if any)
	rootDirectory string               // directory part of root (if any)
	cache         *bucket.Cache        // cache for bucket creation status
	pacer         *fs.Pacer            // To pace the API calls
	srv           *http.Client         // a plain http client
	poolMu        sync.Mutex           // mutex protecting memory pools map
	pools         map[int64]*pool.Pool // memory pools (guarded by poolMu); presumably keyed by buffer size - confirm at use sites
}
// Object describes a s3 object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs                // what this object is part of
	remote       string             // The remote path
	etag         string             // md5sum of the object (may be a non-md5 value for multipart uploads - see Hash)
	bytes        int64              // size of the object
	lastModified time.Time          // Last modified
	meta         map[string]*string // The object metadata if known - may be nil
	mimeType     string             // MimeType of object - may be ""
	storageClass string             // eg GLACIER
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string for debugging / display.
func (f *Fs) String() string {
	if f.rootBucket == "" {
		// fmt.Sprintf with no formatting verbs is redundant (staticcheck S1039)
		return "S3 root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
// retryErrorCodes is a slice of HTTP status codes that we will retry.
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	500, // Internal Server Error - "We encountered an internal error. Please try again."
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
// S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func (f *Fs) shouldRetry(err error) (bool, error) {
	// If this is an awserr object, try and extract more useful information to determine if we should retry
	if awsError, ok := err.(awserr.Error); ok {
		// Simple case, check the original embedded error in case it's generically retryable
		if fserrors.ShouldRetry(awsError.OrigErr()) {
			return true, err
		}
		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// 301 if wrong region for bucket - can only update if running from a bucket
			if f.rootBucket != "" {
				if reqErr.StatusCode() == http.StatusMovedPermanently {
					// Try to switch the session to the bucket's real region,
					// then retry the request in that region.
					urfbErr := f.updateRegionForBucket(f.rootBucket)
					if urfbErr != nil {
						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
						return false, err
					}
					return true, err
				}
			}
			// Retry on the known-retryable HTTP status codes (500, 503)
			for _, e := range retryErrorCodes {
				if reqErr.StatusCode() == e {
					return true, err
				}
			}
		}
	}
	// Ok, not an awserr, check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}
// parsePath parses a remote 'url' into a root by stripping any
// leading and trailing "/" characters.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	fullPath := path.Join(f.root, rootRelativePath)
	bucketName, bucketPath = bucket.Split(fullPath)
	bucketName = f.opt.Enc.FromStandardName(bucketName)
	bucketPath = f.opt.Enc.FromStandardPath(bucketPath)
	return bucketName, bucketPath
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	bucket, bucketPath = o.fs.split(o.remote)
	return
}
// s3Connection makes a connection to s3
//
// It builds the credential chain, applies region/endpoint defaults and
// returns the S3 client together with the session it was built from.
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
	// Make the auth
	v := credentials.Value{
		AccessKeyID:     opt.AccessKeyID,
		SecretAccessKey: opt.SecretAccessKey,
		SessionToken:    opt.SessionToken,
	}
	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
	def := defaults.Get()
	def.Config.HTTPClient = lowTimeoutClient
	// start a new AWS session
	awsSession, err := session.NewSession()
	if err != nil {
		return nil, nil, errors.Wrap(err, "NewSession")
	}
	// first provider to supply a credential set "wins"
	providers := []credentials.Provider{
		// use static credentials if they're present (checked by provider)
		&credentials.StaticProvider{Value: v},
		// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
		&credentials.EnvProvider{},
		// A SharedCredentialsProvider retrieves credentials
		// from the current user's home directory. It checks
		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
		&credentials.SharedCredentialsProvider{},
		// Pick up IAM role if we're in an ECS task
		defaults.RemoteCredProvider(*def.Config, def.Handlers),
		// Pick up IAM role in case we're on EC2
		&ec2rolecreds.EC2RoleProvider{
			Client: ec2metadata.New(awsSession, &aws.Config{
				HTTPClient: lowTimeoutClient,
			}),
			ExpiryWindow: 3 * time.Minute,
		},
		// Pick up IAM role if we are in EKS
		&stscreds.WebIdentityRoleProvider{
			ExpiryWindow: 3 * time.Minute,
		},
	}
	cred := credentials.NewChainCredentials(providers)
	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case v.AccessKeyID == "" && v.SecretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
		cred = credentials.AnonymousCredentials
	case v.AccessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case v.SecretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	}
	// Default to the main AWS endpoint/region when neither is configured
	if opt.Region == "" && opt.Endpoint == "" {
		opt.Endpoint = "https://s3.amazonaws.com/"
	}
	if opt.Region == "" {
		opt.Region = "us-east-1"
	}
	// These providers (and the accelerate endpoint) require virtual-hosted style requests
	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
		opt.ForcePathStyle = false
	}
	awsConfig := aws.NewConfig().
		WithMaxRetries(fs.Config.LowLevelRetries).
		WithCredentials(cred).
		WithHTTPClient(fshttp.NewClient(fs.Config)).
		WithS3ForcePathStyle(opt.ForcePathStyle).
		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
	if opt.Region != "" {
		awsConfig.WithRegion(opt.Region)
	}
	if opt.Endpoint != "" {
		awsConfig.WithEndpoint(opt.Endpoint)
	}
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	awsSessionOpts := session.Options{
		Config: *awsConfig,
	}
	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
		awsSessionOpts.Config.Credentials = nil
	}
	ses, err := session.NewSessionWithOptions(awsSessionOpts)
	if err != nil {
		return nil, nil, err
	}
	c := s3.New(ses)
	if opt.V2Auth || opt.Region == "other-v2-signature" {
		fs.Debugf(nil, "Using v2 auth")
		// Replace the SDK's v4 signer with a legacy v2 signer
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	return c, ses, nil
}
// checkUploadChunkSize returns an error if cs is below the minimum
// supported multipart chunk size.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs >= minChunkSize {
		return nil
	}
	return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
// setUploadChunkSize validates cs and installs it as the chunk size,
// returning the previous value. On error the chunk size is unchanged.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return old, err
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return old, nil
}
// checkUploadCutoff returns an error if cs exceeds the maximum
// supported upload cutoff.
func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs <= maxUploadCutoff {
		return nil
	}
	return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
// setUploadCutoff validates cs and installs it as the upload cutoff,
// returning the previous value. On error the cutoff is unchanged.
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadCutoff(cs); err != nil {
		return old, err
	}
	old = f.opt.UploadCutoff
	f.opt.UploadCutoff = cs
	return old, nil
}
// setRoot changes the root of the Fs, keeping the derived
// rootBucket/rootDirectory fields in sync.
func (f *Fs) setRoot(root string) {
	trimmed := parsePath(root)
	f.root = trimmed
	f.rootBucket, f.rootDirectory = bucket.Split(trimmed)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "s3: chunk size")
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "s3: upload cutoff")
	}
	// Default the object ACL, and default the bucket ACL to the object ACL
	if opt.ACL == "" {
		opt.ACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = opt.ACL
	}
	c, ses, err := s3Connection(opt)
	if err != nil {
		return nil, err
	}
	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
	// Set pacer retries to 0 because we are relying on SDK retry mechanism.
	// Setting it to 1 because in context of pacer it means 1 attempt.
	pc.SetRetries(1)
	f := &Fs{
		name:  name,
		opt:   *opt,
		c:     c,
		ses:   ses,
		pacer: pc,
		cache: bucket.NewCache(),
		srv:   fshttp.NewClient(fs.Config),
		pools: make(map[int64]*pool.Pool),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
	}).Fill(f)
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the (bucket,path) names an object rather than a
		// directory - if so point the Fs at its parent and return
		// fs.ErrorIsFile as the fs.Fs contract requires.
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		req := s3.HeadObjectInput{
			Bucket: &f.rootBucket,
			Key:    &encodedDirectory,
		}
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.c.HeadObject(&req)
			return f.shouldRetry(err)
		})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	// f.listMultipartUploads()
	return f, nil
}
// Return an Object from a path
//
// If info is non-nil it is used to fill in the object's size, etag,
// storage class and last modified time without a further API call;
// otherwise a HEAD request is made via readMetaData.
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		if info.LastModified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = *info.LastModified
		}
		o.etag = aws.StringValue(info.ETag)
		o.bytes = aws.Int64Value(info.Size)
		o.storageClass = aws.StringValue(info.StorageClass)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}
// getBucketLocation gets the bucket's region via the GetBucketLocation
// API, normalized to a canonical region name.
func (f *Fs) getBucketLocation(bucket string) (string, error) {
	req := s3.GetBucketLocationInput{
		Bucket: &bucket,
	}
	var resp *s3.GetBucketLocationOutput
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.c.GetBucketLocation(&req)
		return f.shouldRetry(err)
	})
	if err != nil {
		return "", err
	}
	return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
//
// Errors if an explicit endpoint is configured (the region would be
// ignored) or if the session is already in the right region.
func (f *Fs) updateRegionForBucket(bucket string) error {
	region, err := f.getBucketLocation(bucket)
	if err != nil {
		return errors.Wrap(err, "reading bucket location failed")
	}
	if aws.StringValue(f.c.Config.Endpoint) != "" {
		return errors.Errorf("can't set region to %q as endpoint is set", region)
	}
	if aws.StringValue(f.c.Config.Region) == region {
		return errors.Errorf("region is already %q - not updating", region)
	}
	// Make a new session with the new region
	oldRegion := f.opt.Region
	f.opt.Region = region
	c, ses, err := s3Connection(&f.opt)
	if err != nil {
		return errors.Wrap(err, "creating new session failed")
	}
	// Swap in the new client/session for subsequent calls
	f.c = c
	f.ses = ses
	fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
	return nil
}
// listFn is called from list to handle an object or (when isDirectory
// is true) a directory.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list lists the objects into the function supplied from
// the bucket and directory supplied. The remote has prefix
// removed from it and if addBucket is set then it adds the
// bucket to the start.
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	// With no delimiter the listing recurses into "subdirectories"
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	var marker *string
	// URL encode the listings so we can use control characters in object names
	// See: https://github.com/aws/aws-sdk-go/issues/1914
	//
	// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
	// it doesn't encode CommonPrefixes.
	// See: https://tracker.ceph.com/issues/41870
	//
	// This does not work under IBM COS also: See https://github.com/rclone/rclone/issues/3345
	// though maybe it does on some versions.
	//
	// This does work with minio but was only added relatively recently
	// https://github.com/minio/minio/pull/7265
	//
	// So we enable only on providers we know supports it properly, all others can retry when a
	// XML Syntax error is detected.
	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
	for {
		// FIXME need to implement ALL loop
		req := s3.ListObjectsInput{
			Bucket:    &bucket,
			Delimiter: &delimiter,
			Prefix:    &directory,
			MaxKeys:   &f.opt.ListChunk,
			Marker:    marker,
		}
		if urlEncodeListings {
			req.EncodingType = aws.String(s3.EncodingTypeUrl)
		}
		var resp *s3.ListObjectsOutput
		var err error
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.c.ListObjectsWithContext(ctx, &req)
			if err != nil && !urlEncodeListings {
				if awsErr, ok := err.(awserr.RequestFailure); ok {
					if origErr := awsErr.OrigErr(); origErr != nil {
						if _, ok := origErr.(*xml.SyntaxError); ok {
							// Retry the listing with URL encoding as there were characters that XML can't encode
							urlEncodeListings = true
							req.EncodingType = aws.String(s3.EncodingTypeUrl)
							fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
							return true, err
						}
					}
				}
			}
			return f.shouldRetry(err)
		})
		if err != nil {
			if awsErr, ok := err.(awserr.RequestFailure); ok {
				if awsErr.StatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			if f.rootBucket == "" {
				// if listing from the root ignore wrong region requests returning
				// empty directory
				if reqErr, ok := err.(awserr.RequestFailure); ok {
					// 301 if wrong region for bucket
					if reqErr.StatusCode() == http.StatusMovedPermanently {
						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
						return nil
					}
				}
			}
			return err
		}
		if !recurse {
			// Non-recursive listing: report each common prefix as a directory
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix.Prefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix.Prefix
				if urlEncodeListings {
					remote, err = url.QueryUnescape(remote)
					if err != nil {
						fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
						continue
					}
				}
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				// Strip the trailing "/" the delimiter listing adds
				if strings.HasSuffix(remote, "/") {
					remote = remote[:len(remote)-1]
				}
				err = fn(remote, &s3.Object{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range resp.Contents {
			remote := aws.StringValue(object.Key)
			if urlEncodeListings {
				remote, err = url.QueryUnescape(remote)
				if err != nil {
					fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
					continue
				}
			}
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			isDirectory := strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size != nil && *object.Size == 0 {
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if !aws.BoolValue(resp.IsTruncated) {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			if len(resp.Contents) == 0 {
				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
			}
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			marker = resp.NextMarker
		}
		if urlEncodeListings {
			// The marker was taken from an encoded listing so decode it
			// before sending it back in the next request
			*marker, err = url.QueryUnescape(*marker)
			if err != nil {
				return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
			}
		}
	}
	return nil
}
// itemToDirEntry converts a list item into a DirEntry - a directory
// entry when isDirectory is set, otherwise an Object.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
	if !isDirectory {
		o, err := f.newObjectWithInfo(ctx, remote, object)
		if err != nil {
			return nil, err
		}
		return o, nil
	}
	var size int64
	if object.Size != nil {
		size = *object.Size
	}
	return fs.NewDir(remote, time.Time{}).SetSize(size), nil
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects and directories
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	req := s3.ListBucketsInput{}
	var resp *s3.ListBucketsOutput
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.c.ListBucketsWithContext(ctx, &req)
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	for _, bucket := range resp.Buckets {
		bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name))
		// Each listed bucket is known to exist
		f.cache.MarkOK(bucketName)
		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
		entries = append(entries, d)
	}
	return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket != "" {
		return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
	}
	// No bucket: only the root ("") may be listed, as a list of buckets
	if directory != "" {
		return nil, fs.ErrorListBucketRequired
	}
	return f.listBuckets(ctx)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one bucket into the helper
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		// No bucket specified - list every bucket, then recurse into each
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	// Flush any remaining entries to the callback
	return list.Flush()
}
// Put the Object into the bucket
//
// The data is uploaded via Object.Update so the returned Object is
// fully populated on success.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	// (renamed from "fs", which shadowed the imported fs package)
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
// Check if the bucket exists
//
// Issues a HEAD request on the bucket - a 404 means "does not exist"
// (returned without error); any other failure is returned as an error.
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
	req := s3.HeadBucketInput{
		Bucket: &bucket,
	}
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.c.HeadBucketWithContext(ctx, &req)
		return f.shouldRetry(err)
	})
	if err == nil {
		return true, nil
	}
	if err, ok := err.(awserr.RequestFailure); ok {
		if err.StatusCode() == http.StatusNotFound {
			return false, nil
		}
	}
	return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucketName, _ := f.split(dir)
	return f.makeBucket(ctx, bucketName)
}
// makeBucket creates the bucket if it doesn't exist
//
// The creation is memoized via f.cache; "BucketAlreadyOwnedByYou" is
// treated as success. Other CreateBucket failures are returned.
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		req := s3.CreateBucketInput{
			Bucket: &bucket,
			ACL:    &f.opt.BucketACL,
		}
		if f.opt.LocationConstraint != "" {
			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
				LocationConstraint: &f.opt.LocationConstraint,
			}
		}
		err := f.pacer.Call(func() (bool, error) {
			_, err := f.c.CreateBucketWithContext(ctx, &req)
			return f.shouldRetry(err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
		}
		// If we already own the bucket the create is a no-op, not a failure
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "BucketAlreadyOwnedByYou" {
				err = nil
			}
		}
		// BUG FIX: this previously returned nil unconditionally, which
		// swallowed every CreateBucket failure (making the
		// BucketAlreadyOwnedByYou handling above dead code). Propagate
		// the error instead.
		return err
	}, func() (bool, error) {
		return f.bucketExists(ctx, bucket)
	})
}
// Rmdir deletes the bucket if the fs is at the root
//
// A dir inside a bucket is a no-op (nothing to delete on S3).
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		req := s3.DeleteBucketInput{
			Bucket: &bucket,
		}
		err := f.pacer.Call(func() (bool, error) {
			_, err := f.c.DeleteBucketWithContext(ctx, &req)
			return f.shouldRetry(err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q deleted", bucket)
		}
		return err
	})
}
// Precision of the remote
//
// Mod times are stored in object metadata with nanosecond precision.
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}
// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
func pathEscape(s string) string {
	escaped := rest.URLPathEscape(s)
	return strings.ReplaceAll(escaped, "+", "%2B")
}
// copy does a server side copy
//
// It adds the boiler plate to the req passed in and calls the s3
// method
//
// Copies at or above f.opt.CopyCutoff are done as multipart copies.
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
	req.Bucket = &dstBucket
	req.ACL = &f.opt.ACL
	req.Key = &dstPath
	// CopySource must be a URL-path-escaped "bucket/key"
	source := pathEscape(path.Join(srcBucket, srcPath))
	req.CopySource = &source
	if f.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &f.opt.ServerSideEncryption
	}
	if f.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
	}
	if req.StorageClass == nil && f.opt.StorageClass != "" {
		req.StorageClass = &f.opt.StorageClass
	}
	if srcSize >= int64(f.opt.CopyCutoff) {
		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
	}
	return f.pacer.Call(func() (bool, error) {
		_, err := f.c.CopyObjectWithContext(ctx, req)
		return f.shouldRetry(err)
	})
}
// calculateRange produces the HTTP byte range ("bytes=start-end") for
// part partIndex (0-based) of numParts when copying totalSize bytes in
// chunks of partSize. The last part's end is clamped to the final byte
// of the source; for a zero-length source the end is left blank
// ("bytes=0-").
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	begin := partIndex * partSize
	end := ""
	switch {
	case partIndex != numParts-1:
		// Full-sized interior part
		end = strconv.FormatInt(begin+partSize-1, 10)
	case totalSize >= 1:
		// Final part - possibly short
		end = strconv.FormatInt(totalSize-1, 10)
	}
	return fmt.Sprintf("bytes=%v-%v", begin, end)
}
// copyMultipart does a server side copy of srcSize bytes in parts of
// f.opt.CopyCutoff using CreateMultipartUpload / UploadPartCopy /
// CompleteMultipartUpload, copying the relevant source/SSE fields from
// req into each part request. On any error the multipart upload is
// aborted (best effort).
func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
	var cout *s3.CreateMultipartUploadOutput
	if err := f.pacer.Call(func() (bool, error) {
		var err error
		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
			Bucket: &dstBucket,
			Key:    &dstPath,
		})
		return f.shouldRetry(err)
	}); err != nil {
		return err
	}
	uid := cout.UploadId
	defer func() {
		// Reads the named return err - fires on any failure below
		if err != nil {
			// We can try to abort the upload, but ignore the error.
			fs.Debugf(nil, "Cancelling multipart copy")
			_ = f.pacer.Call(func() (bool, error) {
				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
					Bucket:       &dstBucket,
					Key:          &dstPath,
					UploadId:     uid,
					RequestPayer: req.RequestPayer,
				})
				return f.shouldRetry(err)
			})
		}
	}()
	partSize := int64(f.opt.CopyCutoff)
	// Ceiling division: number of partSize chunks needed for srcSize bytes
	numParts := (srcSize-1)/partSize + 1
	var parts []*s3.CompletedPart
	for partNum := int64(1); partNum <= numParts; partNum++ {
		if err := f.pacer.Call(func() (bool, error) {
			// Take a copy of the loop variable so the pointers stored in
			// uploadPartReq/parts don't all alias the mutating loop counter
			partNum := partNum
			uploadPartReq := &s3.UploadPartCopyInput{
				Bucket:          &dstBucket,
				Key:             &dstPath,
				PartNumber:      &partNum,
				UploadId:        uid,
				CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
				// Args copy from req
				CopySource:                     req.CopySource,
				CopySourceIfMatch:              req.CopySourceIfMatch,
				CopySourceIfModifiedSince:      req.CopySourceIfModifiedSince,
				CopySourceIfNoneMatch:          req.CopySourceIfNoneMatch,
				CopySourceIfUnmodifiedSince:    req.CopySourceIfUnmodifiedSince,
				CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
				CopySourceSSECustomerKey:       req.CopySourceSSECustomerKey,
				CopySourceSSECustomerKeyMD5:    req.CopySourceSSECustomerKeyMD5,
				RequestPayer:                   req.RequestPayer,
				SSECustomerAlgorithm:           req.SSECustomerAlgorithm,
				SSECustomerKey:                 req.SSECustomerKey,
				SSECustomerKeyMD5:              req.SSECustomerKeyMD5,
			}
			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
			if err != nil {
				return f.shouldRetry(err)
			}
			parts = append(parts, &s3.CompletedPart{
				PartNumber: &partNum,
				ETag:       uout.CopyPartResult.ETag,
			})
			return false, nil
		}); err != nil {
			return err
		}
	}
	return f.pacer.Call(func() (bool, error) {
		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
			Bucket: &dstBucket,
			Key:    &dstPath,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
			RequestPayer: req.RequestPayer,
			UploadId:     uid,
		})
		return f.shouldRetry(err)
	})
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	// Ensure the destination bucket exists before copying into it
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()
	req := s3.CopyObjectInput{
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
	}
	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
	if err != nil {
		return nil, err
	}
	// Re-read the object to pick up the copied metadata
	return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
// getMemoryPool returns a memory pool serving buffers of the given
// size, creating it on first use. Pools are cached per size in f.pools
// under f.poolMu.
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
	f.poolMu.Lock()
	defer f.poolMu.Unlock()
	if _, ok := f.pools[size]; !ok {
		f.pools[size] = pool.New(
			time.Duration(f.opt.MemoryPoolFlushTime),
			// BUG FIX: allocate buffers of the requested size. The map is
			// keyed by size but the pool was built with int(f.opt.ChunkSize)
			// buffers, so any size != ChunkSize got wrongly-sized buffers.
			int(size),
			f.opt.UploadConcurrency*fs.Config.Transfers,
			f.opt.MemoryPoolUseMmap,
		)
	}
	return f.pools[size]
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
// String returns a string version of the Object - the remote path, or
// "<nil>" for a nil Object.
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
// matchMd5 matches a 32-character lowercase hex string (a plain MD5
// ETag, as opposed to the "-"-suffixed ETags of multipart uploads)
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the Md5sum of an object returning a lowercase hex string
//
// If the ETag isn't a plain MD5 (e.g. a multipart upload), it falls
// back to the base64 MD5 stored in the object metadata under
// metaMD5Hash, or "" if that is absent.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	// Renamed local from "hash" which shadowed the imported hash package
	digest := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(digest) {
		err := o.readMetaData(ctx)
		if err != nil {
			return "", err
		}
		if md5sum, ok := o.meta[metaMD5Hash]; ok {
			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
			if err != nil {
				return "", err
			}
			digest = hex.EncodeToString(md5sumBytes)
		} else {
			digest = ""
		}
	}
	return digest, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info (etag, size, storage class, last modified,
// mime type). Returns fs.ErrorObjectNotFound on a 404.
func (o *Object) readMetaData(ctx context.Context) (err error) {
	// Already fetched - meta is the "have we done this" sentinel
	if o.meta != nil {
		return nil
	}
	bucket, bucketPath := o.split()
	req := s3.HeadObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	var resp *s3.HeadObjectOutput
	err = o.fs.pacer.Call(func() (bool, error) {
		var err error
		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			if awsErr.StatusCode() == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	var size int64
	// Ignore missing Content-Length assuming it is 0
	// Some versions of ceph do this due their apache proxies
	if resp.ContentLength != nil {
		size = *resp.ContentLength
	}
	o.etag = aws.StringValue(resp.ETag)
	o.bytes = size
	o.meta = resp.Metadata
	if o.meta == nil {
		o.meta = map[string]*string{}
	}
	o.storageClass = aws.StringValue(resp.StorageClass)
	if resp.LastModified == nil {
		// BUG FIX: this used to log err here, but err is always nil at
		// this point (we returned above on any error), so "%v" printed
		// a meaningless "<nil>".
		fs.Logf(o, "Failed to read last modified from HEAD")
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}
	o.mimeType = aws.StringValue(resp.ContentType)
	return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	if fs.Config.UseServerModTime {
		return o.lastModified
	}
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	// read mtime out of metadata if available
	d, ok := o.meta[metaMtime]
	if !ok || d == nil {
		// fs.Debugf(o, "No metadata")
		return o.lastModified
	}
	// mtime is stored as a floating point epoch string (swift format)
	modTime, err := swift.FloatStringToTime(*d)
	if err != nil {
		fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}
// SetModTime sets the modification time of the local fs object
//
// S3 has no way to update metadata in place, so this re-copies the
// object onto itself with the new mtime in the replacement metadata.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData(ctx)
	if err != nil {
		return err
	}
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
	// Can't update metadata here, so return this error to force a recopy
	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
		return fs.ErrorCantSetModTime
	}
	// Copy the object to itself to update the metadata
	bucket, bucketPath := o.split()
	req := s3.CopyObjectInput{
		ContentType:       aws.String(fs.MimeType(ctx, o)),           // Guess the content type
		Metadata:          o.meta,
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
	}
	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}
// Open an object for read
//
// Range/seek options are honoured by forwarding them to S3 via the
// request's Range header; any other mandatory option is logged as
// unsupported. The caller is responsible for closing the returned
// ReadCloser.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bucket, bucketPath := o.split()
	req := s3.GetObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	// Clamp any RangeOption to the known object size.
	fs.FixRangeOption(options, o.bytes)
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	var resp *s3.GetObjectOutput
	err = o.fs.pacer.Call(func() (bool, error) {
		var err error
		resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
		return o.fs.shouldRetry(err)
	})
	// Give a friendlier error for objects still archived in Glacier.
	// Note the shadowed err here only lives inside the if statement.
	if err, ok := err.(awserr.RequestFailure); ok {
		if err.Code() == "InvalidObjectState" {
			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
		}
	}
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// warnStreamUpload ensures the streaming-upload size-limit warning is
// logged at most once per process.
var warnStreamUpload sync.Once

// uploadMultipart uploads the data in in to the object described by req
// using the S3 multipart upload API.
//
// size may be -1 when the total length is unknown; in that case parts are
// ChunkSize long, capping the file at ChunkSize * maxUploadParts. Parts
// are uploaded concurrently by up to UploadConcurrency workers. On error
// the upload is aborted server-side unless LeavePartsOnError is set.
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
	f := o.fs

	// make concurrency machinery
	concurrency := f.opt.UploadConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	tokens := pacer.NewTokenDispenser(concurrency)

	// calculate size of parts
	partSize := int(f.opt.ChunkSize)

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
	// 48GB which seems like a not too unreasonable limit.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
		})
	} else {
		// Adjust partSize until the number of parts is small enough.
		if size/int64(partSize) >= maxUploadParts {
			// Calculate partition size rounded up to the nearest MB
			partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
		}
	}

	memPool := f.getMemoryPool(int64(partSize))

	var cout *s3.CreateMultipartUploadOutput
	err = f.pacer.Call(func() (bool, error) {
		var err error
		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
			Bucket:               req.Bucket,
			ACL:                  req.ACL,
			Key:                  req.Key,
			ContentType:          req.ContentType,
			Metadata:             req.Metadata,
			ServerSideEncryption: req.ServerSideEncryption,
			SSEKMSKeyId:          req.SSEKMSKeyId,
			StorageClass:         req.StorageClass,
		})
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to initialise")
	}
	uid := cout.UploadId

	// On failure, abort the multipart upload so parts aren't left behind,
	// unless the user asked to keep them via LeavePartsOnError.
	defer func() {
		if o.fs.opt.LeavePartsOnError {
			return
		}
		if err != nil {
			// We can try to abort the upload, but ignore the error.
			fs.Debugf(o, "Cancelling multipart upload")
			errCancel := f.pacer.Call(func() (bool, error) {
				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
					Bucket:       req.Bucket,
					Key:          req.Key,
					UploadId:     uid,
					RequestPayer: req.RequestPayer,
				})
				return f.shouldRetry(err)
			})
			if errCancel != nil {
				fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
			}
		}
	}()

	var (
		g, gCtx  = errgroup.WithContext(ctx)
		finished = false
		partsMu  sync.Mutex // to protect parts
		parts    []*s3.CompletedPart
		off      int64
	)

	for partNum := int64(1); !finished; partNum++ {
		// Get a block of memory from the pool and token which limits concurrency.
		tokens.Get()
		buf := memPool.Get()

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			break
		}

		// Read the chunk
		var n int
		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
		if err == io.EOF {
			if n == 0 && partNum != 1 { // end if no data and if not first chunk
				break
			}
			finished = true
		} else if err != nil {
			return errors.Wrap(err, "multipart upload failed to read source")
		}
		buf = buf[:n]

		partNum := partNum // capture the loop variable for the goroutine below
		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
		off += int64(n)
		g.Go(func() (err error) {
			partLength := int64(len(buf))

			// create checksum of buffer for integrity checking
			md5sumBinary := md5.Sum(buf)
			md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

			err = f.pacer.Call(func() (bool, error) {
				uploadPartReq := &s3.UploadPartInput{
					Body:                 bytes.NewReader(buf),
					Bucket:               req.Bucket,
					Key:                  req.Key,
					PartNumber:           &partNum,
					UploadId:             uid,
					ContentMD5:           &md5sum,
					ContentLength:        &partLength,
					RequestPayer:         req.RequestPayer,
					SSECustomerAlgorithm: req.SSECustomerAlgorithm,
					SSECustomerKey:       req.SSECustomerKey,
					SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
				}
				uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
				if err != nil {
					if partNum <= int64(concurrency) {
						return f.shouldRetry(err)
					}
					// retry all chunks once have done the first batch
					return true, err
				}
				partsMu.Lock()
				parts = append(parts, &s3.CompletedPart{
					PartNumber: &partNum,
					ETag:       uout.ETag,
				})
				partsMu.Unlock()

				return false, nil
			})

			// return the memory and token
			memPool.Put(buf[:partSize])
			tokens.Put()

			if err != nil {
				return errors.Wrap(err, "multipart upload failed to upload part")
			}
			return nil
		})
	}
	err = g.Wait()
	if err != nil {
		return err
	}

	// sort the completed parts by part number
	// (parts may have finished out of order because of the concurrency above)
	sort.Slice(parts, func(i, j int) bool {
		return *parts[i].PartNumber < *parts[j].PartNumber
	})

	err = f.pacer.Call(func() (bool, error) {
		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
			Bucket: req.Bucket,
			Key:    req.Key,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
			RequestPayer: req.RequestPayer,
			UploadId:     uid,
		})
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalise")
	}
	return nil
}
// Update the Object from in with modTime and size
//
// Uses a multipart upload when the size is unknown (negative) or at
// least UploadCutoff, otherwise a single presigned PUT request. On
// success the object's metadata is re-read from the service.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)
	size := src.Size()

	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	// read the md5sum if available
	// - for non multipart
	//    - so we can add a ContentMD5
	// - for multipart provided checksums aren't disabled
	//    - so we can add the md5sum in the metadata as metaMD5Hash
	var md5sum string
	if !multipart || !o.fs.opt.DisableChecksum {
		hash, err := src.Hash(ctx, hash.MD5)
		// Only use the hash if it looks like an MD5; errors here are
		// non-fatal and simply mean we upload without a checksum.
		if err == nil && matchMd5.MatchString(hash) {
			hashBytes, err := hex.DecodeString(hash)
			if err == nil {
				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
				if multipart {
					metadata[metaMD5Hash] = &md5sum
				}
			}
		}
	}

	// Guess the content type
	mimeType := fs.MimeType(ctx, src)
	req := s3.PutObjectInput{
		Bucket:      &bucket,
		ACL:         &o.fs.opt.ACL,
		Key:         &bucketPath,
		ContentType: &mimeType,
		Metadata:    metadata,
	}
	if md5sum != "" {
		req.ContentMD5 = &md5sum
	}
	if o.fs.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
	}
	if o.fs.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
	}
	if o.fs.opt.StorageClass != "" {
		req.StorageClass = &o.fs.opt.StorageClass
	}

	if multipart {
		err = o.uploadMultipart(ctx, &req, size, in)
		if err != nil {
			return err
		}
	} else {

		// Create the request
		putObj, _ := o.fs.c.PutObjectRequest(&req)

		// Sign it so we can upload using a presigned request.
		//
		// Note the SDK doesn't currently support streaming to
		// PutObject so we'll use this work-around.
		url, headers, err := putObj.PresignRequest(15 * time.Minute)
		if err != nil {
			return errors.Wrap(err, "s3 upload: sign request")
		}

		if o.fs.opt.V2Auth && headers == nil {
			headers = putObj.HTTPRequest.Header
		}

		// Set request to nil if empty so as not to make chunked encoding
		if size == 0 {
			in = nil
		}

		// create the vanilla http request
		httpReq, err := http.NewRequest("PUT", url, in)
		if err != nil {
			return errors.Wrap(err, "s3 upload: new request")
		}
		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext

		// set the headers we signed and the length
		httpReq.Header = headers
		httpReq.ContentLength = size

		// Presigned requests mustn't be retried with a consumed body, so
		// use CallNoRetry and let retryable errors surface to the caller.
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err := o.fs.srv.Do(httpReq)
			if err != nil {
				return o.fs.shouldRetry(err)
			}
			body, err := rest.ReadBody(resp)
			if err != nil {
				return o.fs.shouldRetry(err)
			}
			if resp.StatusCode >= 200 && resp.StatusCode < 299 {
				return false, nil
			}
			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
		})
		if err != nil {
			return err
		}
	}

	// Read the metadata from the newly created object
	o.meta = nil // wipe old metadata
	err = o.readMetaData(ctx)
	return err
}
// Remove deletes the object from the remote.
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	input := s3.DeleteObjectInput{
		Bucket: &bucket,
		Key:    &bucketPath,
	}
	return o.fs.pacer.Call(func() (bool, error) {
		_, delErr := o.fs.c.DeleteObjectWithContext(ctx, &input)
		return o.fs.shouldRetry(delErr)
	})
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	if err := o.readMetaData(ctx); err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}
// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
	ctx := context.TODO()
	tier = strings.ToUpper(tier)
	bucket, bucketPath := o.split()
	// Server-side copy the object onto itself with the new storage class,
	// carrying the existing metadata over unchanged.
	req := s3.CopyObjectInput{
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
		StorageClass:      aws.String(tier),
	}
	if err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes); err != nil {
		return err
	}
	o.storageClass = tier
	return nil
}
// GetTier returns storage class as string
func (o *Object) GetTier() string {
	// An empty storage class is reported as the S3 default, STANDARD.
	if class := o.storageClass; class != "" {
		return class
	}
	return "STANDARD"
}
// Check the interfaces are satisfied
//
// These compile-time assertions fail the build if Fs or Object ever stop
// implementing the listed interfaces.
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
	_ fs.GetTierer   = &Object{}
	_ fs.SetTierer   = &Object{}
)
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"path/filepath"
"strings"
"text/template"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog/v2"
)
// These are the comment tags that carry parameters for fitask generation.
const tagName = "kops:fitask"

// extractTag returns the values of any "+kops:fitask" comment tags on the
// given comment lines, or nil if the tag is absent.
func extractTag(comments []string) []string {
	return types.ExtractCommentTags("+", comments)[tagName]
}
// perTypeDef is the Go source template instantiated once per tagged task
// type. It emits JSON unmarshalling boilerplate plus the fi.HasLifecycle,
// fi.HasName and Stringer method implementations for {{.Name}}.
const perTypeDef = `
// {{.Name}}

// JSON marshaling boilerplate
type real{{.Name}} {{.Name}}

// UnmarshalJSON implements conversion to JSON, supporting an alternate specification of the object as a string
func (o *{{.Name}}) UnmarshalJSON(data []byte) error {
	var jsonName string
	if err := json.Unmarshal(data, &jsonName); err == nil {
		o.Name = &jsonName
		return nil
	}

	var r real{{.Name}}
	if err := json.Unmarshal(data, &r); err != nil {
		return err
	}
	*o = {{.Name}}(r)
	return nil
}

var _ fi.HasLifecycle = &{{.Name}}{}

// GetLifecycle returns the Lifecycle of the object, implementing fi.HasLifecycle
func (o *{{.Name}}) GetLifecycle() *fi.Lifecycle {
	return o.Lifecycle
}

// SetLifecycle sets the Lifecycle of the object, implementing fi.SetLifecycle
func (o *{{.Name}}) SetLifecycle(lifecycle fi.Lifecycle) {
	o.Lifecycle = &lifecycle
}

var _ fi.HasName = &{{.Name}}{}

// GetName returns the Name of the object, implementing fi.HasName
func (o *{{.Name}}) GetName() *string {
	return o.Name
}

// SetName sets the Name of the object, implementing fi.SetName
func (o *{{.Name}}) SetName(name string) {
	o.Name = &name
}

// String is the stringer function for the task, producing readable output using fi.TaskAsString
func (o *{{.Name}}) String() string {
	return fi.TaskAsString(o)
}
`
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
	systems := namer.NameSystems{}
	systems["public"] = namer.NewPublicNamer(0)
	systems["private"] = namer.NewPrivateNamer(0)
	systems["raw"] = namer.NewRawNamer("", nil)
	return systems
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
	return "public" // must match a key returned by NameSystems
}
// Packages makes the sets package definition.
//
// For each input package it finds the struct types carrying the
// +kops:fitask comment tag and creates one generator (and hence one
// output file) per tagged type.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
	boilerplate, err := arguments.LoadGoBoilerplate()
	if err != nil {
		klog.Fatalf("Failed loading boilerplate: %v", err)
	}

	inputs := sets.NewString(context.Inputs...)
	packages := generator.Packages{}
	// Generated files get a build tag excluding them from codegen builds,
	// followed by the license boilerplate.
	header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...)

	for i := range inputs {
		klog.V(5).Infof("considering pkg %q", i)
		pkg := context.Universe[i]
		if pkg == nil {
			// If the input had no Go files, for example.
			continue
		}

		// Collect the struct types tagged with +kops:fitask.
		fitasks := map[*types.Type]bool{}
		for _, t := range pkg.Types {
			if t.Kind == types.Struct && len(extractTag(t.CommentLines)) > 0 {
				fitasks[t] = true
			}
		}

		packages = append(packages, &generator.DefaultPackage{
			PackageName: filepath.Base(pkg.Path),
			PackagePath: strings.TrimPrefix(pkg.Path, "k8s.io/kops/"),
			HeaderText:  header,
			GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
				for t := range fitasks {
					generators = append(generators, NewGenFitask(t))
				}
				return generators
			},
			FilterFunc: func(c *generator.Context, t *types.Type) bool {
				return fitasks[t]
			},
		})
	}

	return packages
}
// genFitask generates the fitask boilerplate file for a single task type.
type genFitask struct {
	generator.DefaultGen
	typeToMatch *types.Type // the one type this generator emits code for
}
// NewGenFitask returns a generator that writes the fitask boilerplate for
// t into a file named <lowercased type name>_fitask.go.
func NewGenFitask(t *types.Type) generator.Generator {
	fileName := strings.ToLower(t.Name.Name) + "_fitask"
	return &genFitask{
		DefaultGen:  generator.DefaultGen{OptionalName: fileName},
		typeToMatch: t,
	}
}
// Filter ignores all but one type because we're making a single file per type.
// It reports whether t is the type this generator was created for.
func (g *genFitask) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch }
// Imports returns the imports needed by the generated code: encoding/json
// for the UnmarshalJSON boilerplate and the fi package for the
// Lifecycle/Name helpers in the template.
func (g *genFitask) Imports(c *generator.Context) (imports []string) {
	return []string{
		"encoding/json",
		"k8s.io/kops/upup/pkg/fi",
	}
}
// TypeData is the data passed to the per-type template.
type TypeData struct {
	Name string // bare (unqualified) name of the task type
}
func (g *genFitask) GenerateType(_ *generator.Context, t *types.Type, w io.Writer) error {
tmpl := template.Must(template.New("PerType").Parse(perTypeDef))
d := &TypeData{}
d.Name = t.Name.Name
return tmpl.Execute(w, d)
}
Remove dead code from fitask generator
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"path/filepath"
"strings"
"text/template"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog/v2"
)
// These are the comment tags that carry parameters for fitask generation.
const tagName = "kops:fitask"

// extractTag returns the values of any "+kops:fitask" comment tags on the
// given comment lines, or nil if the tag is absent.
func extractTag(comments []string) []string {
	return types.ExtractCommentTags("+", comments)[tagName]
}
// perTypeDef is the Go source template instantiated once per tagged task
// type. It emits the fi.HasLifecycle, fi.HasName and Stringer method
// implementations for {{.Name}}.
const perTypeDef = `
// {{.Name}}

var _ fi.HasLifecycle = &{{.Name}}{}

// GetLifecycle returns the Lifecycle of the object, implementing fi.HasLifecycle
func (o *{{.Name}}) GetLifecycle() *fi.Lifecycle {
	return o.Lifecycle
}

// SetLifecycle sets the Lifecycle of the object, implementing fi.SetLifecycle
func (o *{{.Name}}) SetLifecycle(lifecycle fi.Lifecycle) {
	o.Lifecycle = &lifecycle
}

var _ fi.HasName = &{{.Name}}{}

// GetName returns the Name of the object, implementing fi.HasName
func (o *{{.Name}}) GetName() *string {
	return o.Name
}

// String is the stringer function for the task, producing readable output using fi.TaskAsString
func (o *{{.Name}}) String() string {
	return fi.TaskAsString(o)
}
`
// NameSystems returns the name system used by the generators in this package.
// The three namers control how gengo renders type names in generated code.
func NameSystems() namer.NameSystems {
	return namer.NameSystems{
		"public":  namer.NewPublicNamer(0),
		"private": namer.NewPrivateNamer(0),
		"raw":     namer.NewRawNamer("", nil),
	}
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
	return "public" // must match a key returned by NameSystems
}
// Packages makes the sets package definition.
//
// For each input package it finds the struct types carrying the
// +kops:fitask comment tag and creates one generator (and hence one
// output file) per tagged type.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
	boilerplate, err := arguments.LoadGoBoilerplate()
	if err != nil {
		klog.Fatalf("Failed loading boilerplate: %v", err)
	}

	inputs := sets.NewString(context.Inputs...)
	packages := generator.Packages{}
	// Generated files get a build tag excluding them from codegen builds,
	// followed by the license boilerplate.
	header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...)

	for i := range inputs {
		klog.V(5).Infof("considering pkg %q", i)
		pkg := context.Universe[i]
		if pkg == nil {
			// If the input had no Go files, for example.
			continue
		}

		// Collect the struct types tagged with +kops:fitask.
		fitasks := map[*types.Type]bool{}
		for _, t := range pkg.Types {
			if t.Kind == types.Struct && len(extractTag(t.CommentLines)) > 0 {
				fitasks[t] = true
			}
		}

		packages = append(packages, &generator.DefaultPackage{
			PackageName: filepath.Base(pkg.Path),
			PackagePath: strings.TrimPrefix(pkg.Path, "k8s.io/kops/"),
			HeaderText:  header,
			GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
				for t := range fitasks {
					generators = append(generators, NewGenFitask(t))
				}
				return generators
			},
			FilterFunc: func(c *generator.Context, t *types.Type) bool {
				return fitasks[t]
			},
		})
	}

	return packages
}
// genFitask generates the fitask boilerplate file for a single task type.
type genFitask struct {
	generator.DefaultGen
	typeToMatch *types.Type // the one type this generator emits code for
}
// NewGenFitask returns a generator that writes the fitask boilerplate for
// t into a file named <lowercased type name>_fitask.go.
func NewGenFitask(t *types.Type) generator.Generator {
	return &genFitask{
		DefaultGen: generator.DefaultGen{
			OptionalName: strings.ToLower(t.Name.Name) + "_fitask",
		},
		typeToMatch: t,
	}
}
// Filter ignores all but one type because we're making a single file per type.
// It reports whether t is the type this generator was created for.
func (g *genFitask) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch }
// Imports returns the imports needed by the generated code: only the fi
// package, for the Lifecycle/Name helpers in the template.
func (g *genFitask) Imports(c *generator.Context) (imports []string) {
	return []string{
		"k8s.io/kops/upup/pkg/fi",
	}
}
// TypeData is the data passed to the per-type template.
type TypeData struct {
	Name string // bare (unqualified) name of the task type
}
// GenerateType renders the fitask boilerplate for type t to w.
func (g *genFitask) GenerateType(_ *generator.Context, t *types.Type, w io.Writer) error {
	data := &TypeData{Name: t.Name.Name}
	tmpl := template.Must(template.New("PerType").Parse(perTypeDef))
	return tmpl.Execute(w, data)
}
|
/*
moves is a convenience package that implements composable Moves to make it
easy to implement common logic. The Base move type is a very simple move that
implements the basic stubs necessary for your straightforward moves to have
minimal boilerplate. Although it's technically optional, a lot of the magic
features throughout the framework depend on some of its base logic, so it's
recommended to always embed it anonymously in your move struct (or embed a
struct that embeds it).
You interact with and configure various move types by implementing interfaces.
Those interfaces are defined in the interfaces subpackage, to make this
package's design more clear.
There are many move types defined. Some are designed to be used directly with
minimal modification; others are powerful move types that are designed to be
sub-classed.
Automatic MoveConfig Generation
Creating MoveConfig's is a necessary part of installing moves on your
GameManager, but it's verbose and error-prone. You need to create a lot of
extra structs, and then remember to provide the right properties in your
config. And to use many of the powerful moves in the moves package, you
need to write a lot of boilerplate methods to integrate correctly.
Finally, you end up repeating yourself often--which makes it a pain if you
change the name of a move.
Take this example:
//boardgame:codegen
type MoveDealInitialCards struct {
moves.DealComponentsUntilPlayerCountReached
}
var moveDealInitialCardsConfig = boardgame.MoveConfig {
Name: "Deal Initial Cards",
Constructor: func() boardgame.Move {
return new(MoveDealInitialCards)
},
}
func (m *MoveDealInitialCards) GameStack(gState boardgame.MutableSubState) boardgame.MutableStack {
return gState.(*gameState).DrawStack
}
func (m *MoveDealInitialCards) PlayerStack(pState boardgame.MutablePlayerState) boardgame.MutableStack {
return pState.(*playerState).Hand
}
func (m *MoveDealInitialCards) TargetCount() int {
return 2
}
func (g *gameDelegate) ConfigureMoves() []boardgame.MoveConfig {
return moves.Add(
&moveDealInitialCardsConfig,
)
}
auto.Config (and its panic-y sibling auto.MustConfig) help reduce this
significantly:
func (g *gameDelegate) ConfigureMoves() []boardgame.MoveConfig {
auto := moves.NewAutoConfigurer(g)
return moves.Add(
auto.MustConfig(
new(moves.DealComponentsUntilPlayerCountReached),
moves.WithGameProperty("DrawStack"),
moves.WithPlayerProperty("Hand"),
moves.WithTargetCount(2),
)
)
}
Basic Usage
AutoConfigurer takes an example struct representing your move, and then a
list of 0 to n interfaces.CustomConfigurationOption. These options are
given a boardgame.PropertyCollection and then add specific properties to
it, and then stash that on the CustomConfiguration property of the
returned MoveTypeConfig. Different move methods will then reach into that
configuration to alter the behavior of moves of that type.
The moves/with package defines a large collection of
CustomConfigurationOption for use with the moves in the moves package.
Moves that are used with AutoConfigurer must satisfy the AutoConfigurableMove
interface, which adds one method: DeriveName() string. AutoConfigurer.Config()
primarily consists of some set up and then using those return values as fields
on the returned MoveConfig. These methods are implemented in moves.Default,
which means that any move structs that embed moves.Default (directly or
indirectly) can be used with AutoConfigurer.
moves.Default does a fair bit of magic in these methods to implement much of
the logic of AutoConfigurer. In general, if you pass a configuration option
(via WithMoveName, for example) then that option will be used for that
method. moves.Default.DeriveName() also will use reflection to automatically
set a struct name like "MoveDealInitialCards" to "Deal Initial Cards". All
of the moves in the moves package will also automatically return
reasonable names for DeriveName(), so in many cases you can use those
structs directly without having to pass WithMoveName().
Other moves in the moves package, like DealCountComponents, will use
configuration, like WithGameProperty(), to power their default GameStack()
method.
All moves in the moves package are designed to return an error from
ValidConfiguration(), which means that if you forgot to pass a required
configuration property (e.g. you don't override GameStack and also don't
provide WithGameProperty), when you try to create NewGameManager() and all
moves' ValidConfiguration() is checked, you'll get an error. This helps
catch mis-configurations during boot time.
Refer to the documentation of the various methods in that package for
their precise behavior and how to configure them.
Idiomatic Move Definition and Installation
AutoConfigurer is at the core of idiomatic definition and installation of
moves, and typically is used for every move you install in your game. The
following paragraphs describe the high-level idioms to follow.
Never create your own MoveConfig objects--it's just another global
variable that clutters up your code and makes it harder to change.
Instead, use AutoConfigurer. There are some rare cases where you do want to
refer to the move by name (and not rely on finicky string-based lookup),
such as when you want an Agent to propose a specific type of move. In
those cases use AutoConfigurer to create the move type config, then save the
resulting config's Name to a global variable that you use elsewhere, and
then pass the created config to moves.Add() (and its cousins)
In general, you should only create a bespoke Move struct in your game if
it is not possible to use one of the off-the-shelf moves from the moves
package, combined with configuration options, to do what you want. In
practice this means that only if you need to override a method on one of
the base moves do you need to create a bespoke struct. This typically
allows you to drastically reduce the number of bespoke move structs your
game defines, saving thousands of lines of code (each bespoke struct also
has hundreds of lines of auto-generated PropertyReader code).
If you do create a bespoke struct, name it like this: "MoveNameOfMyMove",
so that moves.Default's default DeriveName() will give it a reasonable name
automatically (in this example, "Name Of My Move").
In many cases if you subclass powerful moves like DealCountComponents the
default HelpText() value is sufficient (especially if it's a FixUp
move that won't ever be seen by players). In other cases, WithHelpText()
is often the only config option you will pass to AutoConfigurer.
If your move will be a FixUp move that doesn't subclass one of the more
advanced fix up moves (like RoundRobin or DealCountComponents), embed
moves.FixUp into your struct. That will cause IsFixUp to return the right
value even without using WithIsFixUp--because WithIsFixUp is easy to
forget given that it's often in a different file. In almost all cases if
you use WithIsFixUp you should simply embed moves.FixUp instead.
AutoConfigurer.MustConfig is like AutoConfigurer.Config, but instead of returning a MoveConfig
and an error, it simply returns a MoveConfig--and panics if it would have
returned an error. Since your GameDelegate's ConfigureMoves() is typically
called during the boot-up sequence of your game, it is safe to use
AutoConfigurer.MustConfig exclusively, which saves many lines of boilerplate error
checking.
Configure Move Helpers
Your Game Delegate's ConfigureMoves() []boardgame.MoveConfig is where the
action happens for installing moves. In practice you can do whatever you want
in there as long as you return a list of MoveConfigs. In practice you often
use AutoConfigurer (see section above). If you have a very simple game type you
might not need to do anything special.
If, however, your game type is complicated enough to need the notion of
phases, then you'll probably want to use some of the convenience methods for
installing moves: Combine, Add, AddForPhase, and AddOrderedForPhase. These
methods make sure that enough information is stored for the Legal() methods of
those moves to know when the move is legal. Technically they're just
convenience wrappers (each describes the straightforward things it's doing),
but in practice they're the best way to do it. See the tutorial in the main
package for more.
Move Type Hierarchy
The moves in this package are all defined as a hierarchy of structs that
anonymously embed higher level structs, overriding, modifying, and extending
the behavior of the struct they embed.
You can use many of these moves directly, using the configuration options like
WithSourceProperty() to configure which properties they should operate on.
Alternatively, you can embed them in your own move struct, overriding or
tweaking their behavior, perhaps adding an additional check to their Legal
check.
For convenience, here's the type hierarchy, with a brief description of the
diff each has on the one above it. See the documentation for each struct for
more.
* base.Move - The simplest, unopinionated stub of a move, from the base package.
* Default - Substantial base logic, including base property overriding for with and especially in Legal() around move progressions and phases.
* Done - A simple move that does nothing in its Apply and has no extra Legal() logic, meaning it's primarily a non-fix-up move applied by a player to move out of a move progression.
* CurrentPlayer - Defaults to the GameDelegate.CurrentPlayerIndex, and only lets the move be made if it's on behalf of that player.
* FixUp - Overrides IsFixUp() to always return true, making the move eligible for base.GameDelegate.ProposeFixUpMove.
* NoOp - A move that does nothing. Useful for specific edge cases of MoveProgressionMatching, and also to signal to AddOrderedForPhase that the lack of a StartPhase move was intentional.
* Increment - Increments the provided SourceProperty by Amount. Useful to run automatically at a given spot in a move progression.
* ShuffleStack - Shuffles the stack at SourceProperty. Useful to run automatically at a certain time in a MoveProgression.
* StartPhase - Calls BeforeLeavePhase, then BeforeEnterPhase, then SetCurrentPhase. Generally you have one of these at the end of an AddOrderedForPhase.
* FinishTurn - Checks if State.CurrentPlayer().TurnDone() is true, and if so increments CurrentPlayerIndex to the next player, calling playerState.ResetForTurnEnd() and then ResetForTurnStart.
* FixUpMulti - Overrides AllowMultipleInProgression() to true, meaning multiple of the same move are legal to apply in a row according to Default.Legal()
* DefaultComponent - Looks at each component in SourceStack() and sees which one's method of Legal() returns nil, selecting that component for you to operate on in your own Apply.
* ApplyUntil - Legal() returns nil only once ConditionMet() returns nil.
* ApplyUntilCount - Supplies a ConditionMet that returns true when Count() is the same as TargetCount().
* ApplyCountTimes - Supplies a Count() that is the number of times this move has been applied in a row.
* MoveCountComponents - Moves components from SourceStack to DestinationStack until TargetCount have been moved.
* MoveComponentsUntilCountReached - Overrides Count to be how many components are in DestinationStack
* MoveComponentsUntilCountLeft - Overrides Count to be how many components are left in SourceStack
* MoveAllComponents - Overrides TargetCount to be 0
* RoundRobin - Applies around and around for each player until PlayerConditionMet returns true for all. You must embed RoundRobinGameStateProperties in your GameState, as all of these moves store state in properties.
* RoundRobinNumRounds - Also checks that no more than NumRounds() around have happened
* DealCountComponents - Moves a component from GameStack to PlayerStack() one at a time until each player has been dealt TargetCount() components.
* DealComponentsUntilPlayerCountReached - Instead of a fixed number, done when every player has TargetCount or more components in PlayerStack.
* CollectComponentsUntilPlayerCountReached - Flip so that the components move from PlayerStack to GameStack.
* CollectComponentsUntilPlayerCountLeft - Flips it so the TargetCount is when each PlayerStack has that many items or fewer.
* CollectAllComponents - Overrides TargetCount to 0, collecting all components.
* CollectComponentsUntilPlayerCountLeft - Flip movement to be from PlayerStack to GameStack, and flip TargetCount to be when all PlayerStack have TargetCount or less.
* DealComponentsUntilGameCountLeft - Instead of a fixed number, done when GameStack's count is TargetCount or less.
* DealAllComponents - Overrides TargetCount to be 0
* CollectComponentsUntilGameCountLeft - Flips move so it's from PlayerStack to GameStack
* CollectCountComponents - Flips so components move from PlayerStacks to GameStack
Default Move
Implementing a Move requires a lot of stub methods to implement the
boardgame.Move interface, but also a lot of logic, especially to support
Phases correctly. moves.Default is a move that all moves should embed somewhere
in their hierarchy. It is very important to always call your superclasse's
Legal(), because moves.Default.Legal contains important logic to implement phases
and ordered moves within phases.
Default also includes a number of methods needed for moves to work well with
AutoConfigurer.
FixUp Move
FixUp moves are simple embedding of move.Default, but they default to having
IsFixUp generated by AutoConfigurer be true instead of false. This is useful so
you don't forget to pass WithIsFixUp(true) yourself in AutoConfigurer.Config.
FixUpMulti is the same as FixUp, but also has a AllowMultipleInProgression
that returns true, meaning that the ordered move logic within phases will
allow multiple of this move type to apply in a row.
Default Component Move
DefaultComponent is a move type that, in DefaultsForState, searches through
all of the components in the stack provided with WithSourceProperty, and testing
the Legal() method of each component. It sets the first one that returns nil
to m.ComponentIndex. Its Legal() returns whether there is a valid component
specified, and what its Legal returns. You provide your own Apply().
It's useful for fixup moves that need to apply actions to components in a
given stack when certain conditions are met--for example, crowning a token
that makes it to the opposite end of a board in checkers.
The componentValues.Legal() takes a legalType. This is the way you can use
multiple DefaultComponent moves for the same type of component. If you only
have one then you can skip passing WithLegalType, and just default to 0. If
you do have multiple legalTypes, the idiomatic way is to have those be members
of an Enum for that purpose.
Current Player Move
These moves are for moves that are only legal to be made by the current
player. Their Legal() will verify that it is the proposer's turn.
Move Deal and Collect Component Moves
Generally when moving components from one place to another it makes sense to
move one component at a time, so that each component is animated separately.
However, this is a pain to implement, because it requires implementing a move
that knows how many times to apply itself in a row, which is finicky and error-
prone.
There is a collection of 9 moves that all do basically the same thing for
moving components, one at a time, from stack to stack. Move-type moves move
components between two specific stacks, often both on your GameState. Deal and
Collect type moves move components between a stack in GameState and a stack in
each Player's PlayerState. Deal-type moves move components from the game stack
to the player stack, and Collect-type moves move components from each player
to the GameState.
All of these moves define a way to define the source and destination stack.
For Move-type moves, you define SourceStack() and DestinationStack(). For Deal
and Collect-type moves, you implement GameStack() and PlayerStack().
All moves in this collection implement TargetCount() int, and all of them
default to 1. Override this if you want a different number of components
checked for in the end condition.
In practice you'll often use WithTargetCount, WithGameProperty, and friends as
configuration to AutoConfigurer.Config instead of overriding those yourself.
In fact, in many cases configuration options are powerful enough to allow you
to use these moves types on their own directly in your game. See the
documentation in the sections above for more examples.
Each of Move, Deal, and Collect have three variants based on the end
condition. Note that Move-type moves have only two stacks, but Deal and
Collect type moves operate on n pairs of stacks, where n is the number of
players in the game. In general for Deal and Collect type moves, the condition
is met when all pairs of stacks meet the end condition.
{Move,Deal,Collect}CountComponents simply apply that many moves without regard
to the number of components in the source or destination stacks. Move names
that end in CountReached operate until the destination stacks all have
TargetCount or more items. Move names that end in CountLeft operate until the
source stacks all have TargetCount or fewer items in them.
Since a common configuration of these moves is to use
{Move,Deal,Collect}ComponentsUntil*Reached with a TargetCount of 0, each also
provides a *AllComponents as sugar.
ApplyUntil ApplyUntilCount and ApplyCountTimes
These moves are what the MoveComponents moves are based off of and are
designed to be subclassed. They apply the move in question until some
condition is reached.
RoundRobin and RoundRobinNumRounds
Round Robin moves are like ApplyUntilCount and friends, except they go around
and operate on each player in succession. RoundRobinNumRounds goes around each
player until NumRounds() cycles have completed. The base RoundRobin goes
around until the PlayerCondition has been met for each player. These are the
most complicated moves in the set; if you subclass one directly you're most
likely to subclass RoundRobinNumRounds.
FinishTurn
FinishTurn is a move that is designed to be used as a fix-up move during
normal phases of play in your game. It checks whether the current player's
turn is done (based on criteria you specify) and if so advances to the next
player, resetting state as appropriate. In practice you often can use this
move directly in your game without even passing any WithOPTION configuration
to AutoConfigurer.Config.
StartPhase
The StartPhase move is designed to set your game's phase to the next phase.
It's generally used as the last move in an ordered phase, for example, the
last move in your game's SetUp phase. This move can also generally be used
directly in your game, by using the WithPhaseToStart configuration option in
AutoConfigurer.Config.
ShuffleStack
Shuffle stack is a simple move that just shuffles the stack denoted by
SourceStack.
Increment
Increment is a simple move that's useful to include in a MoveProgression when
you want to increment a simple int property, for example by adding 1 to round
count at the end of a scoring phase.
Groups
Groups allow you to specify a specific set of moves that must occur in a given
order. You pass them to AddOrderedForPhase. All of the groups are of type
MoveProgressionGroup, and this package defines 5: Serial, Parallel,
ParallelCount, Repeat, and Optional. They can be nested as often as you'd
like to express the semantics of your move progression.
They are defined as functions that return anonymous underlying structs so
that when used in configuration you can avoid needing to wrap your
children list with []MoveProgressionGroup, saving you typing.
//Example
//AddOrderedForPhase accepts move configs from auto.Config, or
//groups.
moves.AddOrderedForPhase(PhaseNormal,
//Top level groups are all joined implicitly into a group.Serial.
auto.MustConfig(new(MoveZero)),
moves.Serial(
auto.MustConfig(new(MoveOne)),
moves.Optional(
moves.Serial(
auto.MustConfig(new(MoveTwo)),
auto.MustConfig(new(MoveThree)),
),
),
moves.ParallelCount(
CountAny(),
auto.MustConfig(new(MoveFour)),
auto.MustConfig(new(MoveFive)),
moves.Repeat(
CountAtMost(2),
moves.Serial(
auto.MustConfig(new(MoveSix)),
auto.MustConfig(new(MoveSeven)),
),
),
),
),
)
Move names must be unique, but sometimes you want to use the same underlying
move at multiple points in a progression. WithMoveNameSuffix is useful for
that case.
*/
package moves
Remove some unnecessary documentation from package doc for moves. Fixes #734.
/*
moves is a convenience package that implements composable Moves to make it
easy to implement common logic. The Base move type is a very simple move that
implements the basic stubs necessary for your straightforward moves to have
minimal boilerplate. Although it's technically optional, a lot of the magic
features throughout the framework depend on some if its base logic, so it's
recommended to always embed it anonymously in your move struct (or embed a
struct that embeds it).
You interact with and configure various move types by implementing interfaces.
Those interfaces are defined in the interfaces subpackage, to make this
package's design more clear.
There are many move types defined. Some are designed to be used directly with
minimal modification; others are powerful move types that are designed to be
sub-classed.
Automatic MoveConfig Generation
Creating MoveConfig's is a necessary part of installing moves on your
GameManager, but it's verbose and error-prone. You need to create a lot of
extra structs, and then remember to provide the right properties in your
config. And to use many of the powerful moves in the moves package, you
need to write a lot of boilerplate methods to integrate correctly.
Finally, you end up repeating yourself often--which makes it a pain if you
change the name of a move.
Take this example:
//boardgame:codegen
type MoveDealInitialCards struct {
moves.DealComponentsUntilPlayerCountReached
}
var moveDealInitialCardsConfig = boardgame.MoveConfig {
Name: "Deal Initial Cards",
Constructor: func() boardgame.Move {
return new(MoveDealInitialCards)
},
}
func (m *MoveDealInitialCards) GameStack(gState boardgame.MutableSubState) boardgame.MutableStack {
return gState.(*gameState).DrawStack
}
func (m *MoveDealInitialCards) PlayerStack(pState boardgame.MutablePlayerState) boardgame.MutableStack {
return pState.(*playerState).Hand
}
func (m *MoveDealInitialCards) TargetCount() int {
return 2
}
func (g *gameDelegate) ConfigureMoves() []boardgame.MoveConfig {
return moves.Add(
&moveDealInitialCardsConfig,
)
}
auto.Config (and its panic-y sibling auto.MustConfig) help reduce this
significantly:
func (g *gameDelegate) ConfigureMoves() []boardgame.MoveConfig {
auto := moves.NewAutoConfigurer(g)
return moves.Add(
auto.MustConfig(
new(moves.DealComponentsUntilPlayerCountReached),
moves.WithGameProperty("DrawStack"),
moves.WithPlayerProperty("Hand"),
moves.WithTargetCount(2),
)
)
}
Basic Usage
AutoConfigurer takes an example struct representing your move, and then a
list of 0 to n interfaces.CustomConfigurationOption. These options are
given a boardgame.PropertyCollection and then add specific properties to
it, and then stash that on the CustomConfiguration property of the
returned MoveTypeConfig. Different move methods will then reach into that
configuration to alter the behavior of moves of that type.
The moves/with package defines a large collection of
CustomConfigurationOption for use with the moves in the moves package.
Moves that are used with AutoConfigurer must satisfy the AutoConfigurableMove
interface, which adds one method: DeriveName() string. AutoConfigurer.Config()
primarily consists of some set up and then using those return values as fields
on the returned MoveConfig. These methods are implemented in moves.Default,
which means that any move structs that embed moves.Default (directly or
indirectly) can be used with AutoConfigurer.
moves.Default does a fair bit of magic in these methods to implement much of
the logic of AutoConfigurer. In general, if you pass a configuration option
(via WithMoveName, for example) then that option will be used for that
method. moves.Default.DeriveName() also will use reflection to automatically
set a struct name like "MoveDealInitialCards" to "Deal Initial Cards". All
of the moves in the moves package will also automatically return
reasonable names for DeriveName(), so in many cases you can use those
structs directly without having to pass WithMoveName().
Other moves in the moves package, like DealCountComponents, will use
configuration, like WithGameProperty(), to power their default GameStack()
method.
All moves in the moves package are designed to return an error from
ValidConfiguration(), which means that if you forgot to pass a required
configuration property (e.g. you don't override GameStack and also don't
provide WithGameProperty), when you try to create NewGameManager() and all
moves' ValidConfiguration() is checked, you'll get an error. This helps
catch mis-configurations during boot time.
Refer to the documentation of the various methods in that package for
their precise behavior and how to configure them.
Idiomatic Move Definition and Installation
AutoConfigurer is at the core of idiomatic definition and installation of
moves, and typically is used for every move you install in your game. The
following paragraphs describe the high-level idioms to follow.
Never create your own MoveConfig objects--it's just another global
variable that clutters up your code and makes it harder to change.
Instead, use AutoConfigurer. There are some rare cases where you do want to
refer to the move by name (and not rely on finicky string-based lookup),
such as when you want an Agent to propose a specific type of move. In
those cases use AutoConfigurer to create the move type config, then save the
resulting config's Name to a global variable that you use elsewhere, and
then pass the created config to moves.Add() (and its cousins)
In general, you should only create a bespoke Move struct in your game if
it is not possible to use one of the off-the-shelf moves from the moves
package, combined with configuration options, to do what you want. In
practice this means that only if you need to override a method on one of
the base moves do you need to create a bespoke struct. This typically
allows you to drastically reduce the number of bespoke move structs your
game defines, saving thousands of lines of code (each bespoke struct also
has hundreds of lines of auto-generated PropertyReader code).
If you do create a bespoke struct, name it like this: "MoveNameOfMyMove",
so that moves.Default's default DeriveName() will give it a reasonable name
automatically (in this example, "Name Of My Move").
In many cases if you subclass powerful moves like DealCountComponents the
default HelpText() value is sufficient (especially if it's a FixUp
move that won't ever be seen by players). In other cases, WithHelpText()
is often the only config option you will pass to AutoConfigurer.
If your move will be a FixUp move that doesn't subclass one of the more
advanced fix up moves (like RoundRobin or DealCountComponents), embed
moves.FixUp into your struct. That will cause IsFixUp to return the right
value even without using WithIsFixUp--because WithIsFixUp is easy to
forget given that it's often in a different file. In almost all cases if
you use WithIsFixUp you should simply embed moves.FixUp instead.
AutoConfigurer.MustConfig is like AutoConfigurer.Config, but instead of returning a MoveConfig
and an error, it simply returns a MoveConfig--and panics if it would have
returned an error. Since your GameDelegate's ConfigureMoves() is typically
called during the boot-up sequence of your game, it is safe to use
AutoConfigurer.MustConfig exclusively, which saves many lines of boilerplate error
checking.
Configure Move Helpers
Your Game Delegate's ConfigureMoves() []boardgame.MoveConfig is where the
action happens for installing moves. In practice you can do whatever you want
in there as long as you return a list of MoveConfigs. In practice you often
use AutoConfigurer (see section above). If you have a very simple game type you
might not need to do anything special.
If, however, your game type is complicated enough to need the notion of
phases, then you'll probably want to use some of the convenience methods for
installing moves: Combine, Add, AddForPhase, and AddOrderedForPhase. These
methods make sure that enough information is stored for the Legal() methods of
those moves to know when the move is legal. Technically they're just
convenience wrappers (each describes the straightforward things it's doing),
but in practice they're the best way to do it. See the tutorial in the main
package for more.
Move Type Hierarchy
The moves in this package are all defined as a hierarchy of structs that
anonymously embed higher level structs, overriding, modifying, and extending
the behavior of the struct they embed.
You can use many of these moves directly, using the configuration options like
WithSourceProperty() to configure which properties they should operate on.
Alternatively, you can embed them in your own move struct, overriding or
tweaking their behavior, perhaps adding an additional check to their Legal
check.
For convenience, here's the type hierarchy, with a brief description of the
diff each has on the one above it. See the documentation for each struct for
more.
* base.Move - The simplest, unopinionated stub of a move, from the base package.
* Default - Substantial base logic, including base property overriding for with and especially in Legal() around move progressions and phases.
* Done - A simple move that does nothing in its Apply and has no extra Legal() logic, meaning it's primarily a non-fix-up move applied by a player to move out of a move progression.
* CurrentPlayer - Defaults to the GameDelegate.CurrentPlayerIndex, and only lets the move be made if it's on behalf of that player.
* FixUp - Overrides IsFixUp() to always return true, making the move eligible for base.GameDelegate.ProposeFixUpMove.
* NoOp - A move that does nothing. Useful for specific edge cases of MoveProessionMatching, and also to signal to AddOrderedForPhase that the lack of a StartPhase move was intentional.
* Increment - Increments the provided SourceProperty by Amount. Useful to run automatically at a given spot in a move progression.
* ShuffleStack - Shuffles the stack at SourceProperty. Useful to run automatically at a certain time in a MoveProgression.
* StartPhase - Calls BeforeLeavePhase, then BeforeEnterPhase, then SetCurrentPhase. Generally you have one of these at the end of an AddOrderedForPhase.
* FinishTurn - Checks if State.CurrentPlayer().TurnDone() is true, and if so increments CurrentPlayerIndex to the next player, calling playerState.ResetForTurnEnd() and then ResetForTurnStart.
* FixUpMulti - Overrides AllowMultipleInProgression() to true, meaning multiple of the same move are legal to apply in a row according to Default.Legal()
* DefaultComponent - Looks at each component in SourceStack() and sees which one's method of Legal() returns nil, selecting that component for you to operate on in your own Apply.
* ApplyUntil - Legal() returns nil only once ConditionMet() returns nil.
* ApplyUntilCount - Supplies a ConditionMet that returns true when Count() is the same as TargetCount().
* ApplyCountTimes - Supplies a Count() that is the number of times this move has been applied in a row.
* MoveCountComponents - Moves components from SourceStack to DestinationStack until TargetCount have been moved.
* MoveComponentsUntilCountReached - Overrides Count to be how many components are in DestinatinoStack
* MoveComponentsUntilCountLeft - Overrides Count to be how many components are left in SourceStack
* MoveAllComponents - Overrides TargetCount to be 0
* RoundRobin - Applies around and aroudn for each player until PlayerConditionMet returns true for all. You must embed RoundRobinGameStateProperties in your GameState, as all of these moves store state in properties.
* RoundRobinNumRounds - Also checks that no more than NumRounds() around have happened
* DealCountComponents - Moves a component from GameStack to PlayerSTack() one at a time until each player has been dealt TargetCount() components.
* DealComponentsUntilPlayerCountReached - Instead of a fixed number, done when every player has TargetCount or more components in PlayerStack.
* CollectComponentsUntilPlayerCountReached - Flip so that the components move from PlayerStack to GameStack.
* CollectComponentsUntilPlayerCountLeft - Flips it so the TargetCount is when each PlayerStack has that many items or fewer.
* CollectAllComponents - Overrides TargetCount to 0, collecting all components.
* CollectComponentsUntilPlayerCountLeft - Flip movement to be from PlayerStack to GameStack, and flip TargetCount to be when all PlayerStack have TargetCount or less.
* DealComponentsUntilGameCountLeft - Instead of a fixed number, done when GameStack's count is TargetCount or less.
* DealAllComponents - Overrides TargetCount to be 0
* CollectComponentsUntilGameCountLeft - Flips move so it's from PlayerStack to GameStack
* CollectCountComponents - Flips so components move from PlayerStacks to GameStack
Move Deal and Collect Component Moves
Generally when moving components from one place to another it makes sense to
move one component at a time, so that each component is animated separately.
However, this is a pain to implement, because it requires implementing a move
that knows how many times to apply itself in a row, which is finicky and error-
prone.
There is a collection of 9 moves that all do basically the same thing for
moving components, one at a time, from stack to stack. Move-type moves move
components between two specific stacks, often both on your GameState. Deal and
Collect type moves move components between a stack in GameState and a stack in
each Player's PlayerState. Deal-type moves move components from the game stack
to the player stack, and Collect-type moves move components from each player
to the GameState.
All of these moves define a way to define the source and destination stack.
For Move-type moves, you define SourceStack() and DestinationStack(). For Deal
and Collect-type moves, you implement GameStack() and PlayerStack().
All moves in this collection implement TargetCount() int, and all of them
default to 1. Override this if you want a different number of components
checked for in the end condition.
In practice you'll often use WithTargetCount, WithGameProperty, and friends as
configuration to AutoConfigurer.Config instead of overriding those yourself.
In fact, in many cases configuration options are powerful enough to allow you
to use these moves types on their own directly in your game. See the
documentation in the sections above for more examples.
Each of Move, Deal, and Collect have three variants based on the end
condition. Note that Move-type moves have only two stacks, but Deal and
Collect type moves operate on n pairs of stacks, where n is the number of
players in the game. In general for Deal and Collect type moves, the condition
is met when all pairs of stacks meet the end condition.
{Move,Deal,Collect}CountComponents simply apply that many moves without regard
to the number of components in the source or destination stacks. Move names
that end in CountReached operate until the destination stacks all have
TargetCount or more items. Move names that end in CountLeft operate until the
source stacks all have TargetCount or fewer items in them.
Since a common configuration of these moves is to use
{Move,Deal,Collect}ComponentsUntil*Reached with a TargetCount of 0, each also
provides a *AllComponents as sugar.
Groups
Groups allow you to specify a specific set of moves that must occur in a given
order. You pass them to AddOrderedForPhase. All of the groups are of type
MoveProgressionGroup, and this package defines 5: Serial, Parallel,
ParallelCount, Repeat, and Optional. They can be nested as often as you'd
like to express the semantics of your move progression.
They are defined as functions that return anonymous underlying structs so
that when used in configuration you can avoid needing to wrap your
children list with []MoveProgressionGroup, saving you typing.
//Example
//AddOrderedForPhase accepts move configs from auto.Config, or
//groups.
moves.AddOrderedForPhase(PhaseNormal,
//Top level groups are all joined implicitly into a group.Serial.
auto.MustConfig(new(MoveZero)),
moves.Serial(
auto.MustConfig(new(MoveOne)),
moves.Optional(
moves.Serial(
auto.MustConfig(new(MoveTwo)),
auto.MustConfig(new(MoveThree)),
),
),
moves.ParallelCount(
CountAny(),
auto.MustConfig(new(MoveFour)),
auto.MustConfig(new(MoveFive)),
moves.Repeat(
CountAtMost(2),
moves.Serial(
auto.MustConfig(new(MoveSix)),
auto.MustConfig(new(MoveSeven)),
),
),
),
),
)
Move names must be unique, but sometimes you want to use the same underlying
move at multiple points in a progression. WithMoveNameSuffix is useful for
that case.
*/
package moves
|
package recurrence
import (
. "github.com/smartystreets/goconvey/convey"
"testing"
"time"
)
// TestMonthly exercises monthly Recurrence scheduling via GetNextDate,
// covering a multi-month frequency and a day-of-month that not every
// month contains.
func TestMonthly(t *testing.T) {
	Convey("With a monthly recurrence on the first day every 3 months", t, func() {
		r := Recurrence{
			Type:      Monthly,
			Location:  time.UTC,
			Frequence: 3,
			Start:     time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC),
		}
		// Query time is well before Start, so the first occurrence is Start itself.
		event := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
		Convey("The first event should be on first january", func() {
			// Deliberately shadows the outer event; the nested Convey below
			// uses this first occurrence as its query time.
			event := r.GetNextDate(event)
			So(event, ShouldHappenOn, time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC))
			Convey("and the second event should be on first april", func() {
				// Frequence of 3 advances three months past the previous occurrence.
				event := r.GetNextDate(event)
				So(event, ShouldHappenOn, time.Date(2016, 4, 1, 12, 0, 0, 0, time.UTC))
			})
		})
	})
	Convey("With a monthly recurrence on the 31 of each month", t, func() {
		r := Recurrence{
			Type:      Monthly,
			Location:  time.UTC,
			Frequence: 1,
			Start:     time.Date(2016, 1, 31, 12, 0, 0, 0, time.UTC),
		}
		Convey("It should happen in january", func() {
			event := r.GetNextDate(time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC))
			So(event, ShouldHappenOn, time.Date(2016, 1, 31, 12, 0, 0, 0, time.UTC))
		})
		Convey("But not on february", func() {
			// February has no 31st, so the next occurrence after Feb 1
			// is expected to skip straight to March 31.
			event := r.GetNextDate(time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC))
			So(event, ShouldHappenOn, time.Date(2016, 3, 31, 12, 0, 0, 0, time.UTC))
		})
	})
}
another monthly test
package recurrence
import (
. "github.com/smartystreets/goconvey/convey"
"testing"
"time"
)
// TestMonthly exercises monthly Recurrence scheduling via GetNextDate,
// covering a multi-month frequency, an End boundary, and a day-of-month
// that not every month contains.
func TestMonthly(t *testing.T) {
	Convey("With a monthly recurrence on the first day every 3 months", t, func() {
		r := Recurrence{
			Type:      Monthly,
			Location:  time.UTC,
			Frequence: 3,
			Start:     time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC),
		}
		// Query time is well before Start, so the first occurrence is Start itself.
		event := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
		Convey("The first event should be on first january", func() {
			// Deliberately shadows the outer event; the nested Conveys below
			// use this first occurrence as their query time.
			event := r.GetNextDate(event)
			So(event, ShouldHappenOn, time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC))
			Convey("and the second event should be on first april", func() {
				// Frequence of 3 advances three months past the previous occurrence.
				event := r.GetNextDate(event)
				So(event, ShouldHappenOn, time.Date(2016, 4, 1, 12, 0, 0, 0, time.UTC))
			})
			Convey("With the recurrence end in february there should be no second event", func() {
				// An End before the next scheduled occurrence suppresses it.
				r.End = time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC)
				event := r.GetNextDate(event)
				So(event, ShouldNotHappen)
			})
		})
	})
	Convey("With a monthly recurrence on the 31 of each month", t, func() {
		r := Recurrence{
			Type:      Monthly,
			Location:  time.UTC,
			Frequence: 1,
			Start:     time.Date(2016, 1, 31, 12, 0, 0, 0, time.UTC),
		}
		Convey("It should happen in january", func() {
			event := r.GetNextDate(time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC))
			So(event, ShouldHappenOn, time.Date(2016, 1, 31, 12, 0, 0, 0, time.UTC))
		})
		Convey("But not on february", func() {
			// February has no 31st, so the next occurrence after Feb 1
			// is expected to skip straight to March 31.
			event := r.GetNextDate(time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC))
			So(event, ShouldHappenOn, time.Date(2016, 3, 31, 12, 0, 0, 0, time.UTC))
		})
	})
}
|
package mp
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"github.com/ridewindx/mel"
"github.com/ridewindx/mel/binding"
"sort"
"sync"
"sync/atomic"
"unsafe"
"go.uber.org/zap"
"github.com/jiudaoyun/wechat"
"net/http"
"fmt"
"encoding/json"
"strings"
"github.com/ridewindx/melware"
)
type Server struct {
*mel.Mel
urlPrefix string
appID string // App ID
ID string // Wechat ID
tokenMutex sync.Mutex
token unsafe.Pointer
aesKeyMutex sync.Mutex
aesKey unsafe.Pointer
client *Client
middlewares []Handler
messageHandlerMap map[string]Handler
eventHandlerMap map[string]Handler
logger *zap.SugaredLogger
}
func (srv *Server) setURLPrefix(urlPrefix string) {
if !strings.HasPrefix(urlPrefix, "/") {
urlPrefix = "/" + urlPrefix
}
urlPrefix = strings.TrimRight(urlPrefix, "/")
srv.urlPrefix = urlPrefix
}
func (srv *Server) SetID(id string) {
srv.ID = id
}
func (srv *Server) SetAppID(appID string) {
srv.appID = appID
}
func (srv *Server) SetClient(client *Client) {
srv.client = client
}
type Token struct {
current string
last string
}
type AESKey struct {
current string
last string
}
func (srv *Server) GetToken() (string, string) {
p := (*Token)(atomic.LoadPointer(&srv.token))
if p != nil {
return p.current, p.last
}
return "", ""
}
func (srv *Server) SetToken(token string) {
if token == "" {
return
}
srv.tokenMutex.Lock()
defer srv.tokenMutex.Unlock()
current, _ := srv.GetToken()
if token == current {
return
}
t := Token{
current: token,
last: current,
}
atomic.StorePointer(&srv.token, unsafe.Pointer(&t))
}
func (srv *Server) deleteLastToken() {
srv.tokenMutex.Lock()
defer srv.tokenMutex.Unlock()
current, last := srv.GetToken()
if last == "" {
return
}
t := Token{
current: current,
}
atomic.StorePointer(&srv.token, unsafe.Pointer(&t))
}
func (srv *Server) GetAESKey() (string, string) {
p := (*AESKey)(atomic.LoadPointer(&srv.aesKey))
if p != nil {
return p.current, p.last
}
return "", ""
}
func (srv *Server) SetAESKey(base64AESKey string) {
if len(base64AESKey) != 43 {
return
}
aesKey, err := base64.StdEncoding.DecodeString(base64AESKey + "=")
if err != nil {
return
}
srv.aesKeyMutex.Lock()
defer srv.aesKeyMutex.Unlock()
current, _ := srv.GetAESKey()
if bytes.Equal(aesKey, []byte(current)) {
return
}
k := AESKey{
current: string(aesKey),
last: current,
}
atomic.StorePointer(&srv.aesKey, unsafe.Pointer(&k))
}
func (srv *Server) deleteLastAESKey() {
srv.aesKeyMutex.Lock()
defer srv.aesKeyMutex.Unlock()
current, last := srv.GetAESKey()
if last == "" {
return
}
k := AESKey{
current: current,
}
atomic.StorePointer(&srv.aesKey, unsafe.Pointer(&k))
}
// Use appends middlewares to the chain run before every registered
// message/event handler. It panics if the total would exceed the
// handler-chain limit (abortIndex). The check is performed before
// mutating srv.middlewares, so a failing call leaves the server
// unchanged (the original appended first and panicked afterwards).
func (srv *Server) Use(middlewares ...Handler) {
	// +1 reserves room for the per-request message/event handler.
	if len(srv.middlewares)+len(middlewares)+1 > int(abortIndex) {
		panic("too many middlewares")
	}
	srv.middlewares = append(srv.middlewares, middlewares...)
}
func (srv *Server) HandleMessage(msgType string, handler Handler) {
srv.messageHandlerMap[msgType] = handler
}
func (srv *Server) HandleEvent(eventType string, handler Handler) {
srv.eventHandlerMap[eventType] = handler
}
func (srv *Server) GetVerifyFile(filename string, content []byte) {
srv.Get(srv.urlPrefix+"/"+filename, func(c *mel.Context) {
c.Data(200, "text/plain", content)
})
}
func NewServer(token, aesKey string, urlPrefix ...string) *Server {
srv := &Server{
Mel: mel.New(),
messageHandlerMap: make(map[string]Handler),
eventHandlerMap: make(map[string]Handler),
logger: wechat.Sugar,
}
srv.SetToken(token)
srv.SetAESKey(aesKey)
if len(urlPrefix) > 0 {
srv.setURLPrefix(urlPrefix[0])
}
equal := func(a, b string) bool {
return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
}
verifySignReturnToken := func(signature, timestamp, nonce string) string {
currentToken, lastToken := srv.GetToken()
token := currentToken
isValid := func() bool {
computedSignature := computeSign(token, timestamp, nonce)
return equal(signature, computedSignature)
}
if isValid() {
srv.deleteLastToken()
return token
}
if lastToken != "" {
token = lastToken
if isValid() {
return token
}
}
return ""
}
verifySign := func(c *mel.Context) bool {
signature := c.Query("signature")
timestamp := c.Query("timestamp")
nonce := c.Query("nonce")
return verifySignReturnToken(signature, timestamp, nonce) != ""
}
type EncryptMsg struct {
ToUserName string `xml:"ToUserName"`
Encrypt string `xml:"Encrypt"`
}
cors := melware.CorsAllowAll()
cors.AllowCredentials = false
srv.Mel.Use(cors.Middleware())
srv.Head("/", func(c *mel.Context) { // health check
c.Status(200)
})
srv.Get(srv.urlPrefix+"/", func(c *mel.Context) {
if verifySign(c) {
echostr := c.Query("echostr")
c.Text(200, echostr)
}
})
handleMessage := func(event *Event) interface{} {
var handler Handler
var ok bool
if event.Type == MessageEvent {
handler, ok = srv.eventHandlerMap[event.Event]
} else {
handler, ok = srv.messageHandlerMap[event.Type]
}
if !ok {
return nil // no registered handler, just respond with empty string
}
ctx := &Context{
Client: srv.client,
index: preStartIndex,
handlers: append(srv.middlewares, handler),
Event: event,
}
ctx.Next()
return ctx.response
}
srv.Post(srv.urlPrefix+"/", func(c *mel.Context) {
encryptType := c.Query("encrypt_type")
signature := c.Query("signature")
timestamp := c.Query("timestamp")
nonce := c.Query("nonce")
switch encryptType {
case "aes":
token := verifySignReturnToken(signature, timestamp, nonce)
if token == "" {
srv.logger.Error("Verify sign empty token")
return
}
msgSign := c.Query("msg_signature")
var obj EncryptMsg
err := c.BindWith(&obj, binding.XML)
if err != nil {
srv.logger.Errorw("Bind with XML failed", "error", err)
return
}
if srv.ID != "" && !equal(obj.ToUserName, srv.ID) {
srv.logger.Errorw("Wechat ID inconsistent", "id", srv.ID, "ToUserName", obj.ToUserName)
return
}
computedSign := computeSign(token, timestamp, nonce, obj.Encrypt)
if !equal(computedSign, msgSign) {
srv.logger.Errorw("Signature inconsistent")
return
}
encryptedMsg, err := base64.StdEncoding.DecodeString(obj.Encrypt)
if err != nil {
srv.logger.Errorw("Decode base64 string failed", "error", err)
return
}
current, last := srv.GetAESKey()
aesKey := current
random, msg, appId, err := decryptMsg(encryptedMsg, []byte(aesKey))
if err != nil {
if last == "" {
srv.logger.Errorw("Decrypt AES msg failed", "error", err)
return
}
aesKey = last
random, msg, appId, err = decryptMsg(encryptedMsg, []byte(aesKey))
if err != nil {
srv.logger.Errorw("Decrypt AES msg failed", "error", err)
return
}
} else {
srv.deleteLastAESKey()
}
if srv.appID != "" && string(appId) != srv.appID {
srv.logger.Errorw("AppID inconsistent", "AppID", appId)
return
}
var event Event
if err = xml.Unmarshal(msg, &event); err != nil {
srv.logger.Errorw("Unmarshal msg failed", "error", err)
return
}
repBytes, err := xml.Marshal(handleMessage(&event))
if err != nil {
srv.logger.Errorw("Marshal msg failed", "error", err)
return
}
encryptedRepBytes := encryptMsg(random, repBytes, appId, []byte(aesKey))
encryptedRepStr := base64.StdEncoding.EncodeToString(encryptedRepBytes)
repSignature := computeSign(token, timestamp, nonce, encryptedRepStr)
type EncryptRepMsg struct {
Encrypt string
MsgSignature string
TimeStamp string
Nonce string
}
err = c.XML(200, &EncryptRepMsg{encryptedRepStr, repSignature, timestamp, nonce})
if err != nil {
srv.logger.Errorw("Reply msg failed", "error", err)
}
case "", "raw":
if !verifySign(c) {
return
}
var event Event
err := c.BindWith(&event, binding.XML)
if err != nil {
return
}
c.XML(200, handleMessage(&event))
default:
return
}
})
handleAuthorize := func(c *mel.Context, url string, state string) {
rep, err := srv.client.Client.Get(url)
if err != nil {
c.AbortWithError(http.StatusUnauthorized, err)
return
}
defer rep.Body.Close()
if rep.StatusCode != http.StatusOK {
c.AbortWithError(http.StatusUnauthorized, fmt.Errorf("http.Status: %s", rep.Status))
return
}
type Result struct {
AccessToken string `json:"access_token"`
ExpiresIn string `json:"expires_in"`
RefreshToken string `json:"refresh_token"`
OpenID string `json:"openid"`
Scope string `json:"scope"`
State string `json:"state,omitempty"`
}
type ResultWithErr struct {
Result
Err
}
var result ResultWithErr
err = json.NewDecoder(rep.Body).Decode(&result)
if err != nil {
c.AbortWithError(http.StatusUnauthorized, err)
return
}
if result.Code() != OK {
c.AbortWithError(http.StatusUnauthorized, &result)
return
}
result.State = state
c.JSON(http.StatusOK, &result.Result)
srv.logger.Infof("/token", "result", result.Result)
}
srv.Get(srv.urlPrefix+"/token", func(c *mel.Context) {
code := c.Query("code")
state := c.Query("state")
url := fmt.Sprintf("https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&grant_type=authorization_code", srv.client.appId, srv.client.appSecret, code)
handleAuthorize(c, url, state)
})
srv.Get(srv.urlPrefix+"/refresh-token", func(c *mel.Context) {
refreshToken := c.Query("refresh_token")
url := fmt.Sprintf("https://api.weixin.qq.com/sns/oauth2/refresh_token?appid=%s&grant_type=refresh_token&refresh_token=%s", srv.client.appId, refreshToken)
handleAuthorize(c, url, "")
})
srv.Get(srv.urlPrefix+"/signature", func(c *mel.Context) {
timestamp := c.Query("timestamp")
noncestr := c.Query("noncestr")
url := c.Query("url")
refresh := c.Query("refresh")
var ticket string
var err error
if refresh != "" && (refresh == "true" || refresh == "True" || refresh == "1") {
ticket, err = srv.client.RefreshTicket("")
} else {
ticket, err = srv.client.Ticket()
}
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
strs := sort.StringSlice{
"timestamp=" + timestamp,
"noncestr=" + noncestr,
"url=" + url,
"jsapi_ticket=" + ticket,
}
strs.Sort()
h := sha1.New()
buf := bufio.NewWriterSize(h, 1024)
for i, s := range strs {
buf.WriteString(s)
if i < len(strs)-1 {
buf.WriteByte('&')
}
}
buf.Flush()
sign := hex.EncodeToString(h.Sum(nil))
c.JSON(http.StatusOK, map[string]string{
"signature": sign,
})
srv.logger.Infow("signature", "strs", strs, "sign", sign)
})
return srv
}
// computeSign implements the Wechat signature algorithm: the elements
// are sorted lexicographically (in place) and the hex-encoded SHA-1 of
// their concatenation is returned.
func computeSign(elements ...string) string {
	sort.Strings(elements)
	digest := sha1.New()
	for _, element := range elements {
		// hash.Hash.Write never returns an error.
		digest.Write([]byte(element))
	}
	return hex.EncodeToString(digest.Sum(nil))
}
Update server: register the Zap logging middleware and configure CORS before route setup.
package mp
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"github.com/ridewindx/mel"
"github.com/ridewindx/mel/binding"
"sort"
"sync"
"sync/atomic"
"unsafe"
"go.uber.org/zap"
"github.com/jiudaoyun/wechat"
"net/http"
"fmt"
"encoding/json"
"strings"
"github.com/ridewindx/melware"
)
// Server is the Wechat message-push HTTP server. The verification
// token and message AES key are stored behind unsafe.Pointer and
// swapped atomically, so request handlers read them without locking;
// the mutexes only serialize writers.
type Server struct {
	*mel.Mel

	urlPrefix string // normalized route prefix ("" or "/prefix")

	appID string // App ID
	ID    string // Wechat ID

	tokenMutex sync.Mutex     // serializes token writers
	token      unsafe.Pointer // *Token, read with atomic.LoadPointer

	aesKeyMutex sync.Mutex     // serializes AES-key writers
	aesKey      unsafe.Pointer // *AESKey, read with atomic.LoadPointer

	client *Client // API client used by handlers and the OAuth endpoints

	middlewares       []Handler          // run before every message/event handler
	messageHandlerMap map[string]Handler // message type -> handler
	eventHandlerMap   map[string]Handler // event type -> handler

	logger *zap.SugaredLogger
}
// setURLPrefix stores a normalized route prefix: exactly one leading
// slash and no trailing slashes.
func (srv *Server) setURLPrefix(urlPrefix string) {
	if !strings.HasPrefix(urlPrefix, "/") {
		urlPrefix = "/" + urlPrefix
	}
	srv.urlPrefix = strings.TrimRight(urlPrefix, "/")
}
// SetID sets the Wechat account ID checked against ToUserName on
// incoming encrypted messages.
func (srv *Server) SetID(id string) {
	srv.ID = id
}

// SetAppID sets the app ID checked against the ID recovered from
// decrypted messages.
func (srv *Server) SetAppID(appID string) {
	srv.appID = appID
}

// SetClient sets the API client handed to handler contexts and used by
// the OAuth/signature endpoints.
func (srv *Server) SetClient(client *Client) {
	srv.client = client
}
// Token holds the current verification token plus the previous one,
// kept so requests signed with the old token still validate during a
// rotation grace period.
type Token struct {
	current string
	last    string
}

// AESKey holds the current message-encryption key plus the previous
// one (raw key bytes stored as strings), for the same rotation grace
// period.
type AESKey struct {
	current string
	last    string
}
// GetToken returns the current and previous verification tokens; both
// are empty when no token has been set.
func (srv *Server) GetToken() (string, string) {
	t := (*Token)(atomic.LoadPointer(&srv.token))
	if t == nil {
		return "", ""
	}
	return t.current, t.last
}
// SetToken installs token as the current verification token, demoting
// the existing one to "last". Empty or unchanged tokens are ignored.
func (srv *Server) SetToken(token string) {
	if token == "" {
		return
	}
	srv.tokenMutex.Lock()
	defer srv.tokenMutex.Unlock()
	prev, _ := srv.GetToken()
	if prev == token {
		return
	}
	next := &Token{current: token, last: prev}
	atomic.StorePointer(&srv.token, unsafe.Pointer(next))
}
// deleteLastToken drops the retired token once the current one has
// been observed to validate a request.
func (srv *Server) deleteLastToken() {
	srv.tokenMutex.Lock()
	defer srv.tokenMutex.Unlock()
	cur, last := srv.GetToken()
	if last == "" {
		return
	}
	atomic.StorePointer(&srv.token, unsafe.Pointer(&Token{current: cur}))
}
// GetAESKey returns the current and previous AES keys (raw bytes as
// strings); both are empty when no key has been set.
func (srv *Server) GetAESKey() (string, string) {
	k := (*AESKey)(atomic.LoadPointer(&srv.aesKey))
	if k == nil {
		return "", ""
	}
	return k.current, k.last
}
// SetAESKey installs a new message-encryption key from its base64
// form, demoting the existing key to "last". Invalid input is ignored
// silently.
func (srv *Server) SetAESKey(base64AESKey string) {
	// Wechat supplies the key as a 43-character unpadded base64 string
	// (a 32-byte key); reject anything else.
	if len(base64AESKey) != 43 {
		return
	}
	// Restore the "=" padding Wechat strips before decoding.
	aesKey, err := base64.StdEncoding.DecodeString(base64AESKey + "=")
	if err != nil {
		return
	}
	srv.aesKeyMutex.Lock()
	defer srv.aesKeyMutex.Unlock()
	current, _ := srv.GetAESKey()
	if bytes.Equal(aesKey, []byte(current)) {
		// Unchanged key: keep the existing current/last pair.
		return
	}
	k := AESKey{
		current: string(aesKey),
		last:    current,
	}
	atomic.StorePointer(&srv.aesKey, unsafe.Pointer(&k))
}
// deleteLastAESKey drops the retired AES key once the current one has
// successfully decrypted a message.
func (srv *Server) deleteLastAESKey() {
	srv.aesKeyMutex.Lock()
	defer srv.aesKeyMutex.Unlock()
	cur, last := srv.GetAESKey()
	if last == "" {
		return
	}
	atomic.StorePointer(&srv.aesKey, unsafe.Pointer(&AESKey{current: cur}))
}
// Use appends middlewares to the chain run before every registered
// message/event handler. It panics if the total would exceed the
// handler-chain limit (abortIndex). The check is performed before
// mutating srv.middlewares, so a failing call leaves the server
// unchanged (the original appended first and panicked afterwards).
func (srv *Server) Use(middlewares ...Handler) {
	// +1 reserves room for the per-request message/event handler.
	if len(srv.middlewares)+len(middlewares)+1 > int(abortIndex) {
		panic("too many middlewares")
	}
	srv.middlewares = append(srv.middlewares, middlewares...)
}
// HandleMessage registers handler for the given message type.
// Writes a plain map, so register handlers before serving requests.
func (srv *Server) HandleMessage(msgType string, handler Handler) {
	srv.messageHandlerMap[msgType] = handler
}

// HandleEvent registers handler for the given event type.
// Writes a plain map, so register handlers before serving requests.
func (srv *Server) HandleEvent(eventType string, handler Handler) {
	srv.eventHandlerMap[eventType] = handler
}

// GetVerifyFile serves the given verification-file content as
// text/plain at <urlPrefix>/<filename>.
func (srv *Server) GetVerifyFile(filename string, content []byte) {
	srv.Get(srv.urlPrefix+"/"+filename, func(c *mel.Context) {
		c.Data(200, "text/plain", content)
	})
}
// NewServer builds a Wechat message-push HTTP server with the given
// verification token and base64 AES key, optionally rooted at
// urlPrefix. It wires up logging/CORS middleware plus the URL
// verification, message-push, OAuth token and JS-API signature
// endpoints.
func NewServer(token, aesKey string, urlPrefix ...string) *Server {
	srv := &Server{
		Mel:               mel.New(),
		messageHandlerMap: make(map[string]Handler),
		eventHandlerMap:   make(map[string]Handler),
		logger:            wechat.Sugar,
	}
	srv.SetToken(token)
	srv.SetAESKey(aesKey)
	srv.Mel.Use(melware.Zap(srv.logger))
	cors := melware.CorsAllowAll()
	cors.AllowCredentials = false
	srv.Mel.Use(cors.Middleware())
	if len(urlPrefix) > 0 {
		srv.setURLPrefix(urlPrefix[0])
	}

	// equal compares strings in constant time to avoid leaking
	// signature bytes through timing.
	equal := func(a, b string) bool {
		return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
	}

	// verifySignReturnToken returns whichever token (current or last)
	// validates the signature, or "" when neither does. A successful
	// match on the current token retires the previous one.
	verifySignReturnToken := func(signature, timestamp, nonce string) string {
		currentToken, lastToken := srv.GetToken()
		token := currentToken
		isValid := func() bool {
			computedSignature := computeSign(token, timestamp, nonce)
			return equal(signature, computedSignature)
		}
		if isValid() {
			srv.deleteLastToken()
			return token
		}
		if lastToken != "" {
			token = lastToken
			if isValid() {
				return token
			}
		}
		return ""
	}

	verifySign := func(c *mel.Context) bool {
		signature := c.Query("signature")
		timestamp := c.Query("timestamp")
		nonce := c.Query("nonce")
		return verifySignReturnToken(signature, timestamp, nonce) != ""
	}

	type EncryptMsg struct {
		ToUserName string `xml:"ToUserName"`
		Encrypt    string `xml:"Encrypt"`
	}

	srv.Head("/", func(c *mel.Context) { // health check
		c.Status(200)
	})

	// Wechat URL verification: echo echostr back when the signature
	// checks out; otherwise respond with an empty body.
	srv.Get(srv.urlPrefix+"/", func(c *mel.Context) {
		if verifySign(c) {
			echostr := c.Query("echostr")
			c.Text(200, echostr)
		}
	})

	// handleMessage dispatches an incoming event/message through the
	// middleware chain to its registered handler and returns the
	// handler's response (nil when no handler is registered).
	handleMessage := func(event *Event) interface{} {
		var handler Handler
		var ok bool
		if event.Type == MessageEvent {
			handler, ok = srv.eventHandlerMap[event.Event]
		} else {
			handler, ok = srv.messageHandlerMap[event.Type]
		}
		if !ok {
			return nil // no registered handler, just respond with empty string
		}
		// Build a fresh chain instead of append(srv.middlewares, handler):
		// append may reuse srv.middlewares' backing array, letting two
		// concurrent requests overwrite each other's final handler.
		handlers := make([]Handler, 0, len(srv.middlewares)+1)
		handlers = append(handlers, srv.middlewares...)
		handlers = append(handlers, handler)
		ctx := &Context{
			Client:   srv.client,
			index:    preStartIndex,
			handlers: handlers,
			Event:    event,
		}
		ctx.Next()
		return ctx.response
	}

	// Message-push endpoint, plaintext ("raw"/empty) or AES-encrypted.
	srv.Post(srv.urlPrefix+"/", func(c *mel.Context) {
		encryptType := c.Query("encrypt_type")
		signature := c.Query("signature")
		timestamp := c.Query("timestamp")
		nonce := c.Query("nonce")
		switch encryptType {
		case "aes":
			token := verifySignReturnToken(signature, timestamp, nonce)
			if token == "" {
				srv.logger.Error("Verify sign empty token")
				return
			}
			msgSign := c.Query("msg_signature")
			var obj EncryptMsg
			err := c.BindWith(&obj, binding.XML)
			if err != nil {
				srv.logger.Errorw("Bind with XML failed", "error", err)
				return
			}
			if srv.ID != "" && !equal(obj.ToUserName, srv.ID) {
				srv.logger.Errorw("Wechat ID inconsistent", "id", srv.ID, "ToUserName", obj.ToUserName)
				return
			}
			// msg_signature additionally covers the encrypted payload.
			computedSign := computeSign(token, timestamp, nonce, obj.Encrypt)
			if !equal(computedSign, msgSign) {
				srv.logger.Errorw("Signature inconsistent")
				return
			}
			encryptedMsg, err := base64.StdEncoding.DecodeString(obj.Encrypt)
			if err != nil {
				srv.logger.Errorw("Decode base64 string failed", "error", err)
				return
			}
			// Try the current AES key first, then fall back to the previous
			// key (rotation grace period). Once the current key decrypts
			// successfully the previous key is discarded.
			current, last := srv.GetAESKey()
			aesKey := current
			random, msg, appId, err := decryptMsg(encryptedMsg, []byte(aesKey))
			if err != nil {
				if last == "" {
					srv.logger.Errorw("Decrypt AES msg failed", "error", err)
					return
				}
				aesKey = last
				random, msg, appId, err = decryptMsg(encryptedMsg, []byte(aesKey))
				if err != nil {
					srv.logger.Errorw("Decrypt AES msg failed", "error", err)
					return
				}
			} else {
				srv.deleteLastAESKey()
			}
			if srv.appID != "" && string(appId) != srv.appID {
				srv.logger.Errorw("AppID inconsistent", "AppID", appId)
				return
			}
			var event Event
			if err = xml.Unmarshal(msg, &event); err != nil {
				srv.logger.Errorw("Unmarshal msg failed", "error", err)
				return
			}
			repBytes, err := xml.Marshal(handleMessage(&event))
			if err != nil {
				srv.logger.Errorw("Marshal msg failed", "error", err)
				return
			}
			// The reply is encrypted with the same key and signed again.
			encryptedRepBytes := encryptMsg(random, repBytes, appId, []byte(aesKey))
			encryptedRepStr := base64.StdEncoding.EncodeToString(encryptedRepBytes)
			repSignature := computeSign(token, timestamp, nonce, encryptedRepStr)
			type EncryptRepMsg struct {
				Encrypt      string
				MsgSignature string
				TimeStamp    string
				Nonce        string
			}
			err = c.XML(200, &EncryptRepMsg{encryptedRepStr, repSignature, timestamp, nonce})
			if err != nil {
				srv.logger.Errorw("Reply msg failed", "error", err)
			}
		case "", "raw":
			if !verifySign(c) {
				return
			}
			var event Event
			err := c.BindWith(&event, binding.XML)
			if err != nil {
				return
			}
			c.XML(200, handleMessage(&event))
		default:
			return
		}
	})

	// handleAuthorize proxies a Wechat OAuth2 endpoint and relays the
	// JSON result to the caller, aborting with 401 on any failure.
	handleAuthorize := func(c *mel.Context, url string, state string) {
		rep, err := srv.client.Client.Get(url)
		if err != nil {
			c.AbortWithError(http.StatusUnauthorized, err)
			return
		}
		defer rep.Body.Close()
		if rep.StatusCode != http.StatusOK {
			c.AbortWithError(http.StatusUnauthorized, fmt.Errorf("http.Status: %s", rep.Status))
			return
		}
		type Result struct {
			AccessToken  string `json:"access_token"`
			ExpiresIn    string `json:"expires_in"`
			RefreshToken string `json:"refresh_token"`
			OpenID       string `json:"openid"`
			Scope        string `json:"scope"`
			State        string `json:"state,omitempty"`
		}
		type ResultWithErr struct {
			Result
			Err
		}
		var result ResultWithErr
		err = json.NewDecoder(rep.Body).Decode(&result)
		if err != nil {
			c.AbortWithError(http.StatusUnauthorized, err)
			return
		}
		if result.Code() != OK {
			c.AbortWithError(http.StatusUnauthorized, &result)
			return
		}
		result.State = state
		c.JSON(http.StatusOK, &result.Result)
		// Fix: these are key-value pairs, so use Infow; Infof is
		// printf-style and would mangle the arguments.
		srv.logger.Infow("/token", "result", result.Result)
	}
	srv.Get(srv.urlPrefix+"/token", func(c *mel.Context) {
		code := c.Query("code")
		state := c.Query("state")
		url := fmt.Sprintf("https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&grant_type=authorization_code", srv.client.appId, srv.client.appSecret, code)
		handleAuthorize(c, url, state)
	})
	srv.Get(srv.urlPrefix+"/refresh-token", func(c *mel.Context) {
		refreshToken := c.Query("refresh_token")
		url := fmt.Sprintf("https://api.weixin.qq.com/sns/oauth2/refresh_token?appid=%s&grant_type=refresh_token&refresh_token=%s", srv.client.appId, refreshToken)
		handleAuthorize(c, url, "")
	})
	// JS-SDK signature endpoint: SHA-1 over the '&'-joined, sorted
	// key=value pairs, per the jsapi_ticket signature algorithm.
	srv.Get(srv.urlPrefix+"/signature", func(c *mel.Context) {
		timestamp := c.Query("timestamp")
		noncestr := c.Query("noncestr")
		url := c.Query("url")
		refresh := c.Query("refresh")
		var ticket string
		var err error
		if refresh != "" && (refresh == "true" || refresh == "True" || refresh == "1") {
			ticket, err = srv.client.RefreshTicket("")
		} else {
			ticket, err = srv.client.Ticket()
		}
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		strs := sort.StringSlice{
			"timestamp=" + timestamp,
			"noncestr=" + noncestr,
			"url=" + url,
			"jsapi_ticket=" + ticket,
		}
		strs.Sort()
		h := sha1.New()
		buf := bufio.NewWriterSize(h, 1024)
		for i, s := range strs {
			buf.WriteString(s)
			if i < len(strs)-1 {
				buf.WriteByte('&')
			}
		}
		buf.Flush()
		sign := hex.EncodeToString(h.Sum(nil))
		c.JSON(http.StatusOK, map[string]string{
			"signature": sign,
		})
		srv.logger.Infow("signature", "strs", strs, "sign", sign)
	})
	return srv
}
// computeSign implements the Wechat signature algorithm: the elements
// are sorted lexicographically (in place) and the hex-encoded SHA-1 of
// their concatenation is returned.
//
// Improvement: write straight into the hash instead of through a
// bufio.Writer — hash.Hash.Write never fails and the buffer added an
// allocation and a copy for nothing. Output is unchanged.
func computeSign(elements ...string) string {
	sort.Strings(elements)
	h := sha1.New()
	for _, s := range elements {
		h.Write([]byte(s))
	}
	return hex.EncodeToString(h.Sum(nil))
}
|
package movingmedian
import (
"container/heap"
"math"
)
type elt struct {
f float64
idx int
}
type float64Heap []*elt
func (h float64Heap) Len() int { return len(h) }
func (h float64Heap) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
h[i].idx = i
h[j].idx = j
}
func (h *float64Heap) Push(x interface{}) {
e := x.(*elt)
e.idx = len(*h)
*h = append(*h, e)
}
func (h *float64Heap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
type minFloat64Heap struct {
float64Heap
}
func (h minFloat64Heap) Less(i, j int) bool { return h.float64Heap[i].f < h.float64Heap[j].f }
type maxFloat64Heap struct {
float64Heap
}
func (h maxFloat64Heap) Less(i, j int) bool { return h.float64Heap[i].f > h.float64Heap[j].f }
type MovingMedian struct {
idx int
nelts int
queue []*elt
maxHeap maxFloat64Heap
minHeap minFloat64Heap
}
func NewMovingMedian(size int) MovingMedian {
m := MovingMedian{
queue: make([]*elt, size),
maxHeap: maxFloat64Heap{},
minHeap: minFloat64Heap{},
}
heap.Init(&m.maxHeap)
heap.Init(&m.minHeap)
return m
}
// Push adds v to the window, evicting the oldest sample once the
// window is full, and rebalances the two heaps so their sizes differ
// by at most one.
func (m *MovingMedian) Push(v float64) {
	if m.nelts >= len(m.queue) {
		// Window full: remove the sample about to be overwritten from
		// the heap that holds it. A value strictly greater than the
		// min-heap's top must live in the min-heap; for a size-1 window
		// everything is in the min-heap.
		// NOTE(review): when old.f equals the min-heap top, the sample
		// may actually be in the min-heap but is removed from the
		// max-heap by old.idx — the repeated-value corner case
		// discussed in the revert below; confirm before relying on ties.
		old := m.queue[m.idx]
		if len(m.queue) == 1 || old.f > m.minHeap.float64Heap[0].f {
			heap.Remove(&m.minHeap, old.idx)
		} else {
			heap.Remove(&m.maxHeap, old.idx)
		}
	}
	// Allocate the new sample and record it in the ring buffer.
	m.queue[m.idx] = &elt{f: v}
	e := m.queue[m.idx]
	m.nelts++
	m.idx++
	if m.idx >= len(m.queue) {
		m.idx = 0
	}
	// Values above the current upper-half minimum go to the min-heap,
	// everything else to the max-heap.
	if m.minHeap.Len() == 0 ||
		v > m.minHeap.float64Heap[0].f {
		heap.Push(&m.minHeap, e)
	} else {
		heap.Push(&m.maxHeap, e)
	}
	// Rebalance so the heap sizes differ by at most one.
	if m.maxHeap.Len() > (m.minHeap.Len() + 1) {
		moveItem := heap.Pop(&m.maxHeap)
		heap.Push(&m.minHeap, moveItem)
	} else if m.minHeap.Len() > (m.maxHeap.Len() + 1) {
		moveItem := heap.Pop(&m.minHeap)
		heap.Push(&m.maxHeap, moveItem)
	}
}
// Median returns the median of the values currently in the window, or
// NaN when no values have been pushed yet (or the window size is zero).
func (m *MovingMedian) Median() float64 {
	if len(m.queue) == 0 {
		return math.NaN()
	}
	wsize := m.nelts
	if m.nelts > len(m.queue) {
		wsize = len(m.queue)
	}
	// Fix: with a non-zero window but zero pushes, wsize is 0 (even),
	// and the even branch below would index two empty heaps and panic.
	if wsize == 0 {
		return math.NaN()
	}
	if (wsize % 2) == 0 {
		return (m.maxHeap.float64Heap[0].f + m.minHeap.float64Heap[0].f) / 2
	}
	if m.maxHeap.Len() > m.minHeap.Len() {
		return m.maxHeap.float64Heap[0].f
	}
	return m.minHeap.float64Heap[0].f
}
Revert "Fixed corner case for repeated value 4 times but with performance hit"
This fix is wrong. It leaves 'dead' values still live in the heaps.
This reverts commit 3114c56fb2cadc72110c37c59d5efb8241773df1.
package movingmedian
import (
"container/heap"
"math"
)
// elt is a single sample in the moving window. idx is the element's
// current position inside whichever heap holds it, maintained by
// float64Heap's Swap/Push so heap.Remove can evict it in O(log n).
type elt struct {
	f   float64
	idx int
}

// float64Heap is the shared storage for both heap orderings.
type float64Heap []*elt

func (h float64Heap) Len() int { return len(h) }

func (h float64Heap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].idx = i
	h[j].idx = j
}

func (h *float64Heap) Push(x interface{}) {
	e := x.(*elt)
	e.idx = len(*h)
	*h = append(*h, e)
}

func (h *float64Heap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}

// minFloat64Heap holds the upper half of the window (smallest on top).
type minFloat64Heap struct {
	float64Heap
}

func (h minFloat64Heap) Less(i, j int) bool { return h.float64Heap[i].f < h.float64Heap[j].f }

// maxFloat64Heap holds the lower half of the window (largest on top).
type maxFloat64Heap struct {
	float64Heap
}

func (h maxFloat64Heap) Less(i, j int) bool { return h.float64Heap[i].f > h.float64Heap[j].f }

// MovingMedian tracks the median of the most recent `size` pushed
// values with the classic two-heap scheme; queue is a ring buffer of
// the window's samples in arrival order.
type MovingMedian struct {
	idx     int
	nelts   int
	queue   []elt
	maxHeap maxFloat64Heap
	minHeap minFloat64Heap
}

// NewMovingMedian returns a MovingMedian over a window of the given size.
func NewMovingMedian(size int) MovingMedian {
	m := MovingMedian{
		queue:   make([]elt, size),
		maxHeap: maxFloat64Heap{},
		minHeap: minFloat64Heap{},
	}
	heap.Init(&m.maxHeap)
	heap.Init(&m.minHeap)
	return m
}

// Push adds v to the window, evicting the oldest sample once the
// window is full, and rebalances the two heaps so their sizes differ
// by at most one.
func (m *MovingMedian) Push(v float64) {
	if m.nelts >= len(m.queue) {
		// Window full: remove the sample about to be overwritten from
		// the heap that holds it. A value strictly greater than the
		// min-heap's top must live in the min-heap; for a size-1 window
		// everything is in the min-heap.
		// NOTE(review): when old.f equals the min-heap top, the sample
		// may actually live in the min-heap (known repeated-value
		// corner case; see the revert note above).
		old := &m.queue[m.idx]
		if len(m.queue) == 1 || old.f > m.minHeap.float64Heap[0].f {
			heap.Remove(&m.minHeap, old.idx)
		} else {
			heap.Remove(&m.maxHeap, old.idx)
		}
	}
	m.queue[m.idx] = elt{f: v}
	e := &m.queue[m.idx]
	m.nelts++
	m.idx++
	if m.idx >= len(m.queue) {
		m.idx = 0
	}
	if m.minHeap.Len() == 0 ||
		v > m.minHeap.float64Heap[0].f {
		heap.Push(&m.minHeap, e)
	} else {
		heap.Push(&m.maxHeap, e)
	}
	// Rebalance so the heap sizes differ by at most one.
	if m.maxHeap.Len() > (m.minHeap.Len() + 1) {
		moveItem := heap.Pop(&m.maxHeap)
		heap.Push(&m.minHeap, moveItem)
	} else if m.minHeap.Len() > (m.maxHeap.Len() + 1) {
		moveItem := heap.Pop(&m.minHeap)
		heap.Push(&m.maxHeap, moveItem)
	}
}

// Median returns the median of the values currently in the window, or
// NaN when no values have been pushed yet (or the window size is zero).
func (m *MovingMedian) Median() float64 {
	wsize := m.nelts
	if wsize > len(m.queue) {
		wsize = len(m.queue)
	}
	// Fix: with zero samples, wsize is 0 (even) and the even branch
	// below used to index two empty heaps and panic.
	if wsize == 0 {
		return math.NaN()
	}
	if (wsize % 2) == 0 {
		return (m.maxHeap.float64Heap[0].f + m.minHeap.float64Heap[0].f) / 2
	}
	if m.maxHeap.Len() > m.minHeap.Len() {
		return m.maxHeap.float64Heap[0].f
	}
	return m.minHeap.float64Heap[0].f
}
|
package apiserver_test
import (
"errors"
"fmt"
"io"
. "launchpad.net/gocheck"
"launchpad.net/juju-core/charm"
"launchpad.net/juju-core/juju/testing"
"launchpad.net/juju-core/rpc"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/api"
"launchpad.net/juju-core/state/api/params"
"launchpad.net/juju-core/state/apiserver"
coretesting "launchpad.net/juju-core/testing"
"net"
stdtesting "testing"
"time"
)
// TestAll runs the gocheck suites in this package against a shared
// MongoDB test instance.
func TestAll(t *stdtesting.T) {
	coretesting.MgoTestPackage(t)
}
// suite wires the apiserver tests into the Juju connection test harness.
type suite struct {
	testing.JujuConnSuite
	listener net.Listener // API server listener, owned per-test
}

var _ = Suite(&suite{})

// Enable authentication so the permission checks below are meaningful.
func init() {
	apiserver.AuthenticationEnabled = true
}
// removeServiceAndUnits tears down the given service completely:
// all its units are made Dead and removed, then the service itself is
// destroyed, and the removal is verified via a failing Refresh.
func removeServiceAndUnits(c *C, service *state.Service) {
	// Destroy all units for the service.
	units, err := service.AllUnits()
	c.Assert(err, IsNil)
	for _, unit := range units {
		err = unit.EnsureDead()
		c.Assert(err, IsNil)
		err = unit.Remove()
		c.Assert(err, IsNil)
	}
	err = service.Refresh()
	c.Assert(err, IsNil)
	err = service.Destroy()
	c.Assert(err, IsNil)
	// With no units left, Destroy removes the service immediately.
	err = service.Refresh()
	c.Assert(state.IsNotFound(err), Equals, true)
}
var operationPermTests = []struct {
about string
// op performs the operation to be tested using the given state
// connection. It returns a function that should be used to
// undo any changes made by the operation.
op func(c *C, st *api.State, mst *state.State) (reset func(), err error)
allow []string
deny []string
}{{
about: "Unit.Get",
op: opGetUnitWordpress0,
deny: []string{"user-admin", "user-other"},
}, {
about: "Machine.Get",
op: opGetMachine1,
deny: []string{"user-admin", "user-other"},
}, {
about: "Machine.SetPassword",
op: opMachine1SetPassword,
allow: []string{"machine-0", "machine-1"},
}, {
about: "Unit.SetPassword (on principal unit)",
op: opUnitSetPassword("wordpress/0"),
allow: []string{"unit-wordpress-0", "machine-1"},
}, {
about: "Unit.SetPassword (on subordinate unit)",
op: opUnitSetPassword("logging/0"),
allow: []string{"unit-logging-0", "unit-wordpress-0"},
}, {
about: "Client.Status",
op: opClientStatus,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceSet",
op: opClientServiceSet,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceSetYAML",
op: opClientServiceSetYAML,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceGet",
op: opClientServiceGet,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceExpose",
op: opClientServiceExpose,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceUnexpose",
op: opClientServiceUnexpose,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceDeploy",
op: opClientServiceDeploy,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.WatchAll",
op: opClientWatchAll,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.CharmInfo",
op: opClientCharmInfo,
allow: []string{"user-admin", "user-other"},
},
}
// allowed returns the set of allowed entities given an allow list and a
// deny list. If an allow list is specified, only those entities are
// allowed; otherwise those in deny are disallowed.
func allowed(all, allow, deny []string) map[string]bool {
p := make(map[string]bool)
if allow != nil {
for _, e := range allow {
p[e] = true
}
return p
}
loop:
for _, e0 := range all {
for _, e1 := range deny {
if e1 == e0 {
continue loop
}
}
p[e0] = true
}
return p
}
// TestOperationPerm runs every operation in operationPermTests as every
// entity in the scenario, asserting that exactly the entities computed
// by allowed() succeed and everyone else gets CodeUnauthorized.
func (s *suite) TestOperationPerm(c *C) {
	entities := s.setUpScenario(c)
	for i, t := range operationPermTests {
		allow := allowed(entities, t.allow, t.deny)
		for _, e := range entities {
			c.Logf("test %d; %s; entity %q", i, t.about, e)
			st := s.openAs(c, e)
			reset, err := t.op(c, st, s.State)
			if allow[e] {
				c.Check(err, IsNil)
			} else {
				c.Check(err, ErrorMatches, "permission denied")
				c.Check(api.ErrCode(err), Equals, api.CodeUnauthorized)
			}
			// reset undoes any state the op created, even on failure.
			reset()
			st.Close()
		}
	}
}
func opGetUnitWordpress0(c *C, st *api.State, mst *state.State) (func(), error) {
u, err := st.Unit("wordpress/0")
if err != nil {
c.Check(u, IsNil)
} else {
name, ok := u.DeployerName()
c.Check(ok, Equals, true)
c.Check(name, Equals, "machine-1")
}
return func() {}, err
}
func opUnitSetPassword(unitName string) func(c *C, st *api.State, mst *state.State) (func(), error) {
return func(c *C, st *api.State, mst *state.State) (func(), error) {
u, err := st.Unit(unitName)
if err != nil {
c.Check(u, IsNil)
return func() {}, err
}
err = u.SetPassword("another password")
if err != nil {
return func() {}, err
}
return func() {
setDefaultPassword(c, u)
}, nil
}
}
func opGetMachine1(c *C, st *api.State, mst *state.State) (func(), error) {
m, err := st.Machine("1")
if err != nil {
c.Check(m, IsNil)
} else {
name, ok := m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(name, Equals, "i-machine-1")
}
return func() {}, err
}
func opMachine1SetPassword(c *C, st *api.State, mst *state.State) (func(), error) {
m, err := st.Machine("1")
if err != nil {
c.Check(m, IsNil)
return func() {}, err
}
err = m.SetPassword("another password")
if err != nil {
return func() {}, err
}
return func() {
setDefaultPassword(c, m)
}, nil
}
func opClientCharmInfo(c *C, st *api.State, mst *state.State) (func(), error) {
info, err := st.Client().CharmInfo("local:series/wordpress-3")
if err != nil {
c.Check(info, IsNil)
return func() {}, err
}
c.Assert(err, IsNil)
c.Assert(info.URL, Equals, "local:series/wordpress-3")
c.Assert(info.Meta.Name, Equals, "wordpress")
c.Assert(info.Revision, Equals, 3)
return func() {}, nil
}
func opClientStatus(c *C, st *api.State, mst *state.State) (func(), error) {
status, err := st.Client().Status()
if err != nil {
c.Check(status, IsNil)
return func() {}, err
}
c.Assert(err, IsNil)
c.Assert(status, DeepEquals, scenarioStatus)
return func() {}, nil
}
func resetBlogTitle(c *C, st *api.State) func() {
return func() {
err := st.Client().ServiceSet("wordpress", map[string]string{
"blog-title": "",
})
c.Assert(err, IsNil)
}
}
func opClientServiceSet(c *C, st *api.State, mst *state.State) (func(), error) {
err := st.Client().ServiceSet("wordpress", map[string]string{
"blog-title": "foo",
})
if err != nil {
return func() {}, err
}
return resetBlogTitle(c, st), nil
}
func opClientServiceSetYAML(c *C, st *api.State, mst *state.State) (func(), error) {
err := st.Client().ServiceSetYAML("wordpress", `"blog-title": "foo"`)
if err != nil {
return func() {}, err
}
return resetBlogTitle(c, st), nil
}
func opClientServiceGet(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only shows that the call is made without error, ensuring the
// signatures match.
_, err := st.Client().ServiceGet("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
func opClientServiceExpose(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only shows that the call is made without error, ensuring the
// signatures match.
err := st.Client().ServiceExpose("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
func opClientServiceUnexpose(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only checks that the call is made without error, ensuring the
// signatures match.
err := st.Client().ServiceUnexpose("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
// opClientServiceDeploy deploys a charm from the local test repository.
// This test only checks that the call is made without error, ensuring the
// signatures match.
// Fixes: the InferRepository error was silently overwritten, and the
// apiserver.CharmStore global was not restored when ServiceDeploy failed.
func opClientServiceDeploy(c *C, st *api.State, mst *state.State) (func(), error) {
	// We are cheating and using a local repo only.
	// Set the CharmStore to the test repository.
	serviceName := "mywordpress"
	charmUrl := "local:series/wordpress"
	parsedUrl := charm.MustParseURL(charmUrl)
	repo, err := charm.InferRepository(parsedUrl, coretesting.Charms.Path)
	c.Assert(err, IsNil)
	originalServerCharmStore := apiserver.CharmStore
	apiserver.CharmStore = repo
	restoreStore := func() {
		apiserver.CharmStore = originalServerCharmStore
	}
	err = st.Client().ServiceDeploy(charmUrl, serviceName, 1, "")
	if err != nil {
		// Nothing was deployed; just restore the global.
		return restoreStore, err
	}
	return func() {
		restoreStore()
		service, err := mst.Service(serviceName)
		c.Assert(err, IsNil)
		removeServiceAndUnits(c, service)
	}, nil
}
func opClientWatchAll(c *C, st *api.State) (func(), error) {
watcher, err := st.Client().WatchAll()
if err == nil {
watcher.Stop()
}
return func() {}, err
}
// scenarioStatus describes the expected state
// of the juju environment set up by setUpScenario.
var scenarioStatus = &api.Status{
Machines: map[string]api.MachineInfo{
"0": {
InstanceId: "i-machine-0",
},
"1": {
InstanceId: "i-machine-1",
},
"2": {
InstanceId: "i-machine-2",
},
},
}
// setUpScenario makes an environment scenario suitable for
// testing most kinds of access scenario. It returns
// a list of all the entities in the scenario.
//
// When the scenario is initialized, we have:
// user-admin
// user-other
// machine-0
// instance-id="i-machine-0"
// jobs=manage-environ
// machine-1
// instance-id="i-machine-1"
// jobs=host-units
// machine-2
// instance-id="i-machine-2"
// jobs=host-units
// service-wordpress
// service-logging
// unit-wordpress-0
// deployer-name=machine-1
// unit-logging-0
// deployer-name=unit-wordpress-0
// unit-wordpress-1
// deployer-name=machine-2
// unit-logging-1
// deployer-name=unit-wordpress-1
//
// The passwords for all returned entities are
// set to the entity name with a " password" suffix.
//
// Note that there is nothing special about machine-0
// here - it's the environment manager in this scenario
// just because machine 0 has traditionally been the
// environment manager (bootstrap machine), so is
// hopefully easier to remember as such.
func (s *suite) setUpScenario(c *C) (entities []string) {
add := func(e state.AuthEntity) {
entities = append(entities, e.EntityName())
}
u, err := s.State.User("admin")
c.Assert(err, IsNil)
setDefaultPassword(c, u)
add(u)
u, err = s.State.AddUser("other", "")
c.Assert(err, IsNil)
setDefaultPassword(c, u)
add(u)
m, err := s.State.AddMachine("series", state.JobManageEnviron)
c.Assert(err, IsNil)
c.Assert(m.EntityName(), Equals, "machine-0")
err = m.SetInstanceId(state.InstanceId("i-" + m.EntityName()))
c.Assert(err, IsNil)
setDefaultPassword(c, m)
add(m)
wordpress, err := s.State.AddService("wordpress", s.AddTestingCharm(c, "wordpress"))
c.Assert(err, IsNil)
_, err = s.State.AddService("logging", s.AddTestingCharm(c, "logging"))
c.Assert(err, IsNil)
eps, err := s.State.InferEndpoints([]string{"logging", "wordpress"})
c.Assert(err, IsNil)
rel, err := s.State.AddRelation(eps...)
c.Assert(err, IsNil)
for i := 0; i < 2; i++ {
wu, err := wordpress.AddUnit()
c.Assert(err, IsNil)
c.Assert(wu.EntityName(), Equals, fmt.Sprintf("unit-wordpress-%d", i))
setDefaultPassword(c, wu)
add(wu)
m, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
c.Assert(m.EntityName(), Equals, fmt.Sprintf("machine-%d", i+1))
err = m.SetInstanceId(state.InstanceId("i-" + m.EntityName()))
c.Assert(err, IsNil)
setDefaultPassword(c, m)
add(m)
err = wu.AssignToMachine(m)
c.Assert(err, IsNil)
deployer, ok := wu.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, fmt.Sprintf("machine-%d", i+1))
wru, err := rel.Unit(wu)
c.Assert(err, IsNil)
// Create the subordinate unit as a side-effect of entering
// scope in the principal's relation-unit.
err = wru.EnterScope(nil)
c.Assert(err, IsNil)
lu, err := s.State.Unit(fmt.Sprintf("logging/%d", i))
c.Assert(err, IsNil)
c.Assert(lu.IsPrincipal(), Equals, false)
deployer, ok = lu.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, fmt.Sprintf("unit-wordpress-%d", i))
setDefaultPassword(c, lu)
add(lu)
}
return
}
// AuthEntity is the same as state.AuthEntity but
// without PasswordValid, which is implemented
// by state entities but not by api entities.
type AuthEntity interface {
EntityName() string
SetPassword(pass string) error
Refresh() error
}
func setDefaultPassword(c *C, e AuthEntity) {
err := e.SetPassword(e.EntityName() + " password")
c.Assert(err, IsNil)
}
var badLoginTests = []struct {
entityName string
password string
err string
code string
}{{
entityName: "user-admin",
password: "wrong password",
err: "invalid entity name or password",
code: api.CodeUnauthorized,
}, {
entityName: "user-foo",
password: "password",
err: "invalid entity name or password",
code: api.CodeUnauthorized,
}, {
entityName: "bar",
password: "password",
err: `invalid entity name "bar"`,
}}
func (s *suite) TestBadLogin(c *C) {
_, info, err := s.APIConn.Environ.StateInfo()
c.Assert(err, IsNil)
for i, t := range badLoginTests {
c.Logf("test %d; entity %q; password %q", i, t.entityName, t.password)
info.EntityName = ""
info.Password = ""
func() {
st, err := api.Open(info)
c.Assert(err, IsNil)
defer st.Close()
_, err = st.Machine("0")
c.Assert(err, ErrorMatches, "not logged in")
c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized, Commentf("error %#v", err))
_, err = st.Unit("foo/0")
c.Assert(err, ErrorMatches, "not logged in")
c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
err = st.Login(t.entityName, t.password)
c.Assert(err, ErrorMatches, t.err)
c.Assert(api.ErrCode(err), Equals, t.code)
_, err = st.Machine("0")
c.Assert(err, ErrorMatches, "not logged in")
c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
}()
}
}
func (s *suite) TestClientStatus(c *C) {
s.setUpScenario(c)
status, err := s.APIState.Client().Status()
c.Assert(err, IsNil)
c.Assert(status, DeepEquals, scenarioStatus)
}
func (s *suite) TestClientServerSet(c *C) {
dummy, err := s.State.AddService("dummy", s.AddTestingCharm(c, "dummy"))
c.Assert(err, IsNil)
err = s.APIState.Client().ServiceSet("dummy", map[string]string{
"title": "xxx",
"username": "yyy",
})
c.Assert(err, IsNil)
conf, err := dummy.Config()
c.Assert(err, IsNil)
c.Assert(conf.Map(), DeepEquals, map[string]interface{}{
"title": "xxx",
"username": "yyy",
})
}
func (s *suite) TestClientServiceSetYAML(c *C) {
dummy, err := s.State.AddService("dummy", s.AddTestingCharm(c, "dummy"))
c.Assert(err, IsNil)
err = s.APIState.Client().ServiceSetYAML("dummy", "title: aaa\nusername: bbb")
c.Assert(err, IsNil)
conf, err := dummy.Config()
c.Assert(err, IsNil)
c.Assert(conf.Map(), DeepEquals, map[string]interface{}{
"title": "aaa",
"username": "bbb",
})
}
var clientCharmInfoTests = []struct {
about string
url string
err string
}{
{
about: "retrieves charm info",
url: "local:series/wordpress-3",
},
{
about: "invalid URL",
url: "not-valid",
err: `charm URL has invalid schema: "not-valid"`,
},
{
about: "unknown charm",
url: "cs:missing/one-1",
err: `charm "cs:missing/one-1" not found`,
},
}
func (s *suite) TestClientCharmInfo(c *C) {
// Use wordpress for tests so that we can compare Provides and Requires.
charm := s.AddTestingCharm(c, "wordpress")
for i, t := range clientCharmInfoTests {
c.Logf("test %d. %s", i, t.about)
info, err := s.APIState.Client().CharmInfo(t.url)
if t.err != "" {
c.Assert(err, ErrorMatches, t.err)
continue
}
c.Assert(err, IsNil)
expected := &api.CharmInfo{
Revision: charm.Revision(),
URL: charm.URL().String(),
Config: charm.Config(),
Meta: charm.Meta(),
}
c.Assert(info, DeepEquals, expected)
}
}
// TestClientEnvironmentInfo checks that Client.EnvironmentInfo reports
// the same default series and provider type as the state's environment
// configuration.
func (s *suite) TestClientEnvironmentInfo(c *C) {
	conf, err := s.State.EnvironConfig()
	// Previously this error was discarded with a blank identifier; a
	// failure here would have shown up as a nil-pointer panic below.
	c.Assert(err, IsNil)
	info, err := s.APIState.Client().EnvironmentInfo()
	c.Assert(err, IsNil)
	c.Assert(info.DefaultSeries, Equals, conf.DefaultSeries())
	c.Assert(info.ProviderType, Equals, conf.Type())
}
func (s *suite) TestMachineLogin(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
err = stm.SetPassword("machine-password")
c.Assert(err, IsNil)
err = stm.SetInstanceId("i-foo")
c.Assert(err, IsNil)
_, info, err := s.APIConn.Environ.StateInfo()
c.Assert(err, IsNil)
info.EntityName = stm.EntityName()
info.Password = "machine-password"
st, err := api.Open(info)
c.Assert(err, IsNil)
defer st.Close()
m, err := st.Machine(stm.Id())
c.Assert(err, IsNil)
instId, ok := m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(instId, Equals, "i-foo")
}
func (s *suite) TestMachineInstanceId(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
// Normal users can't access Machines...
m, err := s.APIState.Machine(stm.Id())
c.Assert(err, ErrorMatches, "permission denied")
c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
c.Assert(m, IsNil)
// ... so login as the machine.
st := s.openAs(c, stm.EntityName())
defer st.Close()
m, err = st.Machine(stm.Id())
c.Assert(err, IsNil)
instId, ok := m.InstanceId()
c.Check(instId, Equals, "")
c.Check(ok, Equals, false)
err = stm.SetInstanceId("foo")
c.Assert(err, IsNil)
instId, ok = m.InstanceId()
c.Check(instId, Equals, "")
c.Check(ok, Equals, false)
err = m.Refresh()
c.Assert(err, IsNil)
instId, ok = m.InstanceId()
c.Check(ok, Equals, true)
c.Assert(instId, Equals, "foo")
}
func (s *suite) TestMachineRefresh(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
err = stm.SetInstanceId("foo")
c.Assert(err, IsNil)
st := s.openAs(c, stm.EntityName())
defer st.Close()
m, err := st.Machine(stm.Id())
c.Assert(err, IsNil)
instId, ok := m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(instId, Equals, "foo")
err = stm.SetInstanceId("bar")
c.Assert(err, IsNil)
instId, ok = m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(instId, Equals, "foo")
err = m.Refresh()
c.Assert(err, IsNil)
instId, ok = m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(instId, Equals, "bar")
}
func (s *suite) TestMachineSetPassword(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
st := s.openAs(c, stm.EntityName())
defer st.Close()
m, err := st.Machine(stm.Id())
c.Assert(err, IsNil)
err = m.SetPassword("foo")
c.Assert(err, IsNil)
err = stm.Refresh()
c.Assert(err, IsNil)
c.Assert(stm.PasswordValid("foo"), Equals, true)
}
func (s *suite) TestMachineEntityName(c *C) {
c.Assert(api.MachineEntityName("2"), Equals, "machine-2")
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
st := s.openAs(c, "machine-0")
defer st.Close()
m, err := st.Machine("0")
c.Assert(err, IsNil)
c.Assert(m.EntityName(), Equals, "machine-0")
}
func (s *suite) TestMachineWatch(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
st := s.openAs(c, stm.EntityName())
defer st.Close()
m, err := st.Machine(stm.Id())
c.Assert(err, IsNil)
w0 := m.Watch()
w1 := m.Watch()
// Initial event.
ok := chanRead(c, w0.Changes(), "watcher 0")
c.Assert(ok, Equals, true)
ok = chanRead(c, w1.Changes(), "watcher 1")
c.Assert(ok, Equals, true)
// No subsequent event until something changes.
select {
case <-w0.Changes():
c.Fatalf("unexpected value on watcher 0")
case <-w1.Changes():
c.Fatalf("unexpected value on watcher 1")
case <-time.After(20 * time.Millisecond):
}
err = stm.SetInstanceId("foo")
c.Assert(err, IsNil)
s.State.StartSync()
// Next event.
ok = chanRead(c, w0.Changes(), "watcher 0")
c.Assert(ok, Equals, true)
ok = chanRead(c, w1.Changes(), "watcher 1")
c.Assert(ok, Equals, true)
err = w0.Stop()
c.Check(err, IsNil)
err = w1.Stop()
c.Check(err, IsNil)
ok = chanRead(c, w0.Changes(), "watcher 0")
c.Assert(ok, Equals, false)
ok = chanRead(c, w1.Changes(), "watcher 1")
c.Assert(ok, Equals, false)
}
func (s *suite) TestServerStopsOutstandingWatchMethod(c *C) {
// Start our own instance of the server so we have
// a handle on it to stop it.
srv, err := apiserver.NewServer(s.State, "localhost:0", []byte(coretesting.ServerCert), []byte(coretesting.ServerKey))
c.Assert(err, IsNil)
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
err = stm.SetPassword("password")
c.Assert(err, IsNil)
// Note we can't use openAs because we're
// not connecting to s.APIConn.
st, err := api.Open(&api.Info{
EntityName: stm.EntityName(),
Password: "password",
Addrs: []string{srv.Addr()},
CACert: []byte(coretesting.CACert),
})
c.Assert(err, IsNil)
defer st.Close()
m, err := st.Machine(stm.Id())
c.Assert(err, IsNil)
c.Assert(m.Id(), Equals, stm.Id())
w := m.Watch()
// Initial event.
ok := chanRead(c, w.Changes(), "watcher 0")
c.Assert(ok, Equals, true)
// Wait long enough for the Next request to be sent
// so it's blocking on the server side.
time.Sleep(50 * time.Millisecond)
c.Logf("stopping server")
err = srv.Stop()
c.Assert(err, IsNil)
c.Logf("server stopped")
ok = chanRead(c, w.Changes(), "watcher 0")
c.Assert(ok, Equals, false)
c.Assert(api.ErrCode(w.Err()), Equals, api.CodeStopped)
}
// chanRead receives one value from ch, failing the test if nothing
// arrives within ten seconds. The returned ok reports whether the
// channel was still open (false means it was closed).
func chanRead(c *C, ch <-chan struct{}, what string) (ok bool) {
	timeout := time.After(10 * time.Second)
	select {
	case _, open := <-ch:
		return open
	case <-timeout:
		c.Fatalf("timed out reading from %s", what)
	}
	// Fatalf does not return, but the compiler needs a terminator.
	panic("unreachable")
}
func (s *suite) TestUnitRefresh(c *C) {
s.setUpScenario(c)
st := s.openAs(c, "unit-wordpress-0")
defer st.Close()
u, err := st.Unit("wordpress/0")
c.Assert(err, IsNil)
deployer, ok := u.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, "machine-1")
stu, err := s.State.Unit("wordpress/0")
c.Assert(err, IsNil)
err = stu.UnassignFromMachine()
c.Assert(err, IsNil)
deployer, ok = u.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, "machine-1")
err = u.Refresh()
c.Assert(err, IsNil)
deployer, ok = u.DeployerName()
c.Assert(ok, Equals, false)
c.Assert(deployer, Equals, "")
}
func (s *suite) TestErrors(c *C) {
stm, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
setDefaultPassword(c, stm)
st := s.openAs(c, stm.EntityName())
defer st.Close()
// By testing this single call, we test that the
// error transformation function is correctly called
// on error returns from the API apiserver. The transformation
// function itself is tested below.
_, err = st.Machine("99")
c.Assert(api.ErrCode(err), Equals, api.CodeNotFound)
}
var errorTransformTests = []struct {
err error
code string
}{{
err: state.NotFoundf("hello"),
code: api.CodeNotFound,
}, {
err: state.Unauthorizedf("hello"),
code: api.CodeUnauthorized,
}, {
err: state.ErrCannotEnterScopeYet,
code: api.CodeCannotEnterScopeYet,
}, {
err: state.ErrCannotEnterScope,
code: api.CodeCannotEnterScope,
}, {
err: state.ErrExcessiveContention,
code: api.CodeExcessiveContention,
}, {
err: state.ErrUnitHasSubordinates,
code: api.CodeUnitHasSubordinates,
}, {
err: apiserver.ErrBadId,
code: api.CodeNotFound,
}, {
err: apiserver.ErrBadCreds,
code: api.CodeUnauthorized,
}, {
err: apiserver.ErrPerm,
code: api.CodeUnauthorized,
}, {
err: apiserver.ErrNotLoggedIn,
code: api.CodeUnauthorized,
}, {
err: apiserver.ErrUnknownWatcher,
code: api.CodeNotFound,
}, {
err: &state.NotAssignedError{&state.Unit{}}, // too sleazy?!
code: api.CodeNotAssigned,
}, {
err: apiserver.ErrStoppedWatcher,
code: api.CodeStopped,
}, {
err: errors.New("an error"),
code: "",
}}
func (s *suite) TestErrorTransform(c *C) {
for _, t := range errorTransformTests {
err1 := apiserver.ServerError(t.err)
c.Assert(err1.Error(), Equals, t.err.Error())
if t.code != "" {
c.Assert(api.ErrCode(err1), Equals, t.code)
} else {
c.Assert(err1, Equals, t.err)
}
}
}
func (s *suite) TestUnitEntityName(c *C) {
c.Assert(api.UnitEntityName("wordpress/2"), Equals, "unit-wordpress-2")
s.setUpScenario(c)
st := s.openAs(c, "unit-wordpress-0")
defer st.Close()
u, err := st.Unit("wordpress/0")
c.Assert(err, IsNil)
c.Assert(u.EntityName(), Equals, "unit-wordpress-0")
}
// TestStop verifies that stopping the API server terminates an
// in-flight client connection, and that calling Stop a second time
// succeeds (is idempotent).
func (s *suite) TestStop(c *C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	srv, err := apiserver.NewServer(s.State, "localhost:0", []byte(coretesting.ServerCert), []byte(coretesting.ServerKey))
	c.Assert(err, IsNil)
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	err = stm.SetInstanceId("foo")
	c.Assert(err, IsNil)
	err = stm.SetPassword("password")
	c.Assert(err, IsNil)
	// Note we can't use openAs because we're
	// not connecting to s.APIConn.
	st, err := api.Open(&api.Info{
		EntityName: stm.EntityName(),
		Password:   "password",
		Addrs:      []string{srv.Addr()},
		CACert:     []byte(coretesting.CACert),
	})
	c.Assert(err, IsNil)
	defer st.Close()
	// Prove the connection works before stopping the server.
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	c.Assert(m.Id(), Equals, stm.Id())
	err = srv.Stop()
	c.Assert(err, IsNil)
	// A request after Stop must fail.
	_, err = st.Machine(stm.Id())
	// The client has not necessarily seen the server
	// shutdown yet, so there are two possible
	// errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %v", err)
	}
	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, IsNil)
}
func (s *suite) TestClientServiceGet(c *C) {
s.setUpScenario(c)
config, err := s.APIState.Client().ServiceGet("wordpress")
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, ¶ms.ServiceGetResults{
Service: "wordpress",
Charm: "wordpress",
Settings: map[string]interface{}{
"blog-title": map[string]interface{}{
"type": "string",
"value": nil,
"description": "A descriptive title used for the blog."},
},
})
}
func (s *suite) TestClientServiceExpose(c *C) {
s.setUpScenario(c)
serviceName := "wordpress"
service, err := s.State.Service(serviceName)
c.Assert(err, IsNil)
c.Assert(service.IsExposed(), Equals, false)
err = s.APIState.Client().ServiceExpose(serviceName)
c.Assert(err, IsNil)
err = service.Refresh()
c.Assert(err, IsNil)
c.Assert(service.IsExposed(), Equals, true)
}
// TestClientServiceUnexpose exposes a service directly through state,
// then checks that Client.ServiceUnexpose clears the exposed flag.
func (s *suite) TestClientServiceUnexpose(c *C) {
	s.setUpScenario(c)
	serviceName := "wordpress"
	service, err := s.State.Service(serviceName)
	c.Assert(err, IsNil)
	// NOTE(review): SetExposed and Refresh return errors that were
	// previously discarded; assert them so setup failures are not
	// misreported as assertion failures below.
	err = service.SetExposed()
	c.Assert(err, IsNil)
	c.Assert(service.IsExposed(), Equals, true)
	err = s.APIState.Client().ServiceUnexpose(serviceName)
	c.Assert(err, IsNil)
	err = service.Refresh()
	c.Assert(err, IsNil)
	c.Assert(service.IsExposed(), Equals, false)
}
var serviceDeployTests = []struct {
about string
serviceName string
charmUrl string
numUnits int
expectedNumUnits int
}{{
about: "Normal deploy",
serviceName: "mywordpress",
charmUrl: "local:series/wordpress",
expectedNumUnits: 1,
}, {
about: "Two units",
serviceName: "mywordpress",
charmUrl: "local:series/wordpress",
numUnits: 2,
expectedNumUnits: 2,
},
}
// TestClientServiceDeploy deploys charms through the client API and
// verifies that the requested number of units is created for each case
// in serviceDeployTests. The server's CharmStore is monkey-patched to
// a local test repository for each iteration and restored afterwards.
func (s *suite) TestClientServiceDeploy(c *C) {
	s.setUpScenario(c)
	for i, test := range serviceDeployTests {
		c.Logf("test %d; %s", i, test.about)
		parsedUrl := charm.MustParseURL(test.charmUrl)
		localRepo, err := charm.InferRepository(parsedUrl,
			coretesting.Charms.Path)
		// Previously this error was clobbered by the reassignment
		// below without ever being checked.
		c.Assert(err, IsNil)
		// Monkey-patch server repository.
		originalServerCharmStore := apiserver.CharmStore
		apiserver.CharmStore = localRepo
		// The service must not exist before the deploy.
		_, err = s.State.Service(test.serviceName)
		c.Assert(err, NotNil)
		err = s.APIState.Client().ServiceDeploy(
			test.charmUrl, test.serviceName, test.numUnits, "")
		c.Assert(err, IsNil)
		service, err := s.State.Service(test.serviceName)
		c.Assert(err, IsNil)
		units, err := service.AllUnits()
		c.Assert(err, IsNil)
		c.Assert(units, HasLen, test.expectedNumUnits)
		// Clean up.
		removeServiceAndUnits(c, service)
		// Restore server repository.
		apiserver.CharmStore = originalServerCharmStore
	}
}
// This test will be thrown away, at least in part, once the stub code in
// state/megawatcher.go is implemented.
func (s *suite) TestClientWatchAll(c *C) {
watcher, err := s.APIState.Client().WatchAll()
c.Assert(err, IsNil)
defer func() {
err := watcher.Stop()
c.Assert(err, IsNil)
}()
deltas, err := watcher.Next()
c.Assert(err, IsNil)
// This is the part that most clearly is tied to the fact that we are
// testing a stub.
c.Assert(deltas, DeepEquals, state.StubNextDelta)
}
// openAs connects to the API state as the given entity
// with the default password for that entity.
func (s *suite) openAs(c *C, entityName string) *api.State {
_, info, err := s.APIConn.Environ.StateInfo()
c.Assert(err, IsNil)
info.EntityName = entityName
info.Password = fmt.Sprintf("%s password", entityName)
c.Logf("opening state; entity %q; password %q", info.EntityName, info.Password)
st, err := api.Open(info)
c.Assert(err, IsNil)
c.Assert(st, NotNil)
return st
}
// Modify new test to have different signature
package apiserver_test
import (
"errors"
"fmt"
"io"
. "launchpad.net/gocheck"
"launchpad.net/juju-core/charm"
"launchpad.net/juju-core/juju/testing"
"launchpad.net/juju-core/rpc"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/api"
"launchpad.net/juju-core/state/api/params"
"launchpad.net/juju-core/state/apiserver"
coretesting "launchpad.net/juju-core/testing"
"net"
stdtesting "testing"
"time"
)
func TestAll(t *stdtesting.T) {
coretesting.MgoTestPackage(t)
}
type suite struct {
testing.JujuConnSuite
listener net.Listener
}
var _ = Suite(&suite{})
func init() {
apiserver.AuthenticationEnabled = true
}
func removeServiceAndUnits(c *C, service *state.Service) {
// Destroy all units for the service.
units, err := service.AllUnits()
c.Assert(err, IsNil)
for _, unit := range units {
err = unit.EnsureDead()
c.Assert(err, IsNil)
err = unit.Remove()
c.Assert(err, IsNil)
}
err = service.Refresh()
c.Assert(err, IsNil)
err = service.Destroy()
c.Assert(err, IsNil)
err = service.Refresh()
c.Assert(state.IsNotFound(err), Equals, true)
}
var operationPermTests = []struct {
about string
// op performs the operation to be tested using the given state
// connection. It returns a function that should be used to
// undo any changes made by the operation.
op func(c *C, st *api.State, mst *state.State) (reset func(), err error)
allow []string
deny []string
}{{
about: "Unit.Get",
op: opGetUnitWordpress0,
deny: []string{"user-admin", "user-other"},
}, {
about: "Machine.Get",
op: opGetMachine1,
deny: []string{"user-admin", "user-other"},
}, {
about: "Machine.SetPassword",
op: opMachine1SetPassword,
allow: []string{"machine-0", "machine-1"},
}, {
about: "Unit.SetPassword (on principal unit)",
op: opUnitSetPassword("wordpress/0"),
allow: []string{"unit-wordpress-0", "machine-1"},
}, {
about: "Unit.SetPassword (on subordinate unit)",
op: opUnitSetPassword("logging/0"),
allow: []string{"unit-logging-0", "unit-wordpress-0"},
}, {
about: "Client.Status",
op: opClientStatus,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceSet",
op: opClientServiceSet,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceSetYAML",
op: opClientServiceSetYAML,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceGet",
op: opClientServiceGet,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceExpose",
op: opClientServiceExpose,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceUnexpose",
op: opClientServiceUnexpose,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.ServiceDeploy",
op: opClientServiceDeploy,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.WatchAll",
op: opClientWatchAll,
allow: []string{"user-admin", "user-other"},
}, {
about: "Client.CharmInfo",
op: opClientCharmInfo,
allow: []string{"user-admin", "user-other"},
},
}
// allowed returns the set of allowed entities given an allow list and a
// deny list. If an allow list is specified, only those entities are
// allowed; otherwise those in deny are disallowed.
// allowed returns the set of allowed entities given an allow list and a
// deny list. If an allow list is specified, only those entities are
// allowed; otherwise those in deny are disallowed.
func allowed(all, allow, deny []string) map[string]bool {
	permitted := make(map[string]bool)
	// An explicit (non-nil) allow list wins outright: nothing else
	// is considered, not even the deny list.
	if allow != nil {
		for _, name := range allow {
			permitted[name] = true
		}
		return permitted
	}
	// Otherwise every entity is permitted unless it appears on the
	// deny list.
	for _, name := range all {
		denied := false
		for _, d := range deny {
			if d == name {
				denied = true
				break
			}
		}
		if !denied {
			permitted[name] = true
		}
	}
	return permitted
}
// TestOperationPerm runs every entry in operationPermTests against
// every entity created by setUpScenario, checking that exactly the
// permitted entities may perform each operation and that everyone
// else gets a permission-denied error.
func (s *suite) TestOperationPerm(c *C) {
	entities := s.setUpScenario(c)
	for i, t := range operationPermTests {
		// Compute the set of entities expected to succeed for this op.
		allow := allowed(entities, t.allow, t.deny)
		for _, e := range entities {
			c.Logf("test %d; %s; entity %q", i, t.about, e)
			st := s.openAs(c, e)
			reset, err := t.op(c, st, s.State)
			if allow[e] {
				c.Check(err, IsNil)
			} else {
				c.Check(err, ErrorMatches, "permission denied")
				c.Check(api.ErrCode(err), Equals, api.CodeUnauthorized)
			}
			// Undo any changes the operation made, then drop the
			// connection before trying the next entity.
			reset()
			st.Close()
		}
	}
}
func opGetUnitWordpress0(c *C, st *api.State, mst *state.State) (func(), error) {
u, err := st.Unit("wordpress/0")
if err != nil {
c.Check(u, IsNil)
} else {
name, ok := u.DeployerName()
c.Check(ok, Equals, true)
c.Check(name, Equals, "machine-1")
}
return func() {}, err
}
func opUnitSetPassword(unitName string) func(c *C, st *api.State, mst *state.State) (func(), error) {
return func(c *C, st *api.State, mst *state.State) (func(), error) {
u, err := st.Unit(unitName)
if err != nil {
c.Check(u, IsNil)
return func() {}, err
}
err = u.SetPassword("another password")
if err != nil {
return func() {}, err
}
return func() {
setDefaultPassword(c, u)
}, nil
}
}
func opGetMachine1(c *C, st *api.State, mst *state.State) (func(), error) {
m, err := st.Machine("1")
if err != nil {
c.Check(m, IsNil)
} else {
name, ok := m.InstanceId()
c.Assert(ok, Equals, true)
c.Assert(name, Equals, "i-machine-1")
}
return func() {}, err
}
func opMachine1SetPassword(c *C, st *api.State, mst *state.State) (func(), error) {
m, err := st.Machine("1")
if err != nil {
c.Check(m, IsNil)
return func() {}, err
}
err = m.SetPassword("another password")
if err != nil {
return func() {}, err
}
return func() {
setDefaultPassword(c, m)
}, nil
}
func opClientCharmInfo(c *C, st *api.State, mst *state.State) (func(), error) {
info, err := st.Client().CharmInfo("local:series/wordpress-3")
if err != nil {
c.Check(info, IsNil)
return func() {}, err
}
c.Assert(err, IsNil)
c.Assert(info.URL, Equals, "local:series/wordpress-3")
c.Assert(info.Meta.Name, Equals, "wordpress")
c.Assert(info.Revision, Equals, 3)
return func() {}, nil
}
func opClientStatus(c *C, st *api.State, mst *state.State) (func(), error) {
status, err := st.Client().Status()
if err != nil {
c.Check(status, IsNil)
return func() {}, err
}
c.Assert(err, IsNil)
c.Assert(status, DeepEquals, scenarioStatus)
return func() {}, nil
}
// resetBlogTitle returns a cleanup closure that sets the wordpress
// blog-title setting back to the empty string, asserting success.
func resetBlogTitle(c *C, st *api.State) func() {
	return func() {
		settings := map[string]string{"blog-title": ""}
		err := st.Client().ServiceSet("wordpress", settings)
		c.Assert(err, IsNil)
	}
}
func opClientServiceSet(c *C, st *api.State, mst *state.State) (func(), error) {
err := st.Client().ServiceSet("wordpress", map[string]string{
"blog-title": "foo",
})
if err != nil {
return func() {}, err
}
return resetBlogTitle(c, st), nil
}
func opClientServiceSetYAML(c *C, st *api.State, mst *state.State) (func(), error) {
err := st.Client().ServiceSetYAML("wordpress", `"blog-title": "foo"`)
if err != nil {
return func() {}, err
}
return resetBlogTitle(c, st), nil
}
func opClientServiceGet(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only shows that the call is made without error, ensuring the
// signatures match.
_, err := st.Client().ServiceGet("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
func opClientServiceExpose(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only shows that the call is made without error, ensuring the
// signatures match.
err := st.Client().ServiceExpose("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
func opClientServiceUnexpose(c *C, st *api.State, mst *state.State) (func(), error) {
// This test only checks that the call is made without error, ensuring the
// signatures match.
err := st.Client().ServiceUnexpose("wordpress")
if err != nil {
return func() {}, err
}
c.Assert(err, IsNil)
return func() {}, nil
}
// opClientServiceDeploy checks that Client.ServiceDeploy can be called,
// ensuring the signatures match. We are cheating and using a local repo
// only: the server's CharmStore is monkey-patched to the test repository
// for the duration of the call, and restored by the returned reset func.
func opClientServiceDeploy(c *C, st *api.State, mst *state.State) (func(), error) {
	serviceName := "mywordpress"
	charmUrl := "local:series/wordpress"
	parsedUrl := charm.MustParseURL(charmUrl)
	repo, err := charm.InferRepository(parsedUrl, coretesting.Charms.Path)
	// Previously this error was silently ignored; a broken test repo
	// would have surfaced later as a confusing deploy failure.
	c.Assert(err, IsNil)
	originalServerCharmStore := apiserver.CharmStore
	apiserver.CharmStore = repo
	err = st.Client().ServiceDeploy(charmUrl, serviceName, 1, "")
	if err != nil {
		// Restore the patched charm store on failure too, so a
		// permission-denied run does not leak the stub repository
		// into later tests (the old code only restored on success).
		apiserver.CharmStore = originalServerCharmStore
		return func() {}, err
	}
	return func() {
		apiserver.CharmStore = originalServerCharmStore
		service, err := mst.Service(serviceName)
		c.Assert(err, IsNil)
		removeServiceAndUnits(c, service)
	}, nil
}
func opClientWatchAll(c *C, st *api.State, mst *state.State) (func(), error) {
watcher, err := st.Client().WatchAll()
if err == nil {
watcher.Stop()
}
return func() {}, err
}
// scenarioStatus describes the expected state
// of the juju environment set up by setUpScenario.
var scenarioStatus = &api.Status{
Machines: map[string]api.MachineInfo{
"0": {
InstanceId: "i-machine-0",
},
"1": {
InstanceId: "i-machine-1",
},
"2": {
InstanceId: "i-machine-2",
},
},
}
// setUpScenario makes an environment scenario suitable for
// testing most kinds of access scenario. It returns
// a list of all the entities in the scenario.
//
// When the scenario is initialized, we have:
// user-admin
// user-other
// machine-0
// instance-id="i-machine-0"
// jobs=manage-environ
// machine-1
// instance-id="i-machine-1"
// jobs=host-units
// machine-2
// instance-id="i-machine-2"
// jobs=host-units
// service-wordpress
// service-logging
// unit-wordpress-0
// deployer-name=machine-1
// unit-logging-0
// deployer-name=unit-wordpress-0
// unit-wordpress-1
// deployer-name=machine-2
// unit-logging-1
// deployer-name=unit-wordpress-1
//
// The passwords for all returned entities are
// set to the entity name with a " password" suffix.
//
// Note that there is nothing special about machine-0
// here - it's the environment manager in this scenario
// just because machine 0 has traditionally been the
// environment manager (bootstrap machine), so is
// hopefully easier to remember as such.
func (s *suite) setUpScenario(c *C) (entities []string) {
add := func(e state.AuthEntity) {
entities = append(entities, e.EntityName())
}
u, err := s.State.User("admin")
c.Assert(err, IsNil)
setDefaultPassword(c, u)
add(u)
u, err = s.State.AddUser("other", "")
c.Assert(err, IsNil)
setDefaultPassword(c, u)
add(u)
m, err := s.State.AddMachine("series", state.JobManageEnviron)
c.Assert(err, IsNil)
c.Assert(m.EntityName(), Equals, "machine-0")
err = m.SetInstanceId(state.InstanceId("i-" + m.EntityName()))
c.Assert(err, IsNil)
setDefaultPassword(c, m)
add(m)
wordpress, err := s.State.AddService("wordpress", s.AddTestingCharm(c, "wordpress"))
c.Assert(err, IsNil)
_, err = s.State.AddService("logging", s.AddTestingCharm(c, "logging"))
c.Assert(err, IsNil)
eps, err := s.State.InferEndpoints([]string{"logging", "wordpress"})
c.Assert(err, IsNil)
rel, err := s.State.AddRelation(eps...)
c.Assert(err, IsNil)
for i := 0; i < 2; i++ {
wu, err := wordpress.AddUnit()
c.Assert(err, IsNil)
c.Assert(wu.EntityName(), Equals, fmt.Sprintf("unit-wordpress-%d", i))
setDefaultPassword(c, wu)
add(wu)
m, err := s.State.AddMachine("series", state.JobHostUnits)
c.Assert(err, IsNil)
c.Assert(m.EntityName(), Equals, fmt.Sprintf("machine-%d", i+1))
err = m.SetInstanceId(state.InstanceId("i-" + m.EntityName()))
c.Assert(err, IsNil)
setDefaultPassword(c, m)
add(m)
err = wu.AssignToMachine(m)
c.Assert(err, IsNil)
deployer, ok := wu.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, fmt.Sprintf("machine-%d", i+1))
wru, err := rel.Unit(wu)
c.Assert(err, IsNil)
// Create the subordinate unit as a side-effect of entering
// scope in the principal's relation-unit.
err = wru.EnterScope(nil)
c.Assert(err, IsNil)
lu, err := s.State.Unit(fmt.Sprintf("logging/%d", i))
c.Assert(err, IsNil)
c.Assert(lu.IsPrincipal(), Equals, false)
deployer, ok = lu.DeployerName()
c.Assert(ok, Equals, true)
c.Assert(deployer, Equals, fmt.Sprintf("unit-wordpress-%d", i))
setDefaultPassword(c, lu)
add(lu)
}
return
}
// AuthEntity is the same as state.AuthEntity but
// without PasswordValid, which is implemented
// by state entities but not by api entities.
type AuthEntity interface {
	// EntityName returns the entity's name in "kind-id" form (e.g. "machine-0").
	EntityName() string
	// SetPassword sets the entity's login password.
	SetPassword(pass string) error
	// Refresh reloads the entity's data from the server.
	Refresh() error
}
// setDefaultPassword gives e the conventional test password,
// "<entity name> password", failing the test on error.
func setDefaultPassword(c *C, e AuthEntity) {
	password := fmt.Sprintf("%s password", e.EntityName())
	c.Assert(e.SetPassword(password), IsNil)
}
// badLoginTests enumerates login attempts that must fail, together
// with the expected error message and (optionally) the expected API
// error code; an empty code means no code is expected.
var badLoginTests = []struct {
	entityName string
	password   string
	err        string
	code       string
}{{
	entityName: "user-admin",
	password:   "wrong password",
	err:        "invalid entity name or password",
	code:       api.CodeUnauthorized,
}, {
	entityName: "user-foo",
	password:   "password",
	err:        "invalid entity name or password",
	code:       api.CodeUnauthorized,
}, {
	entityName: "bar",
	password:   "password",
	err:        `invalid entity name "bar"`,
}}
// TestBadLogin runs the badLoginTests table, checking that a failed
// Login leaves the connection open but still unauthenticated.
func (s *suite) TestBadLogin(c *C) {
	_, info, err := s.APIConn.Environ.StateInfo()
	c.Assert(err, IsNil)
	for i, t := range badLoginTests {
		c.Logf("test %d; entity %q; password %q", i, t.entityName, t.password)
		// Open with no credentials so the explicit Login call below is
		// what gets exercised.
		info.EntityName = ""
		info.Password = ""
		func() {
			st, err := api.Open(info)
			c.Assert(err, IsNil)
			defer st.Close()
			// Requests made before logging in are rejected.
			_, err = st.Machine("0")
			c.Assert(err, ErrorMatches, "not logged in")
			c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized, Commentf("error %#v", err))
			_, err = st.Unit("foo/0")
			c.Assert(err, ErrorMatches, "not logged in")
			c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
			// The bad login fails with the expected error and code...
			err = st.Login(t.entityName, t.password)
			c.Assert(err, ErrorMatches, t.err)
			c.Assert(api.ErrCode(err), Equals, t.code)
			// ...and the connection is still not logged in afterwards.
			_, err = st.Machine("0")
			c.Assert(err, ErrorMatches, "not logged in")
			c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
		}()
	}
}
// TestClientStatus checks that Client.Status reports the expected
// status for the standard test scenario.
func (s *suite) TestClientStatus(c *C) {
	s.setUpScenario(c)
	got, err := s.APIState.Client().Status()
	c.Assert(err, IsNil)
	c.Assert(got, DeepEquals, scenarioStatus)
}
// TestClientServerSet checks that Client.ServiceSet applies a map of
// string settings to a service's configuration.
// NOTE(review): the name looks like a typo for TestClientServiceSet
// (compare TestClientServiceSetYAML below) — confirm before renaming.
func (s *suite) TestClientServerSet(c *C) {
	dummy, err := s.State.AddService("dummy", s.AddTestingCharm(c, "dummy"))
	c.Assert(err, IsNil)
	err = s.APIState.Client().ServiceSet("dummy", map[string]string{
		"title":    "xxx",
		"username": "yyy",
	})
	c.Assert(err, IsNil)
	conf, err := dummy.Config()
	c.Assert(err, IsNil)
	c.Assert(conf.Map(), DeepEquals, map[string]interface{}{
		"title":    "xxx",
		"username": "yyy",
	})
}
// TestClientServiceSetYAML verifies that ServiceSetYAML applies
// YAML-encoded settings to a service's configuration.
func (s *suite) TestClientServiceSetYAML(c *C) {
	dummy, err := s.State.AddService("dummy", s.AddTestingCharm(c, "dummy"))
	c.Assert(err, IsNil)
	yaml := "title: aaa\nusername: bbb"
	c.Assert(s.APIState.Client().ServiceSetYAML("dummy", yaml), IsNil)
	conf, err := dummy.Config()
	c.Assert(err, IsNil)
	expect := map[string]interface{}{
		"title":    "aaa",
		"username": "bbb",
	}
	c.Assert(conf.Map(), DeepEquals, expect)
}
// clientCharmInfoTests drives TestClientCharmInfo: each entry gives a
// charm URL and, for the failure cases, the expected error message.
var clientCharmInfoTests = []struct {
	about string
	url   string
	err   string
}{
	{
		about: "retrieves charm info",
		url:   "local:series/wordpress-3",
	},
	{
		about: "invalid URL",
		url:   "not-valid",
		err:   `charm URL has invalid schema: "not-valid"`,
	},
	{
		about: "unknown charm",
		url:   "cs:missing/one-1",
		err:   `charm "cs:missing/one-1" not found`,
	},
}
// TestClientCharmInfo runs the clientCharmInfoTests table against
// Client.CharmInfo, comparing successful results with the wordpress
// testing charm's metadata.
func (s *suite) TestClientCharmInfo(c *C) {
	// Use wordpress for tests so that we can compare Provides and Requires.
	ch := s.AddTestingCharm(c, "wordpress")
	for i, t := range clientCharmInfoTests {
		c.Logf("test %d. %s", i, t.about)
		info, err := s.APIState.Client().CharmInfo(t.url)
		if t.err != "" {
			c.Assert(err, ErrorMatches, t.err)
			continue
		}
		c.Assert(err, IsNil)
		c.Assert(info, DeepEquals, &api.CharmInfo{
			Revision: ch.Revision(),
			URL:      ch.URL().String(),
			Config:   ch.Config(),
			Meta:     ch.Meta(),
		})
	}
}
// TestClientEnvironmentInfo checks that Client.EnvironmentInfo reports
// the default series and provider type from the environment config.
func (s *suite) TestClientEnvironmentInfo(c *C) {
	// The original discarded this error with `conf, _ :=`; a failed
	// EnvironConfig would have made the assertions below panic instead
	// of failing cleanly.
	conf, err := s.State.EnvironConfig()
	c.Assert(err, IsNil)
	info, err := s.APIState.Client().EnvironmentInfo()
	c.Assert(err, IsNil)
	c.Assert(info.DefaultSeries, Equals, conf.DefaultSeries())
	c.Assert(info.ProviderType, Equals, conf.Type())
}
// TestMachineLogin checks that a machine agent can authenticate to the
// API with its own entity name and password and read its instance id.
func (s *suite) TestMachineLogin(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	err = stm.SetPassword("machine-password")
	c.Assert(err, IsNil)
	err = stm.SetInstanceId("i-foo")
	c.Assert(err, IsNil)
	// Build connection info with the machine's own credentials.
	_, info, err := s.APIConn.Environ.StateInfo()
	c.Assert(err, IsNil)
	info.EntityName = stm.EntityName()
	info.Password = "machine-password"
	st, err := api.Open(info)
	c.Assert(err, IsNil)
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	instId, ok := m.InstanceId()
	c.Assert(ok, Equals, true)
	c.Assert(instId, Equals, "i-foo")
}
// TestMachineInstanceId checks that an API machine's InstanceId is a
// snapshot: changes in state become visible only after Refresh. It
// also checks that ordinary users cannot read machines at all.
func (s *suite) TestMachineInstanceId(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	// Normal users can't access Machines...
	m, err := s.APIState.Machine(stm.Id())
	c.Assert(err, ErrorMatches, "permission denied")
	c.Assert(api.ErrCode(err), Equals, api.CodeUnauthorized)
	c.Assert(m, IsNil)
	// ... so login as the machine.
	st := s.openAs(c, stm.EntityName())
	defer st.Close()
	m, err = st.Machine(stm.Id())
	c.Assert(err, IsNil)
	// No instance id has been set yet.
	instId, ok := m.InstanceId()
	c.Check(instId, Equals, "")
	c.Check(ok, Equals, false)
	err = stm.SetInstanceId("foo")
	c.Assert(err, IsNil)
	// The API machine still reports the stale (empty) value...
	instId, ok = m.InstanceId()
	c.Check(instId, Equals, "")
	c.Check(ok, Equals, false)
	// ...until it is refreshed.
	err = m.Refresh()
	c.Assert(err, IsNil)
	instId, ok = m.InstanceId()
	c.Check(ok, Equals, true)
	c.Assert(instId, Equals, "foo")
}
// TestMachineRefresh checks that Machine.Refresh updates the cached
// instance id to the latest value in state.
func (s *suite) TestMachineRefresh(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	err = stm.SetInstanceId("foo")
	c.Assert(err, IsNil)
	st := s.openAs(c, stm.EntityName())
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	instId, ok := m.InstanceId()
	c.Assert(ok, Equals, true)
	c.Assert(instId, Equals, "foo")
	// Change the id in state; the API machine still sees "foo"...
	err = stm.SetInstanceId("bar")
	c.Assert(err, IsNil)
	instId, ok = m.InstanceId()
	c.Assert(ok, Equals, true)
	c.Assert(instId, Equals, "foo")
	// ...until Refresh picks up "bar".
	err = m.Refresh()
	c.Assert(err, IsNil)
	instId, ok = m.InstanceId()
	c.Assert(ok, Equals, true)
	c.Assert(instId, Equals, "bar")
}
// TestMachineSetPassword verifies that a password set through the API
// becomes valid on the underlying state machine entity.
func (s *suite) TestMachineSetPassword(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	st := s.openAs(c, stm.EntityName())
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	c.Assert(m.SetPassword("foo"), IsNil)
	c.Assert(stm.Refresh(), IsNil)
	c.Assert(stm.PasswordValid("foo"), Equals, true)
}
// TestMachineEntityName checks api.MachineEntityName's formatting and
// that a machine fetched over the API reports its own entity name.
func (s *suite) TestMachineEntityName(c *C) {
	c.Assert(api.MachineEntityName("2"), Equals, "machine-2")
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	st := s.openAs(c, "machine-0")
	defer st.Close()
	m, err := st.Machine("0")
	c.Assert(err, IsNil)
	c.Assert(m.EntityName(), Equals, "machine-0")
}
// TestMachineWatch checks machine watcher behavior over the API: an
// initial event, no events while nothing changes, one event per
// change, and a closed channel after Stop. Two watchers are used to
// show they deliver independently.
func (s *suite) TestMachineWatch(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	st := s.openAs(c, stm.EntityName())
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	w0 := m.Watch()
	w1 := m.Watch()
	// Initial event.
	ok := chanRead(c, w0.Changes(), "watcher 0")
	c.Assert(ok, Equals, true)
	ok = chanRead(c, w1.Changes(), "watcher 1")
	c.Assert(ok, Equals, true)
	// No subsequent event until something changes.
	select {
	case <-w0.Changes():
		c.Fatalf("unexpected value on watcher 0")
	case <-w1.Changes():
		c.Fatalf("unexpected value on watcher 1")
	case <-time.After(20 * time.Millisecond):
	}
	err = stm.SetInstanceId("foo")
	c.Assert(err, IsNil)
	s.State.StartSync()
	// Next event.
	ok = chanRead(c, w0.Changes(), "watcher 0")
	c.Assert(ok, Equals, true)
	ok = chanRead(c, w1.Changes(), "watcher 1")
	c.Assert(ok, Equals, true)
	err = w0.Stop()
	c.Check(err, IsNil)
	err = w1.Stop()
	c.Check(err, IsNil)
	// Stopped watchers report a closed Changes channel.
	ok = chanRead(c, w0.Changes(), "watcher 0")
	c.Assert(ok, Equals, false)
	ok = chanRead(c, w1.Changes(), "watcher 1")
	c.Assert(ok, Equals, false)
}
// TestServerStopsOutstandingWatchMethod checks that stopping the API
// server unblocks a watcher whose Next call is pending server-side,
// and that the watcher then reports CodeStopped.
func (s *suite) TestServerStopsOutstandingWatchMethod(c *C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	srv, err := apiserver.NewServer(s.State, "localhost:0", []byte(coretesting.ServerCert), []byte(coretesting.ServerKey))
	c.Assert(err, IsNil)
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	err = stm.SetPassword("password")
	c.Assert(err, IsNil)
	// Note we can't use openAs because we're
	// not connecting to s.APIConn.
	st, err := api.Open(&api.Info{
		EntityName: stm.EntityName(),
		Password:   "password",
		Addrs:      []string{srv.Addr()},
		CACert:     []byte(coretesting.CACert),
	})
	c.Assert(err, IsNil)
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	c.Assert(m.Id(), Equals, stm.Id())
	w := m.Watch()
	// Initial event.
	ok := chanRead(c, w.Changes(), "watcher 0")
	c.Assert(ok, Equals, true)
	// Wait long enough for the Next request to be sent
	// so it's blocking on the server side.
	time.Sleep(50 * time.Millisecond)
	c.Logf("stopping server")
	err = srv.Stop()
	c.Assert(err, IsNil)
	c.Logf("server stopped")
	// The watcher's channel closes and its error carries CodeStopped.
	ok = chanRead(c, w.Changes(), "watcher 0")
	c.Assert(ok, Equals, false)
	c.Assert(api.ErrCode(w.Err()), Equals, api.CodeStopped)
}
// chanRead receives one value from ch, reporting whether the channel
// is still open. It fails the test if nothing arrives within ten
// seconds; what names the channel in the failure message.
func chanRead(c *C, ch <-chan struct{}, what string) (ok bool) {
	timeout := time.After(10 * time.Second)
	select {
	case _, open := <-ch:
		return open
	case <-timeout:
		c.Fatalf("timed out reading from %s", what)
	}
	// Fatalf does not return control here, but the compiler needs a
	// terminating statement.
	panic("unreachable")
}
// TestUnitRefresh checks that a unit's deployer name is a cached
// snapshot: after the unit is unassigned in state, the old deployer is
// still reported until Refresh, after which it is cleared.
func (s *suite) TestUnitRefresh(c *C) {
	s.setUpScenario(c)
	st := s.openAs(c, "unit-wordpress-0")
	defer st.Close()
	u, err := st.Unit("wordpress/0")
	c.Assert(err, IsNil)
	deployer, ok := u.DeployerName()
	c.Assert(ok, Equals, true)
	c.Assert(deployer, Equals, "machine-1")
	// Unassign the unit directly in state.
	stu, err := s.State.Unit("wordpress/0")
	c.Assert(err, IsNil)
	err = stu.UnassignFromMachine()
	c.Assert(err, IsNil)
	// Stale value until Refresh...
	deployer, ok = u.DeployerName()
	c.Assert(ok, Equals, true)
	c.Assert(deployer, Equals, "machine-1")
	err = u.Refresh()
	c.Assert(err, IsNil)
	// ...then no deployer at all.
	deployer, ok = u.DeployerName()
	c.Assert(ok, Equals, false)
	c.Assert(deployer, Equals, "")
}
// TestErrors checks that errors returned over the API carry a
// machine-readable error code; the full mapping is exercised by
// TestErrorTransform below.
func (s *suite) TestErrors(c *C) {
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	setDefaultPassword(c, stm)
	st := s.openAs(c, stm.EntityName())
	defer st.Close()
	// By testing this single call, we test that the
	// error transformation function is correctly called
	// on error returns from the API apiserver. The transformation
	// function itself is tested below.
	_, err = st.Machine("99")
	c.Assert(api.ErrCode(err), Equals, api.CodeNotFound)
}
// errorTransformTests maps state/apiserver errors to the API error
// code that apiserver.ServerError is expected to attach; an empty
// code means the error must pass through unchanged.
var errorTransformTests = []struct {
	err  error
	code string
}{{
	err:  state.NotFoundf("hello"),
	code: api.CodeNotFound,
}, {
	err:  state.Unauthorizedf("hello"),
	code: api.CodeUnauthorized,
}, {
	err:  state.ErrCannotEnterScopeYet,
	code: api.CodeCannotEnterScopeYet,
}, {
	err:  state.ErrCannotEnterScope,
	code: api.CodeCannotEnterScope,
}, {
	err:  state.ErrExcessiveContention,
	code: api.CodeExcessiveContention,
}, {
	err:  state.ErrUnitHasSubordinates,
	code: api.CodeUnitHasSubordinates,
}, {
	err:  apiserver.ErrBadId,
	code: api.CodeNotFound,
}, {
	err:  apiserver.ErrBadCreds,
	code: api.CodeUnauthorized,
}, {
	err:  apiserver.ErrPerm,
	code: api.CodeUnauthorized,
}, {
	err:  apiserver.ErrNotLoggedIn,
	code: api.CodeUnauthorized,
}, {
	err:  apiserver.ErrUnknownWatcher,
	code: api.CodeNotFound,
}, {
	err:  &state.NotAssignedError{&state.Unit{}}, // too sleazy?!
	code: api.CodeNotAssigned,
}, {
	err:  apiserver.ErrStoppedWatcher,
	code: api.CodeStopped,
}, {
	err:  errors.New("an error"),
	code: "",
}}
// TestErrorTransform runs the errorTransformTests table through
// apiserver.ServerError, checking the message is preserved and the
// expected API error code (if any) is attached.
func (s *suite) TestErrorTransform(c *C) {
	for _, t := range errorTransformTests {
		transformed := apiserver.ServerError(t.err)
		c.Assert(transformed.Error(), Equals, t.err.Error())
		if t.code == "" {
			// Errors with no code must pass through unchanged.
			c.Assert(transformed, Equals, t.err)
		} else {
			c.Assert(api.ErrCode(transformed), Equals, t.code)
		}
	}
}
// TestUnitEntityName checks api.UnitEntityName's formatting and that a
// unit fetched over the API reports its own entity name.
func (s *suite) TestUnitEntityName(c *C) {
	c.Assert(api.UnitEntityName("wordpress/2"), Equals, "unit-wordpress-2")
	s.setUpScenario(c)
	st := s.openAs(c, "unit-wordpress-0")
	defer st.Close()
	u, err := st.Unit("wordpress/0")
	c.Assert(err, IsNil)
	c.Assert(u.EntityName(), Equals, "unit-wordpress-0")
}
// TestStop checks that a stopped API server causes subsequent client
// requests to fail with a connection error, and that Stop is
// idempotent.
func (s *suite) TestStop(c *C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	srv, err := apiserver.NewServer(s.State, "localhost:0", []byte(coretesting.ServerCert), []byte(coretesting.ServerKey))
	c.Assert(err, IsNil)
	stm, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	err = stm.SetInstanceId("foo")
	c.Assert(err, IsNil)
	err = stm.SetPassword("password")
	c.Assert(err, IsNil)
	// Note we can't use openAs because we're
	// not connecting to s.APIConn.
	st, err := api.Open(&api.Info{
		EntityName: stm.EntityName(),
		Password:   "password",
		Addrs:      []string{srv.Addr()},
		CACert:     []byte(coretesting.CACert),
	})
	c.Assert(err, IsNil)
	defer st.Close()
	m, err := st.Machine(stm.Id())
	c.Assert(err, IsNil)
	c.Assert(m.Id(), Equals, stm.Id())
	err = srv.Stop()
	c.Assert(err, IsNil)
	_, err = st.Machine(stm.Id())
	// The client has not necessarily seen the server
	// shutdown yet, so there are two possible
	// errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %v", err)
	}
	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, IsNil)
}
func (s *suite) TestClientServiceGet(c *C) {
s.setUpScenario(c)
config, err := s.APIState.Client().ServiceGet("wordpress")
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, ¶ms.ServiceGetResults{
Service: "wordpress",
Charm: "wordpress",
Settings: map[string]interface{}{
"blog-title": map[string]interface{}{
"type": "string",
"value": nil,
"description": "A descriptive title used for the blog."},
},
})
}
// TestClientServiceExpose verifies that Client.ServiceExpose marks a
// previously unexposed service as exposed.
func (s *suite) TestClientServiceExpose(c *C) {
	s.setUpScenario(c)
	const serviceName = "wordpress"
	service, err := s.State.Service(serviceName)
	c.Assert(err, IsNil)
	c.Assert(service.IsExposed(), Equals, false)
	err = s.APIState.Client().ServiceExpose(serviceName)
	c.Assert(err, IsNil)
	c.Assert(service.Refresh(), IsNil)
	c.Assert(service.IsExposed(), Equals, true)
}
// TestClientServiceUnexpose verifies that Client.ServiceUnexpose
// clears the exposed flag on a service.
func (s *suite) TestClientServiceUnexpose(c *C) {
	s.setUpScenario(c)
	serviceName := "wordpress"
	service, err := s.State.Service(serviceName)
	c.Assert(err, IsNil)
	// The original ignored the results of SetExposed and Refresh; a
	// failure in either would make the surrounding assertions
	// meaningless.
	err = service.SetExposed()
	c.Assert(err, IsNil)
	c.Assert(service.IsExposed(), Equals, true)
	err = s.APIState.Client().ServiceUnexpose(serviceName)
	c.Assert(err, IsNil)
	err = service.Refresh()
	c.Assert(err, IsNil)
	c.Assert(service.IsExposed(), Equals, false)
}
// serviceDeployTests lists ServiceDeploy scenarios for
// TestClientServiceDeploy; the first entry leaves numUnits at its zero
// value and expects a single unit to be deployed.
var serviceDeployTests = []struct {
	about            string
	serviceName      string
	charmUrl         string
	numUnits         int
	expectedNumUnits int
}{{
	about:            "Normal deploy",
	serviceName:      "mywordpress",
	charmUrl:         "local:series/wordpress",
	expectedNumUnits: 1,
}, {
	about:            "Two units",
	serviceName:      "mywordpress",
	charmUrl:         "local:series/wordpress",
	numUnits:         2,
	expectedNumUnits: 2,
},
}
// TestClientServiceDeploy runs the serviceDeployTests table against
// Client.ServiceDeploy, pointing the API server at a local charm
// repository for the duration of each test.
func (s *suite) TestClientServiceDeploy(c *C) {
	s.setUpScenario(c)
	for i, test := range serviceDeployTests {
		c.Logf("test %d; %s", i, test.about)
		parsedUrl := charm.MustParseURL(test.charmUrl)
		localRepo, err := charm.InferRepository(parsedUrl,
			coretesting.Charms.Path)
		// The original discarded this error; a bad repository would
		// only surface later as a confusing deploy failure.
		c.Assert(err, IsNil)
		// Monkey-patch server repository.
		originalServerCharmStore := apiserver.CharmStore
		apiserver.CharmStore = localRepo
		// The service must not exist before deploying.
		_, err = s.State.Service(test.serviceName)
		c.Assert(err, NotNil)
		err = s.APIState.Client().ServiceDeploy(
			test.charmUrl, test.serviceName, test.numUnits, "")
		c.Assert(err, IsNil)
		service, err := s.State.Service(test.serviceName)
		c.Assert(err, IsNil)
		units, err := service.AllUnits()
		c.Assert(err, IsNil)
		c.Assert(units, HasLen, test.expectedNumUnits)
		// Clean up.
		removeServiceAndUnits(c, service)
		// Restore server repository.
		apiserver.CharmStore = originalServerCharmStore
	}
}
// This test will be thrown away, at least in part, once the stub code in
// state/megawatcher.go is implemented.
//
// TestClientWatchAll checks that Client.WatchAll returns a watcher
// whose first Next call delivers the stub delta.
func (s *suite) TestClientWatchAll(c *C) {
	watcher, err := s.APIState.Client().WatchAll()
	c.Assert(err, IsNil)
	defer func() {
		err := watcher.Stop()
		c.Assert(err, IsNil)
	}()
	deltas, err := watcher.Next()
	c.Assert(err, IsNil)
	// This is the part that most clearly is tied to the fact that we are
	// testing a stub.
	c.Assert(deltas, DeepEquals, state.StubNextDelta)
}
// openAs connects to the API state as the given entity
// with the default password for that entity (as set by
// setDefaultPassword).
func (s *suite) openAs(c *C, entityName string) *api.State {
	_, info, err := s.APIConn.Environ.StateInfo()
	c.Assert(err, IsNil)
	info.EntityName = entityName
	info.Password = entityName + " password"
	c.Logf("opening state; entity %q; password %q", info.EntityName, info.Password)
	st, err := api.Open(info)
	c.Assert(err, IsNil)
	c.Assert(st, NotNil)
	return st
}
|
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package cups
/*
#cgo freebsd CFLAGS: -I/usr/local/include
#cgo freebsd LDFLAGS: -L/usr/local/lib
#include "cups.h"
*/
import "C"
import (
"errors"
"fmt"
"os"
"runtime"
"strings"
"time"
"unsafe"
"github.com/avlis/cloud-print-connector/lib"
"github.com/avlis/cloud-print-connector/log"
)
const (
	// jobURIFormat is the string format required by the CUPS API
	// to do things like query the state of a job.
	jobURIFormat = "/jobs/%d"

	// filePathMaxLength varies by operating system and file system.
	// This value should be large enough to be useful and small enough
	// to work on any platform. It sizes the C buffers that receive
	// file paths (see getPPD).
	filePathMaxLength = 1024
)
// cupsCore handles CUPS API interaction and connection management.
type cupsCore struct {
	// host, port, encryption and connectTimeout mirror the CUPS client
	// configuration captured at construction time (see newCUPSCore).
	host           *C.char
	port           C.int
	encryption     C.http_encryption_t
	connectTimeout C.int

	// connectionSemaphore limits the quantity of open CUPS connections.
	connectionSemaphore *lib.Semaphore

	// connectionPool allows a connection to be reused instead of closed.
	connectionPool chan *C.http_t

	// hostIsLocal is true when the server address is a domain socket
	// (leading "/") or "localhost"; getPPD then skips the network
	// connection.
	hostIsLocal bool
}
// newCUPSCore captures the CUPS client configuration
// (cupsServer/ippPort/cupsEncryption), verifies that a connection can
// be established, and returns a ready cupsCore.
//
// maxConnections bounds the number of concurrently open CUPS
// connections; connectTimeout applies to each connection attempt.
func newCUPSCore(maxConnections uint, connectTimeout time.Duration) (*cupsCore, error) {
	host := C.cupsServer()
	port := C.ippPort()
	encryption := C.cupsEncryption()
	timeout := C.int(connectTimeout / time.Millisecond)

	var e string
	switch encryption {
	case C.HTTP_ENCRYPTION_ALWAYS:
		e = "encrypting ALWAYS"
	case C.HTTP_ENCRYPTION_IF_REQUESTED:
		e = "encrypting IF REQUESTED"
	case C.HTTP_ENCRYPTION_NEVER:
		e = "encrypting NEVER"
	case C.HTTP_ENCRYPTION_REQUIRED:
		// Was "encryption REQUIRED", inconsistent with every other
		// branch (including default) which says "encrypting ...".
		e = "encrypting REQUIRED"
	default:
		// Unknown value: fall back to requiring encryption.
		encryption = C.HTTP_ENCRYPTION_REQUIRED
		e = "encrypting REQUIRED"
	}

	var hostIsLocal bool
	if h := C.GoString(host); strings.HasPrefix(h, "/") || h == "localhost" {
		hostIsLocal = true
	}

	cs := lib.NewSemaphore(maxConnections)
	cp := make(chan *C.http_t)

	cc := &cupsCore{host, port, encryption, timeout, cs, cp, hostIsLocal}

	log.Infof("Connecting to CUPS server at %s:%d %s", C.GoString(host), int(port), e)

	// This connection isn't used, just checks that a connection is possible
	// before returning from the constructor.
	http, err := cc.connect()
	if err != nil {
		return nil, err
	}
	cc.disconnect(http)

	log.Info("Connected to CUPS server successfully")
	return cc, nil
}
// printFile prints by calling C.cupsPrintFile2().
// Returns the CUPS job ID, which is 0 (and meaningless) when err
// is not nil.
func (cc *cupsCore) printFile(user, printername, filename, title *C.char, numOptions C.int, options *C.cups_option_t) (C.int, error) {
	http, err := cc.connect()
	if err != nil {
		return 0, err
	}
	defer cc.disconnect(http)

	// Set the requesting user before submitting the job.
	C.cupsSetUser(user)

	if jobID := C.cupsPrintFile2(http, printername, filename, title, numOptions, options); jobID != 0 {
		return jobID, nil
	}
	return 0, fmt.Errorf("Failed to call cupsPrintFile2() for file %s: %d %s",
		C.GoString(filename), int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
}
// getPrinters gets the current list and state of printers by calling
// C.doRequest (IPP_OP_CUPS_GET_PRINTERS).
//
// The caller is responsible to C.ippDelete the returned *C.ipp_t response.
func (cc *cupsCore) getPrinters(attributes **C.char, attrSize C.int) (*C.ipp_t, error) {
	// ippNewRequest() returns an ipp_t pointer which does not need an
	// explicit free.
	request := C.ippNewRequest(C.IPP_OP_CUPS_GET_PRINTERS)
	C.ippAddStrings(request, C.IPP_TAG_OPERATION, C.IPP_TAG_KEYWORD, C.REQUESTED_ATTRIBUTES,
		attrSize, nil, attributes)

	acceptable := []C.ipp_status_t{C.IPP_STATUS_OK, C.IPP_STATUS_ERROR_NOT_FOUND}
	response, err := cc.doRequest(request, acceptable)
	if err != nil {
		return nil, fmt.Errorf("Failed to call cupsDoRequest() [IPP_OP_CUPS_GET_PRINTERS]: %s", err)
	}
	return response, nil
}
// getPPD gets the filename of the PPD for a printer by calling
// C.cupsGetPPD3. If the PPD hasn't changed since the time indicated
// by modtime, then the returned filename is a nil pointer.
//
// Note that modtime is a pointer whose value is changed by this
// function.
//
// The caller is responsible to C.free the returned *C.char filename
// if the returned filename is not nil.
func (cc *cupsCore) getPPD(printername *C.char, modtime *C.time_t) (*C.char, error) {
	bufsize := C.size_t(filePathMaxLength)
	buffer := (*C.char)(C.malloc(bufsize))
	if buffer == nil {
		return nil, errors.New("Failed to malloc; out of memory?")
	}
	C.memset(unsafe.Pointer(buffer), 0, bufsize)

	var http *C.http_t
	if !cc.hostIsLocal {
		// Remote server: a real connection (and its semaphore slot)
		// is needed.
		var err error
		http, err = cc.connect()
		if err != nil {
			// The original leaked buffer on this path.
			C.free(unsafe.Pointer(buffer))
			return nil, err
		}
		defer cc.disconnect(http)
	} else {
		// Don't need a connection or corresponding semaphore if the PPD
		// is on the local filesystem. Still lock the OS thread so that
		// thread-local storage is available to cupsLastError() and
		// cupsLastErrorString().
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	}

	httpStatus := C.cupsGetPPD3(http, printername, modtime, buffer, bufsize)
	switch httpStatus {
	case C.HTTP_STATUS_NOT_MODIFIED:
		// Cache hit; there is no new PPD file to hand back.
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		return nil, nil

	case C.HTTP_STATUS_OK:
		// Cache miss; buffer holds the filename of the fetched PPD.
		return buffer, nil

	case C.HTTP_STATUS_NOT_FOUND:
		// The printer does not exist on the CUPS server.
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		// The original formatted the *C.char printername with %s
		// (which prints the pointer, not the name) and hard-coded 404;
		// report the actual HTTP status instead.
		return nil, fmt.Errorf("printer does not exist: %d", int(httpStatus))

	default:
		// Ignore all other errors; they may be temporary.
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		return nil, nil
	}
}
// getJobAttributes gets the requested attributes for a job by calling
// C.doRequest (IPP_OP_GET_JOB_ATTRIBUTES).
//
// The caller is responsible to C.ippDelete the returned *C.ipp_t response.
func (cc *cupsCore) getJobAttributes(jobID C.int, attributes **C.char) (*C.ipp_t, error) {
	uri, err := createJobURI(jobID)
	if err != nil {
		return nil, err
	}
	defer C.free(unsafe.Pointer(uri))

	// ippNewRequest() returns an ipp_t pointer which does not need an
	// explicit free.
	request := C.ippNewRequest(C.IPP_OP_GET_JOB_ATTRIBUTES)
	C.ippAddString(request, C.IPP_TAG_OPERATION, C.IPP_TAG_URI, C.JOB_URI_ATTRIBUTE, nil, uri)
	C.ippAddStrings(request, C.IPP_TAG_OPERATION, C.IPP_TAG_KEYWORD, C.REQUESTED_ATTRIBUTES,
		C.int(0), nil, attributes)

	response, err := cc.doRequest(request, []C.ipp_status_t{C.IPP_STATUS_OK})
	if err != nil {
		return nil, fmt.Errorf("Failed to call cupsDoRequest() [IPP_OP_GET_JOB_ATTRIBUTES]: %s", err)
	}
	return response, nil
}
// createJobURI creates a uri string for the job-uri attribute, used to get the
// state of a CUPS job.
//
// The caller is responsible to C.free the returned *C.char URI when it
// is not nil.
func createJobURI(jobID C.int) (*C.char, error) {
	// urlMaxLength is declared elsewhere in this package.
	length := C.size_t(urlMaxLength)
	uri := (*C.char)(C.malloc(length))
	if uri == nil {
		return nil, errors.New("Failed to malloc; out of memory?")
	}
	// resource is only needed while assembling the URI.
	resource := C.CString(fmt.Sprintf(jobURIFormat, uint32(jobID)))
	defer C.free(unsafe.Pointer(resource))
	C.httpAssembleURI(C.HTTP_URI_CODING_ALL,
		uri, C.int(length), C.IPP, nil, C.cupsServer(), C.ippPort(), resource)
	return uri, nil
}
// doRequest calls cupsDoRequest().
//
// The request is validated first; the response's IPP status code must
// be one of acceptableStatusCodes, otherwise an error is returned.
// The caller is responsible to C.ippDelete the returned response.
func (cc *cupsCore) doRequest(request *C.ipp_t, acceptableStatusCodes []C.ipp_status_t) (*C.ipp_t, error) {
	http, err := cc.connect()
	if err != nil {
		return nil, err
	}
	defer cc.disconnect(http)

	if C.ippValidateAttributes(request) != 1 {
		return nil, fmt.Errorf("Bad IPP request: %s", C.GoString(C.cupsLastErrorString()))
	}

	response := C.cupsDoRequest(http, request, C.POST_RESOURCE)
	if response == nil {
		return nil, fmt.Errorf("cupsDoRequest failed: %d %s", int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
	}

	statusCode := C.getIPPRequestStatusCode(response)
	for _, sc := range acceptableStatusCodes {
		if statusCode == sc {
			return response, nil
		}
	}
	// The original returned without deleting the response here, leaking
	// it: callers only ippDelete responses they receive.
	C.ippDelete(response)
	return nil, fmt.Errorf("IPP status code %d", int(statusCode))
}
// connect calls C.httpConnect2 to create a new, open connection to
// the CUPS server specified by environment variables, client.conf, etc.
//
// connect also acquires the connection semaphore and locks the OS
// thread to allow the CUPS API to use thread-local storage cleanly.
//
// The caller is responsible to close the connection when finished
// using cupsCore.disconnect.
func (cc *cupsCore) connect() (*C.http_t, error) {
	cc.connectionSemaphore.Acquire()
	// Lock the OS thread so that thread-local storage is available to
	// cupsLastError() and cupsLastErrorString().
	runtime.LockOSThread()
	var http *C.http_t
	select {
	case h := <-cc.connectionPool:
		// Reuse another connection.
		http = h
	default:
		// No connection available for reuse; create a new one.
		http = C.httpConnect2(cc.host, cc.port, nil, C.AF_UNSPEC, cc.encryption, 1, cc.connectTimeout, nil)
		if http == nil {
			// disconnect releases the semaphore and unlocks the OS thread.
			// NOTE(review): disconnect also offers this nil http to
			// connectionPool; confirm a pooled nil cannot be handed
			// back to a later connect() caller.
			defer cc.disconnect(http)
			return nil, fmt.Errorf("Failed to connect to CUPS server %s:%d because %d %s",
				C.GoString(cc.host), int(cc.port), int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
		}
	}
	return http, nil
}
// disconnect calls C.httpClose to close an open CUPS connection, then
// unlocks the OS thread and the connection semaphore.
//
// The http argument may be nil; the OS thread and semaphore are still
// treated the same as described above.
func (cc *cupsCore) disconnect(http *C.http_t) {
	// Offer the connection for reuse in a separate goroutine so that
	// disconnect itself never blocks the caller.
	go func() {
		select {
		case cc.connectionPool <- http:
			// Hand this connection to the next guy who needs it.
		case <-time.After(time.Second):
			// Don't wait very long; stale connections are no fun.
			C.httpClose(http)
		}
	}()
	runtime.UnlockOSThread()
	cc.connectionSemaphore.Release()
}
// connQtyOpen returns the current number of open CUPS connections
// (the semaphore's count).
func (cc *cupsCore) connQtyOpen() uint {
	return cc.connectionSemaphore.Count()
}
// connQtyMax returns the maximum number of CUPS connections allowed
// (the semaphore's size).
func (cc *cupsCore) connQtyMax() uint {
	return cc.connectionSemaphore.Size()
}
// Revised copy follows: the getPPD "printer does not exist" error is now built with proper parameters.
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package cups
/*
#cgo freebsd CFLAGS: -I/usr/local/include
#cgo freebsd LDFLAGS: -L/usr/local/lib
#include "cups.h"
*/
import "C"
import (
"errors"
"fmt"
"os"
"runtime"
"strings"
"time"
"unsafe"
"github.com/avlis/cloud-print-connector/lib"
"github.com/avlis/cloud-print-connector/log"
)
const (
	// jobURIFormat is the string format required by the CUPS API
	// to do things like query the state of a job.
	jobURIFormat = "/jobs/%d"

	// filePathMaxLength varies by operating system and file system.
	// This value should be large enough to be useful and small enough
	// to work on any platform. It sizes the C buffers that receive
	// file paths (see getPPD).
	filePathMaxLength = 1024
)
// cupsCore handles CUPS API interaction and connection management.
type cupsCore struct {
	// host, port, encryption and connectTimeout mirror the CUPS client
	// configuration captured at construction time (see newCUPSCore).
	host           *C.char
	port           C.int
	encryption     C.http_encryption_t
	connectTimeout C.int

	// connectionSemaphore limits the quantity of open CUPS connections.
	connectionSemaphore *lib.Semaphore

	// connectionPool allows a connection to be reused instead of closed.
	connectionPool chan *C.http_t

	// hostIsLocal is true when the server address is a domain socket
	// (leading "/") or "localhost"; getPPD then skips the network
	// connection.
	hostIsLocal bool
}
// newCUPSCore captures the CUPS client configuration
// (cupsServer/ippPort/cupsEncryption), verifies that a connection can
// be established, and returns a ready cupsCore.
//
// maxConnections bounds the number of concurrently open CUPS
// connections; connectTimeout applies to each connection attempt.
func newCUPSCore(maxConnections uint, connectTimeout time.Duration) (*cupsCore, error) {
	host := C.cupsServer()
	port := C.ippPort()
	encryption := C.cupsEncryption()
	timeout := C.int(connectTimeout / time.Millisecond)

	var e string
	switch encryption {
	case C.HTTP_ENCRYPTION_ALWAYS:
		e = "encrypting ALWAYS"
	case C.HTTP_ENCRYPTION_IF_REQUESTED:
		e = "encrypting IF REQUESTED"
	case C.HTTP_ENCRYPTION_NEVER:
		e = "encrypting NEVER"
	case C.HTTP_ENCRYPTION_REQUIRED:
		// Was "encryption REQUIRED", inconsistent with every other
		// branch (including default) which says "encrypting ...".
		e = "encrypting REQUIRED"
	default:
		// Unknown value: fall back to requiring encryption.
		encryption = C.HTTP_ENCRYPTION_REQUIRED
		e = "encrypting REQUIRED"
	}

	var hostIsLocal bool
	if h := C.GoString(host); strings.HasPrefix(h, "/") || h == "localhost" {
		hostIsLocal = true
	}

	cs := lib.NewSemaphore(maxConnections)
	cp := make(chan *C.http_t)

	cc := &cupsCore{host, port, encryption, timeout, cs, cp, hostIsLocal}

	log.Infof("Connecting to CUPS server at %s:%d %s", C.GoString(host), int(port), e)

	// This connection isn't used, just checks that a connection is possible
	// before returning from the constructor.
	http, err := cc.connect()
	if err != nil {
		return nil, err
	}
	cc.disconnect(http)

	log.Info("Connected to CUPS server successfully")
	return cc, nil
}
// printFile prints by calling C.cupsPrintFile2().
// Returns the CUPS job ID, which is 0 (and meaningless) when err
// is not nil.
func (cc *cupsCore) printFile(user, printername, filename, title *C.char, numOptions C.int, options *C.cups_option_t) (C.int, error) {
	http, err := cc.connect()
	if err != nil {
		return 0, err
	}
	defer cc.disconnect(http)
	// Set the requesting user before submitting the job.
	C.cupsSetUser(user)
	jobID := C.cupsPrintFile2(http, printername, filename, title, numOptions, options)
	if jobID == 0 {
		// A zero job ID signals failure; cupsLastError has the details.
		return 0, fmt.Errorf("Failed to call cupsPrintFile2() for file %s: %d %s",
			C.GoString(filename), int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
	}
	return jobID, nil
}
// getPrinters gets the current list and state of printers by calling
// C.doRequest (IPP_OP_CUPS_GET_PRINTERS).
//
// attributes/attrSize name the printer attributes to request.
// The caller is responsible to C.ippDelete the returned *C.ipp_t response.
func (cc *cupsCore) getPrinters(attributes **C.char, attrSize C.int) (*C.ipp_t, error) {
	// ippNewRequest() returns ipp_t pointer which does not need explicit free.
	request := C.ippNewRequest(C.IPP_OP_CUPS_GET_PRINTERS)
	C.ippAddStrings(request, C.IPP_TAG_OPERATION, C.IPP_TAG_KEYWORD, C.REQUESTED_ATTRIBUTES,
		attrSize, nil, attributes)
	// NOT_FOUND is acceptable: it simply means no printers are configured.
	response, err := cc.doRequest(request,
		[]C.ipp_status_t{C.IPP_STATUS_OK, C.IPP_STATUS_ERROR_NOT_FOUND})
	if err != nil {
		err = fmt.Errorf("Failed to call cupsDoRequest() [IPP_OP_CUPS_GET_PRINTERS]: %s", err)
		return nil, err
	}
	return response, nil
}
// getPPD gets the filename of the PPD for a printer by calling
// C.cupsGetPPD3. If the PPD hasn't changed since the time indicated
// by modtime, then the returned filename is a nil pointer.
//
// Note that modtime is a pointer whose value is changed by this
// function.
//
// The caller is responsible to C.free the returned *C.char filename
// if the returned filename is not nil.
func (cc *cupsCore) getPPD(printername *C.char, modtime *C.time_t) (*C.char, error) {
	bufsize := C.size_t(filePathMaxLength)
	buffer := (*C.char)(C.malloc(bufsize))
	if buffer == nil {
		return nil, errors.New("Failed to malloc; out of memory?")
	}
	C.memset(unsafe.Pointer(buffer), 0, bufsize)

	var http *C.http_t
	if !cc.hostIsLocal {
		// Remote server: a real connection (and its semaphore slot)
		// is needed.
		var err error
		http, err = cc.connect()
		if err != nil {
			// The original leaked buffer on this path.
			C.free(unsafe.Pointer(buffer))
			return nil, err
		}
		defer cc.disconnect(http)
	} else {
		// Don't need a connection or corresponding semaphore if the PPD
		// is on the local filesystem. Still lock the OS thread so that
		// thread-local storage is available to cupsLastError() and
		// cupsLastErrorString().
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	}

	httpStatus := C.cupsGetPPD3(http, printername, modtime, buffer, bufsize)
	switch httpStatus {
	case C.HTTP_STATUS_NOT_MODIFIED:
		// Cache hit; there is no new PPD file to hand back.
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		return nil, nil

	case C.HTTP_STATUS_OK:
		// Cache miss; buffer holds the filename of the fetched PPD.
		return buffer, nil

	case C.HTTP_STATUS_NOT_FOUND:
		// The printer does not exist on the CUPS server.
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		return nil, fmt.Errorf("printer does not exist: %d", int(httpStatus))

	default:
		// Ignore all other errors; they may be temporary.
		// (Also gofmt-fixed the original's "return nil,nil".)
		if len(C.GoString(buffer)) > 0 {
			os.Remove(C.GoString(buffer))
		}
		C.free(unsafe.Pointer(buffer))
		return nil, nil
	}
}
// getJobAttributes gets the requested attributes for a job by calling
// C.doRequest (IPP_OP_GET_JOB_ATTRIBUTES).
//
// The caller is responsible to C.ippDelete the returned *C.ipp_t response.
func (cc *cupsCore) getJobAttributes(jobID C.int, attributes **C.char) (*C.ipp_t, error) {
	uri, err := createJobURI(jobID)
	if err != nil {
		return nil, err
	}
	// createJobURI mallocs the URI; free it when the request is done.
	defer C.free(unsafe.Pointer(uri))
	// ippNewRequest() returns ipp_t pointer does not need explicit free.
	request := C.ippNewRequest(C.IPP_OP_GET_JOB_ATTRIBUTES)
	C.ippAddString(request, C.IPP_TAG_OPERATION, C.IPP_TAG_URI, C.JOB_URI_ATTRIBUTE, nil, uri)
	C.ippAddStrings(request, C.IPP_TAG_OPERATION, C.IPP_TAG_KEYWORD, C.REQUESTED_ATTRIBUTES,
		C.int(0), nil, attributes)
	response, err := cc.doRequest(request, []C.ipp_status_t{C.IPP_STATUS_OK})
	if err != nil {
		err = fmt.Errorf("Failed to call cupsDoRequest() [IPP_OP_GET_JOB_ATTRIBUTES]: %s", err)
		return nil, err
	}
	return response, nil
}
// createJobURI creates a uri string for the job-uri attribute, used to get the
// state of a CUPS job.
func createJobURI(jobID C.int) (*C.char, error) {
	bufLen := C.size_t(urlMaxLength)
	buf := (*C.char)(C.malloc(bufLen))
	if buf == nil {
		return nil, errors.New("Failed to malloc; out of memory?")
	}

	res := C.CString(fmt.Sprintf(jobURIFormat, uint32(jobID)))
	defer C.free(unsafe.Pointer(res))

	// Assemble ipp://<server>:<port>/<resource> into the caller-owned buffer.
	C.httpAssembleURI(C.HTTP_URI_CODING_ALL,
		buf, C.int(bufLen), C.IPP, nil, C.cupsServer(), C.ippPort(), res)
	return buf, nil
}
// doRequest calls cupsDoRequest().
func (cc *cupsCore) doRequest(request *C.ipp_t, acceptableStatusCodes []C.ipp_status_t) (*C.ipp_t, error) {
	http, err := cc.connect()
	if err != nil {
		return nil, err
	}
	defer cc.disconnect(http)

	// Validate before sending for a clearer error than a server rejection.
	if C.ippValidateAttributes(request) != 1 {
		return nil, fmt.Errorf("Bad IPP request: %s", C.GoString(C.cupsLastErrorString()))
	}

	response := C.cupsDoRequest(http, request, C.POST_RESOURCE)
	if response == nil {
		return nil, fmt.Errorf("cupsDoRequest failed: %d %s", int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
	}

	status := C.getIPPRequestStatusCode(response)
	for _, acceptable := range acceptableStatusCodes {
		if status == acceptable {
			return response, nil
		}
	}
	return nil, fmt.Errorf("IPP status code %d", int(status))
}
// connect calls C.httpConnect2 to create a new, open connection to
// the CUPS server specified by environment variables, client.conf, etc.
//
// connect also acquires the connection semaphore and locks the OS
// thread to allow the CUPS API to use thread-local storage cleanly.
//
// The caller is responsible to close the connection when finished
// using cupsCore.disconnect.
func (cc *cupsCore) connect() (*C.http_t, error) {
	cc.connectionSemaphore.Acquire()
	// Lock the OS thread so that thread-local storage is available to
	// cupsLastError() and cupsLastErrorString().
	runtime.LockOSThread()
	var http *C.http_t
	select {
	case h := <-cc.connectionPool:
		// Reuse another connection.
		http = h
	default:
		// No connection available for reuse; create a new one.
		http = C.httpConnect2(cc.host, cc.port, nil, C.AF_UNSPEC, cc.encryption, 1, cc.connectTimeout, nil)
		if http == nil {
			// disconnect releases the semaphore and OS thread lock acquired
			// above. NOTE(review): it also offers this nil http to the
			// connection pool; confirm that a pooled nil handle cannot be
			// handed back out as a "reusable" connection.
			defer cc.disconnect(http)
			return nil, fmt.Errorf("Failed to connect to CUPS server %s:%d because %d %s",
				C.GoString(cc.host), int(cc.port), int(C.cupsLastError()), C.GoString(C.cupsLastErrorString()))
		}
	}
	return http, nil
}

// disconnect calls C.httpClose to close an open CUPS connection, then
// unlocks the OS thread and the connection semaphore.
//
// The http argument may be nil; the OS thread and semaphore are still
// treated the same as described above.
func (cc *cupsCore) disconnect(http *C.http_t) {
	// Return the connection to the pool asynchronously so this call never
	// blocks the caller on a full pool.
	go func() {
		select {
		case cc.connectionPool <- http:
			// Hand this connection to the next guy who needs it.
		case <-time.After(time.Second):
			// Don't wait very long; stale connections are no fun.
			// NOTE(review): http may be nil here (see connect's failure
			// path); confirm C.httpClose tolerates a NULL argument.
			C.httpClose(http)
		}
	}()
	runtime.UnlockOSThread()
	cc.connectionSemaphore.Release()
}

// connQtyOpen returns the current count of the connection semaphore,
// i.e. the number of connections presently checked out.
func (cc *cupsCore) connQtyOpen() uint {
	return cc.connectionSemaphore.Count()
}

// connQtyMax returns the size of the connection semaphore, i.e. the
// maximum number of concurrent CUPS connections allowed.
func (cc *cupsCore) connQtyMax() uint {
	return cc.connectionSemaphore.Size()
}
|
// package main provides the monstache binary
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/olivere/elastic"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"golang.org/x/net/context"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/signal"
"plugin"
"regexp"
"strconv"
"strings"
"syscall"
"time"
)
// Log streams: info/stats/trace write to stdout, errors to stderr.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stderr, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())

// Optional Go plugin hooks; presumably assigned when mapper-plugin-path is
// configured (the loading code is outside this chunk — confirm).
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)

// Per-namespace javascript environments and index/type overrides; the empty
// string key in mapEnvs/filterEnvs applies to every namespace (see
// mapDataJavascript / filterWithScript).
var mapEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var filterEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var mapIndexTypes map[string]*indexTypeMapping = make(map[string]*indexTypeMapping)

// Namespace membership sets built from the corresponding config lists.
var fileNamespaces map[string]bool = make(map[string]bool)
var patchNamespaces map[string]bool = make(map[string]bool)
var tmNamespaces map[string]bool = make(map[string]bool)
var routingNamespaces map[string]bool = make(map[string]bool)

// Used by notChunks/notSystem to skip GridFS chunk and system collections.
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")

const version = "4.4.0"

// Defaults applied when neither a flag nor the config file sets a value.
const mongoURLDefault string = "localhost"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 10
const elasticClientTimeoutDefault int = 60
const elasticMaxDocsDefault int = 1000
const gtmChannelSizeDefault int = 512

// typeFromFuture is the mapping type used when useTypeFromFuture() reports
// the server is Elasticsearch 6.2 or later.
const typeFromFuture string = "_doc"
const fileDownloadersDefault = 10

// deleteStrategy selects how MongoDB deletes are propagated to Elasticsearch.
type deleteStrategy int

const (
	// NOTE(review): semantics inferred from names (stateless lookup,
	// stateful tracking, ignore deletes); confirm against the delete
	// handling code elsewhere in this file.
	statelessDeleteStrategy deleteStrategy = iota
	statefulDeleteStrategy
	ignoreDeleteStrategy
)
// stringargs is a repeatable string flag (implements flag.Value; see Set).
type stringargs []string

// executionEnv pairs an otto javascript VM with the script loaded into it.
type executionEnv struct {
	VM     *otto.Otto
	Script string
}

// javascript describes one [[script]] or [[filter]] config entry: an inline
// script or a file path, bound to a namespace.
type javascript struct {
	Namespace string
	Script    string
	Path      string
	Routing   bool
}

// indexTypeMapping overrides the Elasticsearch index and type used for a
// MongoDB namespace.
type indexTypeMapping struct {
	Namespace string
	Index     string
	Type      string
}

// findConf configures a find helper exposed to javascript environments.
// NOTE(review): its usage is outside this chunk; field meanings inferred
// from names — confirm.
type findConf struct {
	vm      *otto.Otto
	ns      string
	name    string
	session *mgo.Session
	byId    bool
	multi   bool
}

// findCall captures the arguments of a single find invocation made through
// a findConf.
type findCall struct {
	config  *findConf
	session *mgo.Session
	query   interface{}
	db      string
	col     string
	limit   int
	sort    []string
	sel     map[string]int
}

// logFiles holds optional output file paths for each log stream
// (the [logs] config table).
type logFiles struct {
	Info  string
	Error string
	Trace string
	Stats string
}

// indexingMeta carries per-document Elasticsearch indexing overrides,
// populated from a document's _meta_monstache attribute (see parseIndexMeta).
type indexingMeta struct {
	Routing         string
	Index           string
	Type            string
	Parent          string
	Version         int64
	VersionType     string
	Pipeline        string
	RetryOnConflict int
}

// mongoDialSettings maps the [mongo-dial-settings] config table.
type mongoDialSettings struct {
	Timeout int
	Ssl     bool
}

// mongoSessionSettings maps the [mongo-session-settings] config table.
type mongoSessionSettings struct {
	SocketTimeout int `toml:"socket-timeout"`
	SyncTimeout   int `toml:"sync-timeout"`
}

// gtmSettings maps the [gtm-settings] config table.
type gtmSettings struct {
	ChannelSize    int    `toml:"channel-size"`
	BufferSize     int    `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
}

// httpServerCtx bundles the optional internal HTTP server with the bulk
// processor and config it reports on.
type httpServerCtx struct {
	httpServer *http.Server
	bulk       *elastic.BulkProcessor
	config     *configOptions
	shutdown   bool
	started    time.Time
}

// configOptions holds every monstache setting. Fields are populated from the
// TOML config file (via the toml tags) and/or the equivalent command line
// flags registered in parseCommandLineFlags.
type configOptions struct {
	MongoURL                 string               `toml:"mongo-url"`
	MongoConfigURL           string               `toml:"mongo-config-url"`
	MongoPemFile             string               `toml:"mongo-pem-file"`
	MongoValidatePemFile     bool                 `toml:"mongo-validate-pem-file"`
	MongoOpLogDatabaseName   string               `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string               `toml:"mongo-oplog-collection-name"`
	MongoCursorTimeout       string               `toml:"mongo-cursor-timeout"`
	MongoDialSettings        mongoDialSettings    `toml:"mongo-dial-settings"`
	MongoSessionSettings     mongoSessionSettings `toml:"mongo-session-settings"`
	GtmSettings              gtmSettings          `toml:"gtm-settings"`
	Logs                     logFiles             `toml:"logs"`
	GraylogAddr              string               `toml:"graylog-addr"`
	ElasticUrls              stringargs           `toml:"elasticsearch-urls"`
	ElasticUser              string               `toml:"elasticsearch-user"`
	ElasticPassword          string               `toml:"elasticsearch-password"`
	ElasticPemFile           string               `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile   bool                 `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion           string               `toml:"elasticsearch-version"`
	ResumeName               string               `toml:"resume-name"`
	NsRegex                  string               `toml:"namespace-regex"`
	NsExcludeRegex           string               `toml:"namespace-exclude-regex"`
	ClusterName              string               `toml:"cluster-name"`
	Print                    bool                 `toml:"print-config"`
	Version                  bool
	Stats                    bool
	IndexStats               bool   `toml:"index-stats"`
	StatsDuration            string `toml:"stats-duration"`
	StatsIndexFormat         string `toml:"stats-index-format"`
	Gzip                     bool
	Verbose                  bool
	Resume                   bool
	ResumeWriteUnsafe        bool  `toml:"resume-write-unsafe"`
	ResumeFromTimestamp      int64 `toml:"resume-from-timestamp"`
	Replay                   bool
	DroppedDatabases         bool   `toml:"dropped-databases"`
	DroppedCollections       bool   `toml:"dropped-collections"`
	IndexFiles               bool   `toml:"index-files"`
	FileHighlighting         bool   `toml:"file-highlighting"`
	EnablePatches            bool   `toml:"enable-patches"`
	FailFast                 bool   `toml:"fail-fast"`
	IndexOplogTime           bool   `toml:"index-oplog-time"`
	ExitAfterDirectReads     bool   `toml:"exit-after-direct-reads"`
	MergePatchAttr           string `toml:"merge-patch-attribute"`
	ElasticMaxConns          int    `toml:"elasticsearch-max-conns"`
	ElasticRetry             bool   `toml:"elasticsearch-retry"`
	ElasticMaxDocs           int    `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes          int    `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds        int    `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout     int    `toml:"elasticsearch-client-timeout"`
	ElasticMajorVersion      int
	ElasticMinorVersion      int
	MaxFileSize              int64 `toml:"max-file-size"`
	ConfigFile               string
	Script                   []javascript
	Filter                   []javascript
	Mapping                  []indexTypeMapping
	FileNamespaces           stringargs `toml:"file-namespaces"`
	PatchNamespaces          stringargs `toml:"patch-namespaces"`
	Workers                  stringargs
	Worker                   string
	DirectReadNs             stringargs     `toml:"direct-read-namespaces"`
	DirectReadBatchSize      int            `toml:"direct-read-batch-size"`
	DirectReadCursors        int            `toml:"direct-read-cursors"`
	MapperPluginPath         string         `toml:"mapper-plugin-path"`
	EnableHTTPServer         bool           `toml:"enable-http-server"`
	HTTPServerAddr           string         `toml:"http-server-addr"`
	TimeMachineNamespaces    stringargs     `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix   string         `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix   string         `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads   bool           `toml:"time-machine-direct-reads"`
	RoutingNamespaces        stringargs     `toml:"routing-namespaces"`
	DeleteStrategy           deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern       string         `toml:"delete-index-pattern"`
	FileDownloaders          int            `toml:"file-downloaders"`
}
// String implements flag.Value by rendering the strategy as its integer code.
func (arg *deleteStrategy) String() string {
	return fmt.Sprintf("%d", *arg)
}

// Set implements flag.Value by parsing an integer strategy code; the
// strconv error is returned unchanged when the value is not an integer.
func (arg *deleteStrategy) Set(value string) error {
	// Early return instead of else-after-return keeps the happy path flat.
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = deleteStrategy(i)
	return nil
}
// String implements flag.Value, rendering the accumulated arguments.
func (args *stringargs) String() string {
	return fmt.Sprintf("%s", *args)
}

// Set implements flag.Value, appending each occurrence of a repeatable flag.
func (args *stringargs) Set(value string) error {
	*args = append(*args, value)
	return nil
}

// isSharded reports whether a MongoDB config server URL was supplied,
// meaning the source deployment is a sharded cluster.
func (config *configOptions) isSharded() bool {
	return config.MongoConfigURL != ""
}
// afterBulk is the elastic.BulkProcessor after-callback: it logs the
// transport-level error (if any) and the details of every failed line in
// the bulk response.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if err != nil {
		errorLog.Printf("Bulk index request with execution ID %d failed: %s", executionId, err)
	}
	if response == nil || !response.Errors {
		return
	}
	failed := response.Failed()
	if failed == nil {
		return
	}
	errorLog.Printf("Bulk index request with execution ID %d has %d line failure(s)", executionId, len(failed))
	for i, item := range failed {
		// Renamed from "json" to avoid shadowing the encoding/json package.
		detail, err := json.Marshal(item)
		if err != nil {
			errorLog.Printf("Unable to marshall failed request line #%d: %s", i, err)
		} else {
			errorLog.Printf("Failed request line #%d details: %s", i, string(detail))
		}
	}
}
// useTypeFromFuture reports whether the connected Elasticsearch version
// (6.2 and later) should index documents under the future "_doc" type.
func (config *configOptions) useTypeFromFuture() (use bool) {
	major, minor := config.ElasticMajorVersion, config.ElasticMinorVersion
	use = major > 6 || (major == 6 && minor >= 2)
	return
}
// parseElasticsearchVersion parses a version string such as "6.2.4" and
// records its major and minor components on the config.
//
// It returns an error when the string is blank, when the major component is
// not numeric or is 0, when the minor component is missing, or when the
// minor component is not numeric.
func (config *configOptions) parseElasticsearchVersion(number string) (err error) {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return err
	}
	if majorVersion == 0 {
		// Fix: previously this error was silently overwritten by the
		// subsequent minor-version parse and the function returned nil.
		return errors.New("Invalid Elasticsearch major version 0")
	}
	config.ElasticMajorVersion = majorVersion
	if len(versionParts) < 2 {
		// Fix: previously versionParts[1] was indexed unconditionally and
		// panicked on single-component versions such as "7".
		return fmt.Errorf("Elasticsearch version %s is missing a minor version", number)
	}
	minorVersion, err := strconv.Atoi(versionParts[1])
	if err != nil {
		return err
	}
	config.ElasticMinorVersion = minorVersion
	return nil
}
// newBulkProcessor builds the primary bulk processor used for indexing,
// applying the configured worker count, flush thresholds (docs/bytes/
// seconds), and retry policy, then starts it.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	if config.ElasticMaxDocs != 0 {
		bulkService.BulkActions(config.ElasticMaxDocs)
	}
	if config.ElasticMaxBytes != 0 {
		bulkService.BulkSize(config.ElasticMaxBytes)
	}
	if !config.ElasticRetry {
		// With retries disabled, fail bulk requests immediately rather
		// than backing off.
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor creates a small bulk processor dedicated to stats
// documents: one worker, no stats collection of its own, and a flush after
// every single action.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache-stats")
	bulkService.Workers(1)
	bulkService.Stats(false)
	bulkService.BulkActions(1)
	bulkService.After(afterBulk)
	return bulkService.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https, in which case the client must be created with the https scheme.
func (config *configOptions) needsSecureScheme() bool {
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient builds the Elasticsearch client from the config: error
// and optional trace logging, sniffing disabled, https scheme when any URL
// requires it, basic auth, exponential-backoff retries, and a custom HTTP
// client obtained from NewHTTPClient.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	var clientOptions []elastic.ClientOptionFunc
	var httpClient *http.Client
	clientOptions = append(clientOptions, elastic.SetErrorLog(errorLog))
	clientOptions = append(clientOptions, elastic.SetSniff(false))
	if config.needsSecureScheme() {
		clientOptions = append(clientOptions, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		clientOptions = append(clientOptions, elastic.SetURL(config.ElasticUrls...))
	} else {
		// No URLs configured: record the client library's default URL so
		// later code (e.g. testElasticsearchConn) can read ElasticUrls[0].
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		clientOptions = append(clientOptions, elastic.SetTraceLog(traceLog))
	}
	if config.ElasticUser != "" {
		clientOptions = append(clientOptions, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		// Exponential backoff between 50ms and 20s per retry.
		d1, d2 := time.Duration(50)*time.Millisecond, time.Duration(20)*time.Second
		retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(d1, d2))
		clientOptions = append(clientOptions, elastic.SetRetrier(retrier))
	}
	httpClient, err = config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	clientOptions = append(clientOptions, elastic.SetHttpClient(httpClient))
	return elastic.NewClient(clientOptions...)
}
// testElasticsearchConn verifies connectivity by fetching the server version
// from the first configured URL and parsing it into the config's
// major/minor version fields.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) (err error) {
	var number string
	url := config.ElasticUrls[0]
	number, err = client.ElasticsearchVersion(url)
	if err == nil {
		infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
		err = config.parseElasticsearchVersion(number)
	}
	return
}
// deleteIndexes deletes the Elasticsearch indexes associated with a dropped
// MongoDB database. The default pattern is "<db>*"; if a configured mapping
// for a namespace in this database overrides the index name, that override
// ("<index>*") is used instead.
//
// NOTE(review): config is accepted but unused here; and because map
// iteration order is random, which override wins is arbitrary when several
// namespaces of this database define one.
func deleteIndexes(client *elastic.Client, db string, config *configOptions) (err error) {
	index := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		dbCol := strings.SplitN(ns, ".", 2)
		if dbCol[0] == db {
			if m.Index != "" {
				index = strings.ToLower(m.Index + "*")
			}
			break
		}
	}
	_, err = client.DeleteIndex(index).Do(context.Background())
	return
}
// deleteIndex deletes the Elasticsearch index for a single MongoDB
// namespace, honoring any configured index override for that namespace.
func deleteIndex(client *elastic.Client, namespace string, config *configOptions) (err error) {
	index := strings.ToLower(namespace)
	if mapping := mapIndexTypes[namespace]; mapping != nil && mapping.Index != "" {
		index = strings.ToLower(mapping.Index)
	}
	_, err = client.DeleteIndex(index).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline that extracts
// content from the "file" field of indexed GridFS documents. Despite the
// name, this creates an ingest pipeline rather than an index mapping.
func ensureFileMapping(client *elastic.Client) (err error) {
	ctx := context.Background()
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors": [1]map[string]interface{}{
			{
				"attachment": map[string]interface{}{
					"field": "file",
				},
			},
		},
	}
	_, err = client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(ctx)
	return err
}
// defaultIndexTypeMapping builds the fallback mapping for an operation:
// the lowercased namespace as the index, and either the "_doc" future type
// or the collection name depending on the Elasticsearch version.
func defaultIndexTypeMapping(config *configOptions, op *gtm.Op) *indexTypeMapping {
	var typeName string
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	} else {
		typeName = op.GetCollection()
	}
	return &indexTypeMapping{
		Namespace: op.Namespace,
		Index:     strings.ToLower(op.Namespace),
		Type:      typeName,
	}
}
// mapIndexType resolves the index/type mapping for an operation, starting
// from the defaults and applying any configured per-namespace overrides.
func mapIndexType(config *configOptions, op *gtm.Op) *indexTypeMapping {
	mapping := defaultIndexTypeMapping(config, op)
	override := mapIndexTypes[op.Namespace]
	if override == nil {
		return mapping
	}
	if override.Index != "" {
		mapping.Index = override.Index
	}
	if override.Type != "" {
		mapping.Type = override.Type
	}
	return mapping
}
// opIDToString renders an operation's document _id as a string suitable for
// use as an Elasticsearch document ID.
//
// ObjectIds become their hex form; float64/float32 ids holding whole numbers
// are rendered without a fractional part (so 5.0 and 5 map to the same
// document); anything else uses fmt's default formatting.
func opIDToString(op *gtm.Op) string {
	// Type switch with binding avoids the repeated op.Id.(T) assertions.
	switch id := op.Id.(type) {
	case bson.ObjectId:
		return id.Hex()
	case float64:
		if intID := int(id); id == float64(intID) {
			return fmt.Sprintf("%v", intID)
		}
		return fmt.Sprintf("%v", id)
	case float32:
		if intID := int(id); id == float32(intID) {
			return fmt.Sprintf("%v", intID)
		}
		return fmt.Sprintf("%v", id)
	default:
		return fmt.Sprintf("%v", op.Id)
	}
}
// convertSliceJavascript prepares a BSON slice for use inside the javascript
// VM, recursing into nested maps/slices and hex-encoding ObjectIds.
func convertSliceJavascript(a []interface{}) []interface{} {
	var converted []interface{}
	for _, item := range a {
		var out interface{}
		switch typed := item.(type) {
		case map[string]interface{}:
			out = convertMapJavascript(typed)
		case []interface{}:
			out = convertSliceJavascript(typed)
		case bson.ObjectId:
			out = typed.Hex()
		default:
			out = item
		}
		converted = append(converted, out)
	}
	return converted
}

// convertMapJavascript prepares a BSON document for use inside the
// javascript VM, recursing into nested maps/slices and hex-encoding
// ObjectIds.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	converted := make(map[string]interface{})
	for key, value := range e {
		switch typed := value.(type) {
		case map[string]interface{}:
			converted[key] = convertMapJavascript(typed)
		case []interface{}:
			converted[key] = convertSliceJavascript(typed)
		case bson.ObjectId:
			converted[key] = typed.Hex()
		default:
			converted[key] = value
		}
	}
	return converted
}
// deepExportValue recursively exports a value obtained from the otto
// javascript VM into plain Go types.
//
// otto.Value instances are exported via Export(); Date objects are special
// cased by parsing their string form into a time.Time (presumably because
// Export does not yield one — confirm against otto's documentation). Maps
// and slices are exported element-wise. On export failure the error is
// logged and nil is returned for that value.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportSlice exports every element of a slice out of javascript land.
func deepExportSlice(a []interface{}) []interface{} {
	var exported []interface{}
	for _, item := range a {
		exported = append(exported, deepExportValue(item))
	}
	return exported
}

// deepExportMap exports every value of a map out of javascript land.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	exported := make(map[string]interface{})
	for key, value := range e {
		exported[key] = deepExportValue(value)
	}
	return exported
}
// mapDataJavascript runs the global ("" key) and then the namespace-specific
// javascript mapper against op.Data. A mapper returning an object replaces
// op.Data with its deep export; a mapper returning a falsy value clears
// op.Data (dropping the document) and stops further mapping.
func mapDataJavascript(op *gtm.Op) error {
	names := []string{"", op.Namespace}
	for _, name := range names {
		if env := mapEnvs[name]; env != nil {
			arg := convertMapJavascript(op.Data)
			val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
			if err != nil {
				return err
			}
			if strings.ToLower(val.Class()) == "object" {
				data, err := val.Export()
				if err != nil {
					return err
				} else if data == val {
					// otto returns the value itself when it cannot export.
					return errors.New("Exported function must return an object")
				} else {
					dm := data.(map[string]interface{})
					op.Data = deepExportMap(dm)
				}
			} else {
				// Non-object result is treated as a keep/drop boolean.
				indexed, err := val.ToBoolean()
				if err != nil {
					return err
				} else if !indexed {
					op.Data = nil
					break
				}
			}
		}
	}
	return nil
}

// mapDataGolang invokes the loaded Go mapper plugin with a copy of the
// session. The plugin may drop the document (Drop), replace it (unless
// Passthrough), and supply indexing metadata which is stashed under
// op.Data["_meta_monstache"] for parseIndexMeta to pick up later.
func mapDataGolang(s *mgo.Session, op *gtm.Op) error {
	session := s.Copy()
	defer session.Close()
	input := &monstachemap.MapperPluginInput{
		Document:   op.Data,
		Namespace:  op.Namespace,
		Database:   op.GetDatabase(),
		Collection: op.GetCollection(),
		Operation:  op.Operation,
		Session:    session,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output != nil {
		if output.Drop {
			op.Data = nil
		} else {
			if output.Passthrough == false {
				op.Data = output.Document
			}
			// Collect only the metadata fields the plugin actually set.
			meta := make(map[string]interface{})
			if output.Index != "" {
				meta["index"] = output.Index
			}
			if output.Type != "" {
				meta["type"] = output.Type
			}
			if output.Routing != "" {
				meta["routing"] = output.Routing
			}
			if output.Parent != "" {
				meta["parent"] = output.Parent
			}
			if output.Version != 0 {
				meta["version"] = output.Version
			}
			if output.VersionType != "" {
				meta["versionType"] = output.VersionType
			}
			if output.Pipeline != "" {
				meta["pipeline"] = output.Pipeline
			}
			if output.RetryOnConflict != 0 {
				meta["retryOnConflict"] = output.RetryOnConflict
			}
			if len(meta) > 0 {
				op.Data["_meta_monstache"] = meta
			}
		}
	}
	return nil
}
// mapData applies the configured document mapping: the Go plugin when a
// mapper plugin path is configured, otherwise any javascript mappers.
func mapData(session *mgo.Session, config *configOptions, op *gtm.Op) error {
	if config.MapperPluginPath == "" {
		return mapDataJavascript(op)
	}
	return mapDataGolang(session, op)
}
// prepareDataForIndexing mutates op.Data just before it is sent to
// Elasticsearch: optionally stamps the oplog time onto the document, and
// strips the MongoDB _id and the internal _meta_monstache attribute.
func prepareDataForIndexing(config *configOptions, op *gtm.Op) {
	data := op.Data
	if config.IndexOplogTime {
		// The upper 32 bits of a Mongo timestamp hold the Unix seconds.
		secs := int64(op.Timestamp >> 32)
		t := time.Unix(secs, 0).UTC()
		data["_oplog_ts"] = op.Timestamp
		data["_oplog_date"] = t.Format("2006/01/02 15:04:05")
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
}
// parseIndexMeta builds the indexingMeta for an operation. The version
// defaults to the oplog timestamp with "external" versioning so stale
// writes are discarded by Elasticsearch. Metadata placed by a mapper under
// _meta_monstache — either a plain map or an otto value exporting to a
// map — overrides the defaults via meta.load.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     int64(op.Timestamp),
		VersionType: "external",
	}
	m, ok := op.Data["_meta_monstache"]
	if !ok {
		return meta
	}
	// Type switch with binding avoids the repeated m.(T) assertions.
	switch metaValue := m.(type) {
	case map[string]interface{}:
		meta.load(metaValue)
	case otto.Value:
		// otto returns the value itself when it cannot export.
		ex, err := metaValue.Export()
		if err == nil && ex != m {
			if metaAttrs, ok := ex.(map[string]interface{}); ok {
				meta.load(metaAttrs)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent base64-encodes the GridFS file identified by op.Id into
// op.Data["file"] so the Elasticsearch attachment pipeline can extract it.
//
// The field is always initialized to "" first, so the document still
// indexes without content when the file is missing, unreadable, or larger
// than max-file-size.
func addFileContent(s *mgo.Session, op *gtm.Op, config *configOptions) (err error) {
	session := s.Copy()
	defer session.Close()
	op.Data["file"] = ""
	var gridByteBuffer bytes.Buffer
	// The GridFS bucket name is the collection prefix (before ".files").
	db, bucket :=
		session.DB(op.GetDatabase()),
		strings.SplitN(op.GetCollection(), ".", 2)[0]
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	file, err := db.GridFS(bucket).OpenId(op.Id)
	if err != nil {
		return
	}
	defer file.Close()
	if config.MaxFileSize > 0 && file.Size() > config.MaxFileSize {
		infoLog.Printf("File %s md5(%s) exceeds max file size. file content omitted.",
			file.Name(), file.MD5())
		return
	}
	if _, err = io.Copy(encoder, file); err != nil {
		return
	}
	// Close flushes the encoder's final partial block.
	if err = encoder.Close(); err != nil {
		return
	}
	// Buffer.String() is the idiomatic form of string(buf.Bytes()).
	op.Data["file"] = gridByteBuffer.String()
	return
}
// notMonstache filters out operations on monstache's own metadata database.
func notMonstache(op *gtm.Op) bool {
	return op.GetDatabase() != "monstache"
}

// notChunks filters out GridFS chunk collections (*.chunks).
func notChunks(op *gtm.Op) bool {
	return !chunksRegex.MatchString(op.GetCollection())
}

// notConfig filters out the MongoDB config database.
func notConfig(op *gtm.Op) bool {
	return op.GetDatabase() != "config"
}

// notSystem filters out system collections (system.*).
func notSystem(op *gtm.Op) bool {
	return !systemsRegex.MatchString(op.GetCollection())
}
// filterWithRegex returns an OpFilter that keeps only operations whose
// namespace matches the supplied pattern. Panics on an invalid pattern.
func filterWithRegex(regex string) gtm.OpFilter {
	pattern := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		matches := pattern.MatchString(op.Namespace)
		return matches
	}
}
// filterWithPlugin returns an OpFilter that consults the loaded Go filter
// plugin for inserts and updates that carry data; other operations always
// pass. If the plugin errors, the error is logged and the operation is
// dropped (keep stays false).
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep bool = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			keep = false
			input := &monstachemap.MapperPluginInput{
				Document:   op.Data,
				Namespace:  op.Namespace,
				Database:   op.GetDatabase(),
				Collection: op.GetCollection(),
				Operation:  op.Operation,
			}
			if ok, err := filterPlugin(input); err == nil {
				keep = ok
			} else {
				errorLog.Println(err)
			}
		}
		return keep
	}
}

// filterWithScript returns an OpFilter that runs the global ("" key) and
// namespace-specific javascript filters against inserts and updates that
// carry data. Filtering stops at the first filter that rejects the
// operation; a VM error is logged and drops the operation (keep stays
// false).
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep bool = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			nss := []string{"", op.Namespace}
			for _, ns := range nss {
				if env := filterEnvs[ns]; env != nil {
					keep = false
					arg := convertMapJavascript(op.Data)
					val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
					if err != nil {
						errorLog.Println(err)
					} else {
						if ok, err := val.ToBoolean(); err == nil {
							keep = ok
						} else {
							errorLog.Println(err)
						}
					}
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex returns an OpFilter that drops operations whose
// namespace matches the supplied pattern. Panics on an invalid pattern.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	pattern := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if pattern.MatchString(op.Namespace) {
			return false
		}
		return true
	}
}
// ensureClusterTTL creates the TTL index on monstache.cluster so a worker's
// claim expires about 30 seconds after its last heartbeat.
func ensureClusterTTL(session *mgo.Session) error {
	col := session.DB("monstache").C("cluster")
	return col.EnsureIndex(mgo.Index{
		Key:         []string{"expireAt"},
		Background:  true,
		ExpireAfter: time.Duration(30) * time.Second,
	})
}

// enableProcess attempts to claim the active role for this cluster by
// inserting a claim document keyed by the resume name. It returns
// (true, nil) when the claim succeeds, (false, nil) when another live
// process already holds it (duplicate key), and (false, err) otherwise.
func enableProcess(s *mgo.Session, config *configOptions) (bool, error) {
	session := s.Copy()
	defer session.Close()
	col := session.DB("monstache").C("cluster")
	doc := make(map[string]interface{})
	doc["_id"] = config.ResumeName
	doc["expireAt"] = time.Now().UTC()
	doc["pid"] = os.Getpid()
	if host, err := os.Hostname(); err == nil {
		doc["host"] = host
	} else {
		return false, err
	}
	err := col.Insert(doc)
	if err == nil {
		return true, nil
	}
	if mgo.IsDup(err) {
		// Another process currently holds the claim.
		return false, nil
	}
	return false, err
}

// resetClusterState releases this process's cluster claim.
func resetClusterState(session *mgo.Session, config *configOptions) error {
	col := session.DB("monstache").C("cluster")
	return col.RemoveId(config.ResumeName)
}

// ensureEnabled checks whether this process still holds the cluster claim
// (matching pid and hostname) and, if so, refreshes the claim's expireAt
// heartbeat.
func ensureEnabled(s *mgo.Session, config *configOptions) (enabled bool, err error) {
	session := s.Copy()
	defer session.Close()
	col := session.DB("monstache").C("cluster")
	doc := make(map[string]interface{})
	if err = col.FindId(config.ResumeName).One(doc); err == nil {
		if doc["pid"] != nil && doc["host"] != nil {
			var hostname string
			// NOTE(review): these assertions panic if the stored types ever
			// differ (e.g. pid decoded as int64); confirm enableProcess is
			// the only writer and always stores int/string.
			pid := doc["pid"].(int)
			host := doc["host"].(string)
			if hostname, err = os.Hostname(); err == nil {
				enabled = (pid == os.Getpid() && host == hostname)
				if enabled {
					err = col.UpdateId(config.ResumeName,
						bson.M{"$set": bson.M{"expireAt": time.Now().UTC()}})
				}
			}
		}
	}
	return
}

// resumeWork restores the last saved oplog timestamp (if any) into the gtm
// context and resumes its reads.
func resumeWork(ctx *gtm.OpCtxMulti, session *mgo.Session, config *configOptions) {
	col := session.DB("monstache").C("monstache")
	doc := make(map[string]interface{})
	// Best effort: a FindId error is deliberately ignored and we resume
	// without a since timestamp.
	col.FindId(config.ResumeName).One(doc)
	if doc["ts"] != nil {
		ts := doc["ts"].(bson.MongoTimestamp)
		ctx.Since(ts)
	}
	ctx.Resume()
}

// saveTimestamp upserts the latest processed oplog timestamp under the
// resume name, using short socket/sync timeouts (and optionally unsafe
// writes) so a slow MongoDB cannot stall the pipeline for long.
func saveTimestamp(s *mgo.Session, ts bson.MongoTimestamp, config *configOptions) error {
	session := s.Copy()
	session.SetSocketTimeout(time.Duration(5) * time.Second)
	session.SetSyncTimeout(time.Duration(5) * time.Second)
	if config.ResumeWriteUnsafe {
		// Fire-and-forget writes: faster, but write errors go unreported.
		session.SetSafe(nil)
	}
	defer session.Close()
	col := session.DB("monstache").C("monstache")
	doc := make(map[string]interface{})
	doc["ts"] = ts
	_, err := col.UpsertId(config.ResumeName, bson.M{"$set": doc})
	return err
}
// parseCommandLineFlags registers every command line flag against the
// config's fields and parses os.Args. Zero/empty defaults here mean "not
// set on the command line", leaving room for config-file values and
// package defaults to be applied later.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoPemFile, "mongo-pem-file", "", "Path to a PEM file for secure connections to MongoDB")
	flag.BoolVar(&config.MongoValidatePemFile, "mongo-validate-pem-file", true, "Set to boolean false to not validate the MongoDB PEM file")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.MongoCursorTimeout, "mongo-cursor-timeout", "", "Override the duration before a cursor timeout occurs when tailing the oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to use gzip for requests to elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include an json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.IntVar(&config.DirectReadBatchSize, "direct-read-batch-size", 0, "The batch size to set on direct read queries")
	flag.IntVar(&config.DirectReadCursors, "direct-read-cursors", 0, "The number of cursors to request for parallel collection scans")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	// Repeatable list flags use the stringargs flag.Value implementation.
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of direct read namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to preprend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into the any time machine indexes")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Stategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restric the scope of stateless deletes")
	flag.Parse()
	return config
}
// loadIndexTypes registers the user-supplied namespace to index/type
// mappings in the global mapIndexTypes table. Each mapping must name a
// namespace plus at least one of index or type; anything else is fatal.
func (config *configOptions) loadIndexTypes() {
	for _, mapping := range config.Mapping {
		if mapping.Namespace == "" || (mapping.Index == "" && mapping.Type == "") {
			panic("Mappings must specify namespace and at least one of index and type")
		}
		mapIndexTypes[mapping.Namespace] = &indexTypeMapping{
			Namespace: mapping.Namespace,
			// Elasticsearch index names must be lowercase.
			Index: strings.ToLower(mapping.Index),
			Type:  mapping.Type,
		}
	}
}
// loadFilters compiles each configured JavaScript filter into its own otto
// VM and registers it in filterEnvs keyed by namespace. A filter is given
// either inline (script) or as a file (path), never both; the script must
// assign a function to module.exports. Any problem is fatal at startup.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script == "" && s.Path == "" {
			panic("Filters must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			panic("Filters must specify path or script but not both")
		}
		if s.Path != "" {
			content, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Panicf("Unable to load filter at path %s: %s", s.Path, err)
			}
			s.Script = string(content)
		}
		if _, exists := filterEnvs[s.Namespace]; exists {
			errorLog.Panicf("Multiple filters with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
		}
		// Provide a CommonJS-style module object for the script to export into.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			panic(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			panic(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			panic(err)
		}
		if !exports.IsFunction() {
			panic("module.exports must be a function")
		}
		filterEnvs[s.Namespace] = env
	}
}
// loadScripts compiles each configured JavaScript mapping script into its
// own otto VM and registers it in mapEnvs keyed by namespace. A script is
// given either inline (script) or as a file (path), never both; it must
// assign a function to module.exports. Scripts flagged with routing also
// mark their namespace in routingNamespaces. Any problem is fatal.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script == "" && s.Path == "" {
			panic("Scripts must specify path or script")
		}
		if s.Path != "" && s.Script != "" {
			panic("Scripts must specify path or script but not both")
		}
		if s.Path != "" {
			content, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Panicf("Unable to load script at path %s: %s", s.Path, err)
			}
			s.Script = string(content)
		}
		if _, exists := mapEnvs[s.Namespace]; exists {
			errorLog.Panicf("Multiple scripts with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
		}
		// Provide a CommonJS-style module object for the script to export into.
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			panic(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			panic(err)
		}
		exports, err := env.VM.Run("module.exports")
		if err != nil {
			panic(err)
		}
		if !exports.IsFunction() {
			panic("module.exports must be a function")
		}
		mapEnvs[s.Namespace] = env
		if s.Routing {
			routingNamespaces[s.Namespace] = true
		}
	}
}
// loadPlugins loads an optional Go plugin (.so file) supplying a document
// mapper and, optionally, a filter. The plugin must export a symbol named
// "Map" with the monstachemap mapper signature; a "Filter" symbol is used
// when present. A signature mismatch on either symbol is fatal.
// Returns config for chaining.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Panicf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	mapper, err := p.Lookup("Map")
	if err != nil {
		errorLog.Panicf("Unable to find symbol 'Map' in mapper plugin: %s", err)
	}
	switch m := mapper.(type) {
	case func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error):
		mapperPlugin = m
	default:
		errorLog.Panicf("Plugin 'Map' function must be typed %T", mapperPlugin)
	}
	filter, err := p.Lookup("Filter")
	if err == nil {
		switch f := filter.(type) {
		case func(*monstachemap.MapperPluginInput) (bool, error):
			// BUG FIX: this previously assigned `mapper.(func(...) (bool, error))`,
			// type-asserting the wrong symbol, which panicked for any plugin
			// that exported a valid Filter function.
			filterPlugin = f
		default:
			errorLog.Panicf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
	}
	return config
}
// loadConfigFile merges settings from the TOML config file (when one was
// given) into config. The merge rule throughout: command-line flags win,
// so a file value is applied only when the corresponding option is still
// at its zero value. Boolean options behave asymmetrically: a true from
// either source sticks, except the two *ValidatePemFile options, which
// default to true and can only be turned OFF by the file.
// Returns config for chaining.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Seed the file decode target with the same non-zero defaults the
		// flag layer uses, so "unset in file" is distinguishable.
		var tomlConfig = configOptions{
			DroppedDatabases:     true,
			DroppedCollections:   true,
			MongoDialSettings:    mongoDialSettings{Timeout: -1},
			MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
			GtmSettings:          gtmDefaultSettings(),
		}
		if _, err := toml.DecodeFile(config.ConfigFile, &tomlConfig); err != nil {
			panic(err)
		}
		// Scalar options: take the file's value only when the flag was unset.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoPemFile == "" {
			config.MongoPemFile = tomlConfig.MongoPemFile
		}
		// Default-true option: the file may only disable it.
		if config.MongoValidatePemFile && !tomlConfig.MongoValidatePemFile {
			config.MongoValidatePemFile = false
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.MongoCursorTimeout == "" {
			config.MongoCursorTimeout = tomlConfig.MongoCursorTimeout
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		// Default-true option: the file may only disable it.
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if config.DirectReadBatchSize == 0 {
			config.DirectReadBatchSize = tomlConfig.DirectReadBatchSize
		}
		if config.DirectReadCursors == 0 {
			config.DirectReadCursors = tomlConfig.DirectReadCursors
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		// Default-true options: the file may only disable them.
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		// Default-false booleans: true from either source wins.
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexFiles && tomlConfig.IndexFiles {
			config.IndexFiles = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		if config.Resume && config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		// Feature namespace lists also populate their lookup sets here.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
			}
			config.loadGridFsConfig()
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
			}
			config.loadPatchNamespaces()
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		// These settings have no flag equivalents and come only from the file.
		config.MongoDialSettings = tomlConfig.MongoDialSettings
		config.MongoSessionSettings = tomlConfig.MongoSessionSettings
		config.GtmSettings = tomlConfig.GtmSettings
		config.Logs = tomlConfig.Logs
		// Scripts/filters/mappings live only in the file; compile them now.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadIndexTypes()
	}
	return config
}
// newLogger builds a rolling-file logger writing to path: 500 MB per file,
// at most 5 backups, retained for 28 days.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	logger := &lumberjack.Logger{
		Filename:   path,
		MaxSize:    500, // megabytes
		MaxBackups: 5,
		MaxAge:     28, // days
	}
	return logger
}
// setupLogging routes the four loggers (info, error, trace, stats). When a
// Graylog address is configured everything goes to a single GELF UDP
// writer; otherwise each logger optionally gets its own rolling file.
// Returns config for chaining.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr == "" {
		logs := config.Logs
		if logs.Info != "" {
			infoLog.SetOutput(config.newLogger(logs.Info))
		}
		if logs.Error != "" {
			errorLog.SetOutput(config.newLogger(logs.Error))
		}
		if logs.Trace != "" {
			traceLog.SetOutput(config.newLogger(logs.Trace))
		}
		if logs.Stats != "" {
			statsLog.SetOutput(config.newLogger(logs.Stats))
		}
		return config
	}
	gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
	if err != nil {
		errorLog.Fatalf("Error creating gelf writer: %s", err)
	}
	infoLog.SetOutput(gelfWriter)
	errorLog.SetOutput(gelfWriter)
	traceLog.SetOutput(gelfWriter)
	statsLog.SetOutput(gelfWriter)
	return config
}
// loadRoutingNamespaces marks each configured routing namespace in the
// global routingNamespaces set. Returns config for chaining.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces marks each configured time-machine namespace in
// the global tmNamespaces set. Returns config for chaining.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces marks each configured patch namespace in the global
// patchNamespaces set. Returns config for chaining.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig marks each configured file (GridFS) namespace in the
// global fileNamespaces set. Returns config for chaining.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump pretty-prints the effective configuration to the info log.
func (config *configOptions) dump() {
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
		return
	}
	infoLog.Println(string(out))
}
/*
If ssl=true is present among the connection string's query parameters,
remove that parameter and enable TLS in the dial settings, because the mgo
driver does not support the ssl option in the connection string.
*/
func (config *configOptions) parseMongoURL(inURL string) (outURL string) {
	const queryDelim string = "?"
	outURL = inURL
	hostQuery := strings.SplitN(outURL, queryDelim, 2)
	if len(hostQuery) != 2 {
		return
	}
	host, query := hostQuery[0], hostQuery[1]
	// BUG FIX: the previous regexp (`ssl=true&?|&ssl=true$`) also matched
	// the substring inside unrelated parameters such as "usessl=true",
	// corrupting them. Compare whole '&'-separated parameters instead.
	var kept []string
	sslFound := false
	for _, param := range strings.Split(query, "&") {
		if param == "ssl=true" {
			sslFound = true
			continue
		}
		kept = append(kept, param)
	}
	if sslFound {
		config.MongoDialSettings.Ssl = true
		if len(kept) == 0 {
			outURL = host
		} else {
			outURL = host + queryDelim + strings.Join(kept, "&")
		}
	}
	return
}
// setDefaults fills in fallback values for any options still unset after
// flags and the config file were applied, derives the resume name, and
// normalizes the MongoDB connection URLs. Returns config for chaining.
func (config *configOptions) setDefaults() *configOptions {
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	// Resume name precedence: cluster[:worker] when clustering (which also
	// forces resume mode), else worker name, else the package default.
	if config.ClusterName != "" {
		// (Removed a redundant re-check of ClusterName != "" that was
		// nested inside this branch.)
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.ResumeName == "" {
		if config.Worker != "" {
			config.ResumeName = config.Worker
		} else {
			config.ResumeName = resumeNameDefault
		}
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	if config.ElasticMaxSeconds == 0 {
		config.ElasticMaxSeconds = 1
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	// Strip the unsupported ssl=true query option (enabling TLS instead).
	if config.MongoURL != "" {
		config.MongoURL = config.parseMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = config.parseMongoURL(config.MongoConfigURL)
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	// Time-based formats below use Go reference-time layouts.
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 {
		config.FileDownloaders = fileDownloadersDefault
	}
	return config
}
// getAuthURL carries the credential prefix (everything before '@') from the
// configured MongoURL over to inURL. When MongoURL has no credentials,
// inURL is returned unchanged.
func (config *configOptions) getAuthURL(inURL string) string {
	parts := strings.SplitN(config.MongoURL, "@", 2)
	if len(parts) < 2 {
		return inURL
	}
	return parts[0] + "@" + inURL
}
// configureMongo applies session-level settings: primary-only reads plus
// optional socket/sync timeouts (a value of -1 means "leave the default").
func (config *configOptions) configureMongo(session *mgo.Session) {
	session.SetMode(mgo.Primary, true)
	settings := config.MongoSessionSettings
	if settings.SocketTimeout != -1 {
		session.SetSocketTimeout(time.Duration(settings.SocketTimeout) * time.Second)
	}
	if settings.SyncTimeout != -1 {
		session.SetSyncTimeout(time.Duration(settings.SyncTimeout) * time.Second)
	}
}
// dialMongo establishes a MongoDB session for inURL. TLS is used when the
// ssl connection-string option was set or a PEM file is configured; in
// that case each server is dialed through tls.Dial with the assembled
// tls.Config. Without TLS it falls back to plain mgo dialing, honoring the
// configured dial timeout (-1 means "use the driver default").
func (config *configOptions) dialMongo(inURL string) (*mgo.Session, error) {
	ssl := config.MongoDialSettings.Ssl || config.MongoPemFile != ""
	if ssl {
		tlsConfig := &tls.Config{}
		if config.MongoPemFile != "" {
			certs := x509.NewCertPool()
			if ca, err := ioutil.ReadFile(config.MongoPemFile); err == nil {
				certs.AppendCertsFromPEM(ca)
			} else {
				return nil, err
			}
			tlsConfig.RootCAs = certs
		}
		if !config.MongoValidatePemFile {
			// Validation disabled: accept any server certificate.
			tlsConfig.InsecureSkipVerify = true
		}
		dialInfo, err := mgo.ParseURL(inURL)
		if err != nil {
			return nil, err
		}
		// Default dial timeout of 10s, overridable via settings.
		dialInfo.Timeout = time.Duration(10) * time.Second
		if config.MongoDialSettings.Timeout != -1 {
			dialInfo.Timeout = time.Duration(config.MongoDialSettings.Timeout) * time.Second
		}
		dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
			conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
			if err != nil {
				errorLog.Printf("Unable to dial mongodb: %s", err)
			}
			return conn, err
		}
		session, err := mgo.DialWithInfo(dialInfo)
		if err == nil {
			session.SetSyncTimeout(1 * time.Minute)
			session.SetSocketTimeout(1 * time.Minute)
		}
		return session, err
	}
	if config.MongoDialSettings.Timeout != -1 {
		return mgo.DialWithTimeout(inURL,
			time.Duration(config.MongoDialSettings.Timeout)*time.Second)
	}
	return mgo.Dial(inURL)
}
// NewHTTPClient builds the HTTP client used for Elasticsearch requests,
// with optional custom CA (ElasticPemFile), optional certificate
// validation bypass, gzip support, and the configured request timeout.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err == nil {
			certs.AppendCertsFromPEM(ca)
			tlsConfig.RootCAs = certs
		} else {
			return client, err
		}
	}
	if !config.ElasticValidatePemFile {
		// Validation disabled: accept any server certificate.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: time.Duration(30) * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	return client, err
}
// doDrop reacts to drop operations from the oplog. Dropping a database
// deletes all of its Elasticsearch indexes and the stored metadata for the
// db; dropping a collection deletes its single index and metadata. Both
// behaviors are gated by the DroppedDatabases/DroppedCollections options.
// Metadata cleanup failures are logged but do not fail the drop.
func doDrop(mongo *mgo.Session, elastic *elastic.Client, op *gtm.Op, config *configOptions) (err error) {
	if db, drop := op.IsDropDatabase(); drop {
		if config.DroppedDatabases {
			if err = deleteIndexes(elastic, db, config); err == nil {
				if e := dropDBMeta(mongo, db); e != nil {
					errorLog.Printf("Unable to delete metadata for db: %s", e)
				}
			}
		}
	} else if col, drop := op.IsDropCollection(); drop {
		if config.DroppedCollections {
			if err = deleteIndex(elastic, op.GetDatabase()+"."+col, config); err == nil {
				if e := dropCollectionMeta(mongo, op.GetDatabase()+"."+col); e != nil {
					errorLog.Printf("Unable to delete metadata for collection: %s", e)
				}
			}
		}
	}
	return
}
// hasFileContent reports whether op's namespace is configured for GridFS
// file-content ingestion (only meaningful when IndexFiles is enabled).
func hasFileContent(op *gtm.Op, config *configOptions) (ingest bool) {
	if config.IndexFiles {
		ingest = fileNamespaces[op.Namespace]
	}
	return
}
// addPatch maintains a JSON merge-patch history on the document under the
// configured MergePatchAttr. For updates it fetches the currently indexed
// document, computes a merge patch from it to the new data, and appends the
// patch (with timestamp and version) to the history; for inserts it stores
// the full document as version 1. Direct reads and ops without a timestamp
// are skipped. op.Data is mutated in place.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexTypeMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp == 0 {
		return nil
	}
	if op.IsUpdate() {
		// Fetch the previously indexed revision, honoring any per-document
		// index/type/routing/parent overrides from the metadata.
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		service.Type(indexType.Type)
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Type != "" {
			service.Type(meta.Type)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(*resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; restore the integer
						// ts/v fields of each stored history entry.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff against the previous revision without its history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								// Oplog timestamps store seconds in the high 32 bits.
								merge["ts"] = op.Timestamp >> 32
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: seed the history with the whole document as version 1.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp >> 32
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing queues a bulk index request for op's document, applying any
// per-document metadata overrides (index, type, routing, parent, version,
// pipeline, retry). When patches are enabled for the namespace it first
// records the merge-patch history; when the namespace is a time-machine
// namespace it additionally queues a second, date-stamped history copy.
func doIndexing(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	meta := parseIndexMeta(op)
	prepareDataForIndexing(config, op)
	objectID, indexType := opIDToString(op), mapIndexType(config, op)
	if config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			// Patch failures are logged but never block indexing.
			if e := addPatch(config, client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	// Base request from the namespace mapping; meta overrides follow.
	req := elastic.NewBulkIndexRequest()
	req.Id(objectID)
	req.Index(indexType.Index)
	req.Type(indexType.Type)
	req.Doc(op.Data)
	if meta.Index != "" {
		req.Index(meta.Index)
	}
	if meta.Type != "" {
		req.Type(meta.Type)
	}
	if meta.Routing != "" {
		req.Routing(meta.Routing)
	}
	if meta.Parent != "" {
		req.Parent(meta.Parent)
	}
	if meta.Version != 0 {
		req.Version(meta.Version)
	}
	if meta.VersionType != "" {
		req.VersionType(meta.VersionType)
	}
	if meta.Pipeline != "" {
		req.Pipeline(meta.Pipeline)
	}
	if meta.RetryOnConflict != 0 {
		req.RetryOnConflict(meta.RetryOnConflict)
	}
	if ingestAttachment {
		// Attachment ingestion overrides any user pipeline.
		req.Pipeline("attachment")
	}
	bulk.Add(req)
	if meta.shouldSave(config) {
		// Stateful deletes need the routing metadata persisted in MongoDB.
		if e := setIndexMeta(mongo, op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// tmIndex derives the dated history index name, e.g.
			// "<prefix>.<index>.<suffix>" run through time.Format.
			tmIndex := func(idx string) string {
				pre, suf := config.TimeMachineIndexPrefix, config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			// Copy the document so history annotations don't leak into the
			// primary index request above.
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			if config.IndexOplogTime == false {
				secs := int64(op.Timestamp >> 32)
				t := time.Unix(secs, 0).UTC()
				data["_oplog_ts"] = op.Timestamp
				data["_oplog_date"] = t.Format("2006/01/02 15:04:05")
			}
			// History docs get a fresh id; route them by the source id so a
			// document's history stays together.
			req = elastic.NewBulkIndexRequest()
			req.Index(tmIndex(indexType.Index))
			req.Type(indexType.Type)
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Type != "" {
				req.Type(meta.Type)
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			bulk.Add(req)
		}
	}
	return
}
// doIndex runs the mapping pipeline for op and then either indexes the
// mapped document or, when the mapper dropped the data on an update,
// deletes the previously indexed document.
func doIndex(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	if err = mapData(mongo, config, op); err != nil {
		return
	}
	if op.Data != nil {
		err = doIndexing(config, mongo, bulk, client, op, ingestAttachment)
		return
	}
	if op.IsUpdate() {
		// Mapper filtered the update out: remove the stale document.
		doDelete(config, client, mongo, bulk, op)
	}
	return
}
// doIndexStats queues a bulk-processor statistics snapshot into the
// date-stamped stats index. The hostname is included when resolvable; a
// hostname error is still returned to the caller after queuing.
func doIndexStats(config *configOptions, bulkStats *elastic.BulkProcessor, stats elastic.BulkProcessorStats) (err error) {
	now := time.Now().UTC()
	doc := map[string]interface{}{
		"Timestamp": now.Format("2006-01-02T15:04:05"),
		"Pid":       os.Getpid(),
		"Stats":     stats,
	}
	var hostname string
	if hostname, err = os.Hostname(); err == nil {
		doc["Host"] = hostname
	}
	index := strings.ToLower(now.Format(config.StatsIndexFormat))
	typeName := "stats"
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	}
	req := elastic.NewBulkIndexRequest().Index(index).Type(typeName)
	req.Doc(doc)
	bulkStats.Add(req)
	return
}
// dropDBMeta removes all stored index metadata belonging to a dropped
// database from the monstache.meta collection.
func dropDBMeta(session *mgo.Session, db string) (err error) {
	metaCol := session.DB("monstache").C("meta")
	_, err = metaCol.RemoveAll(bson.M{"db": db})
	return
}
// dropCollectionMeta removes all stored index metadata belonging to a
// dropped collection (full namespace) from the monstache.meta collection.
func dropCollectionMeta(session *mgo.Session, namespace string) (err error) {
	metaCol := session.DB("monstache").C("meta")
	_, err = metaCol.RemoveAll(bson.M{"namespace": namespace})
	return
}
// load populates meta from the attribute map produced by a mapping script.
// String attributes are stringified with %v; version and retryOnConflict
// are additionally parsed as integers (silently skipped when unparsable).
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
	str := func(key string) (string, bool) {
		if v, ok := metaAttrs[key]; ok {
			return fmt.Sprintf("%v", v), true
		}
		return "", false
	}
	if s, ok := str("routing"); ok {
		meta.Routing = s
	}
	if s, ok := str("index"); ok {
		meta.Index = s
	}
	if s, ok := str("type"); ok {
		meta.Type = s
	}
	if s, ok := str("parent"); ok {
		meta.Parent = s
	}
	if s, ok := str("version"); ok {
		if version, err := strconv.ParseInt(s, 10, 64); err == nil {
			meta.Version = version
		}
	}
	if s, ok := str("versionType"); ok {
		meta.VersionType = s
	}
	if s, ok := str("pipeline"); ok {
		meta.Pipeline = s
	}
	if s, ok := str("retryOnConflict"); ok {
		if roc, err := strconv.Atoi(s); err == nil {
			meta.RetryOnConflict = roc
		}
	}
}
// shouldSave reports whether this metadata needs to be persisted to
// MongoDB: only under the stateful delete strategy, and only when at least
// one override field is set (otherwise deletes can be routed statelessly).
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the per-document routing metadata into the
// monstache.meta collection, keyed by "<namespace>.<id>" so stateful
// deletes can later locate the document in Elasticsearch.
func setIndexMeta(session *mgo.Session, namespace, id string, meta *indexingMeta) error {
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"routing":  meta.Routing,
		"index":    meta.Index,
		"type":     meta.Type,
		"parent":   meta.Parent,
		"pipeline": meta.Pipeline,
		// The database name is the namespace prefix before the first dot.
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	metaCol := session.DB("monstache").C("meta")
	_, err := metaCol.UpsertId(metaID, bson.M{"$set": doc})
	return err
}
// getIndexMeta loads the routing metadata saved by setIndexMeta for
// "<namespace>.<id>" and then deletes it — the metadata is one-shot,
// consumed by the delete that follows. A missing document yields an empty
// meta (the FindId error is deliberately ignored and doc stays empty).
// NOTE(review): the string type assertions assume setIndexMeta wrote these
// fields; a hand-edited meta document with non-string values would panic.
func getIndexMeta(session *mgo.Session, namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	col := session.DB("monstache").C("meta")
	doc := make(map[string]interface{})
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	col.FindId(metaID).One(doc)
	if doc["routing"] != nil {
		meta.Routing = doc["routing"].(string)
	}
	if doc["index"] != nil {
		// Elasticsearch index names must be lowercase.
		meta.Index = strings.ToLower(doc["index"].(string))
	}
	if doc["type"] != nil {
		meta.Type = doc["type"].(string)
	}
	if doc["parent"] != nil {
		meta.Parent = doc["parent"].(string)
	}
	if doc["pipeline"] != nil {
		meta.Pipeline = doc["pipeline"].(string)
	}
	// Consume the metadata now that it has been read.
	col.RemoveId(metaID)
	return
}
// loadBuiltinFunctions injects the MongoDB query helpers (findId, findOne,
// find) into every mapping-script VM, bound to that script's namespace.
func loadBuiltinFunctions(s *mgo.Session) {
	for ns, env := range mapEnvs {
		finders := []*findConf{
			{session: s, name: "findId", vm: env.VM, ns: ns, byId: true},
			{session: s, name: "findOne", vm: env.VM, ns: ns},
			{session: s, name: "find", vm: env.VM, ns: ns, multi: true},
		}
		for _, fa := range finders {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				panic(err)
			}
		}
	}
}
// setDatabase applies the optional "database" option; it must be a string.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	ov, ok := topts["database"]
	if !ok {
		return
	}
	ovs, ok := ov.(string)
	if !ok {
		return errors.New("Invalid database option value")
	}
	fc.db = ovs
	return
}
// setCollection applies the optional "collection" option; it must be a string.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	ov, ok := topts["collection"]
	if !ok {
		return
	}
	ovs, ok := ov.(string)
	if !ok {
		return errors.New("Invalid collection option value")
	}
	fc.col = ovs
	return
}
// setSelect applies the optional "select" projection option: a map of
// field name to include/exclude flag. Non-integer flags are skipped.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	ov, ok := topts["select"]
	if !ok {
		return
	}
	ovsel, ok := ov.(map[string]interface{})
	if !ok {
		return errors.New("Invalid select option value")
	}
	for field, v := range ovsel {
		// otto exports JavaScript numbers used as flags as int64.
		if flag, isInt := v.(int64); isInt {
			fc.sel[field] = int(flag)
		}
	}
	return
}
// setSort applies the optional "sort" option; it must be a string slice.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	ov, ok := topts["sort"]
	if !ok {
		return
	}
	ovs, ok := ov.([]string)
	if !ok {
		return errors.New("Invalid sort option value")
	}
	fc.sort = ovs
	return
}
// setLimit applies the optional "limit" option; it must be an integer
// (otto exports JavaScript integers as int64).
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	ov, ok := topts["limit"]
	if !ok {
		return
	}
	ovl, ok := ov.(int64)
	if !ok {
		return errors.New("Invalid limit option value")
	}
	fc.limit = int(ovl)
	return
}
// setQuery exports the JavaScript query argument and restores any hex
// ObjectId strings inside it to real bson.ObjectId values.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	exported, exportErr := v.Export()
	if exportErr != nil {
		return exportErr
	}
	fc.query = fc.restoreIds(exported)
	return
}
// setOptions exports the JavaScript options argument (which must be a
// plain object) and applies each recognized option. Sort and limit are
// only honored for the multi-result "find" helper.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	exported, exportErr := v.Export()
	if exportErr != nil {
		return errors.New("Invalid options argument")
	}
	topts, ok := exported.(map[string]interface{})
	if !ok {
		return errors.New("Invalid options argument")
	}
	setters := []func(map[string]interface{}) error{
		fc.setDatabase,
		fc.setCollection,
		fc.setSelect,
	}
	if fc.isMulti() {
		setters = append(setters, fc.setSort, fc.setLimit)
	}
	for _, apply := range setters {
		if err = apply(topts); err != nil {
			return
		}
	}
	return
}
// setDefaults seeds the call's database and collection from the script's
// bound namespace ("db.collection"). Uses SplitN so collection names that
// themselves contain dots stay intact, and guards against a namespace with
// no dot (the previous Split(...)[1] indexing would panic on it).
func (fc *findCall) setDefaults() {
	ns := fc.config.ns
	if ns == "" {
		return
	}
	parts := strings.SplitN(ns, ".", 2)
	fc.db = parts[0]
	if len(parts) == 2 {
		fc.col = parts[1]
	}
}
// getCollection returns the mgo collection handle for the call's db/col.
func (fc *findCall) getCollection() *mgo.Collection {
	return fc.session.DB(fc.db).C(fc.col)
}
// getVM returns the otto VM the result values must be created in.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the JavaScript-visible helper name (findId,
// findOne, or find); used for error reporting.
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether this call returns multiple documents ("find")
// rather than a single one ("findId"/"findOne").
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// logError reports a helper failure to the error log, tagged with the
// JavaScript function name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts every string
// that is a valid ObjectId hex into a bson.ObjectId, recursing through
// slices and maps, so queries written in scripts match by _id correctly.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch vt := v.(type) {
	case string:
		r = v
		if bson.IsObjectIdHex(vt) {
			r = bson.ObjectIdHex(vt)
		}
	case []interface{}:
		// Keep a nil result for an empty input slice, as before.
		var restored []interface{}
		for _, item := range vt {
			restored = append(restored, fc.restoreIds(item))
		}
		r = restored
	case map[string]interface{}:
		restored := make(map[string]interface{})
		for key, val := range vt {
			restored[key] = fc.restoreIds(val)
		}
		r = restored
	default:
		r = v
	}
	return
}
// execute runs the prepared query against MongoDB and converts the result
// into an otto value for the calling script. Multi-result calls apply the
// optional limit/sort/select; single-result calls use FindId for the
// findId helper and a regular Find otherwise.
func (fc *findCall) execute() (r otto.Value, err error) {
	col := fc.getCollection()
	if fc.isMulti() {
		var docs []map[string]interface{}
		mq := col.Find(fc.query)
		if fc.limit > 0 {
			mq.Limit(fc.limit)
		}
		if len(fc.sort) > 0 {
			mq.Sort(fc.sort...)
		}
		if len(fc.sel) > 0 {
			mq.Select(fc.sel)
		}
		if err = mq.All(&docs); err == nil {
			r, err = fc.getVM().ToValue(docs)
		}
	} else {
		doc := make(map[string]interface{})
		if fc.config.byId {
			// findId: the query value is the document _id itself.
			if err = col.FindId(fc.query).One(doc); err == nil {
				r, err = fc.getVM().ToValue(doc)
			}
		} else {
			if err = col.Find(fc.query).One(doc); err == nil {
				r, err = fc.getVM().ToValue(doc)
			}
		}
	}
	return
}
// makeFind builds the JavaScript-callable query helper for fa. The returned
// closure takes (query[, options]), runs it against MongoDB on a copied
// session, and returns the result value — or JavaScript null on any error
// (errors are logged, never thrown into the script).
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		fc := &findCall{
			config: fa,
			// Copy the session so concurrent script calls don't share state.
			session: fa.session.Copy(),
			sel:     make(map[string]int),
		}
		defer fc.session.Close()
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			// Options (second argument) are applied before the query so
			// database/collection overrides take effect.
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// doDelete queues an Elasticsearch delete for op's document according to
// the configured strategy: stateful looks up routing metadata previously
// saved in MongoDB; stateless searches Elasticsearch (restricted by
// DeleteIndexPattern) for the unique document to learn its index/routing;
// ignore is a no-op. External versioning by oplog timestamp guards against
// deleting a newer revision.
func doDelete(config *configOptions, client *elastic.Client, mongo *mgo.Session, bulk *elastic.BulkProcessor, op *gtm.Op) {
	// Check the strategy before allocating the request (previously the
	// request was built even when deletes were ignored).
	if config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	req := elastic.NewBulkDeleteRequest()
	objectID, indexType, meta := opIDToString(op), mapIndexType(config, op), &indexingMeta{}
	req.Id(objectID)
	req.Version(int64(op.Timestamp))
	req.VersionType("external")
	if config.DeleteStrategy == statefulDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Consume the routing metadata saved at index time.
			meta = getIndexMeta(mongo, op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	} else if config.DeleteStrategy == statelessDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Locate the document by _id; it must be unique within the
			// delete index pattern or the delete is abandoned.
			termQuery := elastic.NewTermQuery("_id", objectID)
			searchResult, err := client.Search().FetchSource(false).Size(1).Index(config.DeleteIndexPattern).Query(termQuery).Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s", objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.Hits.TotalHits == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s", objectID, config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
			req.Type(indexType.Type)
		}
	} else {
		return
	}
	bulk.Add(req)
}
// gtmDefaultSettings returns the default gtm event-pipeline tuning: the
// package-default channel size with a 32-event buffer flushed every 750ms.
func gtmDefaultSettings() gtmSettings {
	settings := gtmSettings{
		ChannelSize:    gtmChannelSizeDefault,
		BufferSize:     32,
		BufferDuration: "750ms",
	}
	return settings
}
// notifySdFailed logs the outcome of an unsuccessful systemd READY
// notification: a real error, or (verbosely) the unsupported case where
// NOTIFY_SOCKET is unset.
func notifySdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if config.Verbose {
		infoLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed logs the outcome of a failed systemd watchdog-interval
// query: a real error, or (verbosely) the watchdog simply not being enabled.
func watchdogSdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if config.Verbose {
		infoLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHttp runs the embedded HTTP server and blocks until it stops.
// The error from ListenAndServe only triggers a panic when the stop was
// not an intentional shutdown (ctx.shutdown is set by the shutdown path).
func (ctx *httpServerCtx) serveHttp() {
    server := ctx.httpServer
    if ctx.config.Verbose {
        infoLog.Printf("Starting http server at %s", server.Addr)
    }
    ctx.started = time.Now()
    listenErr := server.ListenAndServe()
    if ctx.shutdown == false {
        errorLog.Panicf("Unable to serve http at address %s: %s", server.Addr, listenErr)
    }
}
// buildServer constructs the embedded HTTP server and its routes:
// /started (uptime), /healthz (liveness), /stats (bulk indexing stats,
// only when stats are enabled) and /config (the effective configuration).
func (ctx *httpServerCtx) buildServer() {
    mux := http.NewServeMux()
    mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        // time.Since is the idiomatic form of time.Now().Sub(t).
        data := time.Since(ctx.started).String()
        w.Write([]byte(data))
    })
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusOK)
        w.Write([]byte("ok"))
    })
    if ctx.config.Stats {
        mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
            stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
            if err == nil {
                w.Header().Set("Content-Type", "application/json")
                w.WriteHeader(http.StatusOK)
                w.Write(stats)
            } else {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Fprintf(w, "Unable to print statistics: %s", err)
            }
        })
    }
    mux.HandleFunc("/config", func(w http.ResponseWriter, req *http.Request) {
        conf, err := json.MarshalIndent(ctx.config, "", "    ")
        if err == nil {
            w.Header().Set("Content-Type", "application/json")
            w.WriteHeader(http.StatusOK)
            w.Write(conf)
        } else {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Fprintf(w, "Unable to print config: %s", err)
        }
    })
    s := &http.Server{
        Addr:     ctx.config.HTTPServerAddr,
        Handler:  mux,
        ErrorLog: errorLog,
    }
    ctx.httpServer = s
}
// notifySd integrates with systemd: it sends READY=1 once and then, when a
// watchdog interval is configured, sends WATCHDOG=1 every half interval
// forever. Intended to run in its own goroutine.
func notifySd(config *configOptions) {
    if config.Verbose {
        infoLog.Println("Sending systemd READY=1")
    }
    sent, err := daemon.SdNotify(false, "READY=1")
    if !sent {
        notifySdFailed(config, err)
        return
    }
    if config.Verbose {
        infoLog.Println("READY=1 successfully sent to systemd")
    }
    interval, err := daemon.SdWatchdogEnabled(false)
    if err != nil || interval == 0 {
        watchdogSdFailed(config, err)
        return
    }
    for {
        if config.Verbose {
            infoLog.Println("Sending systemd WATCHDOG=1")
        }
        sent, err = daemon.SdNotify(false, "WATCHDOG=1")
        if !sent {
            notifySdFailed(config, err)
            return
        }
        if config.Verbose {
            infoLog.Println("WATCHDOG=1 successfully sent to systemd")
        }
        // Notify at twice the watchdog frequency, per systemd guidance.
        time.Sleep(interval / 2)
    }
}
// makeShardInsertHandler returns a gtm callback invoked when a new shard
// appears in the cluster; it dials and configures a session to the shard so
// gtm can begin tailing it.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
    return func(shardInfo *gtm.ShardInfo) (*mgo.Session, error) {
        infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
        shardURL := config.getAuthURL(shardInfo.GetURL())
        shard, err := config.dialMongo(shardURL)
        if err != nil {
            return nil, err
        }
        config.configureMongo(shard)
        return shard, nil
    }
}
// shutdown performs best-effort cleanup (cluster state reset, HTTP server
// shutdown, bulk flushes) in a goroutine, waits at most 5 seconds for it to
// finish, and then exits the process with exitStatus.
//
// The previous revision spawned a second goroutine looping over a select on
// a 5-second time.NewTicker that was never stopped; a one-shot wait with
// time.After is equivalent and simpler.
func shutdown(exitStatus int, hsc *httpServerCtx, bulk *elastic.BulkProcessor, bulkStats *elastic.BulkProcessor, mongo *mgo.Session, config *configOptions) {
    infoLog.Println("Shutting down")
    closeC := make(chan bool)
    go func() {
        if config.ClusterName != "" {
            resetClusterState(mongo, config)
        }
        if hsc != nil {
            // Mark the shutdown intentional so serveHttp does not panic.
            hsc.shutdown = true
            hsc.httpServer.Shutdown(context.Background())
        }
        bulk.Flush()
        if bulkStats != nil {
            bulkStats.Flush()
        }
        close(closeC)
    }()
    // Wait for cleanup to complete, but never longer than 5 seconds.
    select {
    case <-closeC:
    case <-time.After(5 * time.Second):
    }
    os.Exit(exitStatus)
}
func main() {
enabled := true
config := &configOptions{
MongoDialSettings: mongoDialSettings{Timeout: -1},
MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
GtmSettings: gtmDefaultSettings(),
}
config.parseCommandLineFlags()
if config.Version {
fmt.Println(version)
os.Exit(0)
}
config.loadTimeMachineNamespaces()
config.loadRoutingNamespaces()
config.loadPatchNamespaces()
config.loadGridFsConfig()
config.loadConfigFile()
config.setDefaults()
if config.Print {
config.dump()
os.Exit(0)
}
config.setupLogging()
config.loadPlugins()
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
mongo, err := config.dialMongo(config.MongoURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb using URL %s: %s", config.MongoURL, err)
}
if mongoInfo, err := mongo.BuildInfo(); err == nil {
infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
} else {
infoLog.Println("Successfully connected to MongoDB")
}
defer mongo.Close()
config.configureMongo(mongo)
loadBuiltinFunctions(mongo)
elasticClient, err := config.newElasticClient()
if err != nil {
errorLog.Panicf("Unable to create elasticsearch client: %s", err)
}
if config.ElasticVersion == "" {
if err := config.testElasticsearchConn(elasticClient); err != nil {
errorLog.Panicf("Unable to validate connection to elasticsearch using client %s: %s",
elasticClient, err)
}
} else {
if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
errorLog.Panicf("Elasticsearch version must conform to major.minor.fix: %s", err)
}
}
bulk, err := config.newBulkProcessor(elasticClient)
if err != nil {
errorLog.Panicf("Unable to start bulk processor: %s", err)
}
defer bulk.Stop()
var bulkStats *elastic.BulkProcessor
if config.IndexStats {
bulkStats, err = config.newStatsBulkProcessor(elasticClient)
if err != nil {
errorLog.Panicf("Unable to start stats bulk processor: %s", err)
}
defer bulkStats.Stop()
}
var after gtm.TimestampGenerator
if config.Resume {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
ts := gtm.LastOpTimestamp(session, options)
if config.Replay {
ts = bson.MongoTimestamp(0)
} else if config.ResumeFromTimestamp != 0 {
ts = bson.MongoTimestamp(config.ResumeFromTimestamp)
} else {
collection := session.DB("monstache").C("monstache")
doc := make(map[string]interface{})
collection.FindId(config.ResumeName).One(doc)
if doc["ts"] != nil {
ts = doc["ts"].(bson.MongoTimestamp)
}
}
return ts
}
} else if config.Replay {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
return bson.MongoTimestamp(0)
}
}
if config.IndexFiles {
if len(config.FileNamespaces) == 0 {
errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
}
if err := ensureFileMapping(elasticClient); err != nil {
panic(err)
}
}
var nsFilter, filter, directReadFilter gtm.OpFilter
filterChain := []gtm.OpFilter{notMonstache, notSystem, notChunks}
filterArray := []gtm.OpFilter{}
if config.isSharded() {
filterChain = append(filterChain, notConfig)
}
if config.NsRegex != "" {
filterChain = append(filterChain, filterWithRegex(config.NsRegex))
}
if config.NsExcludeRegex != "" {
filterChain = append(filterChain, filterInverseWithRegex(config.NsExcludeRegex))
}
if config.Worker != "" {
workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
if err != nil {
panic(err)
}
filterChain = append(filterChain, workerFilter)
} else if config.Workers != nil {
panic("Workers configured but this worker is undefined. worker must be set to one of the workers.")
}
if filterPlugin != nil {
filterArray = append(filterArray, filterWithPlugin())
} else if len(filterEnvs) > 0 {
filterArray = append(filterArray, filterWithScript())
}
nsFilter = gtm.ChainOpFilters(filterChain...)
filter = gtm.ChainOpFilters(filterArray...)
directReadFilter = gtm.ChainOpFilters(filterArray...)
var oplogDatabaseName, oplogCollectionName, cursorTimeout *string
if config.MongoOpLogDatabaseName != "" {
oplogDatabaseName = &config.MongoOpLogDatabaseName
}
if config.MongoOpLogCollectionName != "" {
oplogCollectionName = &config.MongoOpLogCollectionName
}
if config.MongoCursorTimeout != "" {
cursorTimeout = &config.MongoCursorTimeout
}
if config.ClusterName != "" {
if err = ensureClusterTTL(mongo); err == nil {
infoLog.Printf("Joined cluster %s", config.ClusterName)
} else {
errorLog.Panicf("Unable to enable cluster mode: %s", err)
}
enabled, err = enableProcess(mongo, config)
if err != nil {
errorLog.Panicf("Unable to determine enabled cluster process: %s", err)
}
if !enabled {
config.DirectReadNs = stringargs{}
}
}
gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
if err != nil {
errorLog.Panicf("Unable to parse gtm buffer duration %s: %s", config.GtmSettings.BufferDuration, err)
}
var mongos []*mgo.Session
var configSession *mgo.Session
if config.isSharded() {
// if we have a config server URL then we are running in a sharded cluster
configSession, err = config.dialMongo(config.MongoConfigURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb config server using URL %s: %s", config.MongoConfigURL, err)
}
config.configureMongo(configSession)
// get the list of shard servers
shardInfos := gtm.GetShards(configSession)
if len(shardInfos) == 0 {
errorLog.Fatalln("Shards enabled but none found in config.shards collection")
}
// add each shard server to the sync list
for _, shardInfo := range shardInfos {
infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
shardURL := config.getAuthURL(shardInfo.GetURL())
shard, err := config.dialMongo(shardURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb shard using URL %s: %s", shardURL, err)
}
defer shard.Close()
config.configureMongo(shard)
mongos = append(mongos, shard)
}
} else {
mongos = append(mongos, mongo)
}
gtmOpts := >m.Options{
After: after,
Filter: filter,
NamespaceFilter: nsFilter,
OpLogDatabaseName: oplogDatabaseName,
OpLogCollectionName: oplogCollectionName,
CursorTimeout: cursorTimeout,
ChannelSize: config.GtmSettings.ChannelSize,
Ordering: gtm.Oplog,
WorkerCount: 1,
BufferDuration: gtmBufferDuration,
BufferSize: config.GtmSettings.BufferSize,
DirectReadNs: config.DirectReadNs,
DirectReadBatchSize: config.DirectReadBatchSize,
DirectReadCursors: config.DirectReadCursors,
DirectReadFilter: directReadFilter,
Log: infoLog,
}
gtmCtx := gtm.StartMulti(mongos, gtmOpts)
if config.isSharded() {
gtmCtx.AddShardListener(configSession, gtmOpts, config.makeShardInsertHandler())
}
if config.ClusterName != "" {
if enabled {
infoLog.Printf("Starting work for cluster %s", config.ClusterName)
} else {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
}
}
timestampTicker := time.NewTicker(10 * time.Second)
if config.Resume == false {
timestampTicker.Stop()
}
heartBeat := time.NewTicker(10 * time.Second)
if config.ClusterName == "" {
heartBeat.Stop()
}
statsTimeout := time.Duration(30) * time.Second
if config.StatsDuration != "" {
statsTimeout, err = time.ParseDuration(config.StatsDuration)
if err != nil {
errorLog.Panicf("Unable to parse stats duration: %s", err)
}
}
printStats := time.NewTicker(statsTimeout)
if config.Stats == false {
printStats.Stop()
}
exitStatus := 0
go notifySd(config)
var hsc *httpServerCtx
if config.EnableHTTPServer {
hsc = &httpServerCtx{
bulk: bulk,
config: config,
}
hsc.buildServer()
go hsc.serveHttp()
}
go func() {
<-sigs
shutdown(exitStatus, hsc, bulk, bulkStats, mongo, config)
}()
if len(config.DirectReadNs) > 0 {
if config.ExitAfterDirectReads {
go func() {
gtmCtx.DirectReadWg.Wait()
shutdown(exitStatus, hsc, bulk, bulkStats, mongo, config)
}()
}
}
infoLog.Println("Entering event loop")
var lastTimestamp, lastSavedTimestamp bson.MongoTimestamp
fileC := make(chan *gtm.Op)
fileDoneC := make(chan *gtm.Op)
for i := 0; i < config.FileDownloaders; i++ {
go func() {
for op := range fileC {
err := addFileContent(mongo, op, config)
if err != nil {
gtmCtx.ErrC <- err
}
fileDoneC <- op
}
}()
}
for {
select {
case <-timestampTicker.C:
if lastTimestamp > lastSavedTimestamp {
bulk.Flush()
if saveTimestamp(mongo, lastTimestamp, config); err == nil {
lastSavedTimestamp = lastTimestamp
} else {
gtmCtx.ErrC <- err
}
}
case <-heartBeat.C:
if config.ClusterName == "" {
break
}
if enabled {
enabled, err = ensureEnabled(mongo, config)
if !enabled {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
bulk.Stop()
}
} else {
enabled, err = enableProcess(mongo, config)
if enabled {
infoLog.Printf("Resuming work for cluster %s", config.ClusterName)
bulk.Start(context.Background())
resumeWork(gtmCtx, mongo, config)
}
}
if err != nil {
gtmCtx.ErrC <- err
}
case <-printStats.C:
if !enabled {
break
}
if config.IndexStats {
if err := doIndexStats(config, bulkStats, bulk.Stats()); err != nil {
errorLog.Printf("Error indexing statistics: %s", err)
}
} else {
stats, err := json.Marshal(bulk.Stats())
if err != nil {
errorLog.Printf("Unable to log statistics: %s", err)
} else {
statsLog.Println(string(stats))
}
}
case err = <-gtmCtx.ErrC:
exitStatus = 1
errorLog.Println(err)
if config.FailFast {
os.Exit(exitStatus)
}
case op := <-fileDoneC:
ingest := op.Data["file"] != nil
if err = doIndex(config, mongo, bulk, elasticClient, op, ingest); err != nil {
gtmCtx.ErrC <- err
}
case op := <-gtmCtx.OpC:
if !enabled {
break
}
if op.IsSourceOplog() {
lastTimestamp = op.Timestamp
}
if op.IsDrop() {
bulk.Flush()
if err = doDrop(mongo, elasticClient, op, config); err != nil {
gtmCtx.ErrC <- err
}
} else if op.IsDelete() {
doDelete(config, elasticClient, mongo, bulk, op)
} else if op.Data != nil {
if hasFileContent(op, config) {
fileC <- op
} else {
if err = doIndex(config, mongo, bulk, elasticClient, op, false); err != nil {
gtmCtx.ErrC <- err
}
}
}
}
}
}
simplify interface
// Package main provides the monstache binary.
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/olivere/elastic"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"golang.org/x/net/context"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/signal"
"plugin"
"regexp"
"strconv"
"strings"
"syscall"
"time"
)
// Log streams: everything except errors goes to stdout.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())

// Optional Go-plugin hooks loaded at startup (nil when unused).
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)

// Per-namespace lookup tables built from the configuration. The explicit
// map types were redundant next to make (staticcheck S1021).
var mapEnvs = make(map[string]*executionEnv)
var filterEnvs = make(map[string]*executionEnv)
var mapIndexTypes = make(map[string]*indexTypeMapping)
var fileNamespaces = make(map[string]bool)
var patchNamespaces = make(map[string]bool)
var tmNamespaces = make(map[string]bool)
var routingNamespaces = make(map[string]bool)

// Raw strings avoid double-escaping the regex metacharacters.
var chunksRegex = regexp.MustCompile(`\.chunks$`)
var systemsRegex = regexp.MustCompile(`system\..+$`)
// version is the monstache release identifier reported by -version.
const version = "4.4.0"

// Default values applied by setDefaults when the configuration omits them.
const (
    mongoURLDefault             string = "localhost"
    resumeNameDefault           string = "default"
    elasticMaxConnsDefault      int    = 10
    elasticClientTimeoutDefault int    = 60
    elasticMaxDocsDefault       int    = 1000
    gtmChannelSizeDefault       int    = 512
    typeFromFuture              string = "_doc"
    fileDownloadersDefault             = 10
)
// deleteStrategy selects how monstache locates the Elasticsearch document
// that corresponds to a MongoDB delete operation.
type deleteStrategy int

const (
    // statelessDeleteStrategy searches Elasticsearch by _id across the
    // configured delete-index-pattern to discover index/type/routing.
    statelessDeleteStrategy deleteStrategy = iota
    // statefulDeleteStrategy reads indexing metadata previously recorded
    // (via getIndexMeta) for namespaces with custom routing.
    statefulDeleteStrategy
    // ignoreDeleteStrategy skips delete operations entirely.
    ignoreDeleteStrategy
)
// stringargs collects the values of a repeatable command-line flag; its
// String and Set methods implement the flag.Value interface.
type stringargs []string
// executionEnv pairs an otto javascript VM with the script loaded into it.
type executionEnv struct {
    VM     *otto.Otto
    Script string
}

// javascript is the configuration of one mapping or filter script.
type javascript struct {
    Namespace string
    Script    string
    Path      string
    Routing   bool
}

// indexTypeMapping maps a MongoDB namespace to an Elasticsearch index/type.
type indexTypeMapping struct {
    Namespace string
    Index     string
    Type      string
}

// findConf configures a MongoDB lookup helper; it carries an otto VM, so it
// is presumably exposed to the javascript environment — confirm at call site.
type findConf struct {
    vm      *otto.Otto
    ns      string
    name    string
    session *mgo.Session
    byId    bool
    multi   bool
}

// findCall captures the state of a single lookup performed via findConf.
type findCall struct {
    config  *findConf
    session *mgo.Session
    query   interface{}
    db      string
    col     string
    limit   int
    sort    []string
    sel     map[string]int
}

// logFiles holds optional file paths for each log stream.
type logFiles struct {
    Info  string
    Error string
    Trace string
    Stats string
}

// indexingMeta carries per-document Elasticsearch indexing overrides
// (populated from the "_meta_monstache" attribute of a mapped document).
type indexingMeta struct {
    Routing         string
    Index           string
    Type            string
    Parent          string
    Version         int64
    VersionType     string
    Pipeline        string
    RetryOnConflict int
}

// mongoDialSettings mirrors the [mongo-dial-settings] config section.
type mongoDialSettings struct {
    Timeout int
    Ssl     bool
}

// mongoSessionSettings mirrors the [mongo-session-settings] config section.
type mongoSessionSettings struct {
    SocketTimeout int `toml:"socket-timeout"`
    SyncTimeout   int `toml:"sync-timeout"`
}

// gtmSettings mirrors the [gtm-settings] config section.
type gtmSettings struct {
    ChannelSize    int    `toml:"channel-size"`
    BufferSize     int    `toml:"buffer-size"`
    BufferDuration string `toml:"buffer-duration"`
}

// httpServerCtx bundles the optional embedded HTTP server with its state.
type httpServerCtx struct {
    httpServer *http.Server
    bulk       *elastic.BulkProcessor
    config     *configOptions
    shutdown   bool // set before Shutdown so serveHttp does not panic
    started    time.Time
}
// configOptions is the complete monstache configuration, merged from
// command-line flags and the optional TOML configuration file (the toml
// tags give the file's key names).
type configOptions struct {
    // MongoDB connection settings.
    MongoURL                 string               `toml:"mongo-url"`
    MongoConfigURL           string               `toml:"mongo-config-url"`
    MongoPemFile             string               `toml:"mongo-pem-file"`
    MongoValidatePemFile     bool                 `toml:"mongo-validate-pem-file"`
    MongoOpLogDatabaseName   string               `toml:"mongo-oplog-database-name"`
    MongoOpLogCollectionName string               `toml:"mongo-oplog-collection-name"`
    MongoCursorTimeout       string               `toml:"mongo-cursor-timeout"`
    MongoDialSettings        mongoDialSettings    `toml:"mongo-dial-settings"`
    MongoSessionSettings     mongoSessionSettings `toml:"mongo-session-settings"`
    GtmSettings              gtmSettings          `toml:"gtm-settings"`
    Logs                     logFiles             `toml:"logs"`
    GraylogAddr              string               `toml:"graylog-addr"`
    // Elasticsearch connection settings.
    ElasticUrls              stringargs           `toml:"elasticsearch-urls"`
    ElasticUser              string               `toml:"elasticsearch-user"`
    ElasticPassword          string               `toml:"elasticsearch-password"`
    ElasticPemFile           string               `toml:"elasticsearch-pem-file"`
    ElasticValidatePemFile   bool                 `toml:"elasticsearch-validate-pem-file"`
    ElasticVersion           string               `toml:"elasticsearch-version"`
    ResumeName               string               `toml:"resume-name"`
    NsRegex                  string               `toml:"namespace-regex"`
    NsExcludeRegex           string               `toml:"namespace-exclude-regex"`
    ClusterName              string               `toml:"cluster-name"`
    Print                    bool                 `toml:"print-config"`
    Version                  bool
    Stats                    bool
    IndexStats               bool   `toml:"index-stats"`
    StatsDuration            string `toml:"stats-duration"`
    StatsIndexFormat         string `toml:"stats-index-format"`
    Gzip                     bool
    Verbose                  bool
    // Resume/replay behavior (where oplog tailing starts).
    Resume                   bool
    ResumeWriteUnsafe        bool  `toml:"resume-write-unsafe"`
    ResumeFromTimestamp      int64 `toml:"resume-from-timestamp"`
    Replay                   bool
    DroppedDatabases         bool   `toml:"dropped-databases"`
    DroppedCollections       bool   `toml:"dropped-collections"`
    IndexFiles               bool   `toml:"index-files"`
    FileHighlighting         bool   `toml:"file-highlighting"`
    EnablePatches            bool   `toml:"enable-patches"`
    FailFast                 bool   `toml:"fail-fast"`
    IndexOplogTime           bool   `toml:"index-oplog-time"`
    ExitAfterDirectReads     bool   `toml:"exit-after-direct-reads"`
    MergePatchAttr           string `toml:"merge-patch-attribute"`
    // Bulk indexing tuning.
    ElasticMaxConns          int  `toml:"elasticsearch-max-conns"`
    ElasticRetry             bool `toml:"elasticsearch-retry"`
    ElasticMaxDocs           int  `toml:"elasticsearch-max-docs"`
    ElasticMaxBytes          int  `toml:"elasticsearch-max-bytes"`
    ElasticMaxSeconds        int  `toml:"elasticsearch-max-seconds"`
    ElasticClientTimeout     int  `toml:"elasticsearch-client-timeout"`
    ElasticMajorVersion      int
    ElasticMinorVersion      int
    MaxFileSize              int64 `toml:"max-file-size"`
    ConfigFile               string
    Script                   []javascript
    Filter                   []javascript
    Mapping                  []indexTypeMapping
    FileNamespaces           stringargs `toml:"file-namespaces"`
    PatchNamespaces          stringargs `toml:"patch-namespaces"`
    Workers                  stringargs
    Worker                   string
    DirectReadNs             stringargs `toml:"direct-read-namespaces"`
    MapperPluginPath         string     `toml:"mapper-plugin-path"`
    EnableHTTPServer         bool       `toml:"enable-http-server"`
    HTTPServerAddr           string     `toml:"http-server-addr"`
    TimeMachineNamespaces    stringargs `toml:"time-machine-namespaces"`
    TimeMachineIndexPrefix   string     `toml:"time-machine-index-prefix"`
    TimeMachineIndexSuffix   string     `toml:"time-machine-index-suffix"`
    TimeMachineDirectReads   bool       `toml:"time-machine-direct-reads"`
    RoutingNamespaces        stringargs `toml:"routing-namespaces"`
    DeleteStrategy           deleteStrategy `toml:"delete-strategy"`
    DeleteIndexPattern       string         `toml:"delete-index-pattern"`
    FileDownloaders          int            `toml:"file-downloaders"`
}
// String renders the strategy as its integer code (flag.Value interface).
// strconv.Itoa avoids the fmt boxing overhead of Sprintf("%d", ...).
func (arg *deleteStrategy) String() string {
    return strconv.Itoa(int(*arg))
}
// Set parses an integer strategy code from the command line (flag.Value
// interface), restructured with an early return instead of if/else.
func (arg *deleteStrategy) Set(value string) error {
    i, err := strconv.Atoi(value)
    if err != nil {
        return err
    }
    *arg = deleteStrategy(i)
    return nil
}
// String renders the collected argument list (flag.Value interface).
func (args *stringargs) String() string {
    vals := *args
    return fmt.Sprintf("%s", vals)
}
// Set appends one occurrence of a repeatable flag (flag.Value interface).
func (args *stringargs) Set(v string) error {
    *args = append(*args, v)
    return nil
}
// isSharded reports whether a MongoDB config-server URL was supplied,
// which is how monstache detects a sharded cluster deployment.
func (config *configOptions) isSharded() bool {
    sharded := config.MongoConfigURL != ""
    return sharded
}
// afterBulk is the elastic.BulkProcessor completion callback: it logs a
// transport-level failure, then each individual failed request line.
//
// The loop variable previously named "json" shadowed the encoding/json
// package inside the loop body; renamed to avoid the trap.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
    if err != nil {
        errorLog.Printf("Bulk index request with execution ID %d failed: %s", executionId, err)
    }
    if response != nil && response.Errors {
        failed := response.Failed()
        if failed != nil {
            errorLog.Printf("Bulk index request with execution ID %d has %d line failure(s)", executionId, len(failed))
            for i, item := range failed {
                detail, merr := json.Marshal(item)
                if merr != nil {
                    errorLog.Printf("Unable to marshall failed request line #%d: %s", i, merr)
                } else {
                    errorLog.Printf("Failed request line #%d details: %s", i, string(detail))
                }
            }
        }
    }
}
// useTypeFromFuture reports whether the connected Elasticsearch is 6.2 or
// newer, in which case the forward-compatible "_doc" type name is used.
func (config *configOptions) useTypeFromFuture() (use bool) {
    major, minor := config.ElasticMajorVersion, config.ElasticMinorVersion
    use = major > 6 || (major == 6 && minor >= 2)
    return
}
// parseElasticsearchVersion splits a "major.minor.fix" version string and
// records the major and minor components on the config.
//
// Fixes over the previous revision: a version string without a minor
// component (e.g. "6") used to panic with an index-out-of-range, and an
// invalid-major error was clobbered by a subsequent successful minor parse
// (so "0.5" was accepted).
func (config *configOptions) parseElasticsearchVersion(number string) error {
    if number == "" {
        return errors.New("Elasticsearch version cannot be blank")
    }
    versionParts := strings.Split(number, ".")
    if len(versionParts) < 2 {
        return errors.New("Elasticsearch version must include major and minor version")
    }
    majorVersion, err := strconv.Atoi(versionParts[0])
    if err != nil {
        return err
    }
    config.ElasticMajorVersion = majorVersion
    if majorVersion == 0 {
        return errors.New("Invalid Elasticsearch major version 0")
    }
    minorVersion, err := strconv.Atoi(versionParts[1])
    if err != nil {
        return err
    }
    config.ElasticMinorVersion = minorVersion
    return nil
}
// newBulkProcessor builds and starts the primary bulk indexer using the
// configured worker count, batch limits and flush interval.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
    bulkService := client.BulkProcessor().Name("monstache")
    bulkService.Workers(config.ElasticMaxConns)
    bulkService.Stats(config.Stats)
    if config.ElasticMaxDocs != 0 {
        bulkService.BulkActions(config.ElasticMaxDocs)
    }
    if config.ElasticMaxBytes != 0 {
        bulkService.BulkSize(config.ElasticMaxBytes)
    }
    if !config.ElasticRetry {
        // Retries disabled: stop backing off immediately on failure.
        bulkService.Backoff(&elastic.StopBackoff{})
    }
    bulkService.After(afterBulk)
    bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
    return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds and starts the single-worker processor used
// to index monstache's own statistics, flushing after every action.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
    svc := client.BulkProcessor().
        Name("monstache-stats").
        Workers(1).
        Stats(false).
        BulkActions(1).
        After(afterBulk)
    return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL is
// https, in which case the client must use the secure scheme.
func (config *configOptions) needsSecureScheme() bool {
    for _, u := range config.ElasticUrls {
        if strings.HasPrefix(u, "https") {
            return true
        }
    }
    return false
}
// newElasticClient builds the Elasticsearch client from the configured
// URLs, credentials, TLS and retry settings. Sniffing is disabled since
// monstache connects to explicitly listed nodes.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
    var clientOptions []elastic.ClientOptionFunc
    var httpClient *http.Client
    clientOptions = append(clientOptions, elastic.SetErrorLog(errorLog))
    clientOptions = append(clientOptions, elastic.SetSniff(false))
    if config.needsSecureScheme() {
        clientOptions = append(clientOptions, elastic.SetScheme("https"))
    }
    if len(config.ElasticUrls) > 0 {
        clientOptions = append(clientOptions, elastic.SetURL(config.ElasticUrls...))
    } else {
        // Record the default so later code (e.g. the connection test) can
        // read the URL back from the config.
        config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
    }
    if config.Verbose {
        clientOptions = append(clientOptions, elastic.SetTraceLog(traceLog))
    }
    if config.ElasticUser != "" {
        clientOptions = append(clientOptions, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
    }
    if config.ElasticRetry {
        // Exponential backoff between 50ms and 20s (untyped constants make
        // time.Duration(50)*time.Millisecond unnecessarily verbose).
        d1, d2 := 50*time.Millisecond, 20*time.Second
        retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(d1, d2))
        clientOptions = append(clientOptions, elastic.SetRetrier(retrier))
    }
    httpClient, err = config.NewHTTPClient()
    if err != nil {
        return client, err
    }
    clientOptions = append(clientOptions, elastic.SetHttpClient(httpClient))
    return elastic.NewClient(clientOptions...)
}
// testElasticsearchConn probes the first configured URL for the cluster
// version, logs it, and records the parsed major/minor on the config.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) error {
    url := config.ElasticUrls[0]
    number, err := client.ElasticsearchVersion(url)
    if err != nil {
        return err
    }
    infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
    return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all Elasticsearch indexes that could correspond to
// a dropped MongoDB database, honoring any custom index mapping for a
// namespace within that database.
func deleteIndexes(client *elastic.Client, db string, config *configOptions) (err error) {
    pattern := strings.ToLower(db + "*")
    for ns, m := range mapIndexTypes {
        parts := strings.SplitN(ns, ".", 2)
        if parts[0] != db {
            continue
        }
        if m.Index != "" {
            pattern = strings.ToLower(m.Index + "*")
        }
        break
    }
    _, err = client.DeleteIndex(pattern).Do(context.Background())
    return
}
// deleteIndex removes the Elasticsearch index for a dropped MongoDB
// collection, honoring any custom index mapping for the namespace.
func deleteIndex(client *elastic.Client, namespace string, config *configOptions) (err error) {
    index := strings.ToLower(namespace)
    m := mapIndexTypes[namespace]
    if m != nil && m.Index != "" {
        index = strings.ToLower(m.Index)
    }
    _, err = client.DeleteIndex(index).Do(context.Background())
    return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used to
// extract content from the base64 data stored in the "file" field.
func ensureFileMapping(client *elastic.Client) (err error) {
    attachment := map[string]interface{}{
        "attachment": map[string]interface{}{"field": "file"},
    }
    pipeline := map[string]interface{}{
        "description": "Extract file information",
        "processors":  []map[string]interface{}{attachment},
    }
    _, err = client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
    return err
}
// defaultIndexTypeMapping derives the Elasticsearch index and type for an
// op with no custom mapping: the lowercased namespace as index, and either
// the collection name or "_doc" (on Elasticsearch >= 6.2) as type.
func defaultIndexTypeMapping(config *configOptions, op *gtm.Op) *indexTypeMapping {
    typeName := op.GetCollection()
    if config.useTypeFromFuture() {
        typeName = typeFromFuture
    }
    return &indexTypeMapping{
        Namespace: op.Namespace,
        Index:     strings.ToLower(op.Namespace),
        Type:      typeName,
    }
}
// mapIndexType resolves the index/type for an op, starting from the
// defaults and overlaying any configured custom mapping for the namespace.
func mapIndexType(config *configOptions, op *gtm.Op) *indexTypeMapping {
    mapping := defaultIndexTypeMapping(config, op)
    custom := mapIndexTypes[op.Namespace]
    if custom != nil {
        if custom.Index != "" {
            mapping.Index = custom.Index
        }
        if custom.Type != "" {
            mapping.Type = custom.Type
        }
    }
    return mapping
}
// opIDToString converts a MongoDB document _id into the string used as the
// Elasticsearch document _id: ObjectIds become their hex form, and floats
// holding whole numbers drop the fractional part. Everything else falls
// back to default %v formatting.
//
// Rewritten as a type switch with a bound variable to avoid the repeated
// type assertions of the previous revision.
func opIDToString(op *gtm.Op) string {
    switch id := op.Id.(type) {
    case bson.ObjectId:
        return id.Hex()
    case float64:
        if whole := int(id); id == float64(whole) {
            return fmt.Sprintf("%v", whole)
        }
        return fmt.Sprintf("%v", op.Id)
    case float32:
        if whole := int(id); id == float32(whole) {
            return fmt.Sprintf("%v", whole)
        }
        return fmt.Sprintf("%v", op.Id)
    default:
        return fmt.Sprintf("%v", op.Id)
    }
}
// convertSliceJavascript recursively prepares a BSON-derived slice for the
// javascript VM, converting ObjectIds to hex strings along the way.
func convertSliceJavascript(a []interface{}) []interface{} {
    var converted []interface{}
    for _, item := range a {
        var next interface{}
        switch typed := item.(type) {
        case map[string]interface{}:
            next = convertMapJavascript(typed)
        case []interface{}:
            next = convertSliceJavascript(typed)
        case bson.ObjectId:
            next = typed.Hex()
        default:
            next = item
        }
        converted = append(converted, next)
    }
    return converted
}
// convertMapJavascript recursively prepares a BSON-derived document for the
// javascript VM, converting ObjectIds to hex strings along the way.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
    converted := make(map[string]interface{})
    for key, value := range e {
        switch typed := value.(type) {
        case map[string]interface{}:
            converted[key] = convertMapJavascript(typed)
        case []interface{}:
            converted[key] = convertSliceJavascript(typed)
        case bson.ObjectId:
            converted[key] = typed.Hex()
        default:
            converted[key] = value
        }
    }
    return converted
}
// deepExportValue recursively converts a value coming back from the otto
// javascript VM into plain Go values. Export failures are logged and the
// result is left nil for that value.
func deepExportValue(a interface{}) (b interface{}) {
    switch t := a.(type) {
    case otto.Value:
        exported, exportErr := t.Export()
        if t.Class() == "Date" {
            // otto Dates are re-parsed from their string form instead of
            // relying on Export.
            exported, exportErr = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
        }
        if exportErr != nil {
            errorLog.Printf("Error exporting from javascript: %s", exportErr)
            return
        }
        b = deepExportValue(exported)
    case map[string]interface{}:
        b = deepExportMap(t)
    case []interface{}:
        b = deepExportSlice(t)
    default:
        b = a
    }
    return
}
// deepExportSlice applies deepExportValue to every element of a slice.
func deepExportSlice(a []interface{}) []interface{} {
    var out []interface{}
    for _, item := range a {
        out = append(out, deepExportValue(item))
    }
    return out
}
// deepExportMap applies deepExportValue to every value of a map.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
    out := make(map[string]interface{})
    for key, value := range e {
        out[key] = deepExportValue(value)
    }
    return out
}
// mapDataJavascript runs the op's document through the configured
// javascript mapping functions: first the catch-all "" entry, then the
// namespace-specific one. A function returning an object replaces the
// document; a falsy return drops the op by setting op.Data to nil.
func mapDataJavascript(op *gtm.Op) error {
    names := []string{"", op.Namespace}
    for _, name := range names {
        if env := mapEnvs[name]; env != nil {
            arg := convertMapJavascript(op.Data)
            val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
            if err != nil {
                return err
            }
            if strings.ToLower(val.Class()) == "object" {
                data, err := val.Export()
                if err != nil {
                    return err
                } else if data == val {
                    // otto returns the value unchanged when it cannot export it.
                    return errors.New("Exported function must return an object")
                } else {
                    dm := data.(map[string]interface{})
                    op.Data = deepExportMap(dm)
                }
            } else {
                // Non-object return: treat as a boolean keep/drop decision.
                indexed, err := val.ToBoolean()
                if err != nil {
                    return err
                } else if !indexed {
                    op.Data = nil
                    break
                }
            }
        }
    }
    return nil
}
// mapDataGolang invokes the Go mapper plugin on the op's document. The
// plugin may drop the op (op.Data set to nil), replace the document, and
// supply indexing overrides which are stashed under "_meta_monstache" for
// parseIndexMeta to pick up later.
func mapDataGolang(s *mgo.Session, op *gtm.Op) error {
    // Copy the session so the plugin operates on its own mongodb socket.
    session := s.Copy()
    defer session.Close()
    input := &monstachemap.MapperPluginInput{
        Document:   op.Data,
        Namespace:  op.Namespace,
        Database:   op.GetDatabase(),
        Collection: op.GetCollection(),
        Operation:  op.Operation,
        Session:    session,
    }
    output, err := mapperPlugin(input)
    if err != nil {
        return err
    }
    if output != nil {
        if output.Drop {
            // Plugin asked for this op to be ignored.
            op.Data = nil
        } else {
            if output.Passthrough == false {
                op.Data = output.Document
            }
            // Collect any indexing overrides the plugin supplied.
            meta := make(map[string]interface{})
            if output.Index != "" {
                meta["index"] = output.Index
            }
            if output.Type != "" {
                meta["type"] = output.Type
            }
            if output.Routing != "" {
                meta["routing"] = output.Routing
            }
            if output.Parent != "" {
                meta["parent"] = output.Parent
            }
            if output.Version != 0 {
                meta["version"] = output.Version
            }
            if output.VersionType != "" {
                meta["versionType"] = output.VersionType
            }
            if output.Pipeline != "" {
                meta["pipeline"] = output.Pipeline
            }
            if output.RetryOnConflict != 0 {
                meta["retryOnConflict"] = output.RetryOnConflict
            }
            if len(meta) > 0 {
                op.Data["_meta_monstache"] = meta
            }
        }
    }
    return nil
}
// mapData applies the configured document transformation, preferring a Go
// mapper plugin over javascript when a plugin path is configured.
func mapData(session *mgo.Session, config *configOptions, op *gtm.Op) error {
    if config.MapperPluginPath == "" {
        return mapDataJavascript(op)
    }
    return mapDataGolang(session, op)
}
// prepareDataForIndexing strips Mongo/monstache bookkeeping fields from the
// document and, when enabled, stamps it with its oplog timestamp and date.
func prepareDataForIndexing(config *configOptions, op *gtm.Op) {
    data := op.Data
    if config.IndexOplogTime {
        // The high 32 bits of a MongoTimestamp hold the unix seconds.
        seconds := int64(op.Timestamp >> 32)
        when := time.Unix(seconds, 0).UTC()
        data["_oplog_ts"] = op.Timestamp
        data["_oplog_date"] = when.Format("2006/01/02 15:04:05")
    }
    delete(data, "_id")
    delete(data, "_meta_monstache")
}
// parseIndexMeta extracts indexing metadata that a mapping script or plugin
// attached to the document under "_meta_monstache". The oplog timestamp is
// used as an external version so replayed events cannot regress a document.
//
// Rewritten with type-switch bindings instead of repeated type assertions.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
    meta = &indexingMeta{
        Version:     int64(op.Timestamp),
        VersionType: "external",
    }
    m, ok := op.Data["_meta_monstache"]
    if !ok {
        return meta
    }
    switch attrs := m.(type) {
    case map[string]interface{}:
        meta.load(attrs)
    case otto.Value:
        ex, err := attrs.Export()
        if err == nil && ex != m {
            switch exported := ex.(type) {
            case map[string]interface{}:
                meta.load(exported)
            default:
                errorLog.Println("Invalid indexing metadata")
            }
        }
    default:
        errorLog.Println("Invalid indexing metadata")
    }
    return meta
}
// addFileContent looks up the GridFS file the op refers to and stores its
// base64 encoded content under op.Data["file"]. Files larger than
// MaxFileSize are skipped (the field stays empty) with an info log line.
func addFileContent(s *mgo.Session, op *gtm.Op, config *configOptions) (err error) {
    // Copy the session for this download's own mongodb socket.
    session := s.Copy()
    defer session.Close()
    op.Data["file"] = ""
    var gridByteBuffer bytes.Buffer
    db, bucket :=
        session.DB(op.GetDatabase()),
        strings.SplitN(op.GetCollection(), ".", 2)[0]
    encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
    file, err := db.GridFS(bucket).OpenId(op.Id)
    if err != nil {
        return
    }
    defer file.Close()
    if config.MaxFileSize > 0 && file.Size() > config.MaxFileSize {
        infoLog.Printf("File %s md5(%s) exceeds max file size. file content omitted.",
            file.Name(), file.MD5())
        return
    }
    if _, err = io.Copy(encoder, file); err != nil {
        return
    }
    if err = encoder.Close(); err != nil {
        return
    }
    // Buffer.String is the direct form of string(buf.Bytes()).
    op.Data["file"] = gridByteBuffer.String()
    return
}
// notMonstache rejects ops against monstache's own bookkeeping database.
func notMonstache(op *gtm.Op) bool {
    return op.GetDatabase() != "monstache"
}

// notChunks rejects GridFS chunk collections (*.chunks).
func notChunks(op *gtm.Op) bool {
    return !chunksRegex.MatchString(op.GetCollection())
}

// notConfig rejects the MongoDB "config" database (sharding metadata).
func notConfig(op *gtm.Op) bool {
    return op.GetDatabase() != "config"
}

// notSystem rejects MongoDB system.* collections.
func notSystem(op *gtm.Op) bool {
    return !systemsRegex.MatchString(op.GetCollection())
}
// filterWithRegex builds an op filter that passes only namespaces matching
// the supplied regular expression. The regex is compiled once, up front.
func filterWithRegex(regex string) gtm.OpFilter {
    validNameSpace := regexp.MustCompile(regex)
    return func(op *gtm.Op) bool {
        return validNameSpace.MatchString(op.Namespace)
    }
}
// filterWithPlugin returns an OpFilter that delegates the keep/drop
// decision for inserts and updates to the loaded plugin Filter function.
// Other operations, and documents with no data, always pass through.
// A plugin error is logged and the operation dropped.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:   op.Data,
			Namespace:  op.Namespace,
			Database:   op.GetDatabase(),
			Collection: op.GetCollection(),
			Operation:  op.Operation,
		}
		keep, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return keep
	}
}
// filterWithScript returns an OpFilter that runs inserts and updates
// through the configured javascript filter environments: first the
// catch-all environment (empty namespace), then the namespace-specific
// one. The first environment that rejects the document wins. Script
// errors are logged and count as a rejection.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		keep := true
		for _, ns := range []string{"", op.Namespace} {
			env := filterEnvs[ns]
			if env != nil {
				keep = false
				arg := convertMapJavascript(op.Data)
				val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
				if err != nil {
					errorLog.Println(err)
				} else if ok, berr := val.ToBoolean(); berr == nil {
					keep = ok
				} else {
					errorLog.Println(berr)
				}
			}
			if !keep {
				break
			}
		}
		return keep
	}
}
// filterInverseWithRegex returns an OpFilter accepting only operations
// whose namespace does NOT match the given regular expression.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	matcher := regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return !matcher.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates the TTL index used to expire cluster heartbeat
// documents, so a stalled process gives up its slot automatically.
func ensureClusterTTL(session *mgo.Session) error {
	index := mgo.Index{
		Key:         []string{"expireAt"},
		Background:  true,
		ExpireAfter: 30 * time.Second,
	}
	return session.DB("monstache").C("cluster").EnsureIndex(index)
}
// enableProcess attempts to become the active process in a monstache
// cluster by inserting a heartbeat document keyed by the resume name.
// It returns true when this process won the slot and false (without
// error) when another process currently holds it.
func enableProcess(s *mgo.Session, config *configOptions) (bool, error) {
	session := s.Copy()
	defer session.Close()
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	doc := map[string]interface{}{
		"_id":      config.ResumeName,
		"expireAt": time.Now().UTC(),
		"pid":      os.Getpid(),
		"host":     host,
	}
	col := session.DB("monstache").C("cluster")
	err = col.Insert(doc)
	if err == nil {
		return true, nil
	}
	if mgo.IsDup(err) {
		// Another process already owns the cluster slot.
		return false, nil
	}
	return false, err
}
// resetClusterState releases this process's cluster slot by removing its
// heartbeat document.
func resetClusterState(session *mgo.Session, config *configOptions) error {
	cluster := session.DB("monstache").C("cluster")
	return cluster.RemoveId(config.ResumeName)
}
// ensureEnabled checks whether this process still owns the cluster slot
// (matching pid and hostname in the heartbeat document) and, when it
// does, refreshes the document's expiration time.
func ensureEnabled(s *mgo.Session, config *configOptions) (enabled bool, err error) {
	session := s.Copy()
	defer session.Close()
	col := session.DB("monstache").C("cluster")
	doc := make(map[string]interface{})
	if err = col.FindId(config.ResumeName).One(doc); err != nil {
		return
	}
	if doc["pid"] == nil || doc["host"] == nil {
		return
	}
	pid := doc["pid"].(int)
	host := doc["host"].(string)
	var hostname string
	if hostname, err = os.Hostname(); err != nil {
		return
	}
	enabled = pid == os.Getpid() && host == hostname
	if enabled {
		// Keep the heartbeat alive so the TTL index does not expire it.
		err = col.UpdateId(config.ResumeName,
			bson.M{"$set": bson.M{"expireAt": time.Now().UTC()}})
	}
	return
}
// resumeWork restarts the oplog tail from the timestamp saved under the
// resume name, when one exists, and then resumes the context.
func resumeWork(ctx *gtm.OpCtxMulti, session *mgo.Session, config *configOptions) {
	doc := make(map[string]interface{})
	// Best effort: a missing resume document simply means no prior timestamp.
	session.DB("monstache").C("monstache").FindId(config.ResumeName).One(doc)
	if doc["ts"] != nil {
		ctx.Since(doc["ts"].(bson.MongoTimestamp))
	}
	ctx.Resume()
}
// saveTimestamp records the most recently processed oplog timestamp under
// the configured resume name so a later run can pick up where this one
// left off. Short socket/sync timeouts keep a slow server from stalling
// the save; ResumeWriteUnsafe additionally disables write acknowledgment.
func saveTimestamp(s *mgo.Session, ts bson.MongoTimestamp, config *configOptions) error {
	session := s.Copy()
	defer session.Close()
	session.SetSocketTimeout(5 * time.Second)
	session.SetSyncTimeout(5 * time.Second)
	if config.ResumeWriteUnsafe {
		session.SetSafe(nil)
	}
	col := session.DB("monstache").C("monstache")
	_, err := col.UpsertId(config.ResumeName, bson.M{"$set": bson.M{"ts": ts}})
	return err
}
// parseCommandLineFlags registers every command line option against the
// receiver's fields and parses os.Args. Fixes several help-text defects:
// the time-machine-namespace description was a copy-paste of the direct
// read one, plus typos (Stategy, restric, preprend, "an json-patch",
// "into the any").
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoPemFile, "mongo-pem-file", "", "Path to a PEM file for secure connections to MongoDB")
	flag.BoolVar(&config.MongoValidatePemFile, "mongo-validate-pem-file", true, "Set to boolean false to not validate the MongoDB PEM file")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.MongoCursorTimeout, "mongo-cursor-timeout", "", "Override the duration before a cursor timeout occurs when tailing the oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to use gzip for requests to elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include a json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of time machine namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to prepend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into any time machine indexes")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Strategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restrict the scope of stateless deletes")
	flag.Parse()
	return config
}
// loadIndexTypes registers each configured namespace-to-index/type
// mapping. A mapping must name a namespace and at least one of index or
// type; anything else panics. Index names are lowercased, as required by
// Elasticsearch.
func (config *configOptions) loadIndexTypes() {
	if config.Mapping == nil {
		return
	}
	for _, m := range config.Mapping {
		if m.Namespace == "" || (m.Index == "" && m.Type == "") {
			panic("Mappings must specify namespace and at least one of index and type")
		}
		mapIndexTypes[m.Namespace] = &indexTypeMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
			Type:      m.Type,
		}
	}
}
// loadFilters compiles each configured javascript filter (given inline or
// by file path, but not both) and registers its environment by namespace.
// The script's module.exports must be a function; any violation panics
// since filters are loaded at startup.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script == "" && s.Path == "" {
			panic("Filters must specify path or script attributes")
		}
		if s.Path != "" && s.Script != "" {
			panic("Filters must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Panicf("Unable to load filter at path %s: %s", s.Path, err)
			}
			s.Script = string(script)
		}
		if _, exists := filterEnvs[s.Namespace]; exists {
			errorLog.Panicf("Multiple filters with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			panic(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			panic(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			panic(err)
		}
		if !val.IsFunction() {
			panic("module.exports must be a function")
		}
		filterEnvs[s.Namespace] = env
	}
}
// loadScripts compiles each configured javascript mapping script (given
// inline or by file path, but not both) and registers its environment by
// namespace. Scripts flagged with routing also mark their namespace as
// routing-enabled. module.exports must be a function; violations panic.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script == "" && s.Path == "" {
			panic("Scripts must specify path or script")
		}
		if s.Path != "" && s.Script != "" {
			panic("Scripts must specify path or script but not both")
		}
		if s.Path != "" {
			script, err := ioutil.ReadFile(s.Path)
			if err != nil {
				errorLog.Panicf("Unable to load script at path %s: %s", s.Path, err)
			}
			s.Script = string(script)
		}
		if _, exists := mapEnvs[s.Namespace]; exists {
			errorLog.Panicf("Multiple scripts with namespace: %s", s.Namespace)
		}
		env := &executionEnv{
			VM:     otto.New(),
			Script: s.Script,
		}
		if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
			panic(err)
		}
		if _, err := env.VM.Run(env.Script); err != nil {
			panic(err)
		}
		val, err := env.VM.Run("module.exports")
		if err != nil {
			panic(err)
		}
		if !val.IsFunction() {
			panic("module.exports must be a function")
		}
		mapEnvs[s.Namespace] = env
		if s.Routing {
			routingNamespaces[s.Namespace] = true
		}
	}
}
// loadPlugins opens the Go plugin at MapperPluginPath, when configured,
// and wires up its exported Map (required) and Filter (optional)
// functions. Panics if the plugin cannot be opened or the symbols have
// the wrong signatures.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		errorLog.Panicf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err)
	}
	mapper, err := p.Lookup("Map")
	if err != nil {
		errorLog.Panicf("Unable to find symbol 'Map' in mapper plugin: %s", err)
	}
	switch m := mapper.(type) {
	case func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error):
		mapperPlugin = m
	default:
		errorLog.Panicf("Plugin 'Map' function must be typed %T", mapperPlugin)
	}
	// The Filter symbol is optional; a failed lookup simply disables it.
	filter, err := p.Lookup("Filter")
	if err == nil {
		switch f := filter.(type) {
		case func(*monstachemap.MapperPluginInput) (bool, error):
			// Bug fix: the original asserted the 'mapper' symbol here,
			// whose signature differs, causing a panic whenever a plugin
			// exported a valid Filter function.
			filterPlugin = f
		default:
			errorLog.Panicf("Plugin 'Filter' function must be typed %T", filterPlugin)
		}
	}
	return config
}
// loadConfigFile overlays settings from the TOML configuration file, when
// one was provided, onto the receiver and returns it. Command line flags
// win: each option is only taken from the file while it is still unset
// (zero valued) on the receiver. Options whose default is true (e.g.
// dropped-databases, PEM validation) work in reverse: the file may only
// switch them off.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Defaults that differ from Go zero values are seeded here so that
		// their absence from the TOML file keeps the documented default.
		var tomlConfig = configOptions{
			DroppedDatabases:     true,
			DroppedCollections:   true,
			MongoDialSettings:    mongoDialSettings{Timeout: -1},
			MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
			GtmSettings:          gtmDefaultSettings(),
		}
		if _, err := toml.DecodeFile(config.ConfigFile, &tomlConfig); err != nil {
			panic(err)
		}
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoPemFile == "" {
			config.MongoPemFile = tomlConfig.MongoPemFile
		}
		if config.MongoValidatePemFile && !tomlConfig.MongoValidatePemFile {
			config.MongoValidatePemFile = false
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.MongoCursorTimeout == "" {
			config.MongoCursorTimeout = tomlConfig.MongoCursorTimeout
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexFiles && tomlConfig.IndexFiles {
			config.IndexFiles = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		// NOTE(review): the file's resume-name is only honored when resume
		// is already enabled at this point — confirm this ordering is
		// intended before relying on resume-name from the config file.
		if config.Resume && config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
			}
			config.loadGridFsConfig()
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
			}
			config.loadPatchNamespaces()
		}
		// Namespace lists from the file also populate their lookup maps.
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		// These settings come only from the file; flags cannot set them.
		config.MongoDialSettings = tomlConfig.MongoDialSettings
		config.MongoSessionSettings = tomlConfig.MongoSessionSettings
		config.GtmSettings = tomlConfig.GtmSettings
		config.Logs = tomlConfig.Logs
		// Scripts, filters and mappings are compiled/registered from the
		// decoded file config (they register into package-level maps).
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadIndexTypes()
	}
	return config
}
// newLogger builds a size- and age-rotated logger that writes to path.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	logger := &lumberjack.Logger{Filename: path}
	logger.MaxSize = 500 // megabytes
	logger.MaxBackups = 5
	logger.MaxAge = 28 // days
	return logger
}
// setupLogging routes the log streams either to a Graylog server (when an
// address is configured) or to rotating log files for whichever paths are
// present in the configuration; unset paths keep their default output.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr != "" {
		gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
		if err != nil {
			errorLog.Fatalf("Error creating gelf writer: %s", err)
		}
		infoLog.SetOutput(gelfWriter)
		errorLog.SetOutput(gelfWriter)
		traceLog.SetOutput(gelfWriter)
		statsLog.SetOutput(gelfWriter)
		return config
	}
	logs := config.Logs
	if logs.Info != "" {
		infoLog.SetOutput(config.newLogger(logs.Info))
	}
	if logs.Error != "" {
		errorLog.SetOutput(config.newLogger(logs.Error))
	}
	if logs.Trace != "" {
		traceLog.SetOutput(config.newLogger(logs.Trace))
	}
	if logs.Stats != "" {
		statsLog.SetOutput(config.newLogger(logs.Stats))
	}
	return config
}
// loadRoutingNamespaces marks each configured routing namespace in the
// package-level lookup map.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces marks each configured time machine namespace
// in the package-level lookup map.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces marks each configured patch namespace in the
// package-level lookup map.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig marks each configured GridFS file namespace in the
// package-level lookup map.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump pretty-prints the effective configuration to the info log.
func (config *configOptions) dump() {
	// Name the result "out" rather than "json" so the encoding/json
	// package identifier is not shadowed inside this function.
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
// parseMongoURL strips any ssl=true option from the connection string and
// enables TLS in the dial settings instead, because the mgo driver does
// not support that option in the connection string.
func (config *configOptions) parseMongoURL(inURL string) (outURL string) {
	const queryDelim string = "?"
	outURL = inURL
	parts := strings.SplitN(outURL, queryDelim, 2)
	if len(parts) != 2 {
		return
	}
	host, query := parts[0], parts[1]
	sslOpt := regexp.MustCompile(`ssl=true&?|&ssl=true$`)
	stripped := sslOpt.ReplaceAllString(query, "")
	if stripped == query {
		// No ssl option present; leave the URL untouched.
		return
	}
	config.MongoDialSettings.Ssl = true
	if stripped == "" {
		outURL = host
	} else {
		outURL = strings.Join([]string{host, stripped}, queryDelim)
	}
	return
}
// setDefaults fills in the documented default for every option still
// unset after flags and the config file have been applied, derives the
// resume name from the cluster/worker names, and normalizes the MongoDB
// URLs (stripping ssl=true into the dial settings).
func (config *configOptions) setDefaults() *configOptions {
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	if config.ClusterName != "" {
		// Cluster mode implies resume; the resume name is derived from
		// the cluster name plus, when present, the worker name.
		// (The original re-tested ClusterName != "" here redundantly.)
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.ResumeName == "" {
		if config.Worker != "" {
			config.ResumeName = config.Worker
		} else {
			config.ResumeName = resumeNameDefault
		}
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	if config.ElasticMaxSeconds == 0 {
		config.ElasticMaxSeconds = 1
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.MongoURL != "" {
		config.MongoURL = config.parseMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = config.parseMongoURL(config.MongoConfigURL)
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 {
		config.FileDownloaders = fileDownloadersDefault
	}
	return config
}
// getAuthURL prepends the credential portion of the configured MongoURL,
// when one is present, to the given URL.
func (config *configOptions) getAuthURL(inURL string) string {
	cred := strings.SplitN(config.MongoURL, "@", 2)
	if len(cred) != 2 {
		return inURL
	}
	return cred[0] + "@" + inURL
}
// configureMongo applies session-level settings: primary read preference
// plus any socket/sync timeouts given in the configuration (-1 keeps the
// driver default).
func (config *configOptions) configureMongo(session *mgo.Session) {
	session.SetMode(mgo.Primary, true)
	settings := config.MongoSessionSettings
	if settings.SocketTimeout != -1 {
		session.SetSocketTimeout(time.Duration(settings.SocketTimeout) * time.Second)
	}
	if settings.SyncTimeout != -1 {
		session.SetSyncTimeout(time.Duration(settings.SyncTimeout) * time.Second)
	}
}
// dialMongo connects to MongoDB at inURL. Without TLS it dials directly,
// honoring a configured dial timeout. With TLS (enabled by the dial
// settings or the presence of a PEM file) it installs a custom DialServer
// that wraps each connection in tls.Dial, optionally pinning the PEM CA
// and optionally skipping certificate validation.
func (config *configOptions) dialMongo(inURL string) (*mgo.Session, error) {
	useTLS := config.MongoDialSettings.Ssl || config.MongoPemFile != ""
	if !useTLS {
		if config.MongoDialSettings.Timeout != -1 {
			return mgo.DialWithTimeout(inURL,
				time.Duration(config.MongoDialSettings.Timeout)*time.Second)
		}
		return mgo.Dial(inURL)
	}
	tlsConfig := &tls.Config{}
	if config.MongoPemFile != "" {
		ca, err := ioutil.ReadFile(config.MongoPemFile)
		if err != nil {
			return nil, err
		}
		certs := x509.NewCertPool()
		certs.AppendCertsFromPEM(ca)
		tlsConfig.RootCAs = certs
	}
	if !config.MongoValidatePemFile {
		// Validation explicitly disabled by configuration.
		tlsConfig.InsecureSkipVerify = true
	}
	dialInfo, err := mgo.ParseURL(inURL)
	if err != nil {
		return nil, err
	}
	dialInfo.Timeout = 10 * time.Second
	if config.MongoDialSettings.Timeout != -1 {
		dialInfo.Timeout = time.Duration(config.MongoDialSettings.Timeout) * time.Second
	}
	dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
		conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
		if err != nil {
			errorLog.Printf("Unable to dial mongodb: %s", err)
		}
		return conn, err
	}
	session, err := mgo.DialWithInfo(dialInfo)
	if err == nil {
		session.SetSyncTimeout(1 * time.Minute)
		session.SetSocketTimeout(1 * time.Minute)
	}
	return session, err
}
// NewHTTPClient builds the http client used to talk to Elasticsearch,
// honoring the PEM file, certificate validation, gzip and client timeout
// options.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err != nil {
			return client, err
		}
		certs := x509.NewCertPool()
		certs.AppendCertsFromPEM(ca)
		tlsConfig.RootCAs = certs
	}
	if !config.ElasticValidatePemFile {
		// Certificate validation explicitly disabled by configuration.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	return client, err
}
// doDrop reacts to database and collection drop operations by deleting
// the corresponding Elasticsearch indexes (when the relevant dropped-*
// option is enabled) and then best-effort removing stored metadata.
func doDrop(mongo *mgo.Session, elastic *elastic.Client, op *gtm.Op, config *configOptions) (err error) {
	if db, dropped := op.IsDropDatabase(); dropped {
		if !config.DroppedDatabases {
			return
		}
		if err = deleteIndexes(elastic, db, config); err == nil {
			if e := dropDBMeta(mongo, db); e != nil {
				errorLog.Printf("Unable to delete metadata for db: %s", e)
			}
		}
		return
	}
	if col, dropped := op.IsDropCollection(); dropped {
		if !config.DroppedCollections {
			return
		}
		namespace := op.GetDatabase() + "." + col
		if err = deleteIndex(elastic, namespace, config); err == nil {
			if e := dropCollectionMeta(mongo, namespace); e != nil {
				errorLog.Printf("Unable to delete metadata for collection: %s", e)
			}
		}
	}
	return
}
// hasFileContent reports whether GridFS content should be ingested for
// this operation: file indexing must be on and the namespace registered
// as a file namespace.
func hasFileContent(op *gtm.Op, config *configOptions) (ingest bool) {
	if config.IndexFiles {
		ingest = fileNamespaces[op.Namespace]
	}
	return
}
// addPatch maintains a history of JSON merge patches on the indexed
// document under the attribute named by config.MergePatchAttr.
//
// For updates, the previously indexed document is fetched from
// Elasticsearch, any existing patch history extracted, and the merge
// patch between the old and new bodies appended with an incremented
// version and the oplog time in seconds. For inserts, the complete
// document is stored as patch version 1. Direct reads and operations
// without a timestamp are skipped.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexTypeMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp == 0 {
		return nil
	}
	if op.IsUpdate() {
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		service.Type(indexType.Type)
		// Per-document metadata overrides the computed index mapping.
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Type != "" {
			service.Type(meta.Type)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(*resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; normalize the
						// ts and v history fields back to ints.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff the documents without the patch history itself.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								// The oplog seconds live in the high 32 bits
								// of the MongoDB timestamp.
								merge["ts"] = op.Timestamp >> 32
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: record the full document as the first patch entry,
		// unless the attribute was already populated upstream.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp >> 32
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing adds a bulk index request for the operation's document,
// applying per-document indexing metadata (index, type, routing, parent,
// version, pipeline, retry-on-conflict) on top of the computed namespace
// mapping. Patch history is recorded first when enabled for the
// namespace. When the namespace is configured for the time machine
// feature, a second historical copy of the document is also indexed into
// a date-stamped index.
func doIndexing(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	meta := parseIndexMeta(op)
	prepareDataForIndexing(config, op)
	objectID, indexType := opIDToString(op), mapIndexType(config, op)
	if config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			// Patch failures are logged but do not block indexing.
			if e := addPatch(config, client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	req := elastic.NewBulkIndexRequest()
	req.Id(objectID)
	req.Index(indexType.Index)
	req.Type(indexType.Type)
	req.Doc(op.Data)
	// Per-document metadata overrides the computed namespace mapping.
	if meta.Index != "" {
		req.Index(meta.Index)
	}
	if meta.Type != "" {
		req.Type(meta.Type)
	}
	if meta.Routing != "" {
		req.Routing(meta.Routing)
	}
	if meta.Parent != "" {
		req.Parent(meta.Parent)
	}
	if meta.Version != 0 {
		req.Version(meta.Version)
	}
	if meta.VersionType != "" {
		req.VersionType(meta.VersionType)
	}
	if meta.Pipeline != "" {
		req.Pipeline(meta.Pipeline)
	}
	if meta.RetryOnConflict != 0 {
		req.RetryOnConflict(meta.RetryOnConflict)
	}
	if ingestAttachment {
		req.Pipeline("attachment")
	}
	bulk.Add(req)
	if meta.shouldSave(config) {
		// Persist routing/index metadata so later deletes can find the doc.
		if e := setIndexMeta(mongo, op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// tmIndex derives the date-stamped time machine index name
			// from the base index plus the configured prefix/suffix.
			tmIndex := func(idx string) string {
				pre, suf := config.TimeMachineIndexPrefix, config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			// Work on a copy so the historical doc can be annotated
			// without touching the primary document.
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			// When IndexOplogTime is on, prepareDataForIndexing already
			// stamped these fields into op.Data; add them here otherwise.
			if config.IndexOplogTime == false {
				secs := int64(op.Timestamp >> 32)
				t := time.Unix(secs, 0).UTC()
				data["_oplog_ts"] = op.Timestamp
				data["_oplog_date"] = t.Format("2006/01/02 15:04:05")
			}
			req = elastic.NewBulkIndexRequest()
			req.Index(tmIndex(indexType.Index))
			req.Type(indexType.Type)
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Type != "" {
				req.Type(meta.Type)
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			bulk.Add(req)
		}
	}
	return
}
// doIndex runs the mapping stage for the op and then either indexes the
// mapped data or, when mapping removed the data for an update op, deletes the
// previously indexed document.
func doIndex(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	if err = mapData(mongo, config, op); err != nil {
		return
	}
	switch {
	case op.Data != nil:
		err = doIndexing(config, mongo, bulk, client, op, ingestAttachment)
	case op.IsUpdate():
		doDelete(config, client, mongo, bulk, op)
	}
	return
}
// doIndexStats enqueues a document describing the current bulk indexing
// statistics (plus host, pid and timestamp) onto the stats bulk processor.
// A hostname lookup failure is still reported via the returned error, but the
// stats document is indexed regardless.
func doIndexStats(config *configOptions, bulkStats *elastic.BulkProcessor, stats elastic.BulkProcessorStats) (err error) {
	now := time.Now().UTC()
	doc := map[string]interface{}{
		"Timestamp": now.Format("2006-01-02T15:04:05"),
		"Pid":       os.Getpid(),
		"Stats":     stats,
	}
	var hostname string
	if hostname, err = os.Hostname(); err == nil {
		doc["Host"] = hostname
	}
	typeName := "stats"
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	}
	index := strings.ToLower(now.Format(config.StatsIndexFormat))
	req := elastic.NewBulkIndexRequest().Index(index).Type(typeName)
	req.Doc(doc)
	bulkStats.Add(req)
	return
}
// dropDBMeta removes every stored indexing-metadata document belonging to the
// given MongoDB database.
func dropDBMeta(session *mgo.Session, db string) (err error) {
	metaColl := session.DB("monstache").C("meta")
	_, err = metaColl.RemoveAll(bson.M{"db": db})
	return
}
// dropCollectionMeta removes every stored indexing-metadata document
// belonging to the given namespace (db.collection).
func dropCollectionMeta(session *mgo.Session, namespace string) (err error) {
	metaColl := session.DB("monstache").C("meta")
	_, err = metaColl.RemoveAll(bson.M{"namespace": namespace})
	return
}
// load populates the meta fields from a map of scripted metadata attributes.
// Every value is stringified via %v; version and retryOnConflict are then
// parsed as integers and silently skipped if unparsable. Absent keys leave
// the corresponding field untouched.
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
	// str returns the stringified attribute and whether the key was present.
	str := func(key string) (string, bool) {
		if v, ok := metaAttrs[key]; ok {
			return fmt.Sprintf("%v", v), true
		}
		return "", false
	}
	if s, ok := str("routing"); ok {
		meta.Routing = s
	}
	if s, ok := str("index"); ok {
		meta.Index = s
	}
	if s, ok := str("type"); ok {
		meta.Type = s
	}
	if s, ok := str("parent"); ok {
		meta.Parent = s
	}
	if s, ok := str("version"); ok {
		if version, err := strconv.ParseInt(s, 10, 64); err == nil {
			meta.Version = version
		}
	}
	if s, ok := str("versionType"); ok {
		meta.VersionType = s
	}
	if s, ok := str("pipeline"); ok {
		meta.Pipeline = s
	}
	if s, ok := str("retryOnConflict"); ok {
		if roc, err := strconv.Atoi(s); err == nil {
			meta.RetryOnConflict = roc
		}
	}
}
// shouldSave reports whether this metadata needs to be persisted to MongoDB.
// Only the stateful delete strategy reads metadata back (see getIndexMeta),
// and only non-default field values are worth saving.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing/index/type/parent/pipeline metadata for a
// document into monstache.meta, keyed by "namespace.id", so a later delete
// (stateful strategy) can target the correct index and routing.
func setIndexMeta(session *mgo.Session, namespace, id string, meta *indexingMeta) error {
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	metaColl := session.DB("monstache").C("meta")
	_, err := metaColl.UpsertId(metaID, bson.M{"$set": doc})
	return err
}
// getIndexMeta fetches (and consumes) the indexing metadata previously stored
// by setIndexMeta for the given document. The metadata document is removed
// after the read because it is only needed once, for the delete that follows
// (stateful delete strategy — see doDelete).
//
// Fix: field extraction now uses comma-ok type assertions. The previous bare
// assertions (e.g. doc["routing"].(string)) panicked if a stored value was
// present but not a string; malformed fields are now left at their zero value.
func getIndexMeta(session *mgo.Session, namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	col := session.DB("monstache").C("meta")
	doc := make(map[string]interface{})
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	// Lookup errors (e.g. not found) intentionally leave doc empty so the
	// caller falls back to default index/type.
	col.FindId(metaID).One(doc)
	if routing, ok := doc["routing"].(string); ok {
		meta.Routing = routing
	}
	if index, ok := doc["index"].(string); ok {
		// Elasticsearch index names are always lowercase.
		meta.Index = strings.ToLower(index)
	}
	if typeName, ok := doc["type"].(string); ok {
		meta.Type = typeName
	}
	if parent, ok := doc["parent"].(string); ok {
		meta.Parent = parent
	}
	if pipeline, ok := doc["pipeline"].(string); ok {
		meta.Pipeline = pipeline
	}
	// Metadata is single-use: drop it now that it has been read.
	col.RemoveId(metaID)
	return
}
// loadBuiltinFunctions registers the built-in MongoDB query helpers (findId,
// findOne, find) into every mapping script VM. Registration failure is fatal.
func loadBuiltinFunctions(s *mgo.Session) {
	for ns, env := range mapEnvs {
		confs := []*findConf{
			{session: s, name: "findId", vm: env.VM, ns: ns, byId: true},
			{session: s, name: "findOne", vm: env.VM, ns: ns},
			{session: s, name: "find", vm: env.VM, ns: ns, multi: true},
		}
		for _, fa := range confs {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				panic(err)
			}
		}
	}
}
// setDatabase applies the optional "database" option; a present but
// non-string value is an error, an absent key is ignored.
func (fc *findCall) setDatabase(topts map[string]interface{}) error {
	raw, present := topts["database"]
	if !present {
		return nil
	}
	name, valid := raw.(string)
	if !valid {
		return errors.New("Invalid database option value")
	}
	fc.db = name
	return nil
}
// setCollection applies the optional "collection" option; a present but
// non-string value is an error, an absent key is ignored.
func (fc *findCall) setCollection(topts map[string]interface{}) error {
	raw, present := topts["collection"]
	if !present {
		return nil
	}
	name, valid := raw.(string)
	if !valid {
		return errors.New("Invalid collection option value")
	}
	fc.col = name
	return nil
}
// setSelect applies the optional "select" projection option: a map of field
// name to include/exclude flag. Non-int64 flag values are silently skipped;
// a present but non-map option is an error.
func (fc *findCall) setSelect(topts map[string]interface{}) error {
	raw, present := topts["select"]
	if !present {
		return nil
	}
	fields, valid := raw.(map[string]interface{})
	if !valid {
		return errors.New("Invalid select option value")
	}
	for field, flag := range fields {
		if n, isInt := flag.(int64); isInt {
			fc.sel[field] = int(n)
		}
	}
	return nil
}
// setSort applies the optional "sort" option (a list of sort keys); a present
// but non-[]string value is an error, an absent key is ignored.
func (fc *findCall) setSort(topts map[string]interface{}) error {
	raw, present := topts["sort"]
	if !present {
		return nil
	}
	order, valid := raw.([]string)
	if !valid {
		return errors.New("Invalid sort option value")
	}
	fc.sort = order
	return nil
}
// setLimit applies the optional "limit" option; a present but non-int64 value
// is an error, an absent key is ignored.
func (fc *findCall) setLimit(topts map[string]interface{}) error {
	raw, present := topts["limit"]
	if !present {
		return nil
	}
	limit, valid := raw.(int64)
	if !valid {
		return errors.New("Invalid limit option value")
	}
	fc.limit = int(limit)
	return nil
}
// setQuery exports the JavaScript query argument to a Go value and restores
// any hex ObjectId strings inside it to bson.ObjectId values.
func (fc *findCall) setQuery(v otto.Value) error {
	exported, err := v.Export()
	if err != nil {
		return err
	}
	fc.query = fc.restoreIds(exported)
	return nil
}
// setOptions applies the optional second argument of a find call. For every
// variant it accepts database, collection and select; multi-result calls
// additionally accept sort and limit. A non-exportable or non-map argument is
// rejected.
func (fc *findCall) setOptions(v otto.Value) error {
	exported, exportErr := v.Export()
	if exportErr != nil {
		return errors.New("Invalid options argument")
	}
	topts, isMap := exported.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid options argument")
	}
	setters := []func(map[string]interface{}) error{
		fc.setDatabase,
		fc.setCollection,
		fc.setSelect,
	}
	if fc.isMulti() {
		setters = append(setters, fc.setSort, fc.setLimit)
	}
	for _, apply := range setters {
		if err := apply(topts); err != nil {
			return err
		}
	}
	return nil
}
// setDefaults seeds the call's database and collection from the configured
// namespace ("db.collection").
//
// Fix: the namespace is now split with SplitN(ns, ".", 2) — matching how
// setIndexMeta derives the database name — so collection names that contain
// dots keep their full name, and a namespace without a dot no longer panics
// on the missing second element.
func (fc *findCall) setDefaults() {
	if fc.config.ns == "" {
		return
	}
	parts := strings.SplitN(fc.config.ns, ".", 2)
	fc.db = parts[0]
	if len(parts) > 1 {
		fc.col = parts[1]
	}
}
// getCollection resolves the target mgo collection from the call's current
// database and collection settings.
func (fc *findCall) getCollection() *mgo.Collection {
	return fc.session.DB(fc.db).C(fc.col)
}
// getVM returns the otto JavaScript VM this find call converts results into.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the JavaScript-facing name of this find variant
// (find, findOne or findId), used in error logs.
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether this call returns multiple documents (the "find"
// variant) rather than a single one.
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// logError records a failure of this find call, tagged with the function name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts every string
// that looks like a hex ObjectId into a bson.ObjectId, recursing through
// slices and maps. All other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) interface{} {
	switch val := v.(type) {
	case string:
		if bson.IsObjectIdHex(val) {
			return bson.ObjectIdHex(val)
		}
		return v
	case []interface{}:
		var restored []interface{}
		for _, item := range val {
			restored = append(restored, fc.restoreIds(item))
		}
		return restored
	case map[string]interface{}:
		restored := make(map[string]interface{})
		for key, item := range val {
			restored[key] = fc.restoreIds(item)
		}
		return restored
	default:
		return v
	}
}
// execute runs the prepared query against the resolved collection and converts
// the result into an otto JavaScript value. Multi-result calls ("find") honor
// the optional limit/sort/select settings and yield an array of documents;
// single-result calls yield one document, looked up by primary key when byId
// is set (the "findId" variant).
func (fc *findCall) execute() (r otto.Value, err error) {
	col := fc.getCollection()
	if fc.isMulti() {
		var docs []map[string]interface{}
		mq := col.Find(fc.query)
		// Apply the optional query modifiers only when they were supplied.
		if fc.limit > 0 {
			mq.Limit(fc.limit)
		}
		if len(fc.sort) > 0 {
			mq.Sort(fc.sort...)
		}
		if len(fc.sel) > 0 {
			mq.Select(fc.sel)
		}
		if err = mq.All(&docs); err == nil {
			r, err = fc.getVM().ToValue(docs)
		}
	} else {
		doc := make(map[string]interface{})
		if fc.config.byId {
			// findId: the query value is used directly as the _id.
			if err = col.FindId(fc.query).One(doc); err == nil {
				r, err = fc.getVM().ToValue(doc)
			}
		} else {
			if err = col.Find(fc.query).One(doc); err == nil {
				r, err = fc.getVM().ToValue(doc)
			}
		}
	}
	return
}
// makeFind builds the JavaScript-callable query function (find/findOne/findId)
// for the given configuration. Each invocation copies the mgo session (closed
// on return) so queries do not contend on one socket. The returned function
// always produces a value: the query result on success, or JavaScript null
// after logging the failure — errors are never raised into the script.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		fc := &findCall{
			config:  fa,
			session: fa.session.Copy(),
			sel:     make(map[string]int),
		}
		defer fc.session.Close()
		// Seed db/collection from the mapping's namespace; the options
		// argument below may override them.
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			if argLen >= 2 {
				// Optional second argument: options map (database,
				// collection, select, and for multi calls sort/limit).
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			// First argument: the query itself.
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// doDelete enqueues an Elasticsearch bulk delete for the document referenced
// by op, according to config.DeleteStrategy:
//   - ignoreDeleteStrategy: deletes are dropped entirely.
//   - statefulDeleteStrategy: index/type/routing/parent overrides are read
//     back (and consumed) from the metadata saved in monstache.meta.
//   - statelessDeleteStrategy: when routing may apply, the live document is
//     located with a term query on _id so the delete targets the right
//     index/routing/parent; an ambiguous or failed lookup aborts the delete.
//
// Fix: the bulk request is now allocated after the ignore-strategy early
// return instead of before it, and the redundant trailing return is gone.
func doDelete(config *configOptions, client *elastic.Client, mongo *mgo.Session, bulk *elastic.BulkProcessor, op *gtm.Op) {
	if config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	objectID, indexType, meta := opIDToString(op), mapIndexType(config, op), &indexingMeta{}
	req := elastic.NewBulkDeleteRequest()
	req.Id(objectID)
	// The oplog timestamp is used as an external version so a late-arriving
	// delete cannot clobber a newer write.
	req.Version(int64(op.Timestamp))
	req.VersionType("external")
	if config.DeleteStrategy == statefulDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			meta = getIndexMeta(mongo, op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	} else if config.DeleteStrategy == statelessDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Find the one live copy of the document to learn its actual
			// index, type, routing and parent.
			termQuery := elastic.NewTermQuery("_id", objectID)
			searchResult, err := client.Search().FetchSource(false).Size(1).Index(config.DeleteIndexPattern).Query(termQuery).Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s", objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.Hits.TotalHits == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s", objectID, config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
			req.Type(indexType.Type)
		}
	} else {
		// Unknown strategy: do nothing.
		return
	}
	bulk.Add(req)
}
// gtmDefaultSettings returns the default gtm tuning values used when the
// config file does not override them.
func gtmDefaultSettings() gtmSettings {
	var settings gtmSettings
	settings.ChannelSize = gtmChannelSizeDefault
	settings.BufferSize = 32
	settings.BufferDuration = "750ms"
	return settings
}
// notifySdFailed reports why a systemd notification was not delivered: an
// actual error, or (verbosely) the benign absence of NOTIFY_SOCKET.
func notifySdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if config.Verbose {
		infoLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed reports why the systemd watchdog is not in use: an actual
// error, or (verbosely) the benign case of the watchdog being disabled.
func watchdogSdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if config.Verbose {
		infoLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHttp runs the monitoring HTTP server and blocks until it stops.
// If the listener stops for any reason other than an orchestrated shutdown
// (ctx.shutdown is set by shutdown() before calling httpServer.Shutdown),
// the process panics.
func (ctx *httpServerCtx) serveHttp() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	// Record the start time used by the /started uptime endpoint.
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		errorLog.Panicf("Unable to serve http at address %s: %s", s.Addr, err)
	}
}
// buildServer constructs the monitoring HTTP server with endpoints for uptime
// (/started), liveness (/healthz), the effective configuration (/config) and,
// when stats are enabled, bulk indexing statistics (/stats).
//
// Fix: uses time.Since instead of time.Now().Sub and named http.Status*
// constants instead of bare numerics.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		data := time.Since(ctx.started).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", " ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write(stats)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	mux.HandleFunc("/config", func(w http.ResponseWriter, req *http.Request) {
		conf, err := json.MarshalIndent(ctx.config, "", " ")
		if err == nil {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			w.Write(conf)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "Unable to print config: %s", err)
		}
	})
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// notifySd reports liveness to systemd from a dedicated goroutine: it sends
// READY=1 once and, when a watchdog interval is configured, keeps sending
// WATCHDOG=1 forever. It returns early when systemd notification is not
// available or the watchdog is not enabled.
func notifySd(config *configOptions) {
	var interval time.Duration
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		notifySdFailed(config, err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		watchdogSdFailed(config, err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			notifySdFailed(config, err)
			return
		}
		// Ping at twice the required rate so a single delayed iteration
		// cannot trip the watchdog.
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns the gtm callback that dials and configures a
// new MongoDB shard session whenever a shard is added to the cluster.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mgo.Session, error) {
		infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
		shardURL := config.getAuthURL(shardInfo.GetURL())
		shard, err := config.dialMongo(shardURL)
		if err != nil {
			return nil, err
		}
		config.configureMongo(shard)
		return shard, nil
	}
}
// shutdown flushes outstanding bulk work, resets cluster state and stops the
// monitoring HTTP server, then exits with the given status. Cleanup runs in a
// background goroutine and is given at most five seconds before the process
// exits anyway.
//
// Fix: the previous implementation leaked a time.Ticker (never stopped) and
// drove two identical select branches through an intermediate doneC channel;
// a single select on a stoppable timer is equivalent and leak-free.
func shutdown(exitStatus int, hsc *httpServerCtx, bulk *elastic.BulkProcessor, bulkStats *elastic.BulkProcessor, mongo *mgo.Session, config *configOptions) {
	infoLog.Println("Shutting down")
	closeC := make(chan bool)
	go func() {
		if config.ClusterName != "" {
			resetClusterState(mongo, config)
		}
		if hsc != nil {
			// Mark the shutdown as orchestrated so serveHttp does not panic.
			hsc.shutdown = true
			hsc.httpServer.Shutdown(context.Background())
		}
		bulk.Flush()
		if bulkStats != nil {
			bulkStats.Flush()
		}
		close(closeC)
	}()
	timeout := time.NewTimer(5 * time.Second)
	defer timeout.Stop()
	select {
	case <-closeC:
	case <-timeout.C:
	}
	os.Exit(exitStatus)
}
func main() {
enabled := true
config := &configOptions{
MongoDialSettings: mongoDialSettings{Timeout: -1},
MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
GtmSettings: gtmDefaultSettings(),
}
config.parseCommandLineFlags()
if config.Version {
fmt.Println(version)
os.Exit(0)
}
config.loadTimeMachineNamespaces()
config.loadRoutingNamespaces()
config.loadPatchNamespaces()
config.loadGridFsConfig()
config.loadConfigFile()
config.setDefaults()
if config.Print {
config.dump()
os.Exit(0)
}
config.setupLogging()
config.loadPlugins()
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
mongo, err := config.dialMongo(config.MongoURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb using URL %s: %s", config.MongoURL, err)
}
if mongoInfo, err := mongo.BuildInfo(); err == nil {
infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
} else {
infoLog.Println("Successfully connected to MongoDB")
}
defer mongo.Close()
config.configureMongo(mongo)
loadBuiltinFunctions(mongo)
elasticClient, err := config.newElasticClient()
if err != nil {
errorLog.Panicf("Unable to create elasticsearch client: %s", err)
}
if config.ElasticVersion == "" {
if err := config.testElasticsearchConn(elasticClient); err != nil {
errorLog.Panicf("Unable to validate connection to elasticsearch using client %s: %s",
elasticClient, err)
}
} else {
if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
errorLog.Panicf("Elasticsearch version must conform to major.minor.fix: %s", err)
}
}
bulk, err := config.newBulkProcessor(elasticClient)
if err != nil {
errorLog.Panicf("Unable to start bulk processor: %s", err)
}
defer bulk.Stop()
var bulkStats *elastic.BulkProcessor
if config.IndexStats {
bulkStats, err = config.newStatsBulkProcessor(elasticClient)
if err != nil {
errorLog.Panicf("Unable to start stats bulk processor: %s", err)
}
defer bulkStats.Stop()
}
var after gtm.TimestampGenerator
if config.Resume {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
ts := gtm.LastOpTimestamp(session, options)
if config.Replay {
ts = bson.MongoTimestamp(0)
} else if config.ResumeFromTimestamp != 0 {
ts = bson.MongoTimestamp(config.ResumeFromTimestamp)
} else {
collection := session.DB("monstache").C("monstache")
doc := make(map[string]interface{})
collection.FindId(config.ResumeName).One(doc)
if doc["ts"] != nil {
ts = doc["ts"].(bson.MongoTimestamp)
}
}
return ts
}
} else if config.Replay {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
return bson.MongoTimestamp(0)
}
}
if config.IndexFiles {
if len(config.FileNamespaces) == 0 {
errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
}
if err := ensureFileMapping(elasticClient); err != nil {
panic(err)
}
}
var nsFilter, filter, directReadFilter gtm.OpFilter
filterChain := []gtm.OpFilter{notMonstache, notSystem, notChunks}
filterArray := []gtm.OpFilter{}
if config.isSharded() {
filterChain = append(filterChain, notConfig)
}
if config.NsRegex != "" {
filterChain = append(filterChain, filterWithRegex(config.NsRegex))
}
if config.NsExcludeRegex != "" {
filterChain = append(filterChain, filterInverseWithRegex(config.NsExcludeRegex))
}
if config.Worker != "" {
workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
if err != nil {
panic(err)
}
filterChain = append(filterChain, workerFilter)
} else if config.Workers != nil {
panic("Workers configured but this worker is undefined. worker must be set to one of the workers.")
}
if filterPlugin != nil {
filterArray = append(filterArray, filterWithPlugin())
} else if len(filterEnvs) > 0 {
filterArray = append(filterArray, filterWithScript())
}
nsFilter = gtm.ChainOpFilters(filterChain...)
filter = gtm.ChainOpFilters(filterArray...)
directReadFilter = gtm.ChainOpFilters(filterArray...)
var oplogDatabaseName, oplogCollectionName, cursorTimeout *string
if config.MongoOpLogDatabaseName != "" {
oplogDatabaseName = &config.MongoOpLogDatabaseName
}
if config.MongoOpLogCollectionName != "" {
oplogCollectionName = &config.MongoOpLogCollectionName
}
if config.MongoCursorTimeout != "" {
cursorTimeout = &config.MongoCursorTimeout
}
if config.ClusterName != "" {
if err = ensureClusterTTL(mongo); err == nil {
infoLog.Printf("Joined cluster %s", config.ClusterName)
} else {
errorLog.Panicf("Unable to enable cluster mode: %s", err)
}
enabled, err = enableProcess(mongo, config)
if err != nil {
errorLog.Panicf("Unable to determine enabled cluster process: %s", err)
}
if !enabled {
config.DirectReadNs = stringargs{}
}
}
gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
if err != nil {
errorLog.Panicf("Unable to parse gtm buffer duration %s: %s", config.GtmSettings.BufferDuration, err)
}
var mongos []*mgo.Session
var configSession *mgo.Session
if config.isSharded() {
// if we have a config server URL then we are running in a sharded cluster
configSession, err = config.dialMongo(config.MongoConfigURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb config server using URL %s: %s", config.MongoConfigURL, err)
}
config.configureMongo(configSession)
// get the list of shard servers
shardInfos := gtm.GetShards(configSession)
if len(shardInfos) == 0 {
errorLog.Fatalln("Shards enabled but none found in config.shards collection")
}
// add each shard server to the sync list
for _, shardInfo := range shardInfos {
infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
shardURL := config.getAuthURL(shardInfo.GetURL())
shard, err := config.dialMongo(shardURL)
if err != nil {
errorLog.Panicf("Unable to connect to mongodb shard using URL %s: %s", shardURL, err)
}
defer shard.Close()
config.configureMongo(shard)
mongos = append(mongos, shard)
}
} else {
mongos = append(mongos, mongo)
}
gtmOpts := >m.Options{
After: after,
Filter: filter,
NamespaceFilter: nsFilter,
OpLogDatabaseName: oplogDatabaseName,
OpLogCollectionName: oplogCollectionName,
CursorTimeout: cursorTimeout,
ChannelSize: config.GtmSettings.ChannelSize,
Ordering: gtm.Oplog,
WorkerCount: 1,
BufferDuration: gtmBufferDuration,
BufferSize: config.GtmSettings.BufferSize,
DirectReadNs: config.DirectReadNs,
DirectReadFilter: directReadFilter,
Log: infoLog,
}
gtmCtx := gtm.StartMulti(mongos, gtmOpts)
if config.isSharded() {
gtmCtx.AddShardListener(configSession, gtmOpts, config.makeShardInsertHandler())
}
if config.ClusterName != "" {
if enabled {
infoLog.Printf("Starting work for cluster %s", config.ClusterName)
} else {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
}
}
timestampTicker := time.NewTicker(10 * time.Second)
if config.Resume == false {
timestampTicker.Stop()
}
heartBeat := time.NewTicker(10 * time.Second)
if config.ClusterName == "" {
heartBeat.Stop()
}
statsTimeout := time.Duration(30) * time.Second
if config.StatsDuration != "" {
statsTimeout, err = time.ParseDuration(config.StatsDuration)
if err != nil {
errorLog.Panicf("Unable to parse stats duration: %s", err)
}
}
printStats := time.NewTicker(statsTimeout)
if config.Stats == false {
printStats.Stop()
}
exitStatus := 0
go notifySd(config)
var hsc *httpServerCtx
if config.EnableHTTPServer {
hsc = &httpServerCtx{
bulk: bulk,
config: config,
}
hsc.buildServer()
go hsc.serveHttp()
}
go func() {
<-sigs
shutdown(exitStatus, hsc, bulk, bulkStats, mongo, config)
}()
if len(config.DirectReadNs) > 0 {
if config.ExitAfterDirectReads {
go func() {
gtmCtx.DirectReadWg.Wait()
shutdown(exitStatus, hsc, bulk, bulkStats, mongo, config)
}()
}
}
infoLog.Println("Entering event loop")
var lastTimestamp, lastSavedTimestamp bson.MongoTimestamp
fileC := make(chan *gtm.Op)
fileDoneC := make(chan *gtm.Op)
for i := 0; i < config.FileDownloaders; i++ {
go func() {
for op := range fileC {
err := addFileContent(mongo, op, config)
if err != nil {
gtmCtx.ErrC <- err
}
fileDoneC <- op
}
}()
}
for {
select {
case <-timestampTicker.C:
if lastTimestamp > lastSavedTimestamp {
bulk.Flush()
if saveTimestamp(mongo, lastTimestamp, config); err == nil {
lastSavedTimestamp = lastTimestamp
} else {
gtmCtx.ErrC <- err
}
}
case <-heartBeat.C:
if config.ClusterName == "" {
break
}
if enabled {
enabled, err = ensureEnabled(mongo, config)
if !enabled {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
bulk.Stop()
}
} else {
enabled, err = enableProcess(mongo, config)
if enabled {
infoLog.Printf("Resuming work for cluster %s", config.ClusterName)
bulk.Start(context.Background())
resumeWork(gtmCtx, mongo, config)
}
}
if err != nil {
gtmCtx.ErrC <- err
}
case <-printStats.C:
if !enabled {
break
}
if config.IndexStats {
if err := doIndexStats(config, bulkStats, bulk.Stats()); err != nil {
errorLog.Printf("Error indexing statistics: %s", err)
}
} else {
stats, err := json.Marshal(bulk.Stats())
if err != nil {
errorLog.Printf("Unable to log statistics: %s", err)
} else {
statsLog.Println(string(stats))
}
}
case err = <-gtmCtx.ErrC:
exitStatus = 1
errorLog.Println(err)
if config.FailFast {
os.Exit(exitStatus)
}
case op := <-fileDoneC:
ingest := op.Data["file"] != nil
if err = doIndex(config, mongo, bulk, elasticClient, op, ingest); err != nil {
gtmCtx.ErrC <- err
}
case op := <-gtmCtx.OpC:
if !enabled {
break
}
if op.IsSourceOplog() {
lastTimestamp = op.Timestamp
}
if op.IsDrop() {
bulk.Flush()
if err = doDrop(mongo, elasticClient, op, config); err != nil {
gtmCtx.ErrC <- err
}
} else if op.IsDelete() {
doDelete(config, elasticClient, mongo, bulk, op)
} else if op.Data != nil {
if hasFileContent(op, config) {
fileC <- op
} else {
if err = doIndex(config, mongo, bulk, elasticClient, op, false); err != nil {
gtmCtx.ErrC <- err
}
}
}
}
}
}
|
package missinggo
type (
	// SameLessFunc lazily produces one comparison result: whether the two
	// operands compare equal, and whether the left one is less.
	SameLessFunc func() (same, less bool)
	// MultiLess chains a sequence of less-than comparisons, where a later
	// comparison only matters while all earlier ones compared equal.
	MultiLess struct {
		ok   bool
		less bool
	}
)

// Less reports whether the chain resolved to "left is less than right".
// An unresolved (or equal) chain reports false.
func (ml *MultiLess) Less() bool {
	if !ml.ok {
		return false
	}
	return ml.less
}

// Final returns the resolved comparison result, panicking if the chain was
// never resolved.
func (ml *MultiLess) Final() bool {
	left, ok := ml.FinalOk()
	if !ok {
		panic("undetermined")
	}
	return left
}

// FinalOk returns the less-than result along with whether the chain resolved.
func (ml *MultiLess) FinalOk() (left, ok bool) {
	left, ok = ml.less, ml.ok
	return
}

// Next feeds one more comparison into the chain; f is only invoked while the
// chain is still unresolved, and an equal result leaves it unresolved.
func (ml *MultiLess) Next(f SameLessFunc) {
	if ml.ok {
		return
	}
	if same, less := f(); !same {
		ml.ok = true
		ml.less = less
	}
}

// StrictNext is Next with eagerly evaluated operands.
func (ml *MultiLess) StrictNext(same, less bool) {
	ml.Next(func() (bool, bool) {
		return same, less
	})
}

// NextBool compares two booleans, ordering true before false.
func (ml *MultiLess) NextBool(l, r bool) {
	ml.StrictNext(l == r, l)
}
Add MultiLess.Compare
package missinggo
type (
	// SameLessFunc lazily produces one comparison result: whether the two
	// operands compare equal, and whether the left one is less.
	SameLessFunc func() (same, less bool)
	// MultiLess chains a sequence of less-than comparisons, where a later
	// comparison only matters while all earlier ones compared equal.
	MultiLess struct {
		ok   bool
		less bool
	}
)

// Less reports whether the chain resolved to "left is less than right".
// An unresolved (or equal) chain reports false.
func (ml *MultiLess) Less() bool {
	if !ml.ok {
		return false
	}
	return ml.less
}

// Final returns the resolved comparison result, panicking if the chain was
// never resolved.
func (ml *MultiLess) Final() bool {
	left, ok := ml.FinalOk()
	if !ok {
		panic("undetermined")
	}
	return left
}

// FinalOk returns the less-than result along with whether the chain resolved.
func (ml *MultiLess) FinalOk() (left, ok bool) {
	left, ok = ml.less, ml.ok
	return
}

// Next feeds one more comparison into the chain; f is only invoked while the
// chain is still unresolved, and an equal result leaves it unresolved.
func (ml *MultiLess) Next(f SameLessFunc) {
	if ml.ok {
		return
	}
	if same, less := f(); !same {
		ml.ok = true
		ml.less = less
	}
}

// StrictNext is Next with eagerly evaluated operands.
func (ml *MultiLess) StrictNext(same, less bool) {
	ml.Next(func() (bool, bool) {
		return same, less
	})
}

// NextBool compares two booleans, ordering true before false.
func (ml *MultiLess) NextBool(l, r bool) {
	ml.StrictNext(l == r, l)
}

// Compare feeds a three-way comparison result into the chain: negative means
// less, zero means equal (unresolved), positive means greater.
func (ml *MultiLess) Compare(i int) {
	ml.StrictNext(i == 0, i < 0)
}
|
package memcart_mock
import (
"errors"
"fmt"
"github.com/grantek/fkmd/memcart"
"io"
//"io/ioutil"
//"os"
)
// MockMemCart is an in-memory implementation of a banked memory cartridge for
// tests: a list of banks with one selected as current.
type MockMemCart struct {
	banks       []*MockMemBank
	currentbank int
}

// NumBanks returns the number of banks added to the cart.
func (mc *MockMemCart) NumBanks() int {
	return len(mc.banks)
}

// AddBank appends a bank to the cart.
// Fix: removed the leftover temporary variable and dead commented-out line.
func (mc *MockMemCart) AddBank(mb *MockMemBank) {
	mc.banks = append(mc.banks, mb)
}

// GetCurrentBank returns the currently selected bank.
func (mc *MockMemCart) GetCurrentBank() memcart.MemBank {
	return mc.banks[mc.currentbank]
}

// SwitchBank selects bank n as current, or returns an error when n is out of
// range.
// Fix: the format string previously had no argument for %d, so the message
// rendered as "%!d(MISSING)"; n is now supplied.
func (mc *MockMemCart) SwitchBank(n int) error {
	if n < 0 || n >= len(mc.banks) {
		return errors.New(fmt.Sprintf("Requested bank %d does not exist", n))
	}
	mc.currentbank = n
	return nil
}

// MockMemBank is an in-memory bank backed by any io.ReadWriteSeeker.
type MockMemBank struct {
	f    io.ReadWriteSeeker
	name string
	size int64
}

// Read delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Read(p []byte) (n int, err error) {
	return d.f.Read(p)
}

// Write delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Write(p []byte) (n int, err error) {
	return d.f.Write(p)
}

// Seek delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Seek(offset int64, whence int) (int64, error) {
	return d.f.Seek(offset, whence)
}

// GetName returns the bank's name.
func (d *MockMemBank) GetName() string {
	return d.name
}

// GetSize returns the bank's size in bytes.
func (d *MockMemBank) GetSize() int64 {
	return d.size
}

// NewMemBank builds a MockMemBank over the given backing storage.
func NewMemBank(name string, f io.ReadWriteSeeker, size int64) (*MockMemBank, error) {
	var mb MockMemBank
	mb.f = f
	mb.name = name
	mb.size = size
	return &mb, nil
}
cleanup
package memcart_mock
import (
"errors"
"fmt"
"github.com/grantek/fkmd/memcart"
"io"
//"io/ioutil"
//"os"
)
// MockMemCart is an in-memory implementation of a banked memory cartridge for
// tests: a list of banks with one selected as current.
type MockMemCart struct {
	banks       []*MockMemBank
	currentbank int
}

// NumBanks returns the number of banks added to the cart.
func (mc *MockMemCart) NumBanks() int {
	return len(mc.banks)
}

// AddBank appends a bank to the cart.
func (mc *MockMemCart) AddBank(mb *MockMemBank) {
	mc.banks = append(mc.banks, mb)
}

// GetCurrentBank returns the currently selected bank.
func (mc *MockMemCart) GetCurrentBank() memcart.MemBank {
	return mc.banks[mc.currentbank]
}

// SwitchBank selects bank n as current, or returns an error when n is out of
// range.
// Fix: the format string previously had no argument for %d, so the message
// rendered as "%!d(MISSING)"; n is now supplied.
func (mc *MockMemCart) SwitchBank(n int) error {
	if n < 0 || n >= len(mc.banks) {
		return errors.New(fmt.Sprintf("Requested bank %d does not exist", n))
	}
	mc.currentbank = n
	return nil
}

// MockMemBank is an in-memory bank backed by any io.ReadWriteSeeker.
type MockMemBank struct {
	f    io.ReadWriteSeeker
	name string
	size int64
}

// Read delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Read(p []byte) (n int, err error) {
	return d.f.Read(p)
}

// Write delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Write(p []byte) (n int, err error) {
	return d.f.Write(p)
}

// Seek delegates to the underlying ReadWriteSeeker.
func (d *MockMemBank) Seek(offset int64, whence int) (int64, error) {
	return d.f.Seek(offset, whence)
}

// GetName returns the bank's name.
func (d *MockMemBank) GetName() string {
	return d.name
}

// GetSize returns the bank's size in bytes.
func (d *MockMemBank) GetSize() int64 {
	return d.size
}

// NewMemBank builds a MockMemBank over the given backing storage.
func NewMemBank(name string, f io.ReadWriteSeeker, size int64) (*MockMemBank, error) {
	var mb MockMemBank
	mb.f = f
	mb.name = name
	mb.size = size
	return &mb, nil
}
|
package main
import (
"crypto/md5"
"encoding/base64"
"errors"
"fmt"
"net/http"
"sync"
"github.com/Sirupsen/logrus"
"github.com/dgrijalva/jwt-go"
"github.com/TykTechnologies/openid2go/openid"
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/user"
)
const OIDPREFIX = "openid"
// OpenIDMW is the OpenID Connect authentication middleware: it validates
// incoming JWTs via the openid library and maps issuer/client-id pairs to
// policies.
type OpenIDMW struct {
	BaseMiddleware
	providerConfiguration *openid.Configuration
	// provider_client_policymap maps issuer -> (decoded client id -> policy id).
	provider_client_policymap map[string]map[string]string
	// lock guards provider_client_policymap, written in getProviders and read
	// on each request in ProcessRequest.
	lock sync.RWMutex
}
// Name returns the middleware's identifier.
func (k *OpenIDMW) Name() string {
	return "OpenIDMW"
}
// EnabledForSpec reports whether OpenID authentication is enabled for this
// API spec.
func (k *OpenIDMW) EnabledForSpec() bool {
	return k.Spec.UseOpenID
}
// Init prepares the middleware: it allocates the issuer->client->policy map
// and builds the shared openid.Configuration using this middleware's provider
// getter and no-op error handler. A configuration error is logged but not
// fatal.
func (k *OpenIDMW) Init() {
	k.provider_client_policymap = make(map[string]map[string]string)
	// Create an OpenID Configuration and store
	var err error
	k.providerConfiguration, err = openid.NewConfiguration(openid.ProvidersGetter(k.getProviders),
		openid.ErrorHandler(k.dummyErrorHandler))
	if err != nil {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("OpenID configuration error: ", err)
	}
}
// getProviders builds the openid provider list from the API spec. Client IDs
// are base64-decoded from the spec, and each decoded ID is recorded in
// provider_client_policymap under its issuer so ProcessRequest can resolve
// the policy for a token's audience. A provider that fails to construct is
// logged and skipped rather than failing the whole setup.
func (k *OpenIDMW) getProviders() ([]openid.Provider, error) {
	providers := []openid.Provider{}
	log.Debug("Setting up providers: ", k.Spec.OpenIDOptions.Providers)
	for _, provider := range k.Spec.OpenIDOptions.Providers {
		iss := provider.Issuer
		log.Debug("Setting up Issuer: ", iss)
		providerClientArray := make([]string, len(provider.ClientIDs))
		i := 0
		for clientID, policyID := range provider.ClientIDs {
			// Decode error is deliberately ignored; an undecodable ID yields
			// an empty string here — TODO confirm that is intended.
			clID, _ := base64.StdEncoding.DecodeString(clientID)
			// Shadow clientID with its decoded form for all uses below.
			clientID := string(clID)
			k.lock.Lock()
			if k.provider_client_policymap[iss] == nil {
				k.provider_client_policymap[iss] = map[string]string{clientID: policyID}
			} else {
				k.provider_client_policymap[iss][clientID] = policyID
			}
			k.lock.Unlock()
			log.Debug("--> Setting up client: ", clientID, " with policy: ", policyID)
			providerClientArray[i] = clientID
			i++
		}
		p, err := openid.NewProvider(iss, providerClientArray)
		if err != nil {
			log.WithFields(logrus.Fields{
				"prefix":   OIDPREFIX,
				"provider": iss,
			}).Error("Failed to create provider: ", err)
		} else {
			providers = append(providers, p)
		}
	}
	return providers, nil
}
// dummyErrorHandler suppresses the openid library's default HTTP error
// responses — ProcessRequest performs its own error handling. The true return
// value appears to tell the library the error was handled — confirm against
// the openid package's ErrorHandler contract.
func (k *OpenIDMW) dummyErrorHandler(e error, w http.ResponseWriter, r *http.Request) bool {
	log.WithFields(logrus.Fields{
		"prefix": OIDPREFIX,
	}).Warning("JWT Invalid: ", e)
	return true
}
// ProcessRequest validates the OIDC JWT on the request, resolves the
// token's issuer/audience pair to a Tyk policy, and creates (or restores)
// the internal session for the key. Returns (nil, http.StatusOK) on
// success, otherwise an error plus the HTTP status to respond with.
//
// Fix: authentication failures now return 401 Unauthorized instead of
// 403 Forbidden; 403 is reserved for the authorization failure (no
// applicable policy) further down.
func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
	// 1. Validate the JWT
	ouser, token, halt := openid.AuthenticateOIDWithUser(k.providerConfiguration, w, r)

	// 2. Generate the internal representation for the key
	if halt {
		// Fire Authfailed Event
		k.reportLoginFailure("[JWT]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// 3. Create or set the session to match
	iss, found := token.Claims.(jwt.MapClaims)["iss"]
	clients, cfound := token.Claims.(jwt.MapClaims)["aud"]
	if !found && !cfound {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No issuer or audiences found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// Read the issuer's client->policy map under the read lock;
	// getProviders may mutate it concurrently.
	k.lock.RLock()
	clientSet, foundIssuer := k.provider_client_policymap[iss.(string)]
	k.lock.RUnlock()
	if !foundIssuer {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No issuer or audiences found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// The "aud" claim can be a single string or a list; pick the first
	// audience we have a policy mapping for.
	policyID := ""
	clientID := ""
	switch v := clients.(type) {
	case string:
		k.lock.RLock()
		policyID = clientSet[v]
		k.lock.RUnlock()
		clientID = v
	case []interface{}:
		for _, audVal := range v {
			k.lock.RLock()
			policy, foundPolicy := clientSet[audVal.(string)]
			k.lock.RUnlock()
			if foundPolicy {
				clientID = audVal.(string)
				policyID = policy
				break
			}
		}
	}
	if policyID == "" {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No matching policy found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// Derive the internal session ID from the (hashed) user ID, optionally
	// segregated by client.
	data := []byte(ouser.ID)
	tokenID := fmt.Sprintf("%x", md5.Sum(data))
	sessionID := k.Spec.OrgID + tokenID

	if k.Spec.OpenIDOptions.SegregateByClient {
		// We are segregating by client, so use it as part of the internal token
		log.Debug("Client ID:", clientID)
		sessionID = k.Spec.OrgID + fmt.Sprintf("%x", md5.Sum([]byte(clientID))) + tokenID
	}

	log.Debug("Generated Session ID: ", sessionID)

	session, exists := k.CheckSessionAndIdentityForValidKey(sessionID)
	if !exists {
		// Create it
		log.Debug("Key does not exist, creating")
		session = user.SessionState{}

		// We need a base policy as a template, either get it from the token itself OR a proxy client ID within Tyk
		newSession, err := generateSessionFromPolicy(policyID,
			k.Spec.OrgID,
			true)

		if err != nil {
			k.reportLoginFailure(sessionID, r)
			log.WithFields(logrus.Fields{
				"prefix": OIDPREFIX,
			}).Error("Could not find a valid policy to apply to this token!")
			// Authorization (not authentication) failure: keep 403 here.
			return errors.New("Key not authorized: no matching policy"), http.StatusForbidden
		}

		session = newSession
		session.MetaData = map[string]interface{}{"TykJWTSessionID": sessionID, "ClientID": clientID}
		session.Alias = clientID + ":" + ouser.ID

		// Update the session in the session manager in case it gets called again
		k.Spec.SessionManager.UpdateSession(sessionID, &session, session.Lifetime(k.Spec.SessionLifetime), false)
		log.Debug("Policy applied to key")
	}

	// 4. Set session state on context, we will need it later
	switch k.Spec.BaseIdentityProvidedBy {
	case apidef.OIDCUser, apidef.UnsetAuth:
		ctxSetSession(r, &session)
		ctxSetAuthToken(r, sessionID)
	}
	ctxSetJWTContextVars(k.Spec, r, token)

	return nil, http.StatusOK
}
// reportLoginFailure logs an invalid-key access attempt, fires the
// auth-failed event, and records the failure in the health check.
func (k *OpenIDMW) reportLoginFailure(tykId string, r *http.Request) {
	fields := logrus.Fields{
		"prefix": OIDPREFIX,
		"key":    tykId,
	}
	log.WithFields(fields).Warning("Attempted access with invalid key.")

	// Fire the auth-failed event for this request.
	AuthFailed(k, r, tykId)

	// Record the failure for the health check.
	reportHealthValue(k.Spec, KeyFailure, "1")
}
Changed 403 to 401 for unauthorised keys in OIDC (#1698)
package main
import (
"crypto/md5"
"encoding/base64"
"errors"
"fmt"
"net/http"
"sync"
"github.com/Sirupsen/logrus"
"github.com/dgrijalva/jwt-go"
"github.com/TykTechnologies/openid2go/openid"
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/user"
)
// OIDPREFIX tags log entries emitted by this middleware.
const OIDPREFIX = "openid"

// OpenIDMW authenticates requests carrying OpenID Connect JWTs and maps
// them onto internal Tyk sessions.
type OpenIDMW struct {
	BaseMiddleware
	providerConfiguration *openid.Configuration
	// provider_client_policymap maps issuer -> client ID -> Tyk policy ID.
	provider_client_policymap map[string]map[string]string
	// lock guards provider_client_policymap, which is written by
	// getProviders and read by ProcessRequest.
	lock sync.RWMutex
}
// Name returns this middleware's identifier for logging/registration.
func (k *OpenIDMW) Name() string {
	return "OpenIDMW"
}
// EnabledForSpec reports whether OpenID auth is enabled on this API spec.
func (k *OpenIDMW) EnabledForSpec() bool {
	return k.Spec.UseOpenID
}
// Init allocates the issuer->client->policy lookup map and builds the
// shared openid.Configuration used to validate incoming tokens.
func (k *OpenIDMW) Init() {
	k.provider_client_policymap = make(map[string]map[string]string)
	// Create an OpenID Configuration and store
	var err error
	k.providerConfiguration, err = openid.NewConfiguration(openid.ProvidersGetter(k.getProviders),
		openid.ErrorHandler(k.dummyErrorHandler))

	if err != nil {
		// Non-fatal: log the configuration error and continue with a
		// possibly unusable configuration.
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("OpenID configuration error: ", err)
	}
}
// getProviders builds the openid.Provider list from the API spec and, as a
// side effect, fills k.provider_client_policymap (issuer -> client ID ->
// policy ID) under k.lock. Providers that fail to construct are logged and
// skipped; the returned error is always nil.
func (k *OpenIDMW) getProviders() ([]openid.Provider, error) {
	providers := []openid.Provider{}
	log.Debug("Setting up providers: ", k.Spec.OpenIDOptions.Providers)
	for _, p := range k.Spec.OpenIDOptions.Providers {
		issuer := p.Issuer
		log.Debug("Setting up Issuer: ", issuer)

		clientIDs := make([]string, 0, len(p.ClientIDs))
		for encodedID, policyID := range p.ClientIDs {
			decoded, _ := base64.StdEncoding.DecodeString(encodedID)
			clientID := string(decoded)

			// Record the client -> policy mapping for this issuer while
			// holding the write lock.
			k.lock.Lock()
			if existing := k.provider_client_policymap[issuer]; existing == nil {
				k.provider_client_policymap[issuer] = map[string]string{clientID: policyID}
			} else {
				existing[clientID] = policyID
			}
			k.lock.Unlock()

			log.Debug("--> Setting up client: ", clientID, " with policy: ", policyID)
			clientIDs = append(clientIDs, clientID)
		}

		prov, err := openid.NewProvider(issuer, clientIDs)
		if err != nil {
			log.WithFields(logrus.Fields{
				"prefix":   OIDPREFIX,
				"provider": issuer,
			}).Error("Failed to create provider: ", err)
			continue
		}
		providers = append(providers, prov)
	}

	return providers, nil
}
// dummyErrorHandler suppresses the openid library's built-in error
// handling; the middleware reports failures itself. Returning true tells
// the library the error has been handled.
func (k *OpenIDMW) dummyErrorHandler(e error, w http.ResponseWriter, r *http.Request) bool {
	log.WithFields(logrus.Fields{
		"prefix": OIDPREFIX,
	}).Warning("JWT Invalid: ", e)
	return true
}
// ProcessRequest validates the OIDC JWT on the request, resolves the
// token's issuer/audience pair to a Tyk policy, and creates (or restores)
// the internal session for the key. Returns (nil, http.StatusOK) on
// success, otherwise an error plus the HTTP status to respond with:
// 401 for authentication failures, 403 when no policy can be applied.
func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
	// 1. Validate the JWT
	ouser, token, halt := openid.AuthenticateOIDWithUser(k.providerConfiguration, w, r)

	// 2. Generate the internal representation for the key
	if halt {
		// Fire Authfailed Event
		k.reportLoginFailure("[JWT]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// 3. Create or set the session to match
	iss, found := token.Claims.(jwt.MapClaims)["iss"]
	clients, cfound := token.Claims.(jwt.MapClaims)["aud"]
	if !found && !cfound {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No issuer or audiences found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// Read this issuer's client->policy map under the read lock;
	// getProviders may mutate it concurrently.
	k.lock.RLock()
	clientSet, foundIssuer := k.provider_client_policymap[iss.(string)]
	k.lock.RUnlock()
	if !foundIssuer {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No issuer or audiences found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// The "aud" claim can be a single string or a list; use the first
	// audience that has a policy mapping.
	policyID := ""
	clientID := ""
	switch v := clients.(type) {
	case string:
		k.lock.RLock()
		policyID = clientSet[v]
		k.lock.RUnlock()
		clientID = v
	case []interface{}:
		for _, audVal := range v {
			k.lock.RLock()
			policy, foundPolicy := clientSet[audVal.(string)]
			k.lock.RUnlock()
			if foundPolicy {
				clientID = audVal.(string)
				policyID = policy
				break
			}
		}
	}
	if policyID == "" {
		log.WithFields(logrus.Fields{
			"prefix": OIDPREFIX,
		}).Error("No matching policy found!")
		k.reportLoginFailure("[NOT GENERATED]", r)
		return errors.New("Key not authorised"), http.StatusUnauthorized
	}

	// Derive the internal session ID from the hashed user ID, optionally
	// segregated per client.
	data := []byte(ouser.ID)
	tokenID := fmt.Sprintf("%x", md5.Sum(data))
	sessionID := k.Spec.OrgID + tokenID

	if k.Spec.OpenIDOptions.SegregateByClient {
		// We are segregating by client, so use it as part of the internal token
		log.Debug("Client ID:", clientID)
		sessionID = k.Spec.OrgID + fmt.Sprintf("%x", md5.Sum([]byte(clientID))) + tokenID
	}

	log.Debug("Generated Session ID: ", sessionID)

	session, exists := k.CheckSessionAndIdentityForValidKey(sessionID)
	if !exists {
		// Create it
		log.Debug("Key does not exist, creating")
		session = user.SessionState{}

		// We need a base policy as a template, either get it from the token itself OR a proxy client ID within Tyk
		newSession, err := generateSessionFromPolicy(policyID,
			k.Spec.OrgID,
			true)

		if err != nil {
			k.reportLoginFailure(sessionID, r)
			log.WithFields(logrus.Fields{
				"prefix": OIDPREFIX,
			}).Error("Could not find a valid policy to apply to this token!")
			return errors.New("Key not authorized: no matching policy"), http.StatusForbidden
		}

		session = newSession
		session.MetaData = map[string]interface{}{"TykJWTSessionID": sessionID, "ClientID": clientID}
		session.Alias = clientID + ":" + ouser.ID

		// Update the session in the session manager in case it gets called again
		k.Spec.SessionManager.UpdateSession(sessionID, &session, session.Lifetime(k.Spec.SessionLifetime), false)
		log.Debug("Policy applied to key")
	}

	// 4. Set session state on context, we will need it later
	switch k.Spec.BaseIdentityProvidedBy {
	case apidef.OIDCUser, apidef.UnsetAuth:
		ctxSetSession(r, &session)
		ctxSetAuthToken(r, sessionID)
	}
	ctxSetJWTContextVars(k.Spec, r, token)

	return nil, http.StatusOK
}
// reportLoginFailure logs an invalid-key access attempt, fires the
// auth-failed event, and records the failure in the health check.
func (k *OpenIDMW) reportLoginFailure(tykId string, r *http.Request) {
	log.WithFields(logrus.Fields{
		"prefix": OIDPREFIX,
		"key":    tykId,
	}).Warning("Attempted access with invalid key.")

	// Fire Authfailed Event
	AuthFailed(k, r, tykId)

	// Report in health check
	reportHealthValue(k.Spec, KeyFailure, "1")
}
|
package main
import (
"encoding/json"
"fmt"
"net/url"
"os"
"os/exec"
"strings"
"github.com/blang/mpv"
"github.com/cnt0/twitch-streamsniper/api"
)
const (
	// ytdl is the downloader binary used to probe stream formats.
	ytdl = "youtube-dl"
	// mpvSocket is the IPC socket of the running mpv instance.
	mpvSocket = "/tmp/mpvsocket"
)
// main resolves the stream URL given as the last CLI argument. For
// twitch.tv links it auto-selects the source/1080p60 format (or asks the
// user); for YouTube links it asks for a ytdl format string. The chosen
// stream is loaded into a running mpv instance over its IPC socket.
func main() {
	c := mpv.NewClient(mpv.NewIPCClient(mpvSocket))
	if u, err := url.Parse(os.Args[len(os.Args)-1]); err == nil {
		hostname := strings.ToLower(u.Hostname())
		var formats struct {
			Formats []api.FormatItem `json:"formats"`
		}
		if strings.HasSuffix(hostname, "twitch.tv") {
			data, err := exec.Command(ytdl, "-J", "--skip-download", os.Args[len(os.Args)-1]).Output()
			if err != nil {
				fmt.Println(err)
				return
			}
			// A JSON parse failure is treated as "channel offline":
			// youtube-dl emits a non-JSON message in that case.
			if err := json.Unmarshal(data, &formats); err != nil {
				fmt.Printf("%v is likely offline\n", u.Path)
				return
			}
			// Prefer the source stream (or 1080p60) when available.
			for _, f := range formats.Formats {
				s := strings.ToLower(f.Format)
				if strings.Contains(s, "source") || strings.Contains(s, "1080p60") {
					c.Loadfile(f.URL, mpv.LoadFileModeReplace)
					return
				}
			}
			fmt.Println("Hmmm... What does hero truly need?")
			for i, f := range formats.Formats {
				fmt.Printf("%v: %v\n", i+1, f.Format)
			}
			var idx int
			// NOTE(review): the Scan error is ignored and idx is not
			// bounds-checked — bad input panics on the index below.
			fmt.Scan(&idx)
			c.Loadfile(formats.Formats[idx-1].URL, mpv.LoadFileModeReplace)
			return
		}
		if strings.HasSuffix(hostname, "youtube.com") || strings.HasSuffix(hostname, "youtu.be") {
			data, err := exec.Command(ytdl, "-J", "--skip-download", os.Args[len(os.Args)-1]).Output()
			if err != nil {
				fmt.Println(err)
				return
			}
			if err := json.Unmarshal(data, &formats); err != nil {
				fmt.Println(err)
				return
			}
			fmt.Println("Hmmm... What does hero truly need?")
			for i, f := range formats.Formats {
				fmt.Printf("%v: %v\n", i+1, f.Format)
			}
			desiredFormat := ""
			fmt.Scan(&desiredFormat)
			// Let mpv's own ytdl hook pick the format we were given.
			c.SetProperty("ytdl-format", desiredFormat)
			c.Loadfile(os.Args[len(os.Args)-1], mpv.LoadFileModeReplace)
			return
		}
	}
}
mpvd: add flag to just print the URL
package main
import (
"encoding/json"
"flag"
"fmt"
"net/url"
"os"
"os/exec"
"strings"
"github.com/blang/mpv"
"github.com/cnt0/twitch-streamsniper/api"
)
const (
	// ytdl is the downloader binary used to probe stream formats.
	ytdl = "youtube-dl"
	// mpvSocket is the IPC socket of the running mpv instance.
	mpvSocket = "/tmp/mpvsocket"
)

// printURL, when set via -a, prints the resolved stream URL to stdout
// instead of loading it into mpv.
var printURL = flag.Bool("a", false, "print URL instead of opening stream in mpv")
// processStream either prints the resolved stream address (-a flag) or
// tells the running mpv instance to replace its current file with it.
func processStream(mpvClient *mpv.Client, addr string) {
	if !*printURL {
		mpvClient.Loadfile(addr, mpv.LoadFileModeReplace)
		return
	}
	fmt.Println(addr)
}
// init parses CLI flags before main runs.
// NOTE(review): parsing flags in init is unconventional — flag.Parse is
// normally called at the top of main; confirm nothing relies on this.
func init() {
	flag.Parse()
}
// main resolves the stream URL given as the last CLI argument. For
// twitch.tv links it auto-selects the source/1080p60 format (or asks the
// user); for YouTube links it asks for a ytdl format string. The result is
// either printed (-a) or loaded into mpv via its IPC socket.
//
// Fix: the interactive index is now validated — previously a failed
// fmt.Scan or an out-of-range number caused an index-out-of-range panic.
func main() {
	c := mpv.NewClient(mpv.NewIPCClient(mpvSocket))
	target := os.Args[len(os.Args)-1]
	u, err := url.Parse(target)
	if err != nil {
		return
	}
	hostname := strings.ToLower(u.Hostname())
	var formats struct {
		Formats []api.FormatItem `json:"formats"`
	}
	if strings.HasSuffix(hostname, "twitch.tv") {
		data, err := exec.Command(ytdl, "-J", "--skip-download", target).Output()
		if err != nil {
			fmt.Println(err)
			return
		}
		// youtube-dl emits non-JSON output for offline channels.
		if err := json.Unmarshal(data, &formats); err != nil {
			fmt.Printf("%v is likely offline\n", u.Path)
			return
		}
		// Prefer the source stream (or 1080p60) when available.
		for _, f := range formats.Formats {
			s := strings.ToLower(f.Format)
			if strings.Contains(s, "source") || strings.Contains(s, "1080p60") {
				processStream(c, f.URL)
				return
			}
		}
		fmt.Println("Hmmm... What does hero truly need?")
		for i, f := range formats.Formats {
			fmt.Printf("%v: %v\n", i+1, f.Format)
		}
		var idx int
		// Validate the 1-based selection before indexing.
		if _, err := fmt.Scan(&idx); err != nil || idx < 1 || idx > len(formats.Formats) {
			fmt.Println("invalid selection")
			return
		}
		processStream(c, formats.Formats[idx-1].URL)
		return
	}
	if strings.HasSuffix(hostname, "youtube.com") || strings.HasSuffix(hostname, "youtu.be") {
		data, err := exec.Command(ytdl, "-J", "--skip-download", target).Output()
		if err != nil {
			fmt.Println(err)
			return
		}
		if err := json.Unmarshal(data, &formats); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("Hmmm... What does hero truly need?")
		for i, f := range formats.Formats {
			fmt.Printf("%v: %v\n", i+1, f.Format)
		}
		desiredFormat := ""
		if _, err := fmt.Scan(&desiredFormat); err != nil {
			return
		}
		// Let mpv's own ytdl hook pick the requested format.
		c.SetProperty("ytdl-format", desiredFormat)
		processStream(c, target)
		return
	}
}
|
package mqtt
import (
"bytes"
"reflect"
"testing"
gbt "github.com/huin/gobinarytest"
)
// bitCnt is not referenced anywhere in this file; presumably left over or
// used by other files in the package — TODO(review): confirm and remove if dead.
var bitCnt = uint32(0)
// Test encodes a fully-populated Connect message, checks the wire bytes
// against the MQTT 3.1 layout, then decodes those expected bytes and
// verifies the round trip.
//
// Fix: the mismatch Errorf previously passed (msg, decodedMsg) so the
// "got"/"expected" labels were swapped; the decoded value is the "got".
func Test(t *testing.T) {
	msg := Connect{
		ProtocolName:    "MQIsdp",
		ProtocolVersion: 3,
		UsernameFlag:    true,
		PasswordFlag:    true,
		WillRetain:      false,
		WillQos:         1,
		WillFlag:        true,
		CleanSession:    true,
		KeepAliveTimer:  10,
		ClientId:        "xixihaha",
		WillTopic:       "topic",
		WillMessage:     "message",
		Username:        "name",
		Password:        "pwd",
	}

	expected := gbt.InOrder{
		gbt.Named{"Header byte", gbt.Literal{0x10}},
		gbt.Named{"Remaining length", gbt.Literal{12 + 5*2 + 8 + 5 + 7 + 4 + 3}},

		// Extended headers for CONNECT:
		gbt.Named{"Protocol name", gbt.InOrder{gbt.Literal{0x00, 0x06}, gbt.Literal("MQIsdp")}},
		gbt.Named{
			"Extended headers for CONNECT",
			gbt.Literal{
				0x03,       // Protocol version number
				0xce,       // Connect flags
				0x00, 0x0a, // Keep alive timer
			},
		},

		// CONNECT payload:
		gbt.Named{"Client identifier", gbt.InOrder{gbt.Literal{0x00, 0x08}, gbt.Literal("xixihaha")}},
		gbt.Named{"Will topic", gbt.InOrder{gbt.Literal{0x00, 0x05}, gbt.Literal("topic")}},
		gbt.Named{"Will message", gbt.InOrder{gbt.Literal{0x00, 0x07}, gbt.Literal("message")}},
		gbt.Named{"Username", gbt.InOrder{gbt.Literal{0x00, 0x04}, gbt.Literal("name")}},
		gbt.Named{"Password", gbt.InOrder{gbt.Literal{0x00, 0x03}, gbt.Literal("pwd")}},
	}

	encodedBuf := new(bytes.Buffer)
	if err := msg.Encode(encodedBuf); err != nil {
		t.Errorf("Unexpected error during encoding: %v", err)
	} else if err = gbt.Matches(expected, encodedBuf.Bytes()); err != nil {
		t.Errorf("Unexpected encoding output: %v", err)
	}

	expectedBuf := new(bytes.Buffer)
	expected.Write(expectedBuf)

	if decodedMsg, err := DecodeOneMessage(expectedBuf); err != nil {
		t.Errorf("Unexpected error during decoding: %v", err)
	} else if !reflect.DeepEqual(&msg, decodedMsg) {
		t.Errorf("Decoded value mismatch\n     got = %#v\nexpected = %#v", decodedMsg, &msg)
	}
}
// TestDecodeLength checks decodeLength against known encodings, including
// the boundary values taken from the MQTT 3.1 specification.
func TestDecodeLength(t *testing.T) {
	cases := []struct {
		Expected int32
		Bytes    []byte
	}{
		{0, []byte{0}},
		{1, []byte{1}},
		{20, []byte{20}},

		// Boundary conditions used as tests taken from MQTT 3.1 spec.
		{0, []byte{0x00}},
		{127, []byte{0x7F}},
		{128, []byte{0x80, 0x01}},
		{16383, []byte{0xFF, 0x7F}},
		{16384, []byte{0x80, 0x80, 0x01}},
		{2097151, []byte{0xFF, 0xFF, 0x7F}},
		{2097152, []byte{0x80, 0x80, 0x80, 0x01}},
		{268435455, []byte{0xFF, 0xFF, 0xFF, 0x7F}},
	}

	for _, c := range cases {
		got := decodeLength(bytes.NewBuffer(c.Bytes))
		if got != c.Expected {
			t.Errorf("Test %v: got %d", c, got)
		}
	}
}
Refactor test into table.
package mqtt
import (
"bytes"
"reflect"
"testing"
gbt "github.com/huin/gobinarytest"
)
// bitCnt is not referenced anywhere in this file; presumably left over or
// used by other files in the package — TODO(review): confirm and remove if dead.
var bitCnt = uint32(0)
// Test round-trips each table entry: the message is encoded and its wire
// bytes matched against the expected MQTT 3.1 layout, then the expected
// bytes are decoded and compared to the original message.
func Test(t *testing.T) {
	tests := []struct {
		Msg      Message
		Expected gbt.Matcher
	}{
		{
			Msg: &Connect{
				ProtocolName:    "MQIsdp",
				ProtocolVersion: 3,
				UsernameFlag:    true,
				PasswordFlag:    true,
				WillRetain:      false,
				WillQos:         1,
				WillFlag:        true,
				CleanSession:    true,
				KeepAliveTimer:  10,
				ClientId:        "xixihaha",
				WillTopic:       "topic",
				WillMessage:     "message",
				Username:        "name",
				Password:        "pwd",
			},
			Expected: gbt.InOrder{
				gbt.Named{"Header byte", gbt.Literal{0x10}},
				gbt.Named{"Remaining length", gbt.Literal{12 + 5*2 + 8 + 5 + 7 + 4 + 3}},

				// Extended headers for CONNECT:
				gbt.Named{"Protocol name", gbt.InOrder{gbt.Literal{0x00, 0x06}, gbt.Literal("MQIsdp")}},
				gbt.Named{
					"Extended headers for CONNECT",
					gbt.Literal{
						0x03,       // Protocol version number
						0xce,       // Connect flags
						0x00, 0x0a, // Keep alive timer
					},
				},

				// CONNECT payload:
				gbt.Named{"Client identifier", gbt.InOrder{gbt.Literal{0x00, 0x08}, gbt.Literal("xixihaha")}},
				gbt.Named{"Will topic", gbt.InOrder{gbt.Literal{0x00, 0x05}, gbt.Literal("topic")}},
				gbt.Named{"Will message", gbt.InOrder{gbt.Literal{0x00, 0x07}, gbt.Literal("message")}},
				gbt.Named{"Username", gbt.InOrder{gbt.Literal{0x00, 0x04}, gbt.Literal("name")}},
				gbt.Named{"Password", gbt.InOrder{gbt.Literal{0x00, 0x03}, gbt.Literal("pwd")}},
			},
		},
	}

	for _, test := range tests {
		// Encode and compare against the expected wire format.
		encodedBuf := new(bytes.Buffer)
		if err := test.Msg.Encode(encodedBuf); err != nil {
			t.Errorf("Unexpected error during encoding: %v", err)
		} else if err = gbt.Matches(test.Expected, encodedBuf.Bytes()); err != nil {
			t.Errorf("Unexpected encoding output: %v", err)
		}

		// Decode the expected bytes and compare to the original message.
		expectedBuf := new(bytes.Buffer)
		test.Expected.Write(expectedBuf)

		if decodedMsg, err := DecodeOneMessage(expectedBuf); err != nil {
			t.Errorf("Unexpected error during decoding: %v", err)
		} else if !reflect.DeepEqual(test.Msg, decodedMsg) {
			t.Errorf("Decoded value mismatch\n     got = %#v\nexpected = %#v",
				decodedMsg, test.Msg)
		}
	}
}
// TestDecodeLength checks decodeLength against known encodings, including
// the boundary values taken from the MQTT 3.1 specification.
func TestDecodeLength(t *testing.T) {
	tests := []struct {
		Expected int32
		Bytes    []byte
	}{
		{0, []byte{0}},
		{1, []byte{1}},
		{20, []byte{20}},

		// Boundary conditions used as tests taken from MQTT 3.1 spec.
		{0, []byte{0x00}},
		{127, []byte{0x7F}},
		{128, []byte{0x80, 0x01}},
		{16383, []byte{0xFF, 0x7F}},
		{16384, []byte{0x80, 0x80, 0x01}},
		{2097151, []byte{0xFF, 0xFF, 0x7F}},
		{2097152, []byte{0x80, 0x80, 0x80, 0x01}},
		{268435455, []byte{0xFF, 0xFF, 0xFF, 0x7F}},
	}

	for _, test := range tests {
		buf := bytes.NewBuffer(test.Bytes)
		if result := decodeLength(buf); test.Expected != result {
			t.Errorf("Test %v: got %d", test, result)
		}
	}
}
|
package objc
import (
"github.com/mkrautz/variadic"
"math"
"reflect"
"unsafe"
)
func unpackStruct(val reflect.Value) []uintptr {
memArgs := []uintptr{}
for i := 0; i < val.NumField(); i++ {
v := val.Field(i)
kind := v.Kind()
switch kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
memArgs = append(memArgs, uintptr(v.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
memArgs = append(memArgs, uintptr(v.Uint()))
case reflect.Float32, reflect.Float64:
memArgs = append(memArgs, uintptr(math.Float64bits(v.Float())))
case reflect.Ptr:
memArgs = append(memArgs, val.Pointer())
case reflect.Struct:
args := unpackStruct(v)
memArgs = append(memArgs, args...)
}
}
return memArgs
}
// SendMsg sends the Objective-C message named by selector to obj with the
// given arguments via objc_msgSend, splitting them into integer, float and
// memory (struct) argument classes for the variadic call.
//
// Fix: sending to a nil object now returns obj itself (an object with a
// zero ptr) rather than a nil interface value, so results remain
// chainable — matching ObjC's nil-messaging semantics.
func (obj object) SendMsg(selector string, args ...interface{}) Object {
	// Keep ObjC semantics: messages can be sent to nil objects,
	// and the response is the nil object itself.
	if obj.ptr == 0 {
		return obj
	}

	sel := selectorWithName(selector)
	if sel == nil {
		return nil
	}

	intArgs := []uintptr{}
	floatArgs := []uintptr{}
	memArgs := []uintptr{}

	// Type encoding of the target method; used to disambiguate float
	// widths and the return value class.
	typeInfo := simpleTypeInfoForMethod(obj, selector)

	for i, arg := range args {
		switch t := arg.(type) {
		case Object:
			intArgs = append(intArgs, t.Pointer())
		case uintptr:
			intArgs = append(intArgs, t)
		case int:
			intArgs = append(intArgs, uintptr(t))
		case uint:
			intArgs = append(intArgs, uintptr(t))
		case int8:
			intArgs = append(intArgs, uintptr(t))
		case uint8:
			intArgs = append(intArgs, uintptr(t))
		case int16:
			intArgs = append(intArgs, uintptr(t))
		case uint16:
			intArgs = append(intArgs, uintptr(t))
		case int32:
			intArgs = append(intArgs, uintptr(t))
		case uint32:
			intArgs = append(intArgs, uintptr(t))
		case int64:
			intArgs = append(intArgs, uintptr(t))
		case uint64:
			intArgs = append(intArgs, uintptr(t))
		case bool:
			if t {
				intArgs = append(intArgs, uintptr(1))
			} else {
				intArgs = append(intArgs, uintptr(0))
			}
		case float32:
			floatArgs = append(floatArgs, uintptr(math.Float32bits(t)))
		// Float64 is a bit of a special case. Since SendMsg is a variadic
		// Go function, implicit floats will be of type float64, but we can't
		// be sure that the receiver expects that; they might expect a float32
		// instead.
		//
		// To remedy this, we query the selector's type encoding, and check
		// whether it expects a 32-bit or 64-bit float.
		case float64:
			typeEnc := string(typeInfo[i+3])
			switch typeEnc {
			case encFloat:
				floatArgs = append(floatArgs, uintptr(math.Float32bits(float32(t))))
			case encDouble:
				floatArgs = append(floatArgs, uintptr(math.Float64bits(t)))
			default:
				panic("objc: float argument mismatch")
			}
		default:
			// Fall back to reflection for pointers, uintptr-kinds and
			// structs (passed in memory).
			val := reflect.ValueOf(args[i])
			switch val.Kind() {
			case reflect.Ptr:
				intArgs = append(intArgs, val.Pointer())
			case reflect.Uintptr:
				intArgs = append(intArgs, uintptr(val.Uint()))
			case reflect.Struct:
				args := unpackStruct(val)
				memArgs = append(memArgs, args...)
			default:
				panic("unhandled kind")
			}
		}
	}

	fc := variadic.NewFunctionCall("objc_msgSend")
	fc.Words[0] = obj.Pointer()
	fc.Words[1] = uintptr(sel)

	if len(memArgs) > 0 {
		fc.Memory = unsafe.Pointer(&memArgs[0])
		fc.NumMemory = int64(len(memArgs))
	}

	// Register limits imposed by the variadic call shim: 4 integer words
	// (after self and _cmd) and 8 float words.
	if len(intArgs) > 4 {
		panic("too many int args")
	}
	if len(floatArgs) > 8 {
		panic("too many float args")
	}

	for i, v := range intArgs {
		fc.Words[i+2] = v
	}
	fc.NumFloat = int64(len(floatArgs))
	for i, v := range floatArgs {
		fc.Words[6+i] = v
	}

	// Float returns come back through a different register class; re-box
	// their bit patterns so callers can recover them.
	if len(typeInfo) > 0 {
		retEnc := string(typeInfo[0])
		if retEnc == encFloat {
			return object{ptr: uintptr(math.Float32bits(fc.CallFloat32()))}
		} else if retEnc == encDouble {
			return object{ptr: uintptr(math.Float64bits(fc.CallFloat64()))}
		}
	}

	return object{ptr: fc.Call()}
}
msg: fix sending to a nil Object.
package objc
import (
"github.com/mkrautz/variadic"
"math"
"reflect"
"unsafe"
)
func unpackStruct(val reflect.Value) []uintptr {
memArgs := []uintptr{}
for i := 0; i < val.NumField(); i++ {
v := val.Field(i)
kind := v.Kind()
switch kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
memArgs = append(memArgs, uintptr(v.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
memArgs = append(memArgs, uintptr(v.Uint()))
case reflect.Float32, reflect.Float64:
memArgs = append(memArgs, uintptr(math.Float64bits(v.Float())))
case reflect.Ptr:
memArgs = append(memArgs, val.Pointer())
case reflect.Struct:
args := unpackStruct(v)
memArgs = append(memArgs, args...)
}
}
return memArgs
}
// SendMsg sends the Objective-C message named by selector to obj with the
// given arguments via objc_msgSend, splitting them into integer, float and
// memory (struct) argument classes for the variadic call. Sending to a
// nil object returns the nil object itself, matching ObjC semantics.
func (obj object) SendMsg(selector string, args ...interface{}) Object {
	// Keep ObjC semantics: messages can be sent to nil objects,
	// but the response is nil.
	if obj.ptr == 0 {
		return obj
	}

	sel := selectorWithName(selector)
	if sel == nil {
		return nil
	}

	intArgs := []uintptr{}
	floatArgs := []uintptr{}
	memArgs := []uintptr{}

	// Type encoding of the target method; used below to disambiguate
	// float widths and the return value class.
	typeInfo := simpleTypeInfoForMethod(obj, selector)

	for i, arg := range args {
		switch t := arg.(type) {
		case Object:
			intArgs = append(intArgs, t.Pointer())
		case uintptr:
			intArgs = append(intArgs, t)
		case int:
			intArgs = append(intArgs, uintptr(t))
		case uint:
			intArgs = append(intArgs, uintptr(t))
		case int8:
			intArgs = append(intArgs, uintptr(t))
		case uint8:
			intArgs = append(intArgs, uintptr(t))
		case int16:
			intArgs = append(intArgs, uintptr(t))
		case uint16:
			intArgs = append(intArgs, uintptr(t))
		case int32:
			intArgs = append(intArgs, uintptr(t))
		case uint32:
			intArgs = append(intArgs, uintptr(t))
		case int64:
			intArgs = append(intArgs, uintptr(t))
		case uint64:
			intArgs = append(intArgs, uintptr(t))
		case bool:
			if t {
				intArgs = append(intArgs, uintptr(1))
			} else {
				intArgs = append(intArgs, uintptr(0))
			}
		case float32:
			floatArgs = append(floatArgs, uintptr(math.Float32bits(t)))
		// Float64 is a bit of a special case. Since SendMsg is a variadic
		// Go function, implicit floats will be of type float64, but we can't
		// be sure that the receiver expects that; they might expect a float32
		// instead.
		//
		// To remedy this, we query the selector's type encoding, and check
		// whether it expects a 32-bit or 64-bit float.
		case float64:
			typeEnc := string(typeInfo[i+3])
			switch typeEnc {
			case encFloat:
				floatArgs = append(floatArgs, uintptr(math.Float32bits(float32(t))))
			case encDouble:
				floatArgs = append(floatArgs, uintptr(math.Float64bits(t)))
			default:
				panic("objc: float argument mismatch")
			}
		default:
			// Fall back to reflection for pointers, uintptr-kinds and
			// structs (passed in memory).
			val := reflect.ValueOf(args[i])
			switch val.Kind() {
			case reflect.Ptr:
				intArgs = append(intArgs, val.Pointer())
			case reflect.Uintptr:
				intArgs = append(intArgs, uintptr(val.Uint()))
			case reflect.Struct:
				args := unpackStruct(val)
				memArgs = append(memArgs, args...)
			default:
				panic("unhandled kind")
			}
		}
	}

	fc := variadic.NewFunctionCall("objc_msgSend")
	fc.Words[0] = obj.Pointer()
	fc.Words[1] = uintptr(sel)

	if len(memArgs) > 0 {
		fc.Memory = unsafe.Pointer(&memArgs[0])
		fc.NumMemory = int64(len(memArgs))
	}

	// Register limits imposed by the variadic call shim: 4 integer words
	// (after self and _cmd) and 8 float words.
	if len(intArgs) > 4 {
		panic("too many int args")
	}
	if len(floatArgs) > 8 {
		panic("too many float args")
	}

	for i, v := range intArgs {
		fc.Words[i+2] = v
	}
	fc.NumFloat = int64(len(floatArgs))
	for i, v := range floatArgs {
		fc.Words[6+i] = v
	}

	// Float returns come back through a different register class; re-box
	// their bit patterns so callers can recover them.
	if len(typeInfo) > 0 {
		retEnc := string(typeInfo[0])
		if retEnc == encFloat {
			return object{ptr: uintptr(math.Float32bits(fc.CallFloat32()))}
		} else if retEnc == encDouble {
			return object{ptr: uintptr(math.Float64bits(fc.CallFloat64()))}
		}
	}

	return object{ptr: fc.Call()}
}
|
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysql
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"strconv"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core/ctypes"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
const (
	// name/version/pluginType identify this publisher to the snap framework.
	name       = "mysql"
	version    = 5
	pluginType = plugin.PublisherPluginType
)

// mysqlPublisher publishes snap metrics to a MySQL table.
type mysqlPublisher struct {
}

// NewMySQLPublisher returns a new, stateless MySQL publisher.
func NewMySQLPublisher() *mysqlPublisher {
	return &mysqlPublisher{}
}
// Publish sends data to a MySQL server
func (s *mysqlPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := log.New()
logger.Println("Publishing started")
var metrics []plugin.PluginMetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.Printf("Error decoding: error=%v content=%v", err, content)
return err
}
default:
logger.Printf("Error unknown content type '%v'", contentType)
return errors.New(fmt.Sprintf("Unknown content type '%s'", contentType))
}
logger.Printf("publishing %v to %v", metrics, config)
// Open connection and ping to make sure it works
username := config["username"].(ctypes.ConfigValueStr).Value
password := config["password"].(ctypes.ConfigValueStr).Value
database := config["database"].(ctypes.ConfigValueStr).Value
tableName := config["tablename"].(ctypes.ConfigValueStr).Value
tableColumns := "(timestamp VARCHAR(200), source_column VARCHAR(200), key_column VARCHAR(200), value_column VARCHAR(200))"
db, err := sql.Open("mysql", username+":"+password+"@/"+database)
defer db.Close()
if err != nil {
logger.Printf("Error: %v", err)
return err
}
err = db.Ping()
if err != nil {
logger.Printf("Error: %v", err)
return err
}
// Create the table if it's not already there
_, err = db.Exec("CREATE TABLE IF NOT EXISTS" + " " + tableName + " " + tableColumns)
if err != nil {
logger.Printf("Error: %v", err)
return err
}
// Put the values into the database with the current time
tableValues := "VALUES( ?, ?, ?, ? )"
insert, err := db.Prepare("INSERT INTO" + " " + tableName + " " + tableValues)
if err != nil {
logger.Printf("Error: %v", err)
return err
}
var key, value string
for _, m := range metrics {
key = sliceToString(m.Namespace())
value, err = interfaceToString(m.Data())
if err != nil {
logger.Printf("Error: %v", err)
return err
}
_, err := insert.Exec(m.Timestamp(), m.Source(), key, value)
if err != nil {
panic(err)
}
}
return nil
}
// Meta returns the plugin metadata consumed by the snap framework.
func Meta() *plugin.PluginMeta {
	return plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})
}
// GetConfigPolicy declares the publisher's required config keys
// (username, password, database, tablename) with their defaults.
// Rule-construction failures are programmer errors and panic via handleErr.
func (f *mysqlPublisher) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
	cp := cpolicy.New()
	config := cpolicy.NewPolicyNode()

	username, err := cpolicy.NewStringRule("username", true, "root")
	handleErr(err)
	username.Description = "Username to login to the MySQL server"

	password, err := cpolicy.NewStringRule("password", true, "root")
	handleErr(err)
	password.Description = "Password to login to the MySQL server"

	database, err := cpolicy.NewStringRule("database", true, "SNAP_TEST")
	handleErr(err)
	database.Description = "The MySQL database that data will be pushed to"

	tableName, err := cpolicy.NewStringRule("tablename", true, "info")
	handleErr(err)
	tableName.Description = "The MySQL table within the database where information will be stored"

	config.Add(username, password, database, tableName)
	cp.Add([]string{""}, config)
	return cp, nil
}
// handleErr panics when e is non-nil; used to abort on unrecoverable
// config-policy construction errors.
func handleErr(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// sliceToString flattens a namespace slice into one comma-separated string.
func sliceToString(slice []string) string {
	const sep = ", "
	return strings.Join(slice, sep)
}
// interfaceToString renders a metric value as a string for storage.
// Supported types: []string, []int, int, string — slices become
// comma-separated lists. Any other type yields an empty string and an
// "unsupported type" error.
func interfaceToString(face interface{}) (string, error) {
	switch val := face.(type) {
	case []string:
		return strings.Join(val, ", "), nil
	case []int:
		// Convert each element once, then join — avoids the quadratic
		// string concatenation of the previous hand-rolled loop.
		parts := make([]string, len(val))
		for i, n := range val {
			parts[i] = strconv.Itoa(n)
		}
		return strings.Join(parts, ", "), nil
	case int:
		return strconv.Itoa(val), nil
	case string:
		return val, nil
	default:
		return "", errors.New("unsupported type")
	}
}
Update version to 6 with change in snap dependency.
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysql
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"strconv"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core/ctypes"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
const (
	// name/version/pluginType identify this publisher to the snap framework.
	name       = "mysql"
	version    = 6
	pluginType = plugin.PublisherPluginType
)

// mysqlPublisher publishes snap metrics to a MySQL table.
type mysqlPublisher struct {
}

// NewMySQLPublisher returns a new, stateless MySQL publisher.
func NewMySQLPublisher() *mysqlPublisher {
	return &mysqlPublisher{}
}
// Publish sends data to a MySQL server
func (s *mysqlPublisher) Publish(contentType string, content []byte, config map[string]ctypes.ConfigValue) error {
logger := log.New()
logger.Println("Publishing started")
var metrics []plugin.PluginMetricType
switch contentType {
case plugin.SnapGOBContentType:
dec := gob.NewDecoder(bytes.NewBuffer(content))
if err := dec.Decode(&metrics); err != nil {
logger.Printf("Error decoding: error=%v content=%v", err, content)
return err
}
default:
logger.Printf("Error unknown content type '%v'", contentType)
return errors.New(fmt.Sprintf("Unknown content type '%s'", contentType))
}
logger.Printf("publishing %v to %v", metrics, config)
// Open connection and ping to make sure it works
username := config["username"].(ctypes.ConfigValueStr).Value
password := config["password"].(ctypes.ConfigValueStr).Value
database := config["database"].(ctypes.ConfigValueStr).Value
tableName := config["tablename"].(ctypes.ConfigValueStr).Value
tableColumns := "(timestamp VARCHAR(200), source_column VARCHAR(200), key_column VARCHAR(200), value_column VARCHAR(200))"
db, err := sql.Open("mysql", username+":"+password+"@/"+database)
defer db.Close()
if err != nil {
logger.Printf("Error: %v", err)
return err
}
err = db.Ping()
if err != nil {
logger.Printf("Error: %v", err)
return err
}
// Create the table if it's not already there
_, err = db.Exec("CREATE TABLE IF NOT EXISTS" + " " + tableName + " " + tableColumns)
if err != nil {
logger.Printf("Error: %v", err)
return err
}
// Put the values into the database with the current time
tableValues := "VALUES( ?, ?, ?, ? )"
insert, err := db.Prepare("INSERT INTO" + " " + tableName + " " + tableValues)
if err != nil {
logger.Printf("Error: %v", err)
return err
}
var key, value string
for _, m := range metrics {
key = sliceToString(m.Namespace())
value, err = interfaceToString(m.Data())
if err != nil {
logger.Printf("Error: %v", err)
return err
}
_, err := insert.Exec(m.Timestamp(), m.Source(), key, value)
if err != nil {
panic(err)
}
}
return nil
}
// Meta returns the snap plugin metadata: name, version, publisher type,
// and GOB as both the accepted and returned content type.
func Meta() *plugin.PluginMeta {
	return plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})
}
// GetConfigPolicy declares the plugin's required string settings —
// username, password, database and tablename — together with their
// defaults. Rule-construction failures are programming errors and panic
// via handleErr.
func (f *mysqlPublisher) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
	cp := cpolicy.New()
	config := cpolicy.NewPolicyNode()
	username, err := cpolicy.NewStringRule("username", true, "root")
	handleErr(err)
	username.Description = "Username to login to the MySQL server"
	password, err := cpolicy.NewStringRule("password", true, "root")
	handleErr(err)
	password.Description = "Password to login to the MySQL server"
	database, err := cpolicy.NewStringRule("database", true, "SNAP_TEST")
	handleErr(err)
	database.Description = "The MySQL database that data will be pushed to"
	tableName, err := cpolicy.NewStringRule("tablename", true, "info")
	handleErr(err)
	tableName.Description = "The MySQL table within the database where information will be stored"
	config.Add(username, password, database, tableName)
	// The policy applies at the root namespace.
	cp.Add([]string{""}, config)
	return cp, nil
}
func handleErr(e error) {
if e != nil {
panic(e)
}
}
// sliceToString renders a namespace slice as one comma-separated string.
func sliceToString(slice []string) string {
	var b strings.Builder
	for i, s := range slice {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(s)
	}
	return b.String()
}
// interfaceToString converts a metric value to its string representation.
// Supported types: []string, []int, int, string. Any other type yields an
// empty string and an "unsupported type" error.
func interfaceToString(face interface{}) (string, error) {
	switch val := face.(type) {
	case []string:
		return strings.Join(val, ", "), nil
	case []int:
		// Convert each element and join once, instead of growing a string
		// in a loop (quadratic) with redundant length special cases.
		parts := make([]string, len(val))
		for i, n := range val {
			parts[i] = strconv.Itoa(n)
		}
		return strings.Join(parts, ", "), nil
	case int:
		return strconv.Itoa(val), nil
	case string:
		return val, nil
	default:
		return "", errors.New("unsupported type")
	}
}
|
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
var (
	ErrNotSupported   = fmt.Errorf("N1QL:Not supported")
	ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
	ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
	ErrInternalError  = fmt.Errorf("N1QL: Internal Error")
)

// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST     = "127.0.0.1"
	N1QL_DEFAULT_PORT     = 8093
	// NOTE(review): in Go, ^ is bitwise XOR, so 2 ^ 10 evaluates to 8 —
	// not 1024 and not "1 MB" as the trailing comment claims. Confirm the
	// intended value before relying on it.
	N1QL_POOL_SIZE         = 2 ^ 10 // 1 MB
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	// LOCALHOST starts as the IPv4 default; getQueryApi may switch it to
	// "[::1]" when the endpoint turns out to be IPv6.
	LOCALHOST = N1QL_DEFAULT_HOST
)

// flags
var (
	// When true, metrics/status/requestID from responses are returned as rows.
	N1QL_PASSTHROUGH_MODE = false
)

// Rest API query parameters
var QueryParams map[string]string

// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string

// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true

// init allocates the query-parameter map so Set/UnsetQueryParams are safe
// to call without further setup.
func init() {
	QueryParams = make(map[string]string)
}
// SetQueryParams registers a REST query parameter to be sent with every
// subsequent request. The key must be non-empty.
func SetQueryParams(key string, value string) error {
	if len(key) == 0 {
		return fmt.Errorf("N1QL: Key not specified")
	}
	QueryParams[key] = value
	return nil
}
// UnsetQueryParams removes a previously registered REST query parameter.
// The key must be non-empty.
func UnsetQueryParams(key string) error {
	if len(key) == 0 {
		return fmt.Errorf("N1QL: Key not specified")
	}
	delete(QueryParams, key)
	return nil
}
// SetPassthroughMode toggles passthrough mode: when enabled, the server's
// metrics, status and requestID are surfaced as rows by performQuery.
func SetPassthroughMode(val bool) {
	N1QL_PASSTHROUGH_MODE = val
}

// SetUsernamePassword stores the credentials used for cluster and query
// requests.
func SetUsernamePassword(u, p string) {
	username = u
	password = p
}

// hasUsernamePassword reports whether any credential has been set.
func hasUsernamePassword() bool {
	return username != "" || password != ""
}

// SetSkipVerify controls whether TLS certificate verification is skipped
// for https endpoints.
func SetSkipVerify(skip bool) {
	skipVerify = skip
}
// implements driver.Conn interface
//
// n1qlConn is a connection to a set of query-service endpoints. queryAPIs
// is guarded by lock because failed nodes are removed during request retry
// (see doClientRequest).
type n1qlConn struct {
	clusterAddr string
	queryAPIs   []string
	client      *http.Client
	lock        sync.RWMutex
}

// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
// discoverN1QLService scans the pool's NodesExt entries for a node that
// advertises the "n1ql" service and returns its "host:port" address
// ("[host]:port" for IPv6), or "" when no node offers the service.
//
// When a node does not report a hostname, the host is derived from the
// caller-supplied connect string name instead.
func discoverN1QLService(name string, ps couchbase.PoolServices) string {
	for _, ns := range ps.NodesExt {
		if ns.Services != nil {
			if port, ok := ns.Services["n1ql"]; ok == true {
				var hostname string
				//n1ql service found
				var ipv6 = false
				if ns.Hostname == "" {
					// No hostname advertised: fall back to the host part
					// of the connect string, stripping any http(s) scheme.
					hostnm := strings.TrimSpace(name)
					if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
						hostUrl, _ := url.Parse(name)
						hostnm = hostUrl.Host
					}
					hostname, _, ipv6, _ = HostNameandPort(hostnm)
				} else {
					hostname = ns.Hostname
				}
				if ipv6 {
					return fmt.Sprintf("[%s]:%d", hostname, port)
				} else {
					return fmt.Sprintf("%s:%d", hostname, port)
				}
			}
		}
	}
	return ""
}
// cbUserAgent identifies this driver in the CB-User-Agent request header.
var cbUserAgent string = "godbc/" + util.VERSION

// SetCBUserAgentHeader overrides the CB-User-Agent header value.
func SetCBUserAgentHeader(v string) {
	cbUserAgent = v
}

// setCBUserAgent stamps the CB-User-Agent header onto an outgoing request.
func setCBUserAgent(request *http.Request) {
	request.Header.Add("CB-User-Agent", cbUserAgent)
}
// getQueryApi asks the admin endpoint of a known n1ql node for the full
// list of query-service endpoints in the cluster.
//
// Any LOCALHOST occurrences in the returned endpoints are rewritten to the
// actual hostname of n1qlEndPoint. Side effect: the package-level
// LOCALHOST default is switched to "[::1]" when the endpoint is IPv6.
func getQueryApi(n1qlEndPoint string) ([]string, error) {
	queryAdmin := "http://" + n1qlEndPoint + "/admin/clusters/default/nodes"
	request, _ := http.NewRequest("GET", queryAdmin, nil)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	queryAPIs := make([]string, 0)
	hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
	}
	resp, err := HTTPClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("%v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var nodesInfo []interface{}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &nodesInfo); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	// Collect every node's advertised queryEndpoint.
	// NOTE(review): a node entry lacking a string "queryEndpoint" would make
	// this type assertion panic — confirm the admin API always includes it.
	for _, queryNode := range nodesInfo {
		switch queryNode := queryNode.(type) {
		case map[string]interface{}:
			queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
		}
	}
	if ipv6 {
		hostname = "[" + hostname + "]"
		LOCALHOST = "[::1]"
	}
	// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
	for i, qa := range queryAPIs {
		queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
	}
	if len(queryAPIs) == 0 {
		return nil, fmt.Errorf("Query endpoints not found")
	}
	return queryAPIs, nil
}
// OpenN1QLConnection establishes a connection for issuing N1QL queries.
//
// name may be either a cluster endpoint (query nodes are then discovered
// via the pools API) or a direct query-service endpoint (used as fallback
// when the cluster connection fails). The connection is validated by
// executing N1QL_DEFAULT_STATEMENT against the first query API.
func OpenN1QLConnection(name string) (*n1qlConn, error) {
	var queryAPIs []string
	if strings.HasPrefix(name, "https") && skipVerify {
		HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	//First check if the input string is a cluster endpoint
	couchbase.SetSkipVerify(skipVerify)
	var client couchbase.Client
	var err error
	if hasUsernamePassword() {
		client, err = couchbase.ConnectWithAuthCreds(name, username, password)
	} else {
		client, err = couchbase.Connect(name)
	}
	var perr error = nil
	if err != nil {
		// Not a cluster endpoint: remember the failure for later reporting
		// and fall back to treating name as a direct query endpoint.
		perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
		// If not cluster endpoint then check if query endpoint
		name = strings.TrimSuffix(name, "/")
		queryAPI := name + N1QL_SERVICE_ENDPOINT
		queryAPIs = make([]string, 1, 1)
		queryAPIs[0] = queryAPI
	} else {
		ps, err := client.GetPoolServices("default")
		if err != nil {
			return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
		}
		n1qlEndPoint := discoverN1QLService(name, ps)
		if n1qlEndPoint == "" {
			return nil, fmt.Errorf("N1QL: No query service found on this cluster")
		}
		queryAPIs, err = getQueryApi(n1qlEndPoint)
		if err != nil {
			return nil, err
		}
	}
	conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
	// Sanity-check the connection with a trivial statement; stripurl masks
	// any credentials embedded in returned error messages.
	request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
	if err != nil {
		return nil, err
	}
	resp, err := conn.client.Do(request)
	if err != nil {
		final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
		if perr != nil {
			final_error = final_error + "\n " + stripurl(perr.Error())
		}
		return nil, fmt.Errorf("%v", final_error)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
	}
	return conn, nil
}
// stripurl masks the password embedded in a URL inside an error message,
// replacing it with a single "*" so credentials are not leaked in logs.
//
// It finds the first "http..." token, parses it as a URL, and rewrites the
// "user:password@" portion as "user:*@". When the password contains special
// characters the replacement is applied once more per character, to strip
// each URL-encoded fragment as well. Messages without a URL, or with
// credentials that cannot be located, are returned unchanged (the original
// code sliced with -1 indexes and panicked in those cases).
func stripurl(inputstring string) string {
	// Detect http* within the string; nothing to mask when absent.
	startindex := strings.Index(inputstring, "http")
	if startindex < 0 {
		return inputstring
	}
	endindex := strings.Index(inputstring[startindex:], " ")
	if endindex < 0 {
		// The URL runs to the end of the message.
		endindex = len(inputstring) - startindex
	}
	inputurl := inputstring[startindex : startindex+endindex]
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, _ := u.Password()
	// Count the special symbols in the password: each may appear
	// URL-encoded in the message and needs its own replacement pass.
	num := 0
	for _, letter := range fmt.Sprintf("%v", pwd) {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// Locate the username in the original message...
	startindex = strings.Index(inputstring, uname)
	if startindex < 0 || startindex+len(uname)+1+len(pwd) > len(inputstring) {
		// Defensive: parsed credentials do not line up with the message.
		return inputstring
	}
	// ...and rebuild it with "*" substituted for the password.
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	// Replace all the special-character encodings, one pass each.
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// doClientRequest sends the query (or raw requestValues form body) to a
// randomly selected query node, retrying on the remaining nodes when one
// fails. A node whose request errors out is removed from conn.queryAPIs;
// when the last remaining node fails, an error is returned.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// Seed once per call, not once per retry iteration: time.Now().Unix()
	// has one-second granularity, so reseeding inside the loop made every
	// fast retry re-pick the same "random" node.
	rand.Seed(time.Now().Unix())
	for {
		var request *http.Request
		var err error

		// Select a query API at random for simple load distribution.
		numNodes := len(conn.queryAPIs)
		selectedNode := rand.Intn(numNodes)
		conn.lock.RLock()
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()

		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			if requestValues != nil {
				request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, _ = http.NewRequest("POST", queryAPI, nil)
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}

		resp, err := conn.client.Do(request)
		if err == nil {
			return resp, nil
		}
		// If this was the last node, give up.
		if numNodes == 1 {
			break
		}
		// Remove the node that failed from the list of query nodes and retry.
		conn.lock.Lock()
		conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
		conn.lock.Unlock()
	}
	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors flattens the "errors" field of a query response into a
// single human-readable string of "Code : <code> Message : <msg>" entries.
// Entries with a zero code or an empty message are skipped. When the
// payload does not match the expected shape (or nothing qualifies), a
// generic description of the value is returned instead.
func serializeErrors(errors interface{}) string {
	var errString string
	switch errors := errors.(type) {
	case []interface{}:
		for _, e := range errors {
			switch e := e.(type) {
			case map[string]interface{}:
				code, _ := e["code"]
				msg, _ := e["msg"]
				// JSON numbers unmarshal as float64, so the original
				// `code != 0` interface comparison (against int 0) was
				// always true; check the float64 zero explicitly too.
				if code != 0 && code != float64(0) && msg != "" {
					if errString != "" {
						errString = fmt.Sprintf("%v Code : %v Message : %v", errString, code, msg)
					} else {
						errString = fmt.Sprintf("Code : %v Message : %v", code, msg)
					}
				}
			}
		}
	}
	if errString != "" {
		return errString
	}
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare sends a PREPARE <query> statement to the server and returns a
// statement handle carrying the serialized prepared plan, its
// server-assigned name, its signature, and the number of "?" placeholders
// found in the query.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int
	query = "PREPARE " + query
	query, argCount = prepareQuery(query)
	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}
	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		// Best effort: an unmarshal failure leaves errs nil and still reports.
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}
	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			// The first element holds the prepared plan; keep both its
			// serialized form and its server-assigned name for execution.
			serialized, _ := json.Marshal(preparedResults[0])
			stmt.name = preparedResults[0].(map[string]interface{})["name"].(string)
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is not supported: this driver exposes no transaction support.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}

// Close is a no-op; the underlying HTTP client is shared and stays open.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the query and hands the raw, still-open response
// body to the caller, who is responsible for closing it. On a non-200
// status the body is returned alongside the error for inspection.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
	}
	return resp.Body, nil
}
// getDecoder wraps r in a streaming JSON decoder, rejecting a nil reader.
func getDecoder(r io.Reader) (*json.Decoder, error) {
	if r != nil {
		return json.NewDecoder(r), nil
	}
	return nil, fmt.Errorf("Failed to decode nil response.")
}
// performQuery issues the query and decodes the response envelope into
// rows. In passthrough mode the requestID, status, signature and metrics
// are also surfaced through the rows; otherwise any reported errors are
// attached to the rows (results and errors can legitimately coexist).
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}
	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}
	// Pick apart the response envelope; metrics/status/requestID are only
	// needed when they will be returned as rows (passthrough mode).
	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}
	// NOTE(review): a response without a "results" field leaves resultRows
	// nil, and the *resultRows dereferences below would panic — confirm the
	// query service always includes "results" on 200 responses.
	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be an error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
	}
	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
}
// Executes a query that returns a set of Rows.
// Select statements should use this interface
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	if len(args) > 0 {
		var argCount int
		// Rewrite "?" placeholders to $N, then substitute the given args.
		query, argCount = prepareQuery(query)
		if argCount != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
		}
		query, args = preparePositionalArgs(query, argCount, args)
	}
	return conn.performQuery(query, nil)
}

// QueryRaw behaves like Query but returns the raw response body instead of
// decoded rows; the caller must close it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) > 0 {
		var argCount int
		query, argCount = prepareQuery(query)
		if argCount != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
		}
		query, args = preparePositionalArgs(query, argCount, args)
	}
	return conn.performQueryRaw(query, nil)
}
// performExecRaw issues a statement and returns the raw, still-open
// response body; the caller must close it. On a non-200 status the body is
// returned alongside the error for inspection.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
	}
	return resp.Body, nil
}
// performExec issues a DML/DDL statement and summarizes the response: the
// mutation count becomes the result's affected-row count, and any reported
// errors are folded into the returned error (alongside the result).
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	var execErr error
	res := &n1qlResult{}
	for name, results := range resultMap {
		switch name {
		case "metrics":
			var metrics map[string]interface{}
			err := json.Unmarshal(*results, &metrics)
			if err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			// JSON numbers decode as float64; truncate to int64 rows.
			if mc, ok := metrics["mutationCount"]; ok {
				res.affectedRows = int64(mc.(float64))
			}
			break // redundant: Go switch cases do not fall through
		case "errors":
			var errs []interface{}
			_ = json.Unmarshal(*results, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Execer implementation. To be used for queries that do not return any rows
// such as Create Index, Insert, Upsert, Delete etc
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	if len(args) > 0 {
		var argCount int
		// Rewrite "?" placeholders to $N, then substitute the given args.
		query, argCount = prepareQuery(query)
		if argCount != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
		}
		query, args = preparePositionalArgs(query, argCount, args)
	}
	return conn.performExec(query, nil)
}

// ExecRaw behaves like Exec but returns the raw response body; the caller
// must close it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) > 0 {
		var argCount int
		query, argCount = prepareQuery(query)
		if argCount != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
		}
		query, args = preparePositionalArgs(query, argCount, args)
	}
	return conn.performExecRaw(query, nil)
}
// prepareQuery rewrites each "?" placeholder in query as a numbered
// positional parameter ($1, $2, ...) and returns the rewritten query along
// with the number of placeholders found.
func prepareQuery(query string) (string, int) {
	count := 0
	re := regexp.MustCompile("\\?")
	rewritten := re.ReplaceAllStringFunc(query, func(_ string) string {
		count++
		return fmt.Sprintf("$%d", count)
	})
	return rewritten, count
}
//
// preparePositionalArgs substitutes the first argCount args into the $N
// placeholders of query (strings double-quoted, []byte inserted verbatim,
// everything else rendered with %v) and returns the rewritten query plus
// the list of left-over args.
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	pairs := make([]string, 0)
	leftover := make([]interface{}, 0)
	for i, arg := range args {
		if i >= argCount {
			leftover = append(leftover, arg)
			continue
		}
		var rendered string
		switch arg := arg.(type) {
		case string:
			rendered = fmt.Sprintf("\"%v\"", arg)
		case []byte:
			rendered = string(arg)
		default:
			rendered = fmt.Sprintf("%v", arg)
		}
		pairs = append(pairs, fmt.Sprintf("$%d", i+1), rendered)
	}
	return strings.NewReplacer(pairs...).Replace(query), leftover
}
// prepare a http request for the query
//
// prepareRequest builds a POST form request carrying the statement, any
// positional args, every globally registered query parameter, the
// CB-User-Agent header, and basic-auth credentials when configured.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	postData := url.Values{}
	postData.Set("statement", query)
	if len(args) > 0 {
		paStr := buildPositionalArgList(args)
		if len(paStr) > 0 {
			postData.Set("args", paStr)
		}
	}
	setQueryParams(&postData)
	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(postData.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}
//
// Set query params
// setQueryParams copies every globally registered QueryParams entry into v.
func setQueryParams(v *url.Values) {
	for key, value := range QueryParams {
		v.Set(key, value)
	}
}
// Return hostname and port for IPv4 and IPv6
//
// HostNameandPort splits node into host and port. IPv6 input is expected
// in the "[host]:port" form; when no port is present, port is returned
// empty. An unparseable address yields an error from IsIPv6.
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	tokens := []string{}
	// Set _IPv6 based on input address
	ipv6, err = IsIPv6(node)
	if err != nil {
		return "", "", false, err
	}
	err = nil
	// For IPv6
	if ipv6 {
		// Then the url should be of the form [::1]:8091
		tokens = strings.Split(node, "]:")
		host = strings.Replace(tokens[0], "[", "", 1)
	} else {
		// For IPv4
		tokens = strings.Split(node, ":")
		host = tokens[0]
	}
	// A second token means an explicit port was supplied.
	if len(tokens) == 2 {
		port = tokens[1]
	} else {
		port = ""
	}
	return
}
// IsIPv6 reports whether str — a bare host or "host:port", including the
// "[::1]:8091" form — denotes an IPv6 address. "localhost" is resolved via
// the package-level LOCALHOST default (which getQueryApi may switch to
// "[::1]"). An address that parses as neither IPv4 nor IPv6 yields an error.
func IsIPv6(str string) (bool, error) {
	//ipv6 - can be [::1]:8091
	host, _, err := net.SplitHostPort(str)
	if err != nil {
		// No port present: treat the whole string as the host.
		host = str
	}
	if host == "localhost" {
		host = LOCALHOST
	}
	ip := net.ParseIP(host)
	if ip.To4() == nil {
		//Not an ipv4 address
		// check if ipv6
		if ip.To16() == nil {
			// Not ipv6
			return false, fmt.Errorf("\nThis is an incorrect address %v", str)
		}
		// IPv6
		return true, nil
	}
	// IPv4
	return false, nil
}
MB-28917 Allow cbq to parse FQDNs correctly - AWS URLs
Change-Id: Ie7f91d87cc7694b0aa11b5b41eea4153eefecabb
Reviewed-on: http://review.couchbase.org/91859
Reviewed-by: Sitaram Vemulapalli <8d86a491dcdf321bac78bef141a8229a59bf9211@couchbase.com>
Reviewed-by: Johan Larson <5105ed78d79b274eb9b1b31aff9e2d413721d12f@couchbase.com>
Tested-by: Isha Kandaswamy <e3cc845ebc6144fc4d71cf5f07a0ce9db6fdfa91@couchbase.com>
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
var (
ErrNotSupported = fmt.Errorf("N1QL:Not supported")
ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
ErrInternalError = fmt.Errorf("N1QL: Internal Error")
)
// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST     = "127.0.0.1"
	N1QL_DEFAULT_PORT     = 8093
	// NOTE(review): in Go, ^ is bitwise XOR, so 2 ^ 10 evaluates to 8 —
	// not 1024 and not "1 MB" as the trailing comment claims. Confirm the
	// intended value before relying on it.
	N1QL_POOL_SIZE         = 2 ^ 10 // 1 MB
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	// LOCALHOST starts as the IPv4 default; getQueryApi may switch it to
	// "[::1]" when the endpoint turns out to be IPv6.
	LOCALHOST = N1QL_DEFAULT_HOST
)
// flags
var (
N1QL_PASSTHROUGH_MODE = false
)
// Rest API query parameters
var QueryParams map[string]string
// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string
// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true
func init() {
QueryParams = make(map[string]string)
}
func SetQueryParams(key string, value string) error {
if key == "" {
return fmt.Errorf("N1QL: Key not specified")
}
QueryParams[key] = value
return nil
}
func UnsetQueryParams(key string) error {
if key == "" {
return fmt.Errorf("N1QL: Key not specified")
}
delete(QueryParams, key)
return nil
}
func SetPassthroughMode(val bool) {
N1QL_PASSTHROUGH_MODE = val
}
func SetUsernamePassword(u, p string) {
username = u
password = p
}
func hasUsernamePassword() bool {
return username != "" || password != ""
}
func SetSkipVerify(skip bool) {
skipVerify = skip
}
// implements driver.Conn interface
type n1qlConn struct {
clusterAddr string
queryAPIs []string
client *http.Client
lock sync.RWMutex
}
// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
func discoverN1QLService(name string, ps couchbase.PoolServices) string {
for _, ns := range ps.NodesExt {
if ns.Services != nil {
if port, ok := ns.Services["n1ql"]; ok == true {
var hostname string
//n1ql service found
var ipv6 = false
if ns.Hostname == "" {
hostnm := strings.TrimSpace(name)
if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
hostUrl, _ := url.Parse(name)
hostnm = hostUrl.Host
}
hostname, _, ipv6, _ = HostNameandPort(hostnm)
} else {
hostname = ns.Hostname
}
if ipv6 {
return fmt.Sprintf("[%s]:%d", hostname, port)
} else {
return fmt.Sprintf("%s:%d", hostname, port)
}
}
}
}
return ""
}
var cbUserAgent string = "godbc/" + util.VERSION
func SetCBUserAgentHeader(v string) {
cbUserAgent = v
}
func setCBUserAgent(request *http.Request) {
request.Header.Add("CB-User-Agent", cbUserAgent)
}
func getQueryApi(n1qlEndPoint string) ([]string, error) {
queryAdmin := "http://" + n1qlEndPoint + "/admin/clusters/default/nodes"
request, _ := http.NewRequest("GET", queryAdmin, nil)
request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
setCBUserAgent(request)
if hasUsernamePassword() {
request.SetBasicAuth(username, password)
}
queryAPIs := make([]string, 0)
hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
if err != nil {
return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
}
resp, err := HTTPClient.Do(request)
if err != nil {
return nil, fmt.Errorf("%v", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
return nil, fmt.Errorf("%s", bod)
}
var nodesInfo []interface{}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
}
if err := json.Unmarshal(body, &nodesInfo); err != nil {
return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
}
for _, queryNode := range nodesInfo {
switch queryNode := queryNode.(type) {
case map[string]interface{}:
queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
}
}
if ipv6 {
hostname = "[" + hostname + "]"
LOCALHOST = "[::1]"
}
// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
for i, qa := range queryAPIs {
queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
}
if len(queryAPIs) == 0 {
return nil, fmt.Errorf("Query endpoints not found")
}
return queryAPIs, nil
}
func OpenN1QLConnection(name string) (*n1qlConn, error) {
var queryAPIs []string
if strings.HasPrefix(name, "https") && skipVerify {
HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
//First check if the input string is a cluster endpoint
couchbase.SetSkipVerify(skipVerify)
var client couchbase.Client
var err error
if hasUsernamePassword() {
client, err = couchbase.ConnectWithAuthCreds(name, username, password)
} else {
client, err = couchbase.Connect(name)
}
var perr error = nil
if err != nil {
perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
// If not cluster endpoint then check if query endpoint
name = strings.TrimSuffix(name, "/")
queryAPI := name + N1QL_SERVICE_ENDPOINT
queryAPIs = make([]string, 1, 1)
queryAPIs[0] = queryAPI
} else {
ps, err := client.GetPoolServices("default")
if err != nil {
return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
}
n1qlEndPoint := discoverN1QLService(name, ps)
if n1qlEndPoint == "" {
return nil, fmt.Errorf("N1QL: No query service found on this cluster")
}
queryAPIs, err = getQueryApi(n1qlEndPoint)
if err != nil {
return nil, err
}
}
conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
if err != nil {
return nil, err
}
resp, err := conn.client.Do(request)
if err != nil {
final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
if perr != nil {
final_error = final_error + "\n " + stripurl(perr.Error())
}
return nil, fmt.Errorf("%v", final_error)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
}
return conn, nil
}
// stripurl masks the password embedded in a URL inside an error message,
// replacing it with a single "*" so credentials are not leaked in logs.
//
// It finds the first "http..." token, parses it as a URL, and rewrites the
// "user:password@" portion as "user:*@". When the password contains special
// characters the replacement is applied once more per character, to strip
// each URL-encoded fragment as well. Messages without a URL, or with
// credentials that cannot be located, are returned unchanged (the original
// code sliced with -1 indexes and panicked in those cases).
func stripurl(inputstring string) string {
	// Detect http* within the string; nothing to mask when absent.
	startindex := strings.Index(inputstring, "http")
	if startindex < 0 {
		return inputstring
	}
	endindex := strings.Index(inputstring[startindex:], " ")
	if endindex < 0 {
		// The URL runs to the end of the message.
		endindex = len(inputstring) - startindex
	}
	inputurl := inputstring[startindex : startindex+endindex]
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, _ := u.Password()
	// Count the special symbols in the password: each may appear
	// URL-encoded in the message and needs its own replacement pass.
	num := 0
	for _, letter := range fmt.Sprintf("%v", pwd) {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// Locate the username in the original message...
	startindex = strings.Index(inputstring, uname)
	if startindex < 0 || startindex+len(uname)+1+len(pwd) > len(inputstring) {
		// Defensive: parsed credentials do not line up with the message.
		return inputstring
	}
	// ...and rebuild it with "*" substituted for the password.
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	// Replace all the special-character encodings, one pass each.
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// doClientRequest sends the query (or raw requestValues form body) to a
// randomly selected query node, retrying on the remaining nodes when one
// fails. A node whose request errors out is removed from conn.queryAPIs;
// when the last remaining node fails, an error is returned.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// Seed once per call, not once per retry iteration: time.Now().Unix()
	// has one-second granularity, so reseeding inside the loop made every
	// fast retry re-pick the same "random" node.
	rand.Seed(time.Now().Unix())
	for {
		var request *http.Request
		var err error

		// Select a query API at random for simple load distribution.
		numNodes := len(conn.queryAPIs)
		selectedNode := rand.Intn(numNodes)
		conn.lock.RLock()
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()

		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			if requestValues != nil {
				request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, _ = http.NewRequest("POST", queryAPI, nil)
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}

		resp, err := conn.client.Do(request)
		if err == nil {
			return resp, nil
		}
		// If this was the last node, give up.
		if numNodes == 1 {
			break
		}
		// Remove the node that failed from the list of query nodes and retry.
		conn.lock.Lock()
		conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
		conn.lock.Unlock()
	}
	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors flattens the "errors" field of a query response into a
// single human-readable string. The expected shape is a list of
// {"code": ..., "msg": ...} maps; anything else falls through to a generic
// "%v %T" rendering.
func serializeErrors(errors interface{}) string {
	var parts []string
	if list, isList := errors.([]interface{}); isList {
		for _, item := range list {
			entry, isMap := item.(map[string]interface{})
			if !isMap {
				continue
			}
			code := entry["code"]
			msg := entry["msg"]
			// NOTE(review): code/msg are interface{} values; with JSON input
			// code decodes as float64, so "code != 0" never matches an int 0.
			if code != 0 && msg != "" {
				parts = append(parts, fmt.Sprintf("Code : %v Message : %v", code, msg))
			}
		}
	}
	if len(parts) > 0 {
		return strings.Join(parts, " ")
	}
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare sends a "PREPARE <query>" statement to the server and returns a
// statement handle carrying the server-assigned name, the serialized
// prepared plan and its signature. "?" placeholders are rewritten to
// positional parameters first and their count recorded for arity checks.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int
	query = "PREPARE " + query
	// Rewrite "?" placeholders to $1..$n; argCount is stored on the stmt.
	query, argCount = prepareQuery(query)
	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}
	// Server-side prepare errors are fatal for the whole call.
	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}
	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			serialized, _ := json.Marshal(preparedResults[0])
			// NOTE(review): unguarded type assertions — a malformed "results"
			// entry (non-map, or missing/non-string "name") would panic here.
			stmt.name = preparedResults[0].(map[string]interface{})["name"].(string)
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is part of the driver.Conn interface; transactions are not
// supported by this driver.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}
// Close is a no-op: the connection uses the shared package-level HTTP
// client and holds no per-connection resources to release.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the statement and hands back the raw response
// body stream. On a non-200 status the body is still returned together
// with the error so the caller can read the server's message; the caller
// is responsible for closing it.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
func getDecoder(r io.Reader) (*json.Decoder, error) {
if r == nil {
return nil, fmt.Errorf("Failed to decode nil response.")
}
return json.NewDecoder(r), nil
}
// performQuery runs the statement (or raw request values) and converts the
// JSON response envelope into godbc.Rows. In passthrough mode, metrics,
// status and requestID are forwarded to the caller as extra rows.
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}
	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}
	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}
	// Guard: a response without a "results" field previously caused a nil
	// pointer dereference on *resultRows below; treat it as an empty set.
	if resultRows == nil {
		empty := json.RawMessage("[]")
		resultRows = &empty
	}
	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be en error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
	}
	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
}
// Executes a query that returns a set of Rows.
// Select statements should use this interface
//
// Optional args are bound to "?" placeholders; the placeholder count must
// match the number of args supplied.
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	if len(args) == 0 {
		return conn.performQuery(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performQuery(prepared, nil)
}
// QueryRaw behaves like Query but returns the raw response body stream
// instead of parsed rows; the caller must close it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performQueryRaw(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performQueryRaw(prepared, nil)
}
// performExecRaw issues the statement and returns the raw response body
// stream. On a non-200 status the body is still returned (with an error)
// so the caller can read the server's message; the caller must close it.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
	}
	return resp.Body, nil
}
// performExec runs a non-SELECT statement (or raw request values) and
// collapses the response into a godbc.Result carrying the mutation count.
// Server-reported errors are returned alongside the (possibly partial) result.
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	var execErr error
	res := &n1qlResult{}
	for name, results := range resultMap {
		switch name {
		case "metrics":
			var metrics map[string]interface{}
			err := json.Unmarshal(*results, &metrics)
			if err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			if mc, ok := metrics["mutationCount"]; ok {
				res.affectedRows = int64(mc.(float64))
			}
			// NOTE(review): this break only exits the switch case — which Go
			// does implicitly — not the surrounding loop; it is a no-op.
			break
		case "errors":
			var errs []interface{}
			_ = json.Unmarshal(*results, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Execer implementation. To be used for queries that do not return any rows
// such as Create Index, Insert, Upset, Delete etc
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	if len(args) == 0 {
		return conn.performExec(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performExec(prepared, nil)
}
// ExecRaw behaves like Exec but returns the raw response body stream; the
// caller must close it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performExecRaw(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performExecRaw(prepared, nil)
}
// positionalParamRe matches the "?" placeholders rewritten by prepareQuery.
// Compiled once at package level instead of on every call.
var positionalParamRe = regexp.MustCompile(`\?`)

// prepareQuery rewrites "?" placeholders in query to numbered N1QL
// positional parameters ($1, $2, ...) and returns the rewritten query
// along with the number of placeholders found.
// NOTE(review): "?" characters inside string literals are also rewritten —
// preserved from the original behavior.
func prepareQuery(query string) (string, int) {
	count := 0
	rewritten := positionalParamRe.ReplaceAllStringFunc(query, func(string) string {
		count++
		return fmt.Sprintf("$%d", count)
	})
	return rewritten, count
}
//
// Substitute the positional parameters $1..$argCount in the query with
// literal renderings of the leading args, returning the rewritten query
// and the args that were left over.
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	pairs := make([]string, 0, 2*argCount)
	leftover := make([]interface{}, 0)
	for idx, value := range args {
		if idx >= argCount {
			leftover = append(leftover, value)
			continue
		}
		var rendered string
		switch v := value.(type) {
		case string:
			// Strings are wrapped in double quotes; inner quotes are not
			// escaped (preserved from the original behavior).
			rendered = fmt.Sprintf("\"%v\"", v)
		case []byte:
			rendered = string(v)
		default:
			rendered = fmt.Sprintf("%v", v)
		}
		pairs = append(pairs, fmt.Sprintf("$%d", idx+1), rendered)
	}
	return strings.NewReplacer(pairs...).Replace(query), leftover
}
// prepare a http request for the query
//
// prepareRequest builds a POST form request for the query service: the
// statement, optional positional args (JSON-encoded list), any globally
// registered query parameters, plus user-agent and basic-auth headers.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	postData := url.Values{}
	postData.Set("statement", query)
	if len(args) > 0 {
		paStr := buildPositionalArgList(args)
		if len(paStr) > 0 {
			postData.Set("args", paStr)
		}
	}
	// Merge in globally configured REST parameters (see SetQueryParams).
	setQueryParams(&postData)
	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(postData.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}
//
// Set query params
//
// setQueryParams copies the globally registered REST query parameters
// (see SetQueryParams/UnsetQueryParams) into the request form values.
func setQueryParams(v *url.Values) {
	for key, value := range QueryParams {
		v.Set(key, value)
	}
}
// Return hostname and port for IPv4 and IPv6
//
// HostNameandPort splits a "host:port" / "[v6host]:port" address into its
// parts and reports whether the host is IPv6. The returned port is empty
// when the input carries none.
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	tokens := []string{}
	// Set _IPv6 based on input address
	ipv6, err = IsIPv6(node)
	if err != nil {
		return "", "", false, err
	}
	err = nil
	// For IPv6
	if ipv6 {
		// Then the url should be of the form [::1]:8091
		tokens = strings.Split(node, "]:")
		host = strings.Replace(tokens[0], "[", "", 1)
	} else {
		// For IPv4
		tokens = strings.Split(node, ":")
		host = tokens[0]
	}
	// Exactly two tokens means a port was supplied; anything else (bare
	// host, or malformed input) leaves the port empty.
	if len(tokens) == 2 {
		port = tokens[1]
	} else {
		port = ""
	}
	return
}
func IsIPv6(str string) (bool, error) {
//ipv6 - can be [::1]:8091
host, _, err := net.SplitHostPort(str)
if err != nil {
host = str
}
if host == "localhost" {
host = LOCALHOST
}
ip := net.ParseIP(host)
if ip == nil {
// Essentially this is a FQDN. Golangs ParseIP cannot parse IPs that are non-numerical.
// It could also be an incorrect address. But that can be handled by split host port.
// This method is only to check if address is IPv6.
return false, nil
}
if ip.To4() == nil {
//Not an ipv4 address
// check if ipv6
if ip.To16() == nil {
// Not ipv6
return false, fmt.Errorf("\nThis is an incorrect address %v", str)
}
// IPv6
return true, nil
}
// IPv4
return false, nil
}
// ---- file boundary: stray "|" separator left over from concatenation ----
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
//
// Sentinel errors returned by driver entry points; callers compare against
// these values directly.
var (
	ErrNotSupported   = fmt.Errorf("N1QL:Not supported")
	ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
	ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
	ErrInternalError  = fmt.Errorf("N1QL: Internal Error")
)
// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST     = "127.0.0.1"
	N1QL_DEFAULT_PORT     = 8093
	// NOTE(review): "^" is bitwise XOR in Go, so 2 ^ 10 == 8 — not 1024,
	// and neither matches the "1 MB" comment. Confirm intended value.
	N1QL_POOL_SIZE         = 2 ^ 10 // 1 MB
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	// LOCALHOST is rebound to "[::1]" by getQueryApi for IPv6 clusters.
	LOCALHOST = N1QL_DEFAULT_HOST
)
// flags
var (
	// When true, raw metrics/status/requestID from the server are handed
	// back to the caller as rows instead of being consumed by the driver.
	N1QL_PASSTHROUGH_MODE = false
)
// Rest API query parameters
var QueryParams map[string]string
// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string
// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true
// TLS material for https connections; set via SetCertFile/SetKeyFile/SetRootFile.
var certFile = ""
var keyFile = ""
var rootFile = ""
// init allocates the global query-parameter map so Set/UnsetQueryParams
// can be called before any connection is opened.
func init() {
	QueryParams = make(map[string]string)
}
// SetQueryParams registers a REST query parameter that is sent with every
// subsequent request. An empty key is rejected.
func SetQueryParams(key string, value string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	QueryParams[key] = value
	return nil
}
// UnsetQueryParams removes a previously registered REST query parameter.
// An empty key is rejected.
func UnsetQueryParams(key string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	delete(QueryParams, key)
	return nil
}
// SetPassthroughMode toggles the global passthrough flag (see
// N1QL_PASSTHROUGH_MODE for semantics).
func SetPassthroughMode(val bool) {
	N1QL_PASSTHROUGH_MODE = val
}
// SetUsernamePassword stores the credentials used for cluster and query
// requests (HTTP basic auth).
func SetUsernamePassword(u, p string) {
	username = u
	password = p
}
// hasUsernamePassword reports whether any credential has been configured.
func hasUsernamePassword() bool {
	return username != "" || password != ""
}
// SetSkipVerify controls whether TLS certificate verification is skipped
// for ssl connections.
func SetSkipVerify(skip bool) {
	skipVerify = skip
}
// SetCertFile sets the client certificate file used for TLS connections.
func SetCertFile(cert string) {
	certFile = cert
}
// SetKeyFile sets the client private-key file used for TLS connections.
func SetKeyFile(cert string) {
	keyFile = cert
}
// SetRootFile sets the root CA certificate file used for TLS connections.
func SetRootFile(cert string) {
	rootFile = cert
}
// implements driver.Conn interface
type n1qlConn struct {
	clusterAddr string       // cluster endpoint the connection was opened against
	queryAPIs   []string     // live query-service endpoints; pruned on node failure
	client      *http.Client // shared HTTP client used for all requests
	lock        sync.RWMutex // guards queryAPIs
}
// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
// Shared transport/client; TLS settings are installed on HTTPTransport by
// OpenN1QLConnection for https endpoints.
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
// discoverN1QLService scans the pool's node services for an n1ql (or, for
// https inputs, n1qlSSL) service and returns its "host:port" address along
// with whether https should be used. Returns "" when no query service
// exists in the cluster.
func discoverN1QLService(name string, ps couchbase.PoolServices) (string, bool) {
	isHttps := false
	for _, ns := range ps.NodesExt {
		if ns.Services != nil {
			port, ok := ns.Services["n1ql"]
			if strings.HasPrefix(name, "https://") {
				isHttps = true
				port, ok = ns.Services["n1qlSSL"]
			}
			if ok {
				var hostname string
				//n1ql service found
				var ipv6 = false
				// NodesExt may omit the hostname of the node we are already
				// talking to; fall back to the host from the input name.
				if ns.Hostname == "" {
					hostnm := strings.TrimSpace(name)
					if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
						hostUrl, _ := url.Parse(name)
						hostnm = hostUrl.Host
					}
					hostname, _, ipv6, _ = HostNameandPort(hostnm)
				} else {
					hostname, _, ipv6, _ = HostNameandPort(ns.Hostname)
				}
				// IPv6 hosts must be bracketed when joined with a port.
				if ipv6 {
					return fmt.Sprintf("[%s]:%d", hostname, port), isHttps
				} else {
					return fmt.Sprintf("%s:%d", hostname, port), isHttps
				}
			}
		}
	}
	return "", isHttps
}
// cbUserAgent is the value sent in the CB-User-Agent header on every
// request; overridable via SetCBUserAgentHeader.
var cbUserAgent string = "godbc/" + util.VERSION
// SetCBUserAgentHeader overrides the CB-User-Agent header value.
func SetCBUserAgentHeader(v string) {
	cbUserAgent = v
}
// setCBUserAgent stamps the CB-User-Agent header onto an outgoing request.
func setCBUserAgent(request *http.Request) {
	request.Header.Add("CB-User-Agent", cbUserAgent)
}
// getQueryApi asks the admin endpoint of a known query node for the full
// list of query-service endpoints in the cluster, normalising localhost
// addresses to the node's real hostname. isHttps selects the URL scheme.
func getQueryApi(n1qlEndPoint string, isHttps bool) ([]string, error) {
	queryAdmin := n1qlEndPoint + "/admin/clusters/default/nodes"
	if isHttps {
		queryAdmin = "https://" + queryAdmin
	} else {
		queryAdmin = "http://" + queryAdmin
	}
	request, _ := http.NewRequest("GET", queryAdmin, nil)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	queryAPIs := make([]string, 0)
	hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
	}
	resp, err := HTTPClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("%v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var nodesInfo []interface{}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &nodesInfo); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	for _, queryNode := range nodesInfo {
		if node, ok := queryNode.(map[string]interface{}); ok {
			// Guarded assertion: a node entry without a string
			// "queryEndpoint" previously caused a panic here.
			if ep, ok := node["queryEndpoint"].(string); ok {
				queryAPIs = append(queryAPIs, ep)
			}
		}
	}
	if ipv6 {
		hostname = "[" + hostname + "]"
		LOCALHOST = "[::1]"
	}
	// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
	for i, qa := range queryAPIs {
		queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
	}
	if len(queryAPIs) == 0 {
		return nil, fmt.Errorf("Query endpoints not found")
	}
	return queryAPIs, nil
}
// OpenN1QLConnection establishes a connection to a N1QL query service.
// "name" may be either a cluster (ns_server) endpoint — in which case the
// query nodes are auto-discovered — or a direct query-service URL. For
// https endpoints, TLS is configured on the shared transport first. The
// connection is validated with a trivial statement before being returned.
func OpenN1QLConnection(name string) (*n1qlConn, error) {
	var queryAPIs []string
	if strings.HasPrefix(name, "https") {
		//First check if the input string is a cluster endpoint
		couchbase.SetSkipVerify(skipVerify)
		if skipVerify {
			HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		} else {
			// NOTE(review): with verification enabled, both certFile and
			// keyFile are mandatory even for server-only TLS — confirm intent.
			if certFile != "" && keyFile != "" {
				couchbase.SetCertFile(certFile)
				couchbase.SetKeyFile(keyFile)
			} else {
				//error need to pass both certfile and keyfile
				return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
			}
			if rootFile != "" {
				couchbase.SetRootFile(rootFile)
			}
			// For 18093 connections
			cfg, err := couchbase.ClientConfigForX509(certFile, keyFile, rootFile)
			if err != nil {
				return nil, err
			}
			HTTPTransport.TLSClientConfig = cfg
		}
	}
	var client couchbase.Client
	var err error
	// Try the name as a cluster endpoint first (with credentials if set).
	if hasUsernamePassword() {
		client, err = couchbase.ConnectWithAuthCreds(name, username, password)
	} else {
		client, err = couchbase.Connect(name)
	}
	var perr error = nil
	if err != nil {
		// Not a cluster endpoint: fall back to treating the name as a
		// direct query-service URL; keep perr so both failures can be
		// reported if the smoke test below also fails.
		perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
		// If not cluster endpoint then check if query endpoint
		name = strings.TrimSuffix(name, "/")
		queryAPI := name + N1QL_SERVICE_ENDPOINT
		queryAPIs = make([]string, 1, 1)
		queryAPIs[0] = queryAPI
	} else {
		ps, err := client.GetPoolServices("default")
		if err != nil {
			return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
		}
		n1qlEndPoint, isHttps := discoverN1QLService(name, ps)
		if n1qlEndPoint == "" {
			return nil, fmt.Errorf("N1QL: No query service found on this cluster")
		}
		queryAPIs, err = getQueryApi(n1qlEndPoint, isHttps)
		if err != nil {
			return nil, err
		}
	}
	conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
	// Smoke-test the connection with a trivial statement; any credentials
	// embedded in error text are masked by stripurl before returning.
	request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
	if err != nil {
		return nil, err
	}
	resp, err := conn.client.Do(request)
	if err != nil {
		final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
		if perr != nil {
			final_error = final_error + "\n " + stripurl(perr.Error())
		}
		return nil, fmt.Errorf("%v", final_error)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
	}
	return conn, nil
}
// stripurl masks the password embedded in the first URL found inside
// inputstring (e.g. in an error message like
// "dial http://user:secret@host:8091/ failed"), replacing it with "*".
// If no URL, no user-info or no password can be located, the input is
// returned unchanged.
func stripurl(inputstring string) string {
	// Locate the first "http" occurrence; bail out when there is none
	// (the previous code would panic slicing with index -1 here).
	startindex := strings.Index(inputstring, "http")
	if startindex < 0 {
		return inputstring
	}
	// The URL ends at the next space, or at the end of the string
	// (previously a URL at the very end caused an invalid slice).
	endindex := strings.Index(inputstring[startindex:], " ")
	if endindex < 0 {
		endindex = len(inputstring) - startindex
	}
	inputurl := inputstring[startindex : startindex+endindex]
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, ok := u.Password()
	if !ok || uname == "" {
		// No password present — nothing to mask.
		return inputstring
	}
	// Count symbol/punctuation runes in the password: each may appear
	// percent-encoded in the raw message, leaving residue that the
	// recursive passes at the end clean up.
	num := 0
	for _, letter := range pwd {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// Find the username in the raw message; the password starts one
	// character (the ':') after it. Guard the slice bounds.
	startindex = strings.Index(inputstring, uname)
	if startindex < 0 || startindex+len(uname)+1+len(pwd) > len(inputstring) {
		return inputstring
	}
	// Re-form the message with "*" in place of the password.
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	// Re-run once per special character to strip percent-encoded residue.
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// doClientRequest executes the given query (or raw requestValues) against a
// randomly selected query node, retrying on other nodes when one fails.
// A node whose request errors out is pruned from the pool; when the last
// node fails, an error is returned.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// Seed once per call rather than once per retry iteration: repeated
	// seeding with second granularity made every retry pick the same node.
	rand.Seed(time.Now().Unix())
	for {
		var request *http.Request
		var err error
		// Select a query node under the read lock — the node list can be
		// mutated concurrently by other requests pruning failed nodes
		// (the length was previously read without the lock).
		conn.lock.RLock()
		numNodes := len(conn.queryAPIs)
		if numNodes == 0 {
			// All nodes pruned; rand.Intn(0) would panic below.
			conn.lock.RUnlock()
			break
		}
		selectedNode := rand.Intn(numNodes)
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()
		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			if requestValues != nil {
				request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, _ = http.NewRequest("POST", queryAPI, nil)
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}
		resp, err := conn.client.Do(request)
		if err == nil {
			return resp, nil
		}
		// if this is the last node return with error
		if numNodes == 1 {
			break
		}
		// Remove the node that failed from the list of query nodes.
		// Re-validate the index: the slice may have been pruned by a
		// concurrent request since we released the read lock.
		conn.lock.Lock()
		if selectedNode < len(conn.queryAPIs) {
			conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
		}
		conn.lock.Unlock()
	}
	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors flattens the "errors" field of a query response into a
// single human-readable string. The expected shape is a list of
// {"code": ..., "msg": ...} maps; anything else falls through to a generic
// "%v %T" rendering.
func serializeErrors(errors interface{}) string {
	var parts []string
	if list, isList := errors.([]interface{}); isList {
		for _, item := range list {
			entry, isMap := item.(map[string]interface{})
			if !isMap {
				continue
			}
			code := entry["code"]
			msg := entry["msg"]
			// NOTE(review): code/msg are interface{} values; with JSON input
			// code decodes as float64, so "code != 0" never matches an int 0.
			if code != 0 && msg != "" {
				parts = append(parts, fmt.Sprintf("Code : %v Message : %v", code, msg))
			}
		}
	}
	if len(parts) > 0 {
		return strings.Join(parts, " ")
	}
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare sends a "PREPARE <query>" statement to the server and returns a
// statement handle carrying the server-assigned name, the serialized
// prepared plan and its signature. "?" placeholders are rewritten to
// positional parameters first and their count recorded for arity checks.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int
	query = "PREPARE " + query
	// Rewrite "?" placeholders to $1..$n; argCount is stored on the stmt.
	query, argCount = prepareQuery(query)
	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}
	// Server-side prepare errors are fatal for the whole call.
	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}
	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			serialized, _ := json.Marshal(preparedResults[0])
			// NOTE(review): unguarded type assertions — a malformed "results"
			// entry (non-map, or missing/non-string "name") would panic here.
			stmt.name = preparedResults[0].(map[string]interface{})["name"].(string)
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is part of the driver.Conn interface; transactions are not
// supported by this driver.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}
// Close is a no-op: the connection uses the shared package-level HTTP
// client and holds no per-connection resources to release.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the statement and hands back the raw response
// body stream. On a non-200 status the body is still returned together
// with the error so the caller can read the server's message; the caller
// is responsible for closing it.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
func getDecoder(r io.Reader) (*json.Decoder, error) {
if r == nil {
return nil, fmt.Errorf("Failed to decode nil response.")
}
return json.NewDecoder(r), nil
}
// performQuery runs the statement (or raw request values) and converts the
// JSON response envelope into godbc.Rows. In passthrough mode, metrics,
// status and requestID are forwarded to the caller as extra rows.
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}
	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}
	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}
	// Guard: a response without a "results" field previously caused a nil
	// pointer dereference on *resultRows below; treat it as an empty set.
	if resultRows == nil {
		empty := json.RawMessage("[]")
		resultRows = &empty
	}
	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be en error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
	}
	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
}
// Executes a query that returns a set of Rows.
// Select statements should use this interface
//
// Optional args are bound to "?" placeholders; the placeholder count must
// match the number of args supplied.
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	if len(args) == 0 {
		return conn.performQuery(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performQuery(prepared, nil)
}
// QueryRaw behaves like Query but returns the raw response body stream
// instead of parsed rows; the caller must close it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performQueryRaw(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performQueryRaw(prepared, nil)
}
// performExecRaw issues the statement and returns the raw response body
// stream. On a non-200 status the body is still returned (with an error)
// so the caller can read the server's message; the caller must close it.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
	}
	return resp.Body, nil
}
// performExec runs a non-SELECT statement (or raw request values) and
// collapses the response into a godbc.Result carrying the mutation count.
// Server-reported errors are returned alongside the (possibly partial) result.
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface at most 512 bytes of the server's error body.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	var execErr error
	res := &n1qlResult{}
	for name, results := range resultMap {
		switch name {
		case "metrics":
			var metrics map[string]interface{}
			err := json.Unmarshal(*results, &metrics)
			if err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			if mc, ok := metrics["mutationCount"]; ok {
				res.affectedRows = int64(mc.(float64))
			}
			// NOTE(review): this break only exits the switch case — which Go
			// does implicitly — not the surrounding loop; it is a no-op.
			break
		case "errors":
			var errs []interface{}
			_ = json.Unmarshal(*results, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Execer implementation. To be used for queries that do not return any rows
// such as Create Index, Insert, Upset, Delete etc
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	if len(args) == 0 {
		return conn.performExec(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performExec(prepared, nil)
}
// ExecRaw behaves like Exec but returns the raw response body stream; the
// caller must close it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performExecRaw(query, nil)
	}
	prepared, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	prepared, _ = preparePositionalArgs(prepared, argCount, args)
	return conn.performExecRaw(prepared, nil)
}
// positionalParamRe matches the "?" placeholders rewritten by prepareQuery.
// Compiled once at package level instead of on every call.
var positionalParamRe = regexp.MustCompile(`\?`)

// prepareQuery rewrites "?" placeholders in query to numbered N1QL
// positional parameters ($1, $2, ...) and returns the rewritten query
// along with the number of placeholders found.
// NOTE(review): "?" characters inside string literals are also rewritten —
// preserved from the original behavior.
func prepareQuery(query string) (string, int) {
	count := 0
	rewritten := positionalParamRe.ReplaceAllStringFunc(query, func(string) string {
		count++
		return fmt.Sprintf("$%d", count)
	})
	return rewritten, count
}
//
// Substitute the positional parameters $1..$argCount in the query with
// literal renderings of the leading args, returning the rewritten query
// and the args that were left over.
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	pairs := make([]string, 0, 2*argCount)
	leftover := make([]interface{}, 0)
	for idx, value := range args {
		if idx >= argCount {
			leftover = append(leftover, value)
			continue
		}
		var rendered string
		switch v := value.(type) {
		case string:
			// Strings are wrapped in double quotes; inner quotes are not
			// escaped (preserved from the original behavior).
			rendered = fmt.Sprintf("\"%v\"", v)
		case []byte:
			rendered = string(v)
		default:
			rendered = fmt.Sprintf("%v", v)
		}
		pairs = append(pairs, fmt.Sprintf("$%d", idx+1), rendered)
	}
	return strings.NewReplacer(pairs...).Replace(query), leftover
}
// prepare a http request for the query
//
// prepareRequest builds a POST form request for the query service: the
// statement, optional positional args (JSON-encoded list), any globally
// registered query parameters, plus user-agent and basic-auth headers.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	postData := url.Values{}
	postData.Set("statement", query)
	if len(args) > 0 {
		paStr := buildPositionalArgList(args)
		if len(paStr) > 0 {
			postData.Set("args", paStr)
		}
	}
	// Merge in globally configured REST parameters (see SetQueryParams).
	setQueryParams(&postData)
	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(postData.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}
//
// Set query params
func setQueryParams(v *url.Values) {
for key, value := range QueryParams {
v.Set(key, value)
}
}
// HostNameandPort splits a node address into host and port. It handles
// IPv4-style "host:8091" and bracketed IPv6 "[::1]:8091" forms; port is ""
// when the input carries no port. ipv6 reports whether the host parsed as an
// IPv6 address (per IsIPv6).
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	tokens := []string{}
	// Set _IPv6 based on input address
	ipv6, err = IsIPv6(node)
	if err != nil {
		return "", "", false, err
	}
	err = nil
	// For IPv6
	if ipv6 {
		// Then the url should be of the form [::1]:8091
		tokens = strings.Split(node, "]:")
		host = strings.Replace(tokens[0], "[", "", 1)
	} else {
		// For IPv4 or FQDN: split on the colon.
		// NOTE(review): a host containing multiple colons that IsIPv6 did not
		// recognize would be mis-split here — confirm inputs are host[:port].
		tokens = strings.Split(node, ":")
		host = tokens[0]
	}
	// Exactly two tokens means a port was present after the host.
	if len(tokens) == 2 {
		port = tokens[1]
	} else {
		port = ""
	}
	return
}
// IsIPv6 reports whether the given address (bare host or host:port, including
// bracketed IPv6 such as [::1]:8091) is an IPv6 address. Non-numeric hosts
// (FQDNs) report false with no error.
func IsIPv6(str string) (bool, error) {
	//ipv6 - can be [::1]:8091
	host, _, err := net.SplitHostPort(str)
	if err != nil {
		// Not a host:port pair — treat the whole input as the host.
		host = str
	}
	if host == "localhost" {
		// LOCALHOST is a package-level value; it defaults to 127.0.0.1 and is
		// switched to "[::1]" by getQueryApi when running IPv6.
		host = LOCALHOST
	}
	ip := net.ParseIP(host)
	if ip == nil {
		// Essentially this is a FQDN. Golangs ParseIP cannot parse IPs that are non-numerical.
		// It could also be an incorrect address. But that can be handled by split host port.
		// This method is only to check if address is IPv6.
		return false, nil
	}
	if ip.To4() == nil {
		//Not an ipv4 address
		// check if ipv6
		// NOTE(review): for a non-nil net.IP, To16 never returns nil, so this
		// error branch looks unreachable — confirm before relying on it.
		if ip.To16() == nil {
			// Not ipv6
			return false, fmt.Errorf("\nThis is an incorrect address %v", str)
		}
		// IPv6
		return true, nil
	}
	// IPv4
	return false, nil
}
MB-35490, MB-35237 - Add support for the following: 1. Auto discovery of Analytics nodes (along with N1QL) for cbq using option --analytics, and 2. Alternate address support for the same using --networkconfig.
Change-Id: I7163278942a275b9c5277de11c379c717fd0e1af
Reviewed-on: http://review.couchbase.org/113668
Reviewed-by: Sitaram Vemulapalli <8d86a491dcdf321bac78bef141a8229a59bf9211@couchbase.com>
Reviewed-by: Johan Larson <5105ed78d79b274eb9b1b31aff9e2d413721d12f@couchbase.com>
Tested-by: Isha Kandaswamy <e3cc845ebc6144fc4d71cf5f07a0ce9db6fdfa91@couchbase.com>
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
var (
	ErrNotSupported = fmt.Errorf("N1QL:Not supported")
	ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
	ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
	ErrInternalError = fmt.Errorf("N1QL: Internal Error")
)

// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST = "127.0.0.1"
	N1QL_DEFAULT_PORT = 8093
	// BUG(review): "^" is bitwise XOR in Go, so 2 ^ 10 evaluates to 8 —
	// neither 1 KiB nor the "1 MB" the old comment claimed. The intent was
	// probably 1 << 10 or 1 << 20; confirm usage before changing the value.
	N1QL_POOL_SIZE = 2 ^ 10
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	LOCALHOST = N1QL_DEFAULT_HOST
)

// flags
var (
	// When true, raw response fields (metrics, status, requestID) are
	// surfaced to the caller as rows instead of being dropped.
	N1QL_PASSTHROUGH_MODE = false
)

// Rest API query parameters, sent with every request (see SetQueryParams).
var QueryParams map[string]string

// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string

// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true

// TLS material for https connections (see the corresponding setters below).
var certFile = ""
var keyFile = ""
var rootFile = ""

// When true, service discovery targets Analytics (cbas) nodes instead of
// N1QL query nodes.
var isAnalytics = false

// Cluster addressing mode: "default" (internal addresses), "external"
// (alternate addresses) or "auto" (detect by comparing the input host).
var networkCfg = "default"
func init() {
	QueryParams = make(map[string]string)
}

// SetIsAnalytics selects Analytics (cbas) service discovery instead of N1QL.
func SetIsAnalytics(val bool) {
	isAnalytics = val
}

// SetNetworkType selects the cluster addressing mode: "default", "external"
// or "auto".
func SetNetworkType(networkType string) {
	networkCfg = networkType
}

// SetQueryParams registers a REST query parameter sent with every request.
func SetQueryParams(key string, value string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	QueryParams[key] = value
	return nil
}

// UnsetQueryParams removes a previously registered query parameter.
func UnsetQueryParams(key string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	delete(QueryParams, key)
	return nil
}

// SetPassthroughMode toggles raw passthrough of metrics/status/requestID.
func SetPassthroughMode(val bool) {
	N1QL_PASSTHROUGH_MODE = val
}

// SetUsernamePassword stores credentials used for basic auth on requests.
func SetUsernamePassword(u, p string) {
	username = u
	password = p
}

// hasUsernamePassword reports whether any credential has been configured.
func hasUsernamePassword() bool {
	return username != "" || password != ""
}

// SetSkipVerify controls TLS certificate verification for https endpoints.
func SetSkipVerify(skip bool) {
	skipVerify = skip
}

// SetCertFile sets the client certificate used for TLS connections.
func SetCertFile(cert string) {
	certFile = cert
}

// SetKeyFile sets the client private key used for TLS connections.
func SetKeyFile(cert string) {
	keyFile = cert
}

// SetRootFile sets the root CA certificate used for TLS connections.
func SetRootFile(cert string) {
	rootFile = cert
}
// implements driver.Conn interface
type n1qlConn struct {
	clusterAddr string       // address the connection was opened against
	queryAPIs   []string     // discovered query/analytics endpoints; shrinks as nodes fail
	client      *http.Client // shared HTTP client used for all requests
	lock        sync.RWMutex // guards queryAPIs
}

// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
// discoverN1QLService walks the cluster's pool-services metadata and builds
// the list of query API endpoints, for either N1QL ("n1ql"/"n1qlSSL") or
// Analytics ("cbas"/"cbasSSL") depending on isAnalytics. When the package
// networkCfg is "auto" it is resolved here to "default" or "external" by
// comparing the input host against the advertised node hostnames.
// NOTE(review): the networkType parameter is currently unused — the function
// reads and mutates the package-level networkCfg instead.
func discoverN1QLService(name string, ps couchbase.PoolServices, isAnalytics bool, networkType string) ([]string, error) {
	var hostnm string
	var port int
	var ipv6, ok bool
	var hostUrl *url.URL
	isHttps := false
	serviceType := "n1ql"
	serviceTypeSsl := "n1qlSSL"
	// Since analytics doesnt have a rest endpoint that lists the cluster nodes
	// We need to populate the list of analytics APIs here itself
	// We might as well do the same for query. This makes getQueryApi() redundant.
	queryAPIs := []string{}
	if isAnalytics {
		serviceType = "cbas"
		serviceTypeSsl = "cbasSSL"
	}
	// If the network type isn't provided, then we need to detect whether to use default address or alternate address
	// by comparing the input hostname with the hostname's under services.
	// If it matches then we know its a default (internal address), else we can think of it as an external address and
	// move on, throwing an error if that doesnt work.
	hostnm = strings.TrimSpace(name)
	if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
		hostUrl, _ = url.Parse(name)
		hostnm = hostUrl.Host
	}
	if networkCfg == "auto" {
		// Fix: hostUrl is nil when name carries no http(s) scheme; the
		// original dereferenced it unconditionally and panicked. Fall back to
		// the bare host (port stripped) in that case.
		inputHost := hostnm
		if hostUrl != nil {
			inputHost = hostUrl.Hostname()
		} else {
			inputHost, _, _, _ = HostNameandPort(hostnm)
		}
		for _, ns := range ps.NodesExt {
			if strings.Compare(ns.Hostname, inputHost) == 0 {
				networkCfg = "default"
				break
			}
		}
		if networkCfg != "default" {
			networkCfg = "external"
		}
	}
	for _, ns := range ps.NodesExt {
		if networkCfg == "default" {
			// Get the host and port info
			if ns.Hostname != "" {
				hostnm = ns.Hostname
			}
			hostnm, _, ipv6, _ = HostNameandPort(hostnm)
		}
		// Find default ports. This is used even if network Type is external if the port mapping is absent.
		if ns.Services != nil {
			if strings.HasPrefix(name, "https://") {
				isHttps = true
				port, ok = ns.Services[serviceTypeSsl]
			} else {
				port, ok = ns.Services[serviceType]
			}
		}
		if networkCfg == "external" {
			// Get the host and port info
			if v, found := ns.AlternateNames["external"]; found {
				if v.Hostname == "" {
					// This is an error condition.
					return nil, fmt.Errorf("N1QL: Hostname for external address cannot be nil")
				}
				hostnm, _, ipv6, _ = HostNameandPort(v.Hostname)
				if v.Ports != nil {
					// need to use external ports
					if strings.HasPrefix(name, "https://") {
						isHttps = true
						port, ok = v.Ports[serviceTypeSsl]
					} else {
						port, ok = v.Ports[serviceType]
					}
				}
			} else {
				// return error as External address required to be setup.
				return nil, fmt.Errorf("N1QL: Alternate Addresses required. ")
			}
		}
		// we have found a port. And we have hostname as well.
		if ok {
			// n1ql or analytics service found
			if isHttps {
				hostnm = "https://" + hostnm
			} else {
				hostnm = "http://" + hostnm
			}
			if ipv6 {
				queryAPIs = append(queryAPIs, fmt.Sprintf("[%s]:%d"+N1QL_SERVICE_ENDPOINT, hostnm, port))
			} else {
				queryAPIs = append(queryAPIs, fmt.Sprintf("%s:%d"+N1QL_SERVICE_ENDPOINT, hostnm, port))
			}
		}
	}
	return queryAPIs, nil
}
// cbUserAgent is sent as the CB-User-Agent header on every outgoing request.
var cbUserAgent string = "godbc/" + util.VERSION

// SetCBUserAgentHeader overrides the CB-User-Agent header value.
func SetCBUserAgentHeader(v string) {
	cbUserAgent = v
}

// setCBUserAgent stamps the CB-User-Agent header onto an outgoing request.
func setCBUserAgent(request *http.Request) {
	request.Header.Add("CB-User-Agent", cbUserAgent)
}
// getQueryApi asks a single query node (via its /admin/clusters endpoint) for
// the full list of query endpoints in the cluster. Localhost entries in the
// returned endpoints are rewritten to the host that was actually dialed.
func getQueryApi(n1qlEndPoint string, isHttps bool) ([]string, error) {
	queryAdmin := n1qlEndPoint + "/admin/clusters/default/nodes"
	if isHttps {
		queryAdmin = "https://" + queryAdmin
	} else {
		queryAdmin = "http://" + queryAdmin
	}
	request, _ := http.NewRequest("GET", queryAdmin, nil)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	queryAPIs := make([]string, 0)
	hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
	}
	resp, err := HTTPClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("%v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Surface (at most 512 bytes of) the server's error text verbatim.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var nodesInfo []interface{}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &nodesInfo); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	for _, queryNode := range nodesInfo {
		switch queryNode := queryNode.(type) {
		case map[string]interface{}:
			// NOTE(review): the unchecked type assertion panics if a node
			// entry lacks a string "queryEndpoint" — confirm server contract.
			queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
		}
	}
	if ipv6 {
		hostname = "[" + hostname + "]"
		// Side effect: switches the package-level LOCALHOST used by IsIPv6.
		LOCALHOST = "[::1]"
	}
	// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
	for i, qa := range queryAPIs {
		queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
	}
	if len(queryAPIs) == 0 {
		return nil, fmt.Errorf("Query endpoints not found")
	}
	return queryAPIs, nil
}
// OpenN1QLConnection establishes a connection for queries. "name" may be a
// cluster endpoint (in which case query/analytics nodes are auto-discovered)
// or a direct query endpoint. A trivial statement is run to verify the
// connection before it is handed back.
func OpenN1QLConnection(name string) (*n1qlConn, error) {
	var queryAPIs []string = nil
	if strings.HasPrefix(name, "https") {
		//First check if the input string is a cluster endpoint
		couchbase.SetSkipVerify(skipVerify)
		if skipVerify {
			HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		} else {
			if certFile != "" && keyFile != "" {
				couchbase.SetCertFile(certFile)
				couchbase.SetKeyFile(keyFile)
			} else {
				//error need to pass both certfile and keyfile
				return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
			}
			if rootFile != "" {
				couchbase.SetRootFile(rootFile)
			}
			// For 18093 connections
			cfg, err := couchbase.ClientConfigForX509(certFile, keyFile, rootFile)
			if err != nil {
				return nil, err
			}
			HTTPTransport.TLSClientConfig = cfg
		}
	}
	var client couchbase.Client
	var err error
	var perr error = nil
	// Connect to a couchbase cluster
	if hasUsernamePassword() {
		client, err = couchbase.ConnectWithAuthCreds(name, username, password)
	} else {
		client, err = couchbase.Connect(name)
	}
	if err != nil {
		// Direct query entry (8093 or 8095 for example. So connect to that.)
		perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
		// If not cluster endpoint then check if query endpoint
		name = strings.TrimSuffix(name, "/")
		queryAPI := name + N1QL_SERVICE_ENDPOINT
		queryAPIs = make([]string, 1, 1)
		queryAPIs[0] = queryAPI
	} else {
		// Connection was possible - means this is a cluster endpoint.
		// We need to auto detect the query / analytics nodes.
		// Query by default. Analytics if option is set.
		// Get pools/default/nodeServices
		ps, err := client.GetPoolServices("default")
		if err != nil {
			return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
		}
		queryAPIs, err = discoverN1QLService(name, ps, isAnalytics, networkCfg)
		if err != nil {
			return nil, err
		}
		sType := "N1QL"
		if isAnalytics {
			sType = "Analytics"
		}
		if len(queryAPIs) <= 0 {
			return nil, fmt.Errorf("N1QL: No " + sType + " service found on this cluster")
		}
	}
	conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
	// Sanity check: run a trivial statement against the first endpoint.
	request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
	if err != nil {
		return nil, err
	}
	resp, err := conn.client.Do(request)
	if err != nil {
		// stripurl masks any credentials embedded in the error text.
		final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
		if perr != nil {
			final_error = final_error + "\n " + stripurl(perr.Error())
		}
		return nil, fmt.Errorf("%v", final_error)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
	}
	return conn, nil
}
// stripurl masks the password portion of the first URL embedded in an error
// message, e.g. http://user:secret@host:8093 -> http://user:*@host:8093, so
// credentials never appear in surfaced errors. Messages without a URL or
// without credentials are returned unchanged.
func stripurl(inputstring string) string {
	// Detect http* within the string.
	startindex := strings.Index(inputstring, "http")
	if startindex < 0 {
		// Fix: no URL present — the original sliced with index -1 and panicked.
		return inputstring
	}
	endindex := strings.Index(inputstring[startindex:], " ")
	if endindex < 0 {
		// Fix: the URL runs to the end of the message.
		endindex = len(inputstring) - startindex
	}
	inputurl := inputstring[startindex : startindex+endindex]
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, _ := u.Password()
	// Count symbol/punctuation characters in the password; the original code
	// re-runs the masking once per such character (e.g. for percent-encoded
	// duplicates elsewhere in the message) — behavior preserved.
	num := 0
	for _, letter := range fmt.Sprintf("%v", pwd) {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// detect the index on the password
	startindex = strings.Index(inputstring, uname)
	//reform the error message, with * as the password
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	//Replace all the special characters encoding
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// doClientRequest sends the statement (or pre-encoded form values) to a
// randomly chosen query node, retrying on other nodes when one fails. A node
// that fails is removed from the rotation; when no node responds an error is
// returned.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// Fix: seed once per call. The original re-seeded inside the loop with
	// time.Now().Unix(), so every retry within the same second picked the
	// same "random" node.
	rand.Seed(time.Now().UnixNano())
	for {
		var request *http.Request
		var err error
		// Fix: read the node list length and pick the node under the read
		// lock, so a concurrent shrink (below) cannot leave the index out of
		// range; also guard against an empty list before calling rand.Intn.
		conn.lock.RLock()
		numNodes := len(conn.queryAPIs)
		if numNodes == 0 {
			conn.lock.RUnlock()
			break
		}
		selectedNode := rand.Intn(numNodes)
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()
		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			if requestValues != nil {
				request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, _ = http.NewRequest("POST", queryAPI, nil)
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}
		resp, err := conn.client.Do(request)
		if err == nil {
			return resp, nil
		}
		// if this is the last node return with error
		if numNodes == 1 {
			break
		}
		// remove the node that failed from the list of query nodes
		conn.lock.Lock()
		if selectedNode < len(conn.queryAPIs) {
			conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
		}
		conn.lock.Unlock()
	}
	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors renders the "errors" payload of a query response as one
// human-readable string ("Code : <c> Message : <m>" entries separated by a
// space). Payloads that are not a list of error maps fall back to a generic
// "%v %T" rendering.
func serializeErrors(errors interface{}) string {
	parts := []string{}
	if list, isList := errors.([]interface{}); isList {
		for _, item := range list {
			entry, isMap := item.(map[string]interface{})
			if !isMap {
				continue
			}
			code := entry["code"]
			msg := entry["msg"]
			if code != 0 && msg != "" {
				parts = append(parts, fmt.Sprintf("Code : %v Message : %v", code, msg))
			}
		}
	}
	if len(parts) > 0 {
		return strings.Join(parts, " ")
	}
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare sends "PREPARE <query>" to the server and returns a statement
// handle holding the prepared plan, its name and signature. "?" placeholders
// are rewritten to $n positional parameters and their count recorded on the
// statement.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int
	query = "PREPARE " + query
	query, argCount = prepareQuery(query)
	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}
	// Server-side errors abort the prepare entirely.
	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}
	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			// Keep the serialized plan (re-submitted on execute) and its name.
			// NOTE(review): the type assertions panic if the first result is
			// not an object with a string "name" — confirm server contract.
			serialized, _ := json.Marshal(preparedResults[0])
			stmt.name = preparedResults[0].(map[string]interface{})["name"].(string)
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is part of the driver.Conn interface; transactions are not supported
// by this driver.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}

// Close is a no-op: the underlying http.Client is shared package-wide and
// manages its own connection pool.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the statement and hands the raw, still-open response
// body back to the caller (who owns closing it). On a non-200 status the body
// is returned together with an error so the caller can still inspect it.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, doErr := conn.doClientRequest(query, requestValues)
	if doErr != nil {
		return nil, doErr
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
func getDecoder(r io.Reader) (*json.Decoder, error) {
if r == nil {
return nil, fmt.Errorf("Failed to decode nil response.")
}
return json.NewDecoder(r), nil
}
// performQuery runs the statement (or pre-encoded request values) against a
// query node and converts the JSON response envelope into rows. Server-side
// errors are passed along WITH the rows, because valid results and errors can
// arrive together and the interface cannot return both separately.
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}
	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}
	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}
	// Fix: a 200 response with no "results" field left resultRows nil and the
	// dereference below panicked. Treat that as an empty row set.
	var rowData []byte
	if resultRows != nil {
		rowData = *resultRows
	}
	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be en error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(rowData), resp, signature, metrics, errors, extraVals)
	}
	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(rowData), resp, signature, nil, errs, nil)
}
// Executes a query that returns a set of Rows. SELECT statements should use
// this interface. "?" placeholders are rewritten to $n parameters and
// substituted before execution.
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	// Fast path: nothing to substitute.
	if len(args) == 0 {
		return conn.performQuery(query, nil)
	}
	prepared, expected := prepareQuery(query)
	if expected != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", expected, len(args))
	}
	prepared, args = preparePositionalArgs(prepared, expected, args)
	return conn.performQuery(prepared, nil)
}
// QueryRaw behaves like Query but hands back the raw, still-open response
// body; the caller is responsible for closing it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	// Fast path: nothing to substitute.
	if len(args) == 0 {
		return conn.performQueryRaw(query, nil)
	}
	prepared, expected := prepareQuery(query)
	if expected != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", expected, len(args))
	}
	prepared, args = preparePositionalArgs(prepared, expected, args)
	return conn.performQueryRaw(prepared, nil)
}
// performExecRaw issues the statement and hands the raw, still-open response
// body back to the caller (who owns closing it). On a non-200 status the body
// is returned together with an error so the caller can still inspect it.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, doErr := conn.doClientRequest(query, requestValues)
	if doErr != nil {
		return nil, doErr
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
// performExec runs a statement that returns no rows and reports the mutation
// count (when the server sent metrics) together with any execution errors.
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	var resultMap map[string]*json.RawMessage
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	res := &n1qlResult{}
	var execErr error
	for field, raw := range resultMap {
		switch field {
		case "metrics":
			var metrics map[string]interface{}
			if err := json.Unmarshal(*raw, &metrics); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			// mutationCount is only present for DML statements.
			if mc, found := metrics["mutationCount"]; found {
				res.affectedRows = int64(mc.(float64))
			}
		case "errors":
			var errs []interface{}
			_ = json.Unmarshal(*raw, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Execer implementation. Used for statements that return no rows, such as
// CREATE INDEX, INSERT, UPSERT and DELETE. "?" placeholders are rewritten to
// $n parameters and substituted before execution.
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	// Fast path: nothing to substitute.
	if len(args) == 0 {
		return conn.performExec(query, nil)
	}
	prepared, expected := prepareQuery(query)
	if expected != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", expected, len(args))
	}
	prepared, args = preparePositionalArgs(prepared, expected, args)
	return conn.performExec(prepared, nil)
}
// ExecRaw behaves like Exec but hands back the raw, still-open response body;
// the caller is responsible for closing it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	// Fast path: nothing to substitute.
	if len(args) == 0 {
		return conn.performExecRaw(query, nil)
	}
	prepared, expected := prepareQuery(query)
	if expected != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", expected, len(args))
	}
	prepared, args = preparePositionalArgs(prepared, expected, args)
	return conn.performExecRaw(prepared, nil)
}
// prepareQuery rewrites each "?" placeholder in the statement as a numbered
// positional parameter ($1, $2, ...) and reports how many were found.
func prepareQuery(query string) (string, int) {
	var rewritten strings.Builder
	total := 0
	for _, ch := range query {
		if ch == '?' {
			total++
			rewritten.WriteString(fmt.Sprintf("$%d", total))
		} else {
			rewritten.WriteRune(ch)
		}
	}
	return rewritten.String(), total
}
// preparePositionalArgs substitutes the first argCount args for their $n
// placeholders in the query and returns the rewritten query plus the unused
// (left-over) args. Strings are wrapped in double quotes, []byte is inserted
// verbatim, and any other type is rendered with %v (historical behavior).
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	n := argCount
	if len(args) < n {
		n = len(args)
	}
	// Fix: build the replacement pairs in DESCENDING placeholder order.
	// strings.NewReplacer compares old strings in argument order, so the old
	// ascending order matched "$1" inside "$10" and corrupted queries with
	// ten or more parameters.
	subList := make([]string, 0, 2*n)
	for i := n - 1; i >= 0; i-- {
		var a string
		switch arg := args[i].(type) {
		case string:
			a = fmt.Sprintf("\"%v\"", arg)
		case []byte:
			a = string(arg)
		default:
			a = fmt.Sprintf("%v", arg)
		}
		subList = append(subList, fmt.Sprintf("$%d", i+1), a)
	}
	// Args beyond argCount pass through untouched, in their original order.
	newArgs := make([]interface{}, 0)
	newArgs = append(newArgs, args[n:]...)
	r := strings.NewReplacer(subList...)
	return r.Replace(query), newArgs
}
// prepareRequest builds the HTTP POST request used to run a single statement
// against the given query API endpoint. The statement, any positional
// arguments, and all globally registered query parameters travel in an
// url-encoded form body. Credentials are attached only when configured.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	postData := url.Values{}
	postData.Set("statement", query)
	if len(args) > 0 {
		// NOTE(review): buildPositionalArgList is defined elsewhere in this
		// package; it appears to serialize args into the "args" form field.
		paStr := buildPositionalArgList(args)
		if len(paStr) > 0 {
			postData.Set("args", paStr)
		}
	}
	// Merge in any parameters registered via SetQueryParams.
	setQueryParams(&postData)
	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(postData.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}

// setQueryParams copies every globally registered query parameter into the
// outgoing form values.
func setQueryParams(v *url.Values) {
	for key, value := range QueryParams {
		v.Set(key, value)
	}
}
// HostNameandPort splits a node address into host and port. It handles
// IPv4-style "host:8091" and bracketed IPv6 "[::1]:8091" forms; port is ""
// when the input carries no port. ipv6 reports whether the host parsed as an
// IPv6 address (per IsIPv6).
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	tokens := []string{}
	// Set _IPv6 based on input address
	ipv6, err = IsIPv6(node)
	if err != nil {
		return "", "", false, err
	}
	err = nil
	// For IPv6
	if ipv6 {
		// Then the url should be of the form [::1]:8091
		tokens = strings.Split(node, "]:")
		host = strings.Replace(tokens[0], "[", "", 1)
	} else {
		// For IPv4 or FQDN: split on the colon.
		// NOTE(review): a host containing multiple colons that IsIPv6 did not
		// recognize would be mis-split here — confirm inputs are host[:port].
		tokens = strings.Split(node, ":")
		host = tokens[0]
	}
	// Exactly two tokens means a port was present after the host.
	if len(tokens) == 2 {
		port = tokens[1]
	} else {
		port = ""
	}
	return
}
// IsIPv6 reports whether the given address (bare host or host:port, including
// bracketed IPv6 such as [::1]:8091) is an IPv6 address. Non-numeric hosts
// (FQDNs) report false with no error.
func IsIPv6(str string) (bool, error) {
	//ipv6 - can be [::1]:8091
	host, _, err := net.SplitHostPort(str)
	if err != nil {
		// Not a host:port pair — treat the whole input as the host.
		host = str
	}
	if host == "localhost" {
		// LOCALHOST is a package-level value; it defaults to 127.0.0.1 and is
		// switched to "[::1]" by getQueryApi when running IPv6.
		host = LOCALHOST
	}
	ip := net.ParseIP(host)
	if ip == nil {
		// Essentially this is a FQDN. Golangs ParseIP cannot parse IPs that are non-numerical.
		// It could also be an incorrect address. But that can be handled by split host port.
		// This method is only to check if address is IPv6.
		return false, nil
	}
	if ip.To4() == nil {
		//Not an ipv4 address
		// check if ipv6
		// NOTE(review): for a non-nil net.IP, To16 never returns nil, so this
		// error branch looks unreachable — confirm before relying on it.
		if ip.To16() == nil {
			// Not ipv6
			return false, fmt.Errorf("\nThis is an incorrect address %v", str)
		}
		// IPv6
		return true, nil
	}
	// IPv4
	return false, nil
}
|
// Package main defines the nectar command line tool.
package main
import (
"bytes"
"encoding/csv"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gholt/brimtext"
"github.com/troubling/nectar"
)
// Global flags, honored by every subcommand. Several take their defaults
// from environment variables (noted per flag).
var (
	globalFlags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	globalFlagAuthURL = globalFlags.String("A", os.Getenv("AUTH_URL"), "|<url>| URL to auth system, example: http://127.0.0.1:8080/auth/v1.0 - Env: AUTH_URL")
	globalFlagAuthTenant = globalFlags.String("T", os.Getenv("AUTH_TENANT"), "|<tenant>| Tenant name for auth system, example: test - Not all auth systems need this. Env: AUTH_TENANT")
	globalFlagAuthUser = globalFlags.String("U", os.Getenv("AUTH_USER"), "|<user>| User name for auth system, example: tester - Some auth systems allow tenant:user format here, example: test:tester - Env: AUTH_USER")
	globalFlagAuthKey = globalFlags.String("K", os.Getenv("AUTH_KEY"), "|<key>| Key for auth system, example: testing - Some auth systems use passwords instead, see -P - Env: AUTH_KEY")
	globalFlagAuthPassword = globalFlags.String("P", os.Getenv("AUTH_PASSWORD"), "|<password>| Password for auth system, example: testing - Some auth system use keys instead, see -K - Env: AUTH_PASSWORD")
	globalFlagStorageRegion = globalFlags.String("R", os.Getenv("STORAGE_REGION"), "|<region>| Storage region to use if set, otherwise uses the default. Env: STORAGE_REGION")
	globalFlagVerbose = globalFlags.Bool("v", false, "Will activate verbose output.")
	globalFlagContinueOnError = globalFlags.Bool("continue-on-error", false, "When possible, continue with additional operations even if one or more fail.")
	globalFlagConcurrency *int // defined in init()
	globalFlagInternalStorage *bool // defined in init()
	globalFlagHeaders = stringListFlag{} // defined in init()
)

// Flags for the bench-get subcommand.
var (
	benchGetFlags = flag.NewFlagSet("bench-get", flag.ContinueOnError)
	benchGetFlagContainers = benchGetFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchGetFlagCount = benchGetFlags.Int("count", 1000, "|<number>| Number of objects to get, distributed across containers.")
	benchGetFlagCSV = benchGetFlags.String("csv", "", "|<filename>| Store the timing of each get into a CSV file.")
	benchGetFlagCSVOT = benchGetFlags.String("csvot", "", "|<filename>| Store the number of gets performed over time into a CSV file.")
	benchGetFlagIterations = benchGetFlags.Int("iterations", 1, "|<number>| Number of iterations to perform.")
)

// Flags for the bench-mixed subcommand.
var (
	benchMixedFlags = flag.NewFlagSet("bench-mixed", flag.ContinueOnError)
	benchMixedFlagContainers = benchMixedFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchMixedFlagCSV = benchMixedFlags.String("csv", "", "|<filename>| Store the timing of each request into a CSV file.")
	benchMixedFlagCSVOT = benchMixedFlags.String("csvot", "", "|<filename>| Store the number of requests performed over time into a CSV file.")
	benchMixedFlagSize = benchMixedFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
	benchMixedFlagRatios = benchMixedFlags.String("ratios", "1,2,2,2,2", "|<deletes>,<gets>,<heads>,<posts>,<puts>| Specifies the number of each type of request in relation to other requests. The default is 1,2,2,2,2 so that two of every other type of request will happen for each DELETE request.")
	benchMixedFlagTime = benchMixedFlags.String("time", "10m", "|<timespan>| Amount of time to run the test, such as 10m or 1h.")
)

// Flags for the bench-put subcommand.
var (
	benchPutFlags = flag.NewFlagSet("bench-put", flag.ContinueOnError)
	benchPutFlagContainers = benchPutFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchPutFlagCount = benchPutFlags.Int("count", 1000, "|<number>| Number of objects to PUT, distributed across containers.")
	benchPutFlagCSV = benchPutFlags.String("csv", "", "|<filename>| Store the timing of each PUT into a CSV file.")
	benchPutFlagCSVOT = benchPutFlags.String("csvot", "", "|<filename>| Store the number of PUTs performed over time into a CSV file.")
	benchPutFlagSize = benchPutFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
)

// Flags for the download subcommand.
var (
	downloadFlags = flag.NewFlagSet("download", flag.ContinueOnError)
	downloadFlagAccount = downloadFlags.Bool("a", false, "Indicates you truly wish to download the entire account; this is to prevent accidentally doing so when giving a single parameter to download.")
)

// Flags for the get subcommand (listings and object retrieval).
var (
	getFlags = flag.NewFlagSet("get", flag.ContinueOnError)
	getFlagRaw = getFlags.Bool("r", false, "Emit raw results")
	getFlagNameOnly = getFlags.Bool("n", false, "In listings, emits the names only")
	getFlagMarker = getFlags.String("marker", "", "|<text>| In listings, sets the start marker")
	getFlagEndMarker = getFlags.String("endmarker", "", "|<text>| In listings, sets the stop marker")
	getFlagReverse = getFlags.Bool("reverse", false, "In listings, reverses the order")
	getFlagLimit = getFlags.Int("limit", 0, "|<number>| In listings, limits the results")
	getFlagPrefix = getFlags.String("prefix", "", "|<text>| In listings, returns only those matching the prefix")
	getFlagDelimiter = getFlags.String("delimiter", "", "|<text>| In listings, sets the delimiter and activates delimiter listings")
)

// Flags for the head subcommand (currently none beyond the set itself).
var (
	headFlags = flag.NewFlagSet("head", flag.ContinueOnError)
)
// init seeds certain global flag defaults from the environment
// (CONCURRENCY, STORAGE_INTERNAL), registers the repeatable -H header
// option, and redirects every FlagSet's own error output into a throwaway
// buffer so that all usage/error text is rendered by fatal/helpFlags
// instead of the flag package.
func init() {
	// Parse errors are deliberately ignored: a missing or malformed env var
	// simply leaves the zero-value default.
	i32, _ := strconv.ParseInt(os.Getenv("CONCURRENCY"), 10, 32)
	globalFlagConcurrency = globalFlags.Int("C", int(i32), "|<number>| The maximum number of concurrent operations to perform; default is 1. Env: CONCURRENCY")
	b, _ := strconv.ParseBool(os.Getenv("STORAGE_INTERNAL"))
	globalFlagInternalStorage = globalFlags.Bool("I", b, "Internal storage URL resolution, such as Rackspace ServiceNet. Env: STORAGE_INTERNAL")
	globalFlags.Var(&globalFlagHeaders, "H", "|<name>:[value]| Sets a header to be sent with the request. Useful mostly for PUTs and POSTs, allowing you to set metadata. This option can be specified multiple times for additional headers.")
	// Silence the flag package's automatic error printing for every FlagSet.
	var flagbuf bytes.Buffer
	globalFlags.SetOutput(&flagbuf)
	benchGetFlags.SetOutput(&flagbuf)
	benchMixedFlags.SetOutput(&flagbuf)
	benchPutFlags.SetOutput(&flagbuf)
	downloadFlags.SetOutput(&flagbuf)
	getFlags.SetOutput(&flagbuf)
	headFlags.SetOutput(&flagbuf)
}
// fatal reports a fatal condition and exits with status 1. When err is nil
// or flag.ErrHelp it prints the full usage/help text for the tool and all
// subcommands; otherwise it prints the error to stderr, rewording the flag
// package's "flag provided but not defined" message into friendlier text.
func fatal(err error) {
	if err == flag.ErrHelp || err == nil {
		// Help requested (or nil sentinel): emit the complete usage text.
		fmt.Println(os.Args[0], `[options] <subcommand> ...`)
		fmt.Println(brimtext.Wrap(`
Tool for accessing a Hummingbird/Swift cluster. Some global options can also be set via environment variables. These will be noted at the end of the description with Env: NAME. The following global options are available:
`, 0, "    ", "    "))
		helpFlags(globalFlags)
		fmt.Println()
		fmt.Println(brimtext.Wrap(`
The following subcommands are available:`, 0, "", ""))
		fmt.Println("\nbench-get [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests GETs. By default, 1000 GETs are done from the named <container>. If you specify [object] it will be used as the prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-get with the same options with the possible addition of -iterations to lengthen the test time.
`, 0, "    ", "    "))
		helpFlags(benchGetFlags)
		fmt.Println("\nbench-mixed [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests mixed request workloads. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. This test is made to be run for a specific span of time (10 minutes by default). You probably want to run with the -continue-on-error global flag; due to the eventual consistency model of Swift|Hummingbird, a few requests may 404.
`, 0, "    ", "    "))
		helpFlags(benchMixedFlags)
		fmt.Println("\nbench-put [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests PUTs. By default, 1000 PUTs are done into the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used.
`, 0, "    ", "    "))
		helpFlags(benchPutFlags)
		fmt.Println("\ndelete [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a DELETE request. A DELETE, as probably expected, is used to remove the target.
`, 0, "    ", "    "))
		fmt.Println("\ndownload [options] [container] [object] <destpath>")
		fmt.Println(brimtext.Wrap(`
Downloads an object or objects to a local file or files. The <destpath> indicates where you want the file or files to be created. If you don't give [container] [object] the entire account will be downloaded (requires -a for confirmation). If you just give [container] that entire container will be downloaded. Perhaps obviously, if you give [container] [object] just that object will be downloaded.
`, 0, "    ", "    "))
		helpFlags(downloadFlags)
		fmt.Println("\nget [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a GET request. A GET on an account or container will output the listing of containers or objects, respectively. A GET on an object will output the content of the object to standard output.
`, 0, "    ", "    "))
		helpFlags(getFlags)
		fmt.Println("\nhead [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a HEAD request, giving overall information about the account, container, or object.
`, 0, "    ", "    "))
		helpFlags(headFlags)
		fmt.Println("\npost [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a POST request. POSTs allow you to update the metadata for the target.
`, 0, "    ", "    "))
		fmt.Println("\nput [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a PUT request. A PUT to an account or container will create them. A PUT to an object will create it using the content from standard input.
`, 0, "    ", "    "))
		fmt.Println("\nupload [options] <sourcepath> [container] [object]")
		fmt.Println(brimtext.Wrap(`
Uploads local files as objects. If you don't specify [container] the name of the current directory will be used. If you don't specify [object] the relative path name from the current directory will be used. If you do specify [object] while uploading a directory, [object] will be used as a prefix to the resulting object names. Note that when uploading a directory, only regular files will be uploaded.
`, 0, "    ", "    "))
		fmt.Println("\n[container] [object] can also be specified as [container]/[object]")
	} else {
		msg := err.Error()
		// Reword the flag package's unknown-option error for end users.
		if strings.HasPrefix(msg, "flag provided but not defined: ") {
			msg = "No such option: " + msg[len("flag provided but not defined: "):]
		}
		fmt.Fprintln(os.Stderr, msg)
	}
	os.Exit(1)
}
// fatalf writes the formatted message to standard error and terminates the
// process with exit status 1. It never returns.
func fatalf(frmt string, args ...interface{}) {
	out := os.Stderr
	fmt.Fprintf(out, frmt, args...)
	os.Exit(1)
}
// verbosef writes the formatted message to standard error, but only when
// the global verbose flag has been enabled; otherwise it is a no-op.
func verbosef(frmt string, args ...interface{}) {
	if !*globalFlagVerbose {
		return
	}
	fmt.Fprintf(os.Stderr, frmt, args...)
}
// helpFlags prints a two-column aligned table of a FlagSet's options and
// descriptions. A usage string of the form "|<arg>| description" has its
// <arg> portion appended to the option name and the remainder used as the
// description; the second column wraps to the terminal width.
func helpFlags(flags *flag.FlagSet) {
	var rows [][]string
	widest := 0
	flags.VisitAll(func(f *flag.Flag) {
		name := " -" + f.Name
		usage := strings.TrimSpace(f.Usage)
		// "|<arg>| text" usages embed an argument placeholder before the text.
		if usage != "" && usage[0] == '|' {
			if parts := strings.SplitN(usage, "|", 3); len(parts) == 3 {
				name += " " + strings.TrimSpace(parts[1])
				usage = strings.TrimSpace(parts[2])
			}
		}
		if len(name) > widest {
			widest = len(name)
		}
		rows = append(rows, []string{name, usage})
	})
	opts := brimtext.NewDefaultAlignOptions()
	// Let the first column size itself; wrap the second to the remaining width.
	opts.Widths = []int{0, brimtext.GetTTYWidth() - widest - 2}
	fmt.Print(brimtext.Align(rows, opts))
}
// main parses the global options, validates the required auth settings,
// authenticates to the cluster, and dispatches to the subcommand handler
// named by the first remaining argument. Any failure is fatal.
func main() {
	if err := globalFlags.Parse(os.Args[1:]); err != nil || len(globalFlags.Args()) == 0 {
		// No subcommand (or a parse error) prints usage and exits.
		fatal(err)
	}
	if *globalFlagAuthURL == "" {
		fatalf("No Auth URL set; use -A\n")
	}
	if *globalFlagAuthUser == "" {
		fatalf("No Auth User set; use -U\n")
	}
	if *globalFlagAuthKey == "" && *globalFlagAuthPassword == "" {
		fatalf("No Auth Key or Password set; use -K or -P\n")
	}
	// NewClient authenticates immediately; a non-nil resp indicates the auth
	// request failed and carries the error response.
	c, resp := nectar.NewClient(*globalFlagAuthTenant, *globalFlagAuthUser, *globalFlagAuthPassword, *globalFlagAuthKey, *globalFlagStorageRegion, *globalFlagAuthURL, *globalFlagInternalStorage)
	if resp != nil {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("Auth responded with %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	// First remaining arg is the subcommand; the rest are its arguments.
	cmd := ""
	args := append([]string{}, globalFlags.Args()...)
	if len(args) > 0 {
		cmd = args[0]
		args = args[1:]
	}
	switch cmd {
	case "bench-get":
		benchGet(c, args)
	case "bench-mixed":
		benchMixed(c, args)
	case "bench-put":
		benchPut(c, args)
	case "delete":
		delet(c, args)
	case "download":
		download(c, args)
	case "get":
		get(c, args)
	case "head":
		head(c, args)
	case "post":
		post(c, args)
	case "put":
		put(c, args)
	case "upload":
		upload(c, args)
	default:
		fatalf("Unknown command: %s\n", cmd)
	}
}
// benchGet benchmarks GET requests: it GETs count distinct objects
// (optionally spread across multiple containers), repeated -iterations
// times, at the global concurrency setting, then reports overall timing.
// Optional CSV files record per-request timings (-csv) and request counts
// over time (-csvot).
func benchGet(c nectar.Client, args []string) {
	if err := benchGetFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchGetFlags.Args())
	if container == "" {
		fatalf("bench-get requires <container>\n")
	}
	if object == "" {
		object = "bench-"
	}
	containers := *benchGetFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *benchGetFlagCount
	if count < 1 {
		count = 1000
	}
	// Optional per-request timing CSV; csvlk guards concurrent worker writes.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchGetFlagCSV != "" {
		csvf, err := os.Create(*benchGetFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "headers_elapsed_nanoseconds", "elapsed_nanoseconds"})
	}
	// Optional requests-over-time CSV; only written from this goroutine.
	var csvotw *csv.Writer
	if *benchGetFlagCSVOT != "" {
		csvotf, err := os.Create(*benchGetFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	iterations := *benchGetFlagIterations
	if iterations < 1 {
		iterations = 1
	}
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers pull 1-based object numbers from benchChan; a 0 (the zero
	// value received after close) tells a worker to exit.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			var headersElapsed int64
			for {
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				getContainer := container
				if containers > 1 {
					getContainer = fmt.Sprintf("%s%d", getContainer, i%containers)
				}
				getObject := fmt.Sprintf("%s%d", object, i)
				verbosef("GET %s/%s\n", getContainer, getObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.GetObject(getContainer, getObject, globalFlagHeaders.Headers())
				if csvw != nil {
					// Time to response headers, measured before draining the body.
					headersElapsed = time.Since(start).Nanoseconds()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				} else {
					// Drain the body so the underlying connection can be reused.
					io.Copy(ioutil.Discard, resp.Body)
				}
				resp.Body.Close()
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						getContainer + "/" + getObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", headersElapsed),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, from 1 container, at %d concurrency...", iterations*count, count, concurrency)
	} else {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, distributed across %d containers, at %d concurrency...", iterations*count, count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Fix: iterate iterations times (was `iteration <= iterations`, which
	// issued (iterations+1)*count requests and skewed the iterations*count
	// totals reported above and in the summary below).
	for iteration := 0; iteration < iterations; iteration++ {
		for i := 1; i <= count; i++ {
			waiting := true
			for waiting {
				select {
				case <-ticker.C:
					// Minute-by-minute progress; subtract concurrency as a rough
					// estimate of requests still in flight.
					soFar := iteration*count + i - concurrency
					now := time.Now()
					elapsed := now.Sub(start)
					fmt.Printf("\n%.05fs for %d GETs so far, %.05fs per GET, or %.05f GETs per second...", float64(elapsed)/float64(time.Second), soFar, float64(elapsed)/float64(time.Second)/float64(soFar), float64(soFar)/float64(elapsed/time.Second))
					if csvotw != nil {
						csvotw.Write([]string{
							fmt.Sprintf("%d", now.UnixNano()),
							fmt.Sprintf("%d", soFar-lastSoFar),
						})
						lastSoFar = soFar
					}
				case benchChan <- i:
					waiting = false
				}
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05fs per GET, or %.05f GETs per second.\n", float64(elapsed)/float64(time.Second), float64(elapsed)/float64(time.Second)/float64(iterations*count), float64(iterations*count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", iterations*count-lastSoFar),
		})
	}
}
// benchMixed benchmarks a mixed DELETE/GET/HEAD/POST/PUT workload against
// the cluster for a fixed span of time (-time), with the mix of request
// types controlled by -ratios. Objects may be spread across several
// containers (which are created up front). Optional CSV files record
// per-request timings (-csv) and per-method counts over time (-csvot).
func benchMixed(c nectar.Client, args []string) {
	if err := benchMixedFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchMixedFlags.Args())
	if container == "" {
		fatalf("bench-mixed requires <container>\n")
	}
	if object == "" {
		object = "bench-"
	}
	containers := *benchMixedFlagContainers
	if containers < 1 {
		containers = 1
	}
	size := int64(*benchMixedFlagSize)
	if size < 0 {
		size = 4096
	}
	timespan, err := time.ParseDuration(*benchMixedFlagTime)
	if err != nil {
		fatal(err)
	}
	// Operation codes; these index both methods and the -ratios fields.
	const (
		delet = iota
		get
		head
		post
		put
	)
	methods := []string{
		"DELETE",
		"GET",
		"HEAD",
		"POST",
		"PUT",
	}
	// Expand -ratios into a repeating schedule of operations; e.g. the
	// default "1,2,2,2,2" yields one DELETE plus two of each other method
	// per cycle through opOrder.
	ratios := strings.Split(*benchMixedFlagRatios, ",")
	if len(ratios) != 5 {
		fatalf("bench-mixed got a bad -ratio value: %v\n", ratios)
	}
	var opOrder []int
	for op, ratio := range ratios {
		n, err := strconv.Atoi(ratio)
		if err != nil {
			fatalf("bench-mixed got a bad -ratio value: %v %s\n", ratios, err)
		}
		for x := 0; x < n; x++ {
			opOrder = append(opOrder, op)
		}
	}
	// Optional per-request timing CSV; csvlk guards concurrent worker writes.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchMixedFlagCSV != "" {
		csvf, err := os.Create(*benchMixedFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "method", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
	}
	// Optional per-method counts-over-time CSV; only written from this goroutine.
	var csvotw *csv.Writer
	if *benchMixedFlagCSVOT != "" {
		csvotf, err := os.Create(*benchMixedFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "method", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	// Create the target container(s) before starting the workload.
	if containers == 1 {
		fmt.Printf("Ensuring container exists...")
		verbosef("PUT %s\n", container)
		resp := c.PutContainer(container, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if *globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			} else {
				fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		resp.Body.Close()
	} else {
		fmt.Printf("Ensuring %d containers exist...", containers)
		for x := 0; x < containers; x++ {
			putContainer := fmt.Sprintf("%s%d", container, x)
			verbosef("PUT %s\n", putContainer)
			resp := c.PutContainer(putContainer, globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				if *globalFlagContinueOnError {
					fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					continue
				} else {
					fatalf("PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				}
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Each value sent on benchChan packs the object number in the high bits
	// and the operation code in the low 4 bits (i<<4 | op); a 0 (from
	// channel close) tells a worker to exit. Per-method counters are
	// updated atomically by the workers.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	var deletes int64
	var gets int64
	var heads int64
	var posts int64
	var puts int64
	for x := 0; x < concurrency; x++ {
		go func() {
			// Per-worker PRNG doubles as the random byte stream for PUT bodies.
			rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break
				}
				// Unpack operation code and object number.
				op := i & 0xf
				i >>= 4
				opContainer := container
				if containers > 1 {
					opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
				}
				opObject := fmt.Sprintf("%s%d", object, i)
				verbosef("%s %s/%s\n", methods[op], opContainer, opObject)
				if csvw != nil {
					start = time.Now()
				}
				var resp *http.Response
				switch op {
				case delet:
					resp = c.DeleteObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&deletes, 1)
				case get:
					resp = c.GetObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&gets, 1)
				case head:
					resp = c.HeadObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&heads, 1)
				case post:
					headers := globalFlagHeaders.Headers()
					headers["X-Object-Meta-Bench-Mixed"] = strconv.Itoa(i)
					resp = c.PostObject(opContainer, opObject, headers)
					atomic.AddInt64(&posts, 1)
				case put:
					resp = c.PutObject(opContainer, opObject, globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: size})
					atomic.AddInt64(&puts, 1)
				default:
					panic(fmt.Errorf("programming error: %d", op))
				}
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						methods[op],
						opContainer + "/" + opObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-Mixed for %s, each object is %d bytes, into 1 container, at %d concurrency...", timespan, size, concurrency)
	} else {
		fmt.Printf("Bench-Mixed for %s, each object is %d bytes, distributed across %d containers, at %d concurrency...", timespan, size, containers, concurrency)
	}
	timespanTicker := time.NewTicker(timespan)
	updateTicker := time.NewTicker(time.Minute)
	start := time.Now()
	var lastDeletes int64
	var lastGets int64
	var lastHeads int64
	var lastPosts int64
	var lastPuts int64
	var sentDeletes int
	var sentPuts int
requestLoop:
	for x := 0; ; x++ {
		var i int
		// Walk the ratio-derived schedule round-robin.
		op := opOrder[x%len(opOrder)]
		switch op {
		case delet:
			// Hold off deletes until enough objects have been PUT.
			if atomic.LoadInt64(&puts) < 1000 {
				continue
			}
			sentDeletes++
			i = sentDeletes
		case put:
			sentPuts++
			i = sentPuts
		default:
			// GET/HEAD/POST target the middle of the range of objects that
			// have been created but not yet deleted; wait for a big enough pool.
			rang := (int(atomic.LoadInt64(&puts)) - sentDeletes) / 2
			if rang < 1000 {
				continue
			}
			i = sentDeletes + rang/2 + (x % rang)
		}
		waiting := true
		for waiting {
			select {
			case <-timespanTicker.C:
				// Time's up; stop issuing requests.
				break requestLoop
			case <-updateTicker.C:
				// Minute-by-minute progress report.
				now := time.Now()
				elapsed := now.Sub(start)
				snapshotDeletes := atomic.LoadInt64(&deletes)
				snapshotGets := atomic.LoadInt64(&gets)
				snapshotHeads := atomic.LoadInt64(&heads)
				snapshotPosts := atomic.LoadInt64(&posts)
				snapshotPuts := atomic.LoadInt64(&puts)
				total := snapshotDeletes + snapshotGets + snapshotHeads + snapshotPosts + snapshotPuts
				fmt.Printf("\n%.05fs for %d requests so far, %.05fs per request, or %.05f requests per second...", float64(elapsed)/float64(time.Second), total, float64(elapsed)/float64(time.Second)/float64(total), float64(total)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"DELETE",
						fmt.Sprintf("%d", snapshotDeletes-lastDeletes),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"GET",
						fmt.Sprintf("%d", snapshotGets-lastGets),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"HEAD",
						fmt.Sprintf("%d", snapshotHeads-lastHeads),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"POST",
						fmt.Sprintf("%d", snapshotPosts-lastPosts),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"PUT",
						fmt.Sprintf("%d", snapshotPuts-lastPuts),
					})
					lastDeletes = snapshotDeletes
					lastGets = snapshotGets
					lastHeads = snapshotHeads
					lastPosts = snapshotPosts
					lastPuts = snapshotPuts
				}
			case benchChan <- i<<4 | op:
				waiting = false
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	timespanTicker.Stop()
	updateTicker.Stop()
	fmt.Println()
	// Workers have exited, so the counters can be read without atomics.
	total := deletes + gets + heads + posts + puts
	fmt.Printf("%.05fs for %d requests, %.05fs per request, or %.05f requests per second.\n", float64(elapsed)/float64(time.Second), total, float64(elapsed)/float64(time.Second)/float64(total), float64(total)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"DELETE",
			fmt.Sprintf("%d", deletes-lastDeletes),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"GET",
			fmt.Sprintf("%d", gets-lastGets),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"HEAD",
			fmt.Sprintf("%d", heads-lastHeads),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"POST",
			fmt.Sprintf("%d", posts-lastPosts),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"PUT",
			fmt.Sprintf("%d", puts-lastPuts),
		})
	}
}
// benchPut benchmarks PUT requests: it PUTs count objects of -size random
// bytes (optionally spread across multiple containers, which are created
// up front) at the global concurrency setting and reports overall timing.
// Optional CSV files record per-request timings (-csv) and request counts
// over time (-csvot).
func benchPut(c nectar.Client, args []string) {
	if err := benchPutFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchPutFlags.Args())
	if container == "" {
		fatalf("bench-put requires <container>\n")
	}
	if object == "" {
		object = "bench-"
	}
	containers := *benchPutFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *benchPutFlagCount
	if count < 1 {
		count = 1000
	}
	size := int64(*benchPutFlagSize)
	if size < 0 {
		size = 4096
	}
	// Optional per-request timing CSV; csvlk guards concurrent worker writes.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchPutFlagCSV != "" {
		csvf, err := os.Create(*benchPutFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
	}
	// Optional requests-over-time CSV; only written from this goroutine.
	var csvotw *csv.Writer
	if *benchPutFlagCSVOT != "" {
		csvotf, err := os.Create(*benchPutFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	// Create the target container(s) before starting the workload.
	if containers == 1 {
		fmt.Printf("Ensuring container exists...")
		verbosef("PUT %s\n", container)
		resp := c.PutContainer(container, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if *globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			} else {
				fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		resp.Body.Close()
	} else {
		fmt.Printf("Ensuring %d containers exist...", containers)
		for x := 0; x < containers; x++ {
			putContainer := fmt.Sprintf("%s%d", container, x)
			verbosef("PUT %s\n", putContainer)
			resp := c.PutContainer(putContainer, globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				if *globalFlagContinueOnError {
					fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					continue
				} else {
					fatalf("PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				}
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers pull 1-based object numbers from benchChan; a 0 (the zero
	// value received after close) tells a worker to exit.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			// Per-worker PRNG serves as the random byte stream for PUT bodies.
			rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				putContainer := container
				if containers > 1 {
					putContainer = fmt.Sprintf("%s%d", putContainer, i%containers)
				}
				putObject := fmt.Sprintf("%s%d", object, i)
				verbosef("PUT %s/%s\n", putContainer, putObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.PutObject(putContainer, putObject, globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: size})
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						putContainer + "/" + putObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-PUT of %d objects, each %d bytes, into 1 container, at %d concurrency...", count, size, concurrency)
	} else {
		fmt.Printf("Bench-PUT of %d objects, each %d bytes, distributed across %d containers, at %d concurrency...", count, size, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Minute-by-minute progress; subtract concurrency as a rough
				// estimate of requests still in flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				fmt.Printf("\n%.05fs for %d PUTs so far, %.05fs per PUT, or %.05f PUTs per second...", float64(elapsed)/float64(time.Second), soFar, float64(elapsed)/float64(time.Second)/float64(soFar), float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05fs per PUT, or %.05f PUTs per second.\n", float64(elapsed)/float64(time.Second), float64(elapsed)/float64(time.Second)/float64(count), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
	}
}
// delet performs a DELETE against the object, container, or account named
// by args (named "delet" because "delete" is a Go built-in). A non-2xx
// response is fatal.
func delet(c nectar.Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		resp = c.DeleteObject(container, object, globalFlagHeaders.Headers())
	case container != "":
		resp = c.DeleteContainer(container, globalFlagHeaders.Headers())
	default:
		resp = c.DeleteAccount(globalFlagHeaders.Headers())
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// get performs a GET. When -r is given, or an object is named, the raw
// response is emitted (status plus headers for raw/listing requests, then
// the body to stdout). Otherwise account and container listings are
// fetched via the typed client calls and rendered as aligned tables, or
// as bare names with -n.
func get(c nectar.Client, args []string) {
	if err := getFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(getFlags.Args())
	if *getFlagRaw || object != "" {
		// Raw mode or a single object: stream the response straight through.
		var resp *http.Response
		if object != "" {
			resp = c.GetObject(container, object, globalFlagHeaders.Headers())
		} else if container != "" {
			resp = c.GetContainerRaw(container, *getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		} else {
			resp = c.GetAccountRaw(*getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		}
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		// For raw requests and raw listings (not plain object GETs), print
		// the status line and sorted response headers first.
		if *getFlagRaw || object == "" {
			data := [][]string{}
			ks := []string{}
			for k := range resp.Header {
				ks = append(ks, k)
			}
			sort.Strings(ks)
			for _, k := range ks {
				for _, v := range resp.Header[k] {
					data = append(data, []string{k + ":", v})
				}
			}
			fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
			opts := brimtext.NewDefaultAlignOptions()
			fmt.Print(brimtext.Align(data, opts))
		}
		// NOTE(review): resp.Body is not closed after this copy; acceptable
		// for a short-lived CLI, but worth confirming.
		if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
			fatal(err)
		}
		return
	}
	if container != "" {
		// Typed container listing (objects and/or delimiter subdirs).
		entries, resp := c.GetContainer(container, *getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		if *getFlagNameOnly {
			for _, entry := range entries {
				if entry.Subdir != "" {
					fmt.Println(entry.Subdir)
				} else {
					fmt.Println(entry.Name)
				}
			}
		} else {
			var data [][]string
			data = [][]string{{"Name", "Bytes", "Content Type", "Last Modified", "Hash"}}
			for _, entry := range entries {
				if entry.Subdir != "" {
					// Delimiter pseudo-directories only have a name.
					data = append(data, []string{entry.Subdir, "", "", "", ""})
				} else {
					data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Bytes), entry.ContentType, entry.LastModified, entry.Hash})
				}
			}
			fmt.Print(brimtext.Align(data, nil))
		}
		return
	}
	// No container: typed account listing of containers.
	entries, resp := c.GetAccount(*getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	if *getFlagNameOnly {
		for _, entry := range entries {
			fmt.Println(entry.Name)
		}
	} else {
		var data [][]string
		data = [][]string{{"Name", "Count", "Bytes"}}
		for _, entry := range entries {
			data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Count), fmt.Sprintf("%d", entry.Bytes)})
		}
		fmt.Print(brimtext.Align(data, nil))
	}
	return
}
// head performs a HEAD request against the account, container, or object
// named by args, then prints the response status line followed by the
// response headers sorted by name. A non-2xx response is fatal.
func head(c nectar.Client, args []string) {
	if err := headFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(headFlags.Args())
	var resp *http.Response
	if object != "" {
		resp = c.HeadObject(container, object, globalFlagHeaders.Headers())
	} else if container != "" {
		resp = c.HeadContainer(container, globalFlagHeaders.Headers())
	} else {
		resp = c.HeadAccount(globalFlagHeaders.Headers())
	}
	bodyBytes, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	// Sort header names for stable output. (The original also built an
	// identity map from each key to itself and looked keys up through it;
	// that was dead code and has been removed — output is unchanged.)
	data := [][]string{}
	ks := []string{}
	for k := range resp.Header {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	for _, k := range ks {
		for _, v := range resp.Header[k] {
			data = append(data, []string{k + ":", v})
		}
	}
	fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
	fmt.Print(brimtext.Align(data, brimtext.NewDefaultAlignOptions()))
}
// put performs a PUT: creating the named container or account, or creating
// the named object from standard input. A non-2xx response is fatal.
func put(c nectar.Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		// Object content is streamed from stdin.
		resp = c.PutObject(container, object, globalFlagHeaders.Headers(), os.Stdin)
	case container != "":
		resp = c.PutContainer(container, globalFlagHeaders.Headers())
	default:
		resp = c.PutAccount(globalFlagHeaders.Headers())
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// post performs a POST against the object, container, or account named by
// args, updating its metadata from the -H headers. A non-2xx response is
// fatal.
func post(c nectar.Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		resp = c.PostObject(container, object, globalFlagHeaders.Headers())
	case container != "":
		resp = c.PostContainer(container, globalFlagHeaders.Headers())
	default:
		resp = c.PostAccount(globalFlagHeaders.Headers())
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// upload copies local files into the cluster as objects. The target
// container defaults to the current directory's name and is created first;
// object names are the [object] prefix plus each file's path. A regular
// file is uploaded directly; a directory is walked and every regular file
// within it is uploaded via a pool of concurrent workers.
func upload(c nectar.Client, args []string) {
	if len(args) == 0 {
		fatalf("<sourcepath> is required for upload.\n")
	}
	sourcepath := args[0]
	container, object := parsePath(args[1:])
	if container == "" {
		// Default the container name to the current directory's base name.
		abscwd, err := filepath.Abs(".")
		if err != nil {
			fatalf("Could not determine current working directory: %s\n", err)
		}
		container = filepath.Base(abscwd)
	}
	verbosef("Ensuring container %q exists.\n", container)
	resp := c.PutContainer(container, globalFlagHeaders.Headers())
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	resp.Body.Close()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers pull file paths from uploadChan; an empty string (the zero
	// value received after close) tells a worker to exit.
	uploadChan := make(chan string, concurrency-1)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			for {
				path := <-uploadChan
				if path == "" {
					break
				}
				verbosef("Uploading %q to %q %q.\n", path, container, object+path)
				f, err := os.Open(path)
				if err != nil {
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, object+path, err)
						continue
					} else {
						fatalf("Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, object+path, err)
					}
				}
				resp := c.PutObject(container, object+path, globalFlagHeaders.Headers(), f)
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", container, object+path, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("PUT %s/%s - %d %s - %s\n", container, object+path, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
				f.Close()
			}
			wg.Done()
		}()
	}
	fi, err := os.Stat(sourcepath)
	if err != nil {
		fatalf("Could not stat %s: %s\n", sourcepath, err)
	}
	// This "if" is so a single file upload that happens to be a symlink will work.
	if fi.Mode().IsRegular() {
		uploadChan <- sourcepath
	} else {
		// This "if" is to handle when the user-given path is a symlink to a directory; we normally want to skip symlinks, but not in this initial case.
		if !strings.HasSuffix(sourcepath, string(os.PathSeparator)) {
			sourcepath += string(os.PathSeparator)
		}
		filepath.Walk(sourcepath, func(path string, info os.FileInfo, err error) error {
			// Walk errors and non-regular files (dirs, symlinks) are skipped.
			if err != nil || !info.Mode().IsRegular() {
				return nil
			}
			uploadChan <- path
			return nil
		})
	}
	close(uploadChan)
	wg.Wait()
}
// download fetches an object, a whole container, or the whole account into
// local files. args (after flag parsing) is [container] [object] <destpath>;
// with no container given, the entire account is downloaded (requires the -a
// flag as confirmation).
func download(c nectar.Client, args []string) {
	if err := downloadFlags.Parse(args); err != nil {
		fatal(err)
	}
	args = downloadFlags.Args()
	if len(args) == 0 {
		fatalf("<destpath> is required for download.\n")
	}
	destpath := args[len(args)-1]
	container, object := parsePath(args[:len(args)-1])
	concurrency := *globalFlagConcurrency
	// Need at least 2 to queue object downloads while reading a container listing.
	if concurrency < 2 {
		concurrency = 2
	}
	// A task with an empty object means "list this container and enqueue one
	// task per object found"; a nil task (the zero value received after
	// close) tells a worker to exit.
	type downloadTask struct {
		container string
		object    string
		destpath  string
	}
	downloadChan := make(chan *downloadTask, concurrency-1)
	var dirExistsLock sync.Mutex
	// dirExists caches destination directories already created so MkdirAll
	// isn't repeated for every object that shares a directory.
	dirExists := map[string]bool{}
	taskWG := sync.WaitGroup{}
	taskWG.Add(concurrency)
	// containerWG counts outstanding container-listing tasks; downloadChan
	// can only be closed once every listing has finished enqueueing.
	containerWG := sync.WaitGroup{}
	for i := 0; i < concurrency; i++ {
		go func() {
			for {
				task := <-downloadChan
				if task == nil {
					break
				}
				if task.object == "" {
					// Container-listing task: enqueue a download task for
					// each object named in the listing.
					entries, resp := c.GetContainer(task.container, "", "", 0, "", "", false, globalFlagHeaders.Headers())
					if resp.StatusCode/100 != 2 {
						bodyBytes, _ := ioutil.ReadAll(resp.Body)
						resp.Body.Close()
						containerWG.Done()
						if *globalFlagContinueOnError {
							fmt.Fprintf(os.Stderr, "GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
							continue
						} else {
							fatalf("GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						}
					}
					resp.Body.Close()
					for _, entry := range entries {
						if entry.Name != "" {
							downloadChan <- &downloadTask{container: task.container, object: entry.Name, destpath: filepath.Join(task.destpath, filepath.FromSlash(entry.Name))}
						}
					}
					containerWG.Done()
					continue
				}
				verbosef("Downloading %s/%s to %s.\n", task.container, task.object, task.destpath)
				// Ensure the destination directory exists (once per directory).
				if dstdr := filepath.Dir(task.destpath); dstdr != "." {
					dirExistsLock.Lock()
					if !dirExists[dstdr] {
						if err := os.MkdirAll(dstdr, 0755); err != nil {
							if *globalFlagContinueOnError {
								fmt.Fprintf(os.Stderr, "Could not make directory path %s: %s\n", dstdr, err)
							} else {
								fatalf("Could not make directory path %s: %s\n", dstdr, err)
							}
						}
						dirExists[dstdr] = true
					}
					dirExistsLock.Unlock()
				}
				f, err := os.Create(task.destpath)
				if err != nil {
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not create %s: %s\n", task.destpath, err)
						continue
					} else {
						fatalf("Could not create %s: %s\n", task.destpath, err)
					}
				}
				resp := c.GetObject(task.container, task.object, globalFlagHeaders.Headers())
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				if _, err = io.Copy(f, resp.Body); err != nil {
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
						continue
					} else {
						fatalf("Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
					}
				}
				resp.Body.Close()
				f.Close()
			}
			taskWG.Done()
		}()
	}
	if object != "" {
		// Single-object download.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if fi.IsDir() {
			// Destination is an existing directory; place the object in it.
			destpath = filepath.Join(destpath, object)
		}
		downloadChan <- &downloadTask{container: container, object: object, destpath: destpath}
	} else if container != "" {
		// Whole-container download.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			fatalf("Cannot download a container to a single file: %s\n", destpath)
		}
		containerWG.Add(1)
		downloadChan <- &downloadTask{container: container, object: "", destpath: destpath}
	} else if !*downloadFlagAccount {
		fatalf("You must specify -a if you wish to download the entire account.\n")
	} else {
		// Whole-account download: list containers here, enqueue one listing
		// task per container.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			fatalf("Cannot download an account to a single file: %s\n", destpath)
		}
		entries, resp := c.GetAccount("", "", 0, "", "", false, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("GET - %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		resp.Body.Close()
		for _, entry := range entries {
			if entry.Name != "" {
				containerWG.Add(1)
				downloadChan <- &downloadTask{container: entry.Name, object: "", destpath: filepath.Join(destpath, entry.Name)}
			}
		}
	}
	// Wait until all listings have finished enqueueing objects, then close
	// the channel so workers drain the remaining tasks and exit.
	containerWG.Wait()
	close(downloadChan)
	taskWG.Wait()
}
// parsePath joins the given arguments with "/" separators (avoiding a double
// slash when a piece already ends in "/") and splits the result into a
// (container, object) pair at the first "/". An empty argument list yields
// ("", ""); a path with no "/" yields (path, "").
func parsePath(args []string) (string, string) {
	if len(args) == 0 {
		return "", ""
	}
	joined := ""
	for _, arg := range args {
		switch {
		case joined == "":
			joined = arg
		case strings.HasSuffix(joined, "/"):
			joined += arg
		default:
			joined += "/" + arg
		}
	}
	if i := strings.Index(joined, "/"); i >= 0 {
		return joined[:i], joined[i+1:]
	}
	return joined, ""
}
// stringListFlag collects every occurrence of a repeatable command-line flag
// into a list; it implements flag.Value.
type stringListFlag []string

// Set appends value to the list; it never fails.
func (slf *stringListFlag) Set(value string) error {
	*slf = append(*slf, value)
	return nil
}

// String renders the collected values space-separated.
func (slf *stringListFlag) String() string {
	return strings.Join(*slf, " ")
}

// Headers converts each collected "Name: Value" entry into a header map;
// entries without a colon map the whole trimmed entry to an empty value.
func (slf *stringListFlag) Headers() map[string]string {
	headers := make(map[string]string, len(*slf))
	for _, entry := range *slf {
		name, value := entry, ""
		if i := strings.Index(entry, ":"); i >= 0 {
			name, value = entry[:i], entry[i+1:]
		}
		headers[strings.TrimSpace(name)] = strings.TrimSpace(value)
	}
	return headers
}
// HUM-337 Added bench-post
// Package main defines the nectar command line tool.
package main
import (
"bytes"
"encoding/csv"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gholt/brimtext"
"github.com/troubling/nectar"
)
// Global options shared by every subcommand; several take their defaults
// from environment variables.
var (
	globalFlags               = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	globalFlagAuthURL         = globalFlags.String("A", os.Getenv("AUTH_URL"), "|<url>| URL to auth system, example: http://127.0.0.1:8080/auth/v1.0 - Env: AUTH_URL")
	globalFlagAuthTenant      = globalFlags.String("T", os.Getenv("AUTH_TENANT"), "|<tenant>| Tenant name for auth system, example: test - Not all auth systems need this. Env: AUTH_TENANT")
	globalFlagAuthUser        = globalFlags.String("U", os.Getenv("AUTH_USER"), "|<user>| User name for auth system, example: tester - Some auth systems allow tenant:user format here, example: test:tester - Env: AUTH_USER")
	globalFlagAuthKey         = globalFlags.String("K", os.Getenv("AUTH_KEY"), "|<key>| Key for auth system, example: testing - Some auth systems use passwords instead, see -P - Env: AUTH_KEY")
	globalFlagAuthPassword    = globalFlags.String("P", os.Getenv("AUTH_PASSWORD"), "|<password>| Password for auth system, example: testing - Some auth system use keys instead, see -K - Env: AUTH_PASSWORD")
	globalFlagStorageRegion   = globalFlags.String("R", os.Getenv("STORAGE_REGION"), "|<region>| Storage region to use if set, otherwise uses the default. Env: STORAGE_REGION")
	globalFlagVerbose         = globalFlags.Bool("v", false, "Will activate verbose output.")
	globalFlagContinueOnError = globalFlags.Bool("continue-on-error", false, "When possible, continue with additional operations even if one or more fail.")
	globalFlagConcurrency     *int              // defined in init()
	globalFlagInternalStorage *bool             // defined in init()
	globalFlagHeaders         = stringListFlag{} // defined in init()
)

// Options for the bench-get subcommand.
var (
	benchGetFlags          = flag.NewFlagSet("bench-get", flag.ContinueOnError)
	benchGetFlagContainers = benchGetFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchGetFlagCount      = benchGetFlags.Int("count", 1000, "|<number>| Number of objects to get, distributed across containers.")
	benchGetFlagCSV        = benchGetFlags.String("csv", "", "|<filename>| Store the timing of each get into a CSV file.")
	benchGetFlagCSVOT      = benchGetFlags.String("csvot", "", "|<filename>| Store the number of gets performed over time into a CSV file.")
	benchGetFlagIterations = benchGetFlags.Int("iterations", 1, "|<number>| Number of iterations to perform.")
)

// Options for the bench-mixed subcommand.
var (
	benchMixedFlags          = flag.NewFlagSet("bench-mixed", flag.ContinueOnError)
	benchMixedFlagContainers = benchMixedFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchMixedFlagCSV        = benchMixedFlags.String("csv", "", "|<filename>| Store the timing of each request into a CSV file.")
	benchMixedFlagCSVOT      = benchMixedFlags.String("csvot", "", "|<filename>| Store the number of requests performed over time into a CSV file.")
	benchMixedFlagSize       = benchMixedFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
	benchMixedFlagRatios     = benchMixedFlags.String("ratios", "1,2,2,2,2", "|<deletes>,<gets>,<heads>,<posts>,<puts>| Specifies the number of each type of request in relation to other requests. The default is 1,2,2,2,2 so that two of every other type of request will happen for each DELETE request.")
	benchMixedFlagTime       = benchMixedFlags.String("time", "10m", "|<timespan>| Amount of time to run the test, such as 10m or 1h.")
)

// Options for the bench-post subcommand.
var (
	benchPostFlags          = flag.NewFlagSet("bench-post", flag.ContinueOnError)
	benchPostFlagContainers = benchPostFlags.Int("containers", 1, "|<number>| Number of containers in use.")
	benchPostFlagCount      = benchPostFlags.Int("count", 1000, "|<number>| Number of objects to post, distributed across containers.")
	benchPostFlagCSV        = benchPostFlags.String("csv", "", "|<filename>| Store the timing of each post into a CSV file.")
	benchPostFlagCSVOT      = benchPostFlags.String("csvot", "", "|<filename>| Store the number of posts performed over time into a CSV file.")
)

// Options for the bench-put subcommand.
var (
	benchPutFlags          = flag.NewFlagSet("bench-put", flag.ContinueOnError)
	benchPutFlagContainers = benchPutFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	benchPutFlagCount      = benchPutFlags.Int("count", 1000, "|<number>| Number of objects to PUT, distributed across containers.")
	benchPutFlagCSV        = benchPutFlags.String("csv", "", "|<filename>| Store the timing of each PUT into a CSV file.")
	benchPutFlagCSVOT      = benchPutFlags.String("csvot", "", "|<filename>| Store the number of PUTs performed over time into a CSV file.")
	benchPutFlagSize       = benchPutFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
)

// Options for the download subcommand.
var (
	downloadFlags       = flag.NewFlagSet("download", flag.ContinueOnError)
	downloadFlagAccount = downloadFlags.Bool("a", false, "Indicates you truly wish to download the entire account; this is to prevent accidentally doing so when giving a single parameter to download.")
)

// Options for the get subcommand (mostly listing controls).
var (
	getFlags         = flag.NewFlagSet("get", flag.ContinueOnError)
	getFlagRaw       = getFlags.Bool("r", false, "Emit raw results")
	getFlagNameOnly  = getFlags.Bool("n", false, "In listings, emits the names only")
	getFlagMarker    = getFlags.String("marker", "", "|<text>| In listings, sets the start marker")
	getFlagEndMarker = getFlags.String("endmarker", "", "|<text>| In listings, sets the stop marker")
	getFlagReverse   = getFlags.Bool("reverse", false, "In listings, reverses the order")
	getFlagLimit     = getFlags.Int("limit", 0, "|<number>| In listings, limits the results")
	getFlagPrefix    = getFlags.String("prefix", "", "|<text>| In listings, returns only those matching the prefix")
	getFlagDelimiter = getFlags.String("delimiter", "", "|<text>| In listings, sets the delimiter and activates delimiter listings")
)

// Options for the head subcommand (currently none beyond the set itself).
var (
	headFlags = flag.NewFlagSet("head", flag.ContinueOnError)
)
// init finishes flag setup that needs runtime values: defaults pulled from
// the environment, the repeatable -H header flag, and silencing the flag
// packages' own error output (errors are reported through fatal instead).
func init() {
	concurrencyDefault, _ := strconv.ParseInt(os.Getenv("CONCURRENCY"), 10, 32)
	globalFlagConcurrency = globalFlags.Int("C", int(concurrencyDefault), "|<number>| The maximum number of concurrent operations to perform; default is 1. Env: CONCURRENCY")
	internalDefault, _ := strconv.ParseBool(os.Getenv("STORAGE_INTERNAL"))
	globalFlagInternalStorage = globalFlags.Bool("I", internalDefault, "Internal storage URL resolution, such as Rackspace ServiceNet. Env: STORAGE_INTERNAL")
	globalFlags.Var(&globalFlagHeaders, "H", "|<name>:[value]| Sets a header to be sent with the request. Useful mostly for PUTs and POSTs, allowing you to set metadata. This option can be specified multiple times for additional headers.")
	// Redirect every flag set's output into a throwaway buffer.
	var flagbuf bytes.Buffer
	for _, flags := range []*flag.FlagSet{
		globalFlags,
		benchGetFlags,
		benchMixedFlags,
		benchPostFlags,
		benchPutFlags,
		downloadFlags,
		getFlags,
		headFlags,
	} {
		flags.SetOutput(&flagbuf)
	}
}
// fatal reports a fatal condition and exits with status 1. When err is nil
// or flag.ErrHelp (help was requested, or no subcommand was given), it
// prints the full usage text for the tool and all subcommands; otherwise it
// prints the error message, rewording the flag package's "flag provided but
// not defined" into friendlier phrasing.
func fatal(err error) {
	if err == flag.ErrHelp || err == nil {
		fmt.Println(os.Args[0], `[options] <subcommand> ...`)
		fmt.Println(brimtext.Wrap(`
Tool for accessing a Hummingbird/Swift cluster. Some global options can also be set via environment variables. These will be noted at the end of the description with Env: NAME. The following global options are available:
`, 0, "    ", "    "))
		helpFlags(globalFlags)
		fmt.Println()
		fmt.Println(brimtext.Wrap(`
The following subcommands are available:`, 0, "", ""))
		// One usage-line + wrapped-description + flag-help stanza per subcommand.
		fmt.Println("\nbench-get [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests GETs. By default, 1000 GETs are done from the named <container>. If you specify [object] it will be used as the prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-get with the same options with the possible addition of -iterations to lengthen the test time.
`, 0, "    ", "    "))
		helpFlags(benchGetFlags)
		fmt.Println("\nbench-mixed [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests mixed request workloads. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. This test is made to be run for a specific span of time (10 minutes by default). You probably want to run with the -continue-on-error global flag; due to the eventual consistency model of Swift|Hummingbird, a few requests may 404.
`, 0, "    ", "    "))
		helpFlags(benchMixedFlags)
		fmt.Println("\nbench-post [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests POSTs. By default, 1000 POSTs are done against the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-post with the same options to test POSTing.
`, 0, "    ", "    "))
		helpFlags(benchPostFlags)
		fmt.Println("\nbench-put [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests PUTs. By default, 1000 PUTs are done into the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used.
`, 0, "    ", "    "))
		helpFlags(benchPutFlags)
		fmt.Println("\ndelete [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a DELETE request. A DELETE, as probably expected, is used to remove the target.
`, 0, "    ", "    "))
		fmt.Println("\ndownload [options] [container] [object] <destpath>")
		fmt.Println(brimtext.Wrap(`
Downloads an object or objects to a local file or files. The <destpath> indicates where you want the file or files to be created. If you don't give [container] [object] the entire account will be downloaded (requires -a for confirmation). If you just give [container] that entire container will be downloaded. Perhaps obviously, if you give [container] [object] just that object will be downloaded.
`, 0, "    ", "    "))
		helpFlags(downloadFlags)
		fmt.Println("\nget [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a GET request. A GET on an account or container will output the listing of containers or objects, respectively. A GET on an object will output the content of the object to standard output.
`, 0, "    ", "    "))
		helpFlags(getFlags)
		fmt.Println("\nhead [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a HEAD request, giving overall information about the account, container, or object.
`, 0, "    ", "    "))
		helpFlags(headFlags)
		fmt.Println("\npost [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a POST request. POSTs allow you to update the metadata for the target.
`, 0, "    ", "    "))
		fmt.Println("\nput [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a PUT request. A PUT to an account or container will create them. A PUT to an object will create it using the content from standard input.
`, 0, "    ", "    "))
		fmt.Println("\nupload [options] <sourcepath> [container] [object]")
		fmt.Println(brimtext.Wrap(`
Uploads local files as objects. If you don't specify [container] the name of the current directory will be used. If you don't specify [object] the relative path name from the current directory will be used. If you do specify [object] while uploading a directory, [object] will be used as a prefix to the resulting object names. Note that when uploading a directory, only regular files will be uploaded.
`, 0, "    ", "    "))
		fmt.Println("\n[container] [object] can also be specified as [container]/[object]")
	} else {
		// Reword the flag package's standard unknown-flag error.
		msg := err.Error()
		if strings.HasPrefix(msg, "flag provided but not defined: ") {
			msg = "No such option: " + msg[len("flag provided but not defined: "):]
		}
		fmt.Fprintln(os.Stderr, msg)
	}
	os.Exit(1)
}
// fatalf writes the formatted message to standard error and terminates the
// process with exit status 1.
func fatalf(frmt string, args ...interface{}) {
	os.Stderr.WriteString(fmt.Sprintf(frmt, args...))
	os.Exit(1)
}
// verbosef writes the formatted message to standard error, but only when the
// -v (verbose) global flag is active.
func verbosef(frmt string, args ...interface{}) {
	if !*globalFlagVerbose {
		return
	}
	fmt.Fprintf(os.Stderr, frmt, args...)
}
// helpFlags prints a two-column, TTY-width-aware table of a flag set's
// options. A usage string of the form "|<arg>| description" places <arg>
// next to the flag name in the first column.
func helpFlags(flags *flag.FlagSet) {
	var rows [][]string
	widest := 0
	flags.VisitAll(func(f *flag.Flag) {
		label := " -" + f.Name
		usage := strings.TrimSpace(f.Usage)
		if strings.HasPrefix(usage, "|") {
			if parts := strings.SplitN(usage, "|", 3); len(parts) == 3 {
				label += " " + strings.TrimSpace(parts[1])
				usage = strings.TrimSpace(parts[2])
			}
		}
		if len(label) > widest {
			widest = len(label)
		}
		rows = append(rows, []string{label, usage})
	})
	// Let the description column absorb whatever width the labels leave over.
	opts := brimtext.NewDefaultAlignOptions()
	opts.Widths = []int{0, brimtext.GetTTYWidth() - widest - 2}
	fmt.Print(brimtext.Align(rows, opts))
}
// main parses the global options, authenticates against the cluster, and
// dispatches to the requested subcommand.
func main() {
	if err := globalFlags.Parse(os.Args[1:]); err != nil || len(globalFlags.Args()) == 0 {
		fatal(err)
	}
	if *globalFlagAuthURL == "" {
		fatalf("No Auth URL set; use -A\n")
	}
	if *globalFlagAuthUser == "" {
		fatalf("No Auth User set; use -U\n")
	}
	if *globalFlagAuthKey == "" && *globalFlagAuthPassword == "" {
		fatalf("No Auth Key or Password set; use -K or -P\n")
	}
	c, resp := nectar.NewClient(*globalFlagAuthTenant, *globalFlagAuthUser, *globalFlagAuthPassword, *globalFlagAuthKey, *globalFlagStorageRegion, *globalFlagAuthURL, *globalFlagInternalStorage)
	if resp != nil {
		// A non-nil response from NewClient means the auth request failed.
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("Auth responded with %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	cmd := ""
	args := append([]string{}, globalFlags.Args()...)
	if len(args) > 0 {
		cmd = args[0]
		args = args[1:]
	}
	subcommands := map[string]func(nectar.Client, []string){
		"bench-get":   benchGet,
		"bench-mixed": benchMixed,
		"bench-post":  benchPost,
		"bench-put":   benchPut,
		"delete":      delet,
		"download":    download,
		"get":         get,
		"head":        head,
		"post":        post,
		"put":         put,
		"upload":      upload,
	}
	run, ok := subcommands[cmd]
	if !ok {
		fatalf("Unknown command: %s\n", cmd)
	}
	run(c, args)
}
// benchGet benchmark tests GETs: it performs iterations*count GETs of
// objects named <object-prefix><n>, optionally spread across numbered
// containers, at the configured concurrency, and reports timing (optionally
// to per-request and over-time CSV files).
//
// Fix: the outer loop previously ran `iteration <= iterations`, performing
// (iterations+1)*count GETs while the banner, the final averages, and the
// final CSV-over-time row were all computed from iterations*count; the loop
// now runs exactly `iterations` passes so the reports match the work done.
func benchGet(c nectar.Client, args []string) {
	if err := benchGetFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchGetFlags.Args())
	if container == "" {
		fatalf("bench-get requires <container>\n")
	}
	if object == "" {
		object = "bench-"
	}
	containers := *benchGetFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *benchGetFlagCount
	if count < 1 {
		count = 1000
	}
	// Optional per-request CSV; csvlk serializes writes from the workers.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchGetFlagCSV != "" {
		csvf, err := os.Create(*benchGetFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "headers_elapsed_nanoseconds", "elapsed_nanoseconds"})
	}
	// Optional over-time CSV, written only from this goroutine.
	var csvotw *csv.Writer
	if *benchGetFlagCSVOT != "" {
		csvotf, err := os.Create(*benchGetFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	iterations := *benchGetFlagIterations
	if iterations < 1 {
		iterations = 1
	}
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers receive 1-based object numbers; the zero value received after
	// close(benchChan) tells a worker to exit.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			var headersElapsed int64
			for {
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				getContainer := container
				if containers > 1 {
					getContainer = fmt.Sprintf("%s%d", getContainer, i%containers)
				}
				getObject := fmt.Sprintf("%s%d", object, i)
				verbosef("GET %s/%s\n", getContainer, getObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.GetObject(getContainer, getObject, globalFlagHeaders.Headers())
				if csvw != nil {
					// Time to headers; full-body time is measured below.
					headersElapsed = time.Now().Sub(start).Nanoseconds()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				} else {
					io.Copy(ioutil.Discard, resp.Body)
				}
				resp.Body.Close()
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						getContainer + "/" + getObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", headersElapsed),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, from 1 container, at %d concurrency...", iterations*count, count, concurrency)
	} else {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, distributed across %d containers, at %d concurrency...", iterations*count, count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	for iteration := 0; iteration < iterations; iteration++ {
		for i := 1; i <= count; i++ {
			// Either hand the next object number to a worker or, if blocked,
			// emit a once-a-minute progress report.
			waiting := true
			for waiting {
				select {
				case <-ticker.C:
					// Approximate completed count: queued minus in-flight.
					soFar := iteration*count + i - concurrency
					now := time.Now()
					elapsed := now.Sub(start)
					fmt.Printf("\n%.05fs for %d GETs so far, %.05fs per GET, or %.05f GETs per second...", float64(elapsed)/float64(time.Second), soFar, float64(elapsed)/float64(time.Second)/float64(soFar), float64(soFar)/float64(elapsed/time.Second))
					if csvotw != nil {
						csvotw.Write([]string{
							fmt.Sprintf("%d", now.UnixNano()),
							fmt.Sprintf("%d", soFar-lastSoFar),
						})
						lastSoFar = soFar
					}
				case benchChan <- i:
					waiting = false
				}
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05fs per GET, or %.05f GETs per second.\n", float64(elapsed)/float64(time.Second), float64(elapsed)/float64(time.Second)/float64(iterations*count), float64(iterations*count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", iterations*count-lastSoFar),
		})
	}
}
// benchMixed benchmark tests a mixed workload of DELETE/GET/HEAD/POST/PUT
// requests, in the proportions given by -ratios, against one or more
// containers, for the span of time given by -time.
func benchMixed(c nectar.Client, args []string) {
	if err := benchMixedFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchMixedFlags.Args())
	if container == "" {
		fatalf("bench-mixed requires <container>\n")
	}
	if object == "" {
		object = "bench-"
	}
	containers := *benchMixedFlagContainers
	if containers < 1 {
		containers = 1
	}
	size := int64(*benchMixedFlagSize)
	if size < 0 {
		size = 4096
	}
	timespan, err := time.ParseDuration(*benchMixedFlagTime)
	if err != nil {
		fatal(err)
	}
	// Operation codes; these index methods and opOrder, and are also packed
	// into the low 4 bits of each value sent over benchChan.
	const (
		delet = iota
		get
		head
		post
		put
	)
	methods := []string{
		"DELETE",
		"GET",
		"HEAD",
		"POST",
		"PUT",
	}
	// -ratios lists the relative count of each request type as
	// deletes,gets,heads,posts,puts; opOrder repeats each op code that many
	// times and the request loop below cycles through it.
	ratios := strings.Split(*benchMixedFlagRatios, ",")
	if len(ratios) != 5 {
		fatalf("bench-mixed got a bad -ratio value: %v\n", ratios)
	}
	var opOrder []int
	for op, ratio := range ratios {
		n, err := strconv.Atoi(ratio)
		if err != nil {
			fatalf("bench-mixed got a bad -ratio value: %v %s\n", ratios, err)
		}
		for x := 0; x < n; x++ {
			opOrder = append(opOrder, op)
		}
	}
	// Optional per-request CSV; csvlk serializes writes from the workers.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchMixedFlagCSV != "" {
		csvf, err := os.Create(*benchMixedFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "method", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
	}
	// Optional over-time CSV, written only from this goroutine.
	var csvotw *csv.Writer
	if *benchMixedFlagCSVOT != "" {
		csvotf, err := os.Create(*benchMixedFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "method", "count_since_last_time"})
		// NOTE(review): this initial row has two fields while the header row
		// above declares three (no method column) — confirm this is intended.
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	if containers == 1 {
		fmt.Printf("Ensuring container exists...")
		verbosef("PUT %s\n", container)
		resp := c.PutContainer(container, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if *globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			} else {
				fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		// NOTE(review): on the continue-on-error path above the body was
		// already closed; this second Close is redundant (though harmless).
		resp.Body.Close()
	} else {
		fmt.Printf("Ensuring %d containers exist...", containers)
		for x := 0; x < containers; x++ {
			putContainer := fmt.Sprintf("%s%d", container, x)
			verbosef("PUT %s\n", putContainer)
			resp := c.PutContainer(putContainer, globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				if *globalFlagContinueOnError {
					fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					continue
				} else {
					fatalf("PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				}
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	// Per-method request counters, updated atomically by the workers and
	// read by the progress/report code below.
	var deletes int64
	var gets int64
	var heads int64
	var posts int64
	var puts int64
	for x := 0; x < concurrency; x++ {
		go func() {
			// Each worker has its own random source for PUT bodies.
			rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					// Zero value after close(benchChan): shut down.
					break
				}
				// Values on benchChan pack the op code into the low 4 bits
				// and the object number into the remaining bits.
				op := i & 0xf
				i >>= 4
				opContainer := container
				if containers > 1 {
					opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
				}
				opObject := fmt.Sprintf("%s%d", object, i)
				verbosef("%s %s/%s\n", methods[op], opContainer, opObject)
				if csvw != nil {
					start = time.Now()
				}
				var resp *http.Response
				switch op {
				case delet:
					resp = c.DeleteObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&deletes, 1)
				case get:
					resp = c.GetObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&gets, 1)
				case head:
					resp = c.HeadObject(opContainer, opObject, globalFlagHeaders.Headers())
					atomic.AddInt64(&heads, 1)
				case post:
					headers := globalFlagHeaders.Headers()
					headers["X-Object-Meta-Bench-Mixed"] = strconv.Itoa(i)
					resp = c.PostObject(opContainer, opObject, headers)
					atomic.AddInt64(&posts, 1)
				case put:
					resp = c.PutObject(opContainer, opObject, globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: size})
					atomic.AddInt64(&puts, 1)
				default:
					panic(fmt.Errorf("programming error: %d", op))
				}
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						methods[op],
						opContainer + "/" + opObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-Mixed for %s, each object is %d bytes, into 1 container, at %d concurrency...", timespan, size, concurrency)
	} else {
		fmt.Printf("Bench-Mixed for %s, each object is %d bytes, distributed across %d containers, at %d concurrency...", timespan, size, containers, concurrency)
	}
	timespanTicker := time.NewTicker(timespan)
	updateTicker := time.NewTicker(time.Minute)
	start := time.Now()
	var lastDeletes int64
	var lastGets int64
	var lastHeads int64
	var lastPosts int64
	var lastPuts int64
	var sentDeletes int
	var sentPuts int
requestLoop:
	for x := 0; ; x++ {
		var i int
		// Cycle through opOrder to hold the requested operation ratios.
		op := opOrder[x%len(opOrder)]
		switch op {
		case delet:
			// Don't start deleting until at least 1000 PUTs have happened;
			// each delete targets the next-lowest object number.
			if atomic.LoadInt64(&puts) < 1000 {
				continue
			}
			sentDeletes++
			i = sentDeletes
		case put:
			sentPuts++
			i = sentPuts
		default:
			// GET/HEAD/POST pick object numbers out of the middle of the
			// range that has been PUT but not yet deleted, once that range
			// is large enough.
			rang := (int(atomic.LoadInt64(&puts)) - sentDeletes) / 2
			if rang < 1000 {
				continue
			}
			i = sentDeletes + rang/2 + (x % rang)
		}
		// Either hand the packed op to a worker or handle ticker events.
		waiting := true
		for waiting {
			select {
			case <-timespanTicker.C:
				// Test duration reached.
				break requestLoop
			case <-updateTicker.C:
				// Once-a-minute progress report (and over-time CSV rows).
				now := time.Now()
				elapsed := now.Sub(start)
				snapshotDeletes := atomic.LoadInt64(&deletes)
				snapshotGets := atomic.LoadInt64(&gets)
				snapshotHeads := atomic.LoadInt64(&heads)
				snapshotPosts := atomic.LoadInt64(&posts)
				snapshotPuts := atomic.LoadInt64(&puts)
				total := snapshotDeletes + snapshotGets + snapshotHeads + snapshotPosts + snapshotPuts
				fmt.Printf("\n%.05fs for %d requests so far, %.05fs per request, or %.05f requests per second...", float64(elapsed)/float64(time.Second), total, float64(elapsed)/float64(time.Second)/float64(total), float64(total)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"DELETE",
						fmt.Sprintf("%d", snapshotDeletes-lastDeletes),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"GET",
						fmt.Sprintf("%d", snapshotGets-lastGets),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"HEAD",
						fmt.Sprintf("%d", snapshotHeads-lastHeads),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"POST",
						fmt.Sprintf("%d", snapshotPosts-lastPosts),
					})
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						"PUT",
						fmt.Sprintf("%d", snapshotPuts-lastPuts),
					})
					lastDeletes = snapshotDeletes
					lastGets = snapshotGets
					lastHeads = snapshotHeads
					lastPosts = snapshotPosts
					lastPuts = snapshotPuts
				}
			case benchChan <- i<<4 | op:
				waiting = false
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	timespanTicker.Stop()
	updateTicker.Stop()
	fmt.Println()
	total := deletes + gets + heads + posts + puts
	fmt.Printf("%.05fs for %d requests, %.05fs per request, or %.05f requests per second.\n", float64(elapsed)/float64(time.Second), total, float64(elapsed)/float64(time.Second)/float64(total), float64(total)/float64(elapsed/time.Second))
	if csvotw != nil {
		// Final per-method rows covering the period since the last update.
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"DELETE",
			fmt.Sprintf("%d", deletes-lastDeletes),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"GET",
			fmt.Sprintf("%d", gets-lastGets),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"HEAD",
			fmt.Sprintf("%d", heads-lastHeads),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"POST",
			fmt.Sprintf("%d", posts-lastPosts),
		})
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			"PUT",
			fmt.Sprintf("%d", puts-lastPuts),
		})
	}
}
// benchPost runs a POST benchmark: it issues `count` object POSTs against one
// or more containers using a pool of worker goroutines, prints progress once a
// minute, and prints final timing statistics. Two optional CSV outputs record
// per-request timings and per-interval request counts ("CSV over time").
func benchPost(c nectar.Client, args []string) {
	if err := benchPostFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchPostFlags.Args())
	if container == "" {
		fatalf("bench-post requires <container>\n")
	}
	if object == "" {
		object = "bench-" // default object-name prefix
	}
	containers := *benchPostFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *benchPostFlagCount
	if count < 1 {
		count = 1000 // default request count
	}
	// Per-request CSV writer; shared by all workers, so writes are guarded
	// by csvlk. Flushed and closed when the function returns.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchPostFlagCSV != "" {
		csvf, err := os.Create(*benchPostFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
	}
	// Over-time CSV writer; only written from this (the dispatching)
	// goroutine, so it needs no lock.
	var csvotw *csv.Writer
	if *benchPostFlagCSVOT != "" {
		csvotf, err := os.Create(*benchPostFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers receive 1-based request numbers on benchChan; the zero value
	// received after close(benchChan) is the shutdown signal.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break // channel closed; shut down
				}
				i-- // back to 0-based for naming/distribution
				postContainer := container
				if containers > 1 {
					// Spread objects across containers round-robin.
					postContainer = fmt.Sprintf("%s%d", postContainer, i%containers)
				}
				postObject := fmt.Sprintf("%s%d", object, i)
				verbosef("POST %s/%s\n", postContainer, postObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.PostObject(postContainer, postObject, globalFlagHeaders.Headers())
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						postContainer + "/" + postObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "POST %s/%s - %d %s - %s\n", postContainer, postObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("POST %s/%s - %d %s - %s\n", postContainer, postObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-POST of %d objects in 1 container, at %d concurrency...", count, concurrency)
	} else {
		fmt.Printf("Bench-POST of %d objects, distributed across %d containers, at %d concurrency...", count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Dispatch loop: try to hand out request i; while the workers are all
	// busy, service the once-a-minute progress ticker instead.
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Approximate completions: dispatched minus the
				// roughly `concurrency` requests still in flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				fmt.Printf("\n%.05fs for %d POSTs so far, %.05fs per POST, or %.05f POSTs per second...", float64(elapsed)/float64(time.Second), soFar, float64(elapsed)/float64(time.Second)/float64(soFar), float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05fs per POST, or %.05f POSTs per second.\n", float64(elapsed)/float64(time.Second), float64(elapsed)/float64(time.Second)/float64(count), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		// Final over-time row covering the remainder since the last tick.
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
	}
}
// benchPut runs a PUT benchmark: it first ensures the target container(s)
// exist, then uploads `count` objects of `size` random bytes each using a pool
// of worker goroutines, printing progress once a minute and final timing
// statistics. Optional CSV outputs record per-request timings and
// per-interval request counts.
func benchPut(c nectar.Client, args []string) {
	if err := benchPutFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(benchPutFlags.Args())
	if container == "" {
		fatalf("bench-put requires <container>\n")
	}
	if object == "" {
		object = "bench-" // default object-name prefix
	}
	containers := *benchPutFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *benchPutFlagCount
	if count < 1 {
		count = 1000 // default request count
	}
	size := int64(*benchPutFlagSize)
	if size < 0 {
		size = 4096 // default object size; note size 0 is allowed
	}
	// Per-request CSV writer; shared by all workers, guarded by csvlk.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *benchPutFlagCSV != "" {
		csvf, err := os.Create(*benchPutFlagCSV)
		if err != nil {
			fatal(err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
	}
	// Over-time CSV writer; only written from the dispatching goroutine.
	var csvotw *csv.Writer
	if *benchPutFlagCSVOT != "" {
		csvotf, err := os.Create(*benchPutFlagCSVOT)
		if err != nil {
			fatal(err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
	}
	// Pre-create the container(s) so the PUT workers only measure object
	// uploads, not container creation.
	if containers == 1 {
		fmt.Printf("Ensuring container exists...")
		verbosef("PUT %s\n", container)
		resp := c.PutContainer(container, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if *globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			} else {
				fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		resp.Body.Close()
	} else {
		fmt.Printf("Ensuring %d containers exist...", containers)
		for x := 0; x < containers; x++ {
			putContainer := fmt.Sprintf("%s%d", container, x)
			verbosef("PUT %s\n", putContainer)
			resp := c.PutContainer(putContainer, globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				if *globalFlagContinueOnError {
					fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					continue
				} else {
					fatalf("PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				}
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Workers receive 1-based request numbers on benchChan; the zero value
	// received after close(benchChan) is the shutdown signal.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			// Per-worker RNG used as the object body source below.
			rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break // channel closed; shut down
				}
				i-- // back to 0-based for naming/distribution
				putContainer := container
				if containers > 1 {
					// Spread objects across containers round-robin.
					putContainer = fmt.Sprintf("%s%d", putContainer, i%containers)
				}
				putObject := fmt.Sprintf("%s%d", object, i)
				verbosef("PUT %s/%s\n", putContainer, putObject)
				if csvw != nil {
					start = time.Now()
				}
				// Body is `size` bytes drawn from the worker's RNG.
				resp := c.PutObject(putContainer, putObject, globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: size})
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						putContainer + "/" + putObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-PUT of %d objects, each %d bytes, into 1 container, at %d concurrency...", count, size, concurrency)
	} else {
		fmt.Printf("Bench-PUT of %d objects, each %d bytes, distributed across %d containers, at %d concurrency...", count, size, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Dispatch loop: hand out request i, servicing the once-a-minute
	// progress ticker while all workers are busy.
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Approximate completions: dispatched minus the
				// roughly `concurrency` requests still in flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				fmt.Printf("\n%.05fs for %d PUTs so far, %.05fs per PUT, or %.05f PUTs per second...", float64(elapsed)/float64(time.Second), soFar, float64(elapsed)/float64(time.Second)/float64(soFar), float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05fs per PUT, or %.05f PUTs per second.\n", float64(elapsed)/float64(time.Second), float64(elapsed)/float64(time.Second)/float64(count), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		// Final over-time row covering the remainder since the last tick.
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
	}
}
// delet issues a DELETE against the object, container, or account implied by
// args: the most specific non-empty path component wins. A non-2xx response
// is reported via fatalf along with the response body.
func delet(c nectar.Client, args []string) {
	cont, obj := parsePath(args)
	hdrs := globalFlagHeaders.Headers()
	var resp *http.Response
	switch {
	case obj != "":
		resp = c.DeleteObject(cont, obj, hdrs)
	case cont != "":
		resp = c.DeleteContainer(cont, hdrs)
	default:
		resp = c.DeleteAccount(hdrs)
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// get issues a GET for the account, container, or object named by args.
// Object GETs (and anything with -raw) stream the response body to stdout,
// optionally preceded by the status line and sorted headers. Container and
// account GETs without -raw render parsed listings as aligned tables, or just
// names with -name-only.
func get(c nectar.Client, args []string) {
	if err := getFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(getFlags.Args())
	// Raw mode or an object GET: fetch the raw response and stream it out.
	if *getFlagRaw || object != "" {
		var resp *http.Response
		if object != "" {
			resp = c.GetObject(container, object, globalFlagHeaders.Headers())
		} else if container != "" {
			resp = c.GetContainerRaw(container, *getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		} else {
			resp = c.GetAccountRaw(*getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		}
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		// Print the status line and headers (sorted by name) for raw
		// requests and for container/account listings.
		if *getFlagRaw || object == "" {
			data := [][]string{}
			ks := []string{}
			for k := range resp.Header {
				ks = append(ks, k)
			}
			sort.Strings(ks)
			for _, k := range ks {
				for _, v := range resp.Header[k] {
					data = append(data, []string{k + ":", v})
				}
			}
			fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
			opts := brimtext.NewDefaultAlignOptions()
			fmt.Print(brimtext.Align(data, opts))
		}
		// Stream the body straight to stdout.
		if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
			fatal(err)
		}
		return
	}
	// Parsed container listing.
	if container != "" {
		entries, resp := c.GetContainer(container, *getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		if *getFlagNameOnly {
			for _, entry := range entries {
				// A delimiter listing returns "subdir" pseudo-entries.
				if entry.Subdir != "" {
					fmt.Println(entry.Subdir)
				} else {
					fmt.Println(entry.Name)
				}
			}
		} else {
			var data [][]string
			data = [][]string{{"Name", "Bytes", "Content Type", "Last Modified", "Hash"}}
			for _, entry := range entries {
				if entry.Subdir != "" {
					// Subdir rows have no size/type/etc.
					data = append(data, []string{entry.Subdir, "", "", "", ""})
				} else {
					data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Bytes), entry.ContentType, entry.LastModified, entry.Hash})
				}
			}
			fmt.Print(brimtext.Align(data, nil))
		}
		return
	}
	// Parsed account listing.
	entries, resp := c.GetAccount(*getFlagMarker, *getFlagEndMarker, *getFlagLimit, *getFlagPrefix, *getFlagDelimiter, *getFlagReverse, globalFlagHeaders.Headers())
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	if *getFlagNameOnly {
		for _, entry := range entries {
			fmt.Println(entry.Name)
		}
	} else {
		var data [][]string
		data = [][]string{{"Name", "Count", "Bytes"}}
		for _, entry := range entries {
			data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Count), fmt.Sprintf("%d", entry.Bytes)})
		}
		fmt.Print(brimtext.Align(data, nil))
	}
	return
}
// head issues a HEAD against the object, container, or account named by args
// and prints the response status plus its headers, sorted by name and aligned
// in two columns. A non-2xx response is reported via fatalf.
func head(c nectar.Client, args []string) {
	if err := headFlags.Parse(args); err != nil {
		fatal(err)
	}
	container, object := parsePath(headFlags.Args())
	var resp *http.Response
	if object != "" {
		resp = c.HeadObject(container, object, globalFlagHeaders.Headers())
	} else if container != "" {
		resp = c.HeadContainer(container, globalFlagHeaders.Headers())
	} else {
		resp = c.HeadAccount(globalFlagHeaders.Headers())
	}
	bodyBytes, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	// Collect header names, sort them, and emit one "Name: value" row per
	// header value. (The original routed lookups through a kls map that only
	// ever mapped each key to itself — redundant, so it has been removed.)
	data := [][]string{}
	ks := []string{}
	for k := range resp.Header {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	for _, k := range ks {
		for _, v := range resp.Header[k] {
			data = append(data, []string{k + ":", v})
		}
	}
	fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
	fmt.Print(brimtext.Align(data, brimtext.NewDefaultAlignOptions()))
}
// put issues a PUT for the account, container, or object named by args. An
// object PUT streams os.Stdin as the object body. A non-2xx response is
// reported via fatalf along with the response body.
func put(c nectar.Client, args []string) {
	cont, obj := parsePath(args)
	hdrs := globalFlagHeaders.Headers()
	var resp *http.Response
	switch {
	case obj != "":
		resp = c.PutObject(cont, obj, hdrs, os.Stdin)
	case cont != "":
		resp = c.PutContainer(cont, hdrs)
	default:
		resp = c.PutAccount(hdrs)
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// post issues a POST for the account, container, or object named by args;
// the most specific non-empty path component wins. A non-2xx response is
// reported via fatalf along with the response body.
func post(c nectar.Client, args []string) {
	cont, obj := parsePath(args)
	hdrs := globalFlagHeaders.Headers()
	var resp *http.Response
	switch {
	case obj != "":
		resp = c.PostObject(cont, obj, hdrs)
	case cont != "":
		resp = c.PostContainer(cont, hdrs)
	default:
		resp = c.PostAccount(hdrs)
	}
	if resp.StatusCode/100 != 2 {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(body))
	}
	resp.Body.Close()
}
// upload PUTs a local file, or every regular file under a local directory,
// into a container (defaulting the container name to the current directory's
// base name). Uploads are performed by a pool of worker goroutines fed file
// paths over a channel; an empty-string path (the zero value after close) is
// the shutdown signal, which also means empty paths can never be work items.
func upload(c nectar.Client, args []string) {
	if len(args) == 0 {
		fatalf("<sourcepath> is required for upload.\n")
	}
	sourcepath := args[0]
	container, object := parsePath(args[1:])
	if container == "" {
		// Default the container name from the current working directory.
		abscwd, err := filepath.Abs(".")
		if err != nil {
			fatalf("Could not determine current working directory: %s\n", err)
		}
		container = filepath.Base(abscwd)
	}
	verbosef("Ensuring container %q exists.\n", container)
	resp := c.PutContainer(container, globalFlagHeaders.Headers())
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fatalf("PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	resp.Body.Close()
	concurrency := *globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	uploadChan := make(chan string, concurrency-1)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			for {
				path := <-uploadChan
				if path == "" {
					break // channel closed; shut down
				}
				// Object name is the optional prefix plus the local path.
				verbosef("Uploading %q to %q %q.\n", path, container, object+path)
				f, err := os.Open(path)
				if err != nil {
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, object+path, err)
						continue
					} else {
						fatalf("Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, object+path, err)
					}
				}
				resp := c.PutObject(container, object+path, globalFlagHeaders.Headers(), f)
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", container, object+path, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("PUT %s/%s - %d %s - %s\n", container, object+path, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
				f.Close()
			}
			wg.Done()
		}()
	}
	fi, err := os.Stat(sourcepath)
	if err != nil {
		fatalf("Could not stat %s: %s\n", sourcepath, err)
	}
	// This "if" is so a single file upload that happens to be a symlink will work.
	if fi.Mode().IsRegular() {
		uploadChan <- sourcepath
	} else {
		// This "if" is to handle when the user-given path is a symlink to a directory; we normally want to skip symlinks, but not in this initial case.
		if !strings.HasSuffix(sourcepath, string(os.PathSeparator)) {
			sourcepath += string(os.PathSeparator)
		}
		// Walk the tree, queueing only regular files; walk errors and
		// non-regular entries (dirs, symlinks, devices) are skipped.
		filepath.Walk(sourcepath, func(path string, info os.FileInfo, err error) error {
			if err != nil || !info.Mode().IsRegular() {
				return nil
			}
			uploadChan <- path
			return nil
		})
	}
	close(uploadChan)
	wg.Wait()
}
// download fetches an object, every object in a container, or (with -a) every
// object in the account, into destpath. Work items flow through downloadChan
// to a pool of workers; a task with an empty object name means "list this
// container and queue its objects". containerWG counts outstanding
// container-listing tasks so the dispatcher knows when no further object
// tasks will be queued; taskWG counts the workers themselves.
//
// NOTE(review): workers enqueue object tasks onto the same bounded channel
// they consume from; if every worker were blocked sending at once the pool
// could stall — presumably the concurrency>=2 floor below is meant to avoid
// this, but confirm for very large multi-container listings.
func download(c nectar.Client, args []string) {
	if err := downloadFlags.Parse(args); err != nil {
		fatal(err)
	}
	args = downloadFlags.Args()
	if len(args) == 0 {
		fatalf("<destpath> is required for download.\n")
	}
	destpath := args[len(args)-1]
	container, object := parsePath(args[:len(args)-1])
	concurrency := *globalFlagConcurrency
	// Need at least 2 to queue object downloads while reading a container listing.
	if concurrency < 2 {
		concurrency = 2
	}
	type downloadTask struct {
		container string
		object    string // "" means: list the container and queue its objects
		destpath  string
	}
	downloadChan := make(chan *downloadTask, concurrency-1)
	// dirExists caches created directories so MkdirAll runs once per dir.
	var dirExistsLock sync.Mutex
	dirExists := map[string]bool{}
	taskWG := sync.WaitGroup{}
	taskWG.Add(concurrency)
	containerWG := sync.WaitGroup{}
	for i := 0; i < concurrency; i++ {
		go func() {
			for {
				task := <-downloadChan
				if task == nil {
					break // channel closed; shut down
				}
				if task.object == "" {
					// Container-listing task: queue one object task per entry.
					entries, resp := c.GetContainer(task.container, "", "", 0, "", "", false, globalFlagHeaders.Headers())
					if resp.StatusCode/100 != 2 {
						bodyBytes, _ := ioutil.ReadAll(resp.Body)
						resp.Body.Close()
						// The listing task is finished even on failure.
						containerWG.Done()
						if *globalFlagContinueOnError {
							fmt.Fprintf(os.Stderr, "GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
							continue
						} else {
							fatalf("GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						}
					}
					resp.Body.Close()
					for _, entry := range entries {
						if entry.Name != "" {
							downloadChan <- &downloadTask{container: task.container, object: entry.Name, destpath: filepath.Join(task.destpath, filepath.FromSlash(entry.Name))}
						}
					}
					containerWG.Done()
					continue
				}
				// Object task: ensure the destination directory exists,
				// then stream the object into the destination file.
				verbosef("Downloading %s/%s to %s.\n", task.container, task.object, task.destpath)
				if dstdr := filepath.Dir(task.destpath); dstdr != "." {
					dirExistsLock.Lock()
					if !dirExists[dstdr] {
						if err := os.MkdirAll(dstdr, 0755); err != nil {
							if *globalFlagContinueOnError {
								fmt.Fprintf(os.Stderr, "Could not make directory path %s: %s\n", dstdr, err)
							} else {
								fatalf("Could not make directory path %s: %s\n", dstdr, err)
							}
						}
						dirExists[dstdr] = true
					}
					dirExistsLock.Unlock()
				}
				f, err := os.Create(task.destpath)
				if err != nil {
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not create %s: %s\n", task.destpath, err)
						continue
					} else {
						fatalf("Could not create %s: %s\n", task.destpath, err)
					}
				}
				resp := c.GetObject(task.container, task.object, globalFlagHeaders.Headers())
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						fatalf("GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				if _, err = io.Copy(f, resp.Body); err != nil {
					resp.Body.Close()
					f.Close()
					if *globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
						continue
					} else {
						fatalf("Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
					}
				}
				resp.Body.Close()
				f.Close()
			}
			taskWG.Done()
		}()
	}
	if object != "" {
		// Single object: if destpath is an existing directory, place the
		// object inside it under its own name.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if fi.IsDir() {
			destpath = filepath.Join(destpath, object)
		}
		downloadChan <- &downloadTask{container: container, object: object, destpath: destpath}
	} else if container != "" {
		// Whole container: destpath must be a directory (or not exist yet).
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			fatalf("Cannot download a container to a single file: %s\n", destpath)
		}
		containerWG.Add(1)
		downloadChan <- &downloadTask{container: container, object: "", destpath: destpath}
	} else if !*downloadFlagAccount {
		fatalf("You must specify -a if you wish to download the entire account.\n")
	} else {
		// Whole account: queue one container-listing task per container.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				fatalf("Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			fatalf("Cannot download an account to a single file: %s\n", destpath)
		}
		entries, resp := c.GetAccount("", "", 0, "", "", false, globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fatalf("GET - %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		resp.Body.Close()
		for _, entry := range entries {
			if entry.Name != "" {
				containerWG.Add(1)
				downloadChan <- &downloadTask{container: entry.Name, object: "", destpath: filepath.Join(destpath, entry.Name)}
			}
		}
	}
	// Wait for all listings to finish queueing objects, then close the
	// channel so workers drain remaining tasks and exit.
	containerWG.Wait()
	close(downloadChan)
	taskWG.Wait()
}
// parsePath joins the given arguments into a single "container/object" path
// and returns the two components. While the accumulated path is still empty,
// arguments are taken as-is; after that each argument is appended with a "/"
// separator unless the path already ends in one. The first path segment is
// the container; everything after the first "/" is the object (which may
// itself contain slashes). Missing pieces come back as empty strings.
func parsePath(args []string) (string, string) {
	joined := ""
	for _, piece := range args {
		switch {
		case joined == "":
			joined = piece
		case strings.HasSuffix(joined, "/"):
			joined += piece
		default:
			joined += "/" + piece
		}
	}
	if idx := strings.Index(joined, "/"); idx != -1 {
		return joined[:idx], joined[idx+1:]
	}
	return joined, ""
}
// stringListFlag accumulates repeated occurrences of a command-line flag into
// a list of strings.
type stringListFlag []string

// Set records one more occurrence of the flag; it never fails.
func (s *stringListFlag) Set(value string) error {
	*s = append(*s, value)
	return nil
}

// String renders the collected values space-separated.
func (s *stringListFlag) String() string {
	return strings.Join(*s, " ")
}

// Headers interprets each collected value as a "Name: Value" HTTP header,
// trimming whitespace around both parts; a value without a colon yields an
// empty header value.
func (s *stringListFlag) Headers() map[string]string {
	headers := map[string]string{}
	for _, raw := range *s {
		pieces := strings.SplitN(raw, ":", 2)
		key := strings.TrimSpace(pieces[0])
		if len(pieces) == 2 {
			headers[key] = strings.TrimSpace(pieces[1])
		} else {
			headers[key] = ""
		}
	}
	return headers
}
|
package nessie
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// TestDoRequest drives doRequest through a table of HTTP methods, payloads,
// and expected statuses against a per-case httptest server, verifying the
// auth cookie header, the serialized request body, and error propagation.
func TestDoRequest(t *testing.T) {
	// Test structure to be serialized.
	type payload struct {
		A int `json:"a"`
	}
	authToken := "some token"
	var tests = []struct {
		method       string
		resource     string
		sentPayload  payload
		wantPayload  string
		serverStatus int
		wantStatus   []int
		wantError    bool
	}{
		// All succeeding methods.
		{"GET", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"DELETE", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"PUT", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		// Payload test.
		{"GET", "/test", payload{42}, "{\"a\":42}", http.StatusOK, []int{http.StatusOK}, false},
		// Expected failure.
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusInternalServerError}, false},
		// Unexpected failure
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusOK}, true},
	}
	for _, tt := range tests {
		tt := tt // pin the loop variable for the handler closure
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(tt.serverStatus)
			if r.Header.Get("X-Cookie") != fmt.Sprintf("token=%s", authToken) {
				t.Errorf("invalid auth header, got=%s, want=%s", r.Header.Get("X-Cookie"), authToken)
			}
			body, err := ioutil.ReadAll(r.Body)
			if err != nil {
				t.Errorf("could not read request body: %v", err)
				return
			}
			if string(body) != tt.wantPayload {
				t.Errorf("unexpected payload, got=%s, want=%s", body, tt.wantPayload)
			}
		}))
		func() {
			// Close each case's server as soon as the case finishes
			// (the original never closed them at all).
			defer ts.Close()
			n, err := NewInsecureNessus(ts.URL)
			if err != nil {
				// BUG FIX: err must be checked before touching n; the
				// original set n.Verbose first, which would nil-deref
				// on a construction failure.
				t.Errorf("could not create nessie instance: %v (%+v)", err, tt)
				return
			}
			n.Verbose = true
			resp, err := n.doRequest(tt.method, tt.resource, tt.sentPayload, tt.wantStatus)
			if tt.wantError {
				if err == nil {
					t.Errorf("got no error, expected one (%+v)", tt)
				}
				return
			}
			if err != nil {
				t.Errorf("error in doRequest: %v (%+v)", err, tt)
				return
			}
			if resp.StatusCode != tt.serverStatus {
				t.Errorf("got status code=%d, wanted=%d", resp.StatusCode, tt.serverStatus)
			}
		}()
	}
}
// TestLogin verifies that Login stores the token returned by the server as
// the client's auth cookie.
func TestLogin(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		j, err := json.Marshal(&loginResp{Token: "some token"})
		if err != nil {
			// t.Fatalf must not be called from the handler goroutine
			// (FailNow only works on the test goroutine); report and
			// answer with a 500 instead.
			t.Errorf("cannot serialize login response: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// BUG FIX: headers must be set before WriteHeader; the original
		// set Content-Type afterwards, so it was never transmitted.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(j)
	}))
	defer server.Close()
	n, err := NewInsecureNessus(server.URL)
	if err != nil {
		t.Fatalf("cannot create nessus instance: %v", err)
	}
	if err := n.Login("username", "password"); err != nil {
		t.Fatalf("got error during login: %v", err)
	}
	if got, want := n.authCookie, "some token"; got != want {
		t.Fatalf("wrong auth cookie, got=%q, want=%q", got, want)
	}
}
// TestMethods exercises simple API wrappers against a stub server that
// answers with a canned JSON body per case.
func TestMethods(t *testing.T) {
	var tests = []struct {
		resp       interface{}
		statusCode int
		call       func(n *Nessus)
	}{
		{&Session{}, http.StatusOK, func(n *Nessus) { n.Session() }},
		{&ServerProperties{}, http.StatusOK, func(n *Nessus) { n.ServerProperties() }},
	}
	for _, tt := range tests {
		tt := tt // pin the loop variable for the handler closure
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(tt.statusCode)
			j, err := json.Marshal(tt.resp)
			if err != nil {
				// t.Fatalf must not be called from the handler
				// goroutine; report and bail out of the handler.
				t.Errorf("cannot serialize response: %v", err)
				return
			}
			w.Write(j)
		}))
		n, err := NewInsecureNessus(server.URL)
		if err != nil {
			server.Close()
			t.Fatalf("cannot create nessus instance: %v", err)
		}
		tt.call(n)
		// BUG FIX: the original used `defer server.Close()` inside the
		// loop, leaking every server until the whole test returned.
		server.Close()
	}
}
First batch of added tests.
package nessie
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// TestDoRequest drives doRequest through a table of HTTP methods, payloads,
// and expected statuses against a per-case httptest server, verifying the
// auth cookie header, the serialized request body, and error propagation.
func TestDoRequest(t *testing.T) {
	// Test structure to be serialized.
	type payload struct {
		A int `json:"a"`
	}
	authToken := "some token"
	var tests = []struct {
		method       string
		resource     string
		sentPayload  payload
		wantPayload  string
		serverStatus int
		wantStatus   []int
		wantError    bool
	}{
		// All succeeding methods.
		{"GET", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"DELETE", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		{"PUT", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
		// Payload test.
		{"GET", "/test", payload{42}, "{\"a\":42}", http.StatusOK, []int{http.StatusOK}, false},
		// Expected failure.
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusInternalServerError}, false},
		// Unexpected failure
		{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusOK}, true},
	}
	for _, tt := range tests {
		tt := tt // pin the loop variable for the handler closure
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(tt.serverStatus)
			if r.Header.Get("X-Cookie") != fmt.Sprintf("token=%s", authToken) {
				t.Errorf("invalid auth header, got=%s, want=%s", r.Header.Get("X-Cookie"), authToken)
			}
			body, err := ioutil.ReadAll(r.Body)
			if err != nil {
				t.Errorf("could not read request body: %v", err)
				return
			}
			if string(body) != tt.wantPayload {
				t.Errorf("unexpected payload, got=%s, want=%s", body, tt.wantPayload)
			}
		}))
		func() {
			// Close each case's server as soon as the case finishes
			// (the original never closed them at all).
			defer ts.Close()
			n, err := NewInsecureNessus(ts.URL)
			if err != nil {
				// BUG FIX: err must be checked before touching n; the
				// original set n.Verbose first, which would nil-deref
				// on a construction failure.
				t.Errorf("could not create nessie instance: %v (%+v)", err, tt)
				return
			}
			n.Verbose = true
			resp, err := n.doRequest(tt.method, tt.resource, tt.sentPayload, tt.wantStatus)
			if tt.wantError {
				if err == nil {
					t.Errorf("got no error, expected one (%+v)", tt)
				}
				return
			}
			if err != nil {
				t.Errorf("error in doRequest: %v (%+v)", err, tt)
				return
			}
			if resp.StatusCode != tt.serverStatus {
				t.Errorf("got status code=%d, wanted=%d", resp.StatusCode, tt.serverStatus)
			}
		}()
	}
}
// TestLogin verifies that Login stores the token returned by the server as
// the client's auth cookie.
func TestLogin(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		j, err := json.Marshal(&loginResp{Token: "some token"})
		if err != nil {
			// t.Fatalf must not be called from the handler goroutine
			// (FailNow only works on the test goroutine); report and
			// answer with a 500 instead.
			t.Errorf("cannot serialize login response: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// BUG FIX: headers must be set before WriteHeader; the original
		// set Content-Type afterwards, so it was never transmitted.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(j)
	}))
	defer server.Close()
	n, err := NewInsecureNessus(server.URL)
	if err != nil {
		t.Fatalf("cannot create nessus instance: %v", err)
	}
	if err := n.Login("username", "password"); err != nil {
		t.Fatalf("got error during login: %v", err)
	}
	if got, want := n.authCookie, "some token"; got != want {
		t.Fatalf("wrong auth cookie, got=%q, want=%q", got, want)
	}
}
// TestMethods exercises the API wrappers against a stub server that answers
// with a canned JSON body per case (or an empty body when resp is nil).
func TestMethods(t *testing.T) {
	var tests = []struct {
		resp       interface{}
		statusCode int
		call       func(n *Nessus)
	}{
		{&Session{}, http.StatusOK, func(n *Nessus) { n.Session() }},
		{&ServerProperties{}, http.StatusOK, func(n *Nessus) { n.ServerProperties() }},
		{&ServerStatus{}, http.StatusOK, func(n *Nessus) { n.ServerStatus() }},
		{&User{}, http.StatusOK, func(n *Nessus) {
			n.CreateUser("username", "pass", UserTypeLocal, Permissions32, "name", "email@foo.com")
		}},
		{&listUsersResp{}, http.StatusOK, func(n *Nessus) { n.ListUsers() }},
		{nil, http.StatusOK, func(n *Nessus) { n.DeleteUser(42) }},
		{nil, http.StatusOK, func(n *Nessus) { n.SetUserPassword(42, "newpass") }},
		{&User{}, http.StatusOK, func(n *Nessus) {
			n.EditUser(42, Permissions128, "newname", "newmain@goo.fom")
		}},
	}
	for _, tt := range tests {
		tt := tt // pin the loop variable for the handler closure
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(tt.statusCode)
			if tt.resp != nil {
				j, err := json.Marshal(tt.resp)
				if err != nil {
					// t.Fatalf must not be called from the handler
					// goroutine; report and bail out of the handler.
					t.Errorf("cannot serialize response: %v", err)
					return
				}
				w.Write(j)
			}
		}))
		n, err := NewInsecureNessus(server.URL)
		if err != nil {
			server.Close()
			t.Fatalf("cannot create nessus instance: %v", err)
		}
		n.Verbose = true
		tt.call(n)
		// BUG FIX: the original used `defer server.Close()` inside the
		// loop, leaking every server until the whole test returned.
		server.Close()
	}
}
|
package bus
import (
"net"
"log"
"golem-server/net/bus/containers"
"strconv"
)
const (
busAddress = "/tmp/golem_bus.sock"
)
var hostPool []containers.Host
/*
EventBusListener Will be the backend bus which messages will be passed over
Using a unix socket to make it more efficient. Does
not have any TCP overhead.
*/
// EventBusListener accepts connections on the bus's unix socket and spawns a
// processConnection goroutine per connection. It blocks forever; a failure to
// bind the socket terminates the process.
func EventBusListener() {
	log.Println("Starting Listener")
	l, err := net.Listen("unix", busAddress)
	if err != nil {
		// log.Fatal exits the process; the panic(err) that followed it
		// in the original was unreachable and has been removed.
		log.Fatal("listen error:", err)
	}
	defer l.Close()
	for {
		fd, err := l.Accept()
		if err != nil {
			// BUG FIX: the original called log.Fatal here, so any single
			// failed accept killed the whole process. Log and keep
			// serving instead (this also avoids handing a nil conn to
			// processConnection).
			log.Println("accept error:", err)
			continue
		}
		go processConnection(fd)
	}
	// The original ended with an unreachable "Exiting Listener" log line
	// after the infinite loop; it has been removed.
}
/*
This opens the communication CommSocketListener
peons will connect to and use this socket
//TODO: Connect the socket bus here with the control bus
*/
// SocketListener accepts TCP connections on ip:port (ip defaults to
// "localhost") and spawns a processConnection goroutine per connection. It
// blocks forever; a failure to bind terminates the process.
//
// NOTE(review): the role parameter is currently unused — confirm whether it
// should select behavior here or be dropped.
func SocketListener(ip string, port int, role string) {
	log.Println("Listening for Comm on socket: " + strconv.Itoa(port))
	if ip == "" {
		ip = "localhost"
	}
	l, err := net.Listen("tcp", ip+":"+strconv.Itoa(port))
	if err != nil {
		// log.Fatal exits the process; the panic(err) that followed it
		// in the original was unreachable and has been removed.
		log.Fatal(err)
	}
	defer l.Close()
	for {
		fd, err := l.Accept()
		if err != nil {
			// BUG FIX: the original called log.Fatal here, so any single
			// failed accept killed the whole process. Log and keep
			// serving instead.
			log.Println("accept error:", err)
			continue
		}
		go processConnection(fd)
	}
}
// processConnection registers a newly accepted connection in the global
// hostPool, giving it a fresh string channel.
//
// NOTE(review): this runs on one goroutine per accepted connection, but
// hostPool is appended to without any synchronization — concurrent accepts
// race on the slice. Guard hostPool with a mutex declared alongside it, or
// funnel registrations through a single goroutine.
// NOTE(review): the "test" hostname and the 10000 value are hard-coded
// placeholders — confirm the intended values before relying on them.
func processConnection(c net.Conn) {
	log.Println("Processing Connection from: ", c.RemoteAddr().String())
	hostChannel := make (chan string)
	host := containers.Host{c, c.RemoteAddr(), "test", 10000, hostChannel}
	hostPool = append(hostPool, host)
	log.Println("Finished Processing Connection")
}
/*
FindClients searches the structs of associated peons and returns a
slice of all the peons whose HostName matches pattern (exact match for
now). Previously this always returned (nil, nil) and ignored pattern.
*/
func FindClients(pattern string) ([]containers.Host, error) {
	var matches []containers.Host
	// Ranging over a nil slice is a no-op, so the old explicit nil
	// check of hostPool is unnecessary.
	for _, value := range hostPool {
		log.Println("Hostname of Search: " + value.HostName)
		if value.HostName == pattern {
			matches = append(matches, value)
		}
	}
	return matches, nil
}
/*
EventBusClient Will push commands onto the event bus.
func EventBusClient(command Command) {
}*/
Updated comments for the bus package to explain its purpose.
package bus
/*
This package is intended to start the socket connections that
the system uses. It also will bind the clients and group the necessary connections
together. This means that when a client connects it will be connected to the control
bus chan, will be given a message chan, and a chan to send file data through. Each of these
chans will be connected to a socket.
*/
import (
"net"
"log"
"golem-server/net/bus/containers"
"strconv"
)
const (
	// busAddress is the unix-domain socket path the event bus listens on.
	busAddress = "/tmp/golem_bus.sock"
)

// hostPool holds one entry per connected peon.
// NOTE(review): appended to from concurrent goroutines in
// processConnection without synchronization — confirm and guard with a
// mutex.
var hostPool []containers.Host
/*
EventBusListener is the backend bus which messages are passed over.
It listens on a unix-domain socket to avoid TCP overhead. Blocks
forever, handing each accepted connection to processConnection in its
own goroutine.
*/
func EventBusListener() {
	log.Println("Starting Listener")
	l, err := net.Listen("unix", busAddress)
	if err != nil {
		// log.Fatal exits the process; the previous panic(err) after it
		// was unreachable.
		log.Fatal("listen error:", err)
	}
	for {
		fd, err := l.Accept()
		if err != nil {
			// A single failed Accept must not kill the whole bus;
			// log it and keep serving (previously log.Fatal exited here,
			// making the rest of the loop dead code).
			log.Println("accept error:", err)
			continue
		}
		go processConnection(fd)
	}
}
/*
SocketListener opens the communication socket that peons will connect
to and use. An empty ip defaults to "localhost"; role is currently
unused (accepted for interface compatibility).
//TODO: Connect the socket bus here with the control bus
*/
func SocketListener(ip string, port int, role string) {
	log.Println("Listening for Comm on socket: " + strconv.Itoa(port))
	if ip == "" {
		ip = "localhost"
	}
	// JoinHostPort handles IPv6 literals correctly, unlike naive
	// string concatenation.
	l, err := net.Listen("tcp", net.JoinHostPort(ip, strconv.Itoa(port)))
	if err != nil {
		// log.Fatal already exits the process; the old panic(err)
		// after it was unreachable.
		log.Fatal(err)
	}
	for {
		fd, err := l.Accept()
		if err != nil {
			// Do not kill the whole server on a single failed accept
			// (previously log.Fatal exited here).
			log.Println("accept error:", err)
			continue
		}
		go processConnection(fd)
	}
}
// processConnection registers a newly accepted connection in the host
// pool and allocates its message channel.
// NOTE(review): hostPool is appended to from one goroutine per accepted
// connection with no locking — a data race; guard with a mutex.
// NOTE(review): the Host literal uses positional fields; "test" and
// 10000 look like placeholder name/capacity values — confirm field
// meanings against the containers package.
func processConnection(c net.Conn) {
log.Println("Processing Connection from: ", c.RemoteAddr().String())
hostChannel := make (chan string)
host := containers.Host{c, c.RemoteAddr(), "test", 10000, hostChannel}
hostPool = append(hostPool, host)
log.Println("Finished Processing Connection")
}
/*
FindClients searches the structs of associated peons and returns a
slice of all the peons whose HostName matches pattern (exact match for
now). Previously this always returned (nil, nil) and ignored pattern.
*/
func FindClients(pattern string) ([]containers.Host, error) {
	var matches []containers.Host
	// Ranging over a nil slice is a no-op, so the old explicit nil
	// check of hostPool is unnecessary.
	for _, value := range hostPool {
		log.Println("Hostname of Search: " + value.HostName)
		if value.HostName == pattern {
			matches = append(matches, value)
		}
	}
	return matches, nil
}
/*
EventBusClient Will push commands onto the event bus.
func EventBusClient(command Command) {
}*/
|
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package maas
import (
"encoding/base64"
"fmt"
"net/url"
"strings"
"sync"
"time"
"launchpad.net/gomaasapi"
"launchpad.net/juju-core/agent"
"launchpad.net/juju-core/cloudinit"
"launchpad.net/juju-core/constraints"
"launchpad.net/juju-core/environs"
"launchpad.net/juju-core/environs/config"
"launchpad.net/juju-core/environs/imagemetadata"
"launchpad.net/juju-core/environs/simplestreams"
"launchpad.net/juju-core/environs/storage"
envtools "launchpad.net/juju-core/environs/tools"
"launchpad.net/juju-core/instance"
"launchpad.net/juju-core/provider/common"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/api"
"launchpad.net/juju-core/tools"
"launchpad.net/juju-core/utils"
)
const (
	// We're using v1.0 of the MAAS API.
	apiVersion = "1.0"
)

// A request may fail due to "eventual consistency" semantics, which
// should resolve fairly quickly. A request may also fail due to a slow
// state transition (for instance an instance taking a while to release
// a security group after termination). The former failure mode is
// dealt with by shortAttempt; the latter by a longer strategy
// (presumably a LongAttempt defined elsewhere — not visible here).
var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,
	Delay: 200 * time.Millisecond,
}

// maasEnviron implements environs.Environ for the MAAS provider.
type maasEnviron struct {
	name string

	// archMutex gates access to supportedArchitectures.
	archMutex sync.Mutex
	// supportedArchitectures caches the architectures
	// for which images can be instantiated.
	supportedArchitectures []string

	// ecfgMutex protects the *Unlocked fields below.
	ecfgMutex sync.Mutex

	ecfgUnlocked *maasEnvironConfig
	maasClientUnlocked *gomaasapi.MAASObject
	storageUnlocked storage.Storage
}

// Compile-time interface satisfaction checks.
var _ environs.Environ = (*maasEnviron)(nil)
var _ imagemetadata.SupportsCustomSources = (*maasEnviron)(nil)
var _ envtools.SupportsCustomSources = (*maasEnviron)(nil)
// NewEnviron creates a maasEnviron configured from cfg, with its
// storage initialised.
func NewEnviron(cfg *config.Config) (*maasEnviron, error) {
	env := &maasEnviron{}
	if err := env.SetConfig(cfg); err != nil {
		return nil, err
	}
	env.name = cfg.Name()
	env.storageUnlocked = NewStorage(env)
	return env, nil
}
// Name is specified in the Environ interface.
func (env *maasEnviron) Name() string {
	return env.name
}

// Bootstrap is specified in the Environ interface; it delegates to the
// provider-common bootstrap implementation.
func (env *maasEnviron) Bootstrap(ctx environs.BootstrapContext, cons constraints.Value) error {
	return common.Bootstrap(ctx, env, cons)
}

// StateInfo is specified in the Environ interface.
func (env *maasEnviron) StateInfo() (*state.Info, *api.Info, error) {
	return common.StateInfo(env)
}

// ecfg returns the environment's maasEnvironConfig, and protects it with a
// mutex.
func (env *maasEnviron) ecfg() *maasEnvironConfig {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.ecfgUnlocked
}

// Config is specified in the Environ interface.
// Reads the config via ecfg(), so access is lock-protected.
func (env *maasEnviron) Config() *config.Config {
	return env.ecfg().Config
}
// SetConfig is specified in the Environ interface.
// It validates the transition from the old config to the new one and,
// only on full success, swaps in the new config and a freshly
// authenticated MAAS client.
func (env *maasEnviron) SetConfig(cfg *config.Config) error {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()

	// The new config has already been validated by itself, but now we
	// validate the transition from the old config to the new.
	var oldCfg *config.Config
	if env.ecfgUnlocked != nil {
		oldCfg = env.ecfgUnlocked.Config
	}
	cfg, err := env.Provider().Validate(cfg, oldCfg)
	if err != nil {
		return err
	}
	ecfg, err := providerInstance.newConfig(cfg)
	if err != nil {
		return err
	}
	authClient, err := gomaasapi.NewAuthenticatedClient(ecfg.maasServer(), ecfg.maasOAuth(), apiVersion)
	if err != nil {
		return err
	}
	// Commit the new state only after every fallible step has succeeded,
	// so a failed SetConfig cannot leave the environ half-updated
	// (previously ecfgUnlocked was replaced before the client was built,
	// leaving a new config paired with a stale client on error).
	env.ecfgUnlocked = ecfg
	env.maasClientUnlocked = gomaasapi.NewMAAS(*authClient)
	return nil
}
// SupportedArchitectures is specified on the EnvironCapability interface.
// The result is computed once and cached, guarded by archMutex; a failed
// lookup leaves the cache nil so it is retried on the next call.
func (env *maasEnviron) SupportedArchitectures() ([]string, error) {
	env.archMutex.Lock()
	defer env.archMutex.Unlock()
	if env.supportedArchitectures != nil {
		return env.supportedArchitectures, nil
	}
	// Create a filter to get all images from our region and for the correct stream.
	imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{
		Stream: env.Config().ImageStream(),
	})
	var err error
	env.supportedArchitectures, err = common.SupportedArchitectures(env, imageConstraint)
	return env.supportedArchitectures, err
}

// getMAASClient returns a MAAS client object to use for a request, in a
// lock-protected fashion (ecfgMutex, shared with SetConfig).
func (env *maasEnviron) getMAASClient() *gomaasapi.MAASObject {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.maasClientUnlocked
}
// convertConstraints converts the given constraints into an url.Values
// object suitable to pass to MAAS when acquiring a node.
// CpuPower cannot be translated into something meaningful for MAAS
// right now, so it is only warned about; the same goes for RootDisk
// (bug 1212689).
func convertConstraints(cons constraints.Value) url.Values {
	values := url.Values{}
	if cons.Arch != nil {
		values.Add("arch", *cons.Arch)
	}
	if cons.CpuCores != nil {
		values.Add("cpu_count", fmt.Sprintf("%d", *cons.CpuCores))
	}
	if cons.Mem != nil {
		values.Add("mem", fmt.Sprintf("%d", *cons.Mem))
	}
	if tags := cons.Tags; tags != nil && len(*tags) > 0 {
		values.Add("tags", strings.Join(*tags, ","))
	}
	if cons.RootDisk != nil {
		logger.Warningf("ignoring unsupported constraint 'root-disk'")
	}
	if cons.CpuPower != nil {
		logger.Warningf("ignoring unsupported constraint 'cpu-power'")
	}
	return values
}
// addNetworks converts networks include/exclude information into
// url.Values entries suitable to pass to MAAS when acquiring a node.
func addNetworks(params url.Values, nets environs.Networks) {
	// Ranging over a nil slice is a no-op, so no explicit nil checks
	// are needed.
	for _, name := range nets.IncludeNetworks {
		params.Add("networks", name)
	}
	for _, name := range nets.ExcludeNetworks {
		params.Add("not_networks", name)
	}
}
// acquireNode allocates a node from the MAAS, retrying transient
// failures per shortAttempt, and returns the node together with the
// tools to run on it.
func (environ *maasEnviron) acquireNode(cons constraints.Value, nets environs.Networks, possibleTools tools.List) (gomaasapi.MAASObject, *tools.Tools, error) {
	if len(possibleTools) == 0 {
		// Guard the possibleTools[0] access below; previously an empty
		// list caused an index-out-of-range panic.
		return gomaasapi.MAASObject{}, nil, fmt.Errorf("no tools available to start node with")
	}
	acquireParams := convertConstraints(cons)
	addNetworks(acquireParams, nets)
	acquireParams.Add("agent_name", environ.ecfg().maasAgentName())
	var result gomaasapi.JSONObject
	var err error
	for a := shortAttempt.Start(); a.Next(); {
		client := environ.getMAASClient().GetSubObject("nodes/")
		result, err = client.CallPost("acquire", acquireParams)
		if err == nil {
			break
		}
	}
	if err != nil {
		return gomaasapi.MAASObject{}, nil, err
	}
	node, err := result.GetMAASObject()
	if err != nil {
		return gomaasapi.MAASObject{}, nil, fmt.Errorf("unexpected result from 'acquire' on MAAS API: %v", err)
	}
	// TODO: select tools matching the node rather than the first entry.
	// Renamed from "tools", which shadowed the imported tools package.
	selected := possibleTools[0]
	logger.Warningf("picked arbitrary tools %q", selected)
	return node, selected, nil
}
// startNode installs and boots a node. The userdata is base64-encoded
// before being handed to MAAS.
func (environ *maasEnviron) startNode(node gomaasapi.MAASObject, series string, userdata []byte) error {
	userDataParam := base64.StdEncoding.EncodeToString(userdata)
	params := url.Values{
		"distro_series": {series},
		"user_data":     {userDataParam},
	}
	// Initialize err to a non-nil value as a sentinel for the following
	// loop: it retries until the POST succeeds (err becomes nil) or the
	// attempt strategy is exhausted.
	err := fmt.Errorf("(no error)")
	for a := shortAttempt.Start(); a.Next() && err != nil; {
		_, err = node.CallPost("start", params)
	}
	return err
}

// createBridgeNetwork returns a shell command string (run via
// cloud-init) that writes /etc/network/eth0.config, switching eth0 to
// manual and defining a DHCP bridge br0 on top of it.
func createBridgeNetwork() string {
	return `cat > /etc/network/eth0.config << EOF
iface eth0 inet manual
auto br0
iface br0 inet dhcp
bridge_ports eth0
EOF
`
}

// linkBridgeInInterfaces rewrites /etc/network/interfaces so that it
// sources the eth0.config file created by createBridgeNetwork instead
// of configuring eth0 directly.
func linkBridgeInInterfaces() string {
	return `sed -i "s/iface eth0 inet dhcp/source \/etc\/network\/eth0.config/" /etc/network/interfaces`
}
// StartInstance is specified in the InstanceBroker interface.
// It acquires a node from MAAS, composes cloud-init userdata, and boots
// the node. If any step after acquisition fails, the deferred cleanup
// releases the node again.
func (environ *maasEnviron) StartInstance(args environs.StartInstanceParams) (instance.Instance, *instance.HardwareCharacteristics, error) {
	node, selectedTools, err := environ.acquireNode(args.Constraints, args.Networks, args.Tools)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instances: %v", err)
	}
	inst := &maasInstance{maasObject: &node, environ: environ}
	args.MachineConfig.Tools = selectedTools
	defer func() {
		// Release the acquired node if any later step failed.
		if err != nil {
			if err := environ.releaseInstance(inst); err != nil {
				logger.Errorf("error releasing failed instance: %v", err)
			}
		}
	}()
	hostname, err := inst.DNSName()
	if err != nil {
		return nil, nil, err
	}
	// Assign to the outer err (previously shadowed with := inside the
	// if) so the deferred cleanup actually fires on this failure path.
	if err = environs.FinishMachineConfig(args.MachineConfig, environ.Config(), args.Constraints); err != nil {
		return nil, nil, err
	}
	// TODO(thumper): 2013-08-28 bug 1217614
	// The machine environment config values are being moved to the agent config.
	// Explicitly specify that the lxc containers use the network bridge defined above.
	args.MachineConfig.AgentEnvironment[agent.LxcBridge] = "br0"
	cloudcfg, err := newCloudinitConfig(hostname)
	if err != nil {
		return nil, nil, err
	}
	userdata, err := environs.ComposeUserData(args.MachineConfig, cloudcfg)
	if err != nil {
		return nil, nil, fmt.Errorf("could not compose userdata for bootstrap node: %v", err)
	}
	logger.Debugf("maas user data; %d bytes", len(userdata))
	series := args.Tools.OneSeries()
	// Same shadowing fix as above: err must be the outer variable so the
	// deferred release runs when boot fails.
	if err = environ.startNode(*inst.maasObject, series, userdata); err != nil {
		return nil, nil, err
	}
	logger.Debugf("started instance %q", inst.Id())
	// TODO(bug 1193998) - return instance hardware characteristics as well.
	return inst, nil, nil
}
// newCloudinitConfig creates a cloudinit.Config structure
// suitable as a base for initialising a MAAS node: it runs the
// machine-info command, installs bridge-utils, and swaps eth0 for the
// br0 bridge used by lxc containers.
func newCloudinitConfig(hostname string) (*cloudinit.Config, error) {
	info := machineInfo{hostname}
	runCmd, err := info.cloudinitRunCmd()
	if err != nil {
		return nil, err
	}
	cloudcfg := cloudinit.New()
	cloudcfg.SetAptUpdate(true)
	cloudcfg.AddPackage("bridge-utils")
	cloudcfg.AddScripts(
		"set -xe",
		runCmd,
		"ifdown eth0",
		createBridgeNetwork(),
		linkBridgeInInterfaces(),
		"ifup br0",
	)
	return cloudcfg, nil
}

// StopInstances is specified in the InstanceBroker interface.
// (The header comment previously said "StartInstance" — copy/paste
// error.)
func (environ *maasEnviron) StopInstances(instances []instance.Instance) error {
	// Shortcut to exit quickly if 'instances' is an empty slice or nil.
	if len(instances) == 0 {
		return nil
	}
	// Tell MAAS to release each of the instances. If there are errors,
	// return only the first one (but release all instances regardless).
	// Note that releasing instances also turns them off.
	// NOTE(review): the loop variable shadows the imported "instance"
	// package (harmless here, but worth renaming).
	var firstErr error
	for _, instance := range instances {
		err := environ.releaseInstance(instance)
		if firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// releaseInstance releases a single instance back to MAAS (which also
// turns the node off). The instance must be a *maasInstance.
func (environ *maasEnviron) releaseInstance(inst instance.Instance) error {
	maasInst, ok := inst.(*maasInstance)
	if !ok {
		// Previously a bare type assertion panicked on foreign types.
		return fmt.Errorf("unexpected instance type %T", inst)
	}
	_, err := maasInst.maasObject.CallPost("release", nil)
	if err != nil {
		// Include the error itself; it was previously omitted from the
		// log message.
		logger.Debugf("error releasing instance %v: %v", maasInst, err)
	}
	return err
}
// instances calls the MAAS API to list nodes. The "ids" slice is a filter for
// specific instance IDs. Due to how this works in the HTTP API, an empty
// "ids" matches all instances (not none as you might expect).
func (environ *maasEnviron) instances(ids []instance.Id) ([]instance.Instance, error) {
	nodeListing := environ.getMAASClient().GetSubObject("nodes")
	filter := getSystemIdValues(ids)
	// Restrict the listing to nodes acquired by this agent.
	filter.Add("agent_name", environ.ecfg().maasAgentName())
	listNodeObjects, err := nodeListing.CallGet("list", filter)
	if err != nil {
		return nil, err
	}
	listNodes, err := listNodeObjects.GetArray()
	if err != nil {
		return nil, err
	}
	// Wrap each returned MAAS node object in a maasInstance.
	instances := make([]instance.Instance, len(listNodes))
	for index, nodeObj := range listNodes {
		node, err := nodeObj.GetMAASObject()
		if err != nil {
			return nil, err
		}
		instances[index] = &maasInstance{
			maasObject: &node,
			environ: environ,
		}
	}
	return instances, nil
}
// Instances returns the instance.Instance objects corresponding to the
// given slice of instance.Id. The error is ErrNoInstances if no
// instances were found; ErrPartialInstances if only some were. Missing
// ids yield nil entries in the result.
func (environ *maasEnviron) Instances(ids []instance.Id) ([]instance.Instance, error) {
	if len(ids) == 0 {
		// An empty filter would mean "return all instances" to the MAAS
		// API, so handle it as a special case. The interface requires
		// this particular error when no instances were found.
		return nil, environs.ErrNoInstances
	}
	found, err := environ.instances(ids)
	if err != nil {
		return nil, err
	}
	if len(found) == 0 {
		return nil, environs.ErrNoInstances
	}
	byID := make(map[instance.Id]instance.Instance)
	for _, inst := range found {
		byID[inst.Id()] = inst
	}
	result := make([]instance.Instance, len(ids))
	for i, id := range ids {
		result[i] = byID[id]
	}
	if len(found) < len(ids) {
		return result, environs.ErrPartialInstances
	}
	return result, nil
}
// AllInstances returns all the instance.Instance in this provider.
// A nil filter means "everything" to the MAAS listing API.
func (environ *maasEnviron) AllInstances() ([]instance.Instance, error) {
	return environ.instances(nil)
}

// Storage is defined by the Environ interface.
// Guarded by ecfgMutex, which protects storageUnlocked.
func (env *maasEnviron) Storage() storage.Storage {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.storageUnlocked
}

// Destroy is specified in the Environ interface; it delegates to the
// provider-common implementation.
func (environ *maasEnviron) Destroy() error {
	return common.Destroy(environ)
}

// MAAS does not do firewalling so these port methods do nothing.

// OpenPorts is a no-op for MAAS.
func (*maasEnviron) OpenPorts([]instance.Port) error {
	logger.Debugf("unimplemented OpenPorts() called")
	return nil
}

// ClosePorts is a no-op for MAAS.
func (*maasEnviron) ClosePorts([]instance.Port) error {
	logger.Debugf("unimplemented ClosePorts() called")
	return nil
}

// Ports always reports an empty port list.
func (*maasEnviron) Ports() ([]instance.Port, error) {
	logger.Debugf("unimplemented Ports() called")
	return []instance.Port{}, nil
}

// Provider returns the singleton MAAS provider instance.
func (*maasEnviron) Provider() environs.EnvironProvider {
	return &providerInstance
}

// GetImageSources returns a list of sources which are used to search for simplestreams image metadata.
func (e *maasEnviron) GetImageSources() ([]simplestreams.DataSource, error) {
	// Add the simplestreams source off the control bucket.
	return []simplestreams.DataSource{
		storage.NewStorageSimpleStreamsDataSource("cloud storage", e.Storage(), storage.BaseImagesPath)}, nil
}

// GetToolsSources returns a list of sources which are used to search for simplestreams tools metadata.
func (e *maasEnviron) GetToolsSources() ([]simplestreams.DataSource, error) {
	// Add the simplestreams source off the control bucket.
	return []simplestreams.DataSource{
		storage.NewStorageSimpleStreamsDataSource("cloud storage", e.Storage(), storage.BaseToolsPath)}, nil
}
Added a method to fetch the networks of a given node.
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package maas
import (
"encoding/base64"
"fmt"
"net/url"
"strings"
"sync"
"time"
"launchpad.net/gomaasapi"
"launchpad.net/juju-core/agent"
"launchpad.net/juju-core/cloudinit"
"launchpad.net/juju-core/constraints"
"launchpad.net/juju-core/environs"
"launchpad.net/juju-core/environs/config"
"launchpad.net/juju-core/environs/imagemetadata"
"launchpad.net/juju-core/environs/simplestreams"
"launchpad.net/juju-core/environs/storage"
envtools "launchpad.net/juju-core/environs/tools"
"launchpad.net/juju-core/instance"
"launchpad.net/juju-core/provider/common"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/api"
"launchpad.net/juju-core/tools"
"launchpad.net/juju-core/utils"
)
const (
	// We're using v1.0 of the MAAS API.
	apiVersion = "1.0"
)

// A request may fail due to "eventual consistency" semantics, which
// should resolve fairly quickly. A request may also fail due to a slow
// state transition (for instance an instance taking a while to release
// a security group after termination). The former failure mode is
// dealt with by shortAttempt; the latter by a longer strategy
// (presumably a LongAttempt defined elsewhere — not visible here).
var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,
	Delay: 200 * time.Millisecond,
}

// maasEnviron implements environs.Environ for the MAAS provider.
type maasEnviron struct {
	name string

	// archMutex gates access to supportedArchitectures.
	archMutex sync.Mutex
	// supportedArchitectures caches the architectures
	// for which images can be instantiated.
	supportedArchitectures []string

	// ecfgMutex protects the *Unlocked fields below.
	ecfgMutex sync.Mutex

	ecfgUnlocked *maasEnvironConfig
	maasClientUnlocked *gomaasapi.MAASObject
	storageUnlocked storage.Storage
}

// Compile-time interface satisfaction checks.
var _ environs.Environ = (*maasEnviron)(nil)
var _ imagemetadata.SupportsCustomSources = (*maasEnviron)(nil)
var _ envtools.SupportsCustomSources = (*maasEnviron)(nil)
// NewEnviron creates a maasEnviron configured from cfg, with its
// storage initialised.
func NewEnviron(cfg *config.Config) (*maasEnviron, error) {
	env := &maasEnviron{}
	if err := env.SetConfig(cfg); err != nil {
		return nil, err
	}
	env.name = cfg.Name()
	env.storageUnlocked = NewStorage(env)
	return env, nil
}
// Name is specified in the Environ interface.
func (env *maasEnviron) Name() string {
	return env.name
}

// Bootstrap is specified in the Environ interface; it delegates to the
// provider-common bootstrap implementation.
func (env *maasEnviron) Bootstrap(ctx environs.BootstrapContext, cons constraints.Value) error {
	return common.Bootstrap(ctx, env, cons)
}

// StateInfo is specified in the Environ interface.
func (env *maasEnviron) StateInfo() (*state.Info, *api.Info, error) {
	return common.StateInfo(env)
}

// ecfg returns the environment's maasEnvironConfig, and protects it with a
// mutex.
func (env *maasEnviron) ecfg() *maasEnvironConfig {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.ecfgUnlocked
}

// Config is specified in the Environ interface.
// Reads the config via ecfg(), so access is lock-protected.
func (env *maasEnviron) Config() *config.Config {
	return env.ecfg().Config
}
// SetConfig is specified in the Environ interface.
// It validates the transition from the old config to the new one and,
// only on full success, swaps in the new config and a freshly
// authenticated MAAS client.
func (env *maasEnviron) SetConfig(cfg *config.Config) error {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()

	// The new config has already been validated by itself, but now we
	// validate the transition from the old config to the new.
	var oldCfg *config.Config
	if env.ecfgUnlocked != nil {
		oldCfg = env.ecfgUnlocked.Config
	}
	cfg, err := env.Provider().Validate(cfg, oldCfg)
	if err != nil {
		return err
	}
	ecfg, err := providerInstance.newConfig(cfg)
	if err != nil {
		return err
	}
	authClient, err := gomaasapi.NewAuthenticatedClient(ecfg.maasServer(), ecfg.maasOAuth(), apiVersion)
	if err != nil {
		return err
	}
	// Commit the new state only after every fallible step has succeeded,
	// so a failed SetConfig cannot leave the environ half-updated
	// (previously ecfgUnlocked was replaced before the client was built,
	// leaving a new config paired with a stale client on error).
	env.ecfgUnlocked = ecfg
	env.maasClientUnlocked = gomaasapi.NewMAAS(*authClient)
	return nil
}
// SupportedArchitectures is specified on the EnvironCapability interface.
// The result is computed once and cached, guarded by archMutex; a failed
// lookup leaves the cache nil so it is retried on the next call.
func (env *maasEnviron) SupportedArchitectures() ([]string, error) {
	env.archMutex.Lock()
	defer env.archMutex.Unlock()
	if env.supportedArchitectures != nil {
		return env.supportedArchitectures, nil
	}
	// Create a filter to get all images from our region and for the correct stream.
	imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{
		Stream: env.Config().ImageStream(),
	})
	var err error
	env.supportedArchitectures, err = common.SupportedArchitectures(env, imageConstraint)
	return env.supportedArchitectures, err
}

// getMAASClient returns a MAAS client object to use for a request, in a
// lock-protected fashion (ecfgMutex, shared with SetConfig).
func (env *maasEnviron) getMAASClient() *gomaasapi.MAASObject {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.maasClientUnlocked
}
// convertConstraints converts the given constraints into an url.Values
// object suitable to pass to MAAS when acquiring a node.
// CpuPower cannot be translated into something meaningful for MAAS
// right now, so it is only warned about; the same goes for RootDisk
// (bug 1212689).
func convertConstraints(cons constraints.Value) url.Values {
	values := url.Values{}
	if cons.Arch != nil {
		values.Add("arch", *cons.Arch)
	}
	if cons.CpuCores != nil {
		values.Add("cpu_count", fmt.Sprintf("%d", *cons.CpuCores))
	}
	if cons.Mem != nil {
		values.Add("mem", fmt.Sprintf("%d", *cons.Mem))
	}
	if tags := cons.Tags; tags != nil && len(*tags) > 0 {
		values.Add("tags", strings.Join(*tags, ","))
	}
	if cons.RootDisk != nil {
		logger.Warningf("ignoring unsupported constraint 'root-disk'")
	}
	if cons.CpuPower != nil {
		logger.Warningf("ignoring unsupported constraint 'cpu-power'")
	}
	return values
}
// addNetworks converts networks include/exclude information into
// url.Values entries suitable to pass to MAAS when acquiring a node.
func addNetworks(params url.Values, nets environs.Networks) {
	// Ranging over a nil slice is a no-op, so no explicit nil checks
	// are needed.
	for _, name := range nets.IncludeNetworks {
		params.Add("networks", name)
	}
	for _, name := range nets.ExcludeNetworks {
		params.Add("not_networks", name)
	}
}
// acquireNode allocates a node from the MAAS, retrying transient
// failures per shortAttempt, and returns the node together with the
// tools to run on it.
func (environ *maasEnviron) acquireNode(cons constraints.Value, nets environs.Networks, possibleTools tools.List) (gomaasapi.MAASObject, *tools.Tools, error) {
	if len(possibleTools) == 0 {
		// Guard the possibleTools[0] access below; previously an empty
		// list caused an index-out-of-range panic.
		return gomaasapi.MAASObject{}, nil, fmt.Errorf("no tools available to start node with")
	}
	acquireParams := convertConstraints(cons)
	addNetworks(acquireParams, nets)
	acquireParams.Add("agent_name", environ.ecfg().maasAgentName())
	var result gomaasapi.JSONObject
	var err error
	for a := shortAttempt.Start(); a.Next(); {
		client := environ.getMAASClient().GetSubObject("nodes/")
		result, err = client.CallPost("acquire", acquireParams)
		if err == nil {
			break
		}
	}
	if err != nil {
		return gomaasapi.MAASObject{}, nil, err
	}
	node, err := result.GetMAASObject()
	if err != nil {
		return gomaasapi.MAASObject{}, nil, fmt.Errorf("unexpected result from 'acquire' on MAAS API: %v", err)
	}
	// TODO: select tools matching the node rather than the first entry.
	// Renamed from "tools", which shadowed the imported tools package.
	selected := possibleTools[0]
	logger.Warningf("picked arbitrary tools %q", selected)
	return node, selected, nil
}
// startNode installs and boots a node. The userdata is base64-encoded
// before being handed to MAAS.
func (environ *maasEnviron) startNode(node gomaasapi.MAASObject, series string, userdata []byte) error {
	userDataParam := base64.StdEncoding.EncodeToString(userdata)
	params := url.Values{
		"distro_series": {series},
		"user_data":     {userDataParam},
	}
	// Initialize err to a non-nil value as a sentinel for the following
	// loop: it retries until the POST succeeds (err becomes nil) or the
	// attempt strategy is exhausted.
	err := fmt.Errorf("(no error)")
	for a := shortAttempt.Start(); a.Next() && err != nil; {
		_, err = node.CallPost("start", params)
	}
	return err
}

// createBridgeNetwork returns a shell command string (run via
// cloud-init) that writes /etc/network/eth0.config, switching eth0 to
// manual and defining a DHCP bridge br0 on top of it.
func createBridgeNetwork() string {
	return `cat > /etc/network/eth0.config << EOF
iface eth0 inet manual
auto br0
iface br0 inet dhcp
bridge_ports eth0
EOF
`
}

// linkBridgeInInterfaces rewrites /etc/network/interfaces so that it
// sources the eth0.config file created by createBridgeNetwork instead
// of configuring eth0 directly.
func linkBridgeInInterfaces() string {
	return `sed -i "s/iface eth0 inet dhcp/source \/etc\/network\/eth0.config/" /etc/network/interfaces`
}
// StartInstance is specified in the InstanceBroker interface.
// It acquires a node from MAAS, composes cloud-init userdata, and boots
// the node. If any step after acquisition fails, the deferred cleanup
// releases the node again.
func (environ *maasEnviron) StartInstance(args environs.StartInstanceParams) (instance.Instance, *instance.HardwareCharacteristics, error) {
	node, selectedTools, err := environ.acquireNode(args.Constraints, args.Networks, args.Tools)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instances: %v", err)
	}
	inst := &maasInstance{maasObject: &node, environ: environ}
	args.MachineConfig.Tools = selectedTools
	defer func() {
		// Release the acquired node if any later step failed.
		if err != nil {
			if err := environ.releaseInstance(inst); err != nil {
				logger.Errorf("error releasing failed instance: %v", err)
			}
		}
	}()
	hostname, err := inst.DNSName()
	if err != nil {
		return nil, nil, err
	}
	// Assign to the outer err (previously shadowed with := inside the
	// if) so the deferred cleanup actually fires on this failure path.
	if err = environs.FinishMachineConfig(args.MachineConfig, environ.Config(), args.Constraints); err != nil {
		return nil, nil, err
	}
	// TODO(thumper): 2013-08-28 bug 1217614
	// The machine environment config values are being moved to the agent config.
	// Explicitly specify that the lxc containers use the network bridge defined above.
	args.MachineConfig.AgentEnvironment[agent.LxcBridge] = "br0"
	cloudcfg, err := newCloudinitConfig(hostname)
	if err != nil {
		return nil, nil, err
	}
	userdata, err := environs.ComposeUserData(args.MachineConfig, cloudcfg)
	if err != nil {
		return nil, nil, fmt.Errorf("could not compose userdata for bootstrap node: %v", err)
	}
	logger.Debugf("maas user data; %d bytes", len(userdata))
	series := args.Tools.OneSeries()
	// Same shadowing fix as above: err must be the outer variable so the
	// deferred release runs when boot fails.
	if err = environ.startNode(*inst.maasObject, series, userdata); err != nil {
		return nil, nil, err
	}
	logger.Debugf("started instance %q", inst.Id())
	// TODO(bug 1193998) - return instance hardware characteristics as well.
	return inst, nil, nil
}
// newCloudinitConfig creates a cloudinit.Config structure
// suitable as a base for initialising a MAAS node: it runs the
// machine-info command, installs bridge-utils, and swaps eth0 for the
// br0 bridge used by lxc containers.
func newCloudinitConfig(hostname string) (*cloudinit.Config, error) {
	info := machineInfo{hostname}
	runCmd, err := info.cloudinitRunCmd()
	if err != nil {
		return nil, err
	}
	cloudcfg := cloudinit.New()
	cloudcfg.SetAptUpdate(true)
	cloudcfg.AddPackage("bridge-utils")
	cloudcfg.AddScripts(
		"set -xe",
		runCmd,
		"ifdown eth0",
		createBridgeNetwork(),
		linkBridgeInInterfaces(),
		"ifup br0",
	)
	return cloudcfg, nil
}

// StopInstances is specified in the InstanceBroker interface.
// (The header comment previously said "StartInstance" — copy/paste
// error.)
func (environ *maasEnviron) StopInstances(instances []instance.Instance) error {
	// Shortcut to exit quickly if 'instances' is an empty slice or nil.
	if len(instances) == 0 {
		return nil
	}
	// Tell MAAS to release each of the instances. If there are errors,
	// return only the first one (but release all instances regardless).
	// Note that releasing instances also turns them off.
	// NOTE(review): the loop variable shadows the imported "instance"
	// package (harmless here, but worth renaming).
	var firstErr error
	for _, instance := range instances {
		err := environ.releaseInstance(instance)
		if firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// releaseInstance releases a single instance back to MAAS (which also
// turns the node off). The instance must be a *maasInstance.
func (environ *maasEnviron) releaseInstance(inst instance.Instance) error {
	maasInst, ok := inst.(*maasInstance)
	if !ok {
		// Previously a bare type assertion panicked on foreign types.
		return fmt.Errorf("unexpected instance type %T", inst)
	}
	_, err := maasInst.maasObject.CallPost("release", nil)
	if err != nil {
		// Include the error itself; it was previously omitted from the
		// log message.
		logger.Debugf("error releasing instance %v: %v", maasInst, err)
	}
	return err
}
// instances calls the MAAS API to list nodes. The "ids" slice is a filter for
// specific instance IDs. Due to how this works in the HTTP API, an empty
// "ids" matches all instances (not none as you might expect).
func (environ *maasEnviron) instances(ids []instance.Id) ([]instance.Instance, error) {
	nodeListing := environ.getMAASClient().GetSubObject("nodes")
	filter := getSystemIdValues(ids)
	// Restrict the listing to nodes acquired by this agent.
	filter.Add("agent_name", environ.ecfg().maasAgentName())
	listNodeObjects, err := nodeListing.CallGet("list", filter)
	if err != nil {
		return nil, err
	}
	listNodes, err := listNodeObjects.GetArray()
	if err != nil {
		return nil, err
	}
	// Wrap each returned MAAS node object in a maasInstance.
	instances := make([]instance.Instance, len(listNodes))
	for index, nodeObj := range listNodes {
		node, err := nodeObj.GetMAASObject()
		if err != nil {
			return nil, err
		}
		instances[index] = &maasInstance{
			maasObject: &node,
			environ: environ,
		}
	}
	return instances, nil
}
// Instances returns the instance.Instance objects corresponding to the
// given slice of instance.Id. The error is ErrNoInstances if no
// instances were found; ErrPartialInstances if only some were. Missing
// ids yield nil entries in the result.
func (environ *maasEnviron) Instances(ids []instance.Id) ([]instance.Instance, error) {
	if len(ids) == 0 {
		// An empty filter would mean "return all instances" to the MAAS
		// API, so handle it as a special case. The interface requires
		// this particular error when no instances were found.
		return nil, environs.ErrNoInstances
	}
	found, err := environ.instances(ids)
	if err != nil {
		return nil, err
	}
	if len(found) == 0 {
		return nil, environs.ErrNoInstances
	}
	byID := make(map[instance.Id]instance.Instance)
	for _, inst := range found {
		byID[inst.Id()] = inst
	}
	result := make([]instance.Instance, len(ids))
	for i, id := range ids {
		result[i] = byID[id]
	}
	if len(found) < len(ids) {
		return result, environs.ErrPartialInstances
	}
	return result, nil
}
// AllInstances returns all the instance.Instance in this provider.
// A nil filter means "everything" to the MAAS listing API.
func (environ *maasEnviron) AllInstances() ([]instance.Instance, error) {
	return environ.instances(nil)
}

// Storage is defined by the Environ interface.
// Guarded by ecfgMutex, which protects storageUnlocked.
func (env *maasEnviron) Storage() storage.Storage {
	env.ecfgMutex.Lock()
	defer env.ecfgMutex.Unlock()
	return env.storageUnlocked
}

// Destroy is specified in the Environ interface; it delegates to the
// provider-common implementation.
func (environ *maasEnviron) Destroy() error {
	return common.Destroy(environ)
}

// MAAS does not do firewalling so these port methods do nothing.

// OpenPorts is a no-op for MAAS.
func (*maasEnviron) OpenPorts([]instance.Port) error {
	logger.Debugf("unimplemented OpenPorts() called")
	return nil
}

// ClosePorts is a no-op for MAAS.
func (*maasEnviron) ClosePorts([]instance.Port) error {
	logger.Debugf("unimplemented ClosePorts() called")
	return nil
}

// Ports always reports an empty port list.
func (*maasEnviron) Ports() ([]instance.Port, error) {
	logger.Debugf("unimplemented Ports() called")
	return []instance.Port{}, nil
}

// Provider returns the singleton MAAS provider instance.
func (*maasEnviron) Provider() environs.EnvironProvider {
	return &providerInstance
}

// GetImageSources returns a list of sources which are used to search for simplestreams image metadata.
func (e *maasEnviron) GetImageSources() ([]simplestreams.DataSource, error) {
	// Add the simplestreams source off the control bucket.
	return []simplestreams.DataSource{
		storage.NewStorageSimpleStreamsDataSource("cloud storage", e.Storage(), storage.BaseImagesPath)}, nil
}

// GetToolsSources returns a list of sources which are used to search for simplestreams tools metadata.
func (e *maasEnviron) GetToolsSources() ([]simplestreams.DataSource, error) {
	// Add the simplestreams source off the control bucket.
	return []simplestreams.DataSource{
		storage.NewStorageSimpleStreamsDataSource("cloud storage", e.Storage(), storage.BaseToolsPath)}, nil
}
// GetNetworksList returns the networks of a given MAAS node instance,
// looked up by the node's system_id.
func (e *maasEnviron) GetNetworksList(node *gomaasapi.MAASObject) (gomaasapi.JSONObject, error) {
	systemID, err := node.GetField("system_id")
	if err != nil {
		return gomaasapi.JSONObject{}, err
	}
	return node.CallGet("networks", url.Values{"node": {systemID}})
}
|
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package juju
import (
"bufio"
"fmt"
"github.com/globocom/config"
"github.com/globocom/tsuru/app"
"github.com/globocom/tsuru/db"
"github.com/globocom/tsuru/heal"
"github.com/globocom/tsuru/log"
"github.com/globocom/tsuru/provision"
"launchpad.net/goamz/aws"
"launchpad.net/goamz/ec2"
"launchpad.net/goamz/s3"
"net"
"os/exec"
"strings"
)
// init registers every healer implemented in this package with the heal
// registry, keyed by the name used to enable it.
func init() {
	heal.Register("bootstrap", bootstrapMachineHealer{})
	heal.Register("bootstrap-provision", bootstrapProvisionHealer{})
	heal.Register("instance-machine", instanceMachineHealer{})
	heal.Register("instance-agents-config", instanceAgentsConfigHealer{})
	heal.Register("instance-unit", instanceUnitHealer{})
	heal.Register("zookeeper", zookeeperHealer{})
	heal.Register("elb-instance", elbInstanceHealer{})
	heal.Register("bootstrap-instanceid", bootstrapInstanceIdHealer{})
}
// bootstrapInstanceIdHealer keeps the bootstrap instance id stored in the
// juju S3 control bucket in sync with the actual EC2 bootstrap instance.
type bootstrapInstanceIdHealer struct {
	s *s3.S3   // lazily initialized; see s3()
	e *ec2.EC2 // lazily initialized; see ec2()
}
// Heal rewrites the provider-state file in the juju control bucket so that
// its zookeeper-instances entry matches the actual EC2 bootstrap instance.
// A log line announces the heal, consistent with the later revision of this
// file (see the duplicate copy below, which adds the same message).
func (h bootstrapInstanceIdHealer) Heal() error {
	if h.needsHeal() {
		log.Print("healing bootstrap instance id")
		jujuBucket, err := config.GetString("juju:bucket")
		if err != nil {
			return err
		}
		bucket := h.s3().Bucket(jujuBucket)
		ec2InstanceId, err := h.bootstrapInstanceId()
		if err != nil {
			return err
		}
		return bucket.Put("provider-state", []byte("zookeeper-instances: ["+ec2InstanceId+"]"), "binary/octet-stream", s3.BucketOwnerFull)
	}
	return nil
}
// needsHeal reports whether the instance id recorded in the S3 bucket
// differs from the actual EC2 bootstrap instance id. Lookup errors are
// deliberately swallowed and count as "no heal needed" (best effort).
func (h *bootstrapInstanceIdHealer) needsHeal() bool {
	s3InstanceId, err := h.bootstrapInstanceIdFromBucket()
	if err != nil {
		return false
	}
	ec2InstanceId, err := h.bootstrapInstanceId()
	if err != nil {
		return false
	}
	// S1008: return the comparison directly instead of if/return true/false.
	return s3InstanceId != ec2InstanceId
}
// ec2 lazily builds and caches the EC2 endpoint.
func (h *bootstrapInstanceIdHealer) ec2() *ec2.EC2 {
	if h.e == nil {
		h.e = getEC2Endpoint()
	}
	return h.e
}
// s3 lazily builds and caches the S3 endpoint.
func (h *bootstrapInstanceIdHealer) s3() *s3.S3 {
	if h.s == nil {
		h.s = h.getS3Endpoint()
	}
	return h.s
}
// getS3Endpoint builds an S3 client (us-east-1) from the AWS credentials in
// the configuration. Missing credentials abort the process via log.Fatal.
func (bootstrapInstanceIdHealer) getS3Endpoint() *s3.S3 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err)
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err)
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return s3.New(auth, aws.USEast)
}
// bootstrapInstanceIdFromBucket reads the provider-state file from the juju
// control bucket and extracts the instance id by stripping the
// "zookeeper-instances: [...]" wrapper around it.
func (h *bootstrapInstanceIdHealer) bootstrapInstanceIdFromBucket() (string, error) {
	jujuBucket, err := config.GetString("juju:bucket")
	if err != nil {
		return "", err
	}
	bucket := h.s3().Bucket(jujuBucket)
	data, err := bucket.Get("provider-state")
	if err != nil {
		return "", err
	}
	// Remove every occurrence of the prefix and of "]" in one pass.
	cleaner := strings.NewReplacer("zookeeper-instances: [", "", "]", "")
	return cleaner.Replace(string(data)), nil
}
// bootstrapInstanceId returns the id of the first EC2 instance found in a
// reservation that belongs to the "juju-delta-0" security group, or "" when
// no such instance exists.
func (h *bootstrapInstanceIdHealer) bootstrapInstanceId() (string, error) {
	resp, err := h.ec2().Instances(nil, nil)
	if err != nil {
		return "", err
	}
	for _, reservation := range resp.Reservations {
		for _, group := range reservation.SecurityGroups {
			// SA4004: the original ranged over Instances only to return the
			// first element; take it directly (guarding the empty case).
			if group.Name == "juju-delta-0" && len(reservation.Instances) > 0 {
				return reservation.Instances[0].InstanceId, nil
			}
		}
	}
	return "", nil
}
// instanceAgentsConfigHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceAgentsConfigHealer struct {
	e *ec2.EC2 // lazily initialized EC2 endpoint; see ec2()
}
// ec2 lazily builds and caches the EC2 endpoint.
func (h *instanceAgentsConfigHealer) ec2() *ec2.EC2 {
	if h.e == nil {
		h.e = getEC2Endpoint()
	}
	return h.e
}
// getEC2Endpoint builds an EC2 client from the AWS credentials and endpoint
// URL in the configuration. Missing settings abort the process via log.Fatal.
func getEC2Endpoint() *ec2.EC2 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err)
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err)
	}
	endpoint, err := config.GetString("aws:ec2:endpoint")
	if err != nil {
		log.Fatal(err)
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return ec2.New(auth, aws.Region{EC2Endpoint: endpoint})
}
// getPrivateDns returns the private dns for an instance.
// Returns an error (instead of panicking on an index out of range) when EC2
// reports no reservation or no instance for the given id.
func (h *instanceAgentsConfigHealer) getPrivateDns(instanceId string) (string, error) {
	log.Printf("getting dns for %s", instanceId)
	resp, err := h.ec2().Instances([]string{instanceId}, nil)
	if err != nil {
		log.Printf("error in getting dns for %s", instanceId)
		log.Print(err)
		return "", err
	}
	if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
		return "", fmt.Errorf("no instance found for %s", instanceId)
	}
	dns := resp.Reservations[0].Instances[0].PrivateDNSName
	return dns, nil
}
// bootstrapPrivateDns returns the private dns of the bootstrap machine.
func (h *instanceAgentsConfigHealer) bootstrapPrivateDns() (string, error) {
	machine := getBootstrapMachine()
	return h.getPrivateDns(machine.InstanceId)
}
// Heal verifies if the bootstrap private dns is different from the bootstrap
// private dns set into the agents for each machine.
// If the bootstrap private dns is wrong, Heal injects the correct value.
func (h instanceAgentsConfigHealer) Heal() error {
	p := JujuProvisioner{}
	// NOTE(review): errors from getOutput and bootstrapPrivateDns are
	// ignored; on failure dns is empty and every grep below will fail,
	// causing a rewrite with an empty dns — confirm this is acceptable.
	output, _ := p.getOutput()
	dns, _ := h.bootstrapPrivateDns()
	log.Printf("bootstrap dns is %s", dns)
	for _, service := range output.Services {
		for unitName, unit := range service.Units {
			// grep over ssh exits non-zero when dns is absent from the
			// machine agent config; that is the signal to rewrite it.
			cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "grep", dns, "/etc/init/juju-machine-agent.conf")
			err := cmd.Run()
			if err != nil {
				log.Printf("Injecting bootstrap private dns for machine %d", unit.Machine)
				cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "sudo", "sed", "-i", "'s/env JUJU_ZOOKEEPER=.*/env JUJU_ZOOKEEPER=\""+dns+":2181\"/g'", "/etc/init/juju-machine-agent.conf")
				// Best effort: a failed injection is not reported.
				cmd.Run()
			}
			// Same check-and-fix for the per-unit agent config file.
			agent := fmt.Sprintf("/etc/init/juju-%s.conf", strings.Join(strings.Split(unitName, "/"), "-"))
			cmd = exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "grep", dns, agent)
			err = cmd.Run()
			if err != nil {
				log.Printf("Injecting bootstrap private dns for agent %s", agent)
				cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "sudo", "sed", "-i", "'s/env JUJU_ZOOKEEPER=.*/env JUJU_ZOOKEEPER=\""+dns+":2181\"/g'", agent)
				cmd.Run()
			}
		}
	}
	return nil
}
// instanceUnitHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceUnitHealer struct{}
// Heal iterates through all juju units and restarts (stop then start, via
// upstart) the juju unit agent of every unit whose agent state is "down".
func (h instanceUnitHealer) Heal() error {
	provisioner := JujuProvisioner{}
	output, _ := provisioner.getOutput()
	for _, svc := range output.Services {
		for name, unit := range svc.Units {
			// Unit names use "/" (e.g. app/0); agent names use "-".
			agent := fmt.Sprintf("juju-%s", strings.Join(strings.Split(name, "/"), "-"))
			if unit.AgentState != "down" {
				log.Printf("%s needs no cure, skipping...", agent)
				continue
			}
			log.Printf("Healing %s", agent)
			upStartCmd("stop", agent, unit.PublicAddress)
			upStartCmd("start", agent, unit.PublicAddress)
		}
	}
	return nil
}
// instanceMachineHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceMachineHealer struct{}
// Heal iterates through all juju machines and restarts (stop then start,
// via upstart) the juju-machine-agent of every machine reported as "down".
func (h instanceMachineHealer) Heal() error {
	provisioner := JujuProvisioner{}
	output, _ := provisioner.getOutput()
	for _, m := range output.Machines {
		if m.AgentState != "down" {
			log.Printf("juju-machine-agent for machine %s needs no cure, skipping...", m.InstanceId)
			continue
		}
		log.Printf("Healing juju-machine-agent in machine %s", m.InstanceId)
		upStartCmd("stop", "juju-machine-agent", m.IpAddress)
		upStartCmd("start", "juju-machine-agent", m.IpAddress)
	}
	return nil
}
// zookeeperHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type zookeeperHealer struct{}
// needsHeal verifies if zookeeper is ok using the 'ruok' command.
// A failed TCP dial counts as unhealthy; otherwise the node must answer
// a line containing "imok".
func (h zookeeperHealer) needsHeal() bool {
	bootstrapMachine := getBootstrapMachine()
	conn, err := net.Dial("tcp", fmt.Sprintf("%s:2181", bootstrapMachine.IpAddress))
	if err != nil {
		return true
	}
	defer conn.Close()
	fmt.Fprintf(conn, "ruok\r\n\r\n")
	// Read errors are ignored: an empty status simply fails the check below.
	status, _ := bufio.NewReader(conn).ReadString('\n')
	return !strings.Contains(status, "imok")
}
// Heal restarts zookeeper (stop then start, via upstart) on the bootstrap
// machine when the 'ruok' check fails; otherwise it does nothing.
func (h zookeeperHealer) Heal() error {
	if !h.needsHeal() {
		log.Printf("Zookeeper needs no cure, skipping...")
		return nil
	}
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing zookeeper")
	upStartCmd("stop", "zookeeper", bootstrapMachine.IpAddress)
	return upStartCmd("start", "zookeeper", bootstrapMachine.IpAddress)
}
// bootstrapProvisionHealer is an implementation for the Healer interface. For more
// details on how a healer works, check the documentation of the heal package.
type bootstrapProvisionHealer struct{}
// Heal starts the juju-provision-agent using upstart.
// Unlike the other healers, it runs unconditionally (no needsHeal check).
func (h bootstrapProvisionHealer) Heal() error {
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing bootstrap juju-provision-agent")
	return upStartCmd("start", "juju-provision-agent", bootstrapMachine.IpAddress)
}
// bootstrapMachineHealer is an implementation for the Healer interface. For more
// details on how a healer works, check the documentation of the heal package.
type bootstrapMachineHealer struct{}
// getBootstrapMachine returns the bootstrap machine.
// NOTE(review): the error from getOutput is ignored; on failure Machines is
// empty and the access below panics or yields a zero machine, depending on
// whether Machines is a slice or a map — confirm against JujuProvisioner.
func getBootstrapMachine() machine {
	p := JujuProvisioner{}
	output, _ := p.getOutput()
	// In juju the bootstrap machine is always machine 0.
	return output.Machines[0]
}
// needsHeal returns true if the AgentState of the bootstrap machine is
// "not-started", i.e. the machine agent never came up.
func (h bootstrapMachineHealer) needsHeal() bool {
	bootstrapMachine := getBootstrapMachine()
	return bootstrapMachine.AgentState == "not-started"
}
// upStartCmd runs an upstart command (e.g. "start"/"stop") for the given
// daemon on the given machine, over ssh as the ubuntu user. It returns the
// error from the ssh invocation, if any.
func upStartCmd(cmd, daemon, machine string) error {
	args := []string{
		"-o",
		"StrictHostKeyChecking no",
		"-q",
		"-l",
		"ubuntu",
		machine,
		"sudo",
		cmd,
		daemon,
	}
	// log.Print, not log.Printf: the joined args are data, not a format
	// string, and a literal '%' in them would be mangled by Printf.
	log.Print(strings.Join(args, " "))
	c := exec.Command("ssh", args...)
	return c.Run()
}
// Heal restarts (stop then start, via upstart) the bootstrap machine's
// juju-machine-agent when it never started; otherwise it does nothing.
func (h bootstrapMachineHealer) Heal() error {
	if !h.needsHeal() {
		log.Printf("Bootstrap juju-machine-agent needs no cure, skipping...")
		return nil
	}
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing bootstrap juju-machine-agent")
	upStartCmd("stop", "juju-machine-agent", bootstrapMachine.IpAddress)
	return upStartCmd("start", "juju-machine-agent", bootstrapMachine.IpAddress)
}
// elbInstanceHealer replaces app units whose ELB instances are out of service.
type elbInstanceHealer struct{}
// Heal finds apps with units down or in error, asks ELB which of their load
// balancer instances are unhealthy, and replaces each unhealthy unit
// (remove it, then add a fresh one).
func (h elbInstanceHealer) Heal() error {
	apps := h.getUnhealthyApps()
	if len(apps) == 0 {
		log.Print("No app is down.")
		return nil
	}
	names := make([]string, 0, len(apps))
	for name := range apps {
		names = append(names, name)
	}
	instances, err := h.checkInstances(names)
	if err != nil || len(instances) == 0 {
		// Errors from checkInstances are deliberately not propagated.
		return nil
	}
	for _, inst := range instances {
		a := apps[inst.lb]
		if err := a.RemoveUnit(inst.id); err != nil {
			return err
		}
		if err := a.AddUnits(1); err != nil {
			return err
		}
	}
	return nil
}
// checkInstances returns the instances in the named load balancers that ELB
// reports as unhealthy. It is a no-op (nil, nil) unless juju:use-elb is
// enabled in the configuration.
func (h elbInstanceHealer) checkInstances(names []string) ([]elbInstance, error) {
	// Errors reading the flag are treated as "ELB disabled".
	if elbSupport, _ := config.GetBool("juju:use-elb"); !elbSupport {
		return nil, nil
	}
	lbs, err := h.describeLoadBalancers(names)
	if err != nil {
		return nil, err
	}
	var unhealthy []elbInstance
	// These three values match what ELB reports for an instance that failed
	// its health checks; only that exact combination is considered unhealthy.
	description := "Instance has failed at least the UnhealthyThreshold number of health checks consecutively."
	state := "OutOfService"
	reasonCode := "Instance"
	for _, lb := range lbs {
		instances, err := h.describeInstancesHealth(lb)
		if err != nil {
			return nil, err
		}
		for _, instance := range instances {
			if instance.description == description &&
				instance.state == state &&
				instance.reasonCode == reasonCode {
				unhealthy = append(unhealthy, instance)
			}
		}
	}
	log.Printf("Found %d unhealthy instances.", len(unhealthy))
	return unhealthy, nil
}
// getUnhealthyApps returns all apps that have at least one unit down or in
// error, keyed by app name. Database errors are swallowed and yield a nil
// map (best effort).
func (h elbInstanceHealer) getUnhealthyApps() map[string]app.App {
	conn, err := db.Conn()
	if err != nil {
		return nil
	}
	// NOTE(review): conn is never closed here — confirm db.Conn returns a
	// shared/pooled connection that callers must not close.
	var all []app.App
	apps := make(map[string]app.App)
	// Only the name and units fields are needed for the health check.
	s := map[string]interface{}{"name": 1, "units": 1}
	err = conn.Apps().Find(nil).Select(s).All(&all)
	if err != nil {
		return nil
	}
	for _, a := range all {
		for _, u := range a.ProvisionUnits() {
			if u.GetStatus() == provision.StatusDown ||
				u.GetStatus() == provision.StatusError {
				apps[a.Name] = a
				break // one bad unit is enough to flag the app
			}
		}
	}
	return apps
}
// describeLoadBalancers returns the names of the load balancers that ELB
// knows under the given names.
func (h elbInstanceHealer) describeLoadBalancers(names []string) ([]string, error) {
	resp, err := getELBEndpoint().DescribeLoadBalancers(names...)
	if err != nil {
		return nil, err
	}
	lbs := make([]string, 0, len(resp.LoadBalancerDescriptions))
	for _, desc := range resp.LoadBalancerDescriptions {
		lbs = append(lbs, desc.LoadBalancerName)
	}
	return lbs, nil
}
// describeInstancesHealth returns the health state of every instance
// registered in the given load balancer, tagged with the balancer name.
func (h elbInstanceHealer) describeInstancesHealth(lb string) ([]elbInstance, error) {
	resp, err := getELBEndpoint().DescribeInstanceHealth(lb)
	if err != nil {
		return nil, err
	}
	instances := make([]elbInstance, len(resp.InstanceStates))
	for i, st := range resp.InstanceStates {
		instances[i] = elbInstance{
			id:          st.InstanceId,
			description: st.Description,
			reasonCode:  st.ReasonCode,
			state:       st.State,
			lb:          lb,
		}
	}
	return instances, nil
}
provision/juju: added a log entry when healing the bootstrap instance id.
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package juju
import (
"bufio"
"fmt"
"github.com/globocom/config"
"github.com/globocom/tsuru/app"
"github.com/globocom/tsuru/db"
"github.com/globocom/tsuru/heal"
"github.com/globocom/tsuru/log"
"github.com/globocom/tsuru/provision"
"launchpad.net/goamz/aws"
"launchpad.net/goamz/ec2"
"launchpad.net/goamz/s3"
"net"
"os/exec"
"strings"
)
// init registers every healer implemented in this package with the heal
// registry, keyed by the name used to enable it.
func init() {
	heal.Register("bootstrap", bootstrapMachineHealer{})
	heal.Register("bootstrap-provision", bootstrapProvisionHealer{})
	heal.Register("instance-machine", instanceMachineHealer{})
	heal.Register("instance-agents-config", instanceAgentsConfigHealer{})
	heal.Register("instance-unit", instanceUnitHealer{})
	heal.Register("zookeeper", zookeeperHealer{})
	heal.Register("elb-instance", elbInstanceHealer{})
	heal.Register("bootstrap-instanceid", bootstrapInstanceIdHealer{})
}
// bootstrapInstanceIdHealer keeps the bootstrap instance id stored in the
// juju S3 control bucket in sync with the actual EC2 bootstrap instance.
type bootstrapInstanceIdHealer struct {
	s *s3.S3   // lazily initialized; see s3()
	e *ec2.EC2 // lazily initialized; see ec2()
}
// Heal rewrites the provider-state file in the juju control bucket so that
// its zookeeper-instances entry matches the actual EC2 bootstrap instance.
func (h bootstrapInstanceIdHealer) Heal() error {
	if !h.needsHeal() {
		return nil
	}
	log.Print("healing bootstrap instance id")
	jujuBucket, err := config.GetString("juju:bucket")
	if err != nil {
		return err
	}
	bucket := h.s3().Bucket(jujuBucket)
	ec2InstanceId, err := h.bootstrapInstanceId()
	if err != nil {
		return err
	}
	state := []byte("zookeeper-instances: [" + ec2InstanceId + "]")
	return bucket.Put("provider-state", state, "binary/octet-stream", s3.BucketOwnerFull)
}
// needsHeal reports whether the instance id recorded in the S3 bucket
// differs from the actual EC2 bootstrap instance id. Lookup errors are
// deliberately swallowed and count as "no heal needed" (best effort).
func (h *bootstrapInstanceIdHealer) needsHeal() bool {
	s3InstanceId, err := h.bootstrapInstanceIdFromBucket()
	if err != nil {
		return false
	}
	ec2InstanceId, err := h.bootstrapInstanceId()
	if err != nil {
		return false
	}
	// S1008: return the comparison directly instead of if/return true/false.
	return s3InstanceId != ec2InstanceId
}
// ec2 lazily builds and caches the EC2 endpoint.
func (h *bootstrapInstanceIdHealer) ec2() *ec2.EC2 {
	if h.e == nil {
		h.e = getEC2Endpoint()
	}
	return h.e
}
// s3 lazily builds and caches the S3 endpoint.
func (h *bootstrapInstanceIdHealer) s3() *s3.S3 {
	if h.s == nil {
		h.s = h.getS3Endpoint()
	}
	return h.s
}
// getS3Endpoint builds an S3 client (us-east-1) from the AWS credentials in
// the configuration. Missing credentials abort the process via log.Fatal.
func (bootstrapInstanceIdHealer) getS3Endpoint() *s3.S3 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err)
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err)
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return s3.New(auth, aws.USEast)
}
// bootstrapInstanceIdFromBucket reads the provider-state file from the juju
// control bucket and extracts the instance id by stripping the
// "zookeeper-instances: [...]" wrapper around it.
func (h *bootstrapInstanceIdHealer) bootstrapInstanceIdFromBucket() (string, error) {
	jujuBucket, err := config.GetString("juju:bucket")
	if err != nil {
		return "", err
	}
	bucket := h.s3().Bucket(jujuBucket)
	data, err := bucket.Get("provider-state")
	if err != nil {
		return "", err
	}
	// Remove every occurrence of the prefix, then every "]".
	s := strings.Replace(string(data), "zookeeper-instances: [", "", -1)
	s = strings.Replace(s, "]", "", -1)
	return s, nil
}
// bootstrapInstanceId returns the id of the first EC2 instance found in a
// reservation that belongs to the "juju-delta-0" security group, or "" when
// no such instance exists.
func (h *bootstrapInstanceIdHealer) bootstrapInstanceId() (string, error) {
	resp, err := h.ec2().Instances(nil, nil)
	if err != nil {
		return "", err
	}
	for _, reservation := range resp.Reservations {
		for _, group := range reservation.SecurityGroups {
			// SA4004: the original ranged over Instances only to return the
			// first element; take it directly (guarding the empty case).
			if group.Name == "juju-delta-0" && len(reservation.Instances) > 0 {
				return reservation.Instances[0].InstanceId, nil
			}
		}
	}
	return "", nil
}
// instanceAgentsConfigHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceAgentsConfigHealer struct {
	e *ec2.EC2 // lazily initialized EC2 endpoint; see ec2()
}
// ec2 lazily builds and caches the EC2 endpoint.
func (h *instanceAgentsConfigHealer) ec2() *ec2.EC2 {
	if h.e == nil {
		h.e = getEC2Endpoint()
	}
	return h.e
}
// getEC2Endpoint builds an EC2 client from the AWS credentials and endpoint
// URL in the configuration. Missing settings abort the process via log.Fatal.
func getEC2Endpoint() *ec2.EC2 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err)
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err)
	}
	endpoint, err := config.GetString("aws:ec2:endpoint")
	if err != nil {
		log.Fatal(err)
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return ec2.New(auth, aws.Region{EC2Endpoint: endpoint})
}
// getPrivateDns returns the private dns for an instance.
// Returns an error (instead of panicking on an index out of range) when EC2
// reports no reservation or no instance for the given id.
func (h *instanceAgentsConfigHealer) getPrivateDns(instanceId string) (string, error) {
	log.Printf("getting dns for %s", instanceId)
	resp, err := h.ec2().Instances([]string{instanceId}, nil)
	if err != nil {
		log.Printf("error in getting dns for %s", instanceId)
		log.Print(err)
		return "", err
	}
	if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
		return "", fmt.Errorf("no instance found for %s", instanceId)
	}
	dns := resp.Reservations[0].Instances[0].PrivateDNSName
	return dns, nil
}
// bootstrapPrivateDns returns the private dns of the bootstrap machine.
func (h *instanceAgentsConfigHealer) bootstrapPrivateDns() (string, error) {
	machine := getBootstrapMachine()
	return h.getPrivateDns(machine.InstanceId)
}
// Heal verifies if the bootstrap private dns is different from the bootstrap
// private dns set into the agents for each machine.
// If the bootstrap private dns is wrong, Heal injects the correct value.
func (h instanceAgentsConfigHealer) Heal() error {
	p := JujuProvisioner{}
	// NOTE(review): errors from getOutput and bootstrapPrivateDns are
	// ignored; on failure dns is empty and every grep below will fail,
	// causing a rewrite with an empty dns — confirm this is acceptable.
	output, _ := p.getOutput()
	dns, _ := h.bootstrapPrivateDns()
	log.Printf("bootstrap dns is %s", dns)
	for _, service := range output.Services {
		for unitName, unit := range service.Units {
			// grep over ssh exits non-zero when dns is absent from the
			// machine agent config; that is the signal to rewrite it.
			cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "grep", dns, "/etc/init/juju-machine-agent.conf")
			err := cmd.Run()
			if err != nil {
				log.Printf("Injecting bootstrap private dns for machine %d", unit.Machine)
				cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "sudo", "sed", "-i", "'s/env JUJU_ZOOKEEPER=.*/env JUJU_ZOOKEEPER=\""+dns+":2181\"/g'", "/etc/init/juju-machine-agent.conf")
				// Best effort: a failed injection is not reported.
				cmd.Run()
			}
			// Same check-and-fix for the per-unit agent config file.
			agent := fmt.Sprintf("/etc/init/juju-%s.conf", strings.Join(strings.Split(unitName, "/"), "-"))
			cmd = exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "grep", dns, agent)
			err = cmd.Run()
			if err != nil {
				log.Printf("Injecting bootstrap private dns for agent %s", agent)
				cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking no", "-q", "-l", "ubuntu", unit.PublicAddress, "sudo", "sed", "-i", "'s/env JUJU_ZOOKEEPER=.*/env JUJU_ZOOKEEPER=\""+dns+":2181\"/g'", agent)
				cmd.Run()
			}
		}
	}
	return nil
}
// instanceUnitHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceUnitHealer struct{}
// Heal iterates through all juju units and restarts (stop then start, via
// upstart) the juju unit agent of every unit whose agent state is "down".
func (h instanceUnitHealer) Heal() error {
	provisioner := JujuProvisioner{}
	output, _ := provisioner.getOutput()
	for _, svc := range output.Services {
		for name, unit := range svc.Units {
			// Unit names use "/" (e.g. app/0); agent names use "-".
			agent := fmt.Sprintf("juju-%s", strings.Join(strings.Split(name, "/"), "-"))
			if unit.AgentState != "down" {
				log.Printf("%s needs no cure, skipping...", agent)
				continue
			}
			log.Printf("Healing %s", agent)
			upStartCmd("stop", agent, unit.PublicAddress)
			upStartCmd("start", agent, unit.PublicAddress)
		}
	}
	return nil
}
// instanceMachineHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type instanceMachineHealer struct{}
// Heal iterates through all juju machines and restarts (stop then start,
// via upstart) the juju-machine-agent of every machine reported as "down".
func (h instanceMachineHealer) Heal() error {
	provisioner := JujuProvisioner{}
	output, _ := provisioner.getOutput()
	for _, m := range output.Machines {
		if m.AgentState != "down" {
			log.Printf("juju-machine-agent for machine %s needs no cure, skipping...", m.InstanceId)
			continue
		}
		log.Printf("Healing juju-machine-agent in machine %s", m.InstanceId)
		upStartCmd("stop", "juju-machine-agent", m.IpAddress)
		upStartCmd("start", "juju-machine-agent", m.IpAddress)
	}
	return nil
}
// zookeeperHealer is an implementation for the Healer interface. For more
// detail on how a healer works, check the documentation of the heal package.
type zookeeperHealer struct{}
// needsHeal verifies if zookeeper is ok using the 'ruok' command.
// A failed TCP dial counts as unhealthy; otherwise the node must answer
// a line containing "imok".
func (h zookeeperHealer) needsHeal() bool {
	bootstrapMachine := getBootstrapMachine()
	conn, err := net.Dial("tcp", fmt.Sprintf("%s:2181", bootstrapMachine.IpAddress))
	if err != nil {
		return true
	}
	defer conn.Close()
	fmt.Fprintf(conn, "ruok\r\n\r\n")
	// Read errors are ignored: an empty status simply fails the check below.
	status, _ := bufio.NewReader(conn).ReadString('\n')
	return !strings.Contains(status, "imok")
}
// Heal restarts zookeeper (stop then start, via upstart) on the bootstrap
// machine when the 'ruok' check fails; otherwise it does nothing.
func (h zookeeperHealer) Heal() error {
	if !h.needsHeal() {
		log.Printf("Zookeeper needs no cure, skipping...")
		return nil
	}
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing zookeeper")
	upStartCmd("stop", "zookeeper", bootstrapMachine.IpAddress)
	return upStartCmd("start", "zookeeper", bootstrapMachine.IpAddress)
}
// bootstrapProvisionHealer is an implementation for the Healer interface. For more
// details on how a healer works, check the documentation of the heal package.
type bootstrapProvisionHealer struct{}
// Heal starts the juju-provision-agent using upstart.
// Unlike the other healers, it runs unconditionally (no needsHeal check).
func (h bootstrapProvisionHealer) Heal() error {
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing bootstrap juju-provision-agent")
	return upStartCmd("start", "juju-provision-agent", bootstrapMachine.IpAddress)
}
// bootstrapMachineHealer is an implementation for the Healer interface. For more
// details on how a healer works, check the documentation of the heal package.
type bootstrapMachineHealer struct{}
// getBootstrapMachine returns the bootstrap machine.
// NOTE(review): the error from getOutput is ignored; on failure Machines is
// empty and the access below panics or yields a zero machine, depending on
// whether Machines is a slice or a map — confirm against JujuProvisioner.
func getBootstrapMachine() machine {
	p := JujuProvisioner{}
	output, _ := p.getOutput()
	// In juju the bootstrap machine is always machine 0.
	return output.Machines[0]
}
// needsHeal returns true if the AgentState of the bootstrap machine is
// "not-started", i.e. the machine agent never came up.
func (h bootstrapMachineHealer) needsHeal() bool {
	bootstrapMachine := getBootstrapMachine()
	return bootstrapMachine.AgentState == "not-started"
}
// upStartCmd runs an upstart command (e.g. "start"/"stop") for the given
// daemon on the given machine, over ssh as the ubuntu user. It returns the
// error from the ssh invocation, if any.
func upStartCmd(cmd, daemon, machine string) error {
	args := []string{
		"-o",
		"StrictHostKeyChecking no",
		"-q",
		"-l",
		"ubuntu",
		machine,
		"sudo",
		cmd,
		daemon,
	}
	// log.Print, not log.Printf: the joined args are data, not a format
	// string, and a literal '%' in them would be mangled by Printf.
	log.Print(strings.Join(args, " "))
	c := exec.Command("ssh", args...)
	return c.Run()
}
// Heal restarts (stop then start, via upstart) the bootstrap machine's
// juju-machine-agent when it never started; otherwise it does nothing.
func (h bootstrapMachineHealer) Heal() error {
	if !h.needsHeal() {
		log.Printf("Bootstrap juju-machine-agent needs no cure, skipping...")
		return nil
	}
	bootstrapMachine := getBootstrapMachine()
	log.Printf("Healing bootstrap juju-machine-agent")
	upStartCmd("stop", "juju-machine-agent", bootstrapMachine.IpAddress)
	return upStartCmd("start", "juju-machine-agent", bootstrapMachine.IpAddress)
}
// elbInstanceHealer replaces app units whose ELB instances are out of service.
type elbInstanceHealer struct{}
// Heal finds apps with units down or in error, asks ELB which of their load
// balancer instances are unhealthy, and replaces each unhealthy unit
// (remove it, then add a fresh one).
func (h elbInstanceHealer) Heal() error {
	apps := h.getUnhealthyApps()
	if len(apps) == 0 {
		log.Print("No app is down.")
		return nil
	}
	names := make([]string, 0, len(apps))
	for name := range apps {
		names = append(names, name)
	}
	instances, err := h.checkInstances(names)
	if err != nil || len(instances) == 0 {
		// Errors from checkInstances are deliberately not propagated.
		return nil
	}
	for _, inst := range instances {
		a := apps[inst.lb]
		if err := a.RemoveUnit(inst.id); err != nil {
			return err
		}
		if err := a.AddUnits(1); err != nil {
			return err
		}
	}
	return nil
}
// checkInstances returns the instances in the named load balancers that ELB
// reports as unhealthy. It is a no-op (nil, nil) unless juju:use-elb is
// enabled in the configuration.
func (h elbInstanceHealer) checkInstances(names []string) ([]elbInstance, error) {
	// Errors reading the flag are treated as "ELB disabled".
	if elbSupport, _ := config.GetBool("juju:use-elb"); !elbSupport {
		return nil, nil
	}
	lbs, err := h.describeLoadBalancers(names)
	if err != nil {
		return nil, err
	}
	var unhealthy []elbInstance
	// These three values match what ELB reports for an instance that failed
	// its health checks; only that exact combination is considered unhealthy.
	description := "Instance has failed at least the UnhealthyThreshold number of health checks consecutively."
	state := "OutOfService"
	reasonCode := "Instance"
	for _, lb := range lbs {
		instances, err := h.describeInstancesHealth(lb)
		if err != nil {
			return nil, err
		}
		for _, instance := range instances {
			if instance.description == description &&
				instance.state == state &&
				instance.reasonCode == reasonCode {
				unhealthy = append(unhealthy, instance)
			}
		}
	}
	log.Printf("Found %d unhealthy instances.", len(unhealthy))
	return unhealthy, nil
}
// getUnhealthyApps returns all apps that have at least one unit down or in
// error, keyed by app name. Database errors are swallowed and yield a nil
// map (best effort).
func (h elbInstanceHealer) getUnhealthyApps() map[string]app.App {
	conn, err := db.Conn()
	if err != nil {
		return nil
	}
	// NOTE(review): conn is never closed here — confirm db.Conn returns a
	// shared/pooled connection that callers must not close.
	var all []app.App
	apps := make(map[string]app.App)
	// Only the name and units fields are needed for the health check.
	s := map[string]interface{}{"name": 1, "units": 1}
	err = conn.Apps().Find(nil).Select(s).All(&all)
	if err != nil {
		return nil
	}
	for _, a := range all {
		for _, u := range a.ProvisionUnits() {
			if u.GetStatus() == provision.StatusDown ||
				u.GetStatus() == provision.StatusError {
				apps[a.Name] = a
				break // one bad unit is enough to flag the app
			}
		}
	}
	return apps
}
// describeLoadBalancers returns the names of the load balancers that ELB
// knows under the given names.
func (h elbInstanceHealer) describeLoadBalancers(names []string) ([]string, error) {
	resp, err := getELBEndpoint().DescribeLoadBalancers(names...)
	if err != nil {
		return nil, err
	}
	lbs := make([]string, 0, len(resp.LoadBalancerDescriptions))
	for _, desc := range resp.LoadBalancerDescriptions {
		lbs = append(lbs, desc.LoadBalancerName)
	}
	return lbs, nil
}
// describeInstancesHealth returns the health state of every instance
// registered in the given load balancer, tagged with the balancer name.
func (h elbInstanceHealer) describeInstancesHealth(lb string) ([]elbInstance, error) {
	resp, err := getELBEndpoint().DescribeInstanceHealth(lb)
	if err != nil {
		return nil, err
	}
	instances := make([]elbInstance, len(resp.InstanceStates))
	for i, st := range resp.InstanceStates {
		instances[i] = elbInstance{
			id:          st.InstanceId,
			description: st.Description,
			reasonCode:  st.ReasonCode,
			state:       st.State,
			lb:          lb,
		}
	}
	return instances, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.