repo_id
stringclasses
927 values
file_path
stringlengths
99
214
content
stringlengths
2
4.15M
bolt
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/boltdb/bolt/LICENSE
The MIT License (MIT) Copyright (c) 2013 Ben Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
bolt
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/boltdb/bolt/db.go
package bolt import ( "errors" "fmt" "hash/fnv" "log" "os" "runtime" "runtime/debug" "strings" "sync" "time" "unsafe" ) // The largest step that can be taken when remapping the mmap. const maxMmapStep = 1 << 30 // 1GB // The data file format version. const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes // must be synchronized using the msync(2) syscall. const IgnoreNoSync = runtime.GOOS == "openbsd" // Default values if not set in a DB instance. const ( DefaultMaxBatchSize int = 1000 DefaultMaxBatchDelay = 10 * time.Millisecond DefaultAllocSize = 16 * 1024 * 1024 ) // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. type DB struct { // When enabled, the database will perform a Check() after every commit. // A panic is issued if the database is in an inconsistent state. This // flag has a large performance impact so it should only be used for // debugging purposes. StrictMode bool // Setting the NoSync flag will cause the database to skip fsync() // calls after each commit. This can be useful when bulk loading data // into a database and you can restart the bulk load in the event of // a system failure or database corruption. Do not set this flag for // normal use. // // If the package global IgnoreNoSync constant is true, this value is // ignored. See the comment on that constant for more details. // // THIS IS UNSAFE. PLEASE USE WITH CAUTION. 
NoSync bool // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and // bypasses a truncate() and fsync() syscall on remapping. // // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool // If you want to read the entire database fast, you can set MmapFlag to // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. MmapFlags int // MaxBatchSize is the maximum size of a batch. Default value is // copied from DefaultMaxBatchSize in Open. // // If <=0, disables batching. // // Do not change concurrently with calls to Batch. MaxBatchSize int // MaxBatchDelay is the maximum delay before a batch starts. // Default value is copied from DefaultMaxBatchDelay in Open. // // If <=0, effectively disables batching. // // Do not change concurrently with calls to Batch. MaxBatchDelay time.Duration // AllocSize is the amount of space allocated when the database // needs to create new pages. This is done to amortize the cost // of truncate() and fsync() when growing the data file. AllocSize int path string file *os.File lockfile *os.File // windows only dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int filesz int // current on disk file size meta0 *meta meta1 *meta pageSize int opened bool rwtx *Tx txs []*Tx freelist *freelist stats Stats pagePool sync.Pool batchMu sync.Mutex batch *batch rwlock sync.Mutex // Allows only one writer at a time. metalock sync.Mutex // Protects meta page access. mmaplock sync.RWMutex // Protects mmap access during remapping. statlock sync.RWMutex // Protects stats access. ops struct { writeAt func(b []byte, off int64) (n int, err error) } // Read only mode. // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. readOnly bool } // Path returns the path to currently open database file. 
func (db *DB) Path() string { return db.path } // GoString returns the Go string representation of the database. func (db *DB) GoString() string { return fmt.Sprintf("bolt.DB{path:%q}", db.path) } // String returns the string representation of the database. func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } // Open creates and opens a database at the given path. // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { var db = &DB{opened: true} // Set default options if no options are provided. if options == nil { options = DefaultOptions } db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchDelay = DefaultMaxBatchDelay db.AllocSize = DefaultAllocSize flag := os.O_RDWR if options.ReadOnly { flag = os.O_RDONLY db.readOnly = true } // Open data file and separate sync handler for metadata writes. db.path = path var err error if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { _ = db.close() return nil, err } // Lock file so that other processes using Bolt in read-write mode cannot // use the database at the same time. This would cause corruption since // the two processes would write meta pages and free pages separately. // The database file is locked exclusively (only one process can grab the lock) // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { _ = db.close() return nil, err } // Default values for test hooks db.ops.writeAt = db.file.WriteAt // Initialize the database if it doesn't exist. 
if info, err := db.file.Stat(); err != nil { return nil, err } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { return nil, err } } else { // Read the first meta page to determine the page size. var buf [0x1000]byte if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { // If we can't read the page size, we can assume it's the same // as the OS -- since that's how the page size was chosen in the // first place. // // If the first page is invalid and this OS uses a different // page size than what the database was created with then we // are out of luck and cannot access the database. db.pageSize = os.Getpagesize() } else { db.pageSize = int(m.pageSize) } } } // Initialize page pool. db.pagePool = sync.Pool{ New: func() interface{} { return make([]byte, db.pageSize) }, } // Memory map the data file. if err := db.mmap(options.InitialMmapSize); err != nil { _ = db.close() return nil, err } // Read in the freelist. db.freelist = newFreelist() db.freelist.read(db.page(db.meta().freelist)) // Mark the database as opened and return. return db, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { db.mmaplock.Lock() defer db.mmaplock.Unlock() info, err := db.file.Stat() if err != nil { return fmt.Errorf("mmap stat error: %s", err) } else if int(info.Size()) < db.pageSize*2 { return fmt.Errorf("file size too small") } // Ensure the size is at least the minimum size. var size = int(info.Size()) if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { return err } // Dereference all mmap references before unmapping. if db.rwtx != nil { db.rwtx.root.dereference() } // Unmap existing data before continuing. if err := db.munmap(); err != nil { return err } // Memory-map the data file as a byte slice. 
if err := mmap(db, size); err != nil { return err } // Save references to the meta pages. db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. err0 := db.meta0.validate() err1 := db.meta1.validate() if err0 != nil && err1 != nil { return err0 } return nil } // munmap unmaps the data file from memory. func (db *DB) munmap() error { if err := munmap(db); err != nil { return fmt.Errorf("unmap error: " + err.Error()) } return nil } // mmapSize determines the appropriate size for the mmap given the current size // of the database. The minimum size is 32KB and doubles until it reaches 1GB. // Returns an error if the new mmap size is greater than the max allowed. func (db *DB) mmapSize(size int) (int, error) { // Double the size from 32KB until 1GB. for i := uint(15); i <= 30; i++ { if size <= 1<<i { return 1 << i, nil } } // Verify the requested size is not above the maximum allowed. if size > maxMapSize { return 0, fmt.Errorf("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) if remainder := sz % int64(maxMmapStep); remainder > 0 { sz += int64(maxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. // This should always be true since we're incrementing in MBs. pageSize := int64(db.pageSize) if (sz % pageSize) != 0 { sz = ((sz / pageSize) + 1) * pageSize } // If we've exceeded the max size then only grow up to the max size. if sz > maxMapSize { sz = maxMapSize } return int(sz), nil } // init creates a new database file and initializes its meta pages. func (db *DB) init() error { // Set the page size to the OS page size. db.pageSize = os.Getpagesize() // Create two meta pages on a buffer. 
buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { p := db.pageInBuffer(buf[:], pgid(i)) p.id = pgid(i) p.flags = metaPageFlag // Initialize the meta page. m := p.meta() m.magic = magic m.version = version m.pageSize = uint32(db.pageSize) m.freelist = 2 m.root = bucket{root: 3} m.pgid = 4 m.txid = txid(i) m.checksum = m.sum64() } // Write an empty freelist at page 3. p := db.pageInBuffer(buf[:], pgid(2)) p.id = pgid(2) p.flags = freelistPageFlag p.count = 0 // Write an empty leaf page at page 4. p = db.pageInBuffer(buf[:], pgid(3)) p.id = pgid(3) p.flags = leafPageFlag p.count = 0 // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { return err } if err := fdatasync(db); err != nil { return err } return nil } // Close releases all database resources. // All transactions must be closed before closing the database. func (db *DB) Close() error { db.rwlock.Lock() defer db.rwlock.Unlock() db.metalock.Lock() defer db.metalock.Unlock() db.mmaplock.RLock() defer db.mmaplock.RUnlock() return db.close() } func (db *DB) close() error { if !db.opened { return nil } db.opened = false db.freelist = nil // Clear ops. db.ops.writeAt = nil // Close the mmap. if err := db.munmap(); err != nil { return err } // Close file handles. if db.file != nil { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. if err := funlock(db); err != nil { log.Printf("bolt.Close(): funlock error: %s", err) } } // Close the file descriptor. if err := db.file.Close(); err != nil { return fmt.Errorf("db file close: %s", err) } db.file = nil } db.path = "" return nil } // Begin starts a new transaction. // Multiple read-only transactions can be used concurrently but only one // write transaction can be used at a time. Starting multiple write transactions // will cause the calls to block and be serialized until the current write // transaction finishes. // // Transactions should not be dependent on one another. 
Opening a read // transaction and a write transaction in the same goroutine can cause the // writer to deadlock because the database periodically needs to re-mmap itself // as it grows and it cannot do that while a read transaction is open. // // If a long running read transaction (for example, a snapshot transaction) is // needed, you might want to set DB.InitialMmapSize to a large enough value // to avoid potential blocking of write transaction. // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. func (db *DB) Begin(writable bool) (*Tx, error) { if writable { return db.beginRWTx() } return db.beginTx() } func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the // write transaction will obtain them. db.metalock.Lock() // Obtain a read-only lock on the mmap. When the mmap is remapped it will // obtain a write lock so all transactions must finish before it can be // remapped. db.mmaplock.RLock() // Exit if the database is not open yet. if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() return nil, ErrDatabaseNotOpen } // Create a transaction associated with the database. t := &Tx{} t.init(db) // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) // Unlock the meta pages. db.metalock.Unlock() // Update the transaction stats. db.statlock.Lock() db.stats.TxN++ db.stats.OpenTxN = n db.statlock.Unlock() return t, nil } func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { return nil, ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. // This enforces only one writer transaction at a time. db.rwlock.Lock() // Once we have the writer lock then we can lock the meta pages so that // we can set up the transaction. 
db.metalock.Lock() defer db.metalock.Unlock() // Exit if the database is not open yet. if !db.opened { db.rwlock.Unlock() return nil, ErrDatabaseNotOpen } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t // Free any pages associated with closed read-only transactions. var minid txid = 0xFFFFFFFFFFFFFFFF for _, t := range db.txs { if t.meta.txid < minid { minid = t.meta.txid } } if minid > 0 { db.freelist.release(minid - 1) } return t, nil } // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. db.mmaplock.RUnlock() // Use the meta lock to restrict access to the DB object. db.metalock.Lock() // Remove the transaction. for i, t := range db.txs { if t == tx { last := len(db.txs) - 1 db.txs[i] = db.txs[last] db.txs[last] = nil db.txs = db.txs[:last] break } } n := len(db.txs) // Unlock the meta pages. db.metalock.Unlock() // Merge statistics. db.statlock.Lock() db.stats.OpenTxN = n db.stats.TxStats.add(&tx.stats) db.statlock.Unlock() } // Update executes a function within the context of a read-write managed transaction. // If no error is returned from the function then the transaction is committed. // If an error is returned then the entire transaction is rolled back. // Any error that is returned from the function or returned from the commit is // returned from the Update() method. // // Attempting to manually commit or rollback within the function will cause a panic. func (db *DB) Update(fn func(*Tx) error) error { t, err := db.Begin(true) if err != nil { return err } // Make sure the transaction rolls back in the event of a panic. defer func() { if t.db != nil { t.rollback() } }() // Mark as a managed tx so that the inner function cannot manually commit. t.managed = true // If an error is returned from the function then rollback and return error. 
err = fn(t) t.managed = false if err != nil { _ = t.Rollback() return err } return t.Commit() } // View executes a function within the context of a managed read-only transaction. // Any error that is returned from the function is returned from the View() method. // // Attempting to manually rollback within the function will cause a panic. func (db *DB) View(fn func(*Tx) error) error { t, err := db.Begin(false) if err != nil { return err } // Make sure the transaction rolls back in the event of a panic. defer func() { if t.db != nil { t.rollback() } }() // Mark as a managed tx so that the inner function cannot manually rollback. t.managed = true // If an error is returned from the function then pass it through. err = fn(t) t.managed = false if err != nil { _ = t.Rollback() return err } if err := t.Rollback(); err != nil { return err } return nil } // Batch calls fn as part of a batch. It behaves similar to Update, // except: // // 1. concurrent Batch calls can be combined into a single Bolt // transaction. // // 2. the function passed to Batch may be called multiple times, // regardless of whether it returns error or not. // // This means that Batch function side effects must be idempotent and // take permanent effect only after a successful return is seen in // caller. // // The maximum batch size and delay can be adjusted with DB.MaxBatchSize // and DB.MaxBatchDelay, respectively. // // Batch is only useful when there are multiple goroutines calling it. func (db *DB) Batch(fn func(*Tx) error) error { errCh := make(chan error, 1) db.batchMu.Lock() if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { // There is no existing batch, or the existing batch is full; start a new one. 
db.batch = &batch{ db: db, } db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) } db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) if len(db.batch.calls) >= db.MaxBatchSize { // wake up batch, it's ready to run go db.batch.trigger() } db.batchMu.Unlock() err := <-errCh if err == trySolo { err = db.Update(fn) } return err } type call struct { fn func(*Tx) error err chan<- error } type batch struct { db *DB timer *time.Timer start sync.Once calls []call } // trigger runs the batch if it hasn't already been run. func (b *batch) trigger() { b.start.Do(b.run) } // run performs the transactions in the batch and communicates results // back to DB.Batch. func (b *batch) run() { b.db.batchMu.Lock() b.timer.Stop() // Make sure no new work is added to this batch, but don't break // other batches. if b.db.batch == b { b.db.batch = nil } b.db.batchMu.Unlock() retry: for len(b.calls) > 0 { var failIdx = -1 err := b.db.Update(func(tx *Tx) error { for i, c := range b.calls { if err := safelyCall(c.fn, tx); err != nil { failIdx = i return err } } return nil }) if failIdx >= 0 { // take the failing transaction out of the batch. it's // safe to shorten b.calls here because db.batch no longer // points to us, and we hold the mutex anyway. c := b.calls[failIdx] b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] // tell the submitter re-run it solo, continue with the rest of the batch c.err <- trySolo continue retry } // pass success, or bolt internal errors, to all callers for _, c := range b.calls { if c.err != nil { c.err <- err } } break retry } } // trySolo is a special sentinel error value used for signaling that a // transaction function should be re-run. It should never be seen by // callers. 
var trySolo = errors.New("batch function returned an error and should be re-run solo") type panicked struct { reason interface{} } func (p panicked) Error() string { if err, ok := p.reason.(error); ok { return err.Error() } return fmt.Sprintf("panic: %v", p.reason) } func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { defer func() { if p := recover(); p != nil { err = panicked{p} } }() return fn(tx) } // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. func (db *DB) Sync() error { return fdatasync(db) } // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. func (db *DB) Stats() Stats { db.statlock.RLock() defer db.statlock.RUnlock() return db.stats } // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. func (db *DB) page(id pgid) *page { pos := id * pgid(db.pageSize) return (*page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. func (db *DB) pageInBuffer(b []byte, id pgid) *page { return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) } // meta retrieves the current meta page reference. func (db *DB) meta() *meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 if db.meta1.txid > db.meta0.txid { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise fallback to previous, if valid. 
if err := metaA.validate(); err == nil { return metaA } else if err := metaB.validate(); err == nil { return metaB } // This should never be reached, because both meta1 and meta0 were validated // on mmap() and we do fsync() on every write. panic("bolt.DB.meta(): invalid meta pages") } // allocate returns a contiguous block of memory starting at a given page. func (db *DB) allocate(count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { buf = db.pagePool.Get().([]byte) } else { buf = make([]byte, count*db.pageSize) } p := (*page)(unsafe.Pointer(&buf[0])) p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. if p.id = db.freelist.allocate(count); p.id != 0 { return p, nil } // Resize mmap() if we're at the end. p.id = db.rwtx.meta.pgid var minsz = int((p.id+pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) } } // Move the page id high water mark. db.rwtx.meta.pgid += pgid(count) return p, nil } // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. if sz <= db.filesz { return nil } // If the data is smaller than the alloc size then only allocate what's needed. // Once it goes over the allocation size then allocate in chunks. if db.datasz < db.AllocSize { sz = db.datasz } else { sz += db.AllocSize } // Truncate and fsync to ensure file size metadata is flushed. 
// https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if runtime.GOOS != "windows" { if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } } db.filesz = sz return nil } func (db *DB) IsReadOnly() bool { return db.readOnly } // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. // When set to zero it will wait indefinitely. This option is only // available on Darwin and Linux. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool // Sets the DB.MmapFlags flag before memory mapping the file. MmapFlags int // InitialMmapSize is the initial mmap size of the database // in bytes. Read transactions won't block write transaction // if the InitialMmapSize is large enough to hold database mmap // size. (See DB.Begin for more information) // // If <=0, the initial map size is 0. // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int } // DefaultOptions represent the options used if nil options are passed into Open(). // No timeout is used which will cause Bolt to wait indefinitely for a lock. var DefaultOptions = &Options{ Timeout: 0, NoGrowSync: false, } // Stats represents statistics about the database. 
type Stats struct { // Freelist stats FreePageN int // total number of free pages on the freelist PendingPageN int // total number of pending pages on the freelist FreeAlloc int // total bytes allocated in free pages FreelistInuse int // total bytes used by the freelist // Transaction stats TxN int // total number of started read transactions OpenTxN int // number of currently open read transactions TxStats TxStats // global, ongoing stats. } // Sub calculates and returns the difference between two sets of database stats. // This is useful when obtaining stats at two different points and time and // you need the performance counters that occurred within that time span. func (s *Stats) Sub(other *Stats) Stats { if other == nil { return *s } var diff Stats diff.FreePageN = s.FreePageN diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } func (s *Stats) add(other *Stats) { s.TxStats.add(&other.TxStats) } type Info struct { Data uintptr PageSize int } type meta struct { magic uint32 version uint32 pageSize uint32 flags uint32 root bucket freelist pgid pgid pgid txid txid checksum uint64 } // validate checks the marker bytes and version of the meta page to ensure it matches this binary. func (m *meta) validate() error { if m.magic != magic { return ErrInvalid } else if m.version != version { return ErrVersionMismatch } else if m.checksum != 0 && m.checksum != m.sum64() { return ErrChecksum } return nil } // copy copies one meta object to another. func (m *meta) copy(dest *meta) { *dest = *m } // write writes the meta onto a page. 
func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) } else if m.freelist >= m.pgid { panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } // Page id is either going to be 0 or 1 which we can determine by the transaction ID. p.id = pgid(m.txid % 2) p.flags |= metaPageFlag // Calculate the checksum. m.checksum = m.sum64() m.copy(p.meta()) } // generates the checksum for the meta. func (m *meta) sum64() uint64 { var h = fnv.New64a() _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) return h.Sum64() } // _assert will panic with a given formatted message if the given condition is false. func _assert(condition bool, msg string, v ...interface{}) { if !condition { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } func printstack() { stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") fmt.Fprintln(os.Stderr, stack) }
lockfile
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/nightlyone/lockfile/lockfile.go
// Package lockfile handles pid file based locking.
// While a sync.Mutex helps against concurrency issues within a single process,
// this package is designed to help against concurrency issues between cooperating processes
// or serializing multiple invocations of the same process. You can also combine sync.Mutex
// with Lockfile in order to serialize an action between different goroutines in a single program
// and also multiple invocations of this program.
package lockfile

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// Lockfile is a pid file which can be locked.
type Lockfile string

// TemporaryError is a type of error where a retry after a random amount of sleep should help to mitigate it.
type TemporaryError string

func (t TemporaryError) Error() string { return string(t) }

// Temporary returns always true.
// It exists, so you can detect it via
//	if te, ok := err.(interface{ Temporary() bool }); ok {
//		fmt.Println("I am a temporary error situation, so wait and retry")
//	}
func (t TemporaryError) Temporary() bool { return true }

// Various errors returned by this package.
var (
	ErrBusy          = TemporaryError("Locked by other process")             // If you get this, retry after a short sleep might help
	ErrNotExist      = TemporaryError("Lockfile created, but doesn't exist") // If you get this, retry after a short sleep might help
	ErrNeedAbsPath   = errors.New("Lockfiles must be given as absolute path names")
	ErrInvalidPid    = errors.New("Lockfile contains invalid pid for system")
	ErrDeadOwner     = errors.New("Lockfile contains pid of process not existent on this system anymore")
	ErrRogueDeletion = errors.New("Lockfile owned by me has been removed unexpectedly")
)

// New describes a new filename located at the given absolute path.
// It returns ErrNeedAbsPath when path is not absolute. No file is created yet;
// that happens in TryLock.
func New(path string) (Lockfile, error) {
	if !filepath.IsAbs(path) {
		return Lockfile(""), ErrNeedAbsPath
	}
	return Lockfile(path), nil
}

// GetOwner returns who owns the lockfile.
// It returns ErrDeadOwner when the recorded pid no longer refers to a running
// process, and ErrInvalidPid when the file content is not a usable pid line.
func (l Lockfile) GetOwner() (*os.Process, error) {
	name := string(l)

	// Ok, see, if we have a stale lockfile here
	content, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}

	// try hard for pids. If no pid, the lockfile is junk anyway and we delete it.
	pid, err := scanPidLine(content)
	if err != nil {
		return nil, err
	}
	running, err := isRunning(pid)
	if err != nil {
		return nil, err
	}

	if running {
		proc, err := os.FindProcess(pid)
		if err != nil {
			return nil, err
		}
		return proc, nil
	}
	return nil, ErrDeadOwner
}

// TryLock tries to own the lock.
// It returns nil if successful, and an error describing why it didn't work
// out otherwise.
// Please note, that existing lockfiles containing pids of dead processes
// and lockfiles containing no pid at all are simply deleted.
func (l Lockfile) TryLock() error {
	name := string(l)

	// This has been checked by New already. If we trigger here, the caller
	// didn't use New and re-implemented its functionality badly. So panic,
	// so that they find this easily during testing.
	if !filepath.IsAbs(name) {
		panic(ErrNeedAbsPath)
	}

	// Write our pid into a unique temp file in the same directory, then try
	// to hard-link it to the real lockfile name below. link(2) is atomic, so
	// at most one contender can win the name.
	tmplock, err := ioutil.TempFile(filepath.Dir(name), filepath.Base(name)+".")
	if err != nil {
		return err
	}

	cleanup := func() {
		_ = tmplock.Close()
		_ = os.Remove(tmplock.Name())
	}
	defer cleanup()

	if err := writePidLine(tmplock, os.Getpid()); err != nil {
		return err
	}

	// return value intentionally ignored, as ignoring it is part of the algorithm
	_ = os.Link(tmplock.Name(), name)

	fiTmp, err := os.Lstat(tmplock.Name())
	if err != nil {
		return err
	}
	fiLock, err := os.Lstat(name)
	if err != nil {
		// tell user that a retry would be a good idea
		if os.IsNotExist(err) {
			return ErrNotExist
		}
		return err
	}

	// Success: the link succeeded, so the lockfile is our temp file's inode.
	if os.SameFile(fiTmp, fiLock) {
		return nil
	}

	// The lockfile already existed; find out whose it is.
	proc, err := l.GetOwner()
	switch err {
	default:
		// Other errors -> defensively fail and let caller handle this
		return err
	case nil:
		if proc.Pid != os.Getpid() {
			return ErrBusy
		}
	case ErrDeadOwner, ErrInvalidPid:
		// cases we can fix below
	}

	// clean stale/invalid lockfile
	err = os.Remove(name)
	if err != nil {
		// If it doesn't exist, then it doesn't matter who removed it.
		if !os.IsNotExist(err) {
			return err
		}
	}

	// now that the stale lockfile is gone, let's recurse
	return l.TryLock()
}

// Unlock a lock again, if we owned it. Returns any error that happened during release of lock.
func (l Lockfile) Unlock() error {
	proc, err := l.GetOwner()
	switch err {
	case ErrInvalidPid, ErrDeadOwner:
		return ErrRogueDeletion
	case nil:
		if proc.Pid == os.Getpid() {
			// we really own it, so let's remove it.
			return os.Remove(string(l))
		}
		// Not owned by me, so don't delete it.
		return ErrRogueDeletion
	default:
		// This is an application error or system error.
		// So give a better error for logging here.
		if os.IsNotExist(err) {
			return ErrRogueDeletion
		}
		// Other errors -> defensively fail and let caller handle this
		return err
	}
}

// writePidLine writes pid followed by a newline to w.
func writePidLine(w io.Writer, pid int) error {
	_, err := io.WriteString(w, fmt.Sprintf("%d\n", pid))
	return err
}

// scanPidLine parses a pid from the start of content. It returns
// ErrInvalidPid for empty content, unparsable content, or a non-positive pid.
func scanPidLine(content []byte) (int, error) {
	if len(content) == 0 {
		return 0, ErrInvalidPid
	}

	var pid int
	if _, err := fmt.Sscanln(string(content), &pid); err != nil {
		return 0, ErrInvalidPid
	}

	if pid <= 0 {
		return 0, ErrInvalidPid
	}
	return pid, nil
}
lockfile
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/nightlyone/lockfile/lockfile_windows.go
package lockfile import ( "syscall" ) //For some reason these consts don't exist in syscall. const ( error_invalid_parameter = 87 code_still_active = 259 ) func isRunning(pid int) (bool, error) { procHnd, err := syscall.OpenProcess(syscall.PROCESS_QUERY_INFORMATION, true, uint32(pid)) if err != nil { if scerr, ok := err.(syscall.Errno); ok { if uintptr(scerr) == error_invalid_parameter { return false, nil } } } var code uint32 err = syscall.GetExitCodeProcess(procHnd, &code) if err != nil { return false, err } return code == code_still_active, nil }
lockfile
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/nightlyone/lockfile/lockfile_unix.go
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris package lockfile import ( "os" "syscall" ) func isRunning(pid int) (bool, error) { proc, err := os.FindProcess(pid) if err != nil { return false, err } if err := proc.Signal(syscall.Signal(0)); err != nil { return false, nil } return true, nil }
lockfile
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/nightlyone/lockfile/LICENSE
Copyright (c) 2012 Ingo Oeser Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
constext
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/sdboyer/constext/constext.go
// Package constext provides facilities for pairing contexts together so that
// they behave as one.
package constext

import (
	"context"
	"sync"
	"time"
)

// constext implements context.Context by conjoining two parent contexts
// ("car" and "cdr", Lisp-style: first and second).
type constext struct {
	car, cdr context.Context
	done     chan struct{} // chan closed on cancelFunc() call, or parent done
	once     sync.Once     // protects cancel func
	mu       sync.Mutex    // protects timer and err
	err      error         // err set on cancel or timeout
}

// Cons takes two Contexts and combines them into a pair, conjoining their
// behavior:
//
//  - If either parent context is canceled, the constext is canceled. The err is
//  set to whatever the err of the parent that was canceled.
//  - If either parent has a deadline, the constext uses that same deadline. If
//  both have a deadline, it uses the sooner/lesser one.
//  - Values from both parents are unioned together. When a key is present in
//  both parent trees, the left (first) context supersedes the right (second).
//
// All the normal context.With*() funcs should incorporate constexts correctly.
//
// If the two parent contexts both return a nil channel from Done() (which can
// occur if both parents are Background, or were created only through
// context.WithValue()), then the returned cancelFunc() is a no-op; calling it
// will NOT result in the termination of any sub-contexts later created.
func Cons(l, r context.Context) (context.Context, context.CancelFunc) {
	cc := &constext{
		car:  l,
		cdr:  r,
		done: make(chan struct{}),
	}

	if cc.car.Done() == nil && cc.cdr.Done() == nil {
		// Both parents are un-cancelable, so it's more technically correct to
		// return a no-op func here.
		return cc, func() {}
	}

	// If either parent is already dead, propagate its error immediately
	// instead of spawning a watcher goroutine.
	if cc.car.Err() != nil {
		cc.cancel(cc.car.Err())
		return cc, func() {}
	}
	if cc.cdr.Err() != nil {
		cc.cancel(cc.cdr.Err())
		return cc, func() {}
	}

	// Watch both parents; the first one to finish cancels the pair.
	go func() {
		select {
		case <-cc.car.Done():
			cc.cancel(cc.car.Err())
		case <-cc.cdr.Done():
			cc.cancel(cc.cdr.Err())
		case <-cc.done:
			// Ensure the goroutine dies when canceled
		}
	}()

	return cc, func() { cc.cancel(context.Canceled) }
}

// cancel records err as this context's error exactly once (guarded by
// cc.once) and closes the done channel. err must be non-nil.
func (cc *constext) cancel(err error) {
	cc.once.Do(func() {
		if err == nil {
			panic("constext: internal error: missing cancel error")
		}

		cc.mu.Lock()
		if cc.err == nil {
			cc.err = err
			close(cc.done)
		}
		cc.mu.Unlock()
	})
}

// Deadline returns the sooner of the two parents' deadlines, if either has one.
func (cc *constext) Deadline() (time.Time, bool) {
	hdeadline, hok := cc.car.Deadline()
	tdeadline, tok := cc.cdr.Deadline()
	if !hok && !tok {
		return time.Time{}, false
	}
	if hok && !tok {
		return hdeadline, true
	}
	if !hok && tok {
		return tdeadline, true
	}

	if hdeadline.Before(tdeadline) {
		return hdeadline, true
	}
	return tdeadline, true
}

// Done returns the pair's own done channel (closed on cancel or parent done).
func (cc *constext) Done() <-chan struct{} {
	return cc.done
}

// Err returns the recorded cancellation error, if any.
func (cc *constext) Err() error {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.err
}

// Value looks the key up in the left parent first, falling back to the right.
// NOTE(review): a nil value stored in the left tree cannot shadow a non-nil
// value in the right tree, since nil is used as the "not found" sentinel.
func (cc *constext) Value(key interface{}) interface{} {
	v := cc.car.Value(key)
	if v != nil {
		return v
	}
	return cc.cdr.Value(key)
}
constext
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/sdboyer/constext/LICENSE
The MIT License (MIT) Copyright (c) 2017 Sam Boyer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/LICENSE.txt
The Masterminds Copyright (C) 2014-2015, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/repo.go
// Package vcs provides the ability to work with varying version control systems
// (VCS), also known as source control systems (SCM) through the same interface.
//
// This package includes a function that attempts to detect the repo type from
// the remote URL and return the proper type. For example,
//
//     remote := "https://github.com/Masterminds/vcs"
//     local, _ := ioutil.TempDir("", "go-vcs")
//     repo, err := NewRepo(remote, local)
//
// In this case repo will be a GitRepo instance. NewRepo can detect the VCS for
// numerous popular VCS and from the URL. For example, a URL ending in .git
// that's not from one of the popular VCS will be detected as a Git repo and
// the correct type will be returned.
//
// If you know the repository type and would like to create an instance of a
// specific type you can use one of constructors for a type. They are NewGitRepo,
// NewSvnRepo, NewBzrRepo, and NewHgRepo. The definition and usage is the same
// as NewRepo.
//
// Once you have an object implementing the Repo interface the operations are
// the same no matter which VCS you're using. There are some caveats. For
// example, each VCS has its own version formats that need to be respected and
// checking out branches, if a branch is being worked with, is different in
// each VCS.
package vcs

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"regexp"
	"strings"
	"time"
)

// Logger is where you can provide a logger, implementing the log.Logger interface,
// where verbose output from each VCS will be written. The default logger does
// not log data. To log data supply your own logger or change the output location
// of the provided logger.
var Logger *log.Logger

func init() {
	// Initialize the logger to one that does not actually log anywhere. This is
	// to be overridden by the package user by setting vcs.Logger to a different
	// logger.
	Logger = log.New(ioutil.Discard, "go-vcs", log.LstdFlags)
}

// longForm is the timestamp layout used when parsing commit dates.
const longForm = "2006-01-02 15:04:05 -0700"

// Type describes the type of VCS
type Type string

// VCS types
const (
	NoVCS Type = ""
	Git   Type = "git"
	Svn   Type = "svn"
	Bzr   Type = "bzr"
	Hg    Type = "hg"
)

// Repo provides an interface to work with repositories using different source
// control systems such as Git, Bzr, Mercurial, and SVN. For implementations
// of this interface see BzrRepo, GitRepo, HgRepo, and SvnRepo.
type Repo interface {

	// Vcs retrieves the underlying VCS being implemented.
	Vcs() Type

	// Remote retrieves the remote location for a repo.
	Remote() string

	// LocalPath retrieves the local file system location for a repo.
	LocalPath() string

	// Get is used to perform an initial clone/checkout of a repository.
	Get() error

	// Initializes a new repository locally.
	Init() error

	// Update performs an update to an existing checkout of a repository.
	Update() error

	// UpdateVersion sets the version of a package of a repository.
	UpdateVersion(string) error

	// Version retrieves the current version.
	Version() (string, error)

	// Current retrieves the current version-ish. This is different from the
	// Version method. The output could be a branch name if on the tip of a
	// branch (git), a tag if on a tag, a revision if on a specific revision
	// that's not the tip of the branch. The values here vary based on the VCS.
	Current() (string, error)

	// Date retrieves the date on the latest commit.
	Date() (time.Time, error)

	// CheckLocal verifies the local location is of the correct VCS type
	CheckLocal() bool

	// Branches returns a list of available branches on the repository.
	Branches() ([]string, error)

	// Tags returns a list of available tags on the repository.
	Tags() ([]string, error)

	// IsReference returns if a string is a reference. A reference can be a
	// commit id, branch, or tag.
	IsReference(string) bool

	// IsDirty returns if the checkout has been modified from the checked
	// out reference.
	IsDirty() bool

	// CommitInfo retrieves metadata about a commit.
	CommitInfo(string) (*CommitInfo, error)

	// TagsFromCommit retrieves tags from a commit id.
	TagsFromCommit(string) ([]string, error)

	// Ping returns if remote location is accessible.
	Ping() bool

	// RunFromDir executes a command from repo's directory.
	RunFromDir(cmd string, args ...string) ([]byte, error)

	// CmdFromDir creates a new command that will be executed from repo's
	// directory.
	CmdFromDir(cmd string, args ...string) *exec.Cmd

	// ExportDir exports the current revision to the passed in directory.
	ExportDir(string) error
}

// NewRepo returns a Repo based on trying to detect the source control from the
// remote and local locations. The appropriate implementation will be returned
// or an ErrCannotDetectVCS if the VCS type cannot be detected.
// Note, this function may make calls to the Internet to help determine
// the VCS.
func NewRepo(remote, local string) (Repo, error) {
	vtype, remote, err := detectVcsFromRemote(remote)

	// From the remote URL the VCS could not be detected. See if the local
	// repo contains enough information to figure out the VCS. The reason the
	// local repo is not checked first is because of the potential for VCS type
	// switches which will be detected in each of the type builders.
	if err == ErrCannotDetectVCS {
		vtype, err = DetectVcsFromFS(local)
	}
	if err != nil {
		return nil, err
	}

	switch vtype {
	case Git:
		return NewGitRepo(remote, local)
	case Svn:
		return NewSvnRepo(remote, local)
	case Hg:
		return NewHgRepo(remote, local)
	case Bzr:
		return NewBzrRepo(remote, local)
	}

	// Should never fall through to here but just in case.
	return nil, ErrCannotDetectVCS
}

// CommitInfo contains metadata about a commit.
type CommitInfo struct { // The commit id Commit string // Who authored the commit Author string // Date of the commit Date time.Time // Commit message Message string } type base struct { remote, local string Logger *log.Logger } func (b *base) log(v interface{}) { b.Logger.Printf("%s", v) } // Remote retrieves the remote location for a repo. func (b *base) Remote() string { return b.remote } // LocalPath retrieves the local file system location for a repo. func (b *base) LocalPath() string { return b.local } func (b *base) setRemote(remote string) { b.remote = remote } func (b *base) setLocalPath(local string) { b.local = local } func (b base) run(cmd string, args ...string) ([]byte, error) { out, err := exec.Command(cmd, args...).CombinedOutput() b.log(out) if err != nil { err = fmt.Errorf("%s: %s", out, err) } return out, err } func (b *base) CmdFromDir(cmd string, args ...string) *exec.Cmd { c := exec.Command(cmd, args...) c.Dir = b.local c.Env = envForDir(c.Dir) return c } func (b *base) RunFromDir(cmd string, args ...string) ([]byte, error) { c := b.CmdFromDir(cmd, args...) out, err := c.CombinedOutput() return out, err } func (b *base) referenceList(c, r string) []string { var out []string re := regexp.MustCompile(r) for _, m := range re.FindAllStringSubmatch(c, -1) { out = append(out, m[1]) } return out } func envForDir(dir string) []string { env := os.Environ() return mergeEnvLists([]string{"PWD=" + dir}, env) } func mergeEnvLists(in, out []string) []string { NextVar: for _, inkv := range in { k := strings.SplitAfterN(inkv, "=", 2)[0] for i, outkv := range out { if strings.HasPrefix(outkv, k) { out[i] = inkv continue NextVar } } out = append(out, inkv) } return out } func depInstalled(name string) bool { if _, err := exec.LookPath(name); err != nil { return false } return true }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/vcs_local_lookup.go
package vcs import ( "os" "runtime" "strings" ) // DetectVcsFromFS detects the type from the local path. // Is there a better way to do this? func DetectVcsFromFS(vcsPath string) (Type, error) { // There are cases under windows that a path could start with a / and it needs // to be stripped. For example, a path such as /C:\foio\bar. if runtime.GOOS == "windows" && strings.HasPrefix(vcsPath, "/") { vcsPath = strings.TrimPrefix(vcsPath, "/") } // When the local directory to the package doesn't exist // it's not yet downloaded so we can't detect the type // locally. if _, err := os.Stat(vcsPath); os.IsNotExist(err) { return "", ErrCannotDetectVCS } separator := string(os.PathSeparator) // Walk through each of the different VCS types to see if // one can be detected. Do this is order of guessed popularity. if _, err := os.Stat(vcsPath + separator + ".git"); err == nil { return Git, nil } if _, err := os.Stat(vcsPath + separator + ".svn"); err == nil { return Svn, nil } if _, err := os.Stat(vcsPath + separator + ".hg"); err == nil { return Hg, nil } if _, err := os.Stat(vcsPath + separator + ".bzr"); err == nil { return Bzr, nil } // If one was not already detected than we default to not finding it. return "", ErrCannotDetectVCS }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/bzr.go
package vcs import ( "fmt" "net/url" "os" "os/exec" "path/filepath" "regexp" "strings" "time" ) var bzrDetectURL = regexp.MustCompile("parent branch: (?P<foo>.+)\n") // NewBzrRepo creates a new instance of BzrRepo. The remote and local directories // need to be passed in. func NewBzrRepo(remote, local string) (*BzrRepo, error) { ins := depInstalled("bzr") if !ins { return nil, NewLocalError("bzr is not installed", nil, "") } ltype, err := DetectVcsFromFS(local) // Found a VCS other than Bzr. Need to report an error. if err == nil && ltype != Bzr { return nil, ErrWrongVCS } r := &BzrRepo{} r.setRemote(remote) r.setLocalPath(local) r.Logger = Logger // With the other VCS we can check if the endpoint locally is different // from the one configured internally. But, with Bzr you can't. For example, // if you do `bzr branch https://launchpad.net/govcstestbzrrepo` and then // use `bzr info` to get the parent branch you'll find it set to // http://bazaar.launchpad.net/~mattfarina/govcstestbzrrepo/trunk/. Notice // the change from https to http and the path chance. // Here we set the remote to be the local one if none is passed in. if err == nil && r.CheckLocal() && remote == "" { c := exec.Command("bzr", "info") c.Dir = local c.Env = envForDir(c.Dir) out, err := c.CombinedOutput() if err != nil { return nil, NewLocalError("Unable to retrieve local repo information", err, string(out)) } m := bzrDetectURL.FindStringSubmatch(string(out)) // If no remote was passed in but one is configured for the locally // checked out Bzr repo use that one. if m[1] != "" { r.setRemote(m[1]) } } return r, nil } // BzrRepo implements the Repo interface for the Bzr source control. type BzrRepo struct { base } // Vcs retrieves the underlying VCS being implemented. func (s BzrRepo) Vcs() Type { return Bzr } // Get is used to perform an initial clone of a repository. 
func (s *BzrRepo) Get() error { basePath := filepath.Dir(filepath.FromSlash(s.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return NewLocalError("Unable to create directory", err, "") } } out, err := s.run("bzr", "branch", s.Remote(), s.LocalPath()) if err != nil { return NewRemoteError("Unable to get repository", err, string(out)) } return nil } // Init initializes a bazaar repository at local location. func (s *BzrRepo) Init() error { out, err := s.run("bzr", "init", s.LocalPath()) // There are some windows cases where bazaar cannot create the parent // directory if it does not already exist, to the location it's trying // to create the repo. Catch that error and try to handle it. if err != nil && s.isUnableToCreateDir(err) { basePath := filepath.Dir(filepath.FromSlash(s.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return NewLocalError("Unable to initialize repository", err, "") } out, err = s.run("bzr", "init", s.LocalPath()) if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } } else if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } // Update performs a Bzr pull and update to an existing checkout. func (s *BzrRepo) Update() error { out, err := s.RunFromDir("bzr", "pull") if err != nil { return NewRemoteError("Unable to update repository", err, string(out)) } out, err = s.RunFromDir("bzr", "update") if err != nil { return NewRemoteError("Unable to update repository", err, string(out)) } return nil } // UpdateVersion sets the version of a package currently checked out via Bzr. 
func (s *BzrRepo) UpdateVersion(version string) error { out, err := s.RunFromDir("bzr", "update", "-r", version) if err != nil { return NewLocalError("Unable to update checked out version", err, string(out)) } return nil } // Version retrieves the current version. func (s *BzrRepo) Version() (string, error) { out, err := s.RunFromDir("bzr", "revno", "--tree") if err != nil { return "", NewLocalError("Unable to retrieve checked out version", err, string(out)) } return strings.TrimSpace(string(out)), nil } // Current returns the current version-ish. This means: // * -1 if on the tip of the branch (this is the Bzr value for HEAD) // * A tag if on a tag // * Otherwise a revision func (s *BzrRepo) Current() (string, error) { tip, err := s.CommitInfo("-1") if err != nil { return "", err } curr, err := s.Version() if err != nil { return "", err } if tip.Commit == curr { return "-1", nil } ts, err := s.TagsFromCommit(curr) if err != nil { return "", err } if len(ts) > 0 { return ts[0], nil } return curr, nil } // Date retrieves the date on the latest commit. func (s *BzrRepo) Date() (time.Time, error) { out, err := s.RunFromDir("bzr", "version-info", "--custom", "--template={date}") if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } t, err := time.Parse(longForm, string(out)) if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } return t, nil } // CheckLocal verifies the local location is a Bzr repo. func (s *BzrRepo) CheckLocal() bool { if _, err := os.Stat(s.LocalPath() + "/.bzr"); err == nil { return true } return false } // Branches returns a list of available branches on the repository. // In Bazaar (Bzr) clones and branches are the same. A different branch will // have a different URL location which we cannot detect from the repo. This // is a little different from other VCS. 
func (s *BzrRepo) Branches() ([]string, error) {
	var branches []string
	return branches, nil
}

// Tags returns a list of available tags on the repository.
func (s *BzrRepo) Tags() ([]string, error) {
	out, err := s.RunFromDir("bzr", "tags")
	if err != nil {
		return []string{}, NewLocalError("Unable to retrieve tags", err, string(out))
	}

	// Each line of `bzr tags` output starts with the tag name.
	tags := s.referenceList(string(out), `(?m-s)^(\S+)`)
	return tags, nil
}

// IsReference returns if a string is a reference. A reference can be a
// commit id or tag.
func (s *BzrRepo) IsReference(r string) bool {
	_, err := s.RunFromDir("bzr", "revno", "-r", r)
	return err == nil
}

// IsDirty returns if the checkout has been modified from the checked
// out reference.
func (s *BzrRepo) IsDirty() bool {
	out, err := s.RunFromDir("bzr", "diff")
	return err != nil || len(out) != 0
}

// CommitInfo retrieves metadata about a commit.
func (s *BzrRepo) CommitInfo(id string) (*CommitInfo, error) {
	r := "-r" + id
	out, err := s.RunFromDir("bzr", "log", r, "--log-format=long")
	if err != nil {
		return nil, ErrRevisionUnavailable
	}

	ci := &CommitInfo{}
	lines := strings.Split(string(out), "\n")
	const format = "Mon 2006-01-02 15:04:05 -0700"

	// track/trackOn mark the line index where "message:" was seen; every
	// subsequent line is accumulated into the commit message.
	var track int
	var trackOn bool

	// Note, bzr does not appear to use i18n.
	for i, l := range lines {
		if strings.HasPrefix(l, "revno:") {
			ci.Commit = strings.TrimSpace(strings.TrimPrefix(l, "revno:"))
		} else if strings.HasPrefix(l, "committer:") {
			ci.Author = strings.TrimSpace(strings.TrimPrefix(l, "committer:"))
		} else if strings.HasPrefix(l, "timestamp:") {
			ts := strings.TrimSpace(strings.TrimPrefix(l, "timestamp:"))
			ci.Date, err = time.Parse(format, ts)
			if err != nil {
				return nil, NewLocalError("Unable to retrieve commit information", err, string(out))
			}
		} else if strings.TrimSpace(l) == "message:" {
			track = i
			trackOn = true
		} else if trackOn && i > track {
			// NOTE(review): message lines are concatenated without a
			// separator, so a multi-line commit message loses its newlines —
			// confirm whether callers rely on this.
			ci.Message = ci.Message + l
		}
	}
	ci.Message = strings.TrimSpace(ci.Message)

	// Didn't find the revision
	if ci.Author == "" {
		return nil, ErrRevisionUnavailable
	}

	return ci, nil
}

// TagsFromCommit retrieves tags from a commit id.
func (s *BzrRepo) TagsFromCommit(id string) ([]string, error) {
	out, err := s.RunFromDir("bzr", "tags", "-r", id)
	if err != nil {
		return []string{}, NewLocalError("Unable to retrieve tags", err, string(out))
	}

	tags := s.referenceList(string(out), `(?m-s)^(\S+)`)
	return tags, nil
}

// Ping returns if remote location is accessible.
func (s *BzrRepo) Ping() bool {

	// Running bzr info is slow. Many of the projects are on launchpad which
	// has a public 1.0 API we can use.
	u, err := url.Parse(s.Remote())
	if err == nil {
		if u.Host == "launchpad.net" {
			try := strings.TrimPrefix(u.Path, "/")

			// get returns the body and an err. If the status code is not a 200
			// an error is returned. Launchpad returns a 404 for a codebase that
			// does not exist. Otherwise it returns a JSON object describing it.
			_, er := get("https://api.launchpad.net/1.0/" + try)
			return er == nil
		}
	}

	// This is the same command that Go itself uses but it's not fast (or fast
	// enough by my standards). A faster method would be useful.
	_, err = s.run("bzr", "info", s.Remote())
	return err == nil
}

// ExportDir exports the current revision to the passed in directory.
func (s *BzrRepo) ExportDir(dir string) error { out, err := s.RunFromDir("bzr", "export", dir) s.log(out) if err != nil { return NewLocalError("Unable to export source", err, string(out)) } return nil } // Multi-lingual manner check for the VCS error that it couldn't create directory. // https://bazaar.launchpad.net/~bzr-pqm/bzr/bzr.dev/files/head:/po/ func (s *BzrRepo) isUnableToCreateDir(err error) bool { msg := err.Error() if strings.HasPrefix(msg, fmt.Sprintf("Parent directory of %s does not exist.", s.LocalPath())) || strings.HasPrefix(msg, fmt.Sprintf("Nadřazený adresář %s neexistuje.", s.LocalPath())) || strings.HasPrefix(msg, fmt.Sprintf("El directorio padre de %s no existe.", s.LocalPath())) || strings.HasPrefix(msg, fmt.Sprintf("%s の親ディレクトリがありません。", s.LocalPath())) || strings.HasPrefix(msg, fmt.Sprintf("Родительская директория для %s не существует.", s.LocalPath())) { return true } return false }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/svn.go
package vcs import ( "encoding/xml" "fmt" "os" "os/exec" "path/filepath" "runtime" "strings" "time" ) // NewSvnRepo creates a new instance of SvnRepo. The remote and local directories // need to be passed in. The remote location should include the branch for SVN. // For example, if the package is https://github.com/Masterminds/cookoo/ the remote // should be https://github.com/Masterminds/cookoo/trunk for the trunk branch. func NewSvnRepo(remote, local string) (*SvnRepo, error) { ins := depInstalled("svn") if !ins { return nil, NewLocalError("svn is not installed", nil, "") } ltype, err := DetectVcsFromFS(local) // Found a VCS other than Svn. Need to report an error. if err == nil && ltype != Svn { return nil, ErrWrongVCS } r := &SvnRepo{} r.setRemote(remote) r.setLocalPath(local) r.Logger = Logger // Make sure the local SVN repo is configured the same as the remote when // A remote value was passed in. if err == nil && r.CheckLocal() { // An SVN repo was found so test that the URL there matches // the repo passed in here. out, err := exec.Command("svn", "info", local).CombinedOutput() if err != nil { return nil, NewLocalError("Unable to retrieve local repo information", err, string(out)) } detectedRemote, err := detectRemoteFromInfoCommand(string(out)) if err != nil { return nil, NewLocalError("Unable to retrieve local repo information", err, string(out)) } if detectedRemote != "" && remote != "" && detectedRemote != remote { return nil, ErrWrongRemote } // If no remote was passed in but one is configured for the locally // checked out Svn repo use that one. if remote == "" && detectedRemote != "" { r.setRemote(detectedRemote) } } return r, nil } // SvnRepo implements the Repo interface for the Svn source control. type SvnRepo struct { base } // Vcs retrieves the underlying VCS being implemented. func (s SvnRepo) Vcs() Type { return Svn } // Get is used to perform an initial checkout of a repository. 
// Note, because SVN isn't distributed this is a checkout without // a clone. func (s *SvnRepo) Get() error { remote := s.Remote() if strings.HasPrefix(remote, "/") { remote = "file://" + remote } else if runtime.GOOS == "windows" && filepath.VolumeName(remote) != "" { remote = "file:///" + remote } out, err := s.run("svn", "checkout", remote, s.LocalPath()) if err != nil { return NewRemoteError("Unable to get repository", err, string(out)) } return nil } // Init will create a svn repository at remote location. func (s *SvnRepo) Init() error { out, err := s.run("svnadmin", "create", s.Remote()) if err != nil && s.isUnableToCreateDir(err) { basePath := filepath.Dir(filepath.FromSlash(s.Remote())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return NewLocalError("Unable to initialize repository", err, "") } out, err = s.run("svnadmin", "create", s.Remote()) if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } } else if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } // Update performs an SVN update to an existing checkout. func (s *SvnRepo) Update() error { out, err := s.RunFromDir("svn", "update") if err != nil { return NewRemoteError("Unable to update repository", err, string(out)) } return err } // UpdateVersion sets the version of a package currently checked out via SVN. func (s *SvnRepo) UpdateVersion(version string) error { out, err := s.RunFromDir("svn", "update", "-r", version) if err != nil { return NewRemoteError("Unable to update checked out version", err, string(out)) } return nil } // Version retrieves the current version. 
func (s *SvnRepo) Version() (string, error) { type Commit struct { Revision string `xml:"revision,attr"` } type Info struct { Commit Commit `xml:"entry>commit"` } out, err := s.RunFromDir("svn", "info", "--xml") if err != nil { return "", NewLocalError("Unable to retrieve checked out version", err, string(out)) } s.log(out) infos := &Info{} err = xml.Unmarshal(out, &infos) if err != nil { return "", NewLocalError("Unable to retrieve checked out version", err, string(out)) } return infos.Commit.Revision, nil } // Current returns the current version-ish. This means: // * HEAD if on the tip. // * Otherwise a revision id func (s *SvnRepo) Current() (string, error) { tip, err := s.CommitInfo("HEAD") if err != nil { return "", err } curr, err := s.Version() if err != nil { return "", err } if tip.Commit == curr { return "HEAD", nil } return curr, nil } // Date retrieves the date on the latest commit. func (s *SvnRepo) Date() (time.Time, error) { version, err := s.Version() if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, "") } out, err := s.RunFromDir("svn", "pget", "svn:date", "--revprop", "-r", version) if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } const longForm = "2006-01-02T15:04:05.000000Z" t, err := time.Parse(longForm, strings.TrimSpace(string(out))) if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } return t, nil } // CheckLocal verifies the local location is an SVN repo. func (s *SvnRepo) CheckLocal() bool { pth, err := filepath.Abs(s.LocalPath()) if err != nil { s.log(err.Error()) return false } if _, err := os.Stat(filepath.Join(pth, ".svn")); err == nil { return true } oldpth := pth for oldpth != pth { pth = filepath.Dir(pth) if _, err := os.Stat(filepath.Join(pth, ".svn")); err == nil { return true } } return false } // Tags returns []string{} as there are no formal tags in SVN. 
Tags are a // convention in SVN. They are typically implemented as a copy of the trunk and // placed in the /tags/[tag name] directory. Since this is a convention the // expectation is to checkout a tag the correct subdirectory will be used // as the path. For more information see: // http://svnbook.red-bean.com/en/1.7/svn.branchmerge.tags.html func (s *SvnRepo) Tags() ([]string, error) { return []string{}, nil } // Branches returns []string{} as there are no formal branches in SVN. Branches // are a convention. They are typically implemented as a copy of the trunk and // placed in the /branches/[tag name] directory. Since this is a convention the // expectation is to checkout a branch the correct subdirectory will be used // as the path. For more information see: // http://svnbook.red-bean.com/en/1.7/svn.branchmerge.using.html func (s *SvnRepo) Branches() ([]string, error) { return []string{}, nil } // IsReference returns if a string is a reference. A reference is a commit id. // Branches and tags are part of the path. func (s *SvnRepo) IsReference(r string) bool { out, err := s.RunFromDir("svn", "log", "-r", r) // This is a complete hack. There must be a better way to do this. Pull // requests welcome. When the reference isn't real you get a line of // repeated - followed by an empty line. If the reference is real there // is commit information in addition to those. So, we look for responses // over 2 lines long. lines := strings.Split(string(out), "\n") if err == nil && len(lines) > 2 { return true } return false } // IsDirty returns if the checkout has been modified from the checked // out reference. func (s *SvnRepo) IsDirty() bool { out, err := s.RunFromDir("svn", "diff") return err != nil || len(out) != 0 } // CommitInfo retrieves metadata about a commit. func (s *SvnRepo) CommitInfo(id string) (*CommitInfo, error) { // There are cases where Svn log doesn't return anything for HEAD or BASE. 
// svn info does provide details for these but does not have elements like // the commit message. if id == "HEAD" || id == "BASE" { type Commit struct { Revision string `xml:"revision,attr"` } type Info struct { Commit Commit `xml:"entry>commit"` } out, err := s.RunFromDir("svn", "info", "-r", id, "--xml") if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } infos := &Info{} err = xml.Unmarshal(out, &infos) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } id = infos.Commit.Revision if id == "" { return nil, ErrRevisionUnavailable } } out, err := s.RunFromDir("svn", "log", "-r", id, "--xml") if err != nil { return nil, NewRemoteError("Unable to retrieve commit information", err, string(out)) } type Logentry struct { Author string `xml:"author"` Date string `xml:"date"` Msg string `xml:"msg"` } type Log struct { XMLName xml.Name `xml:"log"` Logs []Logentry `xml:"logentry"` } logs := &Log{} err = xml.Unmarshal(out, &logs) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } if len(logs.Logs) == 0 { return nil, ErrRevisionUnavailable } ci := &CommitInfo{ Commit: id, Author: logs.Logs[0].Author, Message: logs.Logs[0].Msg, } if len(logs.Logs[0].Date) > 0 { ci.Date, err = time.Parse(time.RFC3339Nano, logs.Logs[0].Date) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } } return ci, nil } // TagsFromCommit retrieves tags from a commit id. func (s *SvnRepo) TagsFromCommit(id string) ([]string, error) { // Svn tags are a convention implemented as paths. See the details on the // Tag() method for more information. return []string{}, nil } // Ping returns if remote location is accessible. func (s *SvnRepo) Ping() bool { _, err := s.run("svn", "--non-interactive", "info", s.Remote()) return err == nil } // ExportDir exports the current revision to the passed in directory. 
func (s *SvnRepo) ExportDir(dir string) error { out, err := s.RunFromDir("svn", "export", ".", dir) s.log(out) if err != nil { return NewLocalError("Unable to export source", err, string(out)) } return nil } // isUnableToCreateDir checks for an error in Init() to see if an error // where the parent directory of the VCS local path doesn't exist. func (s *SvnRepo) isUnableToCreateDir(err error) bool { msg := err.Error() return strings.HasPrefix(msg, "E000002") } // detectRemoteFromInfoCommand finds the remote url from the `svn info` // command's output without using a regex. We avoid regex because URLs // are notoriously complex to accurately match with a regex and // splitting strings is less complex and often faster func detectRemoteFromInfoCommand(infoOut string) (string, error) { sBytes := []byte(infoOut) urlIndex := strings.Index(infoOut, "URL: ") if urlIndex == -1 { return "", fmt.Errorf("Remote not specified in svn info") } urlEndIndex := strings.Index(string(sBytes[urlIndex:]), "\n") if urlEndIndex == -1 { urlEndIndex = strings.Index(string(sBytes[urlIndex:]), "\r") if urlEndIndex == -1 { return "", fmt.Errorf("Unable to parse remote URL for svn info") } } return string(sBytes[(urlIndex + 5):(urlIndex + urlEndIndex)]), nil }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/hg.go
package vcs import ( "encoding/xml" "os" "os/exec" "regexp" "strings" "time" ) var hgDetectURL = regexp.MustCompile("default = (?P<foo>.+)\n") // NewHgRepo creates a new instance of HgRepo. The remote and local directories // need to be passed in. func NewHgRepo(remote, local string) (*HgRepo, error) { ins := depInstalled("hg") if !ins { return nil, NewLocalError("hg is not installed", nil, "") } ltype, err := DetectVcsFromFS(local) // Found a VCS other than Hg. Need to report an error. if err == nil && ltype != Hg { return nil, ErrWrongVCS } r := &HgRepo{} r.setRemote(remote) r.setLocalPath(local) r.Logger = Logger // Make sure the local Hg repo is configured the same as the remote when // A remote value was passed in. if err == nil && r.CheckLocal() { // An Hg repo was found so test that the URL there matches // the repo passed in here. c := exec.Command("hg", "paths") c.Dir = local c.Env = envForDir(c.Dir) out, err := c.CombinedOutput() if err != nil { return nil, NewLocalError("Unable to retrieve local repo information", err, string(out)) } m := hgDetectURL.FindStringSubmatch(string(out)) if m[1] != "" && m[1] != remote { return nil, ErrWrongRemote } // If no remote was passed in but one is configured for the locally // checked out Hg repo use that one. if remote == "" && m[1] != "" { r.setRemote(m[1]) } } return r, nil } // HgRepo implements the Repo interface for the Mercurial source control. type HgRepo struct { base } // Vcs retrieves the underlying VCS being implemented. func (s HgRepo) Vcs() Type { return Hg } // Get is used to perform an initial clone of a repository. func (s *HgRepo) Get() error { out, err := s.run("hg", "clone", s.Remote(), s.LocalPath()) if err != nil { return NewRemoteError("Unable to get repository", err, string(out)) } return nil } // Init will initialize a mercurial repository at local location. 
func (s *HgRepo) Init() error { out, err := s.run("hg", "init", s.LocalPath()) if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } // Update performs a Mercurial pull to an existing checkout. func (s *HgRepo) Update() error { return s.UpdateVersion(``) } // UpdateVersion sets the version of a package currently checked out via Hg. func (s *HgRepo) UpdateVersion(version string) error { out, err := s.RunFromDir("hg", "pull") if err != nil { return NewLocalError("Unable to update checked out version", err, string(out)) } if len(strings.TrimSpace(version)) > 0 { out, err = s.RunFromDir("hg", "update", version) } else { out, err = s.RunFromDir("hg", "update") } if err != nil { return NewLocalError("Unable to update checked out version", err, string(out)) } return nil } // Version retrieves the current version. func (s *HgRepo) Version() (string, error) { out, err := s.RunFromDir("hg", "--debug", "identify") if err != nil { return "", NewLocalError("Unable to retrieve checked out version", err, string(out)) } parts := strings.SplitN(string(out), " ", 2) sha := parts[0] return strings.TrimSpace(sha), nil } // Current returns the current version-ish. This means: // * Branch name if on the tip of the branch // * Tag if on a tag // * Otherwise a revision id func (s *HgRepo) Current() (string, error) { out, err := s.RunFromDir("hg", "branch") if err != nil { return "", err } branch := strings.TrimSpace(string(out)) tip, err := s.CommitInfo("max(branch(" + branch + "))") if err != nil { return "", err } curr, err := s.Version() if err != nil { return "", err } if tip.Commit == curr { return branch, nil } ts, err := s.TagsFromCommit(curr) if err != nil { return "", err } if len(ts) > 0 { return ts[0], nil } return curr, nil } // Date retrieves the date on the latest commit. 
func (s *HgRepo) Date() (time.Time, error) { version, err := s.Version() if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, "") } out, err := s.RunFromDir("hg", "log", "-r", version, "--template", "{date|isodatesec}") if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } t, err := time.Parse(longForm, string(out)) if err != nil { return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out)) } return t, nil } // CheckLocal verifies the local location is a Git repo. func (s *HgRepo) CheckLocal() bool { if _, err := os.Stat(s.LocalPath() + "/.hg"); err == nil { return true } return false } // Branches returns a list of available branches func (s *HgRepo) Branches() ([]string, error) { out, err := s.RunFromDir("hg", "branches") if err != nil { return []string{}, NewLocalError("Unable to retrieve branches", err, string(out)) } branches := s.referenceList(string(out), `(?m-s)^(\S+)`) return branches, nil } // Tags returns a list of available tags func (s *HgRepo) Tags() ([]string, error) { out, err := s.RunFromDir("hg", "tags") if err != nil { return []string{}, NewLocalError("Unable to retrieve tags", err, string(out)) } tags := s.referenceList(string(out), `(?m-s)^(\S+)`) return tags, nil } // IsReference returns if a string is a reference. A reference can be a // commit id, branch, or tag. func (s *HgRepo) IsReference(r string) bool { _, err := s.RunFromDir("hg", "log", "-r", r) return err == nil } // IsDirty returns if the checkout has been modified from the checked // out reference. func (s *HgRepo) IsDirty() bool { out, err := s.RunFromDir("hg", "diff") return err != nil || len(out) != 0 } // CommitInfo retrieves metadata about a commit. 
func (s *HgRepo) CommitInfo(id string) (*CommitInfo, error) { out, err := s.RunFromDir("hg", "log", "-r", id, "--style=xml") if err != nil { return nil, ErrRevisionUnavailable } type Author struct { Name string `xml:",chardata"` Email string `xml:"email,attr"` } type Logentry struct { Node string `xml:"node,attr"` Author Author `xml:"author"` Date string `xml:"date"` Msg string `xml:"msg"` } type Log struct { XMLName xml.Name `xml:"log"` Logs []Logentry `xml:"logentry"` } logs := &Log{} err = xml.Unmarshal(out, &logs) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } if len(logs.Logs) == 0 { return nil, ErrRevisionUnavailable } ci := &CommitInfo{ Commit: logs.Logs[0].Node, Author: logs.Logs[0].Author.Name + " <" + logs.Logs[0].Author.Email + ">", Message: logs.Logs[0].Msg, } if logs.Logs[0].Date != "" { ci.Date, err = time.Parse(time.RFC3339, logs.Logs[0].Date) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } } return ci, nil } // TagsFromCommit retrieves tags from a commit id. func (s *HgRepo) TagsFromCommit(id string) ([]string, error) { // Hg has a single tag per commit. If a second tag is added to a commit a // new commit is created and the tag is attached to that new commit. 
out, err := s.RunFromDir("hg", "log", "-r", id, "--style=xml") if err != nil { return []string{}, NewLocalError("Unable to retrieve tags", err, string(out)) } type Logentry struct { Node string `xml:"node,attr"` Tag string `xml:"tag"` } type Log struct { XMLName xml.Name `xml:"log"` Logs []Logentry `xml:"logentry"` } logs := &Log{} err = xml.Unmarshal(out, &logs) if err != nil { return []string{}, NewLocalError("Unable to retrieve tags", err, string(out)) } if len(logs.Logs) == 0 { return []string{}, NewLocalError("Unable to retrieve tags", err, string(out)) } t := strings.TrimSpace(logs.Logs[0].Tag) if t != "" { return []string{t}, nil } return []string{}, nil } // Ping returns if remote location is accessible. func (s *HgRepo) Ping() bool { _, err := s.run("hg", "identify", s.Remote()) return err == nil } // ExportDir exports the current revision to the passed in directory. func (s *HgRepo) ExportDir(dir string) error { out, err := s.RunFromDir("hg", "archive", dir) s.log(out) if err != nil { return NewLocalError("Unable to export source", err, string(out)) } return nil }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/errors.go
package vcs import "errors" // The vcs package provides ways to work with errors that hide the underlying // implementation details but make them accessible if needed. For basic errors // that do not have underlying implementation specific details or the underlying // details are not necessary there are errors for comparison. // // For example: // // ci, err := repo.CommitInfo("123") // if err == vcs.ErrRevisionUnavailable { // // The commit id was not available in the VCS. // } // // There are other times where getting the details are more useful. For example, // if you're performing a repo.Get() and an error occurs. In general you'll want // to consistently know it failed. But, you may want to know the underlying // details (opt-in) to them. For those cases there is a different form of error // handling. // // For example: // // err := repo.Get() // if err != nil { // // A RemoteError was returned. This has access to the output of the // // vcs command, original error, and has a consistent cross vcs message. // } // // The errors returned here can be used in type switches to detect the underlying // error. For example: // // switch err.(type) { // case *vcs.RemoteError: // // This an error connecting to a remote system. // } // // For more information on using type switches to detect error types you can // read the Go wiki at https://github.com/golang/go/wiki/Errors var ( // ErrWrongVCS is returned when an action is tried on the wrong VCS. ErrWrongVCS = errors.New("Wrong VCS detected") // ErrCannotDetectVCS is returned when VCS cannot be detected from URI string. ErrCannotDetectVCS = errors.New("Cannot detect VCS") // ErrWrongRemote occurs when the passed in remote does not match the VCS // configured endpoint. ErrWrongRemote = errors.New("The Remote does not match the VCS endpoint") // ErrRevisionUnavailable happens when commit revision information is // unavailable. 
ErrRevisionUnavailable = errors.New("Revision unavailable") ) // RemoteError is returned when an operation fails against a remote repo type RemoteError struct { vcsError } // NewRemoteError constructs a RemoteError func NewRemoteError(msg string, err error, out string) error { e := &RemoteError{} e.s = msg e.e = err e.o = out return e } // LocalError is returned when a local operation has an error type LocalError struct { vcsError } // NewLocalError constructs a LocalError func NewLocalError(msg string, err error, out string) error { e := &LocalError{} e.s = msg e.e = err e.o = out return e } type vcsError struct { s string e error // The original error o string // The output from executing the command } // Error implements the Error interface func (e *vcsError) Error() string { return e.s } // Original retrieves the underlying implementation specific error. func (e *vcsError) Original() error { return e.e } // Out retrieves the output of the original command that was run. func (e *vcsError) Out() string { return e.o }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/vcs_remote_lookup.go
package vcs import ( "encoding/json" "encoding/xml" "fmt" "io" "io/ioutil" "net/http" "net/url" "regexp" "strings" ) type vcsInfo struct { host string pattern string vcs Type addCheck func(m map[string]string, u *url.URL) (Type, error) regex *regexp.Regexp } // scpSyntaxRe matches the SCP-like addresses used by Git to access // repositories by SSH. var scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`) var vcsList = []*vcsInfo{ { host: "github.com", vcs: Git, pattern: `^(github\.com[/|:][A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`, }, { host: "bitbucket.org", pattern: `^(bitbucket\.org/(?P<name>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`, addCheck: checkBitbucket, }, { host: "launchpad.net", pattern: `^(launchpad\.net/(([A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`, vcs: Bzr, }, { host: "git.launchpad.net", vcs: Git, pattern: `^(git\.launchpad\.net/(([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))$`, }, { host: "hub.jazz.net", vcs: Git, pattern: `^(hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`, }, { host: "go.googlesource.com", vcs: Git, pattern: `^(go\.googlesource\.com/[A-Za-z0-9_.\-]+/?)$`, }, { host: "git.openstack.org", vcs: Git, pattern: `^(git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)$`, }, // If none of the previous detect the type they will fall to this looking for the type in a generic sense // by the extension to the path. { addCheck: checkURL, pattern: `\.(?P<type>git|hg|svn|bzr)$`, }, } func init() { // Precompile the regular expressions used to check VCS locations. for _, v := range vcsList { v.regex = regexp.MustCompile(v.pattern) } } // This function is really a hack around Go redirects rather than around // something VCS related. Should this be moved to the glide project or a // helper function? 
func detectVcsFromRemote(vcsURL string) (Type, string, error) { t, e := detectVcsFromURL(vcsURL) if e == nil { return t, vcsURL, nil } else if e != ErrCannotDetectVCS { return NoVCS, "", e } // Pages like https://golang.org/x/net provide an html document with // meta tags containing a location to work with. The go tool uses // a meta tag with the name go-import which is what we use here. // godoc.org also has one call go-source that we do not need to use. // The value of go-import is in the form "prefix vcs repo". The prefix // should match the vcsURL and the repo is a location that can be // checked out. Note, to get the html document you you need to add // ?go-get=1 to the url. u, err := url.Parse(vcsURL) if err != nil { return NoVCS, "", err } if u.RawQuery == "" { u.RawQuery = "go-get=1" } else { u.RawQuery = u.RawQuery + "+go-get=1" } checkURL := u.String() resp, err := http.Get(checkURL) if err != nil { return NoVCS, "", ErrCannotDetectVCS } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 300 { if resp.StatusCode == 404 { return NoVCS, "", NewRemoteError(fmt.Sprintf("%s Not Found", vcsURL), nil, "") } else if resp.StatusCode == 401 || resp.StatusCode == 403 { return NoVCS, "", NewRemoteError(fmt.Sprintf("%s Access Denied", vcsURL), nil, "") } return NoVCS, "", ErrCannotDetectVCS } t, nu, err := parseImportFromBody(u, resp.Body) if err != nil { // TODO(mattfarina): Log the parsing error return NoVCS, "", ErrCannotDetectVCS } else if t == "" || nu == "" { return NoVCS, "", ErrCannotDetectVCS } return t, nu, nil } // From a remote vcs url attempt to detect the VCS. func detectVcsFromURL(vcsURL string) (Type, error) { var u *url.URL var err error if m := scpSyntaxRe.FindStringSubmatch(vcsURL); m != nil { // Match SCP-like syntax and convert it to a URL. // Eg, "git@github.com:user/repo" becomes // "ssh://git@github.com/user/repo". 
u = &url.URL{ Scheme: "ssh", User: url.User(m[1]), Host: m[2], Path: "/" + m[3], } } else { u, err = url.Parse(vcsURL) if err != nil { return "", err } } // Detect file schemes if u.Scheme == "file" { return DetectVcsFromFS(u.Path) } if u.Host == "" { return "", ErrCannotDetectVCS } // Try to detect from the scheme switch u.Scheme { case "git+ssh": return Git, nil case "git": return Git, nil case "bzr+ssh": return Bzr, nil case "svn+ssh": return Svn, nil } // Try to detect from known hosts, such as Github for _, v := range vcsList { if v.host != "" && v.host != u.Host { continue } // Make sure the pattern matches for an actual repo location. For example, // we should fail if the VCS listed is github.com/masterminds as that's // not actually a repo. uCheck := u.Host + u.Path m := v.regex.FindStringSubmatch(uCheck) if m == nil { if v.host != "" { return "", ErrCannotDetectVCS } continue } // If we are here the host matches. If the host has a singular // VCS type, such as Github, we can return the type right away. if v.vcs != "" { return v.vcs, nil } // Run additional checks to determine try and determine the repo // for the matched service. info := make(map[string]string) for i, name := range v.regex.SubexpNames() { if name != "" { info[name] = m[i] } } t, err := v.addCheck(info, u) if err != nil { switch err.(type) { case *RemoteError: return "", err } return "", ErrCannotDetectVCS } return t, nil } // Attempt to ascertain from the username passed in. if u.User != nil { un := u.User.Username() if un == "git" { return Git, nil } else if un == "hg" { return Hg, nil } } // Unable to determine the vcs from the url. return "", ErrCannotDetectVCS } // Figure out the type for Bitbucket by the passed in information // or via the public API. func checkBitbucket(i map[string]string, ul *url.URL) (Type, error) { // Fast path for ssh urls where we may not even be able to // anonymously get details from the API. 
if ul.User != nil { un := ul.User.Username() if un == "git" { return Git, nil } else if un == "hg" { return Hg, nil } } // The part of the response we care about. var response struct { SCM Type `json:"scm"` } u := expand(i, "https://api.bitbucket.org/1.0/repositories/{name}") data, err := get(u) if err != nil { return "", err } if err := json.Unmarshal(data, &response); err != nil { return "", fmt.Errorf("Decoding error %s: %v", u, err) } return response.SCM, nil } // Expect a type key on i with the exact type detected from the regex. func checkURL(i map[string]string, u *url.URL) (Type, error) { return Type(i["type"]), nil } func get(url string) ([]byte, error) { resp, err := http.Get(url) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { if resp.StatusCode == 404 { return nil, NewRemoteError("Not Found", err, resp.Status) } else if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, NewRemoteError("Access Denied", err, resp.Status) } return nil, fmt.Errorf("%s: %s", url, resp.Status) } b, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%s: %v", url, err) } return b, nil } func expand(match map[string]string, s string) string { for k, v := range match { s = strings.Replace(s, "{"+k+"}", v, -1) } return s } func parseImportFromBody(ur *url.URL, r io.ReadCloser) (tp Type, u string, err error) { d := xml.NewDecoder(r) d.CharsetReader = charsetReader d.Strict = false var t xml.Token for { t, err = d.Token() if err != nil { if err == io.EOF { // When the end is reached it could not detect a VCS if it // got here. 
err = ErrCannotDetectVCS } return } if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { return } if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { return } e, ok := t.(xml.StartElement) if !ok || !strings.EqualFold(e.Name.Local, "meta") { continue } if attrValue(e.Attr, "name") != "go-import" { continue } if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 { // If the prefix supplied by the remote system isn't a prefix to the // url we're fetching continue to look for other imports. // This will work for exact matches and prefixes. For example, // golang.org/x/net as a prefix will match for golang.org/x/net and // golang.org/x/net/context. vcsURL := ur.Host + ur.Path if !strings.HasPrefix(vcsURL, f[0]) { continue } else { switch Type(f[1]) { case Git: tp = Git case Svn: tp = Svn case Bzr: tp = Bzr case Hg: tp = Hg } u = f[2] return } } } } func charsetReader(charset string, input io.Reader) (io.Reader, error) { switch strings.ToLower(charset) { case "ascii": return input, nil default: return nil, fmt.Errorf("can't decode XML document using charset %q", charset) } } func attrValue(attrs []xml.Attr, name string) string { for _, a := range attrs { if strings.EqualFold(a.Name.Local, name) { return a.Value } } return "" }
vcs
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/vcs/git.go
package vcs import ( "bytes" "encoding/xml" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "time" ) // NewGitRepo creates a new instance of GitRepo. The remote and local directories // need to be passed in. func NewGitRepo(remote, local string) (*GitRepo, error) { ins := depInstalled("git") if !ins { return nil, NewLocalError("git is not installed", nil, "") } ltype, err := DetectVcsFromFS(local) // Found a VCS other than Git. Need to report an error. if err == nil && ltype != Git { return nil, ErrWrongVCS } r := &GitRepo{} r.setRemote(remote) r.setLocalPath(local) r.RemoteLocation = "origin" r.Logger = Logger // Make sure the local Git repo is configured the same as the remote when // A remote value was passed in. if err == nil && r.CheckLocal() { c := exec.Command("git", "config", "--get", "remote.origin.url") c.Dir = local c.Env = envForDir(c.Dir) out, err := c.CombinedOutput() if err != nil { return nil, NewLocalError("Unable to retrieve local repo information", err, string(out)) } localRemote := strings.TrimSpace(string(out)) if remote != "" && localRemote != remote { return nil, ErrWrongRemote } // If no remote was passed in but one is configured for the locally // checked out Git repo use that one. if remote == "" && localRemote != "" { r.setRemote(localRemote) } } return r, nil } // GitRepo implements the Repo interface for the Git source control. type GitRepo struct { base RemoteLocation string } // Vcs retrieves the underlying VCS being implemented. func (s GitRepo) Vcs() Type { return Git } // Get is used to perform an initial clone of a repository. func (s *GitRepo) Get() error { out, err := s.run("git", "clone", "--recursive", s.Remote(), s.LocalPath()) // There are some windows cases where Git cannot create the parent directory, // if it does not already exist, to the location it's trying to create the // repo. Catch that error and try to handle it. 
if err != nil && s.isUnableToCreateDir(err) { basePath := filepath.Dir(filepath.FromSlash(s.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return NewLocalError("Unable to create directory", err, "") } out, err = s.run("git", "clone", s.Remote(), s.LocalPath()) if err != nil { return NewRemoteError("Unable to get repository", err, string(out)) } return err } } else if err != nil { return NewRemoteError("Unable to get repository", err, string(out)) } return nil } // Init initializes a git repository at local location. func (s *GitRepo) Init() error { out, err := s.run("git", "init", s.LocalPath()) // There are some windows cases where Git cannot create the parent directory, // if it does not already exist, to the location it's trying to create the // repo. Catch that error and try to handle it. if err != nil && s.isUnableToCreateDir(err) { basePath := filepath.Dir(filepath.FromSlash(s.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return NewLocalError("Unable to initialize repository", err, "") } out, err = s.run("git", "init", s.LocalPath()) if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } } else if err != nil { return NewLocalError("Unable to initialize repository", err, string(out)) } return nil } // Update performs an Git fetch and pull to an existing checkout. func (s *GitRepo) Update() error { // Perform a fetch to make sure everything is up to date. out, err := s.RunFromDir("git", "fetch", "--tags", s.RemoteLocation) if err != nil { return NewRemoteError("Unable to update repository", err, string(out)) } // When in a detached head state, such as when an individual commit is checked // out do not attempt a pull. It will cause an error. 
detached, err := isDetachedHead(s.LocalPath()) if err != nil { return NewLocalError("Unable to update repository", err, "") } if detached { return nil } out, err = s.RunFromDir("git", "pull") if err != nil { return NewRemoteError("Unable to update repository", err, string(out)) } return s.defendAgainstSubmodules() } // UpdateVersion sets the version of a package currently checked out via Git. func (s *GitRepo) UpdateVersion(version string) error { out, err := s.RunFromDir("git", "checkout", version) if err != nil { return NewLocalError("Unable to update checked out version", err, string(out)) } return s.defendAgainstSubmodules() } // defendAgainstSubmodules tries to keep repo state sane in the event of // submodules. Or nested submodules. What a great idea, submodules. func (s *GitRepo) defendAgainstSubmodules() error { // First, update them to whatever they should be, if there should happen to be any. out, err := s.RunFromDir("git", "submodule", "update", "--init", "--recursive") if err != nil { return NewLocalError("Unexpected error while defensively updating submodules", err, string(out)) } // Now, do a special extra-aggressive clean in case changing versions caused // one or more submodules to go away. out, err = s.RunFromDir("git", "clean", "-x", "-d", "-f", "-f") if err != nil { return NewLocalError("Unexpected error while defensively cleaning up after possible derelict submodule directories", err, string(out)) } // Then, repeat just in case there are any nested submodules that went away. out, err = s.RunFromDir("git", "submodule", "foreach", "--recursive", "git", "clean", "-x", "-d", "-f", "-f") if err != nil { return NewLocalError("Unexpected error while defensively cleaning up after possible derelict nested submodule directories", err, string(out)) } return nil } // Version retrieves the current version. 
func (s *GitRepo) Version() (string, error) {
	out, err := s.RunFromDir("git", "rev-parse", "HEAD")
	if err != nil {
		return "", NewLocalError("Unable to retrieve checked out version", err, string(out))
	}

	// rev-parse terminates its output with a newline; trim it off.
	return strings.TrimSpace(string(out)), nil
}

// Current returns the current version-ish. This means:
// * Branch name if on the tip of the branch
// * Tag if on a tag
// * Otherwise a revision id
func (s *GitRepo) Current() (string, error) {
	// symbolic-ref succeeds only when HEAD points at a branch.
	out, err := s.RunFromDir("git", "symbolic-ref", "HEAD")
	if err == nil {
		o := bytes.TrimSpace(bytes.TrimPrefix(out, []byte("refs/heads/")))
		return string(o), nil
	}

	// Detached HEAD: fall back to the commit id, preferring a tag that
	// points at it when one exists.
	v, err := s.Version()
	if err != nil {
		return "", err
	}

	ts, err := s.TagsFromCommit(v)
	if err != nil {
		return "", err
	}

	if len(ts) > 0 {
		return ts[0], nil
	}

	return v, nil
}

// Date retrieves the date on the latest commit.
func (s *GitRepo) Date() (time.Time, error) {
	out, err := s.RunFromDir("git", "log", "-1", "--date=iso", "--pretty=format:%cd")
	if err != nil {
		return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out))
	}
	t, err := time.Parse(longForm, string(out))
	if err != nil {
		return time.Time{}, NewLocalError("Unable to retrieve revision date", err, string(out))
	}
	return t, nil
}

// Branches returns a list of available branches on the RemoteLocation.
func (s *GitRepo) Branches() ([]string, error) {
	out, err := s.RunFromDir("git", "show-ref")
	if err != nil {
		return []string{}, NewLocalError("Unable to retrieve branches", err, string(out))
	}
	// Keep only refs under the remote's namespace (e.g. refs/remotes/<remote>/...).
	branches := s.referenceList(string(out), `(?m-s)(?:`+s.RemoteLocation+`)/(\S+)$`)
	return branches, nil
}

// Tags returns a list of available tags on the RemoteLocation.
func (s *GitRepo) Tags() ([]string, error) {
	out, err := s.RunFromDir("git", "show-ref")
	if err != nil {
		return []string{}, NewLocalError("Unable to retrieve tags", err, string(out))
	}
	// Keep only refs under refs/tags/.
	tags := s.referenceList(string(out), `(?m-s)(?:tags)/(\S+)$`)
	return tags, nil
}

// CheckLocal verifies the local location is a Git repo.
func (s *GitRepo) CheckLocal() bool { if _, err := os.Stat(s.LocalPath() + "/.git"); err == nil { return true } return false } // IsReference returns if a string is a reference. A reference can be a // commit id, branch, or tag. func (s *GitRepo) IsReference(r string) bool { _, err := s.RunFromDir("git", "rev-parse", "--verify", r) if err == nil { return true } // Some refs will fail rev-parse. For example, a remote branch that has // not been checked out yet. This next step should pickup the other // possible references. _, err = s.RunFromDir("git", "show-ref", r) return err == nil } // IsDirty returns if the checkout has been modified from the checked // out reference. func (s *GitRepo) IsDirty() bool { out, err := s.RunFromDir("git", "diff") return err != nil || len(out) != 0 } // CommitInfo retrieves metadata about a commit. func (s *GitRepo) CommitInfo(id string) (*CommitInfo, error) { fm := `--pretty=format:"<logentry><commit>%H</commit><author>%an &lt;%ae&gt;</author><date>%aD</date><message>%s</message></logentry>"` out, err := s.RunFromDir("git", "log", id, fm, "-1") if err != nil { return nil, ErrRevisionUnavailable } cis := struct { Commit string `xml:"commit"` Author string `xml:"author"` Date string `xml:"date"` Message string `xml:"message"` }{} err = xml.Unmarshal(out, &cis) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } t, err := time.Parse("Mon, _2 Jan 2006 15:04:05 -0700", cis.Date) if err != nil { return nil, NewLocalError("Unable to retrieve commit information", err, string(out)) } ci := &CommitInfo{ Commit: cis.Commit, Author: cis.Author, Date: t, Message: cis.Message, } return ci, nil } // TagsFromCommit retrieves tags from a commit id. func (s *GitRepo) TagsFromCommit(id string) ([]string, error) { // This is imperfect and a better method would be great. 
	var re []string

	// show-ref -d also lists dereferenced annotated tags (suffixed ^{}),
	// which point at the underlying commit.
	out, err := s.RunFromDir("git", "show-ref", "-d")
	if err != nil {
		return []string{}, NewLocalError("Unable to retrieve tags", err, string(out))
	}

	// Keep only the ref lines whose commit id matches.
	lines := strings.Split(string(out), "\n")
	var list []string
	for _, i := range lines {
		if strings.HasPrefix(strings.TrimSpace(i), id) {
			list = append(list, i)
		}
	}
	tags := s.referenceList(strings.Join(list, "\n"), `(?m-s)(?:tags)/(\S+)$`)
	for _, t := range tags {
		// Dereferenced tags have ^{} appended to them.
		re = append(re, strings.TrimSuffix(t, "^{}"))
	}

	return re, nil
}

// Ping returns if remote location is accessible.
func (s *GitRepo) Ping() bool {
	c := exec.Command("git", "ls-remote", s.Remote())

	// If prompted for a username and password, which GitHub does for all things
	// not public, it's considered not available. To make it available the
	// remote needs to be different.
	c.Env = mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
	_, err := c.CombinedOutput()
	return err == nil
}

// EscapePathSeparator escapes the path separator by replacing it with several.
// Note: this is harmless on Unix, and needed on Windows.
func EscapePathSeparator(path string) (string) {
	switch runtime.GOOS {
	case `windows`:
		// On Windows, triple all path separators.
		// Needed to escape backslash(s) preceding doublequotes,
		// because of how Windows strings treats backslash+doublequote combo,
		// and Go seems to be implicitly passing around a doublequoted string on Windows,
		// so we cannnot use default string instead.
		// See: https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
		// e.g., C:\foo\bar\ -> C:\\\foo\\\bar\\\
		// used with --prefix, like this: --prefix=C:\foo\bar\ -> --prefix=C:\\\foo\\\bar\\\
		return strings.Replace(path,
			string(os.PathSeparator),
			string(os.PathSeparator) + string(os.PathSeparator) + string(os.PathSeparator),
			-1)
	default:
		return path
	}
}

// ExportDir exports the current revision to the passed in directory.
func (s *GitRepo) ExportDir(dir string) error {
	var path string

	// Without the trailing / there can be problems.
	if !strings.HasSuffix(dir, string(os.PathSeparator)) {
		dir = dir + string(os.PathSeparator)
	}

	// checkout-index on some systems, such as some Windows cases, does not
	// create the parent directory to export into if it does not exist. Explicitly
	// creating it.
	err := os.MkdirAll(dir, 0755)
	if err != nil {
		return NewLocalError("Unable to create directory", err, "")
	}

	path = EscapePathSeparator( dir )
	out, err := s.RunFromDir("git", "checkout-index", "-f", "-a", "--prefix="+path)
	s.log(out)
	if err != nil {
		return NewLocalError("Unable to export source", err, string(out))
	}

	// and now, the horror of submodules
	// NOTE(review): "$path" is passed through literally; presumably it is
	// expanded per-submodule by `git submodule foreach` itself - confirm
	// against git's documentation.
	path = EscapePathSeparator( dir + "$path" + string(os.PathSeparator) )
	out, err = s.RunFromDir("git", "submodule", "foreach", "--recursive", "git checkout-index -f -a --prefix="+path)
	s.log(out)
	if err != nil {
		return NewLocalError("Error while exporting submodule sources", err, string(out))
	}

	return nil
}

// isDetachedHead will detect if git repo is in "detached head" state.
func isDetachedHead(dir string) (bool, error) {
	p := filepath.Join(dir, ".git", "HEAD")
	contents, err := ioutil.ReadFile(p)
	if err != nil {
		return false, err
	}

	contents = bytes.TrimSpace(contents)
	// A symbolic HEAD ("ref: refs/heads/...") means a branch is checked
	// out; anything else (a bare commit id) is a detached HEAD.
	if bytes.HasPrefix(contents, []byte("ref: ")) {
		return false, nil
	}

	return true, nil
}

// isUnableToCreateDir inspects an error returned by git to see whether it
// indicates that the parent directory of the VCS local path could not be
// created. The check is done in a multi-lingual manner, since git
// localizes the message.
func (s *GitRepo) isUnableToCreateDir(err error) bool { msg := err.Error() if strings.HasPrefix(msg, "could not create work tree dir") || strings.HasPrefix(msg, "不能创建工作区目录") || strings.HasPrefix(msg, "no s'ha pogut crear el directori d'arbre de treball") || strings.HasPrefix(msg, "impossible de créer le répertoire de la copie de travail") || strings.HasPrefix(msg, "kunde inte skapa arbetskatalogen") || (strings.HasPrefix(msg, "Konnte Arbeitsverzeichnis") && strings.Contains(msg, "nicht erstellen")) || (strings.HasPrefix(msg, "작업 디렉터리를") && strings.Contains(msg, "만들 수 없습니다")) { return true } return false }
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/LICENSE.txt
The Masterminds Copyright (C) 2014-2015, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/parse.go
package semver

import (
	"errors"
	"fmt"
	"strings"
)

// rewriteRange rewrites every range expression matched by
// constraintRangeRegex (defined elsewhere in this package; presumably a
// hyphenated range such as "1.2 - 1.4" - confirm against the regex) into
// an explicit ">= min, <= max" pair so the rest of the parser only has to
// deal with operator form.
func rewriteRange(i string) string {
	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
	if m == nil {
		return i
	}
	o := i
	for _, v := range m {
		// v[1] and v[11] are the two endpoint captures; replace the whole
		// matched expression with the rewritten pair.
		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
		o = strings.Replace(o, v[0], t, 1)
	}

	return o
}

// parseConstraint parses a single constraint expression (one operator plus
// a possibly-wildcarded version) into a Constraint. When cbd
// (caret-by-default) is true, a constraint with no operator is treated as
// a caret constraint.
func parseConstraint(c string, cbd bool) (Constraint, error) {
	m := constraintRegex.FindStringSubmatch(c)
	if m == nil {
		return nil, fmt.Errorf("Malformed constraint: %s", c)
	}

	// Handle the full wildcard case first - easy!
	if isX(m[3]) {
		return any{}, nil
	}

	ver := m[2]
	var wildPatch, wildMinor bool
	// Substitute zeroes for wildcarded segments so the string can be parsed
	// as a concrete version, and record which segments were wild.
	if isX(strings.TrimPrefix(m[4], ".")) {
		wildPatch = true
		wildMinor = true
		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
	} else if isX(strings.TrimPrefix(m[5], ".")) {
		wildPatch = true
		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
	}

	v, err := NewVersion(ver)
	if err != nil {
		// The constraintRegex should catch any regex parsing errors. So,
		// we should never get here.
		return nil, errors.New("constraint Parser Error")
	}

	// We never want to keep the "original" data in a constraint, and keeping it
	// around can disrupt simple equality comparisons. So, strip it out.
	v.original = ""

	// If caret-by-default flag is on and there's no operator, convert the
	// operator to a caret.
	if cbd && m[1] == "" {
		m[1] = "^"
	}

	// Dispatch on the operator captured in m[1].
	switch m[1] {
	case "^":
		// Caret always expands to a range
		return expandCaret(v), nil
	case "~":
		// Tilde always expands to a range
		return expandTilde(v, wildMinor), nil
	case "!=":
		// Not equals expands to a range if no element isX(); otherwise expands
		// to a union of ranges
		return expandNeq(v, wildMinor, wildPatch), nil
	case "", "=":
		if wildPatch || wildMinor {
			// Equalling a wildcard has the same behavior as expanding tilde
			return expandTilde(v, wildMinor), nil
		}
		return v, nil
	case ">":
		return expandGreater(v, wildMinor, wildPatch, false), nil
	case ">=", "=>":
		return expandGreater(v, wildMinor, wildPatch, true), nil
	case "<":
		return expandLess(v, wildMinor, wildPatch, false), nil
	case "<=", "=<":
		return expandLess(v, wildMinor, wildPatch, true), nil
	default:
		// Shouldn't be possible to get here, unless the regex is allowing
		// predicate we don't know about...
		return nil, fmt.Errorf("Unrecognized predicate %q", m[1])
	}
}

// expandCaret expands ^X.Y.Z into the half-open range [X.Y.Z, (X+1).0.0).
// Below 1.0.0 the upper bound is shifted one segment right: [0.Y.Z, 0.(Y+1).0).
func expandCaret(v Version) Constraint {
	var maxv Version
	// Caret behaves like tilde below 1.0.0
	if v.major == 0 {
		maxv.minor = v.minor + 1
	} else {
		maxv.major = v.major + 1
	}

	return rangeConstraint{
		min:        v,
		max:        maxv,
		includeMin: true,
		includeMax: false,
	}
}

// expandTilde expands ~X.Y.Z into the half-open range [X.Y.Z, X.(Y+1).0).
// When the minor segment was a wildcard, the result is the same as caret.
func expandTilde(v Version, wildMinor bool) Constraint {
	if wildMinor {
		// When minor is wild on a tilde, behavior is same as caret
		return expandCaret(v)
	}

	maxv := Version{
		major: v.major,
		minor: v.minor + 1,
		patch: 0,
	}

	return rangeConstraint{
		min:        v,
		max:        maxv,
		includeMin: true,
		includeMax: false,
	}
}

// expandNeq expands a "not-equals" constraint.
//
// If the constraint has any wildcards, it will expand into a unionConstraint
// (which is how we represent a disjoint set). If there are no wildcards, it
// will expand to a rangeConstraint with no min or max, but having the one
// exception.
func expandNeq(v Version, wildMinor, wildPatch bool) Constraint {
	if !(wildMinor || wildPatch) {
		// No wildcards: the whole version line minus this one exact version.
		return rangeConstraint{
			min:  Version{special: zeroVersion},
			max:  Version{special: infiniteVersion},
			excl: []Version{v},
		}
	}

	// Create the low range with no min, and the max as the floor admitted by
	// the wildcard
	lr := rangeConstraint{
		min:        Version{special: zeroVersion},
		max:        v,
		includeMax: false,
	}

	// The high range uses the derived version (bumped depending on where the
	// wildcards were) as the min, and is inclusive
	minv := Version{
		major: v.major,
		minor: v.minor,
		patch: v.patch,
	}

	if wildMinor {
		minv.major++
	} else {
		minv.minor++
	}

	hr := rangeConstraint{
		min:        minv,
		max:        Version{special: infiniteVersion},
		includeMin: true,
	}

	return Union(lr, hr)
}

// expandGreater expands > and >= (eq selects which) into a range reaching
// up to infinity. A strict > combined with a wildcard bumps the version
// past everything the wildcard admits and makes the bound inclusive.
func expandGreater(v Version, wildMinor, wildPatch, eq bool) Constraint {
	if (wildMinor || wildPatch) && !eq {
		// wildcards negate the meaning of prerelease and other info
		v = Version{
			major: v.major,
			minor: v.minor,
			patch: v.patch,
		}

		// Not equal but with wildcards is the weird case - we have to bump up
		// the next version AND make it equal
		if wildMinor {
			v.major++
		} else {
			v.minor++
		}
		return rangeConstraint{
			min:        v,
			max:        Version{special: infiniteVersion},
			includeMin: true,
		}
	}

	return rangeConstraint{
		min:        v,
		max:        Version{special: infiniteVersion},
		includeMin: eq,
	}
}

// expandLess expands < and <= (eq selects which) into a range reaching
// down to zero. A <= combined with a wildcard bumps the version past
// everything the wildcard admits and makes the bound exclusive.
func expandLess(v Version, wildMinor, wildPatch, eq bool) Constraint {
	if eq && (wildMinor || wildPatch) {
		// wildcards negate the meaning of prerelease and other info
		v = Version{
			major: v.major,
			minor: v.minor,
			patch: v.patch,
		}
		if wildMinor {
			v.major++
		} else if wildPatch {
			v.minor++
		}
		return rangeConstraint{
			min:        Version{special: zeroVersion},
			max:        v,
			includeMax: false,
		}
	}

	return rangeConstraint{
		min:        Version{special: zeroVersion},
		max:        v,
		includeMax: eq,
	}
}

// isX reports whether the string is a version wildcard marker: "x", "X",
// or "*".
func isX(x string) bool {
	l := strings.ToLower(x)
	return l == "x" || l == "*"
}
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/version.go
package semver

import (
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"sync"
)

// The compiled version of the regex created at init() is cached here so it
// only needs to be created once.
var versionRegex *regexp.Regexp

var (
	// ErrInvalidSemVer is returned when a version is found to be invalid
	// while being parsed.
	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
)

// badVersionSegment is an error type; it lets us defer the string
// interpolation of the underlying error until the message is needed.
type badVersionSegment struct {
	e error
}

func (b badVersionSegment) Error() string {
	return fmt.Sprintf("Error parsing version segment: %s", b.e)
}

// CacheVersions controls whether or not parsed versions are cached. Defaults
// to true.
var CacheVersions = true

// versionCache memoizes NewVersion results (successes and failures alike),
// guarded by versionCacheLock.
var versionCache = make(map[string]vcache)
var versionCacheLock sync.RWMutex

// vcache is one cached parse result: the parsed Version, or the error the
// parse produced.
type vcache struct {
	v   Version
	err error
}

// SemVerRegex is the regular expression used to parse a semantic version.
const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`

// specialVersion distinguishes ordinary versions from the two sentinel
// values used as unbounded range endpoints.
type specialVersion uint8

const (
	notSpecial specialVersion = iota
	// zeroVersion compares below all ordinary versions (see Compare).
	zeroVersion
	// infiniteVersion compares above all ordinary versions (see Compare).
	infiniteVersion
)

// Version represents a single semantic version.
type Version struct {
	major, minor, patch uint64
	pre                 string
	metadata            string
	original            string
	special             specialVersion
}

func init() {
	// Anchor the pattern so the entire input must be a version.
	versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
}

// NewVersion parses a given version and returns an instance of Version or
// an error if unable to parse the version.
func NewVersion(v string) (Version, error) {
	// Fast path: return a previously parsed result (or previous failure).
	if CacheVersions {
		versionCacheLock.RLock()
		if sv, exists := versionCache[v]; exists {
			versionCacheLock.RUnlock()
			return sv.v, sv.err
		}
		versionCacheLock.RUnlock()
	}

	m := versionRegex.FindStringSubmatch(v)
	if m == nil {
		// Cache the failure, too, so repeated bad input skips the regexp.
		if CacheVersions {
			versionCacheLock.Lock()
			versionCache[v] = vcache{err: ErrInvalidSemVer}
			versionCacheLock.Unlock()
		}
		return Version{}, ErrInvalidSemVer
	}

	sv := Version{
		metadata: m[8],
		pre:      m[5],
		original: v,
	}

	var temp uint64
	// The major segment is mandatory; each segment is limited to 32 bits.
	temp, err := strconv.ParseUint(m[1], 10, 32)
	if err != nil {
		bvs := badVersionSegment{e: err}
		if CacheVersions {
			versionCacheLock.Lock()
			versionCache[v] = vcache{err: bvs}
			versionCacheLock.Unlock()
		}
		return Version{}, bvs
	}
	sv.major = temp

	// Minor and patch segments are optional and default to zero.
	if m[2] != "" {
		temp, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 32)
		if err != nil {
			bvs := badVersionSegment{e: err}
			if CacheVersions {
				versionCacheLock.Lock()
				versionCache[v] = vcache{err: bvs}
				versionCacheLock.Unlock()
			}
			return Version{}, bvs
		}
		sv.minor = temp
	} else {
		sv.minor = 0
	}

	if m[3] != "" {
		temp, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 32)
		if err != nil {
			bvs := badVersionSegment{e: err}
			if CacheVersions {
				versionCacheLock.Lock()
				versionCache[v] = vcache{err: bvs}
				versionCacheLock.Unlock()
			}
			return Version{}, bvs
		}
		sv.patch = temp
	} else {
		sv.patch = 0
	}

	if CacheVersions {
		versionCacheLock.Lock()
		versionCache[v] = vcache{v: sv}
		versionCacheLock.Unlock()
	}

	return sv, nil
}

// String converts a Version object to a string.
// Note, if the original version contained a leading v this version will not.
// See the Original() method to retrieve the original value. Semantic Versions
// don't contain a leading v per the spec. Instead it's optional on
// implementation.
func (v Version) String() string { return v.toString(false) } // ImpliedCaretString follows the same rules as String(), but in accordance with // the Constraint interface will always print a leading "=", as all Versions, // when acting as a Constraint, act as exact matches. func (v Version) ImpliedCaretString() string { return v.toString(true) } func (v Version) toString(ic bool) string { var buf bytes.Buffer var base string if ic { base = "=%d.%d.%d" } else { base = "%d.%d.%d" } fmt.Fprintf(&buf, base, v.major, v.minor, v.patch) if v.pre != "" { fmt.Fprintf(&buf, "-%s", v.pre) } if v.metadata != "" { fmt.Fprintf(&buf, "+%s", v.metadata) } return buf.String() } // Original returns the original value passed in to be parsed. func (v Version) Original() string { return v.original } // Major returns the major version. func (v *Version) Major() uint64 { return v.major } // Minor returns the minor version. func (v *Version) Minor() uint64 { return v.minor } // Patch returns the patch version. func (v *Version) Patch() uint64 { return v.patch } // Prerelease returns the pre-release version. func (v Version) Prerelease() string { return v.pre } // Metadata returns the metadata on the version. func (v Version) Metadata() string { return v.metadata } // LessThan tests if one version is less than another one. func (v Version) LessThan(o Version) bool { return v.Compare(o) < 0 } // GreaterThan tests if one version is greater than another one. func (v Version) GreaterThan(o Version) bool { return v.Compare(o) > 0 } // Equal tests if two versions are equal to each other. // Note, versions can be equal with different metadata since metadata // is not considered part of the comparable version. func (v Version) Equal(o Version) bool { return v.Compare(o) == 0 } // Compare compares this version to another one. It returns -1, 0, or 1 if // the version smaller, equal, or larger than the other version. // // Versions are compared by X.Y.Z. Build metadata is ignored. 
Prerelease is // lower than the version without a prerelease. func (v Version) Compare(o Version) int { // The special field supercedes all the other information. If it's not // equal, we can skip out early if v.special != o.special { switch v.special { case zeroVersion: return -1 case notSpecial: if o.special == zeroVersion { return 1 } return -1 case infiniteVersion: return 1 } } else if v.special != notSpecial { // If special fields are equal and not notSpecial, then they're // necessarily equal return 0 } // Compare the major, minor, and patch version for differences. If a // difference is found return the comparison. if d := compareSegment(v.Major(), o.Major()); d != 0 { return d } if d := compareSegment(v.Minor(), o.Minor()); d != 0 { return d } if d := compareSegment(v.Patch(), o.Patch()); d != 0 { return d } // At this point the major, minor, and patch versions are the same. ps := v.pre po := o.Prerelease() if ps == "" && po == "" { return 0 } if ps == "" { return 1 } if po == "" { return -1 } return comparePrerelease(ps, po) } // Matches checks that a verstions match. If they do not, // an error is returned indcating the problem; if it does, the error is nil. // This is part of the Constraint interface. func (v Version) Matches(v2 Version) error { if v.Equal(v2) { return nil } return VersionMatchFailure{v: v, other: v2} } // MatchesAny checks if an instance of a version matches a constraint which can // include anything matching the Constraint interface. func (v Version) MatchesAny(c Constraint) bool { if v2, ok := c.(Version); ok { return v.Equal(v2) } // The other implementations all have specific handling for this; fall // back on theirs. return c.MatchesAny(v) } // Intersect computes the intersection between the receiving Constraint and // passed Constraint, and returns a new Constraint representing the result. // This is part of the Constraint interface. 
func (v Version) Intersect(c Constraint) Constraint { if v2, ok := c.(Version); ok { if v.Equal(v2) { return v } return none{} } return c.Intersect(v) } // Union computes the union between the receiving Constraint and the passed // Constraint, and returns a new Constraint representing the result. // This is part of the Constraint interface. func (v Version) Union(c Constraint) Constraint { if v2, ok := c.(Version); ok && v.Equal(v2) { return v } return Union(v, c) } func (Version) _private() {} func (Version) _real() {} func compareSegment(v, o uint64) int { if v < o { return -1 } if v > o { return 1 } return 0 } func comparePrerelease(v, o string) int { // split the prelease versions by their part. The separator, per the spec, // is a . sparts := strings.Split(v, ".") oparts := strings.Split(o, ".") // Find the longer length of the parts to know how many loop iterations to // go through. slen := len(sparts) olen := len(oparts) l := slen if olen > slen { l = olen } // Iterate over each part of the prereleases to compare the differences. for i := 0; i < l; i++ { // Since the length of the parts can be different we need to create // a placeholder. This is to avoid out of bounds issues. stemp := "" if i < slen { stemp = sparts[i] } otemp := "" if i < olen { otemp = oparts[i] } d := comparePrePart(stemp, otemp) if d != 0 { return d } } // Reaching here means two versions are of equal value but have different // metadata (the part following a +). They are not identical in string form // but the version comparison finds them to be equal. return 0 } func comparePrePart(s, o string) int { // Fastpath if they are equal if s == o { return 0 } // When s or o are empty we can use the other in an attempt to determine // the response. 
	if o == "" {
		// n is the error returned by ParseUint, not a number: a nil n means
		// s is purely numeric. A numeric part sorts below an empty part; a
		// non-numeric part sorts above it.
		_, n := strconv.ParseUint(s, 10, 64)
		if n != nil {
			return -1
		}
		return 1
	}
	if s == "" {
		// Mirror of the case above with the operands swapped.
		_, n := strconv.ParseUint(o, 10, 64)
		if n != nil {
			return 1
		}
		return -1
	}

	// Neither part is empty and they differ (the equal case was handled by
	// the fastpath); fall back to lexicographic string comparison.
	if s > o {
		return 1
	}
	return -1
}

// numPartsEq reports whether two versions have identical numeric
// (major.minor.patch) parts, ignoring prerelease and metadata.
func numPartsEq(v1, v2 Version) bool {
	if v1.special != v2.special {
		return false
	}
	if v1.special != notSpecial {
		// If special fields are equal and not notSpecial, then the versions are
		// necessarily equal, so their numeric parts are too.
		return true
	}
	if v1.major != v2.major {
		return false
	}
	if v1.minor != v2.minor {
		return false
	}
	if v1.patch != v2.patch {
		return false
	}

	return true
}
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/range.go
package semver import ( "fmt" "sort" "strings" ) type rangeConstraint struct { min, max Version includeMin, includeMax bool excl []Version } func (rc rangeConstraint) Matches(v Version) error { var fail bool ispre := v.Prerelease() != "" rce := RangeMatchFailure{ v: v, rc: rc, } if !rc.minIsZero() { cmp := rc.min.Compare(v) if rc.includeMin { rce.typ = rerrLT fail = cmp == 1 } else { rce.typ = rerrLTE fail = cmp != -1 } if fail { return rce } } if !rc.maxIsInf() { cmp := rc.max.Compare(v) if rc.includeMax { rce.typ = rerrGT fail = cmp == -1 } else { rce.typ = rerrGTE fail = cmp != 1 } if fail { return rce } } for _, excl := range rc.excl { if excl.Equal(v) { rce.typ = rerrNE return rce } } // If the incoming version has prerelease info, it's usually a match failure // - unless all the numeric parts are equal between the incoming and the // minimum. if !fail && ispre && !numPartsEq(rc.min, v) { rce.typ = rerrPre return rce } return nil } func (rc rangeConstraint) dup() rangeConstraint { // Only need to do anything if there are some excludes if len(rc.excl) == 0 { return rc } var excl []Version excl = make([]Version, len(rc.excl)) copy(excl, rc.excl) return rangeConstraint{ min: rc.min, max: rc.max, includeMin: rc.includeMin, includeMax: rc.includeMax, excl: excl, } } func (rc rangeConstraint) minIsZero() bool { return rc.min.special == zeroVersion } func (rc rangeConstraint) maxIsInf() bool { return rc.max.special == infiniteVersion } func (rc rangeConstraint) Intersect(c Constraint) Constraint { switch oc := c.(type) { case any: return rc case none: return None() case unionConstraint: return oc.Intersect(rc) case Version: if err := rc.Matches(oc); err != nil { return None() } return c case rangeConstraint: nr := rangeConstraint{ min: rc.min, max: rc.max, includeMin: rc.includeMin, includeMax: rc.includeMax, } if !oc.minIsZero() { if nr.minIsZero() || nr.min.LessThan(oc.min) { nr.min = oc.min nr.includeMin = oc.includeMin } else if oc.min.Equal(nr.min) && 
!oc.includeMin { // intersection means we must follow the least inclusive nr.includeMin = false } } if !oc.maxIsInf() { if nr.maxIsInf() || nr.max.GreaterThan(oc.max) { nr.max = oc.max nr.includeMax = oc.includeMax } else if oc.max.Equal(nr.max) && !oc.includeMax { // intersection means we must follow the least inclusive nr.includeMax = false } } // Ensure any applicable excls from oc are included in nc for _, e := range append(rc.excl, oc.excl...) { if nr.Matches(e) == nil { nr.excl = append(nr.excl, e) } } if nr.minIsZero() || nr.maxIsInf() { return nr } if nr.min.Equal(nr.max) { // min and max are equal. if range is inclusive, return that // version; otherwise, none if nr.includeMin && nr.includeMax { return nr.min } return None() } if nr.min.GreaterThan(nr.max) { // min is greater than max - not possible, so we return none return None() } // range now fully validated, return what we have return nr default: panic("unknown type") } } func (rc rangeConstraint) Union(c Constraint) Constraint { switch oc := c.(type) { case any: return Any() case none: return rc case unionConstraint: return Union(rc, oc) case Version: if err := rc.Matches(oc); err == nil { return rc } else if len(rc.excl) > 0 { // TODO (re)checking like this is wasteful // ensure we don't have an excl-specific mismatch; if we do, remove // it and return that for k, e := range rc.excl { if e.Equal(oc) { excl := make([]Version, len(rc.excl)-1) if k == len(rc.excl)-1 { copy(excl, rc.excl[:k]) } else { copy(excl, append(rc.excl[:k], rc.excl[k+1:]...)) } return rangeConstraint{ min: rc.min, max: rc.max, includeMin: rc.includeMin, includeMax: rc.includeMax, excl: excl, } } } } if oc.LessThan(rc.min) { return unionConstraint{oc, rc.dup()} } if oc.Equal(rc.min) { ret := rc.dup() ret.includeMin = true return ret } if oc.Equal(rc.max) { ret := rc.dup() ret.includeMax = true return ret } // Only possibility left is gt return unionConstraint{rc.dup(), oc} case rangeConstraint: if (rc.minIsZero() && 
oc.maxIsInf()) || (rc.maxIsInf() && oc.minIsZero()) { rcl, ocl := len(rc.excl), len(oc.excl) // Quick check for open case if rcl == 0 && ocl == 0 { return Any() } // This is inefficient, but it's such an absurdly corner case... if len(dedupeExcls(rc.excl, oc.excl)) == rcl+ocl { // If deduped excludes are the same length as the individual // excludes, then they have no overlapping elements, so the // union knocks out the excludes and we're back to Any. return Any() } // There's at least some dupes, which are all we need to include nc := rangeConstraint{ min: Version{special: zeroVersion}, max: Version{special: infiniteVersion}, } for _, e1 := range rc.excl { for _, e2 := range oc.excl { if e1.Equal(e2) { nc.excl = append(nc.excl, e1) } } } return nc } else if areAdjacent(rc, oc) { // Receiver adjoins the input from below nc := rc.dup() nc.max = oc.max nc.includeMax = oc.includeMax nc.excl = append(nc.excl, oc.excl...) return nc } else if areAdjacent(oc, rc) { // Input adjoins the receiver from below nc := oc.dup() nc.max = rc.max nc.includeMax = rc.includeMax nc.excl = append(nc.excl, rc.excl...) return nc } else if rc.MatchesAny(oc) { // Receiver and input overlap; form a new range accordingly. 
nc := rangeConstraint{ min: Version{special: zeroVersion}, max: Version{special: infiniteVersion}, } // For efficiency, we simultaneously determine if either of the // ranges are supersets of the other, while also selecting the min // and max of the new range var info uint8 const ( lminlt uint8 = 1 << iota // left (rc) min less than right rminlt // right (oc) min less than left lmaxgt // left max greater than right rmaxgt // right max greater than left lsupr = lminlt | lmaxgt // left is superset of right rsupl = rminlt | rmaxgt // right is superset of left ) // Pick the min if !rc.minIsZero() { if oc.minIsZero() || rc.min.GreaterThan(oc.min) || (rc.min.Equal(oc.min) && !rc.includeMin && oc.includeMin) { info |= rminlt nc.min = oc.min nc.includeMin = oc.includeMin } else { info |= lminlt nc.min = rc.min nc.includeMin = rc.includeMin } } else if !oc.minIsZero() { info |= lminlt nc.min = rc.min nc.includeMin = rc.includeMin } // Pick the max if !rc.maxIsInf() { if oc.maxIsInf() || rc.max.LessThan(oc.max) || (rc.max.Equal(oc.max) && !rc.includeMax && oc.includeMax) { info |= rmaxgt nc.max = oc.max nc.includeMax = oc.includeMax } else { info |= lmaxgt nc.max = rc.max nc.includeMax = rc.includeMax } } else if oc.maxIsInf() { info |= lmaxgt nc.max = rc.max nc.includeMax = rc.includeMax } // Reincorporate any excluded versions if info&lsupr != lsupr { // rc is not superset of oc, so must walk oc.excl for _, e := range oc.excl { if rc.Matches(e) != nil { nc.excl = append(nc.excl, e) } } } if info&rsupl != rsupl { // oc is not superset of rc, so must walk rc.excl for _, e := range rc.excl { if oc.Matches(e) != nil { nc.excl = append(nc.excl, e) } } } return nc } else { // Don't call Union() here b/c it would duplicate work uc := constraintList{rc, oc} sort.Sort(uc) return unionConstraint(uc) } } panic("unknown type") } // isSupersetOf computes whether the receiver rangeConstraint is a superset of // the passed rangeConstraint. 
//
// This is NOT a strict superset comparison, so identical ranges will both
// report being supersets of each other.
//
// Note also that this does *not* compare excluded versions - it only compares
// range endpoints.
func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool {
	// rc's lower bound must be at or below rc2's. An equal bound only
	// qualifies if rc is not strictly exclusive where rc2 is inclusive.
	if !rc.minIsZero() {
		if rc2.minIsZero() || rc.min.GreaterThan(rc2.min) || (rc.min.Equal(rc2.min) && !rc.includeMin && rc2.includeMin) {
			return false
		}
	}

	// Symmetric check for the upper bound.
	if !rc.maxIsInf() {
		if rc2.maxIsInf() || rc.max.LessThan(rc2.max) || (rc.max.Equal(rc2.max) && !rc.includeMax && rc2.includeMax) {
			return false
		}
	}

	return true
}

// String renders the range using explicit operators (>=, <, etc.), treating
// the empty operator as =.
func (rc rangeConstraint) String() string {
	return rc.toString(false)
}

// ImpliedCaretString renders the range as NewConstraintIC() would parse it:
// a bare version implies a caret (^) range.
func (rc rangeConstraint) ImpliedCaretString() string {
	return rc.toString(true)
}

// toString produces the string form of the range, preferring the compact
// caret (^) or tilde (~) shorthand where the endpoints permit it, and falling
// back to explicit >=/>/<=/< operator pairs otherwise. Exclusions are always
// appended as !=X terms.
func (rc rangeConstraint) toString(impliedCaret bool) string {
	var pieces []string

	// We need to trigger the standard verbose handling from various points, so
	// wrap it in a function.
	noshort := func() {
		if !rc.minIsZero() {
			if rc.includeMin {
				pieces = append(pieces, fmt.Sprintf(">=%s", rc.min))
			} else {
				pieces = append(pieces, fmt.Sprintf(">%s", rc.min))
			}
		}

		if !rc.maxIsInf() {
			if rc.includeMax {
				pieces = append(pieces, fmt.Sprintf("<=%s", rc.max))
			} else {
				pieces = append(pieces, fmt.Sprintf("<%s", rc.max))
			}
		}
	}

	// Handle the possibility that we might be able to express the range
	// with a caret or tilde, as we prefer those forms.
	var caretstr string
	if impliedCaret {
		// Under implied-caret rendering, a caret range prints as the bare
		// minimum version.
		caretstr = "%s"
	} else {
		caretstr = "^%s"
	}

	switch {
	case rc.minIsZero() && rc.maxIsInf():
		// This if is internal because it's useful to know for the other cases
		// that we don't have special values at both bounds
		if len(rc.excl) == 0 {
			// Shouldn't be possible to reach from anything that can be done
			// outside the package, but best to cover it and be safe
			return "*"
		}
	case rc.minIsZero(), rc.includeMax, !rc.includeMin:
		// tilde and caret could never apply here
		noshort()
	case !rc.maxIsInf() && rc.max.Minor() == 0 && rc.max.Patch() == 0: // basic caret
		if rc.min.Major() == rc.max.Major()-1 && rc.min.Major() != 0 {
			pieces = append(pieces, fmt.Sprintf(caretstr, rc.min))
		} else {
			// range is too wide for caret, need standard operators
			noshort()
		}
	case !rc.maxIsInf() && rc.max.Major() != 0 && rc.max.Patch() == 0: // basic tilde
		if rc.min.Minor() == rc.max.Minor()-1 && rc.min.Major() == rc.max.Major() {
			pieces = append(pieces, fmt.Sprintf("~%s", rc.min))
		} else {
			// range is too wide for tilde, need standard operators
			noshort()
		}
	case !rc.maxIsInf() && rc.max.Major() == 0 && rc.max.Patch() == 0 && rc.max.Minor() != 0:
		// below 1.0.0, tilde is meaningless but caret is shifted to the
		// right (so it basically behaves the same as tilde does above 1.0.0)
		if rc.min.Minor() == rc.max.Minor()-1 {
			pieces = append(pieces, fmt.Sprintf(caretstr, rc.min))
		} else {
			noshort()
		}
	default:
		noshort()
	}

	// Exclusions have no shorthand; emit each as an explicit != term.
	for _, e := range rc.excl {
		pieces = append(pieces, fmt.Sprintf("!=%s", e))
	}

	return strings.Join(pieces, ", ")
}

// areAdjacent tests two constraints to determine if they are adjacent,
// but non-overlapping.
//
// If either constraint is not a range, returns false. We still allow it at the
// type level, however, to make the check convenient elsewhere.
//
// Assumes the first range is less than the second; it is incumbent on the
// caller to arrange the inputs appropriately.
func areAdjacent(c1, c2 Constraint) bool { var rc1, rc2 rangeConstraint var ok bool if rc1, ok = c1.(rangeConstraint); !ok { return false } if rc2, ok = c2.(rangeConstraint); !ok { return false } if !rc1.max.Equal(rc2.min) { return false } return (rc1.includeMax && !rc2.includeMin) || (!rc1.includeMax && rc2.includeMin) } func (rc rangeConstraint) MatchesAny(c Constraint) bool { if _, ok := rc.Intersect(c).(none); ok { return false } return true } func dedupeExcls(ex1, ex2 []Version) []Version { // TODO stupid inefficient, but these are really only ever going to be // small, so not worth optimizing right now var ret []Version oloop: for _, e1 := range ex1 { for _, e2 := range ex2 { if e1.Equal(e2) { continue oloop } } ret = append(ret, e1) } return append(ret, ex2...) } func (rangeConstraint) _private() {} func (rangeConstraint) _real() {}
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/doc.go
/*
Package semver provides the ability to work with Semantic Versions
(http://semver.org) in Go. Specifically it provides the ability to:

  * Parse semantic versions
  * Sort semantic versions
  * Check if a semantic version fits within a set of constraints
  * Optionally work with a `v` prefix

Parsing Semantic Versions

To parse a semantic version use the `NewVersion` function. For example,

    v, err := semver.NewVersion("1.2.3-beta.1+build345")

If there is an error the version wasn't parseable. The version object has
methods to get the parts of the version, compare it to other versions, convert
the version back into a string, and get the original string. For more details
please see the documentation at https://godoc.org/github.com/Masterminds/semver.

Sorting Semantic Versions

A set of versions can be sorted using the `sort` package from the standard
library. For example,

    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
    vs := make([]*semver.Version, len(raw))
    for i, r := range raw {
        v, err := semver.NewVersion(r)
        if err != nil {
            t.Errorf("Error parsing version: %s", err)
        }
        vs[i] = v
    }
    sort.Sort(semver.Collection(vs))

Checking Version Constraints

Checking a version against version constraints is one of the most featureful
parts of the package.

    c, err := semver.NewConstraint(">= 1.2.3")
    if err != nil {
        // Handle constraint not being parseable.
    }
    v, err := semver.NewVersion("1.3")
    if err != nil {
        // Handle version not being parseable.
    }
    // Check if the version meets the constraints. The a variable will be true.
    a := c.Check(v)

Basic Comparisons

There are two elements to the comparisons. First, a comparison string is a list
of comma separated and comparisons. These are then separated by || separated or
comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
greater than or equal to 4.2.3.

The basic comparisons are:

  * `=`: equal (aliased to no operator)
  * `!=`: not equal
  * `>`: greater than
  * `<`: less than
  * `>=`: greater than or equal to
  * `<=`: less than or equal to

Hyphen Range Comparisons

There are multiple methods to handle ranges and the first is hyphens ranges.
These look like:

  * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
  * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`

Wildcards In Comparisons

The `x`, `X`, and `*` characters can be used as a wildcard character. This
works for all comparison operators. When used on the `=` operator it falls
back to the patch level comparison (see tilde below). For example,

  * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
  * `>= 1.2.x` is equivalent to `>= 1.2.0`
  * `<= 2.x` is equivalent to `<= 3`
  * `*` is equivalent to `>= 0.0.0`

Tilde Range Comparisons (Patch)

The tilde (`~`) comparison operator is for patch level ranges when a minor
version is specified and major level changes when the minor number is missing.
For example,

  * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
  * `~1` is equivalent to `>= 1, < 2`
  * `~2.3` is equivalent to `>= 2.3, < 2.4`
  * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
  * `~1.x` is equivalent to `>= 1, < 2`

Caret Range Comparisons (Major)

The caret (`^`) comparison operator is for major level changes. This is useful
when comparisons of API versions as a major change is API breaking. For
example,

  * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
  * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
  * `^2.3` is equivalent to `>= 2.3, < 3`
  * `^2.x` is equivalent to `>= 2.0.0, < 3`
*/
package semver
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/union.go
package semver

import "strings"

// unionConstraint is an ordered set of disjoint realConstraints; a version
// matches the union if it matches any member.
type unionConstraint []realConstraint

// Matches returns nil if v satisfies at least one member constraint;
// otherwise it returns a MultiMatchFailure aggregating every member's
// failure.
func (uc unionConstraint) Matches(v Version) error {
	var uce MultiMatchFailure
	for _, c := range uc {
		err := c.Matches(v)
		if err == nil {
			return nil
		}
		// Members only ever produce MatchFailure values, so this assertion
		// is safe for realConstraint implementations.
		uce = append(uce, err.(MatchFailure))
	}

	return uce
}

// Intersect computes the intersection of the union with c2 by distributing
// the intersection across each member and re-unioning the non-empty results.
func (uc unionConstraint) Intersect(c2 Constraint) Constraint {
	var other []realConstraint

	switch tc2 := c2.(type) {
	case none:
		return None()
	case any:
		return uc
	case Version:
		return c2
	case rangeConstraint:
		other = append(other, tc2)
	case unionConstraint:
		other = c2.(unionConstraint)
	default:
		panic("unknown type")
	}

	var newc []Constraint
	// TODO there's a smarter way to do this than NxN, but...worth it?
	for _, c := range uc {
		for _, oc := range other {
			i := c.Intersect(oc)
			if !IsNone(i) {
				newc = append(newc, i)
			}
		}
	}

	return Union(newc...)
}

// MatchesAny reports whether any member of the union has a non-empty
// intersection with c.
func (uc unionConstraint) MatchesAny(c Constraint) bool {
	for _, ic := range uc {
		if ic.MatchesAny(c) {
			return true
		}
	}
	return false
}

// Union delegates to the package-level Union, which re-normalizes the result.
func (uc unionConstraint) Union(c Constraint) Constraint {
	return Union(uc, c)
}

// String joins the members' explicit-operator forms with " || ".
func (uc unionConstraint) String() string {
	var pieces []string
	for _, c := range uc {
		pieces = append(pieces, c.String())
	}

	return strings.Join(pieces, " || ")
}

// ImpliedCaretString joins the members' implied-caret forms with " || ".
func (uc unionConstraint) ImpliedCaretString() string {
	var pieces []string
	for _, c := range uc {
		pieces = append(pieces, c.ImpliedCaretString())
	}
	return strings.Join(pieces, " || ")
}

func (unionConstraint) _private() {}

// constraintList sorts realConstraints (Versions and rangeConstraints) by
// their lower bound, for use by Union's merge pass. It implements
// sort.Interface; Push/Pop exist for heap.Interface but are currently unused
// (see the TODO in Union).
type constraintList []realConstraint

func (cl constraintList) Len() int {
	return len(cl)
}

func (cl constraintList) Swap(i, j int) {
	cl[i], cl[j] = cl[j], cl[i]
}

// Less orders by lower bound: a Version compares directly, a rangeConstraint
// compares by its min (a zero min sorts first). Versions and range mins that
// coincide are deliberately tie-broken so the ordering is deterministic even
// without a stable sort.
func (cl constraintList) Less(i, j int) bool {
	ic, jc := cl[i], cl[j]

	switch tic := ic.(type) {
	case Version:
		switch tjc := jc.(type) {
		case Version:
			return tic.LessThan(tjc)
		case rangeConstraint:
			if tjc.minIsZero() {
				return false
			}

			// Because we don't assume stable sort, always put versions ahead of
			// range mins if they're equal and includeMin is on
			//
			// NOTE(review): both this branch and its mirror below return
			// false, making such pairs order-equal; Union's merge loop
			// handles either resulting order — TODO confirm intent.
			if tjc.includeMin && tic.Equal(tjc.min) {
				return false
			}
			return tic.LessThan(tjc.min)
		}
	case rangeConstraint:
		switch tjc := jc.(type) {
		case Version:
			if tic.minIsZero() {
				return true
			}

			// Because we don't assume stable sort, always put versions ahead of
			// range mins if they're equal and includeMin is on
			if tic.includeMin && tjc.Equal(tic.min) {
				return false
			}
			return tic.min.LessThan(tjc)
		case rangeConstraint:
			if tic.minIsZero() {
				return true
			}
			if tjc.minIsZero() {
				return false
			}
			return tic.min.LessThan(tjc.min)
		}
	}

	panic("unreachable")
}

// Push appends x; part of heap.Interface (currently unused).
func (cl *constraintList) Push(x interface{}) {
	*cl = append(*cl, x.(realConstraint))
}

// Pop removes and returns the last element; part of heap.Interface
// (currently unused).
func (cl *constraintList) Pop() interface{} {
	o := *cl
	c := o[len(o)-1]
	*cl = o[:len(o)-1]
	return c
}
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/constraints.go
package semver

import (
	"fmt"
	"regexp"
	"sort"
	"strings"
	"sync"
)

var constraintRegex *regexp.Regexp
var constraintRangeRegex *regexp.Regexp

// cvRegex matches a single (possibly wildcarded) version with optional
// prerelease and build-metadata segments; it is the building block for the
// constraint regexes assembled in init().
const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`

func init() {
	// All operators a single comparison may begin with; the empty string
	// covers the bare-version (no-operator) form.
	constraintOps := []string{
		"",
		"=",
		"!=",
		">",
		"<",
		">=",
		"=>",
		"<=",
		"=<",
		"~",
		"~>",
		"^",
	}

	ops := make([]string, 0, len(constraintOps))
	for _, op := range constraintOps {
		ops = append(ops, regexp.QuoteMeta(op))
	}

	constraintRegex = regexp.MustCompile(fmt.Sprintf(
		`^\s*(%s)\s*(%s)\s*$`,
		strings.Join(ops, "|"),
		cvRegex))

	// Matches "X - Y" hyphen ranges so they can be rewritten into >=/<= pairs.
	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
		`\s*(%s)\s* - \s*(%s)\s*`,
		cvRegex, cvRegex))
}

// Constraint is the interface that wraps checking a semantic version against
// one or more constraints to find a match.
type Constraint interface {
	// Constraints compose the fmt.Stringer interface. This method is the
	// bijective inverse of NewConstraint(): if a string yielded from this
	// method is passed to NewConstraint(), a byte-identical instance of the
	// original Constraint will be returned.
	fmt.Stringer

	// ImpliedCaretString converts the Constraint to a string in the same manner
	// as String(), but treats the empty operator as equivalent to ^, rather
	// than =.
	//
	// In the same way that String() is the inverse of NewConstraint(), this
	// method is the inverse of NewConstraintIC().
	ImpliedCaretString() string

	// Matches checks that a version satisfies the constraint. If it does not,
	// an error is returned indicating the problem; if it does, the error is nil.
	Matches(v Version) error

	// Intersect computes the intersection between the receiving Constraint and
	// passed Constraint, and returns a new Constraint representing the result.
	Intersect(Constraint) Constraint

	// Union computes the union between the receiving Constraint and the passed
	// Constraint, and returns a new Constraint representing the result.
	Union(Constraint) Constraint

	// MatchesAny returns a bool indicating whether there exists any version that
	// satisfies both the receiver constraint, and the passed Constraint.
	//
	// In other words, this reports whether an intersection would be non-empty.
	MatchesAny(Constraint) bool

	// Restrict implementation of this interface to this package. We need the
	// flexibility of an interface, but we cover all possibilities here; closing
	// off the interface to external implementation lets us safely do tricks
	// with types for magic types (none and any)
	_private()
}

// realConstraint is used internally to differentiate between any, none, and
// unionConstraints, vs. Version and rangeConstraints.
type realConstraint interface {
	Constraint
	_real()
}

// CacheConstraints controls whether or not parsed constraints are cached
var CacheConstraints = true
var constraintCache = make(map[string]ccache)
var constraintCacheIC = make(map[string]ccache)
var constraintCacheLock sync.RWMutex

// ccache is a cache entry: the parse result for an input string, which is
// either a Constraint or the parse error.
type ccache struct {
	c   Constraint
	err error
}

// NewConstraint takes a string representing a set of semver constraints, and
// returns a corresponding Constraint object. Constraints are suitable
// for checking Versions for admissibility, or combining with other Constraint
// objects.
//
// If an invalid constraint string is passed, more information is provided in
// the returned error string.
func NewConstraint(in string) (Constraint, error) {
	return newConstraint(in, false, constraintCache)
}

// NewConstraintIC ("Implied Caret") is the same as NewConstraint, except that
// it treats an absent operator as being equivalent to ^ instead of =.
func NewConstraintIC(in string) (Constraint, error) {
	return newConstraint(in, true, constraintCacheIC)
}

// newConstraint parses in as "||"-separated groups of ","-separated
// comparisons, intersecting within each group and unioning across groups.
// Results (including parse errors) are memoized in cache when
// CacheConstraints is enabled; ic selects implied-caret parsing.
func newConstraint(in string, ic bool, cache map[string]ccache) (Constraint, error) {
	if CacheConstraints {
		// Fast path: return a previously memoized result (success or error).
		constraintCacheLock.RLock()
		if final, exists := cache[in]; exists {
			constraintCacheLock.RUnlock()
			return final.c, final.err
		}
		constraintCacheLock.RUnlock()
	}

	// Rewrite - ranges into a comparison operation.
	c := rewriteRange(in)

	ors := strings.Split(c, "||")
	or := make([]Constraint, len(ors))
	for k, v := range ors {
		cs := strings.Split(v, ",")
		result := make([]Constraint, len(cs))
		for i, s := range cs {
			pc, err := parseConstraint(s, ic)
			if err != nil {
				if CacheConstraints {
					// Memoize the failure so repeated bad input skips reparsing.
					constraintCacheLock.Lock()
					cache[in] = ccache{err: err}
					constraintCacheLock.Unlock()
				}
				return nil, err
			}

			result[i] = pc
		}
		or[k] = Intersection(result...)
	}

	final := Union(or...)

	if CacheConstraints {
		constraintCacheLock.Lock()
		cache[in] = ccache{c: final}
		constraintCacheLock.Unlock()
	}

	return final, nil
}

// Intersection computes the intersection between N Constraints, returning as
// compact a representation of the intersection as possible.
//
// No error is indicated if all the sets are collectively disjoint; you must inspect the
// return value to see if the result is the empty set (by calling IsNone() on
// it).
func Intersection(cg ...Constraint) Constraint {
	// If there's zero or one constraints in the group, we can quit fast
	switch len(cg) {
	case 0:
		// Zero members, only sane thing to do is return none
		return None()
	case 1:
		// Just one member means that's our final constraint
		return cg[0]
	}

	// Left-fold Intersect over the remainder, short-circuiting once the
	// accumulator collapses to the empty set.
	car, cdr := cg[0], cg[1:]
	for _, c := range cdr {
		if IsNone(car) {
			return None()
		}
		car = car.Intersect(c)
	}

	return car
}

// Union takes a variable number of constraints, and returns the most compact
// possible representation of those constraints.
//
// This effectively ORs together all the provided constraints. If any of the
// included constraints are the set of all versions (any), that supersedes
// everything else.
func Union(cg ...Constraint) Constraint {
	// If there's zero or one constraints in the group, we can quit fast
	switch len(cg) {
	case 0:
		// Zero members, only sane thing to do is return none
		return None()
	case 1:
		// One member, so the result will just be that
		return cg[0]
	}

	// Preliminary pass to look for 'any' in the current set (and bail out early
	// if found), but also construct a []realConstraint for everything else
	var real constraintList

	for _, c := range cg {
		switch tc := c.(type) {
		case any:
			return c
		case none:
			// The empty set contributes nothing to a union.
			continue
		case Version:
			//heap.Push(&real, tc)
			real = append(real, tc)
		case rangeConstraint:
			//heap.Push(&real, tc)
			real = append(real, tc)
		case unionConstraint:
			// Flatten nested unions into the working list.
			real = append(real, tc...)
			//for _, c2 := range tc {
			//heap.Push(&real, c2)
			//}
		default:
			panic("unknown constraint type")
		}
	}

	// TODO wtf why isn't heap working...so, ugh, have to do this

	// Sort both the versions and ranges into ascending order
	sort.Sort(real)

	// Iteratively merge the constraintList elements
	var nuc unionConstraint
	for _, c := range real {
		if len(nuc) == 0 {
			nuc = append(nuc, c)
			continue
		}

		last := nuc[len(nuc)-1]
		switch lt := last.(type) {
		case Version:
			switch ct := c.(type) {
			case Version:
				// Two versions in a row; only append if they're not equal
				if !lt.Equal(ct) {
					nuc = append(nuc, ct)
				}
			case rangeConstraint:
				// Last was version, current is range. constraintList sorts by
				// min version, so it's guaranteed that the version will be less
				// than the range's min, guaranteeing that these are disjoint.
				//
				// ...almost. If the min of the range is the same as the
				// version, then a union should merge the two by making the
				// range inclusive at the bottom.
				if lt.Equal(ct.min) {
					ct.includeMin = true
					nuc[len(nuc)-1] = ct
				} else {
					nuc = append(nuc, c)
				}
			}
		case rangeConstraint:
			switch ct := c.(type) {
			case Version:
				// Last was range, current is version. constraintList sort
				// invariants guarantee that the version will be greater than
				// the min, so we have to determine if the version is less
				// than the max. If it is, we subsume it into the range with a
				// Union call.
				//
				// Lazy version: just union them and let rangeConstraint figure
				// it out, then switch on the result type.
				c2 := lt.Union(ct)
				if crc, ok := c2.(realConstraint); ok {
					nuc[len(nuc)-1] = crc
				} else {
					// Otherwise, all it can be is a union constraint. First
					// item in the union will be the same range, second will be the
					// version, so append onto nuc from one back from the end
					nuc = append(nuc[:len(nuc)-1], c2.(unionConstraint)...)
				}
			case rangeConstraint:
				if lt.MatchesAny(ct) || areAdjacent(lt, ct) {
					// If the previous range overlaps or is adjacent to the
					// current range, we know they'll be able to merge together,
					// so overwrite the last item in nuc with the result of that
					// merge (which is what Union will produce)
					nuc[len(nuc)-1] = lt.Union(ct).(realConstraint)
				} else {
					nuc = append(nuc, c)
				}
			}
		}
	}

	if len(nuc) == 1 {
		return nuc[0]
	}
	return nuc
}
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/magic.go
package semver import "errors" var errNone = errors.New("The 'None' constraint admits no versions.") // Any is a constraint that is satisfied by any valid semantic version. type any struct{} // Any creates a constraint that will match any version. func Any() Constraint { return any{} } func (any) String() string { return "*" } func (any) ImpliedCaretString() string { return "*" } // Matches checks that a version satisfies the constraint. As all versions // satisfy Any, this always returns nil. func (any) Matches(v Version) error { return nil } // Intersect computes the intersection between two constraints. // // As Any is the set of all possible versions, any intersection with that // infinite set will necessarily be the entirety of the second set. Thus, this // simply returns the passed constraint. func (any) Intersect(c Constraint) Constraint { return c } // MatchesAny indicates whether there exists any version that can satisfy both // this constraint, and the passed constraint. As all versions // satisfy Any, this is always true - unless none is passed. func (any) MatchesAny(c Constraint) bool { if _, ok := c.(none); ok { return false } return true } func (any) Union(c Constraint) Constraint { return Any() } func (any) _private() {} // None is an unsatisfiable constraint - it represents the empty set. type none struct{} // None creates a constraint that matches no versions (the empty set). func None() Constraint { return none{} } func (none) String() string { return "" } func (none) ImpliedCaretString() string { return "" } // Matches checks that a version satisfies the constraint. As no version can // satisfy None, this always fails (returns an error). func (none) Matches(v Version) error { return errNone } // Intersect computes the intersection between two constraints. // // None is the empty set of versions, and any intersection with the empty set is // necessarily the empty set. Thus, this always returns None. 
func (none) Intersect(Constraint) Constraint { return None() } func (none) Union(c Constraint) Constraint { return c } // MatchesAny indicates whether there exists any version that can satisfy the // constraint. As no versions satisfy None, this is always false. func (none) MatchesAny(c Constraint) bool { return false } func (none) _private() {} // IsNone indicates if a constraint will match no versions - that is, the // constraint represents the empty set. func IsNone(c Constraint) bool { _, ok := c.(none) return ok } // IsAny indicates if a constraint will match any and all versions. func IsAny(c Constraint) bool { _, ok := c.(any) return ok }
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/collection.go
package semver // Collection is a collection of Version instances and implements the sort // interface. See the sort package for more details. // https://golang.org/pkg/sort/ type Collection []Version // Len returns the length of a collection. The number of Version instances // on the slice. func (c Collection) Len() int { return len(c) } // Less is needed for the sort interface to compare two Version objects on the // slice. If checks if one is less than the other. func (c Collection) Less(i, j int) bool { return c[i].LessThan(c[j]) } // Swap is needed for the sort interface to replace the Version objects // at two different positions in the slice. func (c Collection) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
semver
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/vendor/github.com/Masterminds/semver/error.go
package semver import ( "bytes" "fmt" ) var rangeErrs = [...]string{ "%s is less than the minimum of %s", "%s is less than or equal to the minimum of %s", "%s is greater than the maximum of %s", "%s is greater than or equal to the maximum of %s", "%s is specifically disallowed in %s", "%s has prerelease data, so is omitted by the range %s", } const ( rerrLT = iota rerrLTE rerrGT rerrGTE rerrNE rerrPre ) // MatchFailure is an interface for failures to find a Constraint match. type MatchFailure interface { error // Pair returns the version and constraint that did not match prompting // the error. Pair() (v Version, c Constraint) } // RangeMatchFailure occurs when a version is not within a constraint range. type RangeMatchFailure struct { v Version rc rangeConstraint typ int8 } func (rce RangeMatchFailure) Error() string { return fmt.Sprintf(rangeErrs[rce.typ], rce.v, rce.rc) } // Pair returns the version and constraint that did not match. Part of the // MatchFailure interface. func (rce RangeMatchFailure) Pair() (v Version, r Constraint) { return rce.v, rce.rc } // VersionMatchFailure occurs when two versions do not match each other. type VersionMatchFailure struct { v, other Version } func (vce VersionMatchFailure) Error() string { return fmt.Sprintf("%s is not equal to %s", vce.v, vce.other) } // Pair returns the two versions that did not match. Part of the // MatchFailure interface. func (vce VersionMatchFailure) Pair() (v Version, r Constraint) { return vce.v, vce.other } // MultiMatchFailure errors occur when there are multiple constraints a version // is being checked against and there are failures. type MultiMatchFailure []MatchFailure func (mmf MultiMatchFailure) Error() string { var buf bytes.Buffer for k, e := range mmf { if k < len(mmf)-1 { fmt.Fprintf(&buf, "%s\n", e) } else { fmt.Fprintf(&buf, e.Error()) } } return buf.String() }
txn_writer
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/txn_writer/expected_diff_output.txt
Add: [[projects]] name = "github.com/sdboyer/deptest" packages = ["."] revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" version = "v1.0.0" [[projects]] name = "github.com/stuff/realthing" packages = ["."] revision = "1f02e52d6bac308da54ab84a234c58a98ca82347" version = "2.0.0" Remove: [[projects]] name = "github.com/stuff/placeholder" packages = ["."] revision = "6694017eeb4e20fd277b049bf29dba4895c97234" version = "2.0.0" Modify: [[projects]] branch = "- master" name = "github.com/foo/bar" packages = ["- placeholder","+ thing"] revision = "f24338400f072ef18125ae0fbe6b06fe6d1783e7 -> 2a3a211e171803acb82d1d5d42ceb53228f51751" source = "+ http://github.example.com/foo/bar" version = "+ 1.2.0"
txn_writer
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/txn_writer/expected_lock.toml
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. [[projects]] digest = "1:c4844614e2b12233bb037afec536831b92a4f58f7b712432b978d34df291e43a" name = "github.com/sdboyer/dep-test" packages = ["."] pruneopts = "" revision = "2a3a211e171803acb82d1d5d42ceb53228f51751" version = "1.0.0" [solve-meta] analyzer-name = "" analyzer-version = 0 input-imports = [] solver-name = "" solver-version = 0
txn_writer
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/txn_writer/expected_manifest.toml
# Gopkg.toml example # # Refer to https://golang.github.io/dep/docs/Gopkg.toml.html # for detailed Gopkg.toml documentation. # # required = ["github.com/user/thing/cmd/thing"] # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] # # [[constraint]] # name = "github.com/user/project" # version = "1.0.0" # # [[constraint]] # name = "github.com/user/project2" # branch = "dev" # source = "github.com/myfork/project2" # # [[override]] # name = "github.com/x/y" # version = "2.4.0" # # [prune] # non-go = false # go-tests = true # unused-packages = true [[constraint]] name = "github.com/sdboyer/dep-test" version = "1.0.0"
manifest
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/manifest/error1.toml
ignored = ["github.com/foo/bar"] [[constraint]] name = "github.com/golang/dep" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" version = "^v0.12.0" source = "https://github.com/golang/dep" [[override]] name = "github.com/golang/dep" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" version = "^v0.12.0" source = "https://github.com/golang/dep"
manifest
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/manifest/golden.toml
ignored = ["github.com/foo/bar"] [[constraint]] name = "github.com/babble/brook" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" [[constraint]] name = "github.com/golang/dep" version = "0.12.0" [[override]] branch = "master" name = "github.com/golang/dep" source = "https://github.com/golang/dep" [prune] non-go = true
manifest
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/manifest/error2.toml
ignored = ["github.com/foo/bar"] [[constraint]] name = "github.com/golang/dep" branch = "master" [[constraint]] name = "github.com/golang/dep" branch = "master"
manifest
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/manifest/error3.toml
ignored = ["github.com/foo/bar"] [[override]] name = "github.com/golang/dep" branch = "master" [[override]] name = "github.com/golang/dep" branch = "master"
analyzer
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/analyzer/Gopkg.toml
[[constraint]] name = "github.com/golang/dep" version = ">=0.12.0, <1.0.0" [[constraint]] name = "github.com/pkg/errors" version = ">=0.8.0, <1.0.0"
lock
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/lock/golden0.toml
[[projects]] branch = "master" digest = "1:666f6f" name = "github.com/golang/dep" packages = ["."] pruneopts = "" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" [solve-meta] analyzer-name = "" analyzer-version = 0 input-imports = [] solver-name = "" solver-version = 0
lock
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/lock/error1.toml
[[projects]] name = "github.com/golang/dep" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" packages = ["."] digest = "1:000aaa2a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e"
lock
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/lock/error0.toml
[[projects]] name = "github.com/golang/dep" branch = "master" version = "v0.12.0" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" packages = ["."]
lock
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/lock/error2.toml
[[projects]] name = "github.com/golang/dep" packages = ["."]
lock
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/testdata/lock/golden1.toml
[[projects]] digest = "1:666f6f" name = "github.com/golang/dep" packages = ["."] pruneopts = "NUT" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" version = "0.12.2" [solve-meta] analyzer-name = "" analyzer-version = 0 input-imports = [] solver-name = "" solver-version = 0
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"bytes"
	"context"
	"fmt"
	"log"
	"sync"

	"github.com/golang/dep/gps/pkgtree"
	"github.com/pkg/errors"
)

// sourceState represents the states that a source can be in, depending on how
// much search and discovery work has been done by a source's managing gateway.
//
// These are basically used to achieve a cheap approximation of a FSM.
type sourceState int32

const (
	// sourceExistsUpstream means the chosen source was verified upstream, during this execution.
	sourceExistsUpstream sourceState = 1 << iota
	// sourceExistsLocally means the repo was retrieved in the past.
	sourceExistsLocally
	// sourceHasLatestVersionList means the version list was refreshed within the cache window.
	sourceHasLatestVersionList
	// sourceHasLatestLocally means the repo was pulled fresh during this execution.
	sourceHasLatestLocally
)

// String renders the state as a "|"-joined list of the flag names that are
// set, in declaration order.
func (state sourceState) String() string {
	var b bytes.Buffer
	// Pair each flag with its name via embedded fields, so s.sourceState is
	// the flag value and s.string is its label.
	for _, s := range []struct {
		sourceState
		string
	}{
		{sourceExistsUpstream, "sourceExistsUpstream"},
		{sourceExistsLocally, "sourceExistsLocally"},
		{sourceHasLatestVersionList, "sourceHasLatestVersionList"},
		{sourceHasLatestLocally, "sourceHasLatestLocally"},
	} {
		if state&s.sourceState > 0 {
			if b.Len() > 0 {
				b.WriteString("|")
			}
			b.WriteString(s.string)
		}
	}
	return b.String()
}

// srcReturn is the result pair delivered on a proto-source channel: either a
// ready gateway or the error that prevented its creation.
type srcReturn struct {
	*sourceGateway
	error
}

// sourceCoordinator deduplicates and hands out sourceGateways, ensuring at
// most one gateway exists per upstream source URL.
type sourceCoordinator struct {
	supervisor *supervisor
	deducer    deducer
	srcmut     sync.RWMutex // guards srcs and srcIdx
	srcs       map[string]*sourceGateway
	nameToURL  map[string]string
	psrcmut    sync.Mutex // guards protoSrcs map
	protoSrcs  map[string][]chan srcReturn
	cachedir   string
	cache      sourceCache
	logger     *log.Logger
}

// newSourceCoordinator returns a new sourceCoordinator.
// Passing a nil sourceCache defaults to an in-memory cache.
func newSourceCoordinator(superv *supervisor, deducer deducer, cachedir string, cache sourceCache, logger *log.Logger) *sourceCoordinator {
	// A nil cache degrades gracefully to a non-persistent in-memory cache.
	if cache == nil {
		cache = memoryCache{}
	}
	return &sourceCoordinator{
		supervisor: superv,
		deducer:    deducer,
		cachedir:   cachedir,
		cache:      cache,
		logger:     logger,
		srcs:       make(map[string]*sourceGateway),
		nameToURL:  make(map[string]string),
		protoSrcs:  make(map[string][]chan srcReturn),
	}
}

// close releases the underlying source cache. Errors are logged rather than
// returned; this is best-effort teardown.
func (sc *sourceCoordinator) close() {
	if err := sc.cache.close(); err != nil {
		sc.logger.Println(errors.Wrap(err, "failed to close the source cache"))
	}
}

// getSourceGatewayFor returns the sourceGateway responsible for id, creating
// it if necessary. Concurrent calls for the same (case-folded) name are
// coalesced so only one goroutine performs deduction and gateway setup.
func (sc *sourceCoordinator) getSourceGatewayFor(ctx context.Context, id ProjectIdentifier) (*sourceGateway, error) {
	if err := sc.supervisor.ctx.Err(); err != nil {
		return nil, err
	}

	normalizedName := id.normalizedSource()

	// Fast path: the URL for this exact name is already recorded.
	sc.srcmut.RLock()
	if url, has := sc.nameToURL[normalizedName]; has {
		srcGate, has := sc.srcs[url]
		sc.srcmut.RUnlock()
		if has {
			return srcGate, nil
		}
		panic(fmt.Sprintf("%q was URL for %q in nameToURL, but no corresponding srcGate in srcs map", url, normalizedName))
	}

	// Without a direct match, we must fold the input name to a generally
	// stable, caseless variant and primarily work from that. This ensures that
	// on case-insensitive filesystems, we do not end up with multiple
	// sourceGateways for paths that vary only by case. We perform folding
	// unconditionally, independent of whether the underlying fs is
	// case-sensitive, in order to ensure uniform behavior.
	//
	// This has significant implications. It is effectively deciding that the
	// ProjectRoot portion of import paths are case-insensitive, which is by no
	// means an invariant maintained by all hosting systems. If this presents a
	// problem in practice, then we can explore expanding the deduction system
	// to include case-sensitivity-for-roots metadata and treat it on a
	// host-by-host basis. Such cases would still be rejected by the Go
	// toolchain's compiler, though, and case-sensitivity in root names is
	// likely to be at least frowned on if not disallowed by most hosting
	// systems. So we follow this path, which is both a vastly simpler solution
	// and one that seems quite likely to work in practice.
	foldedNormalName := toFold(normalizedName)
	notFolded := foldedNormalName != normalizedName
	if notFolded {
		// If the folded name differs from the input name, then there may
		// already be an entry for it in the nameToURL map, so check again.
		if url, has := sc.nameToURL[foldedNormalName]; has {
			srcGate, has := sc.srcs[url]

			// There was a match on the canonical folded variant. Upgrade to a
			// write lock, so that future calls on this name don't need to
			// burn cycles on folding.
			sc.srcmut.RUnlock()
			sc.srcmut.Lock()
			// It may be possible that another goroutine could interleave
			// between the unlock and re-lock. Even if they do, though, they'll
			// only have recorded the same url value as we have here. In other
			// words, these operations commute, so we can safely write here
			// without checking again.
			sc.nameToURL[normalizedName] = url
			sc.srcmut.Unlock()
			if has {
				return srcGate, nil
			}
			panic(fmt.Sprintf("%q was URL for %q in nameToURL, but no corresponding srcGate in srcs map", url, normalizedName))
		}
	}
	sc.srcmut.RUnlock()

	// No gateway exists for this path yet; set up a proto, being careful to fold
	// together simultaneous attempts on the same case-folded path.
	sc.psrcmut.Lock()
	if chans, has := sc.protoSrcs[foldedNormalName]; has {
		// Another goroutine is already working on this normalizedName. Fold
		// in with that work by attaching our return channels to the list.
		rc := make(chan srcReturn, 1)
		sc.protoSrcs[foldedNormalName] = append(chans, rc)
		sc.psrcmut.Unlock()
		ret := <-rc
		return ret.sourceGateway, ret.error
	}
	sc.protoSrcs[foldedNormalName] = []chan srcReturn{}
	sc.psrcmut.Unlock()

	// doReturn fans the final result out to every goroutine that folded in
	// while we were working, then clears the proto entry.
	doReturn := func(sg *sourceGateway, err error) {
		ret := srcReturn{sourceGateway: sg, error: err}
		sc.psrcmut.Lock()
		for _, rc := range sc.protoSrcs[foldedNormalName] {
			rc <- ret
		}
		delete(sc.protoSrcs, foldedNormalName)
		sc.psrcmut.Unlock()
	}

	pd, err := sc.deducer.deduceRootPath(ctx, normalizedName)
	if err != nil {
		// As in the deducer, don't cache errors so that externally-driven retry
		// strategies can be constructed.
		doReturn(nil, err)
		return nil, err
	}

	// It'd be quite the feat - but not impossible - for a gateway
	// corresponding to this normalizedName to have slid into the main
	// sources map after the initial unlock, but before this goroutine got
	// scheduled. Guard against that by checking the main sources map again
	// and bailing out if we find an entry.
	sc.srcmut.RLock()
	if url, has := sc.nameToURL[foldedNormalName]; has {
		if srcGate, has := sc.srcs[url]; has {
			sc.srcmut.RUnlock()
			doReturn(srcGate, nil)
			return srcGate, nil
		}
		panic(fmt.Sprintf("%q was URL for %q in nameToURL, but no corresponding srcGate in srcs map", url, normalizedName))
	}
	sc.srcmut.RUnlock()

	sc.srcmut.Lock()
	defer sc.srcmut.Unlock()

	// Get or create a sourceGateway, trying each candidate maybe-source
	// produced by deduction in turn.
	var srcGate *sourceGateway
	var url, unfoldedURL string
	var errs errorSlice
	for _, m := range pd.mb {
		url = m.URL().String()
		if notFolded {
			// If the normalizedName and foldedNormalName differ, then we're pretty well
			// guaranteed that returned URL will also need folding into canonical form.
			unfoldedURL = url
			url = toFold(url)
		}
		if sg, has := sc.srcs[url]; has {
			srcGate = sg
			break
		}
		src, err := m.try(ctx, sc.cachedir)
		if err == nil {
			cache := sc.cache.newSingleSourceCache(id)
			srcGate, err = newSourceGateway(ctx, src, sc.supervisor, sc.cachedir, cache)
			if err == nil {
				sc.srcs[url] = srcGate
				break
			}
		}
		errs = append(errs, err)
	}
	if srcGate == nil {
		doReturn(nil, errs)
		return nil, errs
	}

	// Record the name -> URL mapping, making sure that we also get the
	// self-mapping.
	sc.nameToURL[foldedNormalName] = url
	if url != foldedNormalName {
		sc.nameToURL[url] = url
	}

	// Make sure we have both the folded and unfolded names and URLs recorded in
	// the map, if the input needed folding.
	if notFolded {
		sc.nameToURL[normalizedName] = url
		sc.nameToURL[unfoldedURL] = url
	}

	doReturn(srcGate, nil)
	return srcGate, nil
}

// sourceGateways manage all incoming calls for data from sources, serializing
// and caching them as needed.
type sourceGateway struct {
	cachedir string
	srcState sourceState
	src      source
	cache    singleSourceCache
	mu       sync.Mutex // global lock, serializes all behaviors
	suprvsr  *supervisor
}

// newSourceGateway returns a new gateway for src. If the source exists locally,
// the local state may be cleaned, otherwise we ping upstream.
func newSourceGateway(ctx context.Context, src source, superv *supervisor, cachedir string, cache singleSourceCache) (*sourceGateway, error) {
	var state sourceState
	local := src.existsLocally(ctx)
	if local {
		state |= sourceExistsLocally
		// Clean any dirty local state before handing the gateway out.
		if err := superv.do(ctx, src.upstreamURL(), ctValidateLocal, src.maybeClean); err != nil {
			return nil, err
		}
	}

	sg := &sourceGateway{
		srcState: state,
		src:      src,
		cachedir: cachedir,
		cache:    cache,
		suprvsr:  superv,
	}

	if !local {
		// No local copy: verify the source exists upstream before returning.
		if err := sg.require(ctx, sourceExistsUpstream); err != nil {
			return nil, err
		}
	}
	return sg, nil
}

// syncLocal ensures the source exists locally and is fully up to date.
func (sg *sourceGateway) syncLocal(ctx context.Context) error {
	sg.mu.Lock()
	err := sg.require(ctx, sourceExistsLocally|sourceHasLatestLocally)
	sg.mu.Unlock()
	return err
}

// existsInCache ensures the source exists on local disk.
func (sg *sourceGateway) existsInCache(ctx context.Context) error {
	sg.mu.Lock()
	err := sg.require(ctx, sourceExistsLocally)
	sg.mu.Unlock()
	return err
}

// existsUpstream ensures the source has been verified upstream.
func (sg *sourceGateway) existsUpstream(ctx context.Context) error {
	sg.mu.Lock()
	err := sg.require(ctx, sourceExistsUpstream)
	sg.mu.Unlock()
	return err
}

// exportVersionTo writes the tree at the revision underlying v into the
// directory at to.
func (sg *sourceGateway) exportVersionTo(ctx context.Context, v Version, to string) error {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	err := sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return err
	}

	r, err := sg.convertToRevision(ctx, v)
	if err != nil {
		return err
	}

	err = sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error {
		return sg.src.exportRevisionTo(ctx, r, to)
	})

	// It's possible (in git) that we may have tried this against a version that
	// doesn't exist in the repository cache, even though we know it exists in
	// the upstream. If it looks like that might be the case, update the local
	// and retry.
	// TODO(sdboyer) It'd be better if we could check the error to see if this
	// actually was the cause of the problem.
	if err != nil && sg.srcState&sourceHasLatestLocally == 0 {
		if err = sg.require(ctx, sourceHasLatestLocally); err == nil {
			err = sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error {
				return sg.src.exportRevisionTo(ctx, r, to)
			})
		}
	}

	return err
}

// exportPrunedVersionTo exports lp's revision into to with prune applied,
// using the source's single-step fast-prune path when available.
func (sg *sourceGateway) exportPrunedVersionTo(ctx context.Context, lp LockedProject, prune PruneOptions, to string) error {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	err := sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return err
	}

	r, err := sg.convertToRevision(ctx, lp.Version())
	if err != nil {
		return err
	}

	if fastprune, ok := sg.src.(sourceFastPrune); ok {
		return sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error {
			return fastprune.exportPrunedRevisionTo(ctx, r, lp.Packages(), prune, to)
		})
	}

	// Fallback: full export followed by an on-disk prune.
	if err = sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error {
		return sg.src.exportRevisionTo(ctx, r, to)
	}); err != nil {
		return err
	}

	return PruneProject(to, lp, prune)
}

// getManifestAndLock returns the Manifest and Lock that an analyzes for
// project pr at version v, consulting the persistent cache first.
func (sg *sourceGateway) getManifestAndLock(ctx context.Context, pr ProjectRoot, v Version, an ProjectAnalyzer) (Manifest, Lock, error) {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	r, err := sg.convertToRevision(ctx, v)
	if err != nil {
		return nil, nil, err
	}

	// Cached results are keyed on both revision and analyzer identity.
	m, l, has := sg.cache.getManifestAndLock(r, an.Info())
	if has {
		return m, l, nil
	}

	err = sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return nil, nil, err
	}

	label := fmt.Sprintf("%s:%s", sg.src.upstreamURL(), an.Info())
	err = sg.suprvsr.do(ctx, label, ctGetManifestAndLock, func(ctx context.Context) error {
		m, l, err = sg.src.getManifestAndLock(ctx, pr, r, an)
		return err
	})

	// It's possible (in git) that we may have tried this against a version that
	// doesn't exist in the repository cache, even though we know it exists in
	// the upstream. If it looks like that might be the case, update the local
	// and retry.
	// TODO(sdboyer) It'd be better if we could check the error to see if this
	// actually was the cause of the problem.
	if err != nil && sg.srcState&sourceHasLatestLocally == 0 {
		// TODO(sdboyer) we should warn/log/something in adaptive recovery
		// situations like this
		err = sg.require(ctx, sourceHasLatestLocally)
		if err != nil {
			return nil, nil, err
		}
		err = sg.suprvsr.do(ctx, label, ctGetManifestAndLock, func(ctx context.Context) error {
			m, l, err = sg.src.getManifestAndLock(ctx, pr, r, an)
			return err
		})
	}
	if err != nil {
		return nil, nil, err
	}

	sg.cache.setManifestAndLock(r, an.Info(), m, l)
	return m, l, nil
}

// listPackages returns the PackageTree for project pr at version v, consulting
// the persistent cache first.
func (sg *sourceGateway) listPackages(ctx context.Context, pr ProjectRoot, v Version) (pkgtree.PackageTree, error) {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	r, err := sg.convertToRevision(ctx, v)
	if err != nil {
		return pkgtree.PackageTree{}, err
	}

	ptree, has := sg.cache.getPackageTree(r, pr)
	if has {
		return ptree, nil
	}

	err = sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return pkgtree.PackageTree{}, err
	}

	label := fmt.Sprintf("%s:%s", pr, sg.src.upstreamURL())
	err = sg.suprvsr.do(ctx, label, ctListPackages, func(ctx context.Context) error {
		ptree, err = sg.src.listPackages(ctx, pr, r)
		return err
	})

	// It's possible (in git) that we may have tried this against a version that
	// doesn't exist in the repository cache, even though we know it exists in
	// the upstream. If it looks like that might be the case, update the local
	// and retry.
	// TODO(sdboyer) It'd be better if we could check the error to see if this
	// actually was the cause of the problem.
	if err != nil && sg.srcState&sourceHasLatestLocally == 0 {
		// TODO(sdboyer) we should warn/log/something in adaptive recovery
		// situations like this
		err = sg.require(ctx, sourceHasLatestLocally)
		if err != nil {
			return pkgtree.PackageTree{}, err
		}
		err = sg.suprvsr.do(ctx, label, ctListPackages, func(ctx context.Context) error {
			ptree, err = sg.src.listPackages(ctx, pr, r)
			return err
		})
	}
	if err != nil {
		return pkgtree.PackageTree{}, err
	}

	sg.cache.setPackageTree(r, ptree)
	return ptree, nil
}

// convertToRevision resolves v to its underlying immutable Revision via the
// cache, refreshing the version list at most once if v is not found.
// caller must hold sg.mu.
func (sg *sourceGateway) convertToRevision(ctx context.Context, v Version) (Revision, error) {
	// When looking up by Version, there are four states that may have
	// differing opinions about version->revision mappings:
	//
	// 1. The upstream source/repo (canonical)
	// 2. The local source/repo
	// 3. The local cache
	// 4. The input (params to this method)
	//
	// If the input differs from any of the above, it's likely because some lock
	// got written somewhere with a version/rev pair that has since changed or
	// been removed. But correct operation dictates that such a mis-mapping be
	// respected; if the mis-mapping is to be corrected, it has to be done
	// intentionally by the caller, not automatically here.
	r, has := sg.cache.toRevision(v)
	if has {
		return r, nil
	}

	if sg.srcState&sourceHasLatestVersionList != 0 {
		// We have the latest version list already and didn't get a match, so
		// this is definitely a failure case.
		return "", fmt.Errorf("version %q does not exist in source", v)
	}

	// The version list is out of date; it's possible this version might
	// show up after loading it.
	err := sg.require(ctx, sourceHasLatestVersionList)
	if err != nil {
		return "", err
	}

	r, has = sg.cache.toRevision(v)
	if !has {
		return "", fmt.Errorf("version %q does not exist in source", v)
	}

	return r, nil
}

// listVersions returns the source's version list from the cache, refreshing
// it first if necessary.
func (sg *sourceGateway) listVersions(ctx context.Context) ([]PairedVersion, error) {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	if pvs, ok := sg.cache.getAllVersions(); ok {
		return pvs, nil
	}

	err := sg.require(ctx, sourceHasLatestVersionList)
	if err != nil {
		return nil, err
	}

	if pvs, ok := sg.cache.getAllVersions(); ok {
		return pvs, nil
	}
	// No versions at all; nil slice with nil error signals an empty list.
	return nil, nil
}

// revisionPresentIn reports whether r exists in the local copy of the source,
// memoizing positive answers in the cache.
func (sg *sourceGateway) revisionPresentIn(ctx context.Context, r Revision) (bool, error) {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	err := sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return false, err
	}

	if _, exists := sg.cache.getVersionsFor(r); exists {
		return true, nil
	}

	present, err := sg.src.revisionPresentIn(r)
	if err == nil && present {
		sg.cache.markRevisionExists(r)
	}
	return present, err
}

// disambiguateRevision expands a possibly-abbreviated revision r to its full,
// unambiguous form using the local copy of the source.
func (sg *sourceGateway) disambiguateRevision(ctx context.Context, r Revision) (Revision, error) {
	sg.mu.Lock()
	defer sg.mu.Unlock()

	err := sg.require(ctx, sourceExistsLocally)
	if err != nil {
		return "", err
	}

	return sg.src.disambiguateRevision(ctx, r)
}

// sourceExistsUpstream verifies that the source exists upstream and that the
// upstreamURL has not changed and returns any additional sourceState, or an error.
func (sg *sourceGateway) sourceExistsUpstream(ctx context.Context) (sourceState, error) {
	if sg.src.existsCallsListVersions() {
		// The existence ping would list versions anyway, so do that instead
		// and keep the extra state it yields.
		return sg.loadLatestVersionList(ctx)
	}
	err := sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourcePing, func(ctx context.Context) error {
		if !sg.src.existsUpstream(ctx) {
			return errors.Errorf("source does not exist upstream: %s: %s", sg.src.sourceType(), sg.src.upstreamURL())
		}
		return nil
	})
	return 0, err
}

// initLocal initializes the source locally and returns the resulting sourceState.
func (sg *sourceGateway) initLocal(ctx context.Context) (sourceState, error) {
	if err := sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourceInit, func(ctx context.Context) error {
		err := sg.src.initLocal(ctx)
		return errors.Wrapf(err, "failed to fetch source for %s", sg.src.upstreamURL())
	}); err != nil {
		return 0, err
	}
	// A successful fresh clone establishes all three of these states at once.
	return sourceExistsUpstream | sourceExistsLocally | sourceHasLatestLocally, nil
}

// loadLatestVersionList loads the latest version list, possibly ensuring the source
// exists locally first, and returns the resulting sourceState.
func (sg *sourceGateway) loadLatestVersionList(ctx context.Context) (sourceState, error) {
	var addlState sourceState
	if sg.src.listVersionsRequiresLocal() && !sg.src.existsLocally(ctx) {
		as, err := sg.initLocal(ctx)
		if err != nil {
			return 0, err
		}
		addlState |= as
	}

	var pvl []PairedVersion
	if err := sg.suprvsr.do(ctx, sg.src.sourceType(), ctListVersions, func(ctx context.Context) error {
		var err error
		pvl, err = sg.src.listVersions(ctx)
		return errors.Wrapf(err, "failed to list versions for %s", sg.src.upstreamURL())
	}); err != nil {
		return addlState, err
	}

	sg.cache.setVersionMap(pvl)
	return addlState | sourceHasLatestVersionList, nil
}

// require ensures the sourceGateway has the wanted sourceState, fetching more
// data if necessary. Returns an error if the state could not be reached.
// caller must hold sg.mu
func (sg *sourceGateway) require(ctx context.Context, wanted sourceState) (err error) {
	// todo holds the wanted state bits not yet achieved; sweep a single-bit
	// flag across it and perform the op needed to satisfy each set bit.
	todo := (^sg.srcState) & wanted
	var flag sourceState = 1

	for todo != 0 {
		if todo&flag != 0 {
			// Set up addlState so that individual ops can easily attach
			// more states that were incidentally satisfied by the op.
			var addlState sourceState

			switch flag {
			case sourceExistsUpstream:
				addlState, err = sg.sourceExistsUpstream(ctx)
			case sourceExistsLocally:
				if !sg.src.existsLocally(ctx) {
					addlState, err = sg.initLocal(ctx)
				}
			case sourceHasLatestVersionList:
				if _, ok := sg.cache.getAllVersions(); !ok {
					addlState, err = sg.loadLatestVersionList(ctx)
				}
			case sourceHasLatestLocally:
				err = sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourceFetch, func(ctx context.Context) error {
					return sg.src.updateLocal(ctx)
				})
				// A successful update necessarily implies both of these.
				addlState = sourceExistsUpstream | sourceExistsLocally
			}

			if err != nil {
				return
			}

			// Record both the requested bit and anything satisfied en route.
			checked := flag | addlState
			sg.srcState |= checked
			todo &= ^checked
		}

		flag <<= 1
	}

	return nil
}

// source is an abstraction around the different underlying types (git, bzr, hg,
// svn, maybe raw on-disk code, and maybe eventually a registry) that can
// provide versioned project source trees.
type source interface {
	existsLocally(context.Context) bool
	existsUpstream(context.Context) bool
	upstreamURL() string
	initLocal(context.Context) error
	updateLocal(context.Context) error
	// maybeClean is a no-op when the underlying source does not support cleaning.
	maybeClean(context.Context) error
	listVersions(context.Context) ([]PairedVersion, error)
	getManifestAndLock(context.Context, ProjectRoot, Revision, ProjectAnalyzer) (Manifest, Lock, error)
	listPackages(context.Context, ProjectRoot, Revision) (pkgtree.PackageTree, error)
	revisionPresentIn(Revision) (bool, error)
	disambiguateRevision(context.Context, Revision) (Revision, error)
	exportRevisionTo(context.Context, Revision, string) error
	sourceType() string
	// existsCallsListVersions returns true if calling existsUpstream actually lists
	// versions underneath, meaning listVersions might as well be used instead.
	existsCallsListVersions() bool
	// listVersionsRequiresLocal returns true if calling listVersions first
	// requires the source to exist locally.
	listVersionsRequiresLocal() bool
}

// sourceFastPrune is a source that can export a revision with pruning applied
// in a single step, without requiring a separate on-disk prune pass.
type sourceFastPrune interface {
	source
	exportPrunedRevisionTo(context.Context, Revision, []string, PruneOptions, string) error
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_manager_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"log"
	"reflect"
	"testing"

	"github.com/golang/dep/internal/test"
)

// TestSourceManager_InferConstraint checks that InferConstraint maps raw
// constraint strings (semver, branch names, long/short revisions) to the
// expected Constraint type and value across git, bzr, and hg projects.
// Network-dependent and slow; skipped under -short.
func TestSourceManager_InferConstraint(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping slow test in short mode")
	}
	t.Parallel()

	// Used in git subtests:
	v081, err := NewSemverConstraintIC("v0.8.1")
	if err != nil {
		t.Fatal(err)
	}
	v012, err := NewSemverConstraintIC("v0.12.0-12-de4dcafe0")
	if err != nil {
		t.Fatal(err)
	}

	// Used in hg and bzr subtests:
	v1, err := NewSemverConstraintIC("v1.0.0")
	if err != nil {
		t.Fatal(err)
	}

	var (
		gitProj = ProjectIdentifier{ProjectRoot: "github.com/carolynvs/deptest"}
		bzrProj = ProjectIdentifier{ProjectRoot: "launchpad.net/govcstestbzrrepo"}
		hgProj  = ProjectIdentifier{ProjectRoot: "bitbucket.org/golang-dep/dep-test"}

		testcases = []struct {
			project ProjectIdentifier
			name    string
			str     string
			want    Constraint
		}{
			{gitProj, "empty", "", Any()},
			{gitProj, "semver-short", "v0.8.1", v081},
			{gitProj, "long semver constraint", "v0.12.0-12-de4dcafe0", v012},
			{gitProj, "branch v2", "v2", NewBranch("v2")},
			{gitProj, "branch master", "master", NewBranch("master")},
			{gitProj, "long revision", "3f4c3bea144e112a69bbe5d8d01c1b09a544253f", Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")},
			{gitProj, "short revision", "3f4c3bea", Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")},

			{bzrProj, "empty", "", Any()},
			{bzrProj, "semver", "v1.0.0", v1},
			{bzrProj, "revision", "matt@mattfarina.com-20150731135137-pbphasfppmygpl68", Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")},

			{hgProj, "empty", "", Any()},
			{hgProj, "semver", "v1.0.0", v1},
			{hgProj, "default branch", "default", NewBranch("default")},
			{hgProj, "revision", "6f55e1f03d91f8a7cce35d1968eb60a2352e4d59", Revision("6f55e1f03d91f8a7cce35d1968eb60a2352e4d59")},
			{hgProj, "short revision", "6f55e1f03d91", Revision("6f55e1f03d91f8a7cce35d1968eb60a2352e4d59")},
		}
	)

	for _, tc := range testcases {
		// Prefix the subtest name with the VCS under test for readable output.
		var subtestName string
		switch tc.project {
		case gitProj:
			subtestName = "git-" + tc.name
		case bzrProj:
			subtestName = "bzr-" + tc.name
		case hgProj:
			subtestName = "hg-" + tc.name
		default:
			subtestName = tc.name
		}

		t.Run(subtestName, func(t *testing.T) {
			t.Parallel()
			h := test.NewHelper(t)
			defer h.Cleanup()
			cacheDir := "gps-repocache"
			h.TempDir(cacheDir)

			sm, err := NewSourceManager(SourceManagerConfig{
				Cachedir: h.Path(cacheDir),
				Logger:   log.New(test.Writer{TB: t}, "", 0),
			})
			h.Must(err)

			got, err := sm.InferConstraint(tc.str, tc.project)
			h.Must(err)

			// Both the dynamic type and the rendered value must match.
			wantT := reflect.TypeOf(tc.want)
			gotT := reflect.TypeOf(got)
			if wantT != gotT {
				t.Errorf("expected type: %s, got %s, for input %s", wantT, gotT, tc.str)
			}

			if got.String() != tc.want.String() {
				t.Errorf("expected value: %s, got %s for input %s", tc.want, got, tc.str)
			}
		})
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/manifest_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import "testing" // Test that prep manifest sanitizes manifests appropriately func TestPrepManifest(t *testing.T) { m := SimpleManifest{ Deps: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{}, ProjectRoot("bar"): ProjectProperties{ Source: "whatever", }, }, } prepped := prepManifest(m) d := prepped.DependencyConstraints() if len(d) != 1 { t.Error("prepManifest did not eliminate empty ProjectProperties from deps map") } if d[ProjectRoot("bar")].Constraint != any { t.Error("prepManifest did not normalize nil constraint to anyConstraint in deps map") } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_bolt_encode.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"encoding/binary"
	"strings"
	"time"

	"github.com/boltdb/bolt"
	"github.com/golang/dep/gps/internal/pb"
	"github.com/golang/dep/gps/pkgtree"
	"github.com/golang/protobuf/proto"
	"github.com/jmank88/nuts"
	"github.com/pkg/errors"
)

// Single-byte keys used for buckets and fields within the bolt cache. Some
// keys are aliased to the same byte; they never occur within the same bucket,
// so the reuse is unambiguous.
var (
	cacheKeyComment      = []byte("c")
	cacheKeyConstraint   = cacheKeyComment
	cacheKeyError        = []byte("e")
	cacheKeyInputImports = []byte("m")
	cacheKeyIgnored      = []byte("i")
	cacheKeyImport       = cacheKeyIgnored
	cacheKeyLock         = []byte("l")
	cacheKeyName         = []byte("n")
	cacheKeyOverride     = []byte("o")
	cacheKeyPTree        = []byte("p")
	cacheKeyRequired     = []byte("r")
	cacheKeyRevision     = cacheKeyRequired
	cacheKeyTestImport   = []byte("t")

	// Prefix bytes for timestamped revision/version bucket keys.
	cacheRevision = byte('r')
	cacheVersion  = byte('v')
)

// propertiesFromCache returns a new ProjectRoot and ProjectProperties with the fields from m.
// A nil cached constraint decodes as Any().
func propertiesFromCache(m *pb.ProjectProperties) (ProjectRoot, ProjectProperties, error) {
	ip := ProjectRoot(m.Root)
	var pp ProjectProperties
	pp.Source = m.Source

	if m.Constraint == nil {
		pp.Constraint = Any()
	} else {
		c, err := constraintFromCache(m.Constraint)
		if err != nil {
			return "", ProjectProperties{}, err
		}
		pp.Constraint = c
	}

	return ip, pp, nil
}

// projectPropertiesMsgs is a convenience tuple.
type projectPropertiesMsgs struct {
	pp pb.ProjectProperties
	c  pb.Constraint
}

// copyFrom sets the ProjectPropertiesMsg fields from ip and pp.
// Any() constraints are stored as nil rather than serialized.
func (ms *projectPropertiesMsgs) copyFrom(ip ProjectRoot, pp ProjectProperties) {
	ms.pp.Root = string(ip)
	ms.pp.Source = pp.Source

	if pp.Constraint != nil && !IsAny(pp.Constraint) {
		pp.Constraint.copyTo(&ms.c)
		ms.pp.Constraint = &ms.c
	} else {
		ms.pp.Constraint = nil
	}
}

// cachePutManifest stores a Manifest in the bolt.Bucket.
func cachePutManifest(b *bolt.Bucket, m Manifest) error {
	var ppMsg projectPropertiesMsgs

	// Dependency constraints go in a sub-bucket keyed by a compact,
	// monotonically increasing integer key (nuts.Key).
	constraints := m.DependencyConstraints()
	if len(constraints) > 0 {
		cs, err := b.CreateBucket(cacheKeyConstraint)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(constraints)-1)))
		var i uint64
		for ip, pp := range constraints {
			ppMsg.copyFrom(ip, pp)
			v, err := proto.Marshal(&ppMsg.pp)
			if err != nil {
				return err
			}
			key.Put(i)
			i++
			if err := cs.Put(key, v); err != nil {
				return err
			}
		}
	}

	// The remaining fields only exist on RootManifests.
	rm, ok := m.(RootManifest)
	if !ok {
		return nil
	}

	ignored := rm.IgnoredPackages().ToSlice()
	if len(ignored) > 0 {
		ig, err := b.CreateBucket(cacheKeyIgnored)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(ignored)-1)))
		var i uint64
		for _, ip := range ignored {
			key.Put(i)
			i++
			if err := ig.Put(key, []byte(ip)); err != nil {
				return err
			}
		}
	}

	overrides := rm.Overrides()
	if len(overrides) > 0 {
		ovr, err := b.CreateBucket(cacheKeyOverride)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(overrides)-1)))
		var i uint64
		for ip, pp := range overrides {
			ppMsg.copyFrom(ip, pp)
			v, err := proto.Marshal(&ppMsg.pp)
			if err != nil {
				return err
			}
			key.Put(i)
			i++
			if err := ovr.Put(key, v); err != nil {
				return err
			}
		}
	}

	required := rm.RequiredPackages()
	if len(required) > 0 {
		req, err := b.CreateBucket(cacheKeyRequired)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(required)-1)))
		var i uint64
		for ip, ok := range required {
			// Only store packages actually flagged as required.
			if ok {
				key.Put(i)
				i++
				if err := req.Put(key, []byte(ip)); err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// cacheGetManifest returns a new RootManifest with the data retrieved from the bolt.Bucket.
func cacheGetManifest(b *bolt.Bucket) (RootManifest, error) {
	//TODO consider storing slice/map lens to enable calling make() with capacity
	m := &simpleRootManifest{
		c:   make(ProjectConstraints),
		ovr: make(ProjectConstraints),
		req: make(map[string]bool),
	}

	// Constraints
	if cs := b.Bucket(cacheKeyConstraint); cs != nil {
		var msg pb.ProjectProperties
		err := cs.ForEach(func(_, v []byte) error {
			if err := proto.Unmarshal(v, &msg); err != nil {
				return err
			}
			ip, pp, err := propertiesFromCache(&msg)
			if err != nil {
				return err
			}
			m.c[ip] = pp
			return nil
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get constraints")
		}
	}

	// Ignored
	if ig := b.Bucket(cacheKeyIgnored); ig != nil {
		var igslice []string
		err := ig.ForEach(func(_, v []byte) error {
			igslice = append(igslice, string(v))
			return nil
		})
		m.ig = pkgtree.NewIgnoredRuleset(igslice)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get ignored")
		}
	}

	// Overrides
	if os := b.Bucket(cacheKeyOverride); os != nil {
		var msg pb.ProjectProperties
		err := os.ForEach(func(_, v []byte) error {
			if err := proto.Unmarshal(v, &msg); err != nil {
				return err
			}
			ip, pp, err := propertiesFromCache(&msg)
			if err != nil {
				return err
			}
			m.ovr[ip] = pp
			return nil
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get overrides")
		}
	}

	// Required
	if req := b.Bucket(cacheKeyRequired); req != nil {
		err := req.ForEach(func(_, v []byte) error {
			m.req[string(v)] = true
			return nil
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get required")
		}
	}

	return m, nil
}

// copyTo returns a serializable representation of lp.
func (lp lockedProject) copyTo(msg *pb.LockedProject, c *pb.Constraint) {
	if lp.v == nil {
		msg.UnpairedVersion = nil
	} else {
		lp.v.copyTo(c)
		msg.UnpairedVersion = c
	}
	msg.Root = string(lp.pi.ProjectRoot)
	msg.Source = lp.pi.Source
	msg.Revision = string(lp.r)
	msg.Packages = lp.pkgs
}

// copyLockedProjectTo hydrates pointers to serializable representations of a
// LockedProject with the appropriate data.
func copyLockedProjectTo(lp LockedProject, msg *pb.LockedProject, c *pb.Constraint) {
	// Concrete lockedProject values have a dedicated, faster path.
	if nlp, ok := lp.(lockedProject); ok {
		nlp.copyTo(msg, c)
		return
	}

	v := lp.Version()
	if v == nil {
		msg.UnpairedVersion = nil
	} else {
		v.copyTo(c)
		msg.UnpairedVersion = c

		// Pull the underlying revision out of paired/revision versions.
		switch tv := v.(type) {
		case Revision:
			msg.Revision = string(tv)
		case versionPair:
			msg.Revision = string(tv.r)
		}
	}

	pi := lp.Ident()
	msg.Root = string(pi.ProjectRoot)
	msg.Source = pi.Source
	msg.Packages = lp.Packages()
}

// lockedProjectFromCache returns a new LockedProject with fields from m.
func lockedProjectFromCache(m *pb.LockedProject) (LockedProject, error) {
	var uv UnpairedVersion
	var err error
	if m.UnpairedVersion != nil {
		uv, err = unpairedVersionFromCache(m.UnpairedVersion)
		if err != nil {
			return lockedProject{}, err
		}
	}

	return lockedProject{
		pi: ProjectIdentifier{
			ProjectRoot: ProjectRoot(m.Root),
			Source:      m.Source,
		},
		v:    uv,
		r:    Revision(m.Revision),
		pkgs: m.Packages,
	}, nil
}

// cachePutLock stores the Lock as fields in the bolt.Bucket.
func cachePutLock(b *bolt.Bucket, l Lock) error {
	// Input imports, if present. Stored as a single "#"-joined value.
	byt := []byte(strings.Join(l.InputImports(), "#"))
	if err := b.Put(cacheKeyInputImports, byt); err != nil {
		return errors.Wrap(err, "failed to put input imports")
	}

	// Projects
	if projects := l.Projects(); len(projects) > 0 {
		lb, err := b.CreateBucket(cacheKeyLock)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(projects)-1)))
		var msg pb.LockedProject
		var cMsg pb.Constraint
		for i, lp := range projects {
			copyLockedProjectTo(lp, &msg, &cMsg)
			v, err := proto.Marshal(&msg)
			if err != nil {
				return err
			}
			key.Put(uint64(i))
			if err := lb.Put(key, v); err != nil {
				return err
			}
		}
	}

	return nil
}

// cacheGetLock returns a new *safeLock with the fields retrieved from the bolt.Bucket.
func cacheGetLock(b *bolt.Bucket) (*safeLock, error) {
	l := &safeLock{}

	// Input imports were stored "#"-joined; split them back out.
	if ii := b.Get(cacheKeyInputImports); len(ii) > 0 {
		l.i = strings.Split(string(ii), "#")
	}

	if locked := b.Bucket(cacheKeyLock); locked != nil {
		var msg pb.LockedProject
		err := locked.ForEach(func(_, v []byte) error {
			if err := proto.Unmarshal(v, &msg); err != nil {
				return err
			}
			lp, err := lockedProjectFromCache(&msg)
			if err != nil {
				return err
			}
			l.p = append(l.p, lp)
			return nil
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get locked projects")
		}
	}

	return l, nil
}

// cachePutPackageOrErr stores the pkgtree.PackageOrErr as fields in the bolt.Bucket.
// Package.ImportPath is ignored.
func cachePutPackageOrErr(b *bolt.Bucket, poe pkgtree.PackageOrErr) error {
	// An error is stored exclusively; no package fields accompany it.
	if poe.Err != nil {
		err := b.Put(cacheKeyError, []byte(poe.Err.Error()))
		return errors.Wrapf(err, "failed to put error: %v", poe.Err)
	}

	if len(poe.P.CommentPath) > 0 {
		err := b.Put(cacheKeyComment, []byte(poe.P.CommentPath))
		if err != nil {
			return errors.Wrapf(err, "failed to put package: %v", poe.P)
		}
	}

	if len(poe.P.Imports) > 0 {
		ip, err := b.CreateBucket(cacheKeyImport)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(poe.P.Imports)-1)))
		for i := range poe.P.Imports {
			v := []byte(poe.P.Imports[i])
			key.Put(uint64(i))
			if err := ip.Put(key, v); err != nil {
				return err
			}
		}
	}

	if len(poe.P.Name) > 0 {
		err := b.Put(cacheKeyName, []byte(poe.P.Name))
		if err != nil {
			return errors.Wrapf(err, "failed to put package: %v", poe.P)
		}
	}

	if len(poe.P.TestImports) > 0 {
		ip, err := b.CreateBucket(cacheKeyTestImport)
		if err != nil {
			return err
		}
		key := make(nuts.Key, nuts.KeyLen(uint64(len(poe.P.TestImports)-1)))
		for i := range poe.P.TestImports {
			v := []byte(poe.P.TestImports[i])
			key.Put(uint64(i))
			if err := ip.Put(key, v); err != nil {
				return err
			}
		}
	}

	return nil
}

// cacheGetPackageOrErr returns a new pkgtree.PackageOrErr with fields retrieved
// from the bolt.Bucket.
func cacheGetPackageOrErr(b *bolt.Bucket) (pkgtree.PackageOrErr, error) {
	// A stored error short-circuits: error and package data are mutually
	// exclusive (see cachePutPackageOrErr).
	if v := b.Get(cacheKeyError); len(v) > 0 {
		return pkgtree.PackageOrErr{
			Err: errors.New(string(v)),
		}, nil
	}
	var p pkgtree.Package
	// Note: ImportPath is not stored in the cache, so it stays zero here;
	// presumably the caller fills it in — confirm at call sites.
	p.CommentPath = string(b.Get(cacheKeyComment))
	if ip := b.Bucket(cacheKeyImport); ip != nil {
		err := ip.ForEach(func(_, v []byte) error {
			p.Imports = append(p.Imports, string(v))
			return nil
		})
		if err != nil {
			return pkgtree.PackageOrErr{}, err
		}
	}
	p.Name = string(b.Get(cacheKeyName))
	if tip := b.Bucket(cacheKeyTestImport); tip != nil {
		err := tip.ForEach(func(_, v []byte) error {
			p.TestImports = append(p.TestImports, string(v))
			return nil
		})
		if err != nil {
			return pkgtree.PackageOrErr{}, err
		}
	}
	return pkgtree.PackageOrErr{P: p}, nil
}

// cacheTimestampedKey returns a prefixed key with a trailing timestamp.
func cacheTimestampedKey(pre byte, t time.Time) []byte {
	// 1 prefix byte + 8 bytes of big-endian Unix seconds. Big-endian keeps
	// bolt's lexicographic key order aligned with chronological order.
	b := make([]byte, 9)
	b[0] = pre
	binary.BigEndian.PutUint64(b[1:], uint64(t.Unix()))
	return b
}

// boltTxOrBucket is a minimal interface satisfied by bolt.Tx and bolt.Bucket.
type boltTxOrBucket interface {
	Cursor() *bolt.Cursor
	DeleteBucket([]byte) error
	Bucket([]byte) *bolt.Bucket
}

// cachePrefixDelete prefix scans and deletes each bucket.
// It stops and returns on the first deletion failure.
func cachePrefixDelete(tob boltTxOrBucket, pre byte) error {
	c := tob.Cursor()
	// Seek to the first key carrying the prefix, then walk forward until
	// the prefix no longer matches.
	for k, _ := c.Seek([]byte{pre}); len(k) > 0 && k[0] == pre; k, _ = c.Next() {
		if err := tob.DeleteBucket(k); err != nil {
			return errors.Wrapf(err, "failed to delete bucket: %s", k)
		}
	}
	return nil
}

// cacheFindLatestValid prefix scans for the latest bucket which is timestamped >= epoch,
// or returns nil if none exists.
func cacheFindLatestValid(tob boltTxOrBucket, pre byte, epoch int64) *bolt.Bucket { c := tob.Cursor() var latest []byte for k, _ := c.Seek([]byte{pre}); len(k) > 0 && k[0] == pre; k, _ = c.Next() { latest = k } if latest == nil { return nil } ts := latest[1:] if len(ts) != 8 { return nil } if int64(binary.BigEndian.Uint64(ts)) < epoch { return nil } return tob.Bucket(latest) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/vcs_source_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "context" "io/ioutil" "log" "net/url" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "sync" "testing" "github.com/golang/dep/internal/test" ) // Parent test that executes all the slow vcs interaction tests in parallel. func TestSlowVcs(t *testing.T) { t.Run("write-deptree", testWriteDepTree) t.Run("source-gateway", testSourceGateway) t.Run("bzr-repo", testBzrRepo) t.Run("bzr-source", testBzrSourceInteractions) t.Run("svn-repo", testSvnRepo) // TODO(sdboyer) svn-source t.Run("hg-repo", testHgRepo) t.Run("hg-source", testHgSourceInteractions) t.Run("git-repo", testGitRepo) t.Run("git-source", testGitSourceInteractions) t.Run("gopkgin-source", testGopkginSourceInteractions) } func testGitSourceInteractions(t *testing.T) { t.Parallel() // This test is slowish, skip it on -short if testing.Short() { t.Skip("Skipping git source version fetching test in short mode") } requiresBins(t, "git") cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Errorf("Failed to create temp dir: %s", err) } defer func() { if err := os.RemoveAll(cpath); err != nil { t.Errorf("removeAll failed: %s", err) } }() os.Mkdir(filepath.Join(cpath, "sources"), 0777) n := "github.com/sdboyer/gpkt" un := "https://" + n u, err := url.Parse(un) if err != nil { t.Fatalf("Error parsing URL %s: %s", un, err) } mb := maybeGitSource{ url: u, } ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("Unexpected error while setting up gitSource for test repo: %s", err) } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning git repo: %s", err) } src, ok := isrc.(*gitSource) if !ok { t.Fatalf("Expected a gitSource, got a %T", isrc) } if un != src.upstreamURL() { t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) } pvlist, err := src.listVersions(ctx) if 
err != nil { t.Fatalf("Unexpected error getting version pairs from git repo: %s", err) } vlist := hidePair(pvlist) // check that an expected rev is present is, err := src.revisionPresentIn(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")) if err != nil { t.Errorf("Unexpected error while checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present") } if len(vlist) != 7 { t.Errorf("git test repo should've produced seven versions, got %v: vlist was %s", len(vlist), vlist) } else { SortForUpgrade(vlist) evl := []Version{ NewVersion("v2.0.0").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), NewVersion("v1.1.0").Pair(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), NewVersion("v1.0.0").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), newDefaultBranch("master").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), NewBranch("v1").Pair(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), NewBranch("v1.1").Pair(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), NewBranch("v3").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), } for k, e := range evl { if !vlist[k].identical(e) { t.Errorf("version list was not what we expected:\n\t(GOT): %#v\n\t(WNT): %#v", vlist, evl) break } } } // recheck that rev is present, this time interacting with cache differently is, err = src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")) if err != nil { t.Errorf("Unexpected error while re-checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present on re-check") } } func testGopkginSourceInteractions(t *testing.T) { t.Parallel() // This test is slowish, skip it on -short if testing.Short() { t.Skip("Skipping gopkg.in source version fetching test in short mode") } requiresBins(t, "git") cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Errorf("Failed to create temp dir: %s", err) } defer func() { if err := 
os.RemoveAll(cpath); err != nil { t.Errorf("removeAll failed: %s", err) } }() os.Mkdir(filepath.Join(cpath, "sources"), 0777) tfunc := func(opath, n string, major uint64, evl []Version) { un := "https://" + opath u, err := url.Parse("https://" + n) if err != nil { t.Errorf("URL was bad, lolwut? errtext: %s", err) return } unstable := strings.HasSuffix(opath, gopkgUnstableSuffix) mb := maybeGopkginSource{ opath: opath, url: u, major: major, unstable: unstable, } ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Errorf("Unexpected error while setting up gopkginSource for test repo: %s", err) return } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning git repo: %s", err) } src, ok := isrc.(*gopkginSource) if !ok { t.Errorf("Expected a gopkginSource, got a %T", isrc) return } if un != src.upstreamURL() { t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) } if src.major != major { t.Errorf("Expected %v as major version filter on gopkginSource, got %v", major, src.major) } // check that an expected rev is present rev := evl[0].(PairedVersion).Revision() is, err := src.revisionPresentIn(rev) if err != nil { t.Errorf("Unexpected error while checking revision presence: %s", err) } else if !is { t.Errorf("Revision %s that should exist was not present", rev) } pvlist, err := src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) } vlist := hidePair(pvlist) if len(vlist) != len(evl) { t.Errorf("gopkgin test repo (%s) should've produced %v versions, got %v.\n%v", un, len(evl), len(vlist), vlist) } else { SortForUpgrade(vlist) if !reflect.DeepEqual(vlist, evl) { t.Errorf("Version list for %s was not what we expected:\n\t(GOT): %#v\n\t(WNT): %#v", un, vlist, evl) } } // Run again, this time to ensure cache outputs correctly pvlist, err = src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) } vlist = 
hidePair(pvlist) if len(vlist) != len(evl) { t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist)) } else { SortForUpgrade(vlist) if !reflect.DeepEqual(vlist, evl) { t.Errorf("Version list for %s was not what we expected:\n\t(GOT): %#v\n\t(WNT): %#v", un, vlist, evl) } } // recheck that rev is present, this time interacting with cache differently is, err = src.revisionPresentIn(rev) if err != nil { t.Errorf("Unexpected error while re-checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present on re-check") } } // simultaneously run for v1, v2, and v3 filters of the target repo wg := &sync.WaitGroup{} wg.Add(6) go func() { // Treat master as v0 when no other branches/tags exist that match gopkg.in's rules tfunc("gopkg.in/carolynvs/deptest-gopkgin-implicit-v0.v0", "github.com/carolynvs/deptest-gopkgin-implicit-v0", 0, []Version{ newDefaultBranch("notmaster").Pair(Revision("94ee631b9833cd805d15f50a52e0533124ec0292")), }) wg.Done() }() go func() { // Use the existing v0 branch for v0, not master tfunc("gopkg.in/carolynvs/deptest-gopkgin-explicit-v0.v0", "github.com/carolynvs/deptest-gopkgin-explicit-v0", 0, []Version{ newDefaultBranch("v0").Pair(Revision("ec73e84554fb28f08dba630e48dbec868e77f734")), }) wg.Done() }() go func() { tfunc("gopkg.in/sdboyer/gpkt.v1", "github.com/sdboyer/gpkt", 1, []Version{ NewVersion("v1.1.0").Pair(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), NewVersion("v1.0.0").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), newDefaultBranch("v1.1").Pair(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), NewBranch("v1").Pair(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), }) wg.Done() }() go func() { tfunc("gopkg.in/sdboyer/gpkt.v2", "github.com/sdboyer/gpkt", 2, []Version{ NewVersion("v2.0.0").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), }) wg.Done() }() go func() { tfunc("gopkg.in/sdboyer/gpkt.v3", 
"github.com/sdboyer/gpkt", 3, []Version{ newDefaultBranch("v3").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), }) wg.Done() }() go func() { tfunc("github.com/sdboyer/gpkt2.v1-unstable", "github.com/sdboyer/gpkt2", 1, []Version{ newDefaultBranch("v1-unstable").Pair(Revision("24de0be8f4a0b8a44321562117749b257bfcef69")), }) wg.Done() }() wg.Wait() } func testBzrSourceInteractions(t *testing.T) { t.Parallel() // This test is quite slow (ugh bzr), so skip it on -short if testing.Short() { t.Skip("Skipping bzr source version fetching test in short mode") } if runtime.GOOS == "windows" { // TODO bzr on Windows is sometimes weirdly reporting different // "revision-id" (with mention of git), and it's breaking tests. Maybe // this also breaks our model of bzr on Windows; maybe it breaks our // model of bzr in general. But use of bzr is rare and dwindling, so for // now it's least harmful to turn off the test on Windows, as the // alternative is a DEEP dive and possible refactor. t.Skip("TODO: Windows bzr reporting of underlying object ids is confusing") } requiresBins(t, "bzr") cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Errorf("Failed to create temp dir: %s", err) } defer func() { if err := os.RemoveAll(cpath); err != nil { t.Errorf("removeAll failed: %s", err) } }() n := "launchpad.net/govcstestbzrrepo" un := "https://" + n u, err := url.Parse(un) if err != nil { t.Fatalf("Error parsing URL %s: %s", un, err) } mb := maybeBzrSource{ url: u, } ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("Unexpected error while setting up bzrSource for test repo: %s", err) } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning bzr repo: %s", err) } src, ok := isrc.(*bzrSource) if !ok { t.Fatalf("Expected a bzrSource, got a %T", isrc) } if un != src.upstreamURL() { t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) } evl := []Version{ 
NewVersion("1.0.0").Pair(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), newDefaultBranch("(default)").Pair(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), } // check that an expected rev is present is, err := src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) if err != nil { t.Errorf("Unexpected error while checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present") } pvlist, err := src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) } vlist := hidePair(pvlist) if len(vlist) != 2 { t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) } else { SortForUpgrade(vlist) if !reflect.DeepEqual(vlist, evl) { t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) } } // Run again, this time to ensure cache outputs correctly pvlist, err = src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) } vlist = hidePair(pvlist) if len(vlist) != 2 { t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) } else { SortForUpgrade(vlist) if !reflect.DeepEqual(vlist, evl) { t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) } } // recheck that rev is present, this time interacting with cache differently is, err = src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) if err != nil { t.Errorf("Unexpected error while re-checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present on re-check") } } func testHgSourceInteractions(t *testing.T) { t.Parallel() // This test is slow, so skip it on -short if testing.Short() { t.Skip("Skipping hg source version fetching test in short mode") } requiresBins(t, "hg") cpath, err := ioutil.TempDir("", "smcache") if err 
!= nil { t.Errorf("Failed to create temp dir: %s", err) } defer func() { if err := os.RemoveAll(cpath); err != nil { t.Errorf("removeAll failed: %s", err) } }() tfunc := func(n string, evl []Version) { un := "https://" + n u, err := url.Parse(un) if err != nil { t.Errorf("URL was bad, lolwut? errtext: %s", err) return } mb := maybeHgSource{ url: u, } ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Errorf("Unexpected error while setting up hgSource for test repo: %s", err) return } err = isrc.initLocal(ctx) if err != nil { t.Errorf("Error on cloning hg repo: %s", err) return } src, ok := isrc.(*hgSource) if !ok { t.Errorf("Expected a hgSource, got a %T", isrc) return } if un != src.upstreamURL() { t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) } // check that an expected rev is present is, err := src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) if err != nil { t.Errorf("Unexpected error while checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present") } pvlist, err := src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) } vlist := hidePair(pvlist) if len(vlist) != len(evl) { t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) } else { SortForUpgrade(vlist) for k, e := range evl { if !vlist[k].identical(e) { t.Errorf("version list was not what we expected:\n\t(GOT): %#v\n\t(WNT): %#v", vlist, evl) break } } } // Run again, this time to ensure cache outputs correctly pvlist, err = src.listVersions(ctx) if err != nil { t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) } vlist = hidePair(pvlist) if len(vlist) != len(evl) { t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) } else { SortForUpgrade(vlist) for k, e := range evl { if !vlist[k].identical(e) { t.Errorf("version list was not what we 
expected:\n\t(GOT): %#v\n\t(WNT): %#v", vlist, evl) break } } } // recheck that rev is present, this time interacting with cache differently is, err = src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) if err != nil { t.Errorf("Unexpected error while re-checking revision presence: %s", err) } else if !is { t.Errorf("Revision that should exist was not present on re-check") } } // simultaneously run for both the repo with and without the magic bookmark donech := make(chan struct{}) go func() { tfunc("bitbucket.org/sdboyer/withbm", []Version{ NewVersion("v1.0.0").Pair(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), newDefaultBranch("@").Pair(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), NewBranch("another").Pair(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), NewBranch("default").Pair(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), NewBranch("newbranch").Pair(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), }) close(donech) }() tfunc("bitbucket.org/sdboyer/nobm", []Version{ NewVersion("v1.0.0").Pair(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), newDefaultBranch("default").Pair(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), NewBranch("another").Pair(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), NewBranch("newbranch").Pair(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), }) <-donech } func TestGitSourceListVersionsNoHEAD(t *testing.T) { // t.Parallel() requiresBins(t, "git") h := test.NewHelper(t) defer h.Cleanup() h.TempDir("smcache") cpath := h.Path("smcache") os.Mkdir(filepath.Join(cpath, "sources"), 0777) h.TempDir("repo") repoPath := h.Path("repo") // Create test repo with a single commit on the master branch h.RunGit(repoPath, "init") h.RunGit(repoPath, "config", "--local", "user.email", "test@example.com") h.RunGit(repoPath, "config", "--local", "user.name", "Test author") h.RunGit(repoPath, "commit", "--allow-empty", `--message="Initial commit"`) // Make HEAD point at a 
nonexistent branch (deleting it is not allowed) // The `git ls-remote` that listVersions() calls will not return a HEAD ref // because it points at a nonexistent branch h.RunGit(repoPath, "symbolic-ref", "HEAD", "refs/heads/nonexistent") un := "file://" + filepath.ToSlash(repoPath) u, err := url.Parse(un) if err != nil { t.Fatalf("Error parsing URL %s: %s", un, err) } mb := maybeGitSource{u} ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("Unexpected error while setting up gitSource for test repo: %s", err) } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning git repo: %s", err) } src, ok := isrc.(*gitSource) if !ok { t.Fatalf("Expected a gitSource, got a %T", isrc) } pvlist, err := src.listVersions(ctx) if err != nil { t.Fatalf("Unexpected error getting version pairs from git repo: %s", err) } if len(pvlist) != 1 { t.Errorf("Unexpected version pair length:\n\t(GOT): %d\n\t(WNT): %d", len(pvlist), 1) } } func TestGitSourceListVersionsNoDupes(t *testing.T) { // t.Parallel() // This test is slowish, skip it on -short if testing.Short() { t.Skip("Skipping git source version fetching test in short mode") } requiresBins(t, "git") cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Errorf("Failed to create temp dir: %s", err) } defer func() { if err := os.RemoveAll(cpath); err != nil { t.Errorf("removeAll failed: %s", err) } }() os.Mkdir(filepath.Join(cpath, "sources"), 0777) n := "github.com/carolynvs/deptest-importers" un := "https://" + n u, err := url.Parse(un) if err != nil { t.Fatalf("Error parsing URL %s: %s", un, err) } mb := maybeGitSource{ url: u, } ctx := context.Background() src, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("Unexpected error while setting up gitSource for test repo: %s", err) } err = src.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning git repo: %s", err) } pvlist, err := src.listVersions(ctx) if err != nil { t.Fatalf("Unexpected error getting version pairs from 
git repo: %s", err) } for i := range pvlist { pv1 := pvlist[i] uv1 := pv1.Unpair() for j := range pvlist { if i == j { continue } pv2 := pvlist[j] uv2 := pv2.Unpair() if uv1 == uv2 { t.Errorf("duplicate version pair mapping from %#v to both %q and %q", uv1, pv1.Revision(), pv2.Revision()) } } } } func TestGitSourceAdaptiveCleanup(t *testing.T) { // t.Parallel() // This test is slowish, skip it on -short if testing.Short() { t.Skip("Skipping git adaptive failure recovery test in short mode") } requiresBins(t, "git") cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Fatalf("Failed to create temp dir: %s", err) } var sm *SourceMgr mkSM := func() { // If sm is already set, make sure it's released, then create a new one. if sm != nil { sm.Release() } var err error sm, err = NewSourceManager(SourceManagerConfig{ Cachedir: cpath, Logger: log.New(test.Writer{TB: t}, "", 0), }) if err != nil { t.Fatalf("Unexpected error on SourceManager creation: %s", err) } } mkSM() id := mkPI("github.com/sdboyer/gpkt") err = sm.SyncSourceFor(id) if err != nil { t.Fatal(err) } repodir := filepath.Join(sm.cachedir, "sources", "https---github.com-sdboyer-gpkt") if _, err := os.Stat(repodir); err != nil { if os.IsNotExist(err) { t.Fatalf("expected location for repodir did not exist: %q", repodir) } else { t.Fatal(err) } } // Create a file that git will see as untracked. untrackedPath := filepath.Join(repodir, "untrackedfile") err = ioutil.WriteFile(untrackedPath, []byte("foo"), 0644) if err != nil { t.Fatal(err) } mkSM() err = sm.SyncSourceFor(id) if err != nil { t.Fatalf("choked after adding dummy file: %q", err) } if _, err := os.Stat(untrackedPath); err == nil { t.Fatal("untracked file still existed after cleanup should've been triggered") } // Remove a file that we know exists, which `git status` checks should catch. 
readmePath := filepath.Join(repodir, "README.md") os.Remove(readmePath) mkSM() err = sm.SyncSourceFor(id) if err != nil { t.Fatalf("choked after removing known file: %q", err) } if _, err := os.Stat(readmePath); err != nil { t.Fatal("README was still absent after cleanup should've been triggered") } // Remove .git/objects directory, which should make git bite it. err = os.RemoveAll(filepath.Join(repodir, ".git", "objects")) if err != nil { t.Fatal(err) } mkSM() err = sm.SyncSourceFor(id) if err != nil { t.Fatalf("choked after removing .git/objects directory: %q", err) } sm.Release() os.RemoveAll(cpath) } func Test_bzrSource_exportRevisionTo_removeVcsFiles(t *testing.T) { t.Parallel() // This test is slow, so skip it on -short if testing.Short() { t.Skip("Skipping hg source version fetching test in short mode") } if runtime.GOOS == "windows" { // TODO see todo in TestBzrSourceInteractions t.Skip("TODO: Windows bzr reporting of underlying object ids is confusing") } requiresBins(t, "bzr") h := test.NewHelper(t) defer h.Cleanup() h.TempDir("smcache") cpath := h.Path("smcache") repoPath := filepath.Join(h.Path("."), "repo") rev := Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68") n := "launchpad.net/govcstestbzrrepo" un := "https://" + n u, err := url.Parse(un) if err != nil { t.Errorf("URL was bad, lolwut? 
errtext: %s", err) return } mb := maybeBzrSource{u} ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("unexpected error while setting up hgSource for test repo: %s", err) } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning bzr repo: %s", err) } src, ok := isrc.(*bzrSource) if !ok { t.Fatalf("expected a bzrSource, got a %T", isrc) } if err := src.exportRevisionTo(ctx, rev, repoPath); err != nil { t.Fatalf("unexpected error: %v", err) } _, err = os.Stat(filepath.Join(repoPath, ".bzr")) if err == nil { t.Fatal("expected .bzr/ to not exists") } else if !os.IsNotExist(err) { t.Fatalf("unexpected error: %v", err) } } func Test_hgSource_exportRevisionTo_removeVcsFiles(t *testing.T) { t.Parallel() // This test is slow, so skip it on -short if testing.Short() { t.Skip("Skipping hg source version fetching test in short mode") } requiresBins(t, "hg") h := test.NewHelper(t) defer h.Cleanup() h.TempDir("smcache") cpath := h.Path("smcache") repoPath := filepath.Join(h.Path("."), "repo") rev := Revision("6f55e1f03d91f8a7cce35d1968eb60a2352e4d59") n := "bitbucket.org/golang-dep/dep-test" un := "https://" + n u, err := url.Parse(un) if err != nil { t.Errorf("URL was bad, lolwut? errtext: %s", err) return } mb := maybeHgSource{u} ctx := context.Background() isrc, err := mb.try(ctx, cpath) if err != nil { t.Fatalf("unexpected error while setting up hgSource for test repo: %s", err) } err = isrc.initLocal(ctx) if err != nil { t.Fatalf("Error on cloning hg repo: %s", err) } src, ok := isrc.(*hgSource) if !ok { t.Fatalf("expected a hgSource, got a %T", isrc) } if err := src.exportRevisionTo(ctx, rev, repoPath); err != nil { t.Fatalf("unexpected error: %v", err) } _, err = os.Stat(filepath.Join(repoPath, ".hg")) if err == nil { t.Fatal("expected .hg/ to not exists") } else if !os.IsNotExist(err) { t.Fatalf("unexpected error: %v", err) } } // Fail a test if the specified binaries aren't installed. 
// requiresBins fails the test immediately unless every named executable
// can be resolved on the current PATH.
func requiresBins(t *testing.T, bins ...string) {
	for _, name := range bins {
		if _, err := exec.LookPath(name); err != nil {
			t.Fatalf("%s is not installed", name)
		}
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "io/ioutil" "log" "path" "reflect" "sort" "testing" "time" "github.com/golang/dep/gps/pkgtree" "github.com/golang/dep/internal/test" "github.com/pkg/errors" ) func Test_singleSourceCache(t *testing.T) { newMem := func(*testing.T, string) sourceCache { return memoryCache{} } t.Run("mem", singleSourceCacheTest{newCache: newMem}.run) epoch := time.Now().Unix() newBolt := func(t *testing.T, cachedir string) sourceCache { bc, err := newBoltCache(cachedir, epoch, log.New(test.Writer{TB: t}, "", 0)) if err != nil { t.Fatal(err) } return bc } t.Run("bolt/keepOpen", singleSourceCacheTest{newCache: newBolt}.run) t.Run("bolt/reOpen", singleSourceCacheTest{newCache: newBolt, persistent: true}.run) newMulti := func(t *testing.T, cachedir string) sourceCache { bc, err := newBoltCache(cachedir, epoch, log.New(test.Writer{TB: t}, "", 0)) if err != nil { t.Fatal(err) } return newMultiCache(memoryCache{}, bc) } t.Run("multi/keepOpen", singleSourceCacheTest{newCache: newMulti}.run) t.Run("multi/reOpen", singleSourceCacheTest{persistent: true, newCache: newMulti}.run) t.Run("multi/keepOpen/noDisk", singleSourceCacheTest{ newCache: func(*testing.T, string) sourceCache { return newMultiCache(memoryCache{}, discardCache{}) }, }.run) t.Run("multi/reOpen/noMem", singleSourceCacheTest{ persistent: true, newCache: func(t *testing.T, cachedir string) sourceCache { bc, err := newBoltCache(cachedir, epoch, log.New(test.Writer{TB: t}, "", 0)) if err != nil { t.Fatal(err) } return newMultiCache(discardCache{}, bc) }, }.run) } var testAnalyzerInfo = ProjectAnalyzerInfo{ Name: "test-analyzer", Version: 1, } type singleSourceCacheTest struct { newCache func(*testing.T, string) sourceCache persistent bool } // run tests singleSourceCache methods of caches returned by test.newCache. 
// For test.persistent caches, test.newCache is periodically called mid-test to ensure persistence. func (test singleSourceCacheTest) run(t *testing.T) { const root = "example.com/test" pi := mkPI(root).normalize() cpath, err := ioutil.TempDir("", "singlesourcecache") if err != nil { t.Fatalf("Failed to create temp cache dir: %s", err) } t.Run("info", func(t *testing.T) { const rev Revision = "revision" sc := test.newCache(t, cpath) c := sc.newSingleSourceCache(pi) defer func() { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } }() var m Manifest = &simpleRootManifest{ c: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{ Constraint: Any(), }, ProjectRoot("bar"): ProjectProperties{ Source: "whatever", Constraint: testSemverConstraint(t, "> 1.3"), }, }, ovr: ProjectConstraints{ ProjectRoot("b"): ProjectProperties{ Constraint: testSemverConstraint(t, "2.0.0"), }, }, req: map[string]bool{ "c": true, "d": true, }, ig: pkgtree.NewIgnoredRuleset([]string{"a", "b"}), } var l Lock = &safeLock{ p: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("anything"), []string{"gps"}), NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.10.0").Pair("whatever"), nil), NewLockedProject(mkPI("github.com/sdboyer/gps3"), NewVersion("v0.10.0").Pair("again"), []string{"gps", "flugle"}), NewLockedProject(mkPI("foo"), NewVersion("nada").Pair("itsaliving"), []string{"foo"}), NewLockedProject(mkPI("github.com/sdboyer/gps4"), NewVersion("v0.10.0").Pair("meow"), []string{"flugle", "gps"}), }, } c.setManifestAndLock(rev, testAnalyzerInfo, m, l) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } gotM, gotL, ok := c.getManifestAndLock(rev, testAnalyzerInfo) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, m, gotM) // TODO(sdboyer) use DiffLocks after refactoring to 
avoid import cycles if !locksAreEq(l, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", l, gotL) } m = &simpleRootManifest{ c: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{ Source: "whatever", Constraint: Any(), }, }, ovr: ProjectConstraints{ ProjectRoot("bar"): ProjectProperties{ Constraint: testSemverConstraint(t, "2.0.0"), }, }, req: map[string]bool{ "a": true, "b": true, }, ig: pkgtree.NewIgnoredRuleset([]string{"c", "d"}), } l = &safeLock{ p: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}), NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.11.0").Pair("anything"), []string{"gps"}), NewLockedProject(mkPI("github.com/sdboyer/gps3"), Revision("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}), }, } c.setManifestAndLock(rev, testAnalyzerInfo, m, l) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } gotM, gotL, ok = c.getManifestAndLock(rev, testAnalyzerInfo) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, m, gotM) // TODO(sdboyer) use DiffLocks after refactoring to avoid import cycles if !locksAreEq(l, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", l, gotL) } }) t.Run("pkgTree", func(t *testing.T) { sc := test.newCache(t, cpath) c := sc.newSingleSourceCache(pi) defer func() { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } }() const rev Revision = "rev_adsfjkl" if got, ok := c.getPackageTree(rev, root); ok { t.Fatalf("unexpected result before setting package tree: %v", got) } if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } pt := pkgtree.PackageTree{ ImportRoot: root, Packages: 
map[string]pkgtree.PackageOrErr{ root: { P: pkgtree.Package{ ImportPath: root, CommentPath: "comment", Name: "test", Imports: []string{ "sort", }, }, }, path.Join(root, "simple"): { P: pkgtree.Package{ ImportPath: path.Join(root, "simple"), CommentPath: "comment", Name: "simple", Imports: []string{ "github.com/golang/dep/gps", "sort", }, }, }, path.Join(root, "m1p"): { P: pkgtree.Package{ ImportPath: path.Join(root, "m1p"), CommentPath: "", Name: "m1p", Imports: []string{ "github.com/golang/dep/gps", "os", "sort", }, }, }, }, } c.setPackageTree(rev, pt) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } got, ok := c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", pt) } comparePackageTree(t, pt, got) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } pt = pkgtree.PackageTree{ ImportRoot: root, Packages: map[string]pkgtree.PackageOrErr{ path.Join(root, "test"): { Err: errors.New("error"), }, }, } c.setPackageTree(rev, pt) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } got, ok = c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", pt) } comparePackageTree(t, pt, got) }) t.Run("versions", func(t *testing.T) { sc := test.newCache(t, cpath) c := sc.newSingleSourceCache(pi) defer func() { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } }() const rev1, rev2 = "rev1", "rev2" const br, ver = "branch_name", "2.10" versions := []PairedVersion{ NewBranch(br).Pair(rev1), NewVersion(ver).Pair(rev2), } SortPairedForDowngrade(versions) c.setVersionMap(versions) if test.persistent { if err := sc.close(); err != nil { t.Fatal("failed to close cache:", err) } sc = 
test.newCache(t, cpath) c = sc.newSingleSourceCache(pi) } t.Run("getAllVersions", func(t *testing.T) { got, ok := c.getAllVersions() if !ok || len(got) != len(versions) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", got, versions) } else { SortPairedForDowngrade(got) for i := range versions { if !versions[i].identical(got[i]) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", got, versions) break } } } }) revToUV := map[Revision]UnpairedVersion{ rev1: NewBranch(br), rev2: NewVersion(ver), } t.Run("getVersionsFor", func(t *testing.T) { for rev, want := range revToUV { rev, want := rev, want t.Run(string(rev), func(t *testing.T) { uvs, ok := c.getVersionsFor(rev) if !ok { t.Errorf("no version found:\n\t(WNT) %#v", want) } else if len(uvs) != 1 { t.Errorf("expected one result but got %d", len(uvs)) } else { uv := uvs[0] if uv.Type() != want.Type() { t.Errorf("expected version type %d but got %d", want.Type(), uv.Type()) } if uv.String() != want.String() { t.Errorf("expected version %q but got %q", want.String(), uv.String()) } } }) } }) t.Run("getRevisionFor", func(t *testing.T) { for want, uv := range revToUV { want, uv := want, uv t.Run(uv.String(), func(t *testing.T) { rev, ok := c.getRevisionFor(uv) if !ok { t.Errorf("expected revision %q but got none", want) } else if rev != want { t.Errorf("expected revision %q but got %q", want, rev) } }) } }) t.Run("toRevision", func(t *testing.T) { for want, uv := range revToUV { want, uv := want, uv t.Run(uv.String(), func(t *testing.T) { rev, ok := c.toRevision(uv) if !ok { t.Errorf("expected revision %q but got none", want) } else if rev != want { t.Errorf("expected revision %q but got %q", want, rev) } }) } }) t.Run("toUnpaired", func(t *testing.T) { for rev, want := range revToUV { rev, want := rev, want t.Run(want.String(), func(t *testing.T) { uv, ok := c.toUnpaired(rev) if !ok { t.Errorf("no UnpairedVersion found:\n\t(WNT): %#v", uv) } else if !uv.identical(want) { t.Errorf("unexpected 
UnpairedVersion:\n\t(GOT): %#v\n\t(WNT): %#v", uv, want) } }) } }) }) } // compareManifests compares two manifests and reports differences as test errors. func compareManifests(t *testing.T, want, got Manifest) { if (want == nil || got == nil) && (got != nil || want != nil) { t.Errorf("one manifest is nil:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) return } { want, got := want.DependencyConstraints(), got.DependencyConstraints() if !projectConstraintsEqual(want, got) { t.Errorf("unexpected constraints:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) } } wantRM, wantOK := want.(RootManifest) gotRM, gotOK := got.(RootManifest) if wantOK && !gotOK { t.Errorf("expected RootManifest:\n\t(GOT): %#v", got) return } if gotOK && !wantOK { t.Errorf("didn't expected RootManifest:\n\t(GOT): %#v", got) return } { want, got := wantRM.IgnoredPackages(), gotRM.IgnoredPackages() if !reflect.DeepEqual(want.ToSlice(), got.ToSlice()) { t.Errorf("unexpected ignored packages:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) } } { want, got := wantRM.Overrides(), gotRM.Overrides() if !projectConstraintsEqual(want, got) { t.Errorf("unexpected overrides:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) } } { want, got := wantRM.RequiredPackages(), gotRM.RequiredPackages() if !mapStringBoolEqual(want, got) { t.Errorf("unexpected required packages:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) } } } // comparePackageTree compares two pkgtree.PackageTree and reports differences as test errors. 
func comparePackageTree(t *testing.T, want, got pkgtree.PackageTree) { if got.ImportRoot != want.ImportRoot { t.Errorf("expected package tree root %q but got %q", want.ImportRoot, got.ImportRoot) } { want, got := want.Packages, got.Packages if len(want) != len(got) { t.Errorf("unexpected packages:\n\t(GOT): %#v\n\t(WNT): %#v", got, want) } else { for k, v := range want { if v2, ok := got[k]; !ok { t.Errorf("key %s: expected %v but got none", k, v) } else if !packageOrErrEqual(v, v2) { t.Errorf("key %s: expected %v but got %v", k, v, v2) } } } } } func projectConstraintsEqual(want, got ProjectConstraints) bool { loop, check := want, got if len(got) > len(want) { loop, check = got, want } for pr, pp := range loop { pp2, ok := check[pr] if !ok { return false } if pp.Source != pp2.Source { return false } if pp.Constraint == nil || pp2.Constraint == nil { if pp.Constraint != nil || pp2.Constraint != nil { return false } } else if !pp.Constraint.identical(pp2.Constraint) { return false } } return true } func mapStringBoolEqual(exp, got map[string]bool) bool { loop, check := exp, got if len(got) > len(exp) { loop, check = got, exp } for k, v := range loop { v2, ok := check[k] if !ok || v != v2 { return false } } return true } func safeError(err error) string { if err == nil { return "" } return err.Error() } // packageOrErrEqual return true if the pkgtree.PackageOrErrs are equal. Error equality is // string based. Imports and TestImports are treated as sets, and will be sorted. 
func packageOrErrEqual(a, b pkgtree.PackageOrErr) bool { if safeError(a.Err) != safeError(b.Err) { return false } if a.P.Name != b.P.Name { return false } if a.P.ImportPath != b.P.ImportPath { return false } if a.P.CommentPath != b.P.CommentPath { return false } if len(a.P.Imports) != len(b.P.Imports) { return false } sort.Strings(a.P.Imports) sort.Strings(b.P.Imports) for i := range a.P.Imports { if a.P.Imports[i] != b.P.Imports[i] { return false } } if len(a.P.TestImports) != len(b.P.TestImports) { return false } sort.Strings(a.P.TestImports) sort.Strings(b.P.TestImports) for i := range a.P.TestImports { if a.P.TestImports[i] != b.P.TestImports[i] { return false } } return true } // discardCache produces singleSourceDiscardCaches. type discardCache struct{} func (discardCache) newSingleSourceCache(ProjectIdentifier) singleSourceCache { return discard } func (discardCache) close() error { return nil } var discard singleSourceCache = singleSourceDiscardCache{} // singleSourceDiscardCache discards set values and returns nothing. 
type singleSourceDiscardCache struct{} func (singleSourceDiscardCache) setManifestAndLock(Revision, ProjectAnalyzerInfo, Manifest, Lock) {} func (singleSourceDiscardCache) getManifestAndLock(Revision, ProjectAnalyzerInfo) (Manifest, Lock, bool) { return nil, nil, false } func (singleSourceDiscardCache) setPackageTree(Revision, pkgtree.PackageTree) {} func (singleSourceDiscardCache) getPackageTree(Revision, ProjectRoot) (pkgtree.PackageTree, bool) { return pkgtree.PackageTree{}, false } func (singleSourceDiscardCache) markRevisionExists(r Revision) {} func (singleSourceDiscardCache) setVersionMap(versionList []PairedVersion) {} func (singleSourceDiscardCache) getVersionsFor(Revision) ([]UnpairedVersion, bool) { return nil, false } func (singleSourceDiscardCache) getAllVersions() ([]PairedVersion, bool) { return nil, false } func (singleSourceDiscardCache) getRevisionFor(UnpairedVersion) (Revision, bool) { return "", false } func (singleSourceDiscardCache) toRevision(v Version) (Revision, bool) { return "", false } func (singleSourceDiscardCache) toUnpaired(v Version) (UnpairedVersion, bool) { return nil, false }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/cmd.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "os" ) func (c cmd) Args() []string { return c.Cmd.Args } func (c cmd) SetDir(dir string) { c.Cmd.Dir = dir } func (c cmd) SetEnv(env []string) { c.Cmd.Env = env } func init() { // For our git repositories, we very much assume a "regular" topology. // Therefore, no value for the following variables can be relevant to // us. Unsetting globally properly propagates to libraries like // github.com/Masterminds/vcs, which cannot make the same assumption in // general. parasiteGitVars := []string{"GIT_DIR", "GIT_INDEX_FILE", "GIT_OBJECT_DIRECTORY", "GIT_WORK_TREE"} for _, e := range parasiteGitVars { os.Unsetenv(e) } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/selection.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps type selection struct { // projects is a stack of the atoms that have currently been selected by the // solver. It can also be thought of as the vertex set of the current // selection graph. projects []selected // deps records the set of dependers on a given ProjectRoot. It is // essentially an adjacency list of *inbound* edges. deps map[ProjectRoot][]dependency // foldRoots records a mapping from a canonical, case-folded form of // ProjectRoots to the particular case variant that has currently been // selected. foldRoots map[string]ProjectRoot } type selected struct { a atomWithPackages first bool } func (s *selection) getDependenciesOn(id ProjectIdentifier) []dependency { if deps, exists := s.deps[id.ProjectRoot]; exists { return deps } return nil } // getIdentFor returns the ProjectIdentifier (so, the network name) currently in // use for the provided ProjectRoot. // // If no dependencies are present yet that designate a network name for // the provided root, this will return an empty ProjectIdentifier and false. func (s *selection) getIdentFor(pr ProjectRoot) (ProjectIdentifier, bool) { deps := s.getDependenciesOn(ProjectIdentifier{ProjectRoot: pr}) if len(deps) == 0 { return ProjectIdentifier{}, false } // For now, at least, the solver maintains (assumes?) the invariant that // whatever is first in the deps list decides the net name to be used. return deps[0].dep.Ident, true } // pushSelection pushes a new atomWithPackages onto the selection stack, along // with an indicator as to whether this selection indicates a new project *and* // packages, or merely some new packages on a project that was already selected. 
func (s *selection) pushSelection(a atomWithPackages, pkgonly bool) { s.projects = append(s.projects, selected{ a: a, first: !pkgonly, }) } // popSelection removes and returns the last atomWithPackages from the selection // stack, along with an indication of whether that element was the first from // that project - that is, if it represented an addition of both a project and // one or more packages to the overall selection. func (s *selection) popSelection() (atomWithPackages, bool) { var sel selected sel, s.projects = s.projects[len(s.projects)-1], s.projects[:len(s.projects)-1] return sel.a, sel.first } // findCaseConflicts checks to see if the given ProjectRoot has a // case-insensitive overlap with another, different ProjectRoot that's already // been picked. func (s *selection) findCaseConflicts(pr ProjectRoot) (bool, ProjectRoot) { if current, has := s.foldRoots[toFold(string(pr))]; has && pr != current { return true, current } return false, "" } func (s *selection) pushDep(dep dependency) { pr := dep.dep.Ident.ProjectRoot deps := s.deps[pr] if len(deps) == 0 { s.foldRoots[toFold(string(pr))] = pr } s.deps[pr] = append(deps, dep) } func (s *selection) popDep(id ProjectIdentifier) (dep dependency) { deps := s.deps[id.ProjectRoot] dlen := len(deps) if dlen == 1 { delete(s.foldRoots, toFold(string(id.ProjectRoot))) } dep, s.deps[id.ProjectRoot] = deps[dlen-1], deps[:dlen-1] return dep } func (s *selection) depperCount(id ProjectIdentifier) int { return len(s.deps[id.ProjectRoot]) } // Compute a list of the unique packages within the given ProjectIdentifier that // have dependers, and the number of dependers they have. func (s *selection) getRequiredPackagesIn(id ProjectIdentifier) map[string]int { // TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to // precompute it on pushing a new dep, and preferably with an immut // structure so that we can pop with zero cost. 
uniq := make(map[string]int) for _, dep := range s.deps[id.ProjectRoot] { for _, pkg := range dep.dep.pl { uniq[pkg]++ } } return uniq } // Suppress unused linting warning. var _ = (*selection)(nil).getSelectedPackagesIn var _ = (*selection)(nil).getProjectImportMap // Compute a list of the unique packages within the given ProjectIdentifier that // are currently selected, and the number of times each package has been // independently selected. func (s *selection) getSelectedPackagesIn(id ProjectIdentifier) map[string]int { // TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to // precompute it on pushing a new dep, and preferably with an immut // structure so that we can pop with zero cost. uniq := make(map[string]int) for _, p := range s.projects { if p.a.a.id.eq(id) { for _, pkg := range p.a.pl { uniq[pkg]++ } } } return uniq } // getProjectImportMap extracts the set of package imports from the used // packages in each selected project. func (s *selection) getProjectImportMap() map[ProjectRoot]map[string]struct{} { importMap := make(map[ProjectRoot]map[string]struct{}) for _, edges := range s.deps { for _, edge := range edges { var curmap map[string]struct{} if imap, has := importMap[edge.depender.id.ProjectRoot]; !has { curmap = make(map[string]struct{}) } else { curmap = imap } for _, pl := range edge.dep.pl { curmap[pl] = struct{}{} } importMap[edge.depender.id.ProjectRoot] = curmap } } return importMap } func (s *selection) getConstraint(id ProjectIdentifier) Constraint { deps, exists := s.deps[id.ProjectRoot] if !exists || len(deps) == 0 { return any } // TODO(sdboyer) recomputing this sucks and is quite wasteful. Precompute/cache it // on changes to the constraint set, instead. // The solver itself is expected to maintain the invariant that all the // constraints kept here collectively admit a non-empty set of versions. We // assume this is the case here while assembling a composite constraint. 
// Start with the open set var ret Constraint = any for _, dep := range deps { ret = ret.Intersect(dep.dep.Constraint) } return ret } // selected checks to see if the given ProjectIdentifier has been selected, and // if so, returns the corresponding atomWithPackages. // // It walks the projects selection list from front to back and returns the first // match it finds, which means it will always and only return the base selection // of the project, without any additional package selections that may or may not // have happened later. func (s *selection) selected(id ProjectIdentifier) (atomWithPackages, bool) { for _, p := range s.projects { if p.a.a.id.ProjectRoot == id.ProjectRoot { return p.a, true } } return atomWithPackages{a: nilpa}, false } type unselected struct { sl []bimodalIdentifier cmp func(i, j int) bool } func (u unselected) Len() int { return len(u.sl) } func (u unselected) Less(i, j int) bool { return u.cmp(i, j) } func (u unselected) Swap(i, j int) { u.sl[i], u.sl[j] = u.sl[j], u.sl[i] } func (u *unselected) Push(x interface{}) { u.sl = append(u.sl, x.(bimodalIdentifier)) } func (u *unselected) Pop() (v interface{}) { v, u.sl = u.sl[len(u.sl)-1], u.sl[:len(u.sl)-1] return v } // remove takes a bimodalIdentifier out of the priority queue, if present. Only // the first matching bmi will be removed. // // There are two events that cause this to be called: bmi selection, when the // bmi at the front of the queue is removed, and backtracking, when a bmi // becomes unnecessary because the dependency that induced it was backtracked // and popped off. // // The worst case for both of these is O(n), but in practice the first case is // O(1), as we iterate the queue from front to back. 
func (u *unselected) remove(bmi bimodalIdentifier) { plen := len(bmi.pl) outer: for i, pi := range u.sl { if pi.id.eq(bmi.id) && len(pi.pl) == plen { // Simple slice comparison - assume they're both sorted the same for i2, pkg := range pi.pl { if bmi.pl[i2] != pkg { continue outer } } if i == len(u.sl)-1 { // if we're on the last element, just pop, no splice u.sl = u.sl[:len(u.sl)-1] } else { u.sl = append(u.sl[:i], u.sl[i+1:]...) } break } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/constraint.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "fmt" "sort" "github.com/Masterminds/semver" "github.com/golang/dep/gps/internal/pb" ) var ( none = noneConstraint{} any = anyConstraint{} ) // A Constraint provides structured limitations on the versions that are // admissible for a given project. // // As with Version, it has a private method because the gps's internal // implementation of the problem is complete, and the system relies on type // magic to operate. type Constraint interface { fmt.Stringer // ImpliedCaretString converts the Constraint to a string in the same manner // as String(), but treats the empty operator as equivalent to ^, rather // than =. // // In the same way that String() is the inverse of NewConstraint(), this // method is the inverse of NewSemverConstraintIC(). ImpliedCaretString() string // Matches indicates if the provided Version is allowed by the Constraint. Matches(Version) bool // MatchesAny indicates if the intersection of the Constraint with the // provided Constraint would yield a Constraint that could allow *any* // Version. MatchesAny(Constraint) bool // Intersect computes the intersection of the Constraint with the provided // Constraint. Intersect(Constraint) Constraint // typedString emits the normal stringified representation of the provided // constraint, prefixed with a string that uniquely identifies the type of // the constraint. // // It also forces Constraint to be a private/sealed interface, which is a // design goal of the system. typedString() string // copyTo copies fields into a serializable representation which can be // converted back into an identical Constraint with constraintFromCache. copyTo(*pb.Constraint) // identical returns true if the constraints are identical. // // Identical Constraints behave identically for all methods defined by the // interface. 
A Constraint is always identical to itself. // // Constraints serialized for caching are de-serialized into identical instances. identical(Constraint) bool } // constraintFromCache returns a Constraint identical to the one which produced m. func constraintFromCache(m *pb.Constraint) (Constraint, error) { switch m.Type { case pb.Constraint_Revision: return Revision(m.Value), nil case pb.Constraint_Branch: return NewBranch(m.Value), nil case pb.Constraint_DefaultBranch: return newDefaultBranch(m.Value), nil case pb.Constraint_Version: return plainVersion(m.Value), nil case pb.Constraint_Semver: return NewSemverConstraint(m.Value) default: return nil, fmt.Errorf("unrecognized Constraint type: %#v", m) } } // unpairedVersionFromCache returns an UnpairedVersion identical to the one which produced m. func unpairedVersionFromCache(m *pb.Constraint) (UnpairedVersion, error) { switch m.Type { case pb.Constraint_Branch: return NewBranch(m.Value), nil case pb.Constraint_DefaultBranch: return newDefaultBranch(m.Value), nil case pb.Constraint_Version: return plainVersion(m.Value), nil case pb.Constraint_Semver: sv, err := semver.NewVersion(m.Value) if err != nil { return nil, err } return semVersion{sv: sv}, nil default: return nil, fmt.Errorf("unrecognized UnpairedVersion type: %#v", m) } } // NewSemverConstraint attempts to construct a semver Constraint object from the // input string. // // If the input string cannot be made into a valid semver Constraint, an error // is returned. func NewSemverConstraint(body string) (Constraint, error) { c, err := semver.NewConstraint(body) if err != nil { return nil, err } // If we got a simple semver.Version, simplify by returning our // corresponding type if sv, ok := c.(semver.Version); ok { return semVersion{sv: sv}, nil } return semverConstraint{c: c}, nil } // NewSemverConstraintIC attempts to construct a semver Constraint object from the // input string, defaulting to a caret, ^, when no operator is specified. 
Put // differently, ^ is the default operator for NewSemverConstraintIC, while = // is the default operator for NewSemverConstraint. // // If the input string cannot be made into a valid semver Constraint, an error // is returned. func NewSemverConstraintIC(body string) (Constraint, error) { c, err := semver.NewConstraintIC(body) if err != nil { return nil, err } // If we got a simple semver.Version, simplify by returning our // corresponding type if sv, ok := c.(semver.Version); ok { return semVersion{sv: sv}, nil } return semverConstraint{c: c}, nil } type semverConstraint struct { c semver.Constraint } func (c semverConstraint) String() string { return c.c.String() } // ImpliedCaretString converts the Constraint to a string in the same manner // as String(), but treats the empty operator as equivalent to ^, rather // than =. // // In the same way that String() is the inverse of NewConstraint(), this // method is the inverse of NewSemverConstraintIC(). func (c semverConstraint) ImpliedCaretString() string { return c.c.ImpliedCaretString() } func (c semverConstraint) typedString() string { return fmt.Sprintf("svc-%s", c.c.String()) } func (c semverConstraint) Matches(v Version) bool { switch tv := v.(type) { case semVersion: return c.c.Matches(tv.sv) == nil case versionPair: if tv2, ok := tv.v.(semVersion); ok { return c.c.Matches(tv2.sv) == nil } } return false } func (c semverConstraint) MatchesAny(c2 Constraint) bool { return c.Intersect(c2) != none } func (c semverConstraint) Intersect(c2 Constraint) Constraint { switch tc := c2.(type) { case anyConstraint: return c case semverConstraint: rc := c.c.Intersect(tc.c) if !semver.IsNone(rc) { return semverConstraint{c: rc} } case semVersion: rc := c.c.Intersect(tc.sv) if !semver.IsNone(rc) { // If single version intersected with constraint, we know the result // must be the single version, so just return it back out return c2 } case versionPair: if tc2, ok := tc.v.(semVersion); ok { rc := c.c.Intersect(tc2.sv) if 
!semver.IsNone(rc) { // same reasoning as previous case return c2 } } } return none } func (c semverConstraint) identical(c2 Constraint) bool { sc2, ok := c2.(semverConstraint) if !ok { return false } return c.c.String() == sc2.c.String() } func (c semverConstraint) copyTo(msg *pb.Constraint) { msg.Type = pb.Constraint_Semver msg.Value = c.String() } // IsAny indicates if the provided constraint is the wildcard "Any" constraint. func IsAny(c Constraint) bool { _, ok := c.(anyConstraint) return ok } // Any returns a constraint that will match anything. func Any() Constraint { return anyConstraint{} } // anyConstraint is an unbounded constraint - it matches all other types of // constraints. It mirrors the behavior of the semver package's any type. type anyConstraint struct{} func (anyConstraint) String() string { return "*" } func (anyConstraint) ImpliedCaretString() string { return "*" } func (anyConstraint) typedString() string { return "any-*" } func (anyConstraint) Matches(Version) bool { return true } func (anyConstraint) MatchesAny(Constraint) bool { return true } func (anyConstraint) Intersect(c Constraint) Constraint { return c } func (anyConstraint) identical(c Constraint) bool { return IsAny(c) } func (anyConstraint) copyTo(*pb.Constraint) { panic("anyConstraint should never be serialized; it is solver internal-only") } // noneConstraint is the empty set - it matches no versions. It mirrors the // behavior of the semver package's none type. 
type noneConstraint struct{}

// String returns the empty string; none admits no versions.
func (noneConstraint) String() string {
	return ""
}

func (noneConstraint) ImpliedCaretString() string {
	return ""
}

func (noneConstraint) typedString() string {
	return "none-"
}

// Matches always reports false: no Version is admissible under none.
func (noneConstraint) Matches(Version) bool {
	return false
}

func (noneConstraint) MatchesAny(Constraint) bool {
	return false
}

// Intersect of none with anything is still none.
func (noneConstraint) Intersect(Constraint) Constraint {
	return none
}

// identical reports true only for another noneConstraint.
func (noneConstraint) identical(c Constraint) bool {
	_, ok := c.(noneConstraint)
	return ok
}

func (noneConstraint) copyTo(*pb.Constraint) {
	panic("noneConstraint should never be serialized; it is solver internal-only")
}

// A ProjectConstraint combines a ProjectIdentifier with a Constraint. It
// indicates that, if packages contained in the ProjectIdentifier enter the
// depgraph, they must do so at a version that is allowed by the Constraint.
type ProjectConstraint struct {
	Ident      ProjectIdentifier
	Constraint Constraint
}

// ProjectConstraints is a map of projects, as identified by their import path
// roots (ProjectRoots) to the corresponding ProjectProperties.
//
// They are the standard form in which Manifests declare their required
// dependency properties - constraints and network locations - as well as the
// form in which RootManifests declare their overrides.
type ProjectConstraints map[ProjectRoot]ProjectProperties

// workingConstraint pairs an identifier and constraint with flags recording
// whether the source and/or constraint were supplied by an override.
type workingConstraint struct {
	Ident                     ProjectIdentifier
	Constraint                Constraint
	overrNet, overrConstraint bool
}

// pcSliceToMap merges one or more slices of ProjectConstraints into a single
// ProjectConstraints map keyed by ProjectRoot. Entries from the first slice
// are taken as-is; entries from the variadic slices have their constraints
// intersected with any existing constraint recorded for the same root.
func pcSliceToMap(l []ProjectConstraint, r ...[]ProjectConstraint) ProjectConstraints {
	final := make(ProjectConstraints)

	for _, pc := range l {
		final[pc.Ident.ProjectRoot] = ProjectProperties{
			Source:     pc.Ident.Source,
			Constraint: pc.Constraint,
		}
	}

	for _, pcs := range r {
		for _, pc := range pcs {
			if pp, exists := final[pc.Ident.ProjectRoot]; exists {
				// Technically this should be done through a bridge for
				// cross-version-type matching...but this is a one off for root and
				// that's just ridiculous for this.
pp.Constraint = pp.Constraint.Intersect(pc.Constraint) final[pc.Ident.ProjectRoot] = pp } else { final[pc.Ident.ProjectRoot] = ProjectProperties{ Source: pc.Ident.Source, Constraint: pc.Constraint, } } } } return final } // overrideAll treats the receiver ProjectConstraints map as a set of override // instructions, and applies overridden values to the ProjectConstraints. // // A slice of workingConstraint is returned, allowing differentiation between // values that were or were not overridden. func (m ProjectConstraints) overrideAll(pcm ProjectConstraints) (out []workingConstraint) { out = make([]workingConstraint, len(pcm)) k := 0 for pr, pp := range pcm { out[k] = m.override(pr, pp) k++ } sort.SliceStable(out, func(i, j int) bool { return out[i].Ident.Less(out[j].Ident) }) return } // override replaces a single ProjectConstraint with a workingConstraint, // overriding its values if a corresponding entry exists in the // ProjectConstraints map. func (m ProjectConstraints) override(pr ProjectRoot, pp ProjectProperties) workingConstraint { wc := workingConstraint{ Ident: ProjectIdentifier{ ProjectRoot: pr, Source: pp.Source, }, Constraint: pp.Constraint, } if opp, has := m[pr]; has { // The rule for overrides is that *any* non-zero value for the prop // should be considered an override, even if it's equal to what's // already there. if opp.Constraint != nil { wc.Constraint = opp.Constraint wc.overrConstraint = true } // This may appear incorrect, because the solver encodes meaning into // the empty string for NetworkName (it means that it would use the // import path by default, but could be coerced into using an alternate // URL). However, that 'coercion' can only happen if there's a // disagreement between projects on where a dependency should be sourced // from. Such disagreement is exactly what overrides preclude, so // there's no need to preserve the meaning of "" here - thus, we can // treat it as a zero value and ignore it, rather than applying it. 
if opp.Source != "" { wc.Ident.Source = opp.Source wc.overrNet = true } } return wc }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/manager_test.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "context" "fmt" "io/ioutil" "log" "os" "path" "path/filepath" "runtime" "sort" "sync" "sync/atomic" "testing" "text/tabwriter" "time" "github.com/golang/dep/internal/test" ) // An analyzer that passes nothing back, but doesn't error. This is the naive // case - no constraints, no lock, and no errors. The SourceManager will // interpret this as open/Any constraints on everything in the import graph. type naiveAnalyzer struct{} func (naiveAnalyzer) DeriveManifestAndLock(string, ProjectRoot) (Manifest, Lock, error) { return nil, nil, nil } func (a naiveAnalyzer) Info() ProjectAnalyzerInfo { return ProjectAnalyzerInfo{ Name: "naive-analyzer", Version: 1, } } func mkNaiveSM(t *testing.T) (*SourceMgr, func()) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Fatalf("Failed to create temp dir: %s", err) } sm, err := NewSourceManager(SourceManagerConfig{ Cachedir: cpath, Logger: log.New(test.Writer{TB: t}, "", 0), }) if err != nil { t.Fatalf("Unexpected error on SourceManager creation: %s", err) } return sm, func() { sm.Release() err := os.RemoveAll(cpath) if err != nil { t.Errorf("removeAll failed: %s", err) } } } func remakeNaiveSM(osm *SourceMgr, t *testing.T) (*SourceMgr, func()) { cpath := osm.cachedir osm.Release() sm, err := NewSourceManager(SourceManagerConfig{ Cachedir: cpath, Logger: log.New(test.Writer{TB: t}, "", 0), }) if err != nil { t.Fatalf("unexpected error on SourceManager recreation: %s", err) } return sm, func() { sm.Release() err := os.RemoveAll(cpath) if err != nil { t.Errorf("removeAll failed: %s", err) } } } func TestSourceManagerInit(t *testing.T) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Errorf("Failed to create temp dir: %s", err) } cfg := SourceManagerConfig{ Cachedir: cpath, Logger: log.New(test.Writer{TB: t}, "", 0), } sm, 
err := NewSourceManager(cfg) if err != nil { t.Errorf("Unexpected error on SourceManager creation: %s", err) } _, err = NewSourceManager(cfg) if err == nil { t.Errorf("Creating second SourceManager should have failed due to file lock contention") } else if te, ok := err.(CouldNotCreateLockError); !ok { t.Errorf("Should have gotten CouldNotCreateLockError error type, but got %T", te) } if _, err = os.Stat(path.Join(cpath, "sm.lock")); err != nil { t.Errorf("Global cache lock file not created correctly") } sm.Release() err = os.RemoveAll(cpath) if err != nil { t.Errorf("removeAll failed: %s", err) } if _, err = os.Stat(path.Join(cpath, "sm.lock")); !os.IsNotExist(err) { t.Fatalf("Global cache lock file not cleared correctly on Release()") } err = os.MkdirAll(cpath, 0777) if err != nil { t.Errorf("Failed to re-create temp dir: %s", err) } defer func() { err = os.RemoveAll(cpath) if err != nil { t.Errorf("removeAll failed: %s", err) } }() // Set another one up at the same spot now, just to be sure sm, err = NewSourceManager(cfg) if err != nil { t.Fatalf("Creating a second SourceManager should have succeeded when the first was released, but failed with err %s", err) } sm.Release() } func TestSourceInit(t *testing.T) { // This test is a bit slow, skip it on -short if testing.Short() { t.Skip("Skipping project manager init test in short mode") } cpath, err := ioutil.TempDir("", "smcache") if err != nil { t.Fatalf("Failed to create temp dir: %s", err) } sm, err := NewSourceManager(SourceManagerConfig{ Cachedir: cpath, Logger: log.New(test.Writer{TB: t}, "", 0), }) if err != nil { t.Fatalf("Unexpected error on SourceManager creation: %s", err) } defer func() { sm.Release() err := os.RemoveAll(cpath) if err != nil { t.Errorf("removeAll failed: %s", err) } }() id := mkPI("github.com/sdboyer/gpkt").normalize() pvl, err := sm.ListVersions(id) if err != nil { t.Errorf("Unexpected error during initial project setup/fetching %s", err) } if len(pvl) != 7 { t.Errorf("Expected seven 
version results from the test repo, got %v", len(pvl)) } else { expected := []PairedVersion{ NewVersion("v2.0.0").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), NewVersion("v1.1.0").Pair(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), NewVersion("v1.0.0").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), newDefaultBranch("master").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), NewBranch("v1").Pair(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), NewBranch("v1.1").Pair(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), NewBranch("v3").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), } // SourceManager itself doesn't guarantee ordering; sort them here so we // can dependably check output SortPairedForUpgrade(pvl) for k, e := range expected { if !pvl[k].Matches(e) { t.Errorf("Expected version %s in position %v but got %s", e, k, pvl[k]) } } } // Two birds, one stone - make sure the internal ProjectManager vlist cache // works (or at least doesn't not work) by asking for the versions again, // and do it through smcache to ensure its sorting works, as well. 
smc := &bridge{ sm: sm, vlists: make(map[ProjectIdentifier][]Version), s: &solver{mtr: newMetrics()}, } vl, err := smc.listVersions(id) if err != nil { t.Errorf("Unexpected error during initial project setup/fetching %s", err) } if len(vl) != 7 { t.Errorf("Expected seven version results from the test repo, got %v", len(vl)) } else { expected := []Version{ NewVersion("v2.0.0").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), NewVersion("v1.1.0").Pair(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), NewVersion("v1.0.0").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), newDefaultBranch("master").Pair(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), NewBranch("v1").Pair(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), NewBranch("v1.1").Pair(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), NewBranch("v3").Pair(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), } for k, e := range expected { if !vl[k].Matches(e) { t.Errorf("Expected version %s in position %v but got %s", e, k, vl[k]) } } if !vl[3].(versionPair).v.(branchVersion).isDefault { t.Error("Expected master branch version to have isDefault flag, but it did not") } if vl[4].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v1 branch version not to have isDefault flag, but it did") } if vl[5].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v1.1 branch version not to have isDefault flag, but it did") } if vl[6].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v3 branch version not to have isDefault flag, but it did") } } present, err := smc.RevisionPresentIn(id, Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")) if err != nil { t.Errorf("Should have found revision in source, but got err: %s", err) } else if !present { t.Errorf("Should have found revision in source, but did not") } // SyncSourceFor will ensure we have everything err = smc.SyncSourceFor(id) if err != nil { t.Errorf("SyncSourceFor failed with unexpected error: 
%s", err) } // Ensure that the appropriate cache dirs and files exist _, err = os.Stat(filepath.Join(cpath, "sources", "https---github.com-sdboyer-gpkt", ".git")) if err != nil { t.Error("Cache repo does not exist in expected location") } os.Stat(filepath.Join(cpath, "metadata", "github.com", "sdboyer", "gpkt", "cache.json")) // Ensure source existence values are what we expect var exists bool exists, err = sm.SourceExists(id) if err != nil { t.Errorf("Error on checking SourceExists: %s", err) } if !exists { t.Error("Source should exist after non-erroring call to ListVersions") } } func TestDefaultBranchAssignment(t *testing.T) { if testing.Short() { t.Skip("Skipping default branch assignment test in short mode") } sm, clean := mkNaiveSM(t) defer clean() id := mkPI("github.com/sdboyer/test-multibranch") v, err := sm.ListVersions(id) if err != nil { t.Errorf("Unexpected error during initial project setup/fetching %s", err) } if len(v) != 3 { t.Errorf("Expected three version results from the test repo, got %v", len(v)) } else { brev := Revision("fda020843ac81352004b9dca3fcccdd517600149") mrev := Revision("9f9c3a591773d9b28128309ac7a9a72abcab267d") expected := []PairedVersion{ NewBranch("branchone").Pair(brev), NewBranch("otherbranch").Pair(brev), NewBranch("master").Pair(mrev), } SortPairedForUpgrade(v) for k, e := range expected { if !v[k].Matches(e) { t.Errorf("Expected version %s in position %v but got %s", e, k, v[k]) } } if !v[0].(versionPair).v.(branchVersion).isDefault { t.Error("Expected branchone branch version to have isDefault flag, but it did not") } if !v[0].(versionPair).v.(branchVersion).isDefault { t.Error("Expected otherbranch branch version to have isDefault flag, but it did not") } if v[2].(versionPair).v.(branchVersion).isDefault { t.Error("Expected master branch version not to have isDefault flag, but it did") } } } func TestMgrMethodsFailWithBadPath(t *testing.T) { // a symbol will always bork it up bad := mkPI("foo/##&^").normalize() sm, clean 
:= mkNaiveSM(t) defer clean() var err error if _, err = sm.SourceExists(bad); err == nil { t.Error("SourceExists() did not error on bad input") } if err = sm.SyncSourceFor(bad); err == nil { t.Error("SyncSourceFor() did not error on bad input") } if _, err = sm.ListVersions(bad); err == nil { t.Error("ListVersions() did not error on bad input") } if _, err = sm.RevisionPresentIn(bad, Revision("")); err == nil { t.Error("RevisionPresentIn() did not error on bad input") } if _, err = sm.ListPackages(bad, nil); err == nil { t.Error("ListPackages() did not error on bad input") } if _, _, err = sm.GetManifestAndLock(bad, nil, naiveAnalyzer{}); err == nil { t.Error("GetManifestAndLock() did not error on bad input") } if err = sm.ExportProject(context.Background(), bad, nil, ""); err == nil { t.Error("ExportProject() did not error on bad input") } } type sourceCreationTestFixture struct { roots []ProjectIdentifier namecount, srccount int } func (f sourceCreationTestFixture) run(t *testing.T) { t.Parallel() sm, clean := mkNaiveSM(t) defer clean() for _, pi := range f.roots { _, err := sm.SourceExists(pi) if err != nil { t.Fatal(err) } } if len(sm.srcCoord.nameToURL) != f.namecount { t.Errorf("want %v names in the name->url map, but got %v. 
contents: \n%v", f.namecount, len(sm.srcCoord.nameToURL), sm.srcCoord.nameToURL) } if len(sm.srcCoord.srcs) != f.srccount { t.Errorf("want %v gateways in the sources map, but got %v", f.srccount, len(sm.srcCoord.srcs)) } if t.Failed() { var keys []string for k := range sm.srcCoord.nameToURL { keys = append(keys, k) } sort.Strings(keys) var buf bytes.Buffer w := tabwriter.NewWriter(&buf, 0, 4, 2, ' ', 0) fmt.Fprint(w, "NAME\tMAPPED URL\n") for _, r := range keys { fmt.Fprintf(w, "%s\t%s\n", r, sm.srcCoord.nameToURL[r]) } w.Flush() t.Log("\n", buf.String()) t.Log("SRC KEYS") for k := range sm.srcCoord.srcs { t.Log(k) } } } // This test is primarily about making sure that the logic around folding // together different ways of referencing the same underlying resource - whether // that be intentionally folding them, or intentionally keeping them separate - // work as intended. func TestSourceCreationCounts(t *testing.T) { if testing.Short() { t.Skip("Skipping slow test in short mode") } fixtures := map[string]sourceCreationTestFixture{ "gopkgin uniqueness": { roots: []ProjectIdentifier{ mkPI("gopkg.in/sdboyer/gpkt.v1"), mkPI("gopkg.in/sdboyer/gpkt.v2"), mkPI("gopkg.in/sdboyer/gpkt.v3"), }, namecount: 6, srccount: 3, }, "gopkgin separation from github": { roots: []ProjectIdentifier{ mkPI("gopkg.in/sdboyer/gpkt.v1"), mkPI("github.com/sdboyer/gpkt"), mkPI("http://github.com/sdboyer/gpkt"), mkPI("https://github.com/sdboyer/gpkt"), }, namecount: 5, srccount: 3, }, "case variance across path and URL-based access": { roots: []ProjectIdentifier{ {ProjectRoot: ProjectRoot("github.com/sdboyer/gpkt"), Source: "https://github.com/Sdboyer/gpkt"}, {ProjectRoot: ProjectRoot("github.com/sdboyer/gpkt"), Source: "https://github.com/SdbOyer/gpkt"}, mkPI("github.com/sdboyer/gpkt"), {ProjectRoot: ProjectRoot("github.com/sdboyer/gpkt"), Source: "https://github.com/sdboyeR/gpkt"}, mkPI("github.com/sdboyeR/gpkt"), }, namecount: 6, srccount: 1, }, } for name, fix := range fixtures { t.Run(name, 
fix.run) } } func TestGetSources(t *testing.T) { // This test is a tad slow, skip it on -short if testing.Short() { t.Skip("Skipping source setup test in short mode") } requiresBins(t, "git", "hg", "bzr") sm, clean := mkNaiveSM(t) pil := []ProjectIdentifier{ mkPI("github.com/Masterminds/VCSTestRepo").normalize(), mkPI("bitbucket.org/mattfarina/testhgrepo").normalize(), mkPI("launchpad.net/govcstestbzrrepo").normalize(), } ctx := context.Background() // protects against premature release of sm t.Run("inner", func(t *testing.T) { for _, pi := range pil { lpi := pi t.Run(lpi.normalizedSource(), func(t *testing.T) { t.Parallel() srcg, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) if err != nil { t.Errorf("unexpected error setting up source: %s", err) return } // Re-get the same, make sure they are the same srcg2, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) if err != nil { t.Errorf("unexpected error re-getting source: %s", err) } else if srcg != srcg2 { t.Error("first and second sources are not eq") } // All of them _should_ select https, so this should work lpi.Source = "https://" + lpi.Source srcg3, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) if err != nil { t.Errorf("unexpected error getting explicit https source: %s", err) } else if srcg != srcg3 { t.Error("explicit https source should reuse autodetected https source") } // Now put in http, and they should differ lpi.Source = "http://" + string(lpi.ProjectRoot) srcg4, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) if err != nil { t.Errorf("unexpected error getting explicit http source: %s", err) } else if srcg == srcg4 { t.Error("explicit http source should create a new src") } }) } }) // nine entries (of which three are dupes): for each vcs, raw import path, // the https url, and the http url. 
also three more from case folding of // github.com/Masterminds/VCSTestRepo -> github.com/masterminds/vcstestrepo if len(sm.srcCoord.nameToURL) != 12 { t.Errorf("Should have twelve discrete entries in the nameToURL map, got %v", len(sm.srcCoord.nameToURL)) } clean() } func TestFSCaseSensitivityConvergesSources(t *testing.T) { if testing.Short() { t.Skip("Skipping slow test in short mode") } f := func(name string, pi1, pi2 ProjectIdentifier) { t.Run(name, func(t *testing.T) { t.Parallel() sm, clean := mkNaiveSM(t) defer clean() sm.SyncSourceFor(pi1) sg1, err := sm.srcCoord.getSourceGatewayFor(context.Background(), pi1) if err != nil { t.Fatal(err) } sm.SyncSourceFor(pi2) sg2, err := sm.srcCoord.getSourceGatewayFor(context.Background(), pi2) if err != nil { t.Fatal(err) } path1 := sg1.src.(*gitSource).repo.LocalPath() stat1, err := os.Stat(path1) if err != nil { t.Fatal("path1:", path1, err) } path2 := sg2.src.(*gitSource).repo.LocalPath() stat2, err := os.Stat(path2) if err != nil { t.Fatal("path2:", path2, err) } same, count := os.SameFile(stat1, stat2), len(sm.srcCoord.srcs) if same && count != 1 { t.Log("are same, count", count) t.Fatal("on case-insensitive filesystem, case-varying sources should have been folded together but were not") } if !same && count != 2 { t.Log("not same, count", count) t.Fatal("on case-sensitive filesystem, case-varying sources should not have been folded together, but were") } }) } folded := mkPI("github.com/sdboyer/deptest").normalize() casevar1 := mkPI("github.com/Sdboyer/deptest").normalize() casevar2 := mkPI("github.com/SdboyeR/deptest").normalize() f("folded first", folded, casevar1) f("folded second", casevar1, folded) f("both unfolded", casevar1, casevar2) } // Regression test for #32 func TestGetInfoListVersionsOrdering(t *testing.T) { // This test is quite slow, skip it on -short if testing.Short() { t.Skip("Skipping slow test in short mode") } sm, clean := mkNaiveSM(t) defer clean() // setup done, now do the test id := 
mkPI("github.com/sdboyer/gpkt").normalize() _, _, err := sm.GetManifestAndLock(id, NewVersion("v1.0.0"), naiveAnalyzer{}) if err != nil { t.Errorf("Unexpected error from GetInfoAt %s", err) } v, err := sm.ListVersions(id) if err != nil { t.Errorf("Unexpected error from ListVersions %s", err) } if len(v) != 7 { t.Errorf("Expected seven results from ListVersions, got %v", len(v)) } } func TestDeduceProjectRoot(t *testing.T) { sm, clean := mkNaiveSM(t) defer clean() in := "github.com/sdboyer/gps" pr, err := sm.DeduceProjectRoot(in) if err != nil { t.Errorf("Problem while detecting root of %q %s", in, err) } if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 1 { t.Errorf("Root path trie should have one element after one deduction, has %v", sm.deduceCoord.rootxt.Len()) } pr, err = sm.DeduceProjectRoot(in) if err != nil { t.Errorf("Problem while detecting root of %q %s", in, err) } else if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 1 { t.Errorf("Root path trie should still have one element after performing the same deduction twice; has %v", sm.deduceCoord.rootxt.Len()) } // Now do a subpath sub := path.Join(in, "foo") pr, err = sm.DeduceProjectRoot(sub) if err != nil { t.Errorf("Problem while detecting root of %q %s", sub, err) } else if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 1 { t.Errorf("Root path trie should still have one element, as still only one unique root has gone in; has %v", sm.deduceCoord.rootxt.Len()) } // Now do a fully different root, but still on github in2 := "github.com/bagel/lox" sub2 := path.Join(in2, "cheese") pr, err = sm.DeduceProjectRoot(sub2) if err != nil { t.Errorf("Problem while detecting root of %q %s", sub2, err) } else if string(pr) != in2 { t.Errorf("Wrong project root was 
deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 2 { t.Errorf("Root path trie should have two elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } // Ensure that our prefixes are bounded by path separators in4 := "github.com/bagel/loxx" pr, err = sm.DeduceProjectRoot(in4) if err != nil { t.Errorf("Problem while detecting root of %q %s", in4, err) } else if string(pr) != in4 { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 3 { t.Errorf("Root path trie should have three elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } // Ensure that vcs extension-based matching comes through in5 := "ffffrrrraaaaaapppppdoesnotresolve.com/baz.git" pr, err = sm.DeduceProjectRoot(in5) if err != nil { t.Errorf("Problem while detecting root of %q %s", in5, err) } else if string(pr) != in5 { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } if sm.deduceCoord.rootxt.Len() != 4 { t.Errorf("Root path trie should have four elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } } func TestMultiFetchThreadsafe(t *testing.T) { // This test is quite slow, skip it on -short if testing.Short() { t.Skip("Skipping slow test in short mode") } projects := []ProjectIdentifier{ mkPI("github.com/sdboyer/gps"), mkPI("github.com/sdboyer/gpkt"), { ProjectRoot: ProjectRoot("github.com/sdboyer/gpkt"), Source: "https://github.com/sdboyer/gpkt", }, mkPI("github.com/sdboyer/gogl"), mkPI("github.com/sdboyer/gliph"), mkPI("github.com/sdboyer/frozone"), mkPI("gopkg.in/sdboyer/gpkt.v1"), mkPI("gopkg.in/sdboyer/gpkt.v2"), mkPI("github.com/Masterminds/VCSTestRepo"), mkPI("github.com/go-yaml/yaml"), mkPI("github.com/sirupsen/logrus"), mkPI("github.com/Masterminds/semver"), mkPI("github.com/Masterminds/vcs"), //mkPI("bitbucket.org/sdboyer/withbm"), //mkPI("bitbucket.org/sdboyer/nobm"), } do := func(name string, sm SourceManager) { 
t.Run(name, func(t *testing.T) { // This gives us ten calls per op, per project, which should be(?) // decently likely to reveal underlying concurrency problems ops := 4 cnum := len(projects) * ops * 10 for i := 0; i < cnum; i++ { // Trigger all four ops on each project, then move on to the next // project. id, op := projects[(i/ops)%len(projects)], i%ops // The count of times this op has been been invoked on this project // (after the upcoming invocation) opcount := i/(ops*len(projects)) + 1 switch op { case 0: t.Run(fmt.Sprintf("deduce:%v:%s", opcount, id), func(t *testing.T) { t.Parallel() if _, err := sm.DeduceProjectRoot(string(id.ProjectRoot)); err != nil { t.Error(err) } }) case 1: t.Run(fmt.Sprintf("sync:%v:%s", opcount, id), func(t *testing.T) { t.Parallel() err := sm.SyncSourceFor(id) if err != nil { t.Error(err) } }) case 2: t.Run(fmt.Sprintf("listVersions:%v:%s", opcount, id), func(t *testing.T) { t.Parallel() vl, err := sm.ListVersions(id) if err != nil { t.Fatal(err) } if len(vl) == 0 { t.Error("no versions returned") } }) case 3: t.Run(fmt.Sprintf("exists:%v:%s", opcount, id), func(t *testing.T) { t.Parallel() y, err := sm.SourceExists(id) if err != nil { t.Fatal(err) } if !y { t.Error("said source does not exist") } }) default: panic(fmt.Sprintf("wtf, %s %v", id, op)) } } }) } sm, _ := mkNaiveSM(t) do("first", sm) // Run the thing twice with a remade sm so that we cover both the cases of // pre-existing and new clones. // // This triggers a release of the first sm, which is much of what we're // testing here - that the release is complete and clean, and can be // immediately followed by a new sm coming in. sm2, clean := remakeNaiveSM(sm, t) do("second", sm2) clean() } // Ensure that we don't see concurrent map writes when calling ListVersions. // Regression test for https://github.com/sdboyer/gps/issues/156. 
// // Ideally this would be caught by TestMultiFetchThreadsafe, but perhaps the // high degree of parallelism pretty much eliminates that as a realistic // possibility? func TestListVersionsRacey(t *testing.T) { // This test is quite slow, skip it on -short if testing.Short() { t.Skip("Skipping slow test in short mode") } sm, clean := mkNaiveSM(t) defer clean() wg := &sync.WaitGroup{} id := mkPI("github.com/sdboyer/gps") for i := 0; i < 20; i++ { wg.Add(1) go func() { _, err := sm.ListVersions(id) if err != nil { t.Errorf("listing versions failed with err %s", err.Error()) } wg.Done() }() } wg.Wait() } func TestErrAfterRelease(t *testing.T) { sm, clean := mkNaiveSM(t) clean() id := ProjectIdentifier{} _, err := sm.SourceExists(id) if err == nil { t.Errorf("SourceExists did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("SourceExists errored after Release(), but with unexpected error: %T %s", err, err.Error()) } err = sm.SyncSourceFor(id) if err == nil { t.Errorf("SyncSourceFor did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("SyncSourceFor errored after Release(), but with unexpected error: %T %s", err, err.Error()) } _, err = sm.ListVersions(id) if err == nil { t.Errorf("ListVersions did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("ListVersions errored after Release(), but with unexpected error: %T %s", err, err.Error()) } _, err = sm.RevisionPresentIn(id, "") if err == nil { t.Errorf("RevisionPresentIn did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("RevisionPresentIn errored after Release(), but with unexpected error: %T %s", err, err.Error()) } _, err = sm.ListPackages(id, nil) if err == nil { t.Errorf("ListPackages did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("ListPackages errored after Release(), but with unexpected error: %T 
%s", err, err.Error()) } _, _, err = sm.GetManifestAndLock(id, nil, naiveAnalyzer{}) if err == nil { t.Errorf("GetManifestAndLock did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("GetManifestAndLock errored after Release(), but with unexpected error: %T %s", err, err.Error()) } err = sm.ExportProject(context.Background(), id, nil, "") if err == nil { t.Errorf("ExportProject did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("ExportProject errored after Release(), but with unexpected error: %T %s", err, err.Error()) } _, err = sm.DeduceProjectRoot("") if err == nil { t.Errorf("DeduceProjectRoot did not error after calling Release()") } else if err != ErrSourceManagerIsReleased { t.Errorf("DeduceProjectRoot errored after Release(), but with unexpected error: %T %s", err, err.Error()) } } func TestSignalHandling(t *testing.T) { if testing.Short() { t.Skip("Skipping slow test in short mode") } sm, clean := mkNaiveSM(t) sigch := make(chan os.Signal) sm.HandleSignals(sigch) sigch <- os.Interrupt <-time.After(10 * time.Millisecond) if atomic.LoadInt32(&sm.releasing) != 1 { t.Error("Releasing flag did not get set") } clean() // Test again, this time with a running call sm, clean = mkNaiveSM(t) sm.HandleSignals(sigch) errchan := make(chan error) go func() { _, callerr := sm.DeduceProjectRoot("k8s.io/kubernetes") errchan <- callerr }() go func() { sigch <- os.Interrupt }() runtime.Gosched() callerr := <-errchan if callerr == nil { t.Error("network call could not have completed before cancellation, should have gotten an error") } if atomic.LoadInt32(&sm.releasing) != 1 { t.Error("Releasing flag did not get set") } clean() sm, clean = mkNaiveSM(t) // Ensure that handling also works after stopping and restarting itself, // and that Release happens only once. 
sm.UseDefaultSignalHandling() sm.StopSignalHandling() sm.HandleSignals(sigch) go func() { _, callerr := sm.DeduceProjectRoot("k8s.io/kubernetes") errchan <- callerr }() go func() { sigch <- os.Interrupt sm.Release() }() runtime.Gosched() after := time.After(2 * time.Second) select { case <-sm.qch: case <-after: t.Error("did not shut down in reasonable time") } clean() } func TestUnreachableSource(t *testing.T) { // If a git remote is unreachable (maybe the server is only accessible behind a VPN, or // something), we should return a clear error, not a panic. if testing.Short() { t.Skip("Skipping slow test in short mode") } sm, clean := mkNaiveSM(t) defer clean() id := mkPI("github.com/golang/notexist").normalize() err := sm.SyncSourceFor(id) if err == nil { t.Error("expected err when listing versions of a bogus source, but got nil") } } func TestSupervisor(t *testing.T) { bgc := context.Background() ctx, cancelFunc := context.WithCancel(bgc) superv := newSupervisor(ctx) ci := callInfo{ name: "foo", typ: 0, } _, err := superv.start(ci) if err != nil { t.Fatal("unexpected err on setUpCall:", err) } tc, exists := superv.running[ci] if !exists { t.Fatal("running call not recorded in map") } if tc.count != 1 { t.Fatalf("wrong count of running ci: wanted 1 got %v", tc.count) } // run another, but via do block, wait := make(chan struct{}), make(chan struct{}) errchan := make(chan error) go func() { wait <- struct{}{} err := superv.do(bgc, "foo", 0, func(ctx context.Context) error { <-block return nil }) errchan <- err //if err != nil { // t.Fatal("unexpected err on do() completion:", err) //} close(wait) }() <-wait superv.mu.Lock() tc, exists = superv.running[ci] if !exists { t.Fatal("running call not recorded in map") } // TODO (kris-nova) We need to disable this bypass here, and in the .travis.yml // as soon as dep#501 is fixed bypass := os.Getenv("DEPTESTBYPASS501") if bypass != "" { t.Log("bypassing tc.count check for running ci") } else if tc.count != 2 { 
t.Fatalf("wrong count of running ci: wanted 2 got %v", tc.count) } superv.mu.Unlock() close(block) possibleConcurrentError := <-errchan if possibleConcurrentError != nil { t.Fatal("unexpected err on do() completion:", err) } <-wait superv.mu.Lock() if len(superv.ran) != 0 { t.Fatal("should not record metrics until last one drops") } tc, exists = superv.running[ci] if !exists { t.Fatal("running call not recorded in map") } if tc.count != 1 { t.Fatalf("wrong count of running ci: wanted 1 got %v", tc.count) } superv.mu.Unlock() superv.done(ci) superv.mu.Lock() ran, exists := superv.ran[0] if !exists { t.Fatal("should have metrics after closing last of a ci, but did not") } if ran.count != 1 { t.Fatalf("wrong count of serial runs of a call: wanted 1 got %v", ran.count) } superv.mu.Unlock() cancelFunc() _, err = superv.start(ci) if err == nil { t.Fatal("should have errored on cm.run() after canceling cm's input context") } superv.do(bgc, "foo", 0, func(ctx context.Context) error { t.Fatal("calls should not be initiated by do() after main context is cancelled") return nil }) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/selection_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "reflect" "testing" ) // Regression test for https://github.com/sdboyer/gps/issues/174 func TestUnselectedRemoval(t *testing.T) { // We don't need a comparison function for this test bmi1 := bimodalIdentifier{ id: mkPI("foo"), pl: []string{"foo", "bar"}, } bmi2 := bimodalIdentifier{ id: mkPI("foo"), pl: []string{"foo", "bar", "baz"}, } bmi3 := bimodalIdentifier{ id: mkPI("foo"), pl: []string{"foo"}, } u := &unselected{ sl: []bimodalIdentifier{bmi1, bmi2, bmi3}, } u.remove(bimodalIdentifier{ id: mkPI("other"), pl: []string{"other"}, }) if len(u.sl) != 3 { t.Fatalf("len of unselected slice should have been 2 after no-op removal, got %v", len(u.sl)) } u.remove(bmi3) want := []bimodalIdentifier{bmi1, bmi2} if len(u.sl) != 2 { t.Fatalf("removal of matching bmi did not work, slice should have 2 items but has %v", len(u.sl)) } if !reflect.DeepEqual(u.sl, want) { t.Fatalf("wrong item removed from slice:\n\t(GOT): %v\n\t(WNT): %v", u.sl, want) } u.remove(bmi3) if len(u.sl) != 2 { t.Fatalf("removal of bmi w/non-matching packages should be a no-op but wasn't; slice should have 2 items but has %v", len(u.sl)) } u.remove(bmi2) want = []bimodalIdentifier{bmi1} if len(u.sl) != 1 { t.Fatalf("removal of matching bmi did not work, slice should have 1 items but has %v", len(u.sl)) } if !reflect.DeepEqual(u.sl, want) { t.Fatalf("wrong item removed from slice:\n\t(GOT): %v\n\t(WNT): %v", u.sl, want) } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/rootdata_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"reflect"
	"testing"

	"github.com/golang/dep/gps/pkgtree"
)

// TestRootdataExternalImports verifies that rootdata.externalImportList
// reflects requires, root-package imports, and ignore rules, mutating the
// rootdata in place between assertions.
func TestRootdataExternalImports(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]

	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        fix.rootmanifest(),
		ProjectAnalyzer: naiveAnalyzer{},
		stdLibFn:        func(string) bool { return false },
		mkBridgeFn:      overrideMkBridge,
	}

	is, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Fatalf("Unexpected error while prepping solver: %s", err)
	}
	// Reach into the solver's rootdata so we can mutate it directly.
	rd := is.(*solver).rd

	want := []string{"a", "b"}
	got := rd.externalImportList(params.stdLibFn)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want)
	}

	// Add a require
	rd.req["c"] = true

	want = []string{"a", "b", "c"}
	got = rd.externalImportList(params.stdLibFn)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want)
	}

	// Add same path as import
	poe := rd.rpt.Packages["root"]
	poe.P.Imports = []string{"a", "b", "c"}
	rd.rpt.Packages["root"] = poe

	// should still be the same
	got = rd.externalImportList(params.stdLibFn)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want)
	}

	// Add an ignore, but not on the required path (Prepare makes that
	// combination impossible)

	rd.ir = pkgtree.NewIgnoredRuleset([]string{"b"})
	want = []string{"a", "c"}
	got = rd.externalImportList(params.stdLibFn)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want)
	}
}

// TestGetApplicableConstraints checks rootdata.getApplicableConstraints under
// a series of cumulative mutations. NOTE: each table entry's mut() mutates the
// shared rd, so the entries are order-dependent by design — later expected
// results include the effects of earlier mutations.
func TestGetApplicableConstraints(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]

	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        fix.rootmanifest(),
		ProjectAnalyzer: naiveAnalyzer{},
		stdLibFn:        func(string) bool { return false },
		mkBridgeFn:      overrideMkBridge,
	}

	is, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Fatalf("Unexpected error while prepping solver: %s", err)
	}
	rd := is.(*solver).rd

	table := []struct {
		name   string
		mut    func()
		result []workingConstraint
	}{
		{
			name: "base case, two constraints",
			mut:  func() {},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
			},
		},
		{
			name: "with unconstrained require",
			mut: func() {
				// No constraint means it doesn't show up
				rd.req["c"] = true
			},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
			},
		},
		{
			name: "with unconstrained import",
			mut: func() {
				// Again, no constraint means it doesn't show up
				poe := rd.rpt.Packages["root"]
				poe.P.Imports = []string{"a", "b", "d"}
				rd.rpt.Packages["root"] = poe
			},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
			},
		},
		{
			name: "constraint on required",
			mut: func() {
				rd.rm.Deps["c"] = ProjectProperties{
					Constraint: NewBranch("foo"),
				}
			},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("c"),
					Constraint: NewBranch("foo"),
				},
			},
		},
		{
			name: "override on imported",
			mut: func() {
				rd.ovr["d"] = ProjectProperties{
					Constraint: NewBranch("bar"),
				}
			},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("c"),
					Constraint: NewBranch("foo"),
				},
				{
					Ident:           mkPI("d"),
					Constraint:      NewBranch("bar"),
					overrConstraint: true,
				},
			},
		},
		{
			// It is certainly the simplest and most rule-abiding solution to
			// drop the constraint in this case, but is there a chance it would
			// violate the principle of least surprise?
			name: "ignore imported and overridden pkg",
			mut: func() {
				rd.ir = pkgtree.NewIgnoredRuleset([]string{"d"})
			},
			result: []workingConstraint{
				{
					Ident:      mkPI("a"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("b"),
					Constraint: mkSVC("1.0.0"),
				},
				{
					Ident:      mkPI("c"),
					Constraint: NewBranch("foo"),
				},
			},
		},
	}

	for _, fix := range table {
		t.Run(fix.name, func(t *testing.T) {
			fix.mut()

			got := rd.getApplicableConstraints(params.stdLibFn)
			if !reflect.DeepEqual(fix.result, got) {
				t.Errorf("unexpected applicable constraint set:\n\t(GOT): %+v\n\t(WNT): %+v", got, fix.result)
			}
		})
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/bridge.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"os"
	"path/filepath"
	"sync/atomic"

	"github.com/golang/dep/gps/pkgtree"
)

// sourceBridge is an adapter to SourceManagers that tailor operations for a
// single solve run.
type sourceBridge interface {
	// sourceBridge includes many methods from the SourceManager interface.
	SourceExists(ProjectIdentifier) (bool, error)
	SyncSourceFor(ProjectIdentifier) error
	RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
	ListPackages(ProjectIdentifier, Version) (pkgtree.PackageTree, error)
	GetManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error)
	ExportProject(ProjectIdentifier, Version, string) error
	DeduceProjectRoot(ip string) (ProjectRoot, error)

	// Methods below are solve-run-specific and not part of SourceManager.
	listVersions(ProjectIdentifier) ([]Version, error)
	verifyRootDir(path string) error
	vendorCodeExists(ProjectIdentifier) (bool, error)
	breakLock()
}

// bridge is an adapter around a proper SourceManager. It provides localized
// caching that's tailored to the requirements of a particular solve run.
//
// Finally, it provides authoritative version/constraint operations, ensuring
// that any possible approach to a match - even those not literally encoded in
// the inputs - is achieved.
type bridge struct {
	// The underlying, adapted-to SourceManager
	sm SourceManager

	// The solver which we're assisting.
	//
	// The link between solver and bridge is circular, which is typically a bit
	// awkward, but the bridge needs access to so many of the input arguments
	// held by the solver that it ends up being easier and saner to do this.
	s *solver

	// Map of project root name to their available version list. This cache is
	// layered on top of the proper SourceManager's cache; the only difference
	// is that this keeps the versions sorted in the direction required by the
	// current solve run.
	vlists map[ProjectIdentifier][]Version

	// Indicates whether lock breaking has already been run
	lockbroken int32

	// Whether to sort version lists for downgrade.
	down bool

	// The cancellation context provided to the solver. Threading it through the
	// various solver methods is needlessly verbose so long as we maintain the
	// lifetime guarantees that a solver can only be run once.
	// TODO(sdboyer) uncomment this and thread it through SourceManager methods
	//ctx context.Context
}

// mkBridge creates a bridge
func mkBridge(s *solver, sm SourceManager, down bool) *bridge {
	return &bridge{
		sm:     sm,
		s:      s,
		down:   down,
		vlists: make(map[ProjectIdentifier][]Version),
	}
}

// GetManifestAndLock returns the root's in-memory manifest/lock when id is the
// root project; otherwise it delegates to the underlying SourceManager,
// recording metrics around the call.
func (b *bridge) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) {
	if b.s.rd.isRoot(id.ProjectRoot) {
		return b.s.rd.rm, b.s.rd.rl, nil
	}

	b.s.mtr.push("b-gmal")
	m, l, e := b.sm.GetManifestAndLock(id, v, an)
	b.s.mtr.pop()
	return m, l, e
}

// listVersions returns the version list for id, sorted in the direction this
// solve run requires (upgrade vs. downgrade), memoizing the result in vlists.
func (b *bridge) listVersions(id ProjectIdentifier) ([]Version, error) {
	if vl, exists := b.vlists[id]; exists {
		return vl, nil
	}

	b.s.mtr.push("b-list-versions")
	pvl, err := b.sm.ListVersions(id)
	if err != nil {
		b.s.mtr.pop()
		return nil, err
	}

	vl := hidePair(pvl)
	if b.down {
		SortForDowngrade(vl)
	} else {
		SortForUpgrade(vl)
	}

	b.vlists[id] = vl
	b.s.mtr.pop()
	return vl, nil
}

// RevisionPresentIn delegates to the SourceManager, recording metrics.
func (b *bridge) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
	b.s.mtr.push("b-rev-present-in")
	i, e := b.sm.RevisionPresentIn(id, r)
	b.s.mtr.pop()
	return i, e
}

// SourceExists delegates to the SourceManager, recording metrics.
func (b *bridge) SourceExists(id ProjectIdentifier) (bool, error) {
	b.s.mtr.push("b-source-exists")
	i, e := b.sm.SourceExists(id)
	b.s.mtr.pop()
	return i, e
}

// vendorCodeExists reports whether a vendor directory for id exists under the
// root project's dir. Stat errors (including non-existence) are returned as-is.
func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
	fi, err := os.Stat(filepath.Join(b.s.rd.dir, "vendor", string(id.ProjectRoot)))
	if err != nil {
		return false, err
	} else if fi.IsDir() {
		return true, nil
	}

	return false, nil
}

// listPackages lists all the packages contained within the given project at a
// particular version.
//
// The root project is handled separately, as the source manager isn't
// responsible for that code.
func (b *bridge) ListPackages(id ProjectIdentifier, v Version) (pkgtree.PackageTree, error) {
	if b.s.rd.isRoot(id.ProjectRoot) {
		return b.s.rd.rpt, nil
	}

	b.s.mtr.push("b-list-pkgs")
	pt, err := b.sm.ListPackages(id, v)
	b.s.mtr.pop()
	return pt, err
}

// ExportProject is present to satisfy the interface, but must never be called
// on a bridge; export is the responsibility of the real SourceManager.
func (b *bridge) ExportProject(id ProjectIdentifier, v Version, path string) error {
	panic("bridge should never be used to ExportProject")
}

// verifyRoot ensures that the provided path to the project root is in good
// working condition. This check is made only once, at the beginning of a solve
// run.
func (b *bridge) verifyRootDir(path string) error {
	if fi, err := os.Stat(path); err != nil {
		return badOptsFailure(fmt.Sprintf("could not read project root (%s): %s", path, err))
	} else if !fi.IsDir() {
		return badOptsFailure(fmt.Sprintf("project root (%s) is a file, not a directory", path))
	}

	return nil
}

// DeduceProjectRoot delegates to the SourceManager, recording metrics.
func (b *bridge) DeduceProjectRoot(ip string) (ProjectRoot, error) {
	b.s.mtr.push("b-deduce-proj-root")
	pr, e := b.sm.DeduceProjectRoot(ip)
	b.s.mtr.pop()
	return pr, e
}

// breakLock is called when the solver has to break a version recorded in the
// lock file. It prefetches all the projects in the solver's lock, so that the
// information is already on hand if/when the solver needs it.
//
// Projects that have already been selected are skipped, as it's generally unlikely that the
// solver will have to backtrack through and fully populate their version queues.
func (b *bridge) breakLock() {
	// No real conceivable circumstance in which multiple calls are made to
	// this, but being that this is the entrance point to a bunch of async work,
	// protect it with an atomic CAS in case things change in the future.
	//
	// We avoid using a sync.Once here, as there's no reason for other callers
	// to block until completion.
	if !atomic.CompareAndSwapInt32(&b.lockbroken, 0, 1) {
		return
	}

	for _, lp := range b.s.rd.rl.Projects() {
		if _, is := b.s.sel.selected(lp.Ident()); !is {
			// Capture loop values before launching the goroutine.
			pi, v := lp.Ident(), lp.Version()
			go func() {
				// Sync first
				// Errors are deliberately ignored; this is best-effort prefetching.
				b.sm.SyncSourceFor(pi)
				// Preload the package info for the locked version, too, as
				// we're more likely to need that
				b.sm.ListPackages(pi, v)
			}()
		}
	}
}

// SyncSourceFor delegates directly to the SourceManager, without metrics.
func (b *bridge) SyncSourceFor(id ProjectIdentifier) error {
	// we don't track metrics here b/c this is often called in its own goroutine
	// by the solver, and the metrics design is for wall time on a single thread
	return b.sm.SyncSourceFor(id)
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solve_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "context" "fmt" "log" "reflect" "sort" "testing" "github.com/golang/dep/internal/test" ) // overrideMkBridge overrides the base bridge with the depspecBridge that skips // verifyRootDir calls func overrideMkBridge(s *solver, sm SourceManager, down bool) sourceBridge { return &depspecBridge{mkBridge(s, sm, down)} } func fixSolve(params SolveParameters, sm SourceManager, t *testing.T) (Solution, error) { // Trace unconditionally; by passing the trace through t.Log(), the testing // system will decide whether or not to actually show the output (based on // -v, or selectively on test failure). params.TraceLogger = log.New(test.Writer{TB: t}, "", 0) // always return false, otherwise it would identify pretty much all of // our fixtures as being stdlib and skip everything params.stdLibFn = func(string) bool { return false } params.mkBridgeFn = overrideMkBridge s, err := Prepare(params, sm) if err != nil { return nil, err } return s.Solve(context.Background()) } // Test all the basic table fixtures. // // Or, just the one named in the fix arg. 
func TestBasicSolves(t *testing.T) { // sort them by their keys so we get stable output names := make([]string, 0, len(basicFixtures)) for n := range basicFixtures { names = append(names, n) } sort.Strings(names) for _, n := range names { n := n t.Run(n, func(t *testing.T) { t.Parallel() solveBasicsAndCheck(basicFixtures[n], t) }) } } func solveBasicsAndCheck(fix basicFixture, t *testing.T) (res Solution, err error) { sm := newdepspecSM(fix.ds, nil) if fix.broken != "" { t.Skip(fix.broken) } params := SolveParameters{ RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), Lock: dummyLock{}, Downgrade: fix.downgrade, ChangeAll: fix.changeall, ToChange: fix.changelist, ProjectAnalyzer: naiveAnalyzer{}, } if fix.l != nil { params.Lock = fix.l } res, err = fixSolve(params, sm, t) return fixtureSolveSimpleChecks(fix, res, err, t) } // Test all the bimodal table fixtures. // // Or, just the one named in the fix arg. func TestBimodalSolves(t *testing.T) { // sort them by their keys so we get stable output names := make([]string, 0, len(bimodalFixtures)) for n := range bimodalFixtures { names = append(names, n) } sort.Strings(names) for _, n := range names { n := n t.Run(n, func(t *testing.T) { t.Parallel() solveBimodalAndCheck(bimodalFixtures[n], t) }) } } func solveBimodalAndCheck(fix bimodalFixture, t *testing.T) (res Solution, err error) { sm := newbmSM(fix) if fix.broken != "" { t.Skip(fix.broken) } params := SolveParameters{ RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), Lock: dummyLock{}, Downgrade: fix.downgrade, ChangeAll: fix.changeall, ProjectAnalyzer: naiveAnalyzer{}, } if fix.l != nil { params.Lock = fix.l } res, err = fixSolve(params, sm, t) return fixtureSolveSimpleChecks(fix, res, err, t) } func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing.T) (Solution, error) { ppi := func(id ProjectIdentifier) string { // need this so we can clearly tell if 
there's a Source or not if id.Source == "" { return string(id.ProjectRoot) } return fmt.Sprintf("%s (from %s)", id.ProjectRoot, id.Source) } pv := func(v Version) string { if pv, ok := v.(PairedVersion); ok { return fmt.Sprintf("%s (%s)", pv.Unpair(), pv.Revision()) } return v.String() } fixfail := fix.failure() if err != nil { if fixfail == nil { t.Errorf("Solve failed unexpectedly:\n%s", err) } else if fixfail.Error() != err.Error() { // TODO(sdboyer) reflect.DeepEqual works for now, but once we start // modeling more complex cases, this should probably become more robust t.Errorf("Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", err, fixfail) } } else if fixfail != nil { var buf bytes.Buffer fmt.Fprintf(&buf, "Solver succeeded, but expecting failure:\n%s\nProjects in solution:", fixfail) for _, p := range soln.Projects() { fmt.Fprintf(&buf, "\n\t- %s at %s", ppi(p.Ident()), p.Version()) } t.Error(buf.String()) } else { r := soln.(solution) if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() { t.Errorf("Solver completed in %v attempts, but expected %v or fewer", r.att, fix.maxTries()) } // Dump result projects into a map for easier interrogation rp := make(map[ProjectIdentifier]LockedProject) for _, lp := range r.p { rp[lp.Ident()] = lp } fixlen, rlen := len(fix.solution()), len(rp) if fixlen != rlen { // Different length, so they definitely disagree t.Errorf("Solver reported %v package results, result expected %v", rlen, fixlen) } // Whether or not len is same, still have to verify that results agree // Walk through fixture/expected results first for id, flp := range fix.solution() { if lp, exists := rp[id]; !exists { t.Errorf("Project %q expected but missing from results", ppi(id)) } else { // delete result from map so we skip it on the reverse pass delete(rp, id) if flp.Version() != lp.Version() { t.Errorf("Expected version %q of project %q, but actual version was %q", pv(flp.Version()), ppi(id), pv(lp.Version())) } if !reflect.DeepEqual(lp.Packages(), 
flp.Packages()) { t.Errorf("Package list was not not as expected for project %s@%s:\n\t(GOT) %s\n\t(WNT) %s", ppi(id), pv(lp.Version()), lp.Packages(), flp.Packages()) } } } // Now walk through remaining actual results for id, lp := range rp { if _, exists := fix.solution()[id]; !exists { t.Errorf("Unexpected project %s@%s present in results, with pkgs:\n\t%s", ppi(id), pv(lp.Version()), lp.Packages()) } } } return soln, err } // This tests that, when a root lock is underspecified (has only a version) we // don't allow a match on that version from a rev in the manifest. We may allow // this in the future, but disallow it for now because going from an immutable // requirement to a mutable lock automagically is a bad direction that could // produce weird side effects. func TestRootLockNoVersionPairMatching(t *testing.T) { fix := basicFixture{ n: "does not match unpaired lock versions with paired real versions", ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), // foo's constraint rewritten below to foorev mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"), mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.2 foorev", "bar 1.0.2", ), } pd := fix.ds[0].deps[0] pd.Constraint = Revision("foorev") fix.ds[0].deps[0] = pd sm := newdepspecSM(fix.ds, nil) l2 := make(fixLock, 1) copy(l2, fix.l) l2lp := l2[0].(lockedProject) l2lp.v = nil l2[0] = l2lp params := SolveParameters{ RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), Lock: l2, ProjectAnalyzer: naiveAnalyzer{}, } res, err := fixSolve(params, sm, t) fixtureSolveSimpleChecks(fix, res, err, t) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/cmd_unix.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !windows

package gps

import (
	"bytes"
	"context"
	"os"
	"os/exec"
	"syscall"
	"time"

	"github.com/pkg/errors"
	"golang.org/x/sys/unix"
)

// cmd wraps an exec.Cmd with a context so the subprocess can be interrupted
// (and eventually killed) when the context is cancelled.
type cmd struct {
	// ctx is provided by the caller; SIGINT is sent when it is cancelled.
	ctx context.Context
	Cmd *exec.Cmd
}

// commandContext builds a cmd for the given program and arguments, placing the
// subprocess in its own process group so signals can be managed explicitly.
func commandContext(ctx context.Context, name string, arg ...string) cmd {
	c := exec.Command(name, arg...)

	// Force subprocesses into their own process group, rather than being in the
	// same process group as the dep process. Because Ctrl-C sent from a
	// terminal will send the signal to the entire currently running process
	// group, this allows us to directly manage the issuance of signals to
	// subprocesses.
	c.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
		Pgid:    0,
	}

	return cmd{ctx: ctx, Cmd: c}
}

// CombinedOutput is like (*os/exec.Cmd).CombinedOutput except that it
// terminates subprocesses gently (via os.Interrupt), but resorts to Kill if
// the subprocess fails to exit after 1 minute.
func (c cmd) CombinedOutput() ([]byte, error) {
	// Adapted from (*os/exec.Cmd).CombinedOutput
	if c.Cmd.Stdout != nil {
		return nil, errors.New("exec: Stdout already set")
	}
	if c.Cmd.Stderr != nil {
		return nil, errors.New("exec: Stderr already set")
	}
	var b bytes.Buffer
	c.Cmd.Stdout = &b
	c.Cmd.Stderr = &b

	if err := c.Cmd.Start(); err != nil {
		return nil, err
	}

	// Adapted from (*os/exec.Cmd).Start
	// waitDone is closed (via defer) once Wait returns, unblocking the signal
	// goroutine in both select arms.
	waitDone := make(chan struct{})
	defer close(waitDone)
	go func() {
		select {
		case <-c.ctx.Done():
			if err := c.Cmd.Process.Signal(os.Interrupt); err != nil {
				// If an error comes back from attempting to signal, proceed
				// immediately to hard kill.
				// The negative pid targets the whole process group (kill(2)).
				_ = unix.Kill(-c.Cmd.Process.Pid, syscall.SIGKILL)
			} else {
				// Arm a one-minute kill timer; it is stopped (via defer) when
				// the process exits before the deadline.
				defer time.AfterFunc(time.Minute, func() {
					_ = unix.Kill(-c.Cmd.Process.Pid, syscall.SIGKILL)
				}).Stop()
				<-waitDone
			}
		case <-waitDone:
		}
	}()

	err := c.Cmd.Wait()
	return b.Bytes(), err
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/lock_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"reflect"
	"sort"
	"testing"
)

// TestLockedProjectSorting verifies that sorting LockedProjects by identifier
// yields the expected order.
func TestLockedProjectSorting(t *testing.T) {
	// version doesn't matter here
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), nil),
		NewLockedProject(mkPI("bar"), NewVersion("zip"), nil),
		NewLockedProject(mkPI("qux"), NewVersion("zilch"), nil),
	}
	lps2 := make([]LockedProject, len(lps))
	copy(lps2, lps)

	sort.SliceStable(lps2, func(i, j int) bool {
		return lps2[i].Ident().Less(lps2[j].Ident())
	})

	// only the two should have switched positions
	lps[0], lps[2] = lps[2], lps[0]
	if !reflect.DeepEqual(lps, lps2) {
		t.Errorf("SortLockedProject did not sort as expected:\n\t(GOT) %s\n\t(WNT) %s", lps2, lps)
	}
}

// TestLockedProjectsEq exercises LockedProject.Eq across pairs drawn from a
// fixed slice of projects; fix entries index into lps.
func TestLockedProjectsEq(t *testing.T) {
	// Indices referenced by the fix table below.
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("REV"), []string{"gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("REV"), nil),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("REV"), []string{"gps", "flugle"}),
		NewLockedProject(mkPI("foo"), NewVersion("nada").Pair("OTHERREV"), []string{"foo"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("REV"), []string{"flugle", "gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("REV2"), []string{"gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.11.0").Pair("REV"), []string{"gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), Revision("REV2"), []string{"gps"}),
	}

	// l1/l2 are indices into lps; Eq is checked in both directions.
	fix := map[string]struct {
		l1, l2   int
		shouldeq bool
		err      string
	}{
		"with self":                {0, 0, true, "lp does not eq self"},
		"with different revision":  {0, 5, false, "should not eq with different rev"},
		"with different versions":  {0, 6, false, "should not eq with different version"},
		"with same revsion":        {5, 5, true, "should eq with same rev"},
		"with empty pkg":           {0, 1, false, "should not eq when other pkg list is empty"},
		"with long pkg list":       {0, 2, false, "should not eq when other pkg list is longer"},
		"with different orders":    {2, 4, false, "should not eq when pkg lists are out of order"},
		"with different lp":        {0, 3, false, "should not eq totally different lp"},
		"with only rev":            {7, 7, true, "should eq with only rev"},
		"when only rev matches":    {5, 7, false, "should not eq when only rev matches"},
	}

	for k, f := range fix {
		k, f := k, f
		t.Run(k, func(t *testing.T) {
			if f.shouldeq {
				if !lps[f.l1].Eq(lps[f.l2]) {
					t.Error(f.err)
				}
				if !lps[f.l2].Eq(lps[f.l1]) {
					t.Error(f.err + (" (reversed)"))
				}
			} else {
				if lps[f.l1].Eq(lps[f.l2]) {
					t.Error(f.err)
				}
				if lps[f.l2].Eq(lps[f.l1]) {
					t.Error(f.err + (" (reversed)"))
				}
			}
		})
	}
}

// TestLockedProjectsString checks the String rendering of LockedProject across
// package-list, source, pairing, and revision-only variations.
func TestLockedProjectsString(t *testing.T) {
	tt := []struct {
		name string
		lp   LockedProject
		want string
	}{
		{
			name: "full info",
			lp:   NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}),
			want: "github.com/sdboyer/gps@v0.10.0 with packages: [gps]",
		},
		{
			name: "empty package list",
			lp:   NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{}),
			want: "github.com/sdboyer/gps@v0.10.0 with packages: []",
		},
		{
			name: "nil package",
			lp:   NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
			want: "github.com/sdboyer/gps@v0.10.0 with packages: []",
		},
		{
			name: "with source",
			lp: NewLockedProject(
				ProjectIdentifier{ProjectRoot: "github.com/sdboyer/gps", Source: "github.com/another/repo"},
				NewVersion("v0.10.0"), []string{"."}),
			want: "github.com/sdboyer/gps (from github.com/another/repo)@v0.10.0 with packages: [.]",
		},
		{
			name: "version pair",
			lp:   NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}),
			want: "github.com/sdboyer/gps@v0.10.0 with packages: [gps]",
		},
		{
			name: "revision only",
			lp:   NewLockedProject(mkPI("github.com/sdboyer/gps"), Revision("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}),
			want: "github.com/sdboyer/gps@278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0 with packages: [gps]",
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			s := tc.lp.String()
			if tc.want != s {
				t.Fatalf("want %s, got %s", tc.want, s)
			}
		})
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_manager.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "context" "fmt" "io/ioutil" "log" "net/url" "os" "os/signal" "path/filepath" "runtime" "strings" "sync" "sync/atomic" "time" "github.com/golang/dep/gps/pkgtree" "github.com/golang/dep/internal/fs" "github.com/nightlyone/lockfile" "github.com/pkg/errors" "github.com/sdboyer/constext" ) // Used to compute a friendly filepath from a URL-shaped input. var sanitizer = strings.NewReplacer("-", "--", ":", "-", "/", "-", "+", "-") // A locker is responsible for preventing multiple instances of dep from // interfering with one-another. // // Currently, anything that can either TryLock(), Unlock(), or GetOwner() // satisfies that need. type locker interface { TryLock() error Unlock() error GetOwner() (*os.Process, error) } // A falselocker adheres to the locker interface and its purpose is to quietly // fail to lock when the DEPNOLOCK environment variable is set. // // This allows dep to run on systems where file locking doesn't work -- // particularly those that use union mount type filesystems that don't // implement hard links or fnctl() style locking. type falseLocker struct{} // Always returns an error to indicate there's no current ower PID for our // lock. func (fl falseLocker) GetOwner() (*os.Process, error) { return nil, fmt.Errorf("falseLocker always fails") } // Does nothing and returns a nil error so caller believes locking succeeded. func (fl falseLocker) TryLock() error { return nil } // Does nothing and returns a nil error so caller believes unlocking succeeded. func (fl falseLocker) Unlock() error { return nil } // A SourceManager is responsible for retrieving, managing, and interrogating // source repositories. Its primary purpose is to serve the needs of a Solver, // but it is handy for other purposes, as well. 
// // gps's built-in SourceManager, SourceMgr, is intended to be generic and // sufficient for any purpose. It provides some additional semantics around the // methods defined here. type SourceManager interface { // SourceExists checks if a repository exists, either upstream or in the // SourceManager's central repository cache. SourceExists(ProjectIdentifier) (bool, error) // SyncSourceFor will attempt to bring all local information about a source // fully up to date. SyncSourceFor(ProjectIdentifier) error // ListVersions retrieves a list of the available versions for a given // repository name. ListVersions(ProjectIdentifier) ([]PairedVersion, error) // RevisionPresentIn indicates whether the provided Version is present in // the given repository. RevisionPresentIn(ProjectIdentifier, Revision) (bool, error) // ListPackages parses the tree of the Go packages at or below root of the // provided ProjectIdentifier, at the provided version. ListPackages(ProjectIdentifier, Version) (pkgtree.PackageTree, error) // GetManifestAndLock returns manifest and lock information for the provided // root import path. // // gps currently requires that projects be rooted at their repository root, // necessitating that the ProjectIdentifier's ProjectRoot must also be a // repository root. GetManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error) // ExportProject writes out the tree of the provided import path, at the // provided version, to the provided directory. ExportProject(context.Context, ProjectIdentifier, Version, string) error // ExportPrunedProject writes out the tree corresponding to the provided // LockedProject, the provided version, to the provided directory, applying // the provided pruning options. // // The first return value is the hex-encoded string representation of the // hash, including colon-separated leaders indicating the version of the // hashing function used, and the prune options that were applied. 
ExportPrunedProject(context.Context, LockedProject, PruneOptions, string) error // DeduceProjectRoot takes an import path and deduces the corresponding // project/source root. DeduceProjectRoot(ip string) (ProjectRoot, error) // SourceURLsForPath takes an import path and deduces the set of source URLs // that may refer to a canonical upstream source. // In general, these URLs differ only by protocol (e.g. https vs. ssh), not path SourceURLsForPath(ip string) ([]*url.URL, error) // Release lets go of any locks held by the SourceManager. Once called, it // is no longer allowed to call methods of that SourceManager; all // method calls will immediately result in errors. Release() // InferConstraint tries to puzzle out what kind of version is given in a string - // semver, a revision, or as a fallback, a plain tag InferConstraint(s string, pi ProjectIdentifier) (Constraint, error) } // A ProjectAnalyzer is responsible for analyzing a given path for Manifest and // Lock information. Tools relying on gps must implement one. type ProjectAnalyzer interface { // Perform analysis of the filesystem tree rooted at path, with the // root import path importRoot, to determine the project's constraints, as // indicated by a Manifest and Lock. // // Note that an error will typically cause the solver to treat the analyzed // version as unusable. As such, an error should generally only be returned // if the code tree is somehow malformed, but not if the implementor's // expected files containing Manifest and Lock data are merely absent. DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error) // Info reports this project analyzer's info. Info() ProjectAnalyzerInfo } // ProjectAnalyzerInfo indicates a ProjectAnalyzer's name and version. 
type ProjectAnalyzerInfo struct { Name string Version int } // String returns a string like: "<name>.<decimal version>" func (p ProjectAnalyzerInfo) String() string { return fmt.Sprintf("%s.%d", p.Name, p.Version) } // SourceMgr is the default SourceManager for gps. // // There's no (planned) reason why it would need to be reimplemented by other // tools; control via dependency injection is intended to be sufficient. type SourceMgr struct { cachedir string // path to root of cache dir lf locker // handle for the sm lock file on disk suprvsr *supervisor // subsystem that supervises running calls/io cancelAll context.CancelFunc // cancel func to kill all running work deduceCoord *deductionCoordinator // subsystem that manages import path deduction srcCoord *sourceCoordinator // subsystem that manages sources sigmut sync.Mutex // mutex protecting signal handling setup/teardown qch chan struct{} // quit chan for signal handler relonce sync.Once // once-er to ensure we only release once releasing int32 // flag indicating release of sm has begun } var _ SourceManager = &SourceMgr{} // ErrSourceManagerIsReleased is the error returned by any SourceManager method // called after the SourceManager has been released, rendering its methods no // longer safe to call. var ErrSourceManagerIsReleased = fmt.Errorf("this SourceManager has been released, its methods can no longer be called") // SourceManagerConfig holds configuration information for creating SourceMgrs. type SourceManagerConfig struct { CacheAge time.Duration // Maximum valid age of cached data. <=0: Don't cache. Cachedir string // Where to store local instances of upstream sources. Logger *log.Logger // Optional info/warn logger. Discards if nil. DisableLocking bool // True if the SourceManager should NOT use a lock file to protect the Cachedir from multiple processes. } // NewSourceManager produces an instance of gps's built-in SourceManager. 
//
// The returned SourceManager aggressively caches information wherever
// possible. If tools need to do preliminary work involving upstream
// repository analysis prior to invoking a solve run, it is recommended that
// they create this SourceManager as early as possible and use it to their
// ends. That way, the solver can benefit from any caches that may have
// already been warmed.
//
// A cacheEpoch is calculated from now()-cacheAge, and older persistent cache
// data is discarded. When cacheAge is <= 0, the persistent cache is not used.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file
// a bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(c SourceManagerConfig) (*SourceMgr, error) {
	// Substitute a discarding logger so later code can log unconditionally.
	if c.Logger == nil {
		c.Logger = log.New(ioutil.Discard, "", 0)
	}

	err := fs.EnsureDir(filepath.Join(c.Cachedir, "sources"), 0777)
	if err != nil {
		return nil, err
	}

	// Fix for #820
	//
	// Consult https://godoc.org/github.com/nightlyone/lockfile for the
	// lockfile behaviour. It's magic. It deals with stale processes, and if
	// there is a process keeping the lock busy, it will pass back a temporary
	// error that we can spin on.
	glpath := filepath.Join(c.Cachedir, "sm.lock")

	// When locking is disabled, substitute a no-op locker; the closure keeps
	// err handling uniform for both branches.
	lockfile, err := func() (locker, error) {
		if c.DisableLocking {
			return falseLocker{}, nil
		}
		return lockfile.New(glpath)
	}()
	if err != nil {
		return nil, CouldNotCreateLockError{
			Path: glpath,
			Err:  errors.Wrapf(err, "unable to create lock %s", glpath),
		}
	}

	process, err := lockfile.GetOwner()
	if err == nil {
		// If we didn't get an error, then the lockfile exists already. We
		// should check to see if it's us already:
		if process.Pid == os.Getpid() {
			return nil, CouldNotCreateLockError{
				Path: glpath,
				Err:  fmt.Errorf("lockfile %s already locked by this process", glpath),
			}
		}

		// There is a lockfile, but it's owned by someone else. We'll try to
		// lock it anyway.
	}

	// If it's a TemporaryError, we retry every second. Otherwise, we fail
	// permanently.
	//
	// TODO: #534 needs to be implemented to provide a better way to log
	// warnings, but until then we will just use stderr.

	// Implicit Time of 0.
	var lasttime time.Time
	err = lockfile.TryLock()
	for err != nil {
		nowtime := time.Now()
		duration := nowtime.Sub(lasttime)

		// The first time this is evaluated, duration will be very large as
		// lasttime is 0. Unless time travel is invented and someone travels
		// back to the year 1, we should be ok.
		if duration > 15*time.Second {
			fmt.Fprintf(os.Stderr, "waiting for lockfile %s: %s\n", glpath, err.Error())
			lasttime = nowtime
		}

		if t, ok := err.(interface {
			Temporary() bool
		}); ok && t.Temporary() {
			time.Sleep(time.Second * 1)
		} else {
			return nil, CouldNotCreateLockError{
				Path: glpath,
				Err:  errors.Wrapf(err, "unable to lock %s", glpath),
			}
		}
		err = lockfile.TryLock()
	}

	ctx, cf := context.WithCancel(context.TODO())
	superv := newSupervisor(ctx)
	deducer := newDeductionCoordinator(superv)

	// Only set up the persistent disk cache when a positive CacheAge was
	// requested; otherwise sc stays nil (memory-only caching downstream).
	var sc sourceCache
	if c.CacheAge > 0 {
		// Try to open the BoltDB cache from disk.
		epoch := time.Now().Add(-c.CacheAge).Unix()
		boltCache, err := newBoltCache(c.Cachedir, epoch, c.Logger)
		if err != nil {
			// A failed disk cache is non-fatal: log and continue uncached.
			c.Logger.Println(errors.Wrapf(err, "failed to open persistent cache %q", c.Cachedir))
		} else {
			sc = newMultiCache(memoryCache{}, boltCache)
		}
	}

	sm := &SourceMgr{
		cachedir:    c.Cachedir,
		lf:          lockfile,
		suprvsr:     superv,
		cancelAll:   cf,
		deduceCoord: deducer,
		srcCoord:    newSourceCoordinator(superv, deducer, c.Cachedir, sc, c.Logger),
		qch:         make(chan struct{}),
	}

	return sm, nil
}

// Cachedir returns the location of the cache directory.
func (sm *SourceMgr) Cachedir() string {
	return sm.cachedir
}

// UseDefaultSignalHandling sets up typical os.Interrupt signal handling for a
// SourceMgr.
func (sm *SourceMgr) UseDefaultSignalHandling() {
	sigch := make(chan os.Signal, 1)
	signal.Notify(sigch, os.Interrupt)
	sm.HandleSignals(sigch)
}

// HandleSignals sets up logic to handle incoming signals with the goal of
// shutting down the SourceMgr safely.
//
// Calling code must provide the signal channel, and is responsible for calling
// signal.Notify() on that channel.
//
// Successive calls to HandleSignals() will deregister the previous handler and
// set up a new one. It is not recommended that the same channel be passed
// multiple times to this method.
//
// UseDefaultSignalHandling() will set up a handler that is appropriate for
// most use cases.
func (sm *SourceMgr) HandleSignals(sigch chan os.Signal) {
	sm.sigmut.Lock()
	// always start by closing the qch, which will lead to any existing signal
	// handler terminating, and deregistering its sigch.
	if sm.qch != nil {
		close(sm.qch)
	}
	sm.qch = make(chan struct{})

	// Run a new goroutine with the input sigch and the fresh qch.
	go func(sch chan os.Signal, qch <-chan struct{}) {
		defer signal.Stop(sch)
		select {
		case <-sch:
			// Set up a timer to uninstall the signal handler after three
			// seconds, so that the user can easily force termination with a
			// second ctrl-c.
			time.AfterFunc(3*time.Second, func() {
				signal.Stop(sch)
			})

			if opc := sm.suprvsr.count(); opc > 0 {
				fmt.Printf("Signal received: waiting for %v ops to complete...\n", opc)
			}

			sm.Release()
		case <-qch:
			// quit channel triggered - deregister our sigch and return
		}
	}(sigch, sm.qch)
	// Try to ensure handler is blocked in for-select before releasing the mutex
	runtime.Gosched()

	sm.sigmut.Unlock()
}

// StopSignalHandling deregisters any signal handler running on this SourceMgr.
//
// It's normally not necessary to call this directly; it will be called as
// needed by Release().
func (sm *SourceMgr) StopSignalHandling() {
	sm.sigmut.Lock()
	if sm.qch != nil {
		close(sm.qch)
		sm.qch = nil
		// Yield so the handler goroutine can observe the close and exit
		// before we release the mutex.
		runtime.Gosched()
	}
	sm.sigmut.Unlock()
}

// CouldNotCreateLockError describe failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
	Path string // path of the lock file that could not be created
	Err  error  // underlying cause
}

func (e CouldNotCreateLockError) Error() string {
	return e.Err.Error()
}

// Release lets go of any locks held by the SourceManager. Once called, it is no
// longer allowed to call methods of that SourceManager; all method calls will
// immediately result in errors.
func (sm *SourceMgr) Release() {
	// Set the releasing flag first so that all in-flight and future method
	// calls fail fast, then perform the actual teardown at most once.
	atomic.StoreInt32(&sm.releasing, 1)

	sm.relonce.Do(func() {
		// Send the signal to the supervisor to cancel all running calls.
		sm.cancelAll()
		sm.suprvsr.wait()

		// Close the source coordinator.
		sm.srcCoord.close()

		// Close the file handle for the lock file and remove it from disk
		sm.lf.Unlock()
		os.Remove(filepath.Join(sm.cachedir, "sm.lock"))

		// Close the qch, if non-nil, so the signal handlers run out. This will
		// also deregister the sig channel, if any has been set up.
		if sm.qch != nil {
			close(sm.qch)
		}
	})
}

// GetManifestAndLock returns manifest and lock information for the provided
// ProjectIdentifier, at the provided Version. The work of producing the
// manifest and lock is delegated to the provided ProjectAnalyzer's
// DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) {
	if atomic.LoadInt32(&sm.releasing) == 1 {
		return nil, nil, ErrSourceManagerIsReleased
	}

	srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id)
	if err != nil {
		return nil, nil, err
	}

	return srcg.getManifestAndLock(context.TODO(), id.ProjectRoot, v, an)
}

// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (pkgtree.PackageTree, error) { if atomic.LoadInt32(&sm.releasing) == 1 { return pkgtree.PackageTree{}, ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return pkgtree.PackageTree{}, err } return srcg.listPackages(context.TODO(), id.ProjectRoot, v) } // ListVersions retrieves a list of the available versions for a given // repository name. // // The list is not sorted; while it may be returned in the order that the // underlying VCS reports version information, no guarantee is made. It is // expected that the caller either not care about order, or sort the result // themselves. // // This list is always retrieved from upstream on the first call. Subsequent // calls will return a cached version of the first call's results. if upstream // is not accessible (network outage, access issues, or the resource actually // went away), an error will be returned. func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) { if atomic.LoadInt32(&sm.releasing) == 1 { return nil, ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { // TODO(sdboyer) More-er proper-er errors return nil, err } return srcg.listVersions(context.TODO()) } // RevisionPresentIn indicates whether the provided Revision is present in the given // repository. func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) { if atomic.LoadInt32(&sm.releasing) == 1 { return false, ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { // TODO(sdboyer) More-er proper-er errors return false, err } return srcg.revisionPresentIn(context.TODO(), r) } // SourceExists checks if a repository exists, either upstream or in the cache, // for the provided ProjectIdentifier. 
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) { if atomic.LoadInt32(&sm.releasing) == 1 { return false, ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return false, err } ctx := context.TODO() if err := srcg.existsInCache(ctx); err == nil { return true, nil } if err := srcg.existsUpstream(ctx); err != nil { return false, err } return true, nil } // SyncSourceFor will ensure that all local caches and information about a // source are up to date with any network-acccesible information. // // The primary use case for this is prefetching. func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error { if atomic.LoadInt32(&sm.releasing) == 1 { return ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return err } return srcg.syncLocal(context.TODO()) } // ExportProject writes out the tree of the provided ProjectIdentifier's // ProjectRoot, at the provided version, to the provided directory. func (sm *SourceMgr) ExportProject(ctx context.Context, id ProjectIdentifier, v Version, to string) error { if atomic.LoadInt32(&sm.releasing) == 1 { return ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(ctx, id) if err != nil { return err } return srcg.exportVersionTo(ctx, v, to) } // ExportPrunedProject writes out a tree of the provided LockedProject, applying // provided pruning rules as appropriate. func (sm *SourceMgr) ExportPrunedProject(ctx context.Context, lp LockedProject, prune PruneOptions, to string) error { if atomic.LoadInt32(&sm.releasing) == 1 { return ErrSourceManagerIsReleased } srcg, err := sm.srcCoord.getSourceGatewayFor(ctx, lp.Ident()) if err != nil { return err } return srcg.exportPrunedVersionTo(ctx, lp, prune, to) } // DeduceProjectRoot takes an import path and deduces the corresponding // project/source root. 
// // Note that some import paths may require network activity to correctly // determine the root of the path, such as, but not limited to, vanity import // paths. (A special exception is written for gopkg.in to minimize network // activity, as its behavior is well-structured) func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) { if atomic.LoadInt32(&sm.releasing) == 1 { return "", ErrSourceManagerIsReleased } // TODO(sdboyer) refactor deduceRootPath() so that this validation can move // back down below a cache point, rather than executing on every call. if !pathvld.MatchString(ip) { return "", errors.Errorf("%q is not a valid import path", ip) } pd, err := sm.deduceCoord.deduceRootPath(context.TODO(), ip) return ProjectRoot(pd.root), err } // InferConstraint tries to puzzle out what kind of version is given in a // string. Preference is given first for branches, then semver constraints, then // plain tags, and then revisions. func (sm *SourceMgr) InferConstraint(s string, pi ProjectIdentifier) (Constraint, error) { if s == "" { return Any(), nil } // Lookup the string in the repository var version PairedVersion versions, err := sm.ListVersions(pi) if err != nil { return nil, errors.Wrapf(err, "list versions for %s", pi) // means repo does not exist } SortPairedForUpgrade(versions) for _, v := range versions { if s == v.String() { version = v break } } // Branch if version != nil && version.Type() == IsBranch { return version.Unpair(), nil } // Semver Constraint c, err := NewSemverConstraintIC(s) if c != nil && err == nil { return c, nil } // Tag if version != nil { return version.Unpair(), nil } // Revision, possibly abbreviated r, err := sm.disambiguateRevision(context.TODO(), pi, Revision(s)) if err == nil { return r, nil } return nil, errors.Errorf("%s is not a valid version for the package %s(%s)", s, pi.ProjectRoot, pi.Source) } // SourceURLsForPath takes an import path and deduces the set of source URLs // that may refer to a canonical 
upstream source. // In general, these URLs differ only by protocol (e.g. https vs. ssh), not path func (sm *SourceMgr) SourceURLsForPath(ip string) ([]*url.URL, error) { deduced, err := sm.deduceCoord.deduceRootPath(context.TODO(), ip) if err != nil { return nil, err } return deduced.mb.possibleURLs(), nil } // disambiguateRevision looks up a revision in the underlying source, spitting // it back out in an unabbreviated, disambiguated form. // // For example, if pi refers to a git-based project, then rev could be an // abbreviated git commit hash. disambiguateRevision would return the complete // hash. func (sm *SourceMgr) disambiguateRevision(ctx context.Context, pi ProjectIdentifier, rev Revision) (Revision, error) { srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), pi) if err != nil { return "", err } return srcg.disambiguateRevision(ctx, rev) } type timeCount struct { count int start time.Time } type durCount struct { count int dur time.Duration } type supervisor struct { ctx context.Context mu sync.Mutex // Guards all maps cond sync.Cond // Wraps mu so callers can wait until all calls end running map[callInfo]timeCount ran map[callType]durCount } func newSupervisor(ctx context.Context) *supervisor { supv := &supervisor{ ctx: ctx, running: make(map[callInfo]timeCount), ran: make(map[callType]durCount), } supv.cond = sync.Cond{L: &supv.mu} return supv } // do executes the incoming closure using a conjoined context, and keeps // counters to ensure the sourceMgr can't finish Release()ing until after all // calls have returned. 
func (sup *supervisor) do(inctx context.Context, name string, typ callType, f func(context.Context) error) error { ci := callInfo{ name: name, typ: typ, } octx, err := sup.start(ci) if err != nil { return err } cctx, cancelFunc := constext.Cons(inctx, octx) err = f(cctx) sup.done(ci) cancelFunc() return err } func (sup *supervisor) start(ci callInfo) (context.Context, error) { sup.mu.Lock() defer sup.mu.Unlock() if err := sup.ctx.Err(); err != nil { // We've already been canceled; error out. return nil, err } if existingInfo, has := sup.running[ci]; has { existingInfo.count++ sup.running[ci] = existingInfo } else { sup.running[ci] = timeCount{ count: 1, start: time.Now(), } } return sup.ctx, nil } func (sup *supervisor) count() int { sup.mu.Lock() defer sup.mu.Unlock() return len(sup.running) } func (sup *supervisor) done(ci callInfo) { sup.mu.Lock() existingInfo, has := sup.running[ci] if !has { panic(fmt.Sprintf("sourceMgr: tried to complete a call that had not registered via run()")) } if existingInfo.count > 1 { // If more than one is pending, don't stop the clock yet. existingInfo.count-- sup.running[ci] = existingInfo } else { // Last one for this particular key; update metrics with info. durCnt := sup.ran[ci.typ] durCnt.count++ durCnt.dur += time.Since(existingInfo.start) sup.ran[ci.typ] = durCnt delete(sup.running, ci) if len(sup.running) == 0 { // This is the only place where we signal the cond, as it's the only // time that the number of running calls could become zero. sup.cond.Signal() } } sup.mu.Unlock() } // wait until all active calls have terminated. // // Assumes something else has already canceled the supervisor via its context. 
func (sup *supervisor) wait() { sup.cond.L.Lock() for len(sup.running) > 0 { sup.cond.Wait() } sup.cond.L.Unlock() } type callType uint const ( ctHTTPMetadata callType = iota ctListVersions ctGetManifestAndLock ctListPackages ctSourcePing ctSourceInit ctSourceFetch ctExportTree ctValidateLocal ) func (ct callType) String() string { switch ct { case ctHTTPMetadata: return "Retrieving go get metadata" case ctListVersions: return "Retrieving latest version list" case ctGetManifestAndLock: return "Reading manifest and lock data" case ctListPackages: return "Parsing PackageTree" case ctSourcePing: return "Checking for upstream existence" case ctSourceInit: return "Initializing local source cache" case ctSourceFetch: return "Fetching latest data into local source cache" case ctExportTree: return "Writing code tree out to disk" default: panic("unknown calltype") } } // callInfo provides metadata about an ongoing call. type callInfo struct { name string typ callType }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_multi.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"github.com/golang/dep/gps/pkgtree"
)

// multiCache creates singleSourceMultiCaches, and coordinates their async updates.
type multiCache struct {
	mem, disk sourceCache

	// Asynchronous disk cache updates. Closed by the close method.
	async chan func()
	// Closed when async has completed processing.
	done chan struct{}
}

// newMultiCache returns a new multiCache backed by mem and disk sourceCaches.
// Spawns a single background goroutine which lives until close() is called.
func newMultiCache(mem, disk sourceCache) *multiCache {
	m := &multiCache{
		mem:   mem,
		disk:  disk,
		async: make(chan func(), 50),
		done:  make(chan struct{}),
	}
	go m.processAsync()
	return m
}

// processAsync drains the queue of disk-cache writes in order, then signals
// done. Runs on the goroutine spawned by newMultiCache.
func (c *multiCache) processAsync() {
	for f := range c.async {
		f()
	}
	close(c.done)
}

// close releases resources after blocking until async writes complete.
func (c *multiCache) close() error {
	// Closing async stops intake; processAsync then drains the remaining
	// queued writes before closing done.
	close(c.async)
	_ = c.mem.close()
	<-c.done
	return c.disk.close()
}

// newSingleSourceCache returns a singleSourceMultiCache for id.
func (c *multiCache) newSingleSourceCache(id ProjectIdentifier) singleSourceCache {
	return &singleSourceMultiCache{
		mem:   c.mem.newSingleSourceCache(id),
		disk:  c.disk.newSingleSourceCache(id),
		async: c.async,
	}
}

// singleSourceMultiCache manages two cache levels, ephemeral in-memory and persistent on-disk.
//
// The in-memory cache is always checked first, with the on-disk used as a fallback.
// Values read from disk are set in-memory when an appropriate method exists.
//
// Set values are cached both in-memory and on-disk. Values are set synchronously
// in-memory. Writes to the on-disk cache are asynchronous, and executed in order by a
// background goroutine.
type singleSourceMultiCache struct {
	mem, disk singleSourceCache

	// Asynchronous disk cache updates; shared with the owning multiCache's
	// background goroutine.
	async chan<- func()
}

func (c *singleSourceMultiCache) setManifestAndLock(r Revision, ai ProjectAnalyzerInfo, m Manifest, l Lock) {
	c.mem.setManifestAndLock(r, ai, m, l)
	c.async <- func() { c.disk.setManifestAndLock(r, ai, m, l) }
}

func (c *singleSourceMultiCache) getManifestAndLock(r Revision, ai ProjectAnalyzerInfo) (Manifest, Lock, bool) {
	m, l, ok := c.mem.getManifestAndLock(r, ai)
	if ok {
		return m, l, true
	}

	m, l, ok = c.disk.getManifestAndLock(r, ai)
	if ok {
		// Promote the disk hit into the in-memory cache.
		c.mem.setManifestAndLock(r, ai, m, l)
		return m, l, true
	}

	return nil, nil, false
}

func (c *singleSourceMultiCache) setPackageTree(r Revision, ptree pkgtree.PackageTree) {
	c.mem.setPackageTree(r, ptree)
	c.async <- func() { c.disk.setPackageTree(r, ptree) }
}

func (c *singleSourceMultiCache) getPackageTree(r Revision, pr ProjectRoot) (pkgtree.PackageTree, bool) {
	ptree, ok := c.mem.getPackageTree(r, pr)
	if ok {
		return ptree, true
	}

	ptree, ok = c.disk.getPackageTree(r, pr)
	if ok {
		// Promote the disk hit into the in-memory cache.
		c.mem.setPackageTree(r, ptree)
		return ptree, true
	}

	return pkgtree.PackageTree{}, false
}

func (c *singleSourceMultiCache) markRevisionExists(r Revision) {
	c.mem.markRevisionExists(r)
	c.async <- func() { c.disk.markRevisionExists(r) }
}

func (c *singleSourceMultiCache) setVersionMap(pvs []PairedVersion) {
	c.mem.setVersionMap(pvs)
	c.async <- func() { c.disk.setVersionMap(pvs) }
}

func (c *singleSourceMultiCache) getVersionsFor(rev Revision) ([]UnpairedVersion, bool) {
	uvs, ok := c.mem.getVersionsFor(rev)
	if ok {
		return uvs, true
	}
	// No in-memory promotion here: singleSourceCache exposes no setter for a
	// single revision's versions.
	return c.disk.getVersionsFor(rev)
}

func (c *singleSourceMultiCache) getAllVersions() ([]PairedVersion, bool) {
	pvs, ok := c.mem.getAllVersions()
	if ok {
		return pvs, true
	}

	pvs, ok = c.disk.getAllVersions()
	if ok {
		// Promote the disk hit into the in-memory cache.
		c.mem.setVersionMap(pvs)
		return pvs, true
	}

	return nil, false
}

func (c *singleSourceMultiCache) getRevisionFor(uv UnpairedVersion) (Revision, bool) {
	rev, ok := c.mem.getRevisionFor(uv)
	if ok {
		return rev, true
	}
	return c.disk.getRevisionFor(uv)
}

func (c *singleSourceMultiCache) toRevision(v Version) (Revision, bool) {
	rev, ok := c.mem.toRevision(v)
	if ok {
		return rev, true
	}
	return c.disk.toRevision(v)
}

func (c *singleSourceMultiCache) toUnpaired(v Version) (UnpairedVersion, bool) {
	uv, ok := c.mem.toUnpaired(v)
	if ok {
		return uv, true
	}
	return c.disk.toUnpaired(v)
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/discovery.go
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

// This code is taken from cmd/go/discovery.go; it is the logic go get itself
// uses to interpret meta imports information.

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// charsetReader returns a reader for the given charset. Currently
// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
// error which is printed by go get, so the user can find why the package
// wasn't downloaded if the encoding is not supported. Note that, in
// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
// greater than 0x7f are not rejected). The XML decoder only invokes this for
// charsets other than UTF-8, which is why no explicit utf-8 case is needed.
func charsetReader(charset string, input io.Reader) (io.Reader, error) {
	switch strings.ToLower(charset) {
	case "ascii":
		return input, nil
	default:
		return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
	}
}

// metaImport holds one parsed <meta name="go-import"> directive:
// import path prefix, VCS type, and repository root URL.
type metaImport struct {
	Prefix, VCS, RepoRoot string
}

// parseMetaGoImports returns meta imports from the HTML in r.
// Parsing ends at the end of the <head> section or the beginning of the <body>.
//
// This copy of cmd/go/internal/vcs.parseMetaGoImports always operates
// in IgnoreMod ModuleMode.
func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) { d := xml.NewDecoder(r) d.CharsetReader = charsetReader d.Strict = false var t xml.Token for { t, err = d.RawToken() if err != nil { if err == io.EOF || len(imports) > 0 { err = nil } return } if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { return } if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { return } e, ok := t.(xml.StartElement) if !ok || !strings.EqualFold(e.Name.Local, "meta") { continue } if attrValue(e.Attr, "name") != "go-import" { continue } if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 { // Ignore VCS type "mod", which is applicable only in module mode. if f[1] == "mod" { continue } imports = append(imports, metaImport{ Prefix: f[0], VCS: f[1], RepoRoot: f[2], }) } } } // attrValue returns the attribute value for the case-insensitive key // `name', or the empty string if nothing is found. func attrValue(attrs []xml.Attr, name string) string { for _, a := range attrs { if strings.EqualFold(a.Name.Local, name) { return a.Value } } return "" }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/filesystem_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	"github.com/golang/dep/internal/test"
)

// This file contains utilities for running tests around file system state.

// fsTestCase pairs the filesystem state written before a test with the state
// expected afterward.
type fsTestCase struct {
	before, after filesystemState
}

// assert makes sure that the tc.after state matches the state of the actual host
// file system at tc.after.root.
func (tc fsTestCase) assert(t *testing.T) {
	// Build lookup sets of every expected path; entries are deleted as the
	// walk finds them, so anything left over at the end was missing.
	dirMap := make(map[string]bool)
	fileMap := make(map[string]bool)
	linkMap := make(map[string]bool)

	for _, d := range tc.after.dirs {
		dirMap[filepath.Join(tc.after.root, d)] = true
	}
	for _, f := range tc.after.files {
		fileMap[filepath.Join(tc.after.root, f)] = true
	}
	for _, l := range tc.after.links {
		linkMap[filepath.Join(tc.after.root, l.path)] = true
	}

	err := filepath.Walk(tc.after.root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			t.Errorf("filepath.Walk path=%q err=%q", path, err)
			return err
		}

		if path == tc.after.root {
			return nil
		}

		// Careful! Have to check whether the path is a symlink first because, on
		// windows, a symlink to a directory will return 'true' for info.IsDir().
		if (info.Mode() & os.ModeSymlink) != 0 {
			if linkMap[path] {
				delete(linkMap, path)
			} else {
				t.Errorf("unexpected symlink exists %q", path)
			}
			return nil
		}

		if info.IsDir() {
			if dirMap[path] {
				delete(dirMap, path)
			} else {
				t.Errorf("unexpected directory exists %q", path)
			}
			return nil
		}

		if fileMap[path] {
			delete(fileMap, path)
		} else {
			t.Errorf("unexpected file exists %q", path)
		}
		return nil
	})

	if err != nil {
		t.Errorf("filesystem.Walk err=%q", err)
	}

	// Whatever remains in the maps was expected but never encountered.
	for d := range dirMap {
		t.Errorf("could not find expected directory %q", d)
	}
	for f := range fileMap {
		t.Errorf("could not find expected file %q", f)
	}
	for l := range linkMap {
		t.Errorf("could not find expected symlink %q", l)
	}
}

// setup inflates fs onto the actual host file system at tc.before.root.
// It doesn't delete existing files and should be used on empty roots only.
func (tc fsTestCase) setup(t *testing.T) {
	if err := tc.before.setup(); err != nil {
		t.Fatal(err)
	}
}

// TestDeriveFilesystemState verifies that deriveFilesystemState reproduces a
// filesystemState equivalent to the one used to populate the directory,
// including broken and circular symlinks.
func TestDeriveFilesystemState(t *testing.T) {
	testcases := []struct {
		name string
		fs   fsTestCase
	}{
		{
			name: "simple-case",
			fs: fsTestCase{
				before: filesystemState{
					dirs: []string{
						"simple-dir",
					},
					files: []string{
						"simple-file",
					},
				},
				after: filesystemState{
					dirs: []string{
						"simple-dir",
					},
					files: []string{
						"simple-file",
					},
				},
			},
		},
		{
			name: "simple-symlink-case",
			fs: fsTestCase{
				before: filesystemState{
					dirs: []string{
						"simple-dir",
					},
					files: []string{
						"simple-file",
					},
					links: []fsLink{
						{
							path:   "link",
							to:     "nonexisting",
							broken: true,
						},
					},
				},
				after: filesystemState{
					dirs: []string{
						"simple-dir",
					},
					files: []string{
						"simple-file",
					},
					links: []fsLink{
						{
							path:   "link",
							to:     "",
							broken: true,
						},
					},
				},
			},
		},
		{
			name: "complex-symlink-case",
			fs: fsTestCase{
				before: filesystemState{
					links: []fsLink{
						{
							path:     "link1",
							to:       "link2",
							circular: true,
						},
						{
							path:     "link2",
							to:       "link1",
							circular: true,
						},
					},
				},
				after: filesystemState{
					links: []fsLink{
						{
							path:     "link1",
							to:       "",
							circular: true,
						},
						{
							path:     "link2",
							to:       "",
							circular: true,
						},
					},
				},
			},
		},
	}

	for _, tc := range testcases {
		h := test.NewHelper(t)

		h.TempDir(tc.name)

		tc.fs.before.root = h.Path(tc.name)
		tc.fs.after.root = h.Path(tc.name)

		tc.fs.setup(t)

		state, err := deriveFilesystemState(h.Path(tc.name))
		if err != nil {
			t.Fatal(err)
		}

		if !reflect.DeepEqual(tc.fs.after, state) {
			fmt.Println(tc.fs.after)
			fmt.Println(state)
			t.Fatal("filesystem state mismatch")
		}

		h.Cleanup()
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/version.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"sort"

	"github.com/Masterminds/semver"
	"github.com/golang/dep/gps/internal/pb"
)

// VersionType indicates a type for a Version that conveys some additional
// semantics beyond that which is literally embedded on the Go type.
type VersionType uint8

// VersionTypes for the four major classes of version.
const (
	IsRevision VersionType = iota
	IsVersion
	IsSemver
	IsBranch
)

// Version represents one of the different types of versions used by gps.
//
// Version composes Constraint, because all versions can be used as a constraint
// (where they allow one, and only one, version - themselves), but constraints
// are not necessarily discrete versions.
//
// Version is an interface, but it contains private methods, which restricts it
// to gps's own internal implementations. We do this for the confluence of
// two reasons: the implementation of Versions is complete (there is no case in
// which we'd need other types), and the implementation relies on type magic
// under the hood, which would be unsafe to do if other dynamic types could be
// hiding behind the interface.
type Version interface {
	Constraint

	// Indicates the type of version - Revision, Branch, Version, or Semver.
	Type() VersionType
}

// PairedVersion represents a normal Version, but paired with its corresponding,
// underlying Revision.
type PairedVersion interface {
	Version

	// Revision returns the immutable Revision that identifies this Version.
	Revision() Revision

	// Unpair returns the surface-level UnpairedVersion that half of the pair.
	//
	// It does NOT modify the original PairedVersion.
	Unpair() UnpairedVersion

	// Ensures it is impossible to be both a PairedVersion and an
	// UnpairedVersion. (The parameter types of _pair differ between the two
	// interfaces, so no type can satisfy both.)
	_pair(int)
}

// UnpairedVersion represents a normal Version, with a method for creating a
// VersionPair by indicating the version's corresponding, underlying Revision.
type UnpairedVersion interface {
	Version

	// Pair takes the underlying Revision that this UnpairedVersion corresponds
	// to and unites them into a PairedVersion.
	Pair(Revision) PairedVersion

	// Ensures it is impossible to be both a PairedVersion and an
	// UnpairedVersion.
	_pair(bool)
}

// types are weird
func (branchVersion) _pair(bool) {}
func (plainVersion) _pair(bool)  {}
func (semVersion) _pair(bool)    {}
func (versionPair) _pair(int)    {}

// NewBranch creates a new Version to represent a floating version (in
// general, a branch).
func NewBranch(body string) UnpairedVersion {
	return branchVersion{
		name: body,
		// We always set isDefault to false here, because the property is
		// specifically designed to be internal-only: only the SourceManager
		// gets to mark it. This is OK because nothing that client code is
		// responsible for needs to touch it.
		//
		// TODO(sdboyer) ...maybe. this just ugly.
		isDefault: false,
	}
}

// newDefaultBranch is the internal-only counterpart of NewBranch that marks
// the branch as the source's default.
func newDefaultBranch(body string) UnpairedVersion {
	return branchVersion{
		name:      body,
		isDefault: true,
	}
}

// NewVersion creates a Semver-typed Version if the provided version string is
// valid semver, and a plain/non-semver version if not.
func NewVersion(body string) UnpairedVersion {
	sv, err := semver.NewVersion(body)
	if err != nil {
		return plainVersion(body)
	}
	return semVersion{sv: sv}
}

// A Revision represents an immutable versioning identifier.
type Revision string

// String converts the Revision back into a string.
func (r Revision) String() string {
	return string(r)
}

// ImpliedCaretString follows the same rules as String(), but in accordance with
// the Constraint interface will always print a leading "=", as all Versions,
// when acting as a Constraint, act as exact matches.
func (r Revision) ImpliedCaretString() string { return r.String() } func (r Revision) typedString() string { return "r-" + string(r) } // Type indicates the type of version - for revisions, "revision". func (r Revision) Type() VersionType { return IsRevision } // Matches is the Revision acting as a constraint; it checks to see if the provided // version is the same Revision as itself. func (r Revision) Matches(v Version) bool { switch tv := v.(type) { case Revision: return r == tv case versionPair: return r == tv.r } return false } // MatchesAny is the Revision acting as a constraint; it checks to see if the provided // version is the same Revision as itself. func (r Revision) MatchesAny(c Constraint) bool { switch tc := c.(type) { case anyConstraint: return true case noneConstraint: return false case Revision: return r == tc case versionPair: return r == tc.r } return false } // Intersect computes the intersection of the Constraint with the provided // Constraint. For Revisions, this can only be another, exactly equal // Revision, or a PairedVersion whose underlying Revision is exactly equal. 
func (r Revision) Intersect(c Constraint) Constraint {
	switch tc := c.(type) {
	case anyConstraint:
		return r
	case noneConstraint:
		return none
	case Revision:
		if r == tc {
			return r
		}
	case versionPair:
		if r == tc.r {
			return r
		}
	}
	return none
}

// identical reports whether c is exactly the same Revision value.
func (r Revision) identical(c Constraint) bool {
	r2, ok := c.(Revision)
	if !ok {
		return false
	}
	return r == r2
}

// copyTo serializes the Revision into the given protobuf Constraint message.
func (r Revision) copyTo(msg *pb.Constraint) {
	msg.Type = pb.Constraint_Revision
	msg.Value = string(r)
}

// branchVersion is a floating version: the name of a branch, plus a flag
// recording whether the source reported it as the default branch.
type branchVersion struct {
	name      string
	isDefault bool
}

func (v branchVersion) String() string {
	return string(v.name)
}

func (v branchVersion) ImpliedCaretString() string {
	return v.String()
}

// typedString prefixes the branch name with "b-" for disambiguated output.
func (v branchVersion) typedString() string {
	return fmt.Sprintf("b-%s", v.String())
}

func (v branchVersion) Type() VersionType {
	return IsBranch
}

func (v branchVersion) Matches(v2 Version) bool {
	switch tv := v2.(type) {
	case branchVersion:
		return v.name == tv.name
	case versionPair:
		if tv2, ok := tv.v.(branchVersion); ok {
			return tv2.name == v.name
		}
	}
	return false
}

func (v branchVersion) MatchesAny(c Constraint) bool {
	switch tc := c.(type) {
	case anyConstraint:
		return true
	case noneConstraint:
		return false
	case branchVersion:
		return v.name == tc.name
	case versionPair:
		if tc2, ok := tc.v.(branchVersion); ok {
			return tc2.name == v.name
		}
	}
	return false
}

func (v branchVersion) Intersect(c Constraint) Constraint {
	switch tc := c.(type) {
	case anyConstraint:
		return v
	case noneConstraint:
		return none
	case branchVersion:
		if v.name == tc.name {
			return v
		}
	case versionPair:
		if tc2, ok := tc.v.(branchVersion); ok {
			if v.name == tc2.name {
				return v
			}
		}
	}
	return none
}

// Pair unites this branch with its underlying Revision into a PairedVersion.
func (v branchVersion) Pair(r Revision) PairedVersion {
	return versionPair{
		v: v,
		r: r,
	}
}

// identical compares both the name and the isDefault flag, unlike Matches.
func (v branchVersion) identical(c Constraint) bool {
	v2, ok := c.(branchVersion)
	if !ok {
		return false
	}
	return v == v2
}

func (v branchVersion) copyTo(msg *pb.Constraint) {
	if v.isDefault {
		msg.Type = pb.Constraint_DefaultBranch
	} else {
		msg.Type = pb.Constraint_Branch
	}
	msg.Value = v.name
}

// plainVersion is a non-semver tag version.
type plainVersion string

func (v plainVersion) String() string {
	return string(v)
}

func (v plainVersion) ImpliedCaretString() string {
	return v.String()
}

// typedString prefixes the version with "pv-" for disambiguated output.
func (v plainVersion) typedString() string {
	return fmt.Sprintf("pv-%s", v.String())
}

func (v plainVersion) Type() VersionType {
	return IsVersion
}

func (v plainVersion) Matches(v2 Version) bool {
	switch tv := v2.(type) {
	case plainVersion:
		return v == tv
	case versionPair:
		if tv2, ok := tv.v.(plainVersion); ok {
			return tv2 == v
		}
	}
	return false
}

func (v plainVersion) MatchesAny(c Constraint) bool {
	switch tc := c.(type) {
	case anyConstraint:
		return true
	case noneConstraint:
		return false
	case plainVersion:
		return v == tc
	case versionPair:
		if tc2, ok := tc.v.(plainVersion); ok {
			return tc2 == v
		}
	}
	return false
}

func (v plainVersion) Intersect(c Constraint) Constraint {
	switch tc := c.(type) {
	case anyConstraint:
		return v
	case noneConstraint:
		return none
	case plainVersion:
		if v == tc {
			return v
		}
	case versionPair:
		if tc2, ok := tc.v.(plainVersion); ok {
			if v == tc2 {
				return v
			}
		}
	}
	return none
}

// Pair unites this version with its underlying Revision into a PairedVersion.
func (v plainVersion) Pair(r Revision) PairedVersion {
	return versionPair{
		v: v,
		r: r,
	}
}

func (v plainVersion) identical(c Constraint) bool {
	v2, ok := c.(plainVersion)
	if !ok {
		return false
	}
	return v == v2
}

func (v plainVersion) copyTo(msg *pb.Constraint) {
	msg.Type = pb.Constraint_Version
	msg.Value = string(v)
}

// semVersion wraps a parsed semantic version.
type semVersion struct {
	sv semver.Version
}

func (v semVersion) String() string {
	// Prefer the string as the user originally wrote it (e.g. with a leading
	// "v"); fall back to the normalized form if the original is unavailable.
	str := v.sv.Original()
	if str == "" {
		str = v.sv.String()
	}
	return str
}

func (v semVersion) ImpliedCaretString() string {
	return v.sv.ImpliedCaretString()
}

// typedString prefixes the version with "sv-" for disambiguated output.
func (v semVersion) typedString() string {
	return fmt.Sprintf("sv-%s", v.String())
}

func (v semVersion) Type() VersionType {
	return IsSemver
}

func (v semVersion) Matches(v2 Version) bool {
	switch tv := v2.(type) {
	case semVersion:
		return v.sv.Equal(tv.sv)
	case versionPair:
		if tv2, ok := tv.v.(semVersion); ok {
			return tv2.sv.Equal(v.sv)
		}
	}
	return false
}

func (v semVersion) MatchesAny(c Constraint) bool {
	switch tc := c.(type) {
	case anyConstraint:
		return true
	case noneConstraint:
		return false
	case semVersion:
		return v.sv.Equal(tc.sv)
	case semverConstraint:
		// Delegate range checks to the semver constraint itself.
		return tc.Intersect(v) != none
	case versionPair:
		if tc2, ok := tc.v.(semVersion); ok {
			return tc2.sv.Equal(v.sv)
		}
	}
	return false
}

func (v semVersion) Intersect(c Constraint) Constraint {
	switch tc := c.(type) {
	case anyConstraint:
		return v
	case noneConstraint:
		return none
	case semVersion:
		if v.sv.Equal(tc.sv) {
			return v
		}
	case semverConstraint:
		return tc.Intersect(v)
	case versionPair:
		if tc2, ok := tc.v.(semVersion); ok {
			if v.sv.Equal(tc2.sv) {
				return v
			}
		}
	}
	return none
}

// Pair unites this version with its underlying Revision into a PairedVersion.
func (v semVersion) Pair(r Revision) PairedVersion {
	return versionPair{
		v: v,
		r: r,
	}
}

func (v semVersion) identical(c Constraint) bool {
	v2, ok := c.(semVersion)
	if !ok {
		return false
	}
	return v == v2
}

func (v semVersion) copyTo(msg *pb.Constraint) {
	msg.Type = pb.Constraint_Semver
	msg.Value = v.String() //TODO better encoding which doesn't require re-parsing
}

// versionPair is an UnpairedVersion bound to its underlying Revision.
type versionPair struct {
	v UnpairedVersion
	r Revision
}

func (v versionPair) String() string {
	return v.v.String()
}

func (v versionPair) ImpliedCaretString() string {
	return v.v.ImpliedCaretString()
}

func (v versionPair) typedString() string {
	return fmt.Sprintf("%s-%s", v.Unpair().typedString(), v.Revision().typedString())
}

// Type reports the type of the surface (unpaired) half of the pair.
func (v versionPair) Type() VersionType {
	return v.v.Type()
}

func (v versionPair) Revision() Revision {
	return v.r
}

func (v versionPair) Unpair() UnpairedVersion {
	return v.v
}

func (v versionPair) Matches(v2 Version) bool {
	// Revisions are the strongest form of identity, so check them first.
	switch tv2 := v2.(type) {
	case versionPair:
		return v.r == tv2.r
	case Revision:
		return v.r == tv2
	}

	// Otherwise, fall back to comparing by the surface version.
	switch tv := v.v.(type) {
	case plainVersion, branchVersion:
		if tv.Matches(v2) {
			return true
		}
	case semVersion:
		if tv2, ok := v2.(semVersion); ok {
			if tv.sv.Equal(tv2.sv) {
				return true
			}
		}
	}

	return false
}

func (v versionPair) MatchesAny(c2 Constraint) bool {
	return c2.Matches(v)
}

func (v versionPair) Intersect(c2 Constraint) Constraint {
	switch tc := c2.(type) {
	case anyConstraint:
		return v
	case noneConstraint:
		return none
	case versionPair:
		if v.r == tc.r {
			return v.r
		}
	case Revision:
		if v.r == tc {
			return v.r
		}
	case semverConstraint:
		if tv, ok := v.v.(semVersion); ok {
			if tc.Intersect(tv) == v.v {
				return v
			}
		}
		// If the semver intersection failed, we know nothing could work
		return none
	}

	switch tv := v.v.(type) {
	case plainVersion, branchVersion:
		if c2.Matches(v) {
			return v
		}
	case semVersion:
		if tv2, ok := c2.(semVersion); ok {
			if tv.sv.Equal(tv2.sv) {
				return v
			}
		}
	}

	return none
}

// identical requires both halves of the pair to be identical.
func (v versionPair) identical(c Constraint) bool {
	v2, ok := c.(versionPair)
	if !ok {
		return false
	}
	if v.r != v2.r {
		return false
	}
	return v.v.identical(v2.v)
}

func (v versionPair) copyTo(*pb.Constraint) {
	panic("versionPair should never be serialized; it is solver internal-only")
}

// compareVersionType is a sort func helper that makes a coarse-grained sorting
// decision based on version type.
//
// Make sure that l and r have already been converted from versionPair (if
// applicable).
func compareVersionType(l, r Version) int {
	// Big fugly double type switch. No reflect, because this can be smack in a hot loop
	switch l.(type) {
	case Revision:
		switch r.(type) {
		case Revision:
			return 0
		case branchVersion, plainVersion, semVersion:
			return 1
		}

	case plainVersion:
		switch r.(type) {
		case Revision:
			return -1
		case plainVersion:
			return 0
		case branchVersion, semVersion:
			return 1
		}

	case branchVersion:
		switch r.(type) {
		case Revision, plainVersion:
			return -1
		case branchVersion:
			return 0
		case semVersion:
			return 1
		}

	case semVersion:
		switch r.(type) {
		case Revision, branchVersion, plainVersion:
			return -1
		case semVersion:
			return 0
		}
	}
	panic("unknown version type")
}

// SortForUpgrade sorts a slice of []Version in roughly descending order, so
// that presumably newer versions are visited first. The rules are:
//
//   - All semver versions come first, and sort mostly according to the semver
//     2.0 spec (as implemented by github.com/Masterminds/semver lib), with one
//     exception:
//   - Semver versions with a prerelease are after *all* non-prerelease semver.
//     Within this subset they are sorted first by their numerical component, then
//     lexicographically by their prerelease version.
//   - The default branch(es) is next; the exact semantics of that are specific
//     to the underlying source.
//   - All other branches come next, sorted lexicographically.
//   - All non-semver versions (tags) are next, sorted lexicographically.
//   - Revisions, if any, are last, sorted lexicographically. Revisions do not
//     typically appear in version lists, so the only invariant we maintain is
//     determinism - deeper semantics, like chronology or topology, do not matter.
//
// So, given a slice of the following versions:
//
//   - Branch: master devel
//   - Semver tags: v1.0.0, v1.1.0, v1.1.0-alpha1
//   - Non-semver tags: footag
//   - Revision: f6e74e8d
//
// Sorting for upgrade will result in the following slice:
//
//	[v1.1.0 v1.0.0 v1.1.0-alpha1 master devel footag f6e74e8d]
func SortForUpgrade(vl []Version) {
	sort.Sort(upgradeVersionSorter(vl))
}

// SortPairedForUpgrade has the same behavior as SortForUpgrade, but operates on
// []PairedVersion types.
func SortPairedForUpgrade(vl []PairedVersion) {
	sort.Sort(pvupgradeVersionSorter(vl))
}

// SortForDowngrade sorts a slice of []Version in roughly ascending order, so
// that presumably older versions are visited first.
//
// This is *not* the same as reversing SortForUpgrade (or you could simply
// sort.Reverse()). The type precedence is the same, including the semver vs.
// semver-with-prerelease relation. Lexicographical comparisons within
// non-semver tags, branches, and revisions remains the same as well; because we
// treat these domains as having no ordering relation, there can be no real
// concept of "upgrade" vs "downgrade", so there is no reason to reverse them.
//
// Thus, the only binary relation that is reversed for downgrade is within-type
// comparisons for semver.
//
// So, given a slice of the following versions:
//
//   - Branch: master devel
//   - Semver tags: v1.0.0, v1.1.0, v1.1.0-alpha1
//   - Non-semver tags: footag
//   - Revision: f6e74e8d
//
// Sorting for downgrade will result in the following slice:
//
//	[v1.0.0 v1.1.0 v1.1.0-alpha1 footag devel master f6e74e8d]
func SortForDowngrade(vl []Version) {
	sort.Sort(downgradeVersionSorter(vl))
}

// SortPairedForDowngrade has the same behavior as SortForDowngrade, but
// operates on []PairedVersion types.
func SortPairedForDowngrade(vl []PairedVersion) {
	sort.Sort(pvdowngradeVersionSorter(vl))
}

type upgradeVersionSorter []Version

func (vs upgradeVersionSorter) Len() int {
	return len(vs)
}

func (vs upgradeVersionSorter) Swap(i, j int) {
	vs[i], vs[j] = vs[j], vs[i]
}

func (vs upgradeVersionSorter) Less(i, j int) bool {
	l, r := vs[i], vs[j]
	return vLess(l, r, false)
}

type pvupgradeVersionSorter []PairedVersion

func (vs pvupgradeVersionSorter) Len() int {
	return len(vs)
}

func (vs pvupgradeVersionSorter) Swap(i, j int) {
	vs[i], vs[j] = vs[j], vs[i]
}
func (vs pvupgradeVersionSorter) Less(i, j int) bool {
	l, r := vs[i], vs[j]
	return vLess(l, r, false)
}

type downgradeVersionSorter []Version

func (vs downgradeVersionSorter) Len() int {
	return len(vs)
}

func (vs downgradeVersionSorter) Swap(i, j int) {
	vs[i], vs[j] = vs[j], vs[i]
}

func (vs downgradeVersionSorter) Less(i, j int) bool {
	l, r := vs[i], vs[j]
	return vLess(l, r, true)
}

type pvdowngradeVersionSorter []PairedVersion

func (vs pvdowngradeVersionSorter) Len() int {
	return len(vs)
}
func (vs pvdowngradeVersionSorter) Swap(i, j int) {
	vs[i], vs[j] = vs[j], vs[i]
}
func (vs pvdowngradeVersionSorter) Less(i, j int) bool {
	l, r := vs[i], vs[j]
	return vLess(l, r, true)
}

// vLess is the single comparison func backing all four sorters. down selects
// downgrade (ascending semver) vs. upgrade (descending semver) ordering.
func vLess(l, r Version, down bool) bool {
	// Compare by the surface version, not the pair.
	if tl, ispair := l.(versionPair); ispair {
		l = tl.v
	}
	if tr, ispair := r.(versionPair); ispair {
		r = tr.v
	}

	switch compareVersionType(l, r) {
	case -1:
		return true
	case 1:
		return false
	case 0:
		break
	default:
		panic("unreachable")
	}

	switch tl := l.(type) {
	case branchVersion:
		tr := r.(branchVersion)
		if tl.isDefault != tr.isDefault {
			// If they're not both defaults, then return the left val: if left
			// is the default, then it is "less" (true) b/c we want it earlier.
			// Else the right is the default, and so the left should be later
			// (false).
			return tl.isDefault
		}
		return l.String() < r.String()
	case Revision, plainVersion:
		// All that we can do now is alpha sort
		return l.String() < r.String()
	}

	// This ensures that pre-release versions are always sorted after ALL
	// full-release versions
	lsv, rsv := l.(semVersion).sv, r.(semVersion).sv
	lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == ""
	if (lpre && !rpre) || (!lpre && rpre) {
		return lpre
	}

	if down {
		return lsv.LessThan(rsv)
	}
	return lsv.GreaterThan(rsv)
}

// hidePair converts a []PairedVersion into a []Version, as the former cannot
// be used directly where the latter is expected.
func hidePair(pvl []PairedVersion) []Version {
	vl := make([]Version, 0, len(pvl))
	for _, v := range pvl {
		vl = append(vl, v)
	}
	return vl
}

// VersionComponentStrings decomposes a Version into the underlying number, branch and revision.
func VersionComponentStrings(v Version) (revision string, branch string, version string) {
	switch tv := v.(type) {
	case UnpairedVersion:
		// Deliberately empty: an unpaired version has no revision, and this
		// case must precede PairedVersion so unpaired values fall through.
	case Revision:
		revision = tv.String()
	case PairedVersion:
		revision = tv.Revision().String()
	}

	switch v.Type() {
	case IsBranch:
		branch = v.String()
	case IsSemver, IsVersion:
		version = v.String()
	}

	return
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/manifest.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import "github.com/golang/dep/gps/pkgtree"

// Manifest represents manifest-type data for a project at a particular version.
// The constraints expressed in a manifest determine the set of versions that
// are acceptable to try for a given project.
//
// Expressing a constraint in a manifest does not guarantee that a particular
// dependency will be present. It only guarantees that if packages in the
// project specified by the dependency are discovered through static analysis of
// the (transitive) import graph, then they will conform to the constraint.
//
// This does entail that manifests can express constraints on projects they do
// not themselves import. This is by design, but its implications are complex.
// See the gps docs for more information: https://github.com/sdboyer/gps/wiki
type Manifest interface {
	// Returns a list of project-level constraints.
	DependencyConstraints() ProjectConstraints
}

// RootManifest extends Manifest to add special controls over solving that are
// only afforded to the root project.
type RootManifest interface {
	Manifest

	// Overrides returns a list of ProjectConstraints that will unconditionally
	// supersede any ProjectConstraint declarations made in either the root
	// manifest, or in any dependency's manifest.
	//
	// Overrides are a special control afforded only to root manifests. Tool
	// users should be encouraged to use them only as a last resort; they do not
	// "play well with others" (that is their express goal), and overreliance on
	// them can harm the ecosystem as a whole.
	Overrides() ProjectConstraints

	// IgnoredPackages returns a pkgtree.IgnoredRuleset, which comprises a set
	// of import paths, or import path patterns, that are to be ignored during
	// solving. These ignored import paths can be within the root project, or
	// part of other projects. Ignoring a package means that both it and its
	// (unique) imports will be disregarded by all relevant solver operations.
	//
	// It is an error to include a package in both the ignored and required
	// sets.
	IgnoredPackages() *pkgtree.IgnoredRuleset

	// RequiredPackages returns a set of import paths to require. These packages
	// are required to be present in any solution. The list can include main
	// packages.
	//
	// It is meaningless to specify packages that are within the
	// PackageTree of the ProjectRoot (though not an error, because the
	// RootManifest itself does not report a ProjectRoot).
	//
	// It is an error to include a package in both the ignored and required
	// sets.
	RequiredPackages() map[string]bool
}

// SimpleManifest is a helper for tools to enumerate manifest data. It's
// generally intended for ephemeral manifests, such as those Analyzers create on
// the fly for projects with no manifest metadata, or metadata through a foreign
// tool's idioms.
type SimpleManifest struct {
	Deps ProjectConstraints
}

// Compile-time check that SimpleManifest satisfies Manifest.
var _ Manifest = SimpleManifest{}

// DependencyConstraints returns the project's dependencies.
func (m SimpleManifest) DependencyConstraints() ProjectConstraints {
	return m.Deps
}

// simpleRootManifest exists so that we have a safe value to swap into solver
// params when a nil Manifest is provided.
type simpleRootManifest struct {
	c, ovr ProjectConstraints
	ig     *pkgtree.IgnoredRuleset
	req    map[string]bool
}

func (m simpleRootManifest) DependencyConstraints() ProjectConstraints {
	return m.c
}
func (m simpleRootManifest) Overrides() ProjectConstraints {
	return m.ovr
}
func (m simpleRootManifest) IgnoredPackages() *pkgtree.IgnoredRuleset {
	return m.ig
}
func (m simpleRootManifest) RequiredPackages() map[string]bool {
	return m.req
}

// prepManifest ensures a manifest is prepared and safe for use by the solver.
// This is mostly about ensuring that no outside routine can modify the manifest // while the solver is in-flight, but it also filters out any empty // ProjectProperties. // // This is achieved by copying the manifest's data into a new SimpleManifest. func prepManifest(m Manifest) SimpleManifest { if m == nil { return SimpleManifest{} } deps := m.DependencyConstraints() rm := SimpleManifest{ Deps: make(ProjectConstraints, len(deps)), } for k, d := range deps { // A zero-value ProjectProperties is equivalent to one with an // anyConstraint{} in terms of how the solver will treat it. However, we // normalize between these two by omitting such instances entirely, as // it negates some possibility for false mismatches in input hashing. if d.Constraint == nil { if d.Source == "" { continue } d.Constraint = anyConstraint{} } rm.Deps[k] = d } return rm }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"path"
	"sort"
	"strings"
	"sync"

	"github.com/golang/dep/gps/pkgtree"
)

// sourceCache is an interface for creating singleSourceCaches, and safely
// releasing backing resources via close.
type sourceCache interface {
	// newSingleSourceCache creates a new singleSourceCache for id, which
	// remains valid until close is called.
	newSingleSourceCache(id ProjectIdentifier) singleSourceCache

	// close releases background resources.
	close() error
}

// singleSourceCache provides a method set for storing and retrieving data about
// a single source.
type singleSourceCache interface {
	// Store the manifest and lock information for a given revision, as defined by
	// a particular ProjectAnalyzer.
	setManifestAndLock(Revision, ProjectAnalyzerInfo, Manifest, Lock)

	// Get the manifest and lock information for a given revision, as defined by
	// a particular ProjectAnalyzer.
	getManifestAndLock(Revision, ProjectAnalyzerInfo) (Manifest, Lock, bool)

	// Store a PackageTree for a given revision.
	setPackageTree(Revision, pkgtree.PackageTree)

	// Get the PackageTree for a given revision.
	getPackageTree(Revision, ProjectRoot) (pkgtree.PackageTree, bool)

	// Indicate to the cache that an individual revision is known to exist.
	markRevisionExists(r Revision)

	// Store the mappings between a set of PairedVersions' surface versions
	// their corresponding revisions.
	//
	// The existing list of versions will be purged before writing. Revisions
	// will have their pairings purged, but record of the revision existing will
	// be kept, on the assumption that revisions are immutable and permanent.
	setVersionMap(versionList []PairedVersion)

	// Get the list of unpaired versions corresponding to the given revision.
	getVersionsFor(Revision) ([]UnpairedVersion, bool)

	// Gets all the version pairs currently known to the cache.
	getAllVersions() ([]PairedVersion, bool)

	// Get the revision corresponding to the given unpaired version.
	getRevisionFor(UnpairedVersion) (Revision, bool)

	// Attempt to convert the given Version to a Revision, given information
	// currently present in the cache, and in the Version itself.
	toRevision(v Version) (Revision, bool)

	// Attempt to convert the given Version to an UnpairedVersion, given
	// information currently present in the cache, or in the Version itself.
	//
	// If the input is a revision and multiple UnpairedVersions are associated
	// with it, whatever happens to be the first is returned.
	toUnpaired(v Version) (UnpairedVersion, bool)
}

// memoryCache is a sourceCache which creates singleSourceCacheMemory instances.
type memoryCache struct{}

func (memoryCache) newSingleSourceCache(ProjectIdentifier) singleSourceCache {
	return newMemoryCache()
}

// close is a no-op: an in-memory cache holds no background resources.
func (memoryCache) close() error { return nil }

// singleSourceCacheMemory is the in-memory implementation of singleSourceCache.
type singleSourceCacheMemory struct {
	// Protects all fields.
	mut sync.RWMutex

	infos map[ProjectAnalyzerInfo]map[Revision]projectInfo

	// Replaced, never modified. Imports are *relative* (ImportRoot prefix trimmed).
	ptrees map[Revision]map[string]pkgtree.PackageOrErr

	// Replaced, never modified.
	vList []PairedVersion

	vMap map[UnpairedVersion]Revision
	rMap map[Revision][]UnpairedVersion
}

func newMemoryCache() singleSourceCache {
	return &singleSourceCacheMemory{
		infos:  make(map[ProjectAnalyzerInfo]map[Revision]projectInfo),
		ptrees: make(map[Revision]map[string]pkgtree.PackageOrErr),
		vMap:   make(map[UnpairedVersion]Revision),
		rMap:   make(map[Revision][]UnpairedVersion),
	}
}

// projectInfo pairs the Manifest and Lock produced by one analyzer at one
// revision.
type projectInfo struct {
	Manifest
	Lock
}

func (c *singleSourceCacheMemory) setManifestAndLock(r Revision, pai ProjectAnalyzerInfo, m Manifest, l Lock) {
	c.mut.Lock()
	inner, has := c.infos[pai]
	if !has {
		inner = make(map[Revision]projectInfo)
		c.infos[pai] = inner
	}
	inner[r] = projectInfo{Manifest: m, Lock: l}

	// Ensure there's at least an entry in the rMap so that the rMap always has
	// a complete picture of the revisions we know to exist
	if _, has = c.rMap[r]; !has {
		c.rMap[r] = nil
	}
	c.mut.Unlock()
}

func (c *singleSourceCacheMemory) getManifestAndLock(r Revision, pai ProjectAnalyzerInfo) (Manifest, Lock, bool) {
	c.mut.Lock()
	defer c.mut.Unlock()

	inner, has := c.infos[pai]
	if !has {
		return nil, nil, false
	}

	pi, has := inner[r]
	if has {
		return pi.Manifest, pi.Lock, true
	}
	return nil, nil, false
}

func (c *singleSourceCacheMemory) setPackageTree(r Revision, ptree pkgtree.PackageTree) {
	// Make a copy, with relative import paths.
	pkgs := pkgtree.CopyPackages(ptree.Packages, func(ip string, poe pkgtree.PackageOrErr) (string, pkgtree.PackageOrErr) {
		poe.P.ImportPath = "" // Don't store this
		return strings.TrimPrefix(ip, ptree.ImportRoot), poe
	})

	c.mut.Lock()
	c.ptrees[r] = pkgs

	// Ensure there's at least an entry in the rMap so that the rMap always has
	// a complete picture of the revisions we know to exist
	if _, has := c.rMap[r]; !has {
		c.rMap[r] = nil
	}
	c.mut.Unlock()
}

func (c *singleSourceCacheMemory) getPackageTree(r Revision, pr ProjectRoot) (pkgtree.PackageTree, bool) {
	c.mut.Lock()
	rptree, has := c.ptrees[r]
	c.mut.Unlock()

	if !has {
		return pkgtree.PackageTree{}, false
	}

	// Return a copy, with full import paths.
	pkgs := pkgtree.CopyPackages(rptree, func(rpath string, poe pkgtree.PackageOrErr) (string, pkgtree.PackageOrErr) {
		ip := path.Join(string(pr), rpath)
		if poe.Err == nil {
			poe.P.ImportPath = ip
		}
		return ip, poe
	})

	return pkgtree.PackageTree{
		ImportRoot: string(pr),
		Packages:   pkgs,
	}, true
}

func (c *singleSourceCacheMemory) setVersionMap(versionList []PairedVersion) {
	c.mut.Lock()
	c.vList = versionList
	// TODO(sdboyer) how do we handle cache consistency here - revs that may
	// be out of date vis-a-vis the ptrees or infos maps?
	for r := range c.rMap {
		c.rMap[r] = nil
	}

	c.vMap = make(map[UnpairedVersion]Revision, len(versionList))

	for _, pv := range versionList {
		u, r := pv.Unpair(), pv.Revision()
		c.vMap[u] = r
		c.rMap[r] = append(c.rMap[r], u)
	}
	c.mut.Unlock()
}

func (c *singleSourceCacheMemory) markRevisionExists(r Revision) {
	c.mut.Lock()
	if _, has := c.rMap[r]; !has {
		c.rMap[r] = nil
	}
	c.mut.Unlock()
}

func (c *singleSourceCacheMemory) getVersionsFor(r Revision) ([]UnpairedVersion, bool) {
	c.mut.Lock()
	versionList, has := c.rMap[r]
	c.mut.Unlock()
	return versionList, has
}

func (c *singleSourceCacheMemory) getAllVersions() ([]PairedVersion, bool) {
	c.mut.Lock()
	vList := c.vList
	c.mut.Unlock()

	if vList == nil {
		return nil, false
	}
	// Defensive copy so callers cannot mutate the cached slice.
	cp := make([]PairedVersion, len(vList))
	copy(cp, vList)
	return cp, true
}

func (c *singleSourceCacheMemory) getRevisionFor(uv UnpairedVersion) (Revision, bool) {
	c.mut.Lock()
	r, has := c.vMap[uv]
	c.mut.Unlock()
	return r, has
}

func (c *singleSourceCacheMemory) toRevision(v Version) (Revision, bool) {
	switch t := v.(type) {
	case Revision:
		return t, true
	case PairedVersion:
		return t.Revision(), true
	case UnpairedVersion:
		c.mut.Lock()
		r, has := c.vMap[t]
		c.mut.Unlock()
		return r, has
	default:
		panic(fmt.Sprintf("Unknown version type %T", v))
	}
}

func (c *singleSourceCacheMemory) toUnpaired(v Version) (UnpairedVersion, bool) {
	switch t := v.(type) {
	case UnpairedVersion:
		return t, true
	case PairedVersion:
		return t.Unpair(), true
	case Revision:
		c.mut.Lock()
		upv, has := c.rMap[t]
		c.mut.Unlock()

		if has && len(upv) > 0 {
			return upv[0], true
		}
		return nil, false
	default:
		panic(fmt.Sprintf("unknown version type %T", v))
	}
}

// locksAreEq reports whether two Locks carry the same input imports and the
// same set of locked projects (order-insensitive for imports; projects are
// compared after sorting).
//
// TODO(sdboyer) remove once source caching can be moved into separate package
func locksAreEq(l1, l2 Lock) bool {
	ii1, ii2 := l1.InputImports(), l2.InputImports()
	if len(ii1) != len(ii2) {
		return false
	}

	ilen := len(ii1)
	if ilen > 0 {
		// NOTE(review): this sorts the callers' slices in place — presumably
		// acceptable for current callers; confirm before reuse.
		sort.Strings(ii1)
		sort.Strings(ii2)
		for i := 0; i < ilen; i++ {
			if ii1[i] != ii2[i] {
				return false
			}
		}
	}

	p1, p2 := l1.Projects(), l2.Projects()
	if len(p1) != len(p2) {
		return false
	}

	p1, p2 = sortLockedProjects(p1), sortLockedProjects(p2)

	for k, lp := range p1 {
		if !lp.Eq(p2[k]) {
			return false
		}
	}
	return true
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/metrics.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "fmt" "log" "sort" "text/tabwriter" "time" ) type metrics struct { stack []string times map[string]time.Duration last time.Time } func newMetrics() *metrics { return &metrics{ stack: []string{"other"}, times: map[string]time.Duration{ "other": 0, }, last: time.Now(), } } func (m *metrics) push(name string) { cn := m.stack[len(m.stack)-1] m.times[cn] += time.Since(m.last) m.stack = append(m.stack, name) m.last = time.Now() } func (m *metrics) pop() { on := m.stack[len(m.stack)-1] m.times[on] += time.Since(m.last) m.stack = m.stack[:len(m.stack)-1] m.last = time.Now() } func (m *metrics) dump(l *log.Logger) { s := make(ndpairs, len(m.times)) k := 0 for n, d := range m.times { s[k] = ndpair{ n: n, d: d, } k++ } sort.Sort(sort.Reverse(s)) var tot time.Duration var buf bytes.Buffer w := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', tabwriter.AlignRight) for _, nd := range s { tot += nd.d fmt.Fprintf(w, "\t%s:\t%v\t\n", nd.n, nd.d) } fmt.Fprintf(w, "\n\tTOTAL:\t%v\t\n", tot) w.Flush() l.Println("\nSolver wall times by segment:") l.Println((&buf).String()) } type ndpair struct { n string d time.Duration } type ndpairs []ndpair func (s ndpairs) Less(i, j int) bool { return s[i].d < s[j].d } func (s ndpairs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s ndpairs) Len() int { return len(s) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/rootdata.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "sort" "github.com/armon/go-radix" "github.com/golang/dep/gps/pkgtree" ) // rootdata holds static data and constraining rules from the root project for // use in solving. type rootdata struct { // Path to the root of the project on which gps is operating. dir string // Ruleset for ignored import paths. ir *pkgtree.IgnoredRuleset // Map of packages to require. req map[string]bool // A ProjectConstraints map containing the validated (guaranteed non-empty) // overrides declared by the root manifest. ovr ProjectConstraints // A map of the ProjectRoot (local names) that should be allowed to change chng map[ProjectRoot]struct{} // Flag indicating all projects should be allowed to change, without regard // for lock. chngall bool // A map of the project names listed in the root's lock. rlm map[ProjectRoot]LockedProject // A defensively copied instance of the root manifest. rm SimpleManifest // A defensively copied instance of the root lock. rl safeLock // A defensively copied instance of params.RootPackageTree rpt pkgtree.PackageTree // The ProjectAnalyzer to use for all GetManifestAndLock calls. an ProjectAnalyzer } // externalImportList returns a list of the unique imports from the root data. // Ignores and requires are taken into consideration, stdlib is excluded, and // errors within the local set of package are not backpropagated. func (rd rootdata) externalImportList(stdLibFn func(string) bool) []string { rm, _ := rd.rpt.ToReachMap(true, true, false, rd.ir) reach := rm.FlattenFn(stdLibFn) // If there are any requires, slide them into the reach list, as well. if len(rd.req) > 0 { // Make a map of imports that are both in the import path list and the // required list to avoid duplication. 
skip := make(map[string]bool, len(rd.req)) for _, r := range reach { if rd.req[r] { skip[r] = true } } for r := range rd.req { if !skip[r] { reach = append(reach, r) } } } sort.Strings(reach) return reach } func (rd rootdata) getApplicableConstraints(stdLibFn func(string) bool) []workingConstraint { pc := rd.rm.DependencyConstraints() // Ensure that overrides which aren't in the combined pc map already make it // in. Doing so makes input hashes equal in more useful cases. for pr, pp := range rd.ovr { if _, has := pc[pr]; !has { cpp := ProjectProperties{ Constraint: pp.Constraint, Source: pp.Source, } if cpp.Constraint == nil { cpp.Constraint = anyConstraint{} } pc[pr] = cpp } } // Now override them all to produce a consolidated workingConstraint slice combined := rd.ovr.overrideAll(pc) type wccount struct { count int wc workingConstraint } xt := radix.New() for _, wc := range combined { xt.Insert(string(wc.Ident.ProjectRoot), wccount{wc: wc}) } // Walk all dep import paths we have to consider and mark the corresponding // wc entry in the trie, if any for _, im := range rd.externalImportList(stdLibFn) { if stdLibFn(im) { continue } if pre, v, match := xt.LongestPrefix(im); match && isPathPrefixOrEqual(pre, im) { wcc := v.(wccount) wcc.count++ xt.Insert(pre, wcc) } } var ret []workingConstraint xt.Walk(func(s string, v interface{}) bool { wcc := v.(wccount) if wcc.count > 0 { ret = append(ret, wcc.wc) } return false }) return ret } func (rd rootdata) combineConstraints() []workingConstraint { return rd.ovr.overrideAll(rd.rm.DependencyConstraints()) } // needVersionListFor indicates whether we need a version list for a given // project root, based solely on general solver inputs (no constraint checking // required). 
Assuming the argument is not the root project itself, this will be // true if any of the following conditions hold: // // - ChangeAll is on // - The project is not in the lock // - The project is in the lock, but is also in the list of projects to change func (rd rootdata) needVersionsFor(pr ProjectRoot) bool { if rd.isRoot(pr) { return false } if rd.chngall { return true } if _, has := rd.rlm[pr]; !has { // not in the lock return true } if _, has := rd.chng[pr]; has { // in the lock, but marked for change return true } // in the lock, not marked for change return false } func (rd rootdata) isRoot(pr ProjectRoot) bool { return pr == ProjectRoot(rd.rpt.ImportRoot) } // rootAtom creates an atomWithPackages that represents the root project. func (rd rootdata) rootAtom() atomWithPackages { a := atom{ id: ProjectIdentifier{ ProjectRoot: ProjectRoot(rd.rpt.ImportRoot), }, // This is a hack so that the root project doesn't have a nil version. // It's sort of OK because the root never makes it out into the results. // We may need a more elegant solution if we discover other side // effects, though. v: rootRev, } list := make([]string, 0, len(rd.rpt.Packages)) for path, pkg := range rd.rpt.Packages { if pkg.Err != nil && !rd.ir.IsIgnored(path) { list = append(list, path) } } sort.Strings(list) return atomWithPackages{ a: a, pl: list, } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_bolt_encode_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "testing" "time" "github.com/golang/protobuf/proto" ) func TestPropertiesFromCache(t *testing.T) { for _, test := range []struct { name string ip ProjectRoot pp ProjectProperties }{ {"defaultBranch", "root", ProjectProperties{"", newDefaultBranch("test")}}, {"branch", "root", ProjectProperties{"source", NewBranch("test")}}, {"semver", "root", ProjectProperties{"", testSemverConstraint(t, "^1.0.0")}}, {"rev", "root", ProjectProperties{"source", Revision("test")}}, {"any", "root", ProjectProperties{"source", Any()}}, } { t.Run(test.name, func(t *testing.T) { var buf projectPropertiesMsgs buf.copyFrom(test.ip, test.pp) v, err := proto.Marshal(&buf.pp) if err != nil { t.Fatal(err) } if err := proto.Unmarshal(v, &buf.pp); err != nil { t.Fatal(err) } else { ip, pp, err := propertiesFromCache(&buf.pp) if err != nil { t.Fatal(err) } if ip != test.ip { t.Errorf("decoded unexpected ProjectRoot:\n\t(GOT): %#v\n\t(WNT): %#v", ip, test.ip) } if pp.Source != test.pp.Source { t.Errorf("decoded unexpected ProjectRoot.Source:\n\t(GOT): %s\n\t (WNT): %s", pp.Source, test.pp.Source) } if !pp.Constraint.identical(test.pp.Constraint) { t.Errorf("decoded non-identical ProjectRoot.Constraint:\n\t(GOT): %#v\n\t(WNT): %#v", pp.Constraint, test.pp.Constraint) } } }) } } func TestCacheTimestampedKey(t *testing.T) { pre := byte('p') for _, test := range []struct { ts time.Time suffix []byte }{ {time.Unix(0, 0), []byte{0, 0, 0, 0, 0, 0, 0, 0}}, {time.Unix(100, 0), []byte{0, 0, 0, 0, 0, 0, 0, 100}}, {time.Unix(255, 0), []byte{0, 0, 0, 0, 0, 0, 0, 255}}, {time.Unix(1+1<<8+1<<16+1<<24, 0), []byte{0, 0, 0, 0, 1, 1, 1, 1}}, {time.Unix(255<<48, 0), []byte{0, 255, 0, 0, 0, 0, 0, 0}}, } { b := cacheTimestampedKey(pre, test.ts) if !bytes.Equal(b, append([]byte{pre}, test.suffix...)) { t.Errorf("unexpected 
suffix:\n\t(GOT):%v\n\t(WNT):%v", b[4:], test.suffix) } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/satisfy.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps // check performs constraint checks on the provided atom. The set of checks // differ slightly depending on whether the atom is pkgonly, or if it's the // entire project being added for the first time. // // The goal is to determine whether selecting the atom would result in a state // where all the solver requirements are still satisfied. func (s *solver) check(a atomWithPackages, pkgonly bool) error { pa := a.a if nilpa == pa { // This shouldn't be able to happen, but if it does, it unequivocally // indicates a logical bug somewhere, so blowing up is preferable panic("canary - checking version of empty ProjectAtom") } s.mtr.push("satisfy") var err error defer func() { if err != nil { s.traceInfo(err) } s.mtr.pop() }() // If we're pkgonly, then base atom was already determined to be allowable, // so we can skip the checkAtomAllowable step. if !pkgonly { if err = s.checkAtomAllowable(pa); err != nil { return err } } if err = s.checkRequiredPackagesExist(a); err != nil { return err } var deps []completeDep _, deps, err = s.getImportsAndConstraintsOf(a) if err != nil { // An err here would be from the package fetcher; pass it straight back return err } // TODO(sdboyer) this deps list contains only packages not already selected // from the target atom (assuming one is selected at all). It's fine for // now, but won't be good enough when we get around to doing static // analysis. 
for _, dep := range deps { if err = s.checkIdentMatches(a, dep); err != nil { return err } if err = s.checkRootCaseConflicts(a, dep); err != nil { return err } if err = s.checkDepsConstraintsAllowable(a, dep); err != nil { return err } if err = s.checkDepsDisallowsSelected(a, dep); err != nil { return err } if err = s.checkRevisionExists(a, dep); err != nil { return err } if err = s.checkPackageImportsFromDepExist(a, dep); err != nil { return err } // TODO(sdboyer) add check that fails if adding this atom would create a loop } return nil } // checkAtomAllowable ensures that an atom itself is acceptable with respect to // the constraints established by the current solution. func (s *solver) checkAtomAllowable(pa atom) error { constraint := s.sel.getConstraint(pa.id) if constraint.Matches(pa.v) { return nil } // TODO(sdboyer) collect constraint failure reason (wait...aren't we, below?) deps := s.sel.getDependenciesOn(pa.id) var failparent []dependency for _, dep := range deps { if !dep.dep.Constraint.Matches(pa.v) { s.fail(dep.depender.id) failparent = append(failparent, dep) } } err := &versionNotAllowedFailure{ goal: pa, failparent: failparent, c: constraint, } return err } // checkRequiredPackagesExist ensures that all required packages enumerated by // existing dependencies on this atom are actually present in the atom. func (s *solver) checkRequiredPackagesExist(a atomWithPackages) error { ptree, err := s.b.ListPackages(a.a.id, a.a.v) if err != nil { // TODO(sdboyer) handle this more gracefully return err } deps := s.sel.getDependenciesOn(a.a.id) fp := make(map[string]errDeppers) // We inspect these in a bit of a roundabout way, in order to incrementally // build up the failure we'd return if there is, indeed, a missing package. // TODO(sdboyer) rechecking all of these every time is wasteful. Is there a shortcut? 
for _, dep := range deps { for _, pkg := range dep.dep.pl { if errdep, seen := fp[pkg]; seen { errdep.deppers = append(errdep.deppers, dep.depender) fp[pkg] = errdep } else { perr, has := ptree.Packages[pkg] if !has || perr.Err != nil { fp[pkg] = errDeppers{ err: perr.Err, deppers: []atom{dep.depender}, } } } } } if len(fp) > 0 { return &checkeeHasProblemPackagesFailure{ goal: a.a, failpkg: fp, } } return nil } // checkDepsConstraintsAllowable checks that the constraints of an atom on a // given dep are valid with respect to existing constraints. func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep completeDep) error { dep := cdep.workingConstraint constraint := s.sel.getConstraint(dep.Ident) // Ensure the constraint expressed by the dep has at least some possible // intersection with the intersection of existing constraints. if constraint.MatchesAny(dep.Constraint) { return nil } siblings := s.sel.getDependenciesOn(dep.Ident) // No admissible versions - visit all siblings and identify the disagreement(s) var failsib []dependency var nofailsib []dependency for _, sibling := range siblings { if !sibling.dep.Constraint.MatchesAny(dep.Constraint) { s.fail(sibling.depender.id) failsib = append(failsib, sibling) } else { nofailsib = append(nofailsib, sibling) } } return &disjointConstraintFailure{ goal: dependency{depender: a.a, dep: cdep}, failsib: failsib, nofailsib: nofailsib, c: constraint, } } // checkDepsDisallowsSelected ensures that an atom's constraints on a particular // dep are not incompatible with the version of that dep that's already been // selected. 
func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error { dep := cdep.workingConstraint selected, exists := s.sel.selected(dep.Ident) if exists && !dep.Constraint.Matches(selected.a.v) { s.fail(dep.Ident) return &constraintNotAllowedFailure{ goal: dependency{depender: a.a, dep: cdep}, v: selected.a.v, } } return nil } // checkIdentMatches ensures that the LocalName of a dep introduced by an atom, // has the same Source as what's already been selected (assuming anything's been // selected). // // In other words, this ensures that the solver never simultaneously selects two // identifiers with the same local name, but that disagree about where their // network source is. func (s *solver) checkIdentMatches(a atomWithPackages, cdep completeDep) error { dep := cdep.workingConstraint if curid, has := s.sel.getIdentFor(dep.Ident.ProjectRoot); has && !curid.equiv(dep.Ident) { deps := s.sel.getDependenciesOn(a.a.id) // Fail all the other deps, as there's no way atom can ever be // compatible with them for _, d := range deps { s.fail(d.depender.id) } return &sourceMismatchFailure{ shared: dep.Ident.ProjectRoot, sel: deps, current: curid.normalizedSource(), mismatch: dep.Ident.normalizedSource(), prob: a.a, } } return nil } // checkRootCaseConflicts ensures that the ProjectRoot specified in the completeDep // does not have case conflicts with any existing dependencies. // // We only need to check the ProjectRoot, rather than any packages therein, as // the later check for package existence is case-sensitive. 
func (s *solver) checkRootCaseConflicts(a atomWithPackages, cdep completeDep) error { pr := cdep.workingConstraint.Ident.ProjectRoot hasConflict, current := s.sel.findCaseConflicts(pr) if !hasConflict { return nil } curid, _ := s.sel.getIdentFor(current) deps := s.sel.getDependenciesOn(curid) for _, d := range deps { s.fail(d.depender.id) } // If a project has multiple packages that import each other, we treat that // as establishing a canonical case variant for the ProjectRoot. It's possible, // however, that that canonical variant is not the same one that others // imported it under. If that's the situation, then we'll have arrived here // when visiting the project, not its dependers, having misclassified its // internal imports as external. That means the atomWithPackages will // be the wrong case variant induced by the importers, and the cdep will be // a link pointing back at the canonical case variant. // // If this is the case, use a special failure, wrongCaseFailure, that // makes a stronger statement as to the correctness of case variants. // // TODO(sdboyer) This approach to marking failure is less than great, as // this will mark the current atom as failed, as well, causing the // backtracker to work through it. While that could prove fruitful, it's // quite likely just to be wasted effort. Addressing this - if that's a good // idea - would entail creating another path back out of checking to enable // backjumping directly to the incorrect importers. if current == a.a.id.ProjectRoot { return &wrongCaseFailure{ correct: pr, goal: dependency{depender: a.a, dep: cdep}, badcase: deps, } } return &caseMismatchFailure{ goal: dependency{depender: a.a, dep: cdep}, current: current, failsib: deps, } } // checkPackageImportsFromDepExist ensures that, if the dep is already selected, // the newly-required set of packages being placed on it exist and are valid. 
func (s *solver) checkPackageImportsFromDepExist(a atomWithPackages, cdep completeDep) error { sel, is := s.sel.selected(cdep.workingConstraint.Ident) if !is { // dep is not already selected; nothing to do return nil } ptree, err := s.b.ListPackages(sel.a.id, sel.a.v) if err != nil { // TODO(sdboyer) handle this more gracefully return err } e := &depHasProblemPackagesFailure{ goal: dependency{ depender: a.a, dep: cdep, }, v: sel.a.v, prob: make(map[string]error), } for _, pkg := range cdep.pl { perr, has := ptree.Packages[pkg] if !has || perr.Err != nil { if has { e.prob[pkg] = perr.Err } else { e.prob[pkg] = nil } } } if len(e.prob) > 0 { return e } return nil } // checkRevisionExists ensures that if a dependency is constrained by a // revision, that that revision actually exists. func (s *solver) checkRevisionExists(a atomWithPackages, cdep completeDep) error { r, isrev := cdep.Constraint.(Revision) if !isrev { // Constraint is not a revision; nothing to do return nil } present, _ := s.b.RevisionPresentIn(cdep.Ident, r) if present { return nil } return &nonexistentRevisionFailure{ goal: dependency{ depender: a.a, dep: cdep, }, r: r, } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solution.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "context" "fmt" "os" "path/filepath" "sync" "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) // A Solution is returned by a solver run. It is mostly just a Lock, with some // additional methods that report information about the solve run. type Solution interface { Lock // The name of the ProjectAnalyzer used in generating this solution. AnalyzerName() string // The version of the ProjectAnalyzer used in generating this solution. AnalyzerVersion() int // The name of the Solver used in generating this solution. SolverName() string // The version of the Solver used in generating this solution. SolverVersion() int Attempts() int } type solution struct { // The projects selected by the solver. p []LockedProject // The import inputs that created this solution (including requires). i []string // The number of solutions that were attempted att int // The analyzer info analyzerInfo ProjectAnalyzerInfo // The solver used in producing this solution solv Solver } // WriteProgress informs about the progress of WriteDepTree. type WriteProgress struct { Count int Total int LP LockedProject Failure bool } func (p WriteProgress) String() string { msg := "Wrote" if p.Failure { msg = "Failed to write" } return fmt.Sprintf("(%d/%d) %s %s@%s", p.Count, p.Total, msg, p.LP.Ident(), p.LP.Version()) } const concurrentWriters = 16 // WriteDepTree takes a basedir, a Lock and a RootPruneOptions and exports all // the projects listed in the lock to the appropriate target location within basedir. // // If the goal is to populate a vendor directory, basedir should be the absolute // path to that vendor directory, not its parent (a project root, typically). // // It requires a SourceManager to do the work. Prune options are read from the // passed manifest. 
// // If onWrite is not nil, it will be called after each project write. Calls are ordered and atomic. func WriteDepTree(basedir string, l Lock, sm SourceManager, co CascadingPruneOptions, onWrite func(WriteProgress)) error { if l == nil { return fmt.Errorf("must provide non-nil Lock to WriteDepTree") } if err := os.MkdirAll(basedir, 0777); err != nil { return err } g, ctx := errgroup.WithContext(context.TODO()) lps := l.Projects() sem := make(chan struct{}, concurrentWriters) var cnt struct { sync.Mutex i int } for i := range lps { p := lps[i] // per-iteration copy g.Go(func() error { err := func() error { select { case sem <- struct{}{}: defer func() { <-sem }() case <-ctx.Done(): return ctx.Err() } ident := p.Ident() projectRoot := string(ident.ProjectRoot) to := filepath.FromSlash(filepath.Join(basedir, projectRoot)) if err := sm.ExportProject(ctx, ident, p.Version(), to); err != nil { return errors.Wrapf(err, "failed to export %s", projectRoot) } err := PruneProject(to, p, co.PruneOptionsFor(ident.ProjectRoot)) if err != nil { return errors.Wrapf(err, "failed to prune %s", projectRoot) } return ctx.Err() }() switch err { case context.Canceled, context.DeadlineExceeded: // Don't report "secondary" errors. default: if onWrite != nil { // Increment and call atomically to prevent re-ordering. 
cnt.Lock() cnt.i++ onWrite(WriteProgress{ Count: cnt.i, Total: len(lps), LP: p, Failure: err != nil, }) cnt.Unlock() } } return err }) } err := g.Wait() if err != nil { os.RemoveAll(basedir) } return errors.Wrap(err, "failed to write dep tree") } func (r solution) Projects() []LockedProject { return r.p } func (r solution) InputImports() []string { return r.i } func (r solution) Attempts() int { return r.att } func (r solution) AnalyzerName() string { return r.analyzerInfo.Name } func (r solution) AnalyzerVersion() int { return r.analyzerInfo.Version } func (r solution) SolverName() string { return r.solv.Name() } func (r solution) SolverVersion() int { return r.solv.Version() }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/filesystem.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "os" "path/filepath" "strings" "github.com/pkg/errors" ) // fsLink represents a symbolic link. type fsLink struct { path string to string // circular denotes if evaluating the symlink fails with "too many links" error. // This error means that it's very likely that the symlink has a circular reference. circular bool // broken denotes that attempting to resolve the link fails, most likely because // the destaination doesn't exist. broken bool } // filesystemState represents the state of a file system. type filesystemState struct { root string dirs []string files []string links []fsLink } func (s filesystemState) setup() error { for _, dir := range s.dirs { p := filepath.Join(s.root, dir) if err := os.MkdirAll(p, 0777); err != nil { return errors.Errorf("os.MkdirAll(%q, 0777) err=%q", p, err) } } for _, file := range s.files { p := filepath.Join(s.root, file) f, err := os.Create(p) if err != nil { return errors.Errorf("os.Create(%q) err=%q", p, err) } if err := f.Close(); err != nil { return errors.Errorf("file %q Close() err=%q", p, err) } } for _, link := range s.links { p := filepath.Join(s.root, link.path) // On Windows, relative symlinks confuse filepath.Walk. So, we'll just sigh // and do absolute links, assuming they are relative to the directory of // link.path. // // Reference: https://github.com/golang/go/issues/17540 // // TODO(ibrasho): This was fixed in Go 1.9. Remove this when support for // 1.8 is dropped. dir := filepath.Dir(p) to := "" if link.to != "" { to = filepath.Join(dir, link.to) } if err := os.Symlink(to, p); err != nil { return errors.Errorf("os.Symlink(%q, %q) err=%q", to, p, err) } } return nil } // deriveFilesystemState returns a filesystemState based on the state of // the filesystem on root. 
func deriveFilesystemState(root string) (filesystemState, error) { fs := filesystemState{root: root} err := filepath.Walk(fs.root, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if path == fs.root { return nil } relPath, err := filepath.Rel(fs.root, path) if err != nil { return err } if (info.Mode() & os.ModeSymlink) != 0 { l := fsLink{path: relPath} l.to, err = filepath.EvalSymlinks(path) if err != nil && strings.HasSuffix(err.Error(), "too many links") { l.circular = true } else if err != nil && os.IsNotExist(err) { l.broken = true } else if err != nil { return err } fs.links = append(fs.links, l) return nil } if info.IsDir() { fs.dirs = append(fs.dirs, relPath) return nil } fs.files = append(fs.files, relPath) return nil }) if err != nil { return filesystemState{}, err } return fs, nil }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/doc.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package gps is a Go packaging solver library. package gps
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solution_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "io/ioutil" "log" "os" "path" "path/filepath" "runtime" "testing" "github.com/golang/dep/internal/test" ) var basicResult solution func pi(n string) ProjectIdentifier { return ProjectIdentifier{ ProjectRoot: ProjectRoot(n), } } func init() { basicResult = solution{ att: 1, p: []LockedProject{ pa2lp(atom{ id: pi("github.com/sdboyer/testrepo"), v: NewBranch("master").Pair(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")), }, nil), pa2lp(atom{ id: pi("github.com/Masterminds/VCSTestRepo"), v: NewVersion("1.0.0").Pair(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")), }, nil), }, } basicResult.analyzerInfo = (naiveAnalyzer{}).Info() } func testWriteDepTree(t *testing.T) { t.Parallel() // This test is a bit slow, skip it on -short if testing.Short() { t.Skip("Skipping dep tree writing test in short mode") } requiresBins(t, "git", "hg", "bzr") tmp, err := ioutil.TempDir("", "writetree") if err != nil { t.Fatalf("Failed to create temp dir: %s", err) } defer os.RemoveAll(tmp) // bzr appears to not be consistent across...versions? platforms? (who // knows) with respect to its internal object identifiers. This has tanked // our Windows tests, because it's sniffing for this one revision, but the // rev is reported differently on some Windows versions, with some versions // of bzr. It's especially vexing because these tests worked fine for years, // then abruptly broke for no obvious reason. 
var bzrv Version = NewVersion("1.0.0") if runtime.GOOS != "windows" { bzrv = bzrv.(semVersion).Pair(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) } r := solution{ att: 1, p: []LockedProject{ pa2lp(atom{ id: pi("github.com/sdboyer/testrepo"), v: NewBranch("master").Pair(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")), }, nil), pa2lp(atom{ id: pi("launchpad.net/govcstestbzrrepo"), v: bzrv, }, nil), pa2lp(atom{ id: pi("bitbucket.org/sdboyer/withbm"), v: NewVersion("v1.0.0").Pair(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), }, nil), }, } sm, clean := mkNaiveSM(t) defer clean() // Trigger simultaneous fetch of all three to speed up test execution time for _, p := range r.p { go func(pi ProjectIdentifier) { sm.SyncSourceFor(pi) }(p.Ident()) } // nil lock/result should err immediately err = WriteDepTree(tmp, nil, sm, defaultCascadingPruneOptions(), nil) if err == nil { t.Errorf("Should error if nil lock is passed to WriteDepTree") } err = WriteDepTree(tmp, r, sm, defaultCascadingPruneOptions(), nil) if err != nil { t.Errorf("Unexpected error while creating vendor tree: %s", err) } if _, err = os.Stat(filepath.Join(tmp, "github.com", "sdboyer", "testrepo")); err != nil { t.Errorf("Directory for github.com/sdboyer/testrepo does not exist") } if _, err = os.Stat(filepath.Join(tmp, "launchpad.net", "govcstestbzrrepo")); err != nil { t.Errorf("Directory for launchpad.net/govcstestbzrrepo does not exist") } if _, err = os.Stat(filepath.Join(tmp, "bitbucket.org", "sdboyer", "withbm")); err != nil { t.Errorf("Directory for bitbucket.org/sdboyer/withbm does not exist") } } func BenchmarkCreateVendorTree(b *testing.B) { // We're fs-bound here, so restrict to single parallelism b.SetParallelism(1) r := basicResult tmp := path.Join(os.TempDir(), "vsolvtest") clean := true sm, err := NewSourceManager(SourceManagerConfig{ Cachedir: path.Join(tmp, "cache"), Logger: log.New(test.Writer{TB: b}, "", 0), }) if err != nil { b.Fatalf("failed to create 
SourceManager: %q", err) } // Prefetch the projects before timer starts for _, lp := range r.p { err := sm.SyncSourceFor(lp.Ident()) if err != nil { b.Errorf("failed getting project info during prefetch: %s", err) clean = false } } if clean { b.ResetTimer() b.StopTimer() exp := path.Join(tmp, "export") for i := 0; i < b.N; i++ { // Order the loop this way to make it easy to disable final cleanup, to // ease manual inspection os.RemoveAll(exp) b.StartTimer() err = WriteDepTree(exp, r, sm, defaultCascadingPruneOptions(), nil) b.StopTimer() if err != nil { b.Errorf("unexpected error after %v iterations: %s", i, err) break } } } sm.Release() os.RemoveAll(tmp) // comment this to leave temp dir behind for inspection }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/typed_radix_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import "testing" // basically a regression test func TestPathPrefixOrEqual(t *testing.T) { if !isPathPrefixOrEqual("foo", "foo") { t.Error("Same path should return true") } if isPathPrefixOrEqual("foo", "fooer") { t.Error("foo is not a path-type prefix of fooer") } if !isPathPrefixOrEqual("foo", "foo/bar") { t.Error("foo is a path prefix of foo/bar") } if isPathPrefixOrEqual("foo", "foo/") { t.Error("special case - foo is not a path prefix of foo/") } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/vcs_source.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "context" "fmt" "os" "path/filepath" "regexp" "strings" "github.com/Masterminds/semver" "github.com/golang/dep/gps/pkgtree" "github.com/golang/dep/internal/fs" "github.com/pkg/errors" ) type baseVCSSource struct { repo ctxRepo } func (bs *baseVCSSource) sourceType() string { return string(bs.repo.Vcs()) } func (bs *baseVCSSource) existsLocally(ctx context.Context) bool { return bs.repo.CheckLocal() } func (bs *baseVCSSource) existsUpstream(ctx context.Context) bool { return bs.repo.Ping() } func (*baseVCSSource) existsCallsListVersions() bool { return false } func (*baseVCSSource) listVersionsRequiresLocal() bool { return false } func (bs *baseVCSSource) upstreamURL() string { return bs.repo.Remote() } func (bs *baseVCSSource) disambiguateRevision(ctx context.Context, r Revision) (Revision, error) { ci, err := bs.repo.CommitInfo(string(r)) if err != nil { return "", err } return Revision(ci.Commit), nil } func (bs *baseVCSSource) getManifestAndLock(ctx context.Context, pr ProjectRoot, r Revision, an ProjectAnalyzer) (Manifest, Lock, error) { err := bs.repo.updateVersion(ctx, r.String()) if err != nil { return nil, nil, unwrapVcsErr(err) } m, l, err := an.DeriveManifestAndLock(bs.repo.LocalPath(), pr) if err != nil { return nil, nil, err } if l != nil && l != Lock(nil) { l = prepLock(l) } return prepManifest(m), l, nil } func (bs *baseVCSSource) revisionPresentIn(r Revision) (bool, error) { return bs.repo.IsReference(string(r)), nil } // initLocal clones/checks out the upstream repository to disk for the first // time. 
func (bs *baseVCSSource) initLocal(ctx context.Context) error { err := bs.repo.get(ctx) if err != nil { return unwrapVcsErr(err) } return nil } // updateLocal ensures the local data (versions and code) we have about the // source is fully up to date with that of the canonical upstream source. func (bs *baseVCSSource) updateLocal(ctx context.Context) error { err := bs.repo.fetch(ctx) if err == nil { return nil } ec, ok := bs.repo.(ensureCleaner) if !ok { return err } if err := ec.ensureClean(ctx); err != nil { return unwrapVcsErr(err) } if err := bs.repo.fetch(ctx); err != nil { return unwrapVcsErr(err) } return nil } func (bs *baseVCSSource) maybeClean(ctx context.Context) error { ec, ok := bs.repo.(ensureCleaner) if !ok { return nil } if err := ec.ensureClean(ctx); err != nil { return unwrapVcsErr(err) } return nil } func (bs *baseVCSSource) listPackages(ctx context.Context, pr ProjectRoot, r Revision) (ptree pkgtree.PackageTree, err error) { err = bs.repo.updateVersion(ctx, r.String()) if err != nil { err = unwrapVcsErr(err) } else { ptree, err = pkgtree.ListPackages(bs.repo.LocalPath(), string(pr)) } return } func (bs *baseVCSSource) exportRevisionTo(ctx context.Context, r Revision, to string) error { // Only make the parent dir, as CopyDir will balk on trying to write to an // empty but existing dir. if err := os.MkdirAll(filepath.Dir(to), 0777); err != nil { return err } if err := bs.repo.updateVersion(ctx, r.String()); err != nil { return unwrapVcsErr(err) } return fs.CopyDir(bs.repo.LocalPath(), to) } var ( gitHashRE = regexp.MustCompile(`^[a-f0-9]{40}$`) ) // gitSource is a generic git repository implementation that should work with // all standard git remotes. 
type gitSource struct {
	baseVCSSource
}

// exportRevisionTo writes the tree at rev into the directory at to, using a
// temporary git index plus checkout-index rather than copying the worktree.
func (s *gitSource) exportRevisionTo(ctx context.Context, rev Revision, to string) error {
	r := s.repo

	if err := os.MkdirAll(to, 0777); err != nil {
		return err
	}

	// Back up original index
	idx, bak := filepath.Join(r.LocalPath(), ".git", "index"), filepath.Join(r.LocalPath(), ".git", "origindex")
	err := fs.RenameWithFallback(idx, bak)
	if err != nil {
		return err
	}

	// could have an err here...but it's hard to imagine how?
	defer fs.RenameWithFallback(bak, idx)

	{
		cmd := commandContext(ctx, "git", "read-tree", rev.String())
		cmd.SetDir(r.LocalPath())
		if out, err := cmd.CombinedOutput(); err != nil {
			return errors.Wrap(err, string(out))
		}
	}

	// Ensure we have exactly one trailing slash
	to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator)

	// Checkout from our temporary index to the desired target location on
	// disk; now it's git's job to make it fast.
	//
	// Sadly, this approach *does* also write out vendor dirs. There doesn't
	// appear to be a way to make checkout-index respect sparse checkout
	// rules (-a supersedes it). The alternative is using plain checkout,
	// though we have a bunch of housekeeping to do to set up, then tear
	// down, the sparse checkout controls, as well as restore the original
	// index and HEAD.
	{
		cmd := commandContext(ctx, "git", "checkout-index", "-a", "--prefix="+to)
		cmd.SetDir(r.LocalPath())
		if out, err := cmd.CombinedOutput(); err != nil {
			return errors.Wrap(err, string(out))
		}
	}

	return nil
}

// isValidHash reports whether hash looks like a full 40-hex-digit git hash.
func (s *gitSource) isValidHash(hash []byte) bool {
	return gitHashRE.Match(hash)
}

// existsCallsListVersions is true for git: a successful ls-remote both proves
// upstream existence and yields the full version list.
func (*gitSource) existsCallsListVersions() bool {
	return true
}

// listVersions enumerates upstream branches and tags via `git ls-remote` and
// pairs each with its revision, marking default branches along the way.
func (s *gitSource) listVersions(ctx context.Context) (vlist []PairedVersion, err error) {
	r := s.repo

	cmd := commandContext(ctx, "git", "ls-remote", r.Remote())
	// We want to invoke from a place where it's not possible for there to be a
	// .git file instead of a .git directory, as git ls-remote will choke on the
	// former and erroneously quit. However, we can't be sure that the repo
	// exists on disk yet at this point; if it doesn't, then instead use the
	// parent of the local path, as that's still likely a good bet.
	if r.CheckLocal() {
		cmd.SetDir(r.LocalPath())
	} else {
		cmd.SetDir(filepath.Dir(r.LocalPath()))
	}
	// Ensure no prompting for PWs
	cmd.SetEnv(append([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()...))
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, errors.Wrap(err, string(out))
	}

	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
	if len(all) == 1 && len(all[0]) == 0 {
		return nil, fmt.Errorf("no data returned from ls-remote")
	}

	// Pull out the HEAD rev (it's always first) so we know what branches to
	// mark as default. This is, perhaps, not the best way to glean this, but it
	// was good enough for git itself until 1.8.5. Also, the alternative is
	// sniffing data out of the pack protocol, which is a separate request, and
	// also waaaay more than we want to do right now.
	//
	// The cost is that we could potentially have multiple branches marked as
	// the default. If that does occur, a later check (again, emulating git
	// <1.8.5 behavior) further narrows the failure mode by choosing master as
	// the sole default branch if a) master exists and b) master is one of the
	// branches marked as a default.
	//
	// This all reduces the failure mode to a very narrow range of
	// circumstances. Nevertheless, if we do end up emitting multiple
	// default branches, it is possible that a user could end up following a
	// non-default branch, IF:
	//
	// * Multiple branches match the HEAD rev
	// * None of them are master
	// * The solver makes it into the branch list in the version queue
	// * The user/tool has provided no constraint (so, anyConstraint)
	// * A branch that is not actually the default, but happens to share the
	//   rev, is lexicographically less than the true default branch
	//
	// If all of those conditions are met, then the user would end up with an
	// erroneous non-default branch in their lock file.
	var headrev Revision
	var onedef, multidef, defmaster bool

	smap := make(map[string]int)
	uniq := 0
	vlist = make([]PairedVersion, len(all))
	for _, pair := range all {
		var v PairedVersion

		// Valid `git ls-remote` output should start with hash, be at least
		// 45 chars long and 40th character should be '\t'
		//
		// See: https://github.com/golang/dep/pull/1160#issuecomment-328843519
		if len(pair) < 45 || pair[40] != '\t' || !s.isValidHash(pair[:40]) {
			continue
		}

		// NOTE(review): the byte offsets below (46:51 "heads", 52:, 46:50
		// "tags", 51:) assume refs of the form "refs/heads/<name>" and
		// "refs/tags/<name>"; a 45-byte line with an unexpected ref shape
		// would presumably slice out of range — verify against upstream.
		if string(pair[41:]) == "HEAD" {
			// If HEAD is present, it's always first
			headrev = Revision(pair[:40])
		} else if string(pair[46:51]) == "heads" {
			rev := Revision(pair[:40])

			isdef := rev == headrev
			n := string(pair[52:])
			if isdef {
				if onedef {
					multidef = true
				}
				onedef = true
				if n == "master" {
					defmaster = true
				}
			}
			v = branchVersion{
				name:      n,
				isDefault: isdef,
			}.Pair(rev).(PairedVersion)

			vlist[uniq] = v
			uniq++
		} else if string(pair[46:50]) == "tags" {
			vstr := string(pair[51:])
			if strings.HasSuffix(vstr, "^{}") {
				// If the suffix is there, then we *know* this is the rev of
				// the underlying commit object that we actually want
				vstr = strings.TrimSuffix(vstr, "^{}")
				if i, ok := smap[vstr]; ok {
					v = NewVersion(vstr).Pair(Revision(pair[:40]))
					vlist[i] = v
					continue
				}
			} else if _, ok := smap[vstr]; ok {
				// Already saw the deref'd version of this tag, if one
				// exists, so skip this.
				continue

				// Can only hit this branch if we somehow got the deref'd
				// version first. Which should be impossible, but this
				// covers us in case of weirdness, anyway.
			}
			v = NewVersion(vstr).Pair(Revision(pair[:40]))
			smap[vstr] = uniq
			vlist[uniq] = v
			uniq++
		}
	}

	// Trim off excess from the slice
	vlist = vlist[:uniq]

	// There were multiple default branches, but one was master. So, go through
	// and strip the default flag from all the non-master branches.
	if multidef && defmaster {
		for k, v := range vlist {
			pv := v.(PairedVersion)
			if bv, ok := pv.Unpair().(branchVersion); ok {
				if bv.name != "master" && bv.isDefault {
					bv.isDefault = false
					vlist[k] = bv.Pair(pv.Revision())
				}
			}
		}
	}

	return
}

// gopkginSource is a specialized git source that performs additional filtering
// according to the input URL.
type gopkginSource struct {
	gitSource
	// major is the gopkg.in major version to filter versions against.
	major uint64
	// unstable selects versions carrying the "-unstable" suffix.
	unstable bool
	// The aliased URL we report as being the one we talk to, even though we're
	// actually talking directly to GitHub.
	aliasURL string
}

// upstreamURL reports the gopkg.in alias rather than the real GitHub URL.
func (s *gopkginSource) upstreamURL() string {
	return s.aliasURL
}

// listVersions fetches the full git version list and then applies gopkg.in's
// major-version and stability filtering rules.
func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, error) {
	ovlist, err := s.gitSource.listVersions(ctx)
	if err != nil {
		return nil, err
	}

	// Apply gopkg.in's filtering rules
	vlist := make([]PairedVersion, len(ovlist))
	k := 0
	var dbranch int // index of branch to be marked default
	var bsv semver.Version
	var defaultBranch PairedVersion
	tryDefaultAsV0 := s.major == 0
	for _, v := range ovlist {
		// all git versions will always be paired
		pv := v.(versionPair)
		switch tv := pv.v.(type) {
		case semVersion:
			tryDefaultAsV0 = false
			if tv.sv.Major() == s.major && !s.unstable {
				vlist[k] = v
				k++
			}
		case branchVersion:
			if tv.isDefault && defaultBranch == nil {
				defaultBranch = pv
			}

			// The semver lib isn't exactly the same as gopkg.in's logic, but
			// it's close enough that it's probably fine to use. We can be more
			// exact if real problems crop up.
			sv, err := semver.NewVersion(tv.name)
			if err != nil {
				continue
			}

			tryDefaultAsV0 = false
			if sv.Major() != s.major {
				// not the same major version as specified in the import path constraint
				continue
			}

			// Gopkg.in has a special "-unstable" suffix which we need to handle
			// separately.
			if s.unstable != strings.HasSuffix(tv.name, gopkgUnstableSuffix) {
				continue
			}

			// Turn off the default branch marker unconditionally; we can't know
			// which one to mark as default until we've seen them all
			tv.isDefault = false
			// Figure out if this is the current leader for default branch
			if bsv == (semver.Version{}) || bsv.LessThan(sv) {
				bsv = sv
				dbranch = k
			}
			pv.v = tv
			vlist[k] = pv
			k++
		}
		// The switch skips plainVersions because they cannot possibly meet
		// gopkg.in's requirements
	}

	vlist = vlist[:k]
	if bsv != (semver.Version{}) {
		dbv := vlist[dbranch].(versionPair)
		vlist[dbranch] = branchVersion{
			name:      dbv.v.(branchVersion).name,
			isDefault: true,
		}.Pair(dbv.r)
	}

	// Treat the default branch as v0 only when no other semver branches/tags exist
	// See http://labix.org/gopkg.in#VersionZero
	if tryDefaultAsV0 && defaultBranch != nil {
		vlist = append(vlist, defaultBranch)
	}

	return vlist, nil
}

// bzrSource is a generic bzr repository implementation that should work with
// all standard bazaar remotes.
type bzrSource struct {
	baseVCSSource
}

// exportRevisionTo copies the tree at rev to the directory at to, then strips
// the .bzr metadata directory from the copy.
func (s *bzrSource) exportRevisionTo(ctx context.Context, rev Revision, to string) error {
	if err := s.baseVCSSource.exportRevisionTo(ctx, rev, to); err != nil {
		return err
	}

	return os.RemoveAll(filepath.Join(to, ".bzr"))
}

// listVersionsRequiresLocal is true: bzr can only list tags from a local clone.
func (s *bzrSource) listVersionsRequiresLocal() bool {
	return true
}

// listVersions enumerates tags via `bzr tags` and appends the default branch,
// pairing each with its revision id.
func (s *bzrSource) listVersions(ctx context.Context) ([]PairedVersion, error) {
	r := s.repo

	// Now, list all the tags
	tagsCmd := commandContext(ctx, "bzr", "tags", "--show-ids", "-v")
	tagsCmd.SetDir(r.LocalPath())
	out, err := tagsCmd.CombinedOutput()
	if err != nil {
		return nil, errors.Wrap(err, string(out))
	}

	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))

	// Resolve the revision id of the branch tip.
	viCmd := commandContext(ctx, "bzr", "version-info", "--custom", "--template={revision_id}", "--revision=branch:.")
	viCmd.SetDir(r.LocalPath())
	branchrev, err := viCmd.CombinedOutput()
	if err != nil {
		return nil, errors.Wrap(err, string(branchrev))
	}

	vlist := make([]PairedVersion, 0, len(all)+1)

	// Now, all the tags.
	for _, line := range all {
		idx := bytes.IndexByte(line, 32) // space
		v := NewVersion(string(line[:idx]))
		r := Revision(bytes.TrimSpace(line[idx:]))
		vlist = append(vlist, v.Pair(r))
	}

	// Last, add the default branch, hardcoding the visual representation of it
	// that bzr uses when operating in the workflow mode we're using.
	v := newDefaultBranch("(default)")
	vlist = append(vlist, v.Pair(Revision(string(branchrev))))

	return vlist, nil
}

// disambiguateRevision validates that r is a real revision by looking it up,
// returning r unchanged on success.
func (s *bzrSource) disambiguateRevision(ctx context.Context, r Revision) (Revision, error) {
	// If we used the default baseVCSSource behavior here, we would return the
	// bazaar revision number, which is not a globally unique identifier - it is
	// only unique within a branch. This is just the way that
	// github.com/Masterminds/vcs chooses to handle bazaar. We want a
	// disambiguated unique ID, though, so we need slightly different behavior:
	// check whether r doesn't error when we try to look it up. If so, trust that
	// it's a revision.
	_, err := s.repo.CommitInfo(string(r))
	if err != nil {
		return "", err
	}
	return r, nil
}

// hgSource is a generic hg repository implementation that should work with
// all standard mercurial servers.
type hgSource struct {
	baseVCSSource
}

// exportRevisionTo copies the tree at rev to the directory at to, then strips
// the .hg metadata directory from the copy.
func (s *hgSource) exportRevisionTo(ctx context.Context, rev Revision, to string) error {
	// TODO: use hg instead of the generic approach in
	// baseVCSSource.exportRevisionTo to make it faster.
	if err := s.baseVCSSource.exportRevisionTo(ctx, rev, to); err != nil {
		return err
	}

	return os.RemoveAll(filepath.Join(to, ".hg"))
}

// listVersionsRequiresLocal is true: hg can only list versions from a clone.
func (s *hgSource) listVersionsRequiresLocal() bool {
	return true
}

// hgCmd builds an hg command rooted at the local clone, with HGRCPATH blanked
// so user extensions cannot alter the output format we parse.
func (s *hgSource) hgCmd(ctx context.Context, args ...string) cmd {
	r := s.repo
	cmd := commandContext(ctx, "hg", args...)
	cmd.SetDir(r.LocalPath())
	// Let's make sure extensions don't interfere with our expectations
	// regarding the output of commands.
	cmd.Cmd.Env = append(cmd.Cmd.Env, "HGRCPATH=")
	return cmd
}

// listVersions enumerates tags, bookmarks, and branches from the local clone,
// pairing each with its revision. A bookmark literally named "@" displaces
// mercurial's "default" branch as the default version.
func (s *hgSource) listVersions(ctx context.Context) ([]PairedVersion, error) {
	var vlist []PairedVersion

	// Now, list all the tags
	tagsCmd := s.hgCmd(ctx, "tags", "--debug", "--verbose")
	out, err := tagsCmd.CombinedOutput()
	if err != nil {
		return nil, errors.Wrap(err, string(out))
	}

	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
	lbyt := []byte("local")
	nulrev := []byte("0000000000000000000000000000000000000000")
	for _, line := range all {
		if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
			// Skip local tags
			continue
		}

		// tip is magic, don't include it
		if bytes.HasPrefix(line, []byte("tip")) {
			continue
		}

		// Split on colon; this gets us the rev and the tag plus local revno
		pair := bytes.Split(line, []byte(":"))
		if bytes.Equal(nulrev, pair[1]) {
			// null rev indicates this tag is marked for deletion
			continue
		}

		idx := bytes.IndexByte(pair[0], 32) // space
		v := NewVersion(string(pair[0][:idx])).Pair(Revision(pair[1])).(PairedVersion)
		vlist = append(vlist, v)
	}

	// bookmarks next, because the presence of the magic @ bookmark has to
	// determine how we handle the branches
	var magicAt bool
	bookmarksCmd := s.hgCmd(ctx, "bookmarks", "--debug")
	out, err = bookmarksCmd.CombinedOutput()
	if err != nil {
		// better nothing than partial and misleading
		return nil, errors.Wrap(err, string(out))
	}

	out = bytes.TrimSpace(out)
	if !bytes.Equal(out, []byte("no bookmarks set")) {
		all = bytes.Split(out, []byte("\n"))
		for _, line := range all {
			// Trim leading spaces, and * marker if present
			line = bytes.TrimLeft(line, " *")
			pair := bytes.Split(line, []byte(":"))
			// if this doesn't split exactly once, we have something weird
			if len(pair) != 2 {
				continue
			}

			// Split on colon; this gets us the rev and the branch plus local revno
			idx := bytes.IndexByte(pair[0], 32) // space
			// if it's the magic @ marker, make that the default branch
			str := string(pair[0][:idx])
			var v PairedVersion
			if str == "@" {
				magicAt = true
				v = newDefaultBranch(str).Pair(Revision(pair[1])).(PairedVersion)
			} else {
				v = NewBranch(str).Pair(Revision(pair[1])).(PairedVersion)
			}
			vlist = append(vlist, v)
		}
	}

	cmd := s.hgCmd(ctx, "branches", "-c", "--debug")
	out, err = cmd.CombinedOutput()
	if err != nil {
		// better nothing than partial and misleading
		return nil, errors.Wrap(err, string(out))
	}

	all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
	for _, line := range all {
		// Trim inactive and closed suffixes, if present; we represent these
		// anyway
		line = bytes.TrimSuffix(line, []byte(" (inactive)"))
		line = bytes.TrimSuffix(line, []byte(" (closed)"))

		// Split on colon; this gets us the rev and the branch plus local revno
		pair := bytes.Split(line, []byte(":"))
		idx := bytes.IndexByte(pair[0], 32) // space
		str := string(pair[0][:idx])
		// if there was no magic @ bookmark, and this is mercurial's magic
		// "default" branch, then mark it as default branch
		var v PairedVersion
		if !magicAt && str == "default" {
			v = newDefaultBranch(str).Pair(Revision(pair[1])).(PairedVersion)
		} else {
			v = NewBranch(str).Pair(Revision(pair[1])).(PairedVersion)
		}
		vlist = append(vlist, v)
	}

	return vlist, nil
}
// ---- file boundary: gps/vcs_source.go ends; gps/maybe_source.go begins ----
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"context"
	"fmt"
	"net/url"
	"os"
	"path/filepath"

	"github.com/Masterminds/vcs"
)

// A maybeSource represents a set of information that, given some
// typically-expensive network effort, could be transformed into a proper source.
//
// Wrapping these up as their own type achieves two goals:
//
// * Allows control over when deduction logic triggers network activity
// * Makes it easy to attempt multiple URLs for a given import path
type maybeSource interface {
	// try tries to set up a source.
	try(ctx context.Context, cachedir string) (source, error)
	URL() *url.URL
	fmt.Stringer
}

// maybeSources is an ordered list of candidate sources to be attempted.
type maybeSources []maybeSource

// possibleURLs returns the URL of every candidate, in order.
func (mbs maybeSources) possibleURLs() []*url.URL {
	urlslice := make([]*url.URL, len(mbs))
	for i, mb := range mbs {
		urlslice[i] = mb.URL()
	}
	return urlslice
}

// sourceCachePath returns a url-sanitized source cache dir path.
func sourceCachePath(cacheDir, sourceURL string) string {
	return filepath.Join(cacheDir, "sources", sanitizer.Replace(sourceURL))
}

// maybeGitSource is a candidate backed by a plain git remote.
type maybeGitSource struct {
	url *url.URL
}

// try sets up (or reuses) the on-disk clone for the git remote. On an initial
// failure the cache dir is wiped and the setup retried once, in case a stale
// or corrupt cache was the cause.
func (m maybeGitSource) try(ctx context.Context, cachedir string) (source, error) {
	ustr := m.url.String()
	path := sourceCachePath(cachedir, ustr)

	r, err := vcs.NewGitRepo(ustr, path)
	if err != nil {
		os.RemoveAll(path)
		r, err = vcs.NewGitRepo(ustr, path)
		if err != nil {
			return nil, unwrapVcsErr(err)
		}
	}

	return &gitSource{
		baseVCSSource: baseVCSSource{
			repo: &gitRepo{r},
		},
	}, nil
}

func (m maybeGitSource) URL() *url.URL {
	return m.url
}

func (m maybeGitSource) String() string {
	return fmt.Sprintf("%T: %s", m, ufmt(m.url))
}

// maybeGopkginSource is a candidate for a gopkg.in import path, which is
// always served from a GitHub repository behind the alias.
type maybeGopkginSource struct {
	// the original gopkg.in import path. this is used to create the on-disk
	// location to avoid duplicate resource management - e.g., if instances of
	// a gopkg.in project are accessed via different schemes, or if the
	// underlying github repository is accessed directly.
	opath string
	// the actual upstream URL - always github
	url *url.URL
	// the major version to apply for filtering
	major uint64
	// whether or not the source package is "unstable"
	unstable bool
}

// try sets up the clone keyed by the gopkg.in alias path (not the real GitHub
// URL), retrying once after wiping the cache dir on initial failure.
func (m maybeGopkginSource) try(ctx context.Context, cachedir string) (source, error) {
	// We don't actually need a fully consistent transform into the on-disk path
	// - just something that's unique to the particular gopkg.in domain context.
	// So, it's OK to just dumb-join the scheme with the path.
	aliasURL := m.url.Scheme + "://" + m.opath
	path := sourceCachePath(cachedir, aliasURL)
	ustr := m.url.String()

	r, err := vcs.NewGitRepo(ustr, path)
	if err != nil {
		os.RemoveAll(path)
		r, err = vcs.NewGitRepo(ustr, path)
		if err != nil {
			return nil, unwrapVcsErr(err)
		}
	}

	return &gopkginSource{
		gitSource: gitSource{
			baseVCSSource: baseVCSSource{
				repo: &gitRepo{r},
			},
		},
		major:    m.major,
		unstable: m.unstable,
		aliasURL: aliasURL,
	}, nil
}

// URL reports the gopkg.in alias, not the underlying GitHub URL.
func (m maybeGopkginSource) URL() *url.URL {
	return &url.URL{
		Scheme: m.url.Scheme,
		Path:   m.opath,
	}
}

func (m maybeGopkginSource) String() string {
	return fmt.Sprintf("%T: %s (v%v) %s ", m, m.opath, m.major, ufmt(m.url))
}

// maybeBzrSource is a candidate backed by a bazaar remote.
type maybeBzrSource struct {
	url *url.URL
}

// try sets up the on-disk bzr branch, retrying once after wiping the cache
// dir on initial failure.
func (m maybeBzrSource) try(ctx context.Context, cachedir string) (source, error) {
	ustr := m.url.String()
	path := sourceCachePath(cachedir, ustr)

	r, err := vcs.NewBzrRepo(ustr, path)
	if err != nil {
		os.RemoveAll(path)
		r, err = vcs.NewBzrRepo(ustr, path)
		if err != nil {
			return nil, unwrapVcsErr(err)
		}
	}

	return &bzrSource{
		baseVCSSource: baseVCSSource{
			repo: &bzrRepo{r},
		},
	}, nil
}

func (m maybeBzrSource) URL() *url.URL {
	return m.url
}

func (m maybeBzrSource) String() string {
	return fmt.Sprintf("%T: %s", m, ufmt(m.url))
}

// maybeHgSource is a candidate backed by a mercurial remote.
type maybeHgSource struct {
	url *url.URL
}

// try sets up the on-disk hg clone, retrying once after wiping the cache dir
// on initial failure.
func (m maybeHgSource) try(ctx context.Context, cachedir string) (source, error) {
	ustr := m.url.String()
	path := sourceCachePath(cachedir, ustr)

	r, err := vcs.NewHgRepo(ustr, path)
	if err != nil {
		os.RemoveAll(path)
		r, err = vcs.NewHgRepo(ustr, path)
		if err != nil {
			return nil, unwrapVcsErr(err)
		}
	}

	return &hgSource{
		baseVCSSource: baseVCSSource{
			repo: &hgRepo{r},
		},
	}, nil
}

func (m maybeHgSource) URL() *url.URL {
	return m.url
}

func (m maybeHgSource) String() string {
	return fmt.Sprintf("%T: %s", m, ufmt(m.url))
}

// borrow from stdlib
// more useful string for debugging than fmt's struct printer
func ufmt(u *url.URL) string {
	var user, pass interface{}
	if u.User != nil {
		user = u.User.Username()
		if p, ok := u.User.Password(); ok {
			pass = p
		}
	}
	return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
		u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
}
// ---- file boundary: gps/maybe_source.go ends; gps/deduce.go begins ----
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "context" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" radix "github.com/armon/go-radix" "github.com/pkg/errors" ) var ( gitSchemes = []string{"https", "ssh", "git", "http"} bzrSchemes = []string{"https", "bzr+ssh", "bzr", "http"} hgSchemes = []string{"https", "ssh", "http"} svnSchemes = []string{"https", "http", "svn", "svn+ssh"} gopkginSchemes = []string{"https", "http"} netrc []netrcLine readNetrcOnce sync.Once ) const gopkgUnstableSuffix = "-unstable" func validateVCSScheme(scheme, typ string) bool { // everything allows plain ssh if scheme == "ssh" { return true } var schemes []string switch typ { case "git": schemes = gitSchemes case "bzr": schemes = bzrSchemes case "hg": schemes = hgSchemes case "svn": schemes = svnSchemes default: panic(fmt.Sprint("unsupported vcs type", scheme)) } for _, valid := range schemes { if scheme == valid { return true } } return false } // Regexes for the different known import path flavors var ( // This regex allows some usernames that github currently disallows. They // have allowed them in the past. 
ghRegex = regexp.MustCompile(`^(?P<root>github\.com(/[A-Za-z0-9][-A-Za-z0-9]*/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`) gpinNewRegex = regexp.MustCompile(`^(?P<root>gopkg\.in(?:(/[a-zA-Z0-9][-a-zA-Z0-9]+)?)(/[a-zA-Z][-.a-zA-Z0-9]*)\.((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(?:-unstable)?)(?:\.git)?)((?:/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)$`) //gpinOldRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-z0-9][-a-z0-9]+)/)?((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)/([a-zA-Z][-a-zA-Z0-9]*)(?:\.git)?)((?:/[a-zA-Z][-a-zA-Z0-9]*)*)$`) bbRegex = regexp.MustCompile(`^(?P<root>bitbucket\.org(?P<bitname>/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`) //lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+)(/[A-Za-z0-9-._]+)?)(/.+)?`) lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net(/[A-Za-z0-9-._]+))((?:/[A-Za-z0-9_.\-]+)*)?$`) //glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+)$`) glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net(/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`) //gcRegex = regexp.MustCompile(`^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`) jazzRegex = regexp.MustCompile(`^(?P<root>hub\.jazz\.net(/git/[a-z0-9]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`) apacheRegex = regexp.MustCompile(`^(?P<root>git\.apache\.org(/[a-z0-9_.\-]+\.git))((?:/[A-Za-z0-9_.\-]+)*)$`) vcsExtensionRegex = regexp.MustCompile(`^(?P<root>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/~]*?\.(?P<vcs>bzr|git|hg|svn))((?:/[A-Za-z0-9_.\-]+)*)$`) ) // Other helper regexes var ( scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`) pathvld = regexp.MustCompile(`^([A-Za-z0-9-]+)(\.[A-Za-z0-9-]+)+(/[A-Za-z0-9-_.~]+)*$`) ) func pathDeducerTrie() *deducerTrie { dxt := newDeducerTrie() dxt.Insert("github.com/", githubDeducer{regexp: ghRegex}) 
dxt.Insert("gopkg.in/", gopkginDeducer{regexp: gpinNewRegex}) dxt.Insert("bitbucket.org/", bitbucketDeducer{regexp: bbRegex}) dxt.Insert("launchpad.net/", launchpadDeducer{regexp: lpRegex}) dxt.Insert("git.launchpad.net/", launchpadGitDeducer{regexp: glpRegex}) dxt.Insert("hub.jazz.net/", jazzDeducer{regexp: jazzRegex}) dxt.Insert("git.apache.org/", apacheDeducer{regexp: apacheRegex}) return dxt } type pathDeducer interface { // deduceRoot takes an import path such as // "github.com/some-user/some-package/some-subpackage" // and returns the root folder to where the version control // system exists. For example, the root folder where .git exists. // So the return of the above string would be // "github.com/some-user/some-package" deduceRoot(string) (string, error) deduceSource(string, *url.URL) (maybeSources, error) } type githubDeducer struct { regexp *regexp.Regexp } func (m githubDeducer) deduceRoot(path string) (string, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on github.com", path) } return "github.com" + v[2], nil } func (m githubDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on github.com", path) } u.Host = "github.com" u.Path = v[2] if u.Scheme == "ssh" && u.User != nil && u.User.Username() != "git" { return nil, fmt.Errorf("github ssh must be accessed via the 'git' user; %s was provided", u.User.Username()) } else if u.Scheme != "" { if !validateVCSScheme(u.Scheme, "git") { return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme) } if u.Scheme == "ssh" { u.User = url.User("git") } return maybeSources{maybeGitSource{url: u}}, nil } mb := make(maybeSources, len(gitSchemes)) for k, scheme := range gitSchemes { u2 := *u if scheme == "ssh" { u2.User = url.User("git") } u2.Scheme = scheme mb[k] = maybeGitSource{url: 
&u2} } return mb, nil } type bitbucketDeducer struct { regexp *regexp.Regexp } func (m bitbucketDeducer) deduceRoot(path string) (string, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path) } return "bitbucket.org" + v[2], nil } func (m bitbucketDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path) } u.Host = "bitbucket.org" u.Path = v[2] // This isn't definitive, but it'll probably catch most isgit := strings.HasSuffix(u.Path, ".git") || (u.User != nil && u.User.Username() == "git") ishg := strings.HasSuffix(u.Path, ".hg") || (u.User != nil && u.User.Username() == "hg") // TODO(sdboyer) resolve scm ambiguity if needed by querying bitbucket's REST API if u.Scheme != "" { validgit, validhg := validateVCSScheme(u.Scheme, "git"), validateVCSScheme(u.Scheme, "hg") if isgit { if !validgit { // This is unreachable for now, as the git schemes are a // superset of the hg schemes return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme) } return maybeSources{maybeGitSource{url: u}}, nil } else if ishg { if !validhg { return nil, fmt.Errorf("%s is not a valid scheme for accessing an hg repository", u.Scheme) } return maybeSources{maybeHgSource{url: u}}, nil } else if !validgit && !validhg { return nil, fmt.Errorf("%s is not a valid scheme for accessing either a git or hg repository", u.Scheme) } // No other choice, make an option for both git and hg return maybeSources{ maybeHgSource{url: u}, maybeGitSource{url: u}, }, nil } mb := make(maybeSources, 0) // git is probably more common, even on bitbucket. however, bitbucket // appears to fail _extremely_ slowly on git pings (ls-remote) when the // underlying repository is actually an hg repository, so it's better // to try hg first. 
if !isgit { for _, scheme := range hgSchemes { u2 := *u if scheme == "ssh" { u2.User = url.User("hg") } u2.Scheme = scheme mb = append(mb, maybeHgSource{url: &u2}) } } if !ishg { for _, scheme := range gitSchemes { u2 := *u if scheme == "ssh" { u2.User = url.User("git") } u2.Scheme = scheme mb = append(mb, maybeGitSource{url: &u2}) } } return mb, nil } type gopkginDeducer struct { regexp *regexp.Regexp } func (m gopkginDeducer) deduceRoot(p string) (string, error) { v, err := m.parseAndValidatePath(p) if err != nil { return "", err } return v[1], nil } func (m gopkginDeducer) parseAndValidatePath(p string) ([]string, error) { v := m.regexp.FindStringSubmatch(p) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on gopkg.in", p) } // We duplicate some logic from the gopkg.in server in order to validate the // import path string without having to make a network request if strings.Contains(v[4], ".") { return nil, fmt.Errorf("%s is not a valid import path; gopkg.in only allows major versions (%q instead of %q)", p, v[4][:strings.Index(v[4], ".")], v[4]) } return v, nil } func (m gopkginDeducer) deduceSource(p string, u *url.URL) (maybeSources, error) { // Reuse root detection logic for initial validation v, err := m.parseAndValidatePath(p) if err != nil { return nil, err } // Putting a scheme on gopkg.in would be really weird, disallow it if u.Scheme != "" { return nil, fmt.Errorf("specifying alternate schemes on gopkg.in imports is not permitted") } // gopkg.in is always backed by github u.Host = "github.com" if v[2] == "" { elem := v[3][1:] u.Path = path.Join("/go-"+elem, elem) } else { u.Path = path.Join(v[2], v[3]) } unstable := false majorStr := v[4] if strings.HasSuffix(majorStr, gopkgUnstableSuffix) { unstable = true majorStr = strings.TrimSuffix(majorStr, gopkgUnstableSuffix) } major, err := strconv.ParseUint(majorStr[1:], 10, 64) if err != nil { // this should only be reachable if there's an error in the regex return nil, 
fmt.Errorf("could not parse %q as a gopkg.in major version", majorStr[1:]) } mb := make(maybeSources, len(gopkginSchemes)) for k, scheme := range gopkginSchemes { u2 := *u u2.Scheme = scheme mb[k] = maybeGopkginSource{ opath: v[1], url: &u2, major: major, unstable: unstable, } } return mb, nil } type launchpadDeducer struct { regexp *regexp.Regexp } func (m launchpadDeducer) deduceRoot(path string) (string, error) { // TODO(sdboyer) lp handling is nasty - there's ambiguities which can only really // be resolved with a metadata request. See https://github.com/golang/go/issues/11436 v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on launchpad.net", path) } return "launchpad.net" + v[2], nil } func (m launchpadDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on launchpad.net", path) } u.Host = "launchpad.net" u.Path = v[2] if u.Scheme != "" { if !validateVCSScheme(u.Scheme, "bzr") { return nil, fmt.Errorf("%s is not a valid scheme for accessing a bzr repository", u.Scheme) } return maybeSources{maybeBzrSource{url: u}}, nil } mb := make(maybeSources, len(bzrSchemes)) for k, scheme := range bzrSchemes { u2 := *u u2.Scheme = scheme mb[k] = maybeBzrSource{url: &u2} } return mb, nil } type launchpadGitDeducer struct { regexp *regexp.Regexp } func (m launchpadGitDeducer) deduceRoot(path string) (string, error) { // TODO(sdboyer) same ambiguity issues as with normal bzr lp v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", path) } return "git.launchpad.net" + v[2], nil } func (m launchpadGitDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", 
path) } u.Host = "git.launchpad.net" u.Path = v[2] if u.Scheme != "" { if !validateVCSScheme(u.Scheme, "git") { return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme) } return maybeSources{maybeGitSource{url: u}}, nil } mb := make(maybeSources, len(gitSchemes)) for k, scheme := range gitSchemes { u2 := *u u2.Scheme = scheme mb[k] = maybeGitSource{url: &u2} } return mb, nil } type jazzDeducer struct { regexp *regexp.Regexp } func (m jazzDeducer) deduceRoot(path string) (string, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path) } return "hub.jazz.net" + v[2], nil } func (m jazzDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path) } u.Host = "hub.jazz.net" u.Path = v[2] switch u.Scheme { case "": u.Scheme = "https" fallthrough case "https": return maybeSources{maybeGitSource{url: u}}, nil default: return nil, fmt.Errorf("IBM's jazz hub only supports https, %s is not allowed", u.String()) } } type apacheDeducer struct { regexp *regexp.Regexp } func (m apacheDeducer) deduceRoot(path string) (string, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s is not a valid path for a source on git.apache.org", path) } return "git.apache.org" + v[2], nil } func (m apacheDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s is not a valid path for a source on git.apache.org", path) } u.Host = "git.apache.org" u.Path = v[2] if u.Scheme != "" { if !validateVCSScheme(u.Scheme, "git") { return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme) } return maybeSources{maybeGitSource{url: u}}, nil } mb := make(maybeSources, len(gitSchemes)) 
for k, scheme := range gitSchemes { u2 := *u u2.Scheme = scheme mb[k] = maybeGitSource{url: &u2} } return mb, nil } type vcsExtensionDeducer struct { regexp *regexp.Regexp } func (m vcsExtensionDeducer) deduceRoot(path string) (string, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return "", fmt.Errorf("%s contains no vcs extension hints for matching", path) } return v[1], nil } func (m vcsExtensionDeducer) deduceSource(path string, u *url.URL) (maybeSources, error) { v := m.regexp.FindStringSubmatch(path) if v == nil { return nil, fmt.Errorf("%s contains no vcs extension hints for matching", path) } switch v[4] { case "git", "hg", "bzr": x := strings.SplitN(v[1], "/", 2) // TODO(sdboyer) is this actually correct for bzr? u.Host = x[0] u.Path = "/" + x[1] if u.Scheme != "" { if !validateVCSScheme(u.Scheme, v[4]) { return nil, fmt.Errorf("%s is not a valid scheme for accessing %s repositories (path %s)", u.Scheme, v[4], path) } switch v[4] { case "git": return maybeSources{maybeGitSource{url: u}}, nil case "bzr": return maybeSources{maybeBzrSource{url: u}}, nil case "hg": return maybeSources{maybeHgSource{url: u}}, nil } } var schemes []string var mb maybeSources var f func(k int, u *url.URL) switch v[4] { case "git": schemes = gitSchemes f = func(k int, u *url.URL) { mb[k] = maybeGitSource{url: u} } case "bzr": schemes = bzrSchemes f = func(k int, u *url.URL) { mb[k] = maybeBzrSource{url: u} } case "hg": schemes = hgSchemes f = func(k int, u *url.URL) { mb[k] = maybeHgSource{url: u} } } mb = make(maybeSources, len(schemes)) for k, scheme := range schemes { u2 := *u u2.Scheme = scheme f(k, &u2) } return mb, nil default: return nil, fmt.Errorf("unknown repository type: %q", v[4]) } } // A deducer takes an import path and inspects it to determine where the // corresponding project root should be. It applies a number of matching // techniques, eventually falling back to an HTTP request for go-get metadata if // none of the explicit rules succeed. 
//
// The only real implementation is deductionCoordinator. The interface is
// primarily intended for testing purposes.
type deducer interface {
	deduceRootPath(ctx context.Context, path string) (pathDeduction, error)
}

// deductionCoordinator serializes and memoizes import path deduction.
// Results (and in-flight HTTP deductions) are cached in a radix tree keyed
// by root path, guarded by mut.
type deductionCoordinator struct {
	suprvsr  *supervisor    // bounds and cancels long-running calls (HTTP metadata fetches)
	mut      sync.RWMutex   // guards rootxt
	rootxt   *radix.Tree    // path prefix -> maybeSources (done) or *httpMetadataDeducer (in flight)
	deducext *deducerTrie   // static, host-specific deducers (github, bitbucket, ...)
}

// newDeductionCoordinator returns a deductionCoordinator that uses superv to
// supervise its network calls.
func newDeductionCoordinator(superv *supervisor) *deductionCoordinator {
	dc := &deductionCoordinator{
		suprvsr:  superv,
		rootxt:   radix.New(),
		deducext: pathDeducerTrie(),
	}

	return dc
}

// deduceRootPath takes an import path and attempts to deduce various
// metadata about it - what type of source should handle it, and where its
// "root" is (for vcs repositories, the repository root).
//
// If no errors are encountered, the returned pathDeduction will contain both
// the root path and a list of maybeSources, which can be subsequently used to
// create a handler that will manage the particular source.
func (dc *deductionCoordinator) deduceRootPath(ctx context.Context, path string) (pathDeduction, error) {
	// Bail out immediately if the supervisor's context is already dead.
	if err := dc.suprvsr.ctx.Err(); err != nil {
		return pathDeduction{}, err
	}

	// First, check the rootxt to see if there's a prefix match - if so, we
	// can return that and move on.
	dc.mut.RLock()
	prefix, data, has := dc.rootxt.LongestPrefix(path)
	dc.mut.RUnlock()
	if has && isPathPrefixOrEqual(prefix, path) {
		switch d := data.(type) {
		case maybeSources:
			return pathDeduction{root: prefix, mb: d}, nil
		case *httpMetadataDeducer:
			// Multiple calls have come in for a similar path shape during
			// the window in which the HTTP request to retrieve go get
			// metadata is in flight. Fold this request in with the existing
			// one(s) by calling the deduction method, which will avoid
			// duplication of work through a sync.Once.
			return d.deduce(ctx, path)
		}

		// The trie should only ever hold the two types above.
		panic(fmt.Sprintf("unexpected %T in deductionCoordinator.rootxt: %v", data, data))
	}

	// No match. Try known path deduction first.
	pd, err := dc.deduceKnownPaths(path)
	if err == nil {
		// Deduction worked; store it in the rootxt, send on retchan and
		// terminate.
		// FIXME(sdboyer) deal with changing path vs. root. Probably needs
		// to be predeclared and reused in the hmd returnFunc
		dc.mut.Lock()
		dc.rootxt.Insert(pd.root, pd.mb)
		dc.mut.Unlock()
		return pd, nil
	}

	if err != errNoKnownPathMatch {
		return pathDeduction{}, err
	}

	// The err indicates no known path matched. It's still possible that
	// retrieving go get metadata might do the trick.
	hmd := &httpMetadataDeducer{
		basePath: path,
		suprvsr:  dc.suprvsr,
		// The vanity deducer will call this func with a completed
		// pathDeduction if it succeeds in finding one. We process it
		// back through the action channel to ensure serialized
		// access to the rootxt map.
		returnFunc: func(pd pathDeduction) {
			dc.mut.Lock()
			dc.rootxt.Insert(pd.root, pd.mb)
			dc.mut.Unlock()
		},
	}

	// Save the hmd in the rootxt so that calls checking on similar
	// paths made while the request is in flight can be folded together.
	dc.mut.Lock()
	dc.rootxt.Insert(path, hmd)
	dc.mut.Unlock()

	// Trigger the HTTP-backed deduction process for this requestor.
	return hmd.deduce(ctx, path)
}

// pathDeduction represents the results of a successful import path deduction -
// a root path, plus a maybeSource that can be used to attempt to connect to
// the source.
type pathDeduction struct {
	root string
	mb   maybeSources
}

// errNoKnownPathMatch is a sentinel indicating that none of the static or
// vcs-extension deducers matched, and HTTP metadata retrieval should be tried.
var errNoKnownPathMatch = errors.New("no known path match")

// deduceKnownPaths runs the static, non-network deduction rules: first the
// host-prefix deducers in deducext, then the vcs-extension (infix) matcher.
// It returns errNoKnownPathMatch when neither applies.
func (dc *deductionCoordinator) deduceKnownPaths(path string) (pathDeduction, error) {
	u, path, err := normalizeURI(path)
	if err != nil {
		return pathDeduction{}, err
	}

	// First, try the root path-based matches
	if _, mtch, has := dc.deducext.LongestPrefix(path); has {
		root, err := mtch.deduceRoot(path)
		if err != nil {
			return pathDeduction{}, err
		}
		mb, err := mtch.deduceSource(path, u)
		if err != nil {
			return pathDeduction{}, err
		}

		return pathDeduction{
			root: root,
			mb:   mb,
		}, nil
	}

	// Next, try the vcs extension-based (infix) matcher
	exm := vcsExtensionDeducer{regexp: vcsExtensionRegex}
	if root, err := exm.deduceRoot(path); err == nil {
		mb, err := exm.deduceSource(path, u)
		if err != nil {
			return pathDeduction{}, err
		}

		return pathDeduction{
			root: root,
			mb:   mb,
		}, nil
	}

	return pathDeduction{}, errNoKnownPathMatch
}

// httpMetadataDeducer performs a single go-get metadata deduction over HTTP,
// collapsing concurrent requests for the same path via once.
type httpMetadataDeducer struct {
	once       sync.Once           // guards the single metadata fetch
	deduced    pathDeduction       // result, valid once once.Do has completed
	deduceErr  error               // error, valid once once.Do has completed
	basePath   string              // original path that triggered this deducer
	returnFunc func(pathDeduction) // hands the finished deduction back to the coordinator
	suprvsr    *supervisor
}

// deduce retrieves go-get metadata for path and converts it into a
// pathDeduction. The work runs at most once; all callers receive the same
// memoized result or error.
func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDeduction, error) {
	hmd.once.Do(func() {
		opath := path
		u, path, err := normalizeURI(path)
		if err != nil {
			err = errors.Wrapf(err, "unable to normalize URI")
			hmd.deduceErr = err
			return
		}

		pd := pathDeduction{}

		// Make the HTTP call to attempt to retrieve go-get metadata
		var root, vcs, reporoot string
		err = hmd.suprvsr.do(ctx, path, ctHTTPMetadata, func(ctx context.Context) error {
			root, vcs, reporoot, err = getMetadata(ctx, path, u.Scheme)
			if err != nil {
				err = errors.Wrapf(err, "unable to read metadata")
			}
			return err
		})
		if err != nil {
			err = errors.Wrapf(err, "unable to deduce repository and source type for %q", opath)
			hmd.deduceErr = err
			return
		}
		pd.root = root

		// If we got something back at all, then it supersedes the actual input for
		// the real URL to hit
		repoURL, err := url.Parse(reporoot)
		if err != nil {
			err = errors.Wrapf(err, "server returned bad URL in go-get metadata, reporoot=%q", reporoot)
			hmd.deduceErr = err
			return
		}

		// If the input path specified a scheme, then try to honor it.
		if u.Scheme != "" && repoURL.Scheme != u.Scheme {
			// If the input scheme was http, but the go-get metadata
			// nevertheless indicated https should be used for the repo, then
			// trust the metadata and use https.
			//
			// To err on the secure side, do NOT allow the same in the other
			// direction (https -> http).
			if u.Scheme != "http" || repoURL.Scheme != "https" {
				hmd.deduceErr = errors.Errorf("scheme mismatch for %q: input asked for %q, but go-get metadata specified %q", path, u.Scheme, repoURL.Scheme)
				return
			}
		}

		switch vcs {
		case "git":
			pd.mb = maybeSources{maybeGitSource{url: repoURL}}
		case "bzr":
			pd.mb = maybeSources{maybeBzrSource{url: repoURL}}
		case "hg":
			pd.mb = maybeSources{maybeHgSource{url: repoURL}}
		default:
			hmd.deduceErr = errors.Errorf("unsupported vcs type %s in go-get metadata from %s", vcs, path)
			return
		}

		hmd.deduced = pd
		// All data is assigned for other goroutines that may be waiting. Now,
		// send the pathDeduction back to the deductionCoordinator by calling
		// the returnFunc. This will also remove the reference to this hmd in
		// the coordinator's trie.
		//
		// When this call finishes, it is guaranteed the coordinator will have
		// at least begun running the action to insert the path deduction, which
		// means no other deduction request will be able to interleave and
		// request the same path before the pathDeduction can be processed, but
		// after this hmd has been dereferenced from the trie.
		hmd.returnFunc(pd)
	})

	return hmd.deduced, hmd.deduceErr
}

// normalizeURI takes a path string - which can be a plain import path, or a
// proper URI, or something SCP-shaped - performs basic validity checks, and
// returns both a full URL and just the path portion.
func normalizeURI(p string) (*url.URL, string, error) {
	var u *url.URL
	var newpath string
	if m := scpSyntaxRe.FindStringSubmatch(p); m != nil {
		// Match SCP-like syntax and convert it to a URL.
		// Eg, "git@github.com:user/repo" becomes
		// "ssh://git@github.com/user/repo".
		u = &url.URL{
			Scheme: "ssh",
			User:   url.User(m[1]),
			Host:   m[2],
			Path:   "/" + m[3],
			// TODO(sdboyer) This is what stdlib sets; grok why better
			//RawPath: m[3],
		}
	} else {
		var err error
		u, err = url.Parse(p)
		if err != nil {
			return nil, "", errors.Errorf("%q is not a valid URI", p)
		}
	}

	// If no scheme was passed, then the entire path will have been put into
	// u.Path. Either way, construct the normalized path correctly.
	if u.Host == "" {
		newpath = p
	} else {
		newpath = path.Join(u.Host, u.Path)
	}

	return u, newpath, nil
}

// fetchMetadata fetches the remote metadata for path. With scheme "http" only
// http is attempted; otherwise https is tried first, falling back to http on
// error (the https error itself is discarded).
func fetchMetadata(ctx context.Context, path, scheme string) (rc io.ReadCloser, err error) {
	if scheme == "http" {
		rc, err = doFetchMetadata(ctx, "http", path)
		return
	}

	rc, err = doFetchMetadata(ctx, "https", path)
	if err == nil {
		return
	}

	rc, err = doFetchMetadata(ctx, "http", path)
	return
}

// doFetchMetadata issues a single ?go-get=1 request over the given scheme and
// returns the raw response body. The caller is responsible for closing it.
// NOTE(review): the response status code is not checked here; non-2xx bodies
// are handed to the metadata parser — confirm that is intentional.
func doFetchMetadata(ctx context.Context, scheme, path string) (io.ReadCloser, error) {
	// Local `url` string shadows the net/url package; that package is not
	// otherwise needed inside this function.
	url := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
	switch scheme {
	case "https", "http":
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to build HTTP request for URL %q", url)
		}

		req = addAuthFromNetrc(url, req)
		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err != nil {
			return nil, errors.Wrapf(err, "failed HTTP request to URL %q", url)
		}

		return resp.Body, nil
	default:
		return nil, errors.Errorf("unknown remote protocol scheme: %q", scheme)
	}
}

// See https://github.com/golang/go/blob/master/src/cmd/go/internal/web2/web.go
// for implementation
// Temporary netrc reader until https://github.com/golang/go/issues/31334 is solved

// netrcLine is one complete machine/login/password triple from a .netrc file.
type netrcLine struct {
	machine  string
	login    string
	password string
}

// parseNetrc extracts complete machine/login/password triples from .netrc
// content, skipping macro bodies and stopping at a "default" token.
func parseNetrc(data string) []netrcLine {
	// See https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html
	// for documentation on the .netrc format.
	var nrc []netrcLine
	var l netrcLine
	inMacro := false
	for _, line := range strings.Split(data, "\n") {
		if inMacro {
			// Macro bodies run until a blank line; ignore their contents.
			if line == "" {
				inMacro = false
			}
			continue
		}

		f := strings.Fields(line)
		i := 0
		for ; i < len(f)-1; i += 2 {
			// Reset at each "machine" token.
			// “The auto-login process searches the .netrc file for a machine token
			// that matches […]. Once a match is made, the subsequent .netrc tokens
			// are processed, stopping when the end of file is reached or another
			// machine or a default token is encountered.”
			switch f[i] {
			case "machine":
				l = netrcLine{machine: f[i+1]}
			case "login":
				l.login = f[i+1]
			case "password":
				l.password = f[i+1]
			case "macdef":
				// “A macro is defined with the specified name; its contents begin with
				// the next .netrc line and continue until a null line (consecutive
				// new-line characters) is encountered.”
				inMacro = true
			}
			// A triple is emitted as soon as all three fields are present.
			if l.machine != "" && l.login != "" && l.password != "" {
				nrc = append(nrc, l)
				l = netrcLine{}
			}
		}

		if i < len(f) && f[i] == "default" {
			// “There can be only one default token, and it must be after all machine tokens.”
			break
		}
	}

	return nrc
}

// netrcPath returns the netrc file location: $NETRC if set, otherwise
// $HOME/.netrc ($HOME/_netrc on Windows).
// NOTE(review): on Windows, HOME may be unset; presumably USERPROFILE would be
// the conventional fallback — verify against the upstream cmd/go source.
func netrcPath() (string, error) {
	if env := os.Getenv("NETRC"); env != "" {
		return env, nil
	}

	dir := os.Getenv("HOME")
	base := ".netrc"
	if runtime.GOOS == "windows" {
		base = "_netrc"
	}
	return filepath.Join(dir, base), nil
}

// readNetrc parses a user's netrc file, ignoring any errors that occur.
// It populates the package-level netrc slice (declared elsewhere in this file).
func readNetrc() {
	path, err := netrcPath()
	if err != nil {
		return
	}

	data, err := ioutil.ReadFile(path)
	if err != nil {
		return
	}

	netrc = parseNetrc(string(data))
}

// addAuthFromNetrc uses basic authentication on go-get requests
// for private repositories. The netrc file is read lazily, exactly once
// (readNetrcOnce is a package-level sync.Once declared elsewhere).
func addAuthFromNetrc(rawurl string, req *http.Request) *http.Request {
	readNetrcOnce.Do(readNetrc)
	for _, m := range netrc {
		u, err := url.Parse(rawurl)
		if err != nil {
			continue
		}
		if u.Host == m.machine {
			req.SetBasicAuth(m.login, m.password)
			break
		}
	}
	return req
}

// getMetadata fetches and decodes remote metadata for path.
//
// scheme is optional. If it's http, only http will be attempted for fetching.
// Any other scheme (including none) will first try https, then fall back to
// http.
func getMetadata(ctx context.Context, path, scheme string) (string, string, string, error) {
	rc, err := fetchMetadata(ctx, path, scheme)
	if err != nil {
		return "", "", "", errors.Wrapf(err, "unable to fetch raw metadata")
	}
	defer rc.Close()

	imports, err := parseMetaGoImports(rc)
	if err != nil {
		return "", "", "", errors.Wrapf(err, "unable to parse go-import metadata")
	}

	// Exactly one meta tag may prefix-match the requested path.
	match := -1
	for i, im := range imports {
		if !strings.HasPrefix(path, im.Prefix) {
			continue
		}
		if match != -1 {
			return "", "", "", errors.Errorf("multiple meta tags match import path %q", path)
		}
		match = i
	}
	if match == -1 {
		return "", "", "", errors.Errorf("go-import metadata not found")
	}

	return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/version_queue_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"testing"

	"github.com/pkg/errors"
)

// just need a listVersions method
type fakeBridge struct {
	*bridge
	vl []Version
}

// fakevl is the fixture version list; init() below sorts it into upgrade
// order, so index 0 is the highest-preference version.
var fakevl = []Version{
	NewVersion("v2.0.0").Pair("200rev"),
	NewVersion("v1.1.1").Pair("111rev"),
	NewVersion("v1.1.0").Pair("110rev"),
	NewVersion("v1.0.0").Pair("100rev"),
	NewBranch("master").Pair("masterrev"),
}

func init() {
	SortForUpgrade(fakevl)
}

func (fb *fakeBridge) listVersions(id ProjectIdentifier) ([]Version, error) {
	// it's a fixture, we only ever do the one, regardless of id
	return fb.vl, nil
}

// fakeFailBridge always errors from listVersions, for exercising error paths.
type fakeFailBridge struct {
	*bridge
}

var errVQ = errors.New("vqerr")

func (fb *fakeFailBridge) listVersions(id ProjectIdentifier) ([]Version, error) {
	return nil, errVQ
}

// TestVersionQueueSetup verifies newVersionQueue's initial state for every
// combination of lockv/prefv being present or absent.
func TestVersionQueueSetup(t *testing.T) {
	id := ProjectIdentifier{ProjectRoot: ProjectRoot("foo")}.normalize()

	// shouldn't even need to embed a real bridge
	fb := &fakeBridge{vl: fakevl}
	ffb := &fakeFailBridge{}

	_, err := newVersionQueue(id, nil, nil, ffb)
	if err == nil {
		t.Error("Expected err when providing no prefv or lockv, and injected bridge returns err from ListVersions()")
	}

	// No lockv/prefv: the full fixture list is loaded eagerly.
	vq, err := newVersionQueue(id, nil, nil, fb)
	if err != nil {
		t.Errorf("Unexpected err on vq create: %s", err)
	} else {
		if len(vq.pi) != 5 {
			t.Errorf("Should have five versions from listVersions() when providing no prefv or lockv; got %v:\n\t%s", len(vq.pi), vq.String())
		}
		if !vq.allLoaded {
			t.Errorf("allLoaded flag should be set, but wasn't")
		}
		if vq.prefv != nil || vq.lockv != nil {
			t.Error("lockv and prefv should be nil")
		}
		if vq.current() != fakevl[0] {
			t.Errorf("current should be head of fakevl (%s), got %s", fakevl[0], vq.current())
		}
	}

	lockv := fakevl[0]
	prefv := fakevl[1]

	// Only a lockv: queue starts with just that version, lazily loaded.
	vq, err = newVersionQueue(id, lockv, nil, fb)
	if err != nil {
		t.Errorf("Unexpected err on vq create: %s", err)
	} else {
		if len(vq.pi) != 1 {
			t.Errorf("Should have one version when providing only a lockv; got %v:\n\t%s", len(vq.pi), vq.String())
		}
		if vq.allLoaded {
			t.Errorf("allLoaded flag should not be set")
		}
		if vq.lockv != lockv {
			t.Errorf("lockv should be %s, was %s", lockv, vq.lockv)
		}
		if vq.current() != lockv {
			t.Errorf("current should be lockv (%s), got %s", lockv, vq.current())
		}
	}

	// Only a prefv: same shape as the lockv-only case.
	vq, err = newVersionQueue(id, nil, prefv, fb)
	if err != nil {
		t.Errorf("Unexpected err on vq create: %s", err)
	} else {
		if len(vq.pi) != 1 {
			t.Errorf("Should have one version when providing only a prefv; got %v:\n\t%s", len(vq.pi), vq.String())
		}
		if vq.allLoaded {
			t.Errorf("allLoaded flag should not be set")
		}
		if vq.prefv != prefv {
			t.Errorf("prefv should be %s, was %s", prefv, vq.prefv)
		}
		if vq.current() != prefv {
			t.Errorf("current should be prefv (%s), got %s", prefv, vq.current())
		}
	}

	// Both: lockv is tried before prefv.
	vq, err = newVersionQueue(id, lockv, prefv, fb)
	if err != nil {
		t.Errorf("Unexpected err on vq create: %s", err)
	} else {
		if len(vq.pi) != 2 {
			t.Errorf("Should have two versions when providing both a prefv and lockv; got %v:\n\t%s", len(vq.pi), vq.String())
		}
		if vq.allLoaded {
			t.Errorf("allLoaded flag should not be set")
		}
		if vq.prefv != prefv {
			t.Errorf("prefv should be %s, was %s", prefv, vq.prefv)
		}
		if vq.lockv != lockv {
			t.Errorf("lockv should be %s, was %s", lockv, vq.lockv)
		}
		if vq.current() != lockv {
			t.Errorf("current should be lockv (%s), got %s", lockv, vq.current())
		}
	}
}

// TestVersionQueueAdvance walks queues to exhaustion, covering the
// lockv/prefv-to-full-list transition, duplicate suppression, and error
// memoization on a failing bridge.
func TestVersionQueueAdvance(t *testing.T) {
	fb := &fakeBridge{vl: fakevl}
	id := ProjectIdentifier{ProjectRoot: ProjectRoot("foo")}.normalize()

	// First with no prefv or lockv
	vq, err := newVersionQueue(id, nil, nil, fb)
	if err != nil {
		t.Fatalf("Unexpected err on vq create: %s", err)
	}

	for k, v := range fakevl[1:] {
		err = vq.advance(errors.Errorf("advancment fail for %s", fakevl[k]))
		if err != nil {
			t.Errorf("error on advancing vq from %s to %s", fakevl[k], v)
			break
		}

		if vq.current() != v {
			t.Errorf("on advance() %v, current should be %s, got %s", k, v, vq.current())
		}
	}

	if vq.isExhausted() {
		t.Error("should not be exhausted until advancing 'past' the end")
	}

	if err = vq.advance(errors.Errorf("final advance failure")); err != nil {
		t.Errorf("should not error on advance, even past end, but got %s", err)
	}

	if !vq.isExhausted() {
		t.Error("advanced past end, should now report exhaustion")
	}
	if vq.current() != nil {
		t.Error("advanced past end, current should return nil")
	}

	// now, do one with both a prefv and lockv
	lockv := fakevl[2]
	prefv := fakevl[0]
	vq, err = newVersionQueue(id, lockv, prefv, fb)
	if err != nil {
		t.Errorf("error creating version queue: %v", err)
	}
	if vq.String() != "[v1.1.0, v2.0.0]" {
		t.Error("stringifying vq did not have expected outcome, got", vq.String())
	}
	if vq.isExhausted() {
		t.Error("can't be exhausted, we aren't even 'allLoaded' yet")
	}

	err = vq.advance(errors.Errorf("dequeue lockv"))
	if err != nil {
		t.Error("unexpected error when advancing past lockv", err)
	} else {
		if vq.current() != prefv {
			t.Errorf("current should be prefv (%s) after first advance, got %s", prefv, vq.current())
		}
		if len(vq.pi) != 1 {
			t.Errorf("should have just prefv elem left in vq, but there are %v:\n\t%s", len(vq.pi), vq.String())
		}
	}

	// Advancing past prefv triggers the full listVersions() load, which must
	// exclude the already-tried lockv and prefv.
	err = vq.advance(errors.Errorf("dequeue prefv"))
	if err != nil {
		t.Error("unexpected error when advancing past prefv", err)
	} else {
		if !vq.allLoaded {
			t.Error("allLoaded should now be true")
		}
		if len(vq.pi) != 3 {
			t.Errorf("should have three remaining versions after removing prefv and lockv, but there are %v:\n\t%s", len(vq.pi), vq.String())
		}
		if vq.current() != fakevl[1] {
			t.Errorf("current should be first elem of fakevl (%s) after advancing into all, got %s", fakevl[1], vq.current())
		}
	}

	// make sure the queue ordering is still right even with a double-delete
	vq.advance(nil)
	if vq.current() != fakevl[3] {
		t.Errorf("second elem after ListVersions() should be idx 3 of fakevl (%s), got %s", fakevl[3], vq.current())
	}
	vq.advance(nil)
	if vq.current() != fakevl[4] {
		t.Errorf("third elem after ListVersions() should be idx 4 of fakevl (%s), got %s", fakevl[4], vq.current())
	}
	vq.advance(nil)
	if vq.current() != nil || !vq.isExhausted() {
		t.Error("should be out of versions in the queue")
	}

	// Make sure we handle things correctly when listVersions adds nothing new
	fb = &fakeBridge{vl: []Version{lockv, prefv}}
	vq, err = newVersionQueue(id, lockv, prefv, fb)
	if err != nil {
		t.Errorf("error creating version queue: %v", err)
	}
	vq.advance(nil)
	vq.advance(nil)
	if vq.current() != nil || !vq.isExhausted() {
		t.Errorf("should have no versions left, as ListVersions() added nothing new, but still have %s", vq.String())
	}

	err = vq.advance(nil)
	if err != nil {
		t.Errorf("should be fine to advance on empty queue, per docs, but got err %s", err)
	}

	// Also handle it well when advancing calls ListVersions() and it gets an
	// error
	vq, err = newVersionQueue(id, lockv, nil, &fakeFailBridge{})
	if err != nil {
		t.Errorf("should not err on creation when preseeded with lockv, but got err %s", err)
	}
	err = vq.advance(nil)
	if err == nil {
		t.Error("advancing should trigger call to erroring bridge, but no err")
	}
	// The bridge error is memoized on the queue and returned thereafter.
	err = vq.advance(nil)
	if err == nil {
		t.Error("err should be stored for reuse on any subsequent calls")
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_errors.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "github.com/Masterminds/vcs" "github.com/pkg/errors" ) // unwrapVcsErr recognizes *vcs.LocalError and *vsc.RemoteError, and returns a form // preserving the actual vcs command output and error, in addition to the message. // All other types pass through unchanged. func unwrapVcsErr(err error) error { var cause error var out, msg string switch t := err.(type) { case *vcs.LocalError: cause, out, msg = t.Original(), t.Out(), t.Error() case *vcs.RemoteError: cause, out, msg = t.Original(), t.Out(), t.Error() default: return err } if cause == nil { cause = errors.New(out) } else { cause = errors.Wrap(cause, out) } return errors.Wrap(cause, msg) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/trace.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/golang/dep/gps/pkgtree"
)

// NOTE(review): several string literals in this file carry alignment padding
// whose exact spacing may have been collapsed in transit — verify spacing
// (innerIndent, getprei, the traceSelectRoot lines) against upstream.
const (
	successChar   = "✓"
	successCharSp = successChar + " "
	failChar      = "✗"
	failCharSp    = failChar + " "
	backChar      = "←"
	innerIndent   = " "
)

// traceCheckPkgs logs a revisit of an already-selected project to add more
// packages. All trace methods are no-ops when trace logging (s.tl) is nil.
func (s *solver) traceCheckPkgs(bmi bimodalIdentifier) {
	if s.tl == nil {
		return
	}

	prefix := getprei(len(s.vqs) + 1)
	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? revisit %s to add %v pkgs", bmi.id, len(bmi.pl)), prefix, prefix))
}

// traceCheckQueue logs an attempt (or continuation) against a version queue,
// including how many versions remain to try.
func (s *solver) traceCheckQueue(q *versionQueue, bmi bimodalIdentifier, cont bool, offset int) {
	if s.tl == nil {
		return
	}

	prefix := getprei(len(s.vqs) + offset)
	vlen := strconv.Itoa(len(q.pi))
	if !q.allLoaded {
		// Queue is lazily loaded; the count is a lower bound.
		vlen = "at least " + vlen
	}

	// TODO(sdboyer) how...to list the packages in the limited space we have?
	var verb string
	indent := ""
	if cont {
		// Continue is an "inner" message.. indenting
		verb = "continue"
		vlen += " more"
		indent = innerIndent
	} else {
		verb = "attempt"
	}

	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("%s? %s %s with %v pkgs; %s versions to try", indent, verb, bmi.id, len(bmi.pl), vlen), prefix, prefix))
}

// traceStartBacktrack is called with the bmi that first failed, thus initiating
// backtracking
func (s *solver) traceStartBacktrack(bmi bimodalIdentifier, err error, pkgonly bool) {
	if s.tl == nil {
		return
	}

	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s%s could not add %v pkgs to %s; begin backtrack", innerIndent, backChar, len(bmi.pl), bmi.id)
	} else {
		msg = fmt.Sprintf("%s%s no more versions of %s to try; begin backtrack", innerIndent, backChar, bmi.id)
	}

	prefix := getprei(len(s.sel.projects))
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}

// traceBacktrack is called when a package or project is popped off during
// backtracking
func (s *solver) traceBacktrack(bmi bimodalIdentifier, pkgonly bool) {
	if s.tl == nil {
		return
	}

	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s backtrack: popped %v pkgs from %s", backChar, len(bmi.pl), bmi.id)
	} else {
		msg = fmt.Sprintf("%s backtrack: no more versions of %s to try", backChar, bmi.id)
	}

	prefix := getprei(len(s.sel.projects))
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}

// Called just once after solving has finished, whether success or not
func (s *solver) traceFinish(sol solution, err error) {
	if s.tl == nil {
		return
	}

	if err == nil {
		var pkgcount int
		for _, lp := range sol.Projects() {
			pkgcount += len(lp.Packages())
		}
		s.tl.Printf("%s%s found solution with %v packages from %v projects", innerIndent, successChar, pkgcount, len(sol.Projects()))
	} else {
		s.tl.Printf("%s%s solving failed", innerIndent, failChar)
	}
}

// traceSelectRoot is called just once, when the root project is selected
func (s *solver) traceSelectRoot(ptree pkgtree.PackageTree, cdeps []completeDep) {
	if s.tl == nil {
		return
	}

	// This duplicates work a bit, but we're in trace mode and it's only once,
	// so who cares
	rm, _ := ptree.ToReachMap(true, true, false, s.rd.ir)

	s.tl.Printf("Root project is %q", s.rd.rpt.ImportRoot)

	var expkgs int
	for _, cdep := range cdeps {
		expkgs += len(cdep.pl)
	}

	// TODO(sdboyer) include info on ignored pkgs/imports, etc.
	s.tl.Printf(" %v transitively valid internal packages", len(rm))
	s.tl.Printf(" %v external packages imported from %v projects", expkgs, len(cdeps))
	s.tl.Printf("(0) " + successCharSp + "select (root)")
}

// traceSelect is called when an atom is successfully selected
func (s *solver) traceSelect(awp atomWithPackages, pkgonly bool) {
	if s.tl == nil {
		return
	}
	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s%s include %v more pkgs from %s", innerIndent, successChar, len(awp.pl), a2vs(awp.a))
	} else {
		msg = fmt.Sprintf("%s select %s w/%v pkgs", successChar, a2vs(awp.a), len(awp.pl))
	}

	prefix := getprei(len(s.sel.projects) - 1)
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}

// traceInfo logs a free-form message: a format string with args, a traceError
// (custom rendering), or a plain error. Any other first-arg type is a bug.
func (s *solver) traceInfo(args ...interface{}) {
	if s.tl == nil {
		return
	}

	if len(args) == 0 {
		panic("must pass at least one param to traceInfo")
	}

	preflen := len(s.sel.projects)
	var msg string
	switch data := args[0].(type) {
	case string:
		msg = tracePrefix(innerIndent+fmt.Sprintf(data, args[1:]...), " ", " ")
	case traceError:
		preflen++
		// We got a special traceError, use its custom method
		msg = tracePrefix(innerIndent+data.traceString(), " ", failCharSp)
	case error:
		// Regular error; still use the x leader but default Error() string
		msg = tracePrefix(innerIndent+data.Error(), " ", failCharSp)
	default:
		// panic here because this can *only* mean a stupid internal bug
		panic(fmt.Sprintf("canary - unknown type passed as first param to traceInfo %T", data))
	}

	prefix := getprei(preflen)
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}

// getprei formats the depth counter that leads each trace line.
// NOTE(review): the three branches look like they should pad to a common
// width for alignment; the padding appears to have been lost — confirm the
// intended trailing spaces against upstream.
func getprei(i int) string {
	var s string
	if i < 10 {
		s = fmt.Sprintf("(%d) ", i)
	} else if i < 100 {
		s = fmt.Sprintf("(%d) ", i)
	} else {
		s = fmt.Sprintf("(%d) ", i)
	}
	return s
}

// tracePrefix prepends fsep to the first line of msg and sep to every
// subsequent line, dropping a single trailing newline first.
func tracePrefix(msg, sep, fsep string) string {
	parts := strings.Split(strings.TrimSuffix(msg, "\n"), "\n")
	for k, str := range parts {
		if k == 0 {
			parts[k] = fsep + str
		} else {
			parts[k] = sep + str
		}
	}

	return strings.Join(parts, "\n")
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/vcs_version.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"strings"

	"github.com/Masterminds/vcs"
	"github.com/pkg/errors"
)

// VCSVersion returns the current project version for an absolute path.
//
// The repository's currently checked-out state is resolved with this
// precedence: a matching tag (which must be "v"-prefixed, assumed semver),
// then a matching branch, and finally the bare revision.
func VCSVersion(path string) (Version, error) {
	repo, err := vcs.NewRepo("", path)
	if err != nil {
		return nil, errors.Wrapf(err, "creating new repo for root: %s", path)
	}

	ver, err := repo.Current()
	if err != nil {
		return nil, errors.Wrapf(err, "finding current branch/version for root: %s", path)
	}

	rev, err := repo.Version()
	if err != nil {
		return nil, errors.Wrapf(err, "getting repo version for root: %s", path)
	}

	// First look through tags.
	tags, err := repo.Tags()
	if err != nil {
		return nil, errors.Wrapf(err, "getting repo tags for root: %s", path)
	}
	// Try to match the current version to a tag.
	if contains(tags, ver) {
		// Assume semver if it starts with a v.
		if strings.HasPrefix(ver, "v") {
			return NewVersion(ver).Pair(Revision(rev)), nil
		}

		// A tag without the "v" prefix is rejected rather than guessed at.
		return nil, errors.Errorf("version for root %s does not start with a v: %q", path, ver)
	}

	// Look for the current branch.
	branches, err := repo.Branches()
	if err != nil {
		return nil, errors.Wrapf(err, "getting repo branch for root: %s", path)
	}
	// Try to match the current version to a branch.
	if contains(branches, ver) {
		return NewBranch(ver).Pair(Revision(rev)), nil
	}

	// Neither a tag nor a branch matched; fall back to the raw revision.
	return Revision(rev), nil
}

// contains reports whether the slice a contains the string b.
func contains(a []string, b string) bool {
	for _, v := range a {
		if b == v {
			return true
		}
	}
	return false
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/strings.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "unicode" "unicode/utf8" ) // toFold returns a string with the property that strings.EqualFold(s, t) iff // ToFold(s) == ToFold(t) This lets us test a large set of strings for // fold-equivalent duplicates without making a quadratic number of calls to // EqualFold. Note that strings.ToUpper and strings.ToLower do not have the // desired property in some corner cases. // // This is hoisted from toolchain internals: src/cmd/go/internal/str/str.go func toFold(s string) string { // Fast path: all ASCII, no upper case. // Most paths look like this already. for i := 0; i < len(s); i++ { c := s[i] if c >= utf8.RuneSelf || 'A' <= c && c <= 'Z' { goto Slow } } return s Slow: var buf bytes.Buffer for _, r := range s { // SimpleFold(x) cycles to the next equivalent rune > x // or wraps around to smaller values. Iterate until it wraps, // and we've found the minimum value. for { r0 := r r = unicode.SimpleFold(r0) if r <= r0 { break } } // Exception to allow fast path above: A-Z => a-z if 'A' <= r && r <= 'Z' { r += 'a' - 'A' } buf.WriteRune(r) } return buf.String() }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/lock.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"sort"
)

// Lock represents data from a lock file (or however the implementing tool
// chooses to store it) at a particular version that is relevant to the
// satisfiability solving process.
//
// In general, the information produced by gps on finding a successful
// solution is all that would be necessary to constitute a lock file, though
// tools can include whatever other information they want in their storage.
type Lock interface {
	// Projects returns the list of LockedProjects contained in the lock data.
	Projects() []LockedProject

	// The set of imports (and required statements) that were the inputs that
	// generated this Lock. It is acceptable to return a nil slice from this
	// method if the information cannot reasonably be made available.
	InputImports() []string
}

// sortLockedProjects returns a sorted copy of lps, or itself if already sorted.
func sortLockedProjects(lps []LockedProject) []LockedProject {
	// Avoid allocating when the input is trivially small or already ordered.
	if len(lps) <= 1 || sort.SliceIsSorted(lps, func(i, j int) bool {
		return lps[i].Ident().Less(lps[j].Ident())
	}) {
		return lps
	}

	cp := make([]LockedProject, len(lps))
	copy(cp, lps)

	sort.Slice(cp, func(i, j int) bool {
		return cp[i].Ident().Less(cp[j].Ident())
	})

	return cp
}

// LockedProject is a single project entry from a lock file. It expresses the
// project's name, one or both of version and underlying revision, the network
// URI for accessing it, the path at which it should be placed within a vendor
// directory, and the packages that are used in it.
type LockedProject interface {
	Ident() ProjectIdentifier
	Version() Version
	Packages() []string
	Eq(LockedProject) bool
	String() string
}

// lockedProject is the default implementation of LockedProject.
type lockedProject struct {
	pi   ProjectIdentifier
	v    UnpairedVersion // nil when only a bare revision is known
	r    Revision        // empty when only an unpaired version is known
	pkgs []string
}

// SimpleLock is a helper for tools to easily describe lock data when they know
// that input imports are unavailable.
type SimpleLock []LockedProject

var _ Lock = SimpleLock{}

// Projects returns the entire contents of the SimpleLock.
func (l SimpleLock) Projects() []LockedProject {
	return l
}

// InputImports returns a nil string slice, as SimpleLock does not provide a way
// of capturing string slices.
func (l SimpleLock) InputImports() []string {
	return nil
}

// NewLockedProject creates a new LockedProject struct with a given
// ProjectIdentifier (name and optional upstream source URL), version. and list
// of packages required from the project.
//
// Note that passing a nil version will cause a panic. This is a correctness
// measure to ensure that the solver is never exposed to a version-less lock
// entry. Such a case would be meaningless - the solver would have no choice but
// to simply dismiss that project. By creating a hard failure case via panic
// instead, we are trying to avoid inflicting the resulting pain on the user by
// instead forcing a decision on the Analyzer implementation.
func NewLockedProject(id ProjectIdentifier, v Version, pkgs []string) LockedProject {
	if v == nil {
		panic("must provide a non-nil version to create a LockedProject")
	}

	lp := lockedProject{
		pi:   id,
		pkgs: pkgs,
	}

	// Split the incoming Version into its unpaired-version and/or revision
	// halves, depending on its concrete type.
	switch tv := v.(type) {
	case Revision:
		lp.r = tv
	case branchVersion:
		lp.v = tv
	case semVersion:
		lp.v = tv
	case plainVersion:
		lp.v = tv
	case versionPair:
		lp.r = tv.r
		lp.v = tv.v
	}

	return lp
}

// Ident returns the identifier describing the project. This includes both the
// local name (the root name by which the project is referenced in import paths)
// and the network name, where the upstream source lives.
func (lp lockedProject) Ident() ProjectIdentifier {
	return lp.pi
}

// Version assembles together whatever version and/or revision data is
// available into a single Version.
func (lp lockedProject) Version() Version {
	if lp.r == "" {
		return lp.v
	}

	if lp.v == nil {
		return lp.r
	}

	return lp.v.Pair(lp.r)
}

// Eq checks if two LockedProject instances are equal. The implementation
// assumes both Packages lists are already sorted lexicographically.
func (lp lockedProject) Eq(lp2 LockedProject) bool {
	if lp.pi != lp2.Ident() {
		return false
	}

	// Pull the unpaired-version half out of lp2, comparing revisions inline.
	var uv UnpairedVersion
	switch tv := lp2.Version().(type) {
	case Revision:
		if lp.r != tv {
			return false
		}
	case versionPair:
		if lp.r != tv.r {
			return false
		}
		uv = tv.v
	case branchVersion, semVersion, plainVersion:
		// For now, we're going to say that revisions must be present in order
		// to indicate equality. We may need to change this later, as it may be
		// more appropriate to enforce elsewhere.
		return false
	}

	// Both sides must agree on whether an unpaired version exists at all.
	v1n := lp.v == nil
	v2n := uv == nil

	if v1n != v2n {
		return false
	}

	if !v1n && !lp.v.Matches(uv) {
		return false
	}

	// Element-wise comparison relies on the sorted-packages assumption above.
	opkgs := lp2.Packages()
	if len(lp.pkgs) != len(opkgs) {
		return false
	}

	for k, v := range lp.pkgs {
		if opkgs[k] != v {
			return false
		}
	}

	return true
}

// Packages returns the list of packages from within the LockedProject that are
// actually used in the import graph. Some caveats:
//
//  * The names given are relative to the root import path for the project. If
//    the root package itself is imported, it's represented as ".".
//  * Just because a package path isn't included in this list doesn't mean it's
//    safe to remove - it could contain C files, or other assets, that can't be
//    safely removed.
//  * The slice is not a copy. If you need to modify it, copy it first.
func (lp lockedProject) Packages() []string {
	return lp.pkgs
}

func (lp lockedProject) String() string {
	return fmt.Sprintf("%s@%s with packages: %v",
		lp.Ident(), lp.Version(), lp.pkgs)
}

// safeLock is a defensive, solver-private copy of a Lock's data.
type safeLock struct {
	p []LockedProject
	i []string
}

func (sl safeLock) InputImports() []string {
	return sl.i
}

func (sl safeLock) Projects() []LockedProject {
	return sl.p
}

// prepLock ensures a lock is prepared and safe for use by the solver. This is
// mostly about defensively ensuring that no outside routine can modify the lock
// while the solver is in-flight.
//
// This is achieved by copying the lock's data into a new safeLock.
func prepLock(l Lock) safeLock {
	pl := l.Projects()

	rl := safeLock{
		p: make([]LockedProject, len(pl)),
	}
	copy(rl.p, pl)

	rl.i = make([]string, len(l.InputImports()))
	copy(rl.i, l.InputImports())

	return rl
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/prune.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "fmt" "os" "path/filepath" "sort" "strings" "github.com/golang/dep/internal/fs" "github.com/pkg/errors" ) // PruneOptions represents the pruning options used to write the dependecy tree. type PruneOptions uint8 const ( // PruneNestedVendorDirs indicates if nested vendor directories should be pruned. PruneNestedVendorDirs PruneOptions = 1 << iota // PruneUnusedPackages indicates if unused Go packages should be pruned. PruneUnusedPackages // PruneNonGoFiles indicates if non-Go files should be pruned. // Files matching licenseFilePrefixes and legalFileSubstrings are kept in // an attempt to comply with legal requirements. PruneNonGoFiles // PruneGoTestFiles indicates if Go test files should be pruned. PruneGoTestFiles ) // PruneOptionSet represents trinary distinctions for each of the types of // prune rules (as expressed via PruneOptions): nested vendor directories, // unused packages, non-go files, and go test files. // // The three-way distinction is between "none", "true", and "false", represented // by uint8 values of 0, 1, and 2, respectively. // // This trinary distinction is necessary in order to record, with full fidelity, // a cascading tree of pruning values, as expressed in CascadingPruneOptions; a // simple boolean cannot delineate between "false" and "none". type PruneOptionSet struct { NestedVendor uint8 UnusedPackages uint8 NonGoFiles uint8 GoTests uint8 } // CascadingPruneOptions is a set of rules for pruning a dependency tree. // // The DefaultOptions are the global default pruning rules, expressed as a // single PruneOptions bitfield. These global rules will cascade down to // individual project rules, unless superseded. 
type CascadingPruneOptions struct { DefaultOptions PruneOptions PerProjectOptions map[ProjectRoot]PruneOptionSet } // ParsePruneOptions extracts PruneOptions from a string using the standard // encoding. func ParsePruneOptions(input string) (PruneOptions, error) { var po PruneOptions for _, char := range input { switch char { case 'T': po |= PruneGoTestFiles case 'U': po |= PruneUnusedPackages case 'N': po |= PruneNonGoFiles case 'V': po |= PruneNestedVendorDirs default: return 0, errors.Errorf("unknown pruning code %q", char) } } return po, nil } func (po PruneOptions) String() string { var buf bytes.Buffer if po&PruneNonGoFiles != 0 { fmt.Fprintf(&buf, "N") } if po&PruneUnusedPackages != 0 { fmt.Fprintf(&buf, "U") } if po&PruneGoTestFiles != 0 { fmt.Fprintf(&buf, "T") } if po&PruneNestedVendorDirs != 0 { fmt.Fprintf(&buf, "V") } return buf.String() } // PruneOptionsFor returns the PruneOptions bits for the given project, // indicating which pruning rules should be applied to the project's code. // // It computes the cascade from default to project-specific options (if any) on // the fly. 
func (o CascadingPruneOptions) PruneOptionsFor(pr ProjectRoot) PruneOptions { po, has := o.PerProjectOptions[pr] if !has { return o.DefaultOptions } ops := o.DefaultOptions if po.NestedVendor != 0 { if po.NestedVendor == 1 { ops |= PruneNestedVendorDirs } else { ops &^= PruneNestedVendorDirs } } if po.UnusedPackages != 0 { if po.UnusedPackages == 1 { ops |= PruneUnusedPackages } else { ops &^= PruneUnusedPackages } } if po.NonGoFiles != 0 { if po.NonGoFiles == 1 { ops |= PruneNonGoFiles } else { ops &^= PruneNonGoFiles } } if po.GoTests != 0 { if po.GoTests == 1 { ops |= PruneGoTestFiles } else { ops &^= PruneGoTestFiles } } return ops } func defaultCascadingPruneOptions() CascadingPruneOptions { return CascadingPruneOptions{ DefaultOptions: PruneNestedVendorDirs, PerProjectOptions: map[ProjectRoot]PruneOptionSet{}, } } var ( // licenseFilePrefixes is a list of name prefixes for license files. licenseFilePrefixes = []string{ "license", "licence", "copying", "unlicense", "copyright", "copyleft", } // legalFileSubstrings contains substrings that are likey part of a legal // declaration file. legalFileSubstrings = []string{ "authors", "contributors", "legal", "notice", "disclaimer", "patent", "third-party", "thirdparty", } ) // PruneProject remove excess files according to the options passed, from // the lp directory in baseDir. 
func PruneProject(baseDir string, lp LockedProject, options PruneOptions) error { fsState, err := deriveFilesystemState(baseDir) if err != nil { return errors.Wrap(err, "could not derive filesystem state") } if (options & PruneNestedVendorDirs) != 0 { if err := pruneVendorDirs(fsState); err != nil { return errors.Wrapf(err, "failed to prune nested vendor directories") } } if (options & PruneUnusedPackages) != 0 { if _, err := pruneUnusedPackages(lp, fsState); err != nil { return errors.Wrap(err, "failed to prune unused packages") } } if (options & PruneNonGoFiles) != 0 { if err := pruneNonGoFiles(fsState); err != nil { return errors.Wrap(err, "failed to prune non-Go files") } } if (options & PruneGoTestFiles) != 0 { if err := pruneGoTestFiles(fsState); err != nil { return errors.Wrap(err, "failed to prune Go test files") } } if err := deleteEmptyDirs(fsState); err != nil { return errors.Wrap(err, "could not delete empty dirs") } return nil } // pruneVendorDirs deletes all nested vendor directories within baseDir. func pruneVendorDirs(fsState filesystemState) error { for _, dir := range fsState.dirs { if filepath.Base(dir) == "vendor" { err := os.RemoveAll(filepath.Join(fsState.root, dir)) if err != nil && !os.IsNotExist(err) { return err } } } for _, link := range fsState.links { if filepath.Base(link.path) == "vendor" { err := os.Remove(filepath.Join(fsState.root, link.path)) if err != nil && !os.IsNotExist(err) { return err } } } return nil } // pruneUnusedPackages deletes unimported packages found in fsState. // Determining whether packages are imported or not is based on the passed LockedProject. 
func pruneUnusedPackages(lp LockedProject, fsState filesystemState) (map[string]interface{}, error) { unusedPackages := calculateUnusedPackages(lp, fsState) toDelete := collectUnusedPackagesFiles(fsState, unusedPackages) for _, path := range toDelete { if err := os.Remove(path); err != nil && !os.IsNotExist(err) { return nil, err } } return unusedPackages, nil } // calculateUnusedPackages generates a list of unused packages in lp. func calculateUnusedPackages(lp LockedProject, fsState filesystemState) map[string]interface{} { unused := make(map[string]interface{}) imported := make(map[string]interface{}) for _, pkg := range lp.Packages() { imported[pkg] = nil } // Add the root package if it's not imported. if _, ok := imported["."]; !ok { unused["."] = nil } for _, dirPath := range fsState.dirs { pkg := filepath.ToSlash(dirPath) if _, ok := imported[pkg]; !ok { unused[pkg] = nil } } return unused } // collectUnusedPackagesFiles returns a slice of all files in the unused // packages based on fsState. func collectUnusedPackagesFiles(fsState filesystemState, unusedPackages map[string]interface{}) []string { // TODO(ibrasho): is this useful? files := make([]string, 0, len(unusedPackages)) for _, path := range fsState.files { // Keep preserved files. 
if isPreservedFile(filepath.Base(path)) { continue } pkg := filepath.ToSlash(filepath.Dir(path)) if _, ok := unusedPackages[pkg]; ok { files = append(files, filepath.Join(fsState.root, path)) } } return files } func isSourceFile(path string) bool { ext := fileExt(path) // Refer to: https://github.com/golang/go/blob/release-branch.go1.9/src/go/build/build.go#L750 switch ext { case ".go": return true case ".c": return true case ".cc", ".cpp", ".cxx": return true case ".m": return true case ".h", ".hh", ".hpp", ".hxx": return true case ".f", ".F", ".for", ".f90": return true case ".s": return true case ".S": return true case ".swig": return true case ".swigcxx": return true case ".syso": return true } return false } // pruneNonGoFiles delete all non-Go files existing in fsState. // // Files matching licenseFilePrefixes and legalFileSubstrings are not pruned. func pruneNonGoFiles(fsState filesystemState) error { toDelete := make([]string, 0, len(fsState.files)/4) for _, path := range fsState.files { if isSourceFile(path) { continue } // Ignore preserved files. if isPreservedFile(filepath.Base(path)) { continue } toDelete = append(toDelete, filepath.Join(fsState.root, path)) } for _, path := range toDelete { if err := os.Remove(path); err != nil && !os.IsNotExist(err) { return err } } return nil } // isPreservedFile checks if the file name indicates that the file should be // preserved based on licenseFilePrefixes or legalFileSubstrings. // This applies only to non-source files. func isPreservedFile(name string) bool { if isSourceFile(name) { return false } name = strings.ToLower(name) for _, prefix := range licenseFilePrefixes { if strings.HasPrefix(name, prefix) { return true } } for _, substring := range legalFileSubstrings { if strings.Contains(name, substring) { return true } } return false } // pruneGoTestFiles deletes all Go test files (*_test.go) in fsState. 
func pruneGoTestFiles(fsState filesystemState) error { toDelete := make([]string, 0, len(fsState.files)/2) for _, path := range fsState.files { if strings.HasSuffix(path, "_test.go") { toDelete = append(toDelete, filepath.Join(fsState.root, path)) } } for _, path := range toDelete { if err := os.Remove(path); err != nil && !os.IsNotExist(err) { return err } } return nil } func deleteEmptyDirs(fsState filesystemState) error { sort.Sort(sort.Reverse(sort.StringSlice(fsState.dirs))) for _, dir := range fsState.dirs { path := filepath.Join(fsState.root, dir) notEmpty, err := fs.IsNonEmptyDir(path) if err != nil { return err } if !notEmpty { if err := os.Remove(path); err != nil && !os.IsNotExist(err) { return err } } } return nil } func fileExt(name string) string { i := strings.LastIndex(name, ".") if i < 0 { return "" } return name[i:] }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_bolt.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "fmt" "log" "os" "path" "path/filepath" "strings" "time" "github.com/boltdb/bolt" "github.com/golang/dep/gps/internal/pb" "github.com/golang/dep/gps/pkgtree" "github.com/golang/protobuf/proto" "github.com/jmank88/nuts" "github.com/pkg/errors" ) // boltCacheFilename is a versioned filename for the bolt cache. The version // must be incremented whenever incompatible changes are made. const boltCacheFilename = "bolt-v1.db" // boltCache manages a bolt.DB cache and provides singleSourceCaches. type boltCache struct { db *bolt.DB epoch int64 // getters will not return values older than this unix timestamp logger *log.Logger // info logging } // newBoltCache returns a new boltCache backed by a BoltDB file under the cache directory. func newBoltCache(cd string, epoch int64, logger *log.Logger) (*boltCache, error) { path := filepath.Join(cd, boltCacheFilename) dir := filepath.Dir(path) if fi, err := os.Stat(dir); os.IsNotExist(err) { if err := os.MkdirAll(dir, os.ModeDir|os.ModePerm); err != nil { return nil, errors.Wrapf(err, "failed to create source cache directory: %s", dir) } } else if err != nil { return nil, errors.Wrapf(err, "failed to check source cache directory: %s", dir) } else if !fi.IsDir() { return nil, errors.Wrapf(err, "source cache path is not directory: %s", dir) } db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, errors.Wrapf(err, "failed to open BoltDB cache file %q", path) } return &boltCache{ db: db, epoch: epoch, logger: logger, }, nil } // newSingleSourceCache returns a new singleSourceCache for pi. func (c *boltCache) newSingleSourceCache(pi ProjectIdentifier) singleSourceCache { return &singleSourceCacheBolt{ boltCache: c, sourceName: []byte(pi.normalizedSource()), } } // close releases all cache resources. 
func (c *boltCache) close() error { return errors.Wrapf(c.db.Close(), "error closing Bolt database %q", c.db.String()) } // singleSourceCacheBolt implements a singleSourceCache backed by a persistent BoltDB file. // Version mappings are timestamped, and the `epoch` field limits the age of returned values. // Database access methods are safe for concurrent use. // // Implementation: // // Each source has a top-level bucket containing sub-buckets for (1) versions and (2) revisions. // // 1) Versions buckets hold version keys with revision values: // // Bucket: "v<timestamp>" // Keys: Unpaired Versions serialized via ConstraintMsg // Values: "<revision>" // // 2) Revision buckets hold (a) manifest and lock data for various ProjectAnalyzers, // (b) package trees, and (c) version lists. // // Bucket: "r<revision>" // // a) Manifest and Lock info are stored in buckets derived from ProjectAnalyzer.Info: // // Sub-Bucket: "<name>.<version>m", "<name>.<version>l" // Keys/Values: Manifest or Lock fields // // b) Package tree buckets contain package import path keys and package-or-error buckets: // // Sub-Bucket: "p" // Sub-Bucket: "<import_path>" // Key/Values: PackageOrErr fields // // c) Revision-versions buckets contain lists of version values: // // Sub-Bucket: "v<timestamp>" // Keys: "<sequence_number>" // Values: Unpaired Versions serialized via ConstraintMsg type singleSourceCacheBolt struct { *boltCache sourceName []byte } func (s *singleSourceCacheBolt) setManifestAndLock(rev Revision, ai ProjectAnalyzerInfo, m Manifest, l Lock) { err := s.updateRevBucket(rev, func(b *bolt.Bucket) error { info := ai.String() name := make([]byte, len(info)+1) copy(name, info) name[len(info)] = 'm' if b.Bucket(name) != nil { if err := b.DeleteBucket(name); err != nil { return err } } // Manifest mb, err := b.CreateBucket(name) if err != nil { return err } if err := cachePutManifest(mb, m); err != nil { return errors.Wrap(err, "failed to put manifest") } if l == nil { return nil } // 
Lock name[len(info)] = 'l' if b.Bucket(name) != nil { if err := b.DeleteBucket(name); err != nil { return err } } lb, err := b.CreateBucket(name) if err != nil { return err } return errors.Wrap(cachePutLock(lb, l), "failed to put lock") }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to cache manifest/lock for revision %q, analyzer: %v", rev, ai)) } } func (s *singleSourceCacheBolt) getManifestAndLock(rev Revision, ai ProjectAnalyzerInfo) (m Manifest, l Lock, ok bool) { err := s.viewRevBucket(rev, func(b *bolt.Bucket) error { info := ai.String() name := make([]byte, len(info)+1) copy(name, info) name[len(info)] = 'm' // Manifest mb := b.Bucket(name) if mb == nil { return nil } var err error m, err = cacheGetManifest(mb) if err != nil { return errors.Wrap(err, "failed to get manifest") } // Lock name[len(info)] = 'l' lb := b.Bucket(name) if lb == nil { ok = true return nil } l, err = cacheGetLock(lb) if err != nil { return errors.Wrap(err, "failed to get lock") } ok = true return nil }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to get cached manifest/lock for revision %q, analyzer: %v", rev, ai)) } return } func (s *singleSourceCacheBolt) setPackageTree(rev Revision, ptree pkgtree.PackageTree) { err := s.updateRevBucket(rev, func(b *bolt.Bucket) error { if b.Bucket(cacheKeyPTree) != nil { if err := b.DeleteBucket(cacheKeyPTree); err != nil { return err } } ptrees, err := b.CreateBucket(cacheKeyPTree) if err != nil { return err } root := string(ptree.ImportRoot) for ip, poe := range ptree.Packages { // Stored by relative import path. 
rip := strings.TrimPrefix(ip, root) if rip == "" { rip = "/" } pb, err := ptrees.CreateBucket([]byte(rip)) if err != nil { return err } if err := cachePutPackageOrErr(pb, poe); err != nil { return err } } return nil }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to cache package tree for revision %q", rev)) } } func (s *singleSourceCacheBolt) getPackageTree(rev Revision, pr ProjectRoot) (ptree pkgtree.PackageTree, ok bool) { err := s.viewRevBucket(rev, func(b *bolt.Bucket) error { ptrees := b.Bucket(cacheKeyPTree) if ptrees == nil { return nil } pkgs := make(map[string]pkgtree.PackageOrErr) err := ptrees.ForEach(func(rip, _ []byte) error { poe, err := cacheGetPackageOrErr(ptrees.Bucket(rip)) if err != nil { return err } srip := string(rip) if srip == "/" { srip = "" } // Return full import paths. ip := path.Join(string(pr), srip) if poe.Err == nil { poe.P.ImportPath = ip } pkgs[ip] = poe return nil }) if err != nil { return err } ptree.ImportRoot = string(pr) ptree.Packages = pkgs ok = true return nil }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to get cached package tree for revision %q", rev)) } return } func (s *singleSourceCacheBolt) markRevisionExists(rev Revision) { err := s.updateRevBucket(rev, func(versions *bolt.Bucket) error { return nil }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to mark revision %q in cache", rev)) } } func (s *singleSourceCacheBolt) setVersionMap(pvs []PairedVersion) { err := s.updateSourceBucket(func(src *bolt.Bucket) error { if err := cachePrefixDelete(src, cacheVersion); err != nil { return err } vk := cacheTimestampedKey(cacheVersion, time.Now()) versions, err := src.CreateBucket(vk) if err != nil { return err } c := src.Cursor() for k, _ := c.Seek(cacheKeyRevision); len(k) > 0 && k[0] == cacheRevision; k, _ = c.Next() { rb := src.Bucket(k) if err := cachePrefixDelete(rb, cacheVersion); err != nil { return err } } revVersions := make(map[Revision]*bolt.Bucket) key := make(nuts.Key, 
nuts.KeyLen(uint64(len(pvs)-1))) var msg pb.Constraint for i, pv := range pvs { uv, rev := pv.Unpair(), pv.Revision() uv.copyTo(&msg) uvB, err := proto.Marshal(&msg) if err != nil { return errors.Wrapf(err, "failed to serialize UnpairedVersion: %#v", uv) } if err := versions.Put(uvB, []byte(rev)); err != nil { return errors.Wrap(err, "failed to put version->revision") } b, err := src.CreateBucketIfNotExists(cacheRevisionName(rev)) if err != nil { return errors.Wrapf(err, "failed to create bucket for revision: %s", rev) } var versions *bolt.Bucket if versions = revVersions[rev]; versions == nil { err := cachePrefixDelete(b, cacheVersion) if err != nil { return err } versions, err = b.CreateBucket(vk) if err != nil { return errors.Wrapf(err, "failed to create bucket for revision versions: %s", rev) } revVersions[rev] = versions } key.Put(uint64(i)) if err := versions.Put(key, uvB); err != nil { return errors.Wrap(err, "failed to put revision->version") } } return nil }) if err != nil { s.logger.Println(errors.Wrap(err, "failed to cache version map")) } } func (s *singleSourceCacheBolt) getVersionsFor(rev Revision) (uvs []UnpairedVersion, ok bool) { err := s.viewRevBucket(rev, func(b *bolt.Bucket) error { versions := cacheFindLatestValid(b, cacheVersion, s.epoch) if versions == nil { return nil } ok = true var msg pb.Constraint return versions.ForEach(func(_, v []byte) error { if err := proto.Unmarshal(v, &msg); err != nil { return err } uv, err := unpairedVersionFromCache(&msg) if err != nil { return err } uvs = append(uvs, uv) return nil }) }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to get cached versions for revision %q", rev)) return nil, false } return } func (s *singleSourceCacheBolt) getAllVersions() (pvs []PairedVersion, ok bool) { err := s.viewSourceBucket(func(src *bolt.Bucket) error { versions := cacheFindLatestValid(src, cacheVersion, s.epoch) if versions == nil { return nil } var msg pb.Constraint return versions.ForEach(func(k, v 
[]byte) error { if err := proto.Unmarshal(k, &msg); err != nil { return err } uv, err := unpairedVersionFromCache(&msg) if err != nil { return err } pvs = append(pvs, uv.Pair(Revision(v))) ok = true return nil }) }) if err != nil { s.logger.Println(errors.Wrap(err, "failed to get all cached versions")) return nil, false } return } func (s *singleSourceCacheBolt) getRevisionFor(uv UnpairedVersion) (rev Revision, ok bool) { err := s.viewSourceBucket(func(src *bolt.Bucket) error { versions := cacheFindLatestValid(src, cacheVersion, s.epoch) if versions == nil { return nil } var msg pb.Constraint uv.copyTo(&msg) b, err := proto.Marshal(&msg) if err != nil { return errors.Wrapf(err, "failed to serialize UnpairedVersion: %#v", uv) } v := versions.Get(b) if len(v) > 0 { rev = Revision(v) ok = true } return nil }) if err != nil { s.logger.Println(errors.Wrapf(err, "failed to get cached revision for unpaired version: %v", uv)) } return } func (s *singleSourceCacheBolt) toRevision(v Version) (rev Revision, ok bool) { switch t := v.(type) { case Revision: return t, true case PairedVersion: return t.Revision(), true case UnpairedVersion: return s.getRevisionFor(t) default: s.logger.Println(fmt.Sprintf("failed to get cached revision for version %v: unknown type %T", v, v)) return "", false } } func (s *singleSourceCacheBolt) toUnpaired(v Version) (uv UnpairedVersion, ok bool) { const errMsg = "failed to get cached unpaired version for version: %v" switch t := v.(type) { case UnpairedVersion: return t, true case PairedVersion: return t.Unpair(), true case Revision: err := s.viewRevBucket(t, func(b *bolt.Bucket) error { versions := cacheFindLatestValid(b, cacheVersion, s.epoch) if versions == nil { return nil } _, v := versions.Cursor().First() if len(v) == 0 { return nil } var msg pb.Constraint if err := proto.Unmarshal(v, &msg); err != nil { return err } var err error uv, err = unpairedVersionFromCache(&msg) if err != nil { return err } ok = true return nil }) if err != nil { 
s.logger.Println(errors.Wrapf(err, errMsg, v)) } return default: s.logger.Println(fmt.Sprintf(errMsg, v)) return } } // cacheRevisionName returns the bucket name for rev. func cacheRevisionName(rev Revision) []byte { name := make([]byte, 1+len(rev)) name[0] = 'r' copy(name[1:], string(rev)) return name } // viewSourceBucket executes view with the source bucket, if it exists. func (s *singleSourceCacheBolt) viewSourceBucket(view func(b *bolt.Bucket) error) error { return s.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(s.sourceName) if b == nil { return nil } return view(b) }) } // updateSourceBucket executes update (in batch) with the source bucket, creating it first if necessary. func (s *singleSourceCacheBolt) updateSourceBucket(update func(b *bolt.Bucket) error) error { return s.db.Batch(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists(s.sourceName) if err != nil { return errors.Wrapf(err, "failed to create bucket: %s", s.sourceName) } return update(b) }) } // viewRevBucket executes view with rev's bucket for this source, if it exists. func (s *singleSourceCacheBolt) viewRevBucket(rev Revision, view func(b *bolt.Bucket) error) error { return s.viewSourceBucket(func(src *bolt.Bucket) error { b := src.Bucket(cacheRevisionName(rev)) if b == nil { return nil } return view(b) }) } // updateRevBucket executes update with rev's bucket for this source, creating it first if necessary. func (s *singleSourceCacheBolt) updateRevBucket(rev Revision, update func(b *bolt.Bucket) error) error { return s.updateSourceBucket(func(src *bolt.Bucket) error { name := cacheRevisionName(rev) b, err := src.CreateBucketIfNotExists(name) if err != nil { return errors.Wrapf(err, "failed to create bucket: %s", name) } return update(b) }) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/maybe_source_test.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "archive/tar" "compress/gzip" "context" "io" "io/ioutil" "net/url" "os" "path/filepath" "testing" "github.com/Masterminds/vcs" ) func TestMaybeGitSource_try(t *testing.T) { t.Parallel() tempDir, err := ioutil.TempDir("", "go-try-happy-test") if err != nil { t.Fatal(err) } defer func() { err = os.RemoveAll(tempDir) if err != nil { t.Error(err) } }() url, err := url.Parse(gitRemoteTestRepo) if err != nil { t.Fatal(err) } var ms maybeSource = maybeGitSource{url: url} _, err = ms.try(context.Background(), tempDir) if err != nil { t.Fatal(err) } } func TestMaybeGitSource_try_recovery(t *testing.T) { t.Parallel() tempDir, err := ioutil.TempDir("", "go-try-recovery-test") if err != nil { t.Fatal(err) } defer func() { err = os.RemoveAll(tempDir) if err != nil { t.Error(err) } }() cwd, err := os.Getwd() if err != nil { t.Fatal(err) } src := filepath.Join(cwd, "_testdata", "badrepo", "corrupt_dot_git_directory.tar") f, err := os.Open(src) if err != nil { t.Fatal(err) } defer f.Close() dest := filepath.Join(tempDir, ".git") err = untar(dest, f) if err != nil { t.Fatalf("could not untar corrupt repo into temp folder: %v\n", err) } _, err = vcs.NewGitRepo(gitRemoteTestRepo, tempDir) if err != nil { if _, ok := err.(*vcs.LocalError); !ok { t.Fatalf("expected a local error but got: %v\n", err) } } else { t.Fatal("expected getVCSRepo to fail when pointing to a corrupt local path. It is possible that vcs.GitNewRepo updated to gracefully handle this test scenario. 
Check the return of vcs.GitNewRepo.") } url, err := url.Parse(gitRemoteTestRepo) if err != nil { t.Fatal(err) } var ms maybeSource = maybeGitSource{url: url} _, err = ms.try(context.Background(), tempDir) if err != nil { t.Fatal(err) } } func untar(dst string, r io.Reader) error { gzr, err := gzip.NewReader(r) if err != nil { return err } defer gzr.Close() tr := tar.NewReader(gzr) for { header, err := tr.Next() switch { case err == io.EOF: return nil case err != nil: return err case header == nil: continue } target := filepath.Join(dst, header.Name) switch header.Typeflag { case tar.TypeDir: if _, err := os.Stat(target); err != nil { if err := os.MkdirAll(target, 0755); err != nil { return err } } case tar.TypeReg: f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) if err != nil { return err } defer f.Close() if _, err := io.Copy(f, tr); err != nil { return err } } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/deduce_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/url"
	"reflect"
	"testing"
)

// pathDeductionFixture describes one import-path deduction case: the input
// path, the project root expected to be deduced from it, and either the
// expected maybeSources, or the error expected from root deduction (rerr)
// or from source deduction (srcerr).
type pathDeductionFixture struct {
	in     string       // import path handed to the deducer
	root   string       // expected deduced project root
	rerr   error        // expected error from deduceRoot, if any
	mb     maybeSources // expected candidate sources from deduceSource
	srcerr error        // expected error from deduceSource, if any
}

// helper func to generate testing *url.URLs, panicking on err
func mkurl(s string) (u *url.URL) {
	var err error
	u, err = url.Parse(s)
	if err != nil {
		panic(fmt.Sprint("string is not a valid URL:", s))
	}
	return
}

// pathDeductionFixtures groups deduction fixtures by hosting provider; the
// map key selects which concrete pathDeducer TestDeduceFromPath exercises.
var pathDeductionFixtures = map[string][]pathDeductionFixture{
	"github": {
		{
			in:   "github.com/sdboyer/gps",
			root: "github.com/sdboyer/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
			},
		},
		{
			in:   "github.com/sdboyer/gps/foo",
			root: "github.com/sdboyer/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
			},
		},
		{
			// TODO(sdboyer) is this a problem for enforcing uniqueness? do we
			// need to collapse these extensions?
			in:   "github.com/sdboyer/gps.git/foo",
			root: "github.com/sdboyer/gps.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps.git")},
				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps.git")},
				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps.git")},
				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps.git")},
			},
		},
		{
			// scp-style ssh address collapses to a single ssh source
			in:   "git@github.com:sdboyer/gps",
			root: "github.com/sdboyer/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
			},
		},
		{
			// explicit scheme pins the single matching source
			in:   "https://github.com/sdboyer/gps",
			root: "github.com/sdboyer/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
			},
		},
		{
			in:   "https://github.com/sdboyer/gps/foo/bar",
			root: "github.com/sdboyer/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
			},
		},
		{
			// trailing hyphen in username is allowed
			in:   "github.com/sdboyer-/gps/foo",
			root: "github.com/sdboyer-/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/sdboyer-/gps")},
				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer-/gps")},
				maybeGitSource{url: mkurl("git://github.com/sdboyer-/gps")},
				maybeGitSource{url: mkurl("http://github.com/sdboyer-/gps")},
			},
		},
		{
			// single-letter username is allowed
			in:   "github.com/a/gps/foo",
			root: "github.com/a/gps",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/a/gps")},
				maybeGitSource{url: mkurl("ssh://git@github.com/a/gps")},
				maybeGitSource{url: mkurl("git://github.com/a/gps")},
				maybeGitSource{url: mkurl("http://github.com/a/gps")},
			},
		},
		// some invalid github username patterns
		{
			in:   "github.com/-sdboyer/gps/foo",
			rerr: errors.New("github.com/-sdboyer/gps/foo is not a valid path for a source on github.com"),
		},
		{
			in:   "github.com/sdbo.yer/gps/foo",
			rerr: errors.New("github.com/sdbo.yer/gps/foo is not a valid path for a source on github.com"),
		},
		{
			in:   "github.com/sdbo_yer/gps/foo",
			rerr: errors.New("github.com/sdbo_yer/gps/foo is not a valid path for a source on github.com"),
		},
		// Regression - gh does allow two-letter usernames
		{
			in:   "github.com/kr/pretty",
			root: "github.com/kr/pretty",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://github.com/kr/pretty")},
				maybeGitSource{url: mkurl("ssh://git@github.com/kr/pretty")},
				maybeGitSource{url: mkurl("git://github.com/kr/pretty")},
				maybeGitSource{url: mkurl("http://github.com/kr/pretty")},
			},
		},
	},
	"gopkg.in": {
		{
			in:   "gopkg.in/sdboyer/gps.v0",
			root: "gopkg.in/sdboyer/gps.v0",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("https://github.com/sdboyer/gps"), major: 0},
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("http://github.com/sdboyer/gps"), major: 0},
			},
		},
		{
			in:   "gopkg.in/sdboyer/gps.v0/foo",
			root: "gopkg.in/sdboyer/gps.v0",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("https://github.com/sdboyer/gps"), major: 0},
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("http://github.com/sdboyer/gps"), major: 0},
			},
		},
		{
			in:   "gopkg.in/sdboyer/gps.v1/foo/bar",
			root: "gopkg.in/sdboyer/gps.v1",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("https://github.com/sdboyer/gps"), major: 1},
				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("http://github.com/sdboyer/gps"), major: 1},
			},
		},
		{
			// two-segment gopkg.in path maps to the go-<name>/<name> repo
			in:   "gopkg.in/yaml.v1",
			root: "gopkg.in/yaml.v1",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("https://github.com/go-yaml/yaml"), major: 1},
				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("http://github.com/go-yaml/yaml"), major: 1},
			},
		},
		{
			in:   "gopkg.in/yaml.v1/foo/bar",
			root: "gopkg.in/yaml.v1",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("https://github.com/go-yaml/yaml"), major: 1},
				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("http://github.com/go-yaml/yaml"), major: 1},
			},
		},
		{
			in:   "gopkg.in/inf.v0",
			root: "gopkg.in/inf.v0",
			mb: maybeSources{
				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("https://github.com/go-inf/inf"), major: 0},
				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("http://github.com/go-inf/inf"), major: 0},
			},
		},
		{
			// gopkg.in only allows specifying major version in import path
			in:   "gopkg.in/yaml.v1.2",
			rerr: errors.New("gopkg.in/yaml.v1.2 is not a valid import path; gopkg.in only allows major versions (\"v1\" instead of \"v1.2\")"),
		},
	},
	"jazz": {
		// IBM hub devops services - fixtures borrowed from go get
		{
			in:   "hub.jazz.net/git/user1/pkgname",
			root: "hub.jazz.net/git/user1/pkgname",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
			},
		},
		{
			in:   "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
			root: "hub.jazz.net/git/user1/pkgname",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
			},
		},
		{
			in:   "hub.jazz.net/someotherprefix",
			rerr: errors.New("hub.jazz.net/someotherprefix is not a valid path for a source on hub.jazz.net"),
		},
		{
			in:   "hub.jazz.net/someotherprefix/user1/packagename",
			rerr: errors.New("hub.jazz.net/someotherprefix/user1/packagename is not a valid path for a source on hub.jazz.net"),
		},
		// Spaces are not valid in user names or package names
		{
			in:   "hub.jazz.net/git/User 1/pkgname",
			rerr: errors.New("hub.jazz.net/git/User 1/pkgname is not a valid path for a source on hub.jazz.net"),
		},
		{
			in:   "hub.jazz.net/git/user1/pkg name",
			rerr: errors.New("hub.jazz.net/git/user1/pkg name is not a valid path for a source on hub.jazz.net"),
		},
		// Dots are not valid in user names
		{
			in:   "hub.jazz.net/git/user.1/pkgname",
			rerr: errors.New("hub.jazz.net/git/user.1/pkgname is not a valid path for a source on hub.jazz.net"),
		},
		{
			// ...but dots are fine in package names
			in:   "hub.jazz.net/git/user1/pkg.name",
			root: "hub.jazz.net/git/user1/pkg.name",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkg.name")},
			},
		},
		// User names cannot have uppercase letters
		{
			in:   "hub.jazz.net/git/USER/pkgname",
			rerr: errors.New("hub.jazz.net/git/USER/pkgname is not a valid path for a source on hub.jazz.net"),
		},
	},
	"bitbucket": {
		// Plain bitbucket paths are ambiguous between hg and git, so both
		// families of candidate sources are produced, hg first.
		{
			in:   "bitbucket.org/sdboyer/reporoot",
			root: "bitbucket.org/sdboyer/reporoot",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
			},
		},
		{
			in:   "bitbucket.org/sdboyer/reporoot/foo/bar",
			root: "bitbucket.org/sdboyer/reporoot",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
			},
		},
		{
			in:   "https://bitbucket.org/sdboyer/reporoot/foo/bar",
			root: "bitbucket.org/sdboyer/reporoot",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
			},
		},
		// Less standard behaviors possible due to the hg/git ambiguity
		{
			in:   "bitbucket.org/sdboyer/reporoot.git",
			root: "bitbucket.org/sdboyer/reporoot.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.git")},
				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot.git")},
				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.git")},
			},
		},
		{
			in:   "git@bitbucket.org:sdboyer/reporoot.git",
			root: "bitbucket.org/sdboyer/reporoot.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
			},
		},
		{
			in:   "bitbucket.org/sdboyer/reporoot.hg",
			root: "bitbucket.org/sdboyer/reporoot.hg",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.hg")},
				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot.hg")},
				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.hg")},
			},
		},
		{
			in:   "hg@bitbucket.org:sdboyer/reporoot",
			root: "bitbucket.org/sdboyer/reporoot",
			mb: maybeSources{
				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
			},
		},
		{
			// scheme contradicts the .hg extension: source deduction errors
			in:     "git://bitbucket.org/sdboyer/reporoot.hg",
			root:   "bitbucket.org/sdboyer/reporoot.hg",
			srcerr: errors.New("git is not a valid scheme for accessing an hg repository"),
		},
	},
	"launchpad": {
		// tests for launchpad, mostly bazaar
		// TODO(sdboyer) need more tests to deal w/launchpad's oddities
		{
			in:   "launchpad.net/govcstestbzrrepo",
			root: "launchpad.net/govcstestbzrrepo",
			mb: maybeSources{
				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
			},
		},
		{
			in:   "launchpad.net/govcstestbzrrepo/foo/bar",
			root: "launchpad.net/govcstestbzrrepo",
			mb: maybeSources{
				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
			},
		},
		{
			in:   "launchpad.net/repo root",
			rerr: errors.New("launchpad.net/repo root is not a valid path for a source on launchpad.net"),
		},
	},
	"git.launchpad": {
		{
			in:   "git.launchpad.net/reporoot",
			root: "git.launchpad.net/reporoot",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
			},
		},
		{
			in:   "git.launchpad.net/reporoot/foo/bar",
			root: "git.launchpad.net/reporoot",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
			},
		},
		{
			in:   "git.launchpad.net/repo root",
			rerr: errors.New("git.launchpad.net/repo root is not a valid path for a source on git.launchpad.net"),
		},
	},
	"apache": {
		{
			in:   "git.apache.org/package-name.git",
			root: "git.apache.org/package-name.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
			},
		},
		{
			in:   "git.apache.org/package-name.git/foo/bar",
			root: "git.apache.org/package-name.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
			},
		},
	},
	"vcsext": {
		// VCS extension-based syntax
		{
			in:   "foobar.com/baz.git",
			root: "foobar.com/baz.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
			},
		},
		{
			in:   "foobar.com/baz.git/extra/path",
			root: "foobar.com/baz.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
			},
		},
		{
			in:   "foobar.com/baz.bzr",
			root: "foobar.com/baz.bzr",
			mb: maybeSources{
				maybeBzrSource{url: mkurl("https://foobar.com/baz.bzr")},
				maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
				maybeBzrSource{url: mkurl("bzr://foobar.com/baz.bzr")},
				maybeBzrSource{url: mkurl("http://foobar.com/baz.bzr")},
			},
		},
		{
			in:   "foo-bar.com/baz.hg",
			root: "foo-bar.com/baz.hg",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://foo-bar.com/baz.hg")},
				maybeHgSource{url: mkurl("ssh://foo-bar.com/baz.hg")},
				maybeHgSource{url: mkurl("http://foo-bar.com/baz.hg")},
			},
		},
		{
			in:   "git@foobar.com:baz.git",
			root: "foobar.com/baz.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("ssh://git@foobar.com/baz.git")},
			},
		},
		{
			in:   "bzr+ssh://foobar.com/baz.bzr",
			root: "foobar.com/baz.bzr",
			mb: maybeSources{
				maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
			},
		},
		{
			in:   "ssh://foobar.com/baz.bzr",
			root: "foobar.com/baz.bzr",
			mb: maybeSources{
				maybeBzrSource{url: mkurl("ssh://foobar.com/baz.bzr")},
			},
		},
		{
			in:   "https://foobar.com/baz.hg",
			root: "foobar.com/baz.hg",
			mb: maybeSources{
				maybeHgSource{url: mkurl("https://foobar.com/baz.hg")},
			},
		},
		{
			in:     "git://foobar.com/baz.hg",
			root:   "foobar.com/baz.hg",
			srcerr: errors.New("git is not a valid scheme for accessing hg repositories (path foobar.com/baz.hg)"),
		},
		// who knows why anyone would do this, but having a second vcs ext
		// shouldn't throw us off - only the first one counts
		{
			in:   "foobar.com/baz.git/quark/quizzle.bzr/quorum",
			root: "foobar.com/baz.git",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
			},
		},
	},
	"vanity": {
		// Vanity imports
		{
			in:   "golang.org/x/exp",
			root: "golang.org/x/exp",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
			},
		},
		{
			in:   "golang.org/x/exp/inotify",
			root: "golang.org/x/exp",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
			},
		},
		{
			in:   "golang.org/x/net/html",
			root: "golang.org/x/net",
			mb: maybeSources{
				maybeGitSource{url: mkurl("https://go.googlesource.com/net")},
			},
		},
	},
}

// TestDeduceFromPath runs every provider's fixtures against the matching
// concrete pathDeducer, checking both deduceRoot and deduceSource results
// (or the expected errors) against the fixture table.
func TestDeduceFromPath(t *testing.T) {
	do := func(typ string, fixtures []pathDeductionFixture, t *testing.T) {
		t.Run(typ, func(t *testing.T) {
			t.Parallel()

			// Select the concrete deducer under test from the fixture group key.
			var deducer pathDeducer
			switch typ {
			case "github":
				deducer = githubDeducer{regexp: ghRegex}
			case "gopkg.in":
				deducer = gopkginDeducer{regexp: gpinNewRegex}
			case "jazz":
				deducer = jazzDeducer{regexp: jazzRegex}
			case "bitbucket":
				deducer = bitbucketDeducer{regexp: bbRegex}
			case "launchpad":
				deducer = launchpadDeducer{regexp: lpRegex}
			case "git.launchpad":
				deducer = launchpadGitDeducer{regexp: glpRegex}
			case "apache":
				deducer = apacheDeducer{regexp: apacheRegex}
			case "vcsext":
				deducer = vcsExtensionDeducer{regexp: vcsExtensionRegex}
			default:
				// Should just be the vanity imports, which we do elsewhere
				t.Log("skipping")
				t.SkipNow()
			}

			// printmb renders a maybeSources list for failure messages.
			printmb := func(mb maybeSources) string {
				var buf bytes.Buffer
				fmt.Fprintf(&buf, "%v maybeSources:", len(mb))
				for _, elem := range mb {
					fmt.Fprintf(&buf, "\n\t\t%s", elem)
				}
				return buf.String()
			}

			for _, fix := range fixtures {
				fix := fix
				t.Run(fix.in, func(t *testing.T) {
					t.Parallel()

					u, in, uerr := normalizeURI(fix.in)
					if uerr != nil {
						// A bad URI is only an error if the fixture did not
						// already expect a root-deduction error.
						if fix.rerr == nil {
							t.Errorf("bad input URI %s", uerr)
						}
						t.SkipNow()
					}

					// Check root deduction against fixture expectations.
					root, rerr := deducer.deduceRoot(in)
					if fix.rerr != nil {
						if rerr == nil {
							t.Errorf("Expected error on deducing root, got none:\n\t(WNT) %s", fix.rerr)
						} else if fix.rerr.Error() != rerr.Error() {
							t.Errorf("Got unexpected error on deducing root:\n\t(GOT) %s\n\t(WNT) %s", rerr, fix.rerr)
						}
					} else if rerr != nil {
						t.Errorf("Got unexpected error on deducing root:\n\t(GOT) %s", rerr)
					} else if root != fix.root {
						t.Errorf("Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", root, fix.root)
					}

					// Check source deduction against fixture expectations.
					mb, mberr := deducer.deduceSource(in, u)
					if fix.srcerr != nil {
						if mberr == nil {
							t.Errorf("Expected error on deducing source, got none:\n\t(WNT) %s", fix.srcerr)
						} else if fix.srcerr.Error() != mberr.Error() {
							t.Errorf("Got unexpected error on deducing source:\n\t(GOT) %s\n\t(WNT) %s", mberr, fix.srcerr)
						}
					} else if mberr != nil {
						// don't complain the fix already expected an rerr
						if fix.rerr == nil {
							t.Errorf("Got unexpected error on deducing source:\n\t(GOT) %s", mberr)
						}
					} else if !reflect.DeepEqual(mb, fix.mb) {
						if mb == nil {
							t.Errorf("Deducer returned source maybes, but none expected:\n\t(GOT) (none)\n\t(WNT) %s", printmb(fix.mb))
						} else if fix.mb == nil {
							t.Errorf("Deducer returned source maybes, but none expected:\n\t(GOT) %s\n\t(WNT) (none)", printmb(mb))
						} else {
							t.Errorf("Deducer did not return expected source:\n\t(GOT) %s\n\t(WNT) %s", printmb(mb), printmb(fix.mb))
						}
					}
				})
			}
		})
	}

	runSet := func(t *testing.T) {
		for typ, fixtures := range pathDeductionFixtures {
			do(typ, fixtures, t)
		}
	}

	// Run the test set twice to ensure results are correct for both cached
	// and uncached deductions.
	t.Run("first", runSet)
	t.Run("second", runSet)
}

// TestVanityDeduction exercises the vanity-import fixtures end to end
// through a real SourceManager (network-backed go-get metadata lookup),
// checking DeduceProjectRoot, deduceRootPath and SourceURLsForPath.
func TestVanityDeduction(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping slow test in short mode")
	}

	sm, clean := mkNaiveSM(t)
	defer clean()

	vanities := pathDeductionFixtures["vanity"]
	// group to avoid sourcemanager cleanup
	ctx := context.Background()
	do := func(t *testing.T) {
		for _, fix := range vanities {
			fix := fix
			t.Run(fix.in, func(t *testing.T) {
				t.Parallel()

				pr, err := sm.DeduceProjectRoot(fix.in)
				if err != nil {
					t.Errorf("Unexpected err on deducing project root: %s", err)
					return
				} else if string(pr) != fix.root {
					t.Errorf("Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", pr, fix.root)
				}

				pd, err := sm.deduceCoord.deduceRootPath(ctx, fix.in)
				if err != nil {
					t.Errorf("Unexpected err on deducing source: %s", err)
					return
				}

				// Vanity imports resolve to exactly one git source.
				if len(pd.mb) != 1 {
					t.Errorf("Expected single maybeSource, but found: %d", len(pd.mb))
					return
				}
				goturl, wanturl := pd.mb[0].(maybeGitSource).url.String(), fix.mb[0].(maybeGitSource).url.String()
				if goturl != wanturl {
					t.Errorf("Deduced repo ident does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", goturl, wanturl)
				}

				urls, err := sm.SourceURLsForPath(fix.in)
				if err != nil {
					t.Errorf("Unexpected err on deducing source urls: %s", err)
					return
				}
				if len(urls) != 1 {
					t.Errorf("Deduced source URLs count for a vanity import should be 1, got %d", len(urls))
				}
				goturl = urls[0].String()
				if goturl != wanturl {
					t.Errorf("Deduced source URL does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", goturl, wanturl)
				}
			})
		}
	}

	// Run twice, to ensure correctness of cache
	t.Run("first", do)
	t.Run("second", do)
}

// TestVanityDeductionSchemeMismatch checks that deduceRootPath errors when
// the requested scheme (ssh) conflicts with the scheme advertised by the
// vanity import's go-get metadata.
func TestVanityDeductionSchemeMismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping slow test in short mode")
	}

	ctx := context.Background()
	cm := newSupervisor(ctx)
	dc := newDeductionCoordinator(cm)
	_, err := dc.deduceRootPath(ctx, "ssh://golang.org/exp")
	// TODO(sdboyer) this is not actually the error that it should be
	if err == nil {
		t.Error("should have errored on scheme mismatch between input and go-get metadata")
	}
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solve_failures.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
)

// a2vs renders an atom as "id@version" for failure messages, collapsing the
// synthetic root atom (rootRev or nil version) to "(root)".
func a2vs(a atom) string {
	if a.v == rootRev || a.v == nil {
		return "(root)"
	}

	return fmt.Sprintf("%s@%s", a.id, a.v)
}

// traceError is implemented by solve failures that can render a compact
// form for the solver trace, in addition to their normal Error() output.
type traceError interface {
	traceString() string
}

// noVersionError reports that no version of a project satisfied the active
// constraints (or that no versions exist at all, when fails is empty).
type noVersionError struct {
	pn    ProjectIdentifier
	fails []failedVersion
}

func (e *noVersionError) Error() string {
	if len(e.fails) == 0 {
		return fmt.Sprintf("No versions found for project %q.", e.pn.ProjectRoot)
	}

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
	for _, f := range e.fails {
		fmt.Fprintf(&buf, "\n\t%s: %s", f.v, f.f.Error())
	}

	return buf.String()
}

// traceString renders the per-version failures, preferring each failure's
// own traceString form when it implements traceError.
func (e *noVersionError) traceString() string {
	if len(e.fails) == 0 {
		// NOTE(review): fmt.Sprintf with no format verbs; go vet/staticcheck
		// (S1039) would prefer returning the plain string literal.
		return fmt.Sprintf("No versions found")
	}

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
	for _, f := range e.fails {
		if te, ok := f.f.(traceError); ok {
			fmt.Fprintf(&buf, "\n  %s: %s", f.v, te.traceString())
		} else {
			fmt.Fprintf(&buf, "\n  %s: %s", f.v, f.f.Error())
		}
	}

	return buf.String()
}

// caseMismatchFailure occurs when there are import paths that differ only by
// case. The compiler disallows this case.
type caseMismatchFailure struct {
	// goal is the depender atom that tried to introduce the case-varying name,
	// along with the case-varying name.
	goal dependency
	// current is the specific casing of a ProjectRoot that is presently
	// selected for all possible case variations of its contained unicode code
	// points.
	current ProjectRoot
	// failsib is the list of active dependencies that have determined the
	// specific casing for the target project.
	failsib []dependency
}

func (e *caseMismatchFailure) Error() string {
	if len(e.failsib) == 1 {
		str := "Could not introduce %s due to a case-only variation: it depends on %q, but %q was already established as the case variant for that project root by depender %s"
		return fmt.Sprintf(str, a2vs(e.goal.depender), e.goal.dep.Ident.ProjectRoot, e.current, a2vs(e.failsib[0].depender))
	}

	var buf bytes.Buffer

	str := "Could not introduce %s due to a case-only variation: it depends on %q, but %q was already established as the case variant for that project root by the following other dependers:\n"
	fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident.ProjectRoot, e.current)

	for _, c := range e.failsib {
		fmt.Fprintf(&buf, "\t%s\n", a2vs(c.depender))
	}

	return buf.String()
}

func (e *caseMismatchFailure) traceString() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "case-only variation in dependency on %q; %q already established by:\n", e.goal.dep.Ident.ProjectRoot, e.current)
	for _, f := range e.failsib {
		fmt.Fprintf(&buf, "%s\n", a2vs(f.depender))
	}
	return buf.String()
}

// wrongCaseFailure occurs when one or more projects - A, B, ... - depend on
// another project - Z - with an incorrect case variant, as indicated by the
// case variant used internally by Z to reference its own packages.
//
// For example, github.com/sirupsen/logrus/hooks/syslog references itself via
// github.com/sirupsen/logrus, establishing that as the canonical case variant.
type wrongCaseFailure struct {
	// correct is the canonical representation of the ProjectRoot
	correct ProjectRoot
	// goal is the incorrectly-referenced target project
	goal dependency
	// badcase is the list of active dependencies that have specified an
	// incorrect ProjectRoot casing for the project in question.
	badcase []dependency
}

func (e *wrongCaseFailure) Error() string {
	if len(e.badcase) == 1 {
		str := "Could not introduce %s; imports amongst its packages establish %q as the canonical casing for root, but %s tried to import it as %q"
		return fmt.Sprintf(str, a2vs(e.goal.depender), e.correct, a2vs(e.badcase[0].depender), e.badcase[0].dep.Ident.ProjectRoot)
	}

	var buf bytes.Buffer

	// NOTE(review): unlike caseMismatchFailure.Error, this header string has
	// no trailing "\n" before the tabbed list below — likely a formatting
	// inconsistency; confirm intended output before changing.
	str := "Could not introduce %s; imports amongst its packages establish %q as the canonical casing for root, but the following projects tried to import it as %q"
	fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.correct, e.badcase[0].dep.Ident.ProjectRoot)

	for _, c := range e.badcase {
		fmt.Fprintf(&buf, "\t%s\n", a2vs(c.depender))
	}

	return buf.String()
}

func (e *wrongCaseFailure) traceString() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "internal imports establish %q as correct casing; %q was used by:\n", e.correct, e.goal.dep.Ident.ProjectRoot)
	for _, f := range e.badcase {
		fmt.Fprintf(&buf, "%s\n", a2vs(f.depender))
	}
	return buf.String()
}

// disjointConstraintFailure occurs when attempting to introduce an atom that
// itself has an acceptable version, but one of its dependency constraints is
// disjoint with one or more dependency constraints already active for that
// identifier.
type disjointConstraintFailure struct {
	// goal is the dependency with the problematic constraint, forcing us to
	// reject the atom that introduces it.
	goal dependency
	// failsib is the list of active dependencies that are disjoint with the
	// goal dependency. This will be at least one, but may not be all of the
	// active dependencies.
	failsib []dependency
	// nofailsib is the list of active dependencies that are NOT disjoint with
	// the goal dependency. The total of nofailsib and failsib will always be
	// the total number of active dependencies on target identifier.
	nofailsib []dependency
	// c is the current constraint on the target identifier. It is intersection
	// of all the active dependencies' constraints.
	c Constraint
}

func (e *disjointConstraintFailure) Error() string {
	if len(e.failsib) == 1 {
		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s"
		return fmt.Sprintf(str, a2vs(e.goal.depender), e.goal.dep.Ident, e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), a2vs(e.failsib[0].depender))
	}

	var buf bytes.Buffer

	var sibs []dependency
	if len(e.failsib) > 1 {
		sibs = e.failsib

		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident, e.goal.dep.Constraint.String())
	} else {
		sibs = e.nofailsib

		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident, e.goal.dep.Constraint.String())
	}

	for _, c := range sibs {
		fmt.Fprintf(&buf, "\t%s from %s\n", c.dep.Constraint.String(), a2vs(c.depender))
	}

	return buf.String()
}

func (e *disjointConstraintFailure) traceString() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident)
	for _, f := range e.failsib {
		fmt.Fprintf(
			&buf,
			"%s from %s (no overlap)\n",
			f.dep.Constraint.String(),
			a2vs(f.depender),
		)
	}
	for _, f := range e.nofailsib {
		fmt.Fprintf(
			&buf,
			"%s from %s (some overlap)\n",
			f.dep.Constraint.String(),
			a2vs(f.depender),
		)
	}

	return buf.String()
}

// Indicates that an atom could not be introduced because one of its dep
// constraints does not admit the currently-selected version of the target
// project.
type constraintNotAllowedFailure struct {
	// The dependency with the problematic constraint that could not be
	// introduced.
	goal dependency
	// The (currently selected) version of the target project that was not
	// admissible by the goal dependency.
	v Version
}

func (e *constraintNotAllowedFailure) Error() string {
	return fmt.Sprintf(
		"Could not introduce %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s",
		a2vs(e.goal.depender),
		e.goal.dep.Ident,
		e.goal.dep.Constraint,
		e.v,
	)
}

func (e *constraintNotAllowedFailure) traceString() string {
	return fmt.Sprintf(
		"%s depends on %s with %s, but that's already selected at %s",
		a2vs(e.goal.depender),
		e.goal.dep.Ident.ProjectRoot,
		e.goal.dep.Constraint,
		e.v,
	)
}

// versionNotAllowedFailure describes a failure where an atom is rejected
// because its version is not allowed by current constraints.
//
// (This is one of the more straightforward types of failures)
type versionNotAllowedFailure struct {
	// goal is the atom that was rejected by current constraints.
	goal atom
	// failparent is the list of active dependencies that caused the atom to be
	// rejected. Note that this only includes dependencies that actually
	// rejected the atom, which will be at least one, but may not be all the
	// active dependencies on the atom's identifier.
	failparent []dependency
	// c is the current constraint on the atom's identifier. This is the intersection
	// of all active dependencies' constraints.
	c Constraint
}

func (e *versionNotAllowedFailure) Error() string {
	if len(e.failparent) == 1 {
		return fmt.Sprintf(
			"Could not introduce %s, as it is not allowed by constraint %s from project %s.",
			a2vs(e.goal),
			e.failparent[0].dep.Constraint.String(),
			e.failparent[0].depender.id,
		)
	}

	var buf bytes.Buffer

	fmt.Fprintf(&buf, "Could not introduce %s, as it is not allowed by constraints from the following projects:\n", a2vs(e.goal))
	for _, f := range e.failparent {
		fmt.Fprintf(&buf, "\t%s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
	}

	return buf.String()
}

func (e *versionNotAllowedFailure) traceString() string {
	var buf bytes.Buffer

	fmt.Fprintf(&buf, "%s not allowed by constraint %s:\n", a2vs(e.goal), e.c.String())
	for _, f := range e.failparent {
		fmt.Fprintf(&buf, "  %s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
	}

	return buf.String()
}

// missingSourceFailure reports that a needed source did not exist; prob is a
// printf-style format string into which the goal identifier is interpolated.
type missingSourceFailure struct {
	goal ProjectIdentifier
	prob string
}

func (e *missingSourceFailure) Error() string {
	return fmt.Sprintf(e.prob, e.goal)
}

// badOptsFailure reports invalid solver parameters; the string is the message.
type badOptsFailure string

func (e badOptsFailure) Error() string {
	return string(e)
}

// sourceMismatchFailure occurs when projects disagree about the network
// source (upstream URL) from which a shared ProjectRoot should be fetched.
type sourceMismatchFailure struct {
	// The ProjectRoot over which there is disagreement about where it should be
	// sourced from
	shared ProjectRoot
	// The current value for the network source
	current string
	// The mismatched value for the network source
	mismatch string
	// The currently selected dependencies which have agreed upon/established
	// the given network source
	sel []dependency
	// The atom with the constraint that has the new, incompatible network source
	prob atom
}

func (e *sourceMismatchFailure) Error() string {
	var cur []string
	for _, c := range e.sel {
		cur = append(cur, string(c.depender.id.ProjectRoot))
	}

	str := "Could not introduce %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
	return fmt.Sprintf(str, a2vs(e.prob), e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
}

func (e *sourceMismatchFailure) traceString() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)

	fmt.Fprintf(&buf, "  %s from %s\n", e.mismatch, e.prob.id)
	for _, dep := range e.sel {
		fmt.Fprintf(&buf, "  %s from %s\n", e.current, dep.depender.id)
	}

	return buf.String()
}

// errDeppers pairs a per-package error with the selected atoms that depend
// on that package.
type errDeppers struct {
	err     error
	deppers []atom
}

// checkeeHasProblemPackagesFailure indicates that the goal atom was rejected
// because one or more of the packages required by its deppers had errors.
//
// "errors" includes package nonexistence, which is indicated by a nil err in
// the corresponding errDeppers failpkg map value.
//
// checkeeHasProblemPackagesFailure complements depHasProblemPackagesFailure;
// one or the other could appear to describe the same fundamental issue,
// depending on the order in which dependencies were visited.
type checkeeHasProblemPackagesFailure struct {
	// goal is the atom that was rejected due to problematic packages.
	goal atom
	// failpkg is a map of package names to the error describing the problem
	// with them, plus a list of the selected atoms that require that package.
	failpkg map[string]errDeppers
}

func (e *checkeeHasProblemPackagesFailure) Error() string {
	var buf bytes.Buffer
	indent := ""

	// Only use the multi-package header and per-line indent when more than
	// one subpackage is problematic.
	if len(e.failpkg) > 1 {
		indent = "\t"
		fmt.Fprintf(
			&buf, "Could not introduce %s due to multiple problematic subpackages:\n",
			a2vs(e.goal),
		)
	}

	for pkg, errdep := range e.failpkg {
		var cause string
		if errdep.err == nil {
			cause = "is missing"
		} else {
			cause = fmt.Sprintf("does not contain usable Go code (%T).", errdep.err)
		}

		if len(e.failpkg) == 1 {
			fmt.Fprintf(
				&buf, "Could not introduce %s, as its subpackage %s %s.",
				a2vs(e.goal),
				pkg,
				cause,
			)
		} else {
			fmt.Fprintf(&buf, "\tSubpackage %s %s.", pkg, cause)
		}

		if len(errdep.deppers) == 1 {
			fmt.Fprintf(
				&buf, " (Package is required by %s.)",
				a2vs(errdep.deppers[0]),
			)
		} else {
			fmt.Fprintf(&buf, " Package is required by:")
			for _, pa := range errdep.deppers {
				fmt.Fprintf(&buf, "\n%s\t%s", indent, a2vs(pa))
			}
		}
	}

	return buf.String()
}

func (e *checkeeHasProblemPackagesFailure) traceString() string {
	var buf bytes.Buffer

	fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.ProjectRoot, e.goal.v)
	for pkg, errdep := range e.failpkg {
		if errdep.err == nil {
			fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
		} else {
			fmt.Fprintf(&buf, "\t%s has err (%T); ", pkg, errdep.err)
		}

		if len(errdep.deppers) == 1 {
			fmt.Fprintf(&buf, "required by %s.", a2vs(errdep.deppers[0]))
		} else {
			fmt.Fprintf(&buf, " required by:")
			for _, pa := range errdep.deppers {
				fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id, pa.v)
			}
		}
	}

	return buf.String()
}

// depHasProblemPackagesFailure indicates that the goal dependency was rejected
// because there were problems with one or more of the packages the dependency
// requires in the atom currently selected for that dependency. (This failure
// can only occur if the target dependency is already selected.)
//
// "errors" includes package nonexistence, which is indicated by a nil err as
// the corresponding prob map value.
//
// depHasProblemPackagesFailure complements checkeeHasProblemPackagesFailure;
// one or the other could appear to describe the same fundamental issue,
// depending on the order in which dependencies were visited.
type depHasProblemPackagesFailure struct {
	// goal is the dependency that was rejected due to the atom currently
	// selected for the dependency's target id having errors (including, and
	// probably most commonly,
	// nonexistence) in one or more packages named by the dependency.
	goal dependency
	// v is the version of the currently selected atom targeted by the goal
	// dependency.
	v Version
	// prob is a map of problem packages to their specific error. It does not
	// include missing packages.
	prob map[string]error
}

func (e *depHasProblemPackagesFailure) Error() string {
	// fcause renders the per-package cause; a nil map value means missing.
	fcause := func(pkg string) string {
		if err := e.prob[pkg]; err != nil {
			return fmt.Sprintf("does not contain usable Go code (%T).", err)
		}
		return "is missing."
	}

	if len(e.prob) == 1 {
		var pkg string
		for pkg = range e.prob {
		}

		return fmt.Sprintf(
			"Could not introduce %s, as it requires package %s from %s, but in version %s that package %s",
			a2vs(e.goal.depender),
			pkg,
			e.goal.dep.Ident,
			e.v,
			fcause(pkg),
		)
	}

	var buf bytes.Buffer
	fmt.Fprintf(
		&buf, "Could not introduce %s, as it requires problematic packages from %s (current version %s):",
		a2vs(e.goal.depender),
		e.goal.dep.Ident,
		e.v,
	)

	// Sort package names so the multi-package message is deterministic.
	pkgs := make([]string, len(e.prob))
	k := 0
	for pkg := range e.prob {
		pkgs[k] = pkg
		k++
	}
	sort.Strings(pkgs)
	for _, pkg := range pkgs {
		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
	}

	return buf.String()
}

func (e *depHasProblemPackagesFailure) traceString() string {
	var buf bytes.Buffer
	fcause := func(pkg string) string {
		if err := e.prob[pkg]; err != nil {
			return fmt.Sprintf("has parsing err (%T).", err)
		}
		return "is missing"
	}

	fmt.Fprintf(
		&buf, "%s depping on %s at %s has problem subpkg(s):",
		a2vs(e.goal.depender), e.goal.dep.Ident, e.v,
	)

	pkgs := make([]string, len(e.prob))
	k := 0
	for pkg := range e.prob
{ pkgs[k] = pkg k++ } sort.Strings(pkgs) for _, pkg := range pkgs { fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg)) } return buf.String() } // nonexistentRevisionFailure indicates that a revision constraint was specified // for a given project, but that that revision does not exist in the source // repository. type nonexistentRevisionFailure struct { goal dependency r Revision } func (e *nonexistentRevisionFailure) Error() string { return fmt.Sprintf( "Could not introduce %s, as it requires %s at revision %s, but that revision does not exist", a2vs(e.goal.depender), e.goal.dep.Ident, e.r, ) } func (e *nonexistentRevisionFailure) traceString() string { return fmt.Sprintf( "%s wants missing rev %s of %s", a2vs(e.goal.depender), e.r, e.goal.dep.Ident, ) }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/vcs_version_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "path/filepath" "testing" "github.com/golang/dep/internal/test" ) func TestVCSVersion(t *testing.T) { if testing.Short() { t.Skip("Skipping slow test in short mode") } h := test.NewHelper(t) defer h.Cleanup() requiresBins(t, "git") h.TempDir("src") gopath := h.Path(".") h.Setenv("GOPATH", gopath) importPaths := map[string]struct { rev Version checkout bool }{ "github.com/pkg/errors": { rev: NewVersion("v0.8.0").Pair("645ef00459ed84a119197bfb8d8205042c6df63d"), // semver checkout: true, }, "github.com/sirupsen/logrus": { rev: Revision("42b84f9ec624953ecbf81a94feccb3f5935c5edf"), // random sha checkout: true, }, "github.com/rsc/go-get-default-branch": { rev: NewBranch("another-branch").Pair("8e6902fdd0361e8fa30226b350e62973e3625ed5"), }, } // checkout the specified revisions for ip, info := range importPaths { h.RunGo("get", ip) repoDir := h.Path("src/" + ip) if info.checkout { h.RunGit(repoDir, "checkout", info.rev.String()) } abs := filepath.FromSlash(filepath.Join(gopath, "src", ip)) got, err := VCSVersion(abs) h.Must(err) if got != info.rev { t.Fatalf("expected %q, got %q", got.String(), info.rev.String()) } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_errors_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "testing" "github.com/Masterminds/vcs" ) func TestUnwrapVcsErrNonNil(t *testing.T) { for _, err := range []error{ vcs.NewRemoteError("msg", nil, "out"), vcs.NewRemoteError("msg", nil, ""), vcs.NewRemoteError("", nil, "out"), vcs.NewRemoteError("", nil, ""), vcs.NewLocalError("msg", nil, "out"), vcs.NewLocalError("msg", nil, ""), vcs.NewLocalError("", nil, "out"), vcs.NewLocalError("", nil, ""), &vcs.RemoteError{}, &vcs.LocalError{}, } { if unwrapVcsErr(err) == nil { t.Errorf("unexpected nil error unwrapping: %#v", err) } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/prune_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "io/ioutil" "os" "testing" "github.com/golang/dep/internal/test" ) func TestCascadingPruneOptions(t *testing.T) { cases := []struct { name string co CascadingPruneOptions results map[ProjectRoot]PruneOptions }{ { name: "all empty values", co: CascadingPruneOptions{ DefaultOptions: PruneNestedVendorDirs, PerProjectOptions: map[ProjectRoot]PruneOptionSet{ ProjectRoot("github.com/golang/dep"): {}, }, }, results: map[ProjectRoot]PruneOptions{ ProjectRoot("github.com/golang/dep"): PruneNestedVendorDirs, }, }, { name: "all overridden", co: CascadingPruneOptions{ DefaultOptions: PruneNestedVendorDirs, PerProjectOptions: map[ProjectRoot]PruneOptionSet{ ProjectRoot("github.com/golang/dep"): { NestedVendor: 2, UnusedPackages: 1, NonGoFiles: 1, GoTests: 1, }, }, }, results: map[ProjectRoot]PruneOptions{ ProjectRoot("github.com/golang/dep"): PruneUnusedPackages | PruneNonGoFiles | PruneGoTestFiles, }, }, { name: "all redundant", co: CascadingPruneOptions{ DefaultOptions: PruneNestedVendorDirs, PerProjectOptions: map[ProjectRoot]PruneOptionSet{ ProjectRoot("github.com/golang/dep"): { NestedVendor: 1, UnusedPackages: 2, NonGoFiles: 2, GoTests: 2, }, }, }, results: map[ProjectRoot]PruneOptions{ ProjectRoot("github.com/golang/dep"): PruneNestedVendorDirs, }, }, { name: "multiple projects, all combos", co: CascadingPruneOptions{ DefaultOptions: PruneNestedVendorDirs, PerProjectOptions: map[ProjectRoot]PruneOptionSet{ ProjectRoot("github.com/golang/dep"): { NestedVendor: 1, UnusedPackages: 2, NonGoFiles: 2, GoTests: 2, }, ProjectRoot("github.com/other/one"): { NestedVendor: 2, UnusedPackages: 1, NonGoFiles: 1, GoTests: 1, }, }, }, results: map[ProjectRoot]PruneOptions{ ProjectRoot("github.com/golang/dep"): PruneNestedVendorDirs, ProjectRoot("github.com/other/one"): PruneUnusedPackages | PruneNonGoFiles | 
PruneGoTestFiles, ProjectRoot("not/there"): PruneNestedVendorDirs, }, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { for pr, wanted := range c.results { if c.co.PruneOptionsFor(pr) != wanted { t.Fatalf("did not get expected final PruneOptions value from cascade:\n\t(GOT): %d\n\t(WNT): %d", c.co.PruneOptionsFor(pr), wanted) } } }) } } func TestPruneProject(t *testing.T) { h := test.NewHelper(t) defer h.Cleanup() pr := "github.com/project/repository" h.TempDir(pr) baseDir := h.Path(".") lp := lockedProject{ pi: ProjectIdentifier{ ProjectRoot: ProjectRoot(pr), }, pkgs: []string{}, } options := PruneNestedVendorDirs | PruneNonGoFiles | PruneGoTestFiles | PruneUnusedPackages err := PruneProject(baseDir, lp, options) if err != nil { t.Fatal(err) } } func TestPruneUnusedPackages(t *testing.T) { h := test.NewHelper(t) defer h.Cleanup() h.TempDir(".") pr := "github.com/sample/repository" pi := ProjectIdentifier{ProjectRoot: ProjectRoot(pr)} testcases := []struct { name string lp LockedProject fs fsTestCase err bool }{ { "one-package", lockedProject{ pi: pi, pkgs: []string{ ".", }, }, fsTestCase{ before: filesystemState{ files: []string{ "main.go", }, }, after: filesystemState{ files: []string{ "main.go", }, }, }, false, }, { "nested-package", lockedProject{ pi: pi, pkgs: []string{ "pkg", }, }, fsTestCase{ before: filesystemState{ dirs: []string{ "pkg", }, files: []string{ "main.go", "pkg/main.go", }, }, after: filesystemState{ dirs: []string{ "pkg", }, files: []string{ "pkg/main.go", }, }, }, false, }, { "complex-project", lockedProject{ pi: pi, pkgs: []string{ "pkg", "pkg/nestedpkg/otherpkg", }, }, fsTestCase{ before: filesystemState{ dirs: []string{ "pkg", "pkg/nestedpkg", "pkg/nestedpkg/otherpkg", }, files: []string{ "main.go", "COPYING", "pkg/main.go", "pkg/nestedpkg/main.go", "pkg/nestedpkg/legal.go", "pkg/nestedpkg/PATENT.md", "pkg/nestedpkg/otherpkg/main.go", }, }, after: filesystemState{ dirs: []string{ "pkg", "pkg/nestedpkg", 
"pkg/nestedpkg/otherpkg", }, files: []string{ "COPYING", "pkg/main.go", "pkg/nestedpkg/PATENT.md", "pkg/nestedpkg/otherpkg/main.go", }, }, }, false, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { h.TempDir(pr) baseDir := h.Path(pr) tc.fs.before.root = baseDir tc.fs.after.root = baseDir tc.fs.setup(t) fs, err := deriveFilesystemState(baseDir) if err != nil { t.Fatal(err) } _, err = pruneUnusedPackages(tc.lp, fs) if tc.err && err == nil { t.Fatalf("expected an error, got nil") } else if !tc.err && err != nil { t.Fatalf("unexpected error: %s", err) } tc.fs.assert(t) }) } } func TestPruneNonGoFiles(t *testing.T) { h := test.NewHelper(t) defer h.Cleanup() h.TempDir(".") testcases := []struct { name string fs fsTestCase err bool }{ { "one-file", fsTestCase{ before: filesystemState{ files: []string{ "README.md", }, }, after: filesystemState{}, }, false, }, { "multiple-files", fsTestCase{ before: filesystemState{ files: []string{ "main.go", "main_test.go", "README", }, }, after: filesystemState{ files: []string{ "main.go", "main_test.go", }, }, }, false, }, { "mixed-files", fsTestCase{ before: filesystemState{ dirs: []string{ "dir", }, files: []string{ "dir/main.go", "dir/main_test.go", "dir/db.sqlite", }, }, after: filesystemState{ dirs: []string{ "dir", }, files: []string{ "dir/main.go", "dir/main_test.go", }, }, }, false, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { h.TempDir(tc.name) baseDir := h.Path(tc.name) tc.fs.before.root = baseDir tc.fs.after.root = baseDir tc.fs.setup(t) fs, err := deriveFilesystemState(baseDir) if err != nil { t.Fatal(err) } err = pruneNonGoFiles(fs) if tc.err && err == nil { t.Errorf("expected an error, got nil") } else if !tc.err && err != nil { t.Errorf("unexpected error: %s", err) } tc.fs.assert(t) }) } } func TestPruneGoTestFiles(t *testing.T) { h := test.NewHelper(t) defer h.Cleanup() h.TempDir(".") testcases := []struct { name string fs fsTestCase err bool }{ { "one-test-file", 
fsTestCase{ before: filesystemState{ files: []string{ "main_test.go", }, }, after: filesystemState{}, }, false, }, { "multiple-files", fsTestCase{ before: filesystemState{ dirs: []string{ "dir", }, files: []string{ "dir/main_test.go", "dir/main2_test.go", }, }, after: filesystemState{ dirs: []string{ "dir", }, }, }, false, }, { "mixed-files", fsTestCase{ before: filesystemState{ dirs: []string{ "dir", }, files: []string{ "dir/main.go", "dir/main2.go", "dir/main_test.go", "dir/main2_test.go", }, }, after: filesystemState{ dirs: []string{ "dir", }, files: []string{ "dir/main.go", "dir/main2.go", }, }, }, false, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { h.TempDir(tc.name) baseDir := h.Path(tc.name) tc.fs.before.root = baseDir tc.fs.after.root = baseDir tc.fs.setup(t) fs, err := deriveFilesystemState(baseDir) if err != nil { t.Fatal(err) } err = pruneGoTestFiles(fs) if tc.err && err == nil { t.Fatalf("expected an error, got nil") } else if !tc.err && err != nil { t.Fatalf("unexpected error: %s", err) } tc.fs.assert(t) }) } } func TestPruneVendorDirs(t *testing.T) { tests := []struct { name string test fsTestCase }{ { name: "vendor directory", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", "package/vendor", }, }, after: filesystemState{ dirs: []string{ "package", }, }, }, }, { name: "vendor file", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", }, files: []string{ "package/vendor", }, }, after: filesystemState{ dirs: []string{ "package", }, files: []string{ "package/vendor", }, }, }, }, { name: "vendor symlink", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", "package/_vendor", }, links: []fsLink{ { path: "package/vendor", to: "_vendor", }, }, }, after: filesystemState{ dirs: []string{ "package", "package/_vendor", }, }, }, }, { name: "nonvendor symlink", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", "package/_vendor", }, links: []fsLink{ { path: 
"package/link", to: "_vendor", }, }, }, after: filesystemState{ dirs: []string{ "package", "package/_vendor", }, links: []fsLink{ { path: "package/link", to: "_vendor", }, }, }, }, }, { name: "vendor symlink to file", test: fsTestCase{ before: filesystemState{ files: []string{ "file", }, links: []fsLink{ { path: "vendor", to: "file", }, }, }, after: filesystemState{ files: []string{ "file", }, }, }, }, { name: "broken vendor symlink", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", }, links: []fsLink{ { path: "package/vendor", to: "nonexistence", }, }, }, after: filesystemState{ dirs: []string{ "package", }, links: []fsLink{}, }, }, }, { name: "chained symlinks", test: fsTestCase{ before: filesystemState{ dirs: []string{ "_vendor", }, links: []fsLink{ { path: "vendor", to: "vendor2", }, { path: "vendor2", to: "_vendor", }, }, }, after: filesystemState{ dirs: []string{ "_vendor", }, links: []fsLink{ { path: "vendor2", to: "_vendor", }, }, }, }, }, { name: "circular symlinks", test: fsTestCase{ before: filesystemState{ dirs: []string{ "package", }, links: []fsLink{ { path: "package/link1", to: "link2", }, { path: "package/link2", to: "link1", }, }, }, after: filesystemState{ dirs: []string{ "package", }, links: []fsLink{ { path: "package/link1", to: "link2", }, { path: "package/link2", to: "link1", }, }, }, }, }, } for _, test := range tests { t.Run(test.name, pruneVendorDirsTestCase(test.test)) } } func pruneVendorDirsTestCase(tc fsTestCase) func(*testing.T) { return func(t *testing.T) { tempDir, err := ioutil.TempDir("", "pruneVendorDirsTestCase") if err != nil { t.Fatalf("ioutil.TempDir err=%q", err) } defer func() { if err := os.RemoveAll(tempDir); err != nil { t.Errorf("os.RemoveAll(%q) err=%q", tempDir, err) } }() tc.before.root = tempDir tc.after.root = tempDir tc.setup(t) fs, err := deriveFilesystemState(tempDir) if err != nil { t.Fatalf("deriveFilesystemState failed: %s", err) } if err := pruneVendorDirs(fs); err != nil { 
t.Errorf("pruneVendorDirs err=%q", err) } tc.assert(t) } } func TestDeleteEmptyDirs(t *testing.T) { testcases := []struct { name string fs fsTestCase }{ { name: "empty-dir", fs: fsTestCase{ before: filesystemState{ dirs: []string{ "pkg1", }, }, after: filesystemState{}, }, }, { name: "nested-empty-dirs", fs: fsTestCase{ before: filesystemState{ dirs: []string{ "pkg1", "pkg1/pkg2", }, }, after: filesystemState{}, }, }, { name: "non-empty-dir", fs: fsTestCase{ before: filesystemState{ dirs: []string{ "pkg1", }, files: []string{ "pkg1/file1", }, }, after: filesystemState{ dirs: []string{ "pkg1", }, files: []string{ "pkg1/file1", }, }, }, }, { name: "mixed-dirs", fs: fsTestCase{ before: filesystemState{ dirs: []string{ "pkg1", "pkg1/pkg2", }, files: []string{ "pkg1/file1", }, }, after: filesystemState{ dirs: []string{ "pkg1", }, files: []string{ "pkg1/file1", }, }, }, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { h := test.NewHelper(t) h.Cleanup() h.TempDir(".") tc.fs.before.root = h.Path(".") tc.fs.after.root = h.Path(".") if err := tc.fs.before.setup(); err != nil { t.Fatal("unexpected error in fs setup: ", err) } if err := deleteEmptyDirs(tc.fs.before); err != nil { t.Fatal("unexpected error in deleteEmptyDirs: ", err) } tc.fs.assert(t) }) } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/typed_radix.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "strings" "sync" "github.com/armon/go-radix" ) // Typed implementations of radix trees. These are just simple wrappers that let // us avoid having to type assert anywhere else, cleaning up other code a bit. // // Some of the more annoying things to implement (like walks) aren't // implemented. They can be added if/when we actually need them. // // Oh generics, where art thou... type deducerTrie struct { sync.RWMutex t *radix.Tree } func newDeducerTrie() *deducerTrie { return &deducerTrie{ t: radix.New(), } } // Suppress unused warning. var _ = (*deducerTrie)(nil).Delete // Delete is used to delete a key, returning the previous value and if it was deleted func (t *deducerTrie) Delete(s string) (pathDeducer, bool) { t.Lock() defer t.Unlock() if d, had := t.t.Delete(s); had { return d.(pathDeducer), had } return nil, false } // Insert is used to add a newentry or update an existing entry. Returns if updated. func (t *deducerTrie) Insert(s string, d pathDeducer) (pathDeducer, bool) { t.Lock() defer t.Unlock() if d2, had := t.t.Insert(s, d); had { return d2.(pathDeducer), had } return nil, false } // LongestPrefix is like Get, but instead of an exact match, it will return the // longest prefix match. func (t *deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) { t.RLock() defer t.RUnlock() if p, d, has := t.t.LongestPrefix(s); has { return p, d.(pathDeducer), has } return "", nil, false } // isPathPrefixOrEqual is an additional helper check to ensure that the literal // string prefix returned from a radix tree prefix match is also a path tree // match. // // The radix tree gets it mostly right, but we have to guard against // possibilities like this: // // github.com/sdboyer/foo // github.com/sdboyer/foobar/baz // // The latter would incorrectly be conflated with the former. 
As we know we're // operating on strings that describe import paths, guard against this case by // verifying that either the input is the same length as the match (in which // case we know they're equal), or that the next character is a "/". (Import // paths are defined to always use "/", not the OS-specific path separator.) func isPathPrefixOrEqual(pre, path string) bool { prflen, pathlen := len(pre), len(path) if pathlen == prflen+1 { // this can never be the case return false } // we assume something else (a trie) has done equality check up to the point // of the prefix, so we just check len return prflen == pathlen || strings.Index(path[prflen:], "/") == 0 }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/constraint_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "fmt" "testing" "github.com/golang/dep/gps/internal/pb" "github.com/golang/protobuf/proto" "github.com/pkg/errors" ) // gu - helper func for stringifying what we assume is a VersionPair (otherwise // will panic), but is given as a Constraint func gu(v Constraint) string { return fmt.Sprintf("%q at rev %q", v, v.(PairedVersion).Revision()) } func TestBranchConstraintOps(t *testing.T) { v1 := NewBranch("master").(branchVersion) v2 := NewBranch("test").(branchVersion) if !v1.MatchesAny(any) { t.Errorf("Branches should always match the any constraint") } if v1.Intersect(any) != v1 { t.Errorf("Branches should always return self when intersecting the any constraint, but got %s", v1.Intersect(any)) } if v1.MatchesAny(none) { t.Errorf("Branches should never match the none constraint") } if v1.Intersect(none) != none { t.Errorf("Branches should always return none when intersecting the none constraint, but got %s", v1.Intersect(none)) } if v1.Matches(v2) { t.Errorf("%s should not match %s", v1, v2) } if v1.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v1, v2) } if v1.Intersect(v2) != none { t.Errorf("Intersection of %s with %s should result in empty set", v1, v2) } // Add rev to one snuffster := Revision("snuffleupagus") v3 := v1.Pair(snuffster).(versionPair) if v2.Matches(v3) { t.Errorf("%s should not match %s", v2, gu(v3)) } if v3.Matches(v2) { t.Errorf("%s should not match %s", gu(v3), v2) } if v2.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v3.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v2.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3)) } if v3.Intersect(v2) != none { t.Errorf("Intersection of %s 
with %s should result in empty set", gu(v3), v2) } // Add different rev to the other v4 := v2.Pair(Revision("cookie monster")).(versionPair) if v4.Matches(v3) { t.Errorf("%s should not match %s", gu(v4), gu(v3)) } if v3.Matches(v4) { t.Errorf("%s should not match %s", gu(v3), gu(v4)) } if v4.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v3.MatchesAny(v4) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v4.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3)) } if v3.Intersect(v4) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4)) } // Now add same rev to different branches v5 := v2.Pair(Revision("snuffleupagus")).(versionPair) if !v5.Matches(v3) { t.Errorf("%s should match %s", gu(v5), gu(v3)) } if !v3.Matches(v5) { t.Errorf("%s should match %s", gu(v3), gu(v5)) } if !v5.MatchesAny(v3) { t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3)) } if !v3.MatchesAny(v5) { t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3)) } if v5.Intersect(v3) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3)) } if v3.Intersect(v5) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5)) } // Set up for cross-type constraint ops cookie := Revision("cookie monster") o1 := NewVersion("master").(plainVersion) o2 := NewVersion("1.0.0").(semVersion) o3 := o1.Pair(cookie).(versionPair) o4 := o2.Pair(cookie).(versionPair) v6 := v1.Pair(cookie).(versionPair) if v1.Matches(o1) { t.Errorf("%s (branch) should not match %s (version) across types", v1, o1) } if v1.MatchesAny(o1) { t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, o1) } if v1.Intersect(o1) != none { t.Errorf("Intersection of %s (branch) with %s (version) should result 
in empty set", v1, o1) } if v1.Matches(o2) { t.Errorf("%s (branch) should not match %s (semver) across types", v1, o2) } if v1.MatchesAny(o2) { t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, o2) } if v1.Intersect(o2) != none { t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, o2) } if v1.Matches(o3) { t.Errorf("%s (branch) should not match %s (version) across types", v1, gu(o3)) } if v1.MatchesAny(o3) { t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, gu(o3)) } if v1.Intersect(o3) != none { t.Errorf("Intersection of %s (branch) with %s (version) should result in empty set", v1, gu(o3)) } if v1.Matches(o4) { t.Errorf("%s (branch) should not match %s (semver) across types", v1, gu(o4)) } if v1.MatchesAny(o4) { t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, gu(o4)) } if v1.Intersect(o4) != none { t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, gu(o4)) } if !v6.Matches(o3) { t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o3)) } if !v6.MatchesAny(o3) { t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o3)) } if v6.Intersect(o3) != cookie { t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o3)) } if !v6.Matches(o4) { t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o4)) } if !v6.MatchesAny(o4) { t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o4)) } if v6.Intersect(o4) != cookie { t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o4)) } } func TestVersionConstraintOps(t *testing.T) { v1 := NewVersion("ab123").(plainVersion) v2 := 
NewVersion("b2a13").(plainVersion) if !v1.MatchesAny(any) { t.Errorf("Versions should always match the any constraint") } if v1.Intersect(any) != v1 { t.Errorf("Versions should always return self when intersecting the any constraint, but got %s", v1.Intersect(any)) } if v1.MatchesAny(none) { t.Errorf("Versions should never match the none constraint") } if v1.Intersect(none) != none { t.Errorf("Versions should always return none when intersecting the none constraint, but got %s", v1.Intersect(none)) } if v1.Matches(v2) { t.Errorf("%s should not match %s", v1, v2) } if v1.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v1, v2) } if v1.Intersect(v2) != none { t.Errorf("Intersection of %s with %s should result in empty set", v1, v2) } // Add rev to one snuffster := Revision("snuffleupagus") v3 := v1.Pair(snuffster).(versionPair) if v2.Matches(v3) { t.Errorf("%s should not match %s", v2, gu(v3)) } if v3.Matches(v2) { t.Errorf("%s should not match %s", gu(v3), v2) } if v2.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v3.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v2.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3)) } if v3.Intersect(v2) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2) } // Add different rev to the other v4 := v2.Pair(Revision("cookie monster")).(versionPair) if v4.Matches(v3) { t.Errorf("%s should not match %s", gu(v4), gu(v3)) } if v3.Matches(v4) { t.Errorf("%s should not match %s", gu(v3), gu(v4)) } if v4.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v3.MatchesAny(v4) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v4.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3)) } 
if v3.Intersect(v4) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4)) } // Now add same rev to different versions, and things should line up v5 := v2.Pair(Revision("snuffleupagus")).(versionPair) if !v5.Matches(v3) { t.Errorf("%s should match %s", gu(v5), gu(v3)) } if !v3.Matches(v5) { t.Errorf("%s should match %s", gu(v3), gu(v5)) } if !v5.MatchesAny(v3) { t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3)) } if !v3.MatchesAny(v5) { t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3)) } if v5.Intersect(v3) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3)) } if v3.Intersect(v5) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5)) } // Set up for cross-type constraint ops cookie := Revision("cookie monster") o1 := NewBranch("master").(branchVersion) o2 := NewVersion("1.0.0").(semVersion) o3 := o1.Pair(cookie).(versionPair) o4 := o2.Pair(cookie).(versionPair) v6 := v1.Pair(cookie).(versionPair) if v1.Matches(o1) { t.Errorf("%s (version) should not match %s (branch) across types", v1, o1) } if v1.MatchesAny(o1) { t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, o1) } if v1.Intersect(o1) != none { t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, o1) } if v1.Matches(o2) { t.Errorf("%s (version) should not match %s (semver) across types", v1, o2) } if v1.MatchesAny(o2) { t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, o2) } if v1.Intersect(o2) != none { t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, o2) } if v1.Matches(o3) { t.Errorf("%s (version) should not match %s (branch) across types", v1, gu(o3)) } if v1.MatchesAny(o3) { t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, gu(o3)) } 
if v1.Intersect(o3) != none { t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, gu(o3)) } if v1.Matches(o4) { t.Errorf("%s (version) should not match %s (semver) across types", v1, gu(o4)) } if v1.MatchesAny(o4) { t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, gu(o4)) } if v1.Intersect(o4) != none { t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, gu(o4)) } if !v6.Matches(o3) { t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o3)) } if !v6.MatchesAny(o3) { t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3)) } if v6.Intersect(o3) != cookie { t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o3)) } if !v6.Matches(o4) { t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o4)) } if !v6.MatchesAny(o4) { t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4)) } if v6.Intersect(o4) != cookie { t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o4)) } } func TestSemverVersionConstraintOps(t *testing.T) { v1 := NewVersion("1.0.0").(semVersion) v2 := NewVersion("2.0.0").(semVersion) if !v1.MatchesAny(any) { t.Errorf("Semvers should always match the any constraint") } if v1.Intersect(any) != v1 { t.Errorf("Semvers should always return self when intersecting the any constraint, but got %s", v1.Intersect(any)) } if v1.MatchesAny(none) { t.Errorf("Semvers should never match the none constraint") } if v1.Intersect(none) != none { t.Errorf("Semvers should always return none when intersecting the none constraint, but got %s", v1.Intersect(none)) } if v1.Matches(v2) { t.Errorf("%s should not match %s", v1, v2) } if 
v1.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v1, v2) } if v1.Intersect(v2) != none { t.Errorf("Intersection of %s with %s should result in empty set", v1, v2) } // Add rev to one snuffster := Revision("snuffleupagus") v3 := v1.Pair(snuffster).(versionPair) if v2.Matches(v3) { t.Errorf("%s should not match %s", v2, gu(v3)) } if v3.Matches(v2) { t.Errorf("%s should not match %s", gu(v3), v2) } if v2.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v3.MatchesAny(v2) { t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3)) } if v2.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3)) } if v3.Intersect(v2) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2) } // Add different rev to the other v4 := v2.Pair(Revision("cookie monster")).(versionPair) if v4.Matches(v3) { t.Errorf("%s should not match %s", gu(v4), gu(v3)) } if v3.Matches(v4) { t.Errorf("%s should not match %s", gu(v3), gu(v4)) } if v4.MatchesAny(v3) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v3.MatchesAny(v4) { t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3)) } if v4.Intersect(v3) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3)) } if v3.Intersect(v4) != none { t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4)) } // Now add same rev to different versions, and things should line up v5 := v2.Pair(Revision("snuffleupagus")).(versionPair) if !v5.Matches(v3) { t.Errorf("%s should match %s", gu(v5), gu(v3)) } if !v3.Matches(v5) { t.Errorf("%s should match %s", gu(v3), gu(v5)) } if !v5.MatchesAny(v3) { t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3)) } if !v3.MatchesAny(v5) { t.Errorf("%s should allow some matches when combined with %s", 
gu(v5), gu(v3)) } if v5.Intersect(v3) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3)) } if v3.Intersect(v5) != snuffster { t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5)) } // Set up for cross-type constraint ops cookie := Revision("cookie monster") o1 := NewBranch("master").(branchVersion) o2 := NewVersion("ab123").(plainVersion) o3 := o1.Pair(cookie).(versionPair) o4 := o2.Pair(cookie).(versionPair) v6 := v1.Pair(cookie).(versionPair) if v1.Matches(o1) { t.Errorf("%s (semver) should not match %s (branch) across types", v1, o1) } if v1.MatchesAny(o1) { t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, o1) } if v1.Intersect(o1) != none { t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, o1) } if v1.Matches(o2) { t.Errorf("%s (semver) should not match %s (version) across types", v1, o2) } if v1.MatchesAny(o2) { t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, o2) } if v1.Intersect(o2) != none { t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, o2) } if v1.Matches(o3) { t.Errorf("%s (semver) should not match %s (branch) across types", v1, gu(o3)) } if v1.MatchesAny(o3) { t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, gu(o3)) } if v1.Intersect(o3) != none { t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, gu(o3)) } if v1.Matches(o4) { t.Errorf("%s (semver) should not match %s (version) across types", v1, gu(o4)) } if v1.MatchesAny(o4) { t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, gu(o4)) } if v1.Intersect(o4) != none { t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, gu(o4)) } if !v6.Matches(o3) { t.Errorf("%s (semver) should match %s (branch) across types due to 
shared rev", gu(v6), gu(o3)) } if !v6.MatchesAny(o3) { t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3)) } if v6.Intersect(o3) != cookie { t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o3)) } if !v6.Matches(o4) { t.Errorf("%s (semver) should match %s (branch) across types due to shared rev", gu(v6), gu(o4)) } if !v6.MatchesAny(o4) { t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4)) } if v6.Intersect(o4) != cookie { t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o4)) } // Regression check - make sure that semVersion -> semverConstraint works // the same as verified in the other test c1, _ := NewSemverConstraint("=1.0.0") if !v1.MatchesAny(c1) { t.Errorf("%s (semver) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v1)) } if v1.Intersect(c1) != v1 { t.Errorf("Intersection of %s (semver) with equivalent semver constraint should return self, got %s", gu(v1), v1.Intersect(c1)) } if !v6.MatchesAny(c1) { t.Errorf("%s (semver pair) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v6)) } if v6.Intersect(c1) != v6 { t.Errorf("Intersection of %s (semver pair) with equivalent semver constraint should return self, got %s", gu(v6), v6.Intersect(c1)) } } // The other test is about the semverVersion, this is about semverConstraint func TestSemverConstraintOps(t *testing.T) { v1 := NewBranch("master").(branchVersion) v2 := NewVersion("ab123").(plainVersion) v3 := NewVersion("1.0.0").(semVersion) fozzie := Revision("fozzie bear") v4 := v1.Pair(fozzie).(versionPair) v5 := v2.Pair(fozzie).(versionPair) v6 := v3.Pair(fozzie).(versionPair) // TODO(sdboyer) we can't use the same range as below b/c semver.rangeConstraint is // still an 
incomparable type c1, err := NewSemverConstraint("=1.0.0") if err != nil { t.Fatalf("Failed to create constraint: %s", err) } if !c1.MatchesAny(any) { t.Errorf("Semver constraints should always match the any constraint") } if c1.Intersect(any) != c1 { t.Errorf("Semver constraints should always return self when intersecting the any constraint, but got %s", c1.Intersect(any)) } if c1.MatchesAny(none) { t.Errorf("Semver constraints should never match the none constraint") } if c1.Intersect(none) != none { t.Errorf("Semver constraints should always return none when intersecting the none constraint, but got %s", c1.Intersect(none)) } c1, err = NewSemverConstraint(">= 1.0.0") if err != nil { t.Fatalf("Failed to create constraint: %s", err) } if c1.Matches(v1) { t.Errorf("Semver constraint should not match simple branch") } if c1.Matches(v2) { t.Errorf("Semver constraint should not match simple version") } if !c1.Matches(v3) { t.Errorf("Semver constraint should match a simple semver version in its range") } if c1.Matches(v4) { t.Errorf("Semver constraint should not match paired branch") } if c1.Matches(v5) { t.Errorf("Semver constraint should not match paired version") } if !c1.Matches(v6) { t.Errorf("Semver constraint should match a paired semver version in its range") } if c1.MatchesAny(v1) { t.Errorf("Semver constraint should not allow any when intersected with simple branch") } if c1.MatchesAny(v2) { t.Errorf("Semver constraint should not allow any when intersected with simple version") } if !c1.MatchesAny(v3) { t.Errorf("Semver constraint should allow some when intersected with a simple semver version in its range") } if c1.MatchesAny(v4) { t.Errorf("Semver constraint should not allow any when intersected with paired branch") } if c1.MatchesAny(v5) { t.Errorf("Semver constraint should not allow any when intersected with paired version") } if !c1.MatchesAny(v6) { t.Errorf("Semver constraint should allow some when intersected with a paired semver version in its range") 
} if c1.Intersect(v1) != none { t.Errorf("Semver constraint should return none when intersected with a simple branch") } if c1.Intersect(v2) != none { t.Errorf("Semver constraint should return none when intersected with a simple version") } if c1.Intersect(v3) != v3 { t.Errorf("Semver constraint should return input when intersected with a simple semver version in its range") } if c1.Intersect(v4) != none { t.Errorf("Semver constraint should return none when intersected with a paired branch") } if c1.Intersect(v5) != none { t.Errorf("Semver constraint should return none when intersected with a paired version") } if c1.Intersect(v6) != v6 { t.Errorf("Semver constraint should return input when intersected with a paired semver version in its range") } } func TestSemverConstraint_ImpliedCaret(t *testing.T) { c, _ := NewSemverConstraintIC("1.0.0") wantS := "^1.0.0" gotS := c.String() if wantS != gotS { t.Errorf("Expected string %s, got %s", wantS, gotS) } wantI := "1.0.0" gotI := c.ImpliedCaretString() if wantI != gotI { t.Errorf("Expected implied string %s, got %s", wantI, gotI) } wantT := "svc-^1.0.0" gotT := c.typedString() if wantT != gotT { t.Errorf("Expected type string %s, got %s", wantT, gotT) } } func TestTypedConstraintString(t *testing.T) { // Also tests typedVersionString(), as this nests down into that rev := Revision("flooboofoobooo") v1 := NewBranch("master") v2 := NewBranch("test").Pair(rev) v3 := NewVersion("1.0.1") v4 := NewVersion("v2.0.5") v5 := NewVersion("2.0.5.2") table := []struct { in Constraint out string }{ { in: anyConstraint{}, out: "any-*", }, { in: noneConstraint{}, out: "none-", }, { in: mkSVC("^1.0.0"), out: "svc-^1.0.0", }, { in: v1, out: "b-master", }, { in: v2, out: "b-test-r-" + string(rev), }, { in: v3, out: "sv-1.0.1", }, { in: v4, out: "sv-v2.0.5", }, { in: v5, out: "pv-2.0.5.2", }, } for _, fix := range table { got := fix.in.typedString() if got != fix.out { t.Errorf("Typed string for %v (%T) was not expected %q; got %q", fix.in, 
fix.in, fix.out, got) } } } func TestConstraintsIdentical(t *testing.T) { for _, test := range []struct { a, b Constraint eq bool }{ {Any(), Any(), true}, {none, noneConstraint{}, true}, {NewVersion("test"), NewVersion("test"), true}, {NewVersion("test"), NewVersion("test2"), false}, {NewBranch("test"), NewBranch("test"), true}, {NewBranch("test"), newDefaultBranch("test"), false}, {newDefaultBranch("test"), newDefaultBranch("test"), true}, {Revision("test"), Revision("test"), true}, {Revision("test"), Revision("test2"), false}, {testSemverConstraint(t, "v2.10.7"), testSemverConstraint(t, "v2.10.7"), true}, } { if test.eq != test.a.identical(test.b) { want := "identical" if !test.eq { want = "not " + want } t.Errorf("expected %s:\n\t(a) %#v\n\t(b) %#v", want, test.a, test.b) } } } func testSemverConstraint(t *testing.T, body string) Constraint { c, err := NewSemverConstraint(body) if err != nil { t.Fatal(errors.Wrapf(err, "failed to create semver constraint: %s", body)) } return c } func TestConstraintEncoding(t *testing.T) { for _, test := range []struct { name string c Constraint }{ {"defaultBranch", newDefaultBranch("test")}, {"branch", NewBranch("test")}, {"ver", NewVersion("test")}, {"semver", testSemverConstraint(t, "^1.0.0")}, {"rev", Revision("test")}, } { t.Run(test.name, func(t *testing.T) { var msg pb.Constraint test.c.copyTo(&msg) b, err := proto.Marshal(&msg) if err != nil { t.Fatal(err) } if err := proto.Unmarshal(b, &msg); err != nil { t.Fatal(err) } got, err := constraintFromCache(&msg) if err != nil { t.Error("failed to decode:", err) } else if !got.identical(test.c) { t.Errorf("decoded non-identical Constraint:\n\t(GOT): %#v\n\t(WNT): %#v", got, test.c) } if _, ok := test.c.(UnpairedVersion); ok { got, err := unpairedVersionFromCache(&msg) if err != nil { t.Error("failed to decode:", err) } else if !got.identical(test.c) { t.Errorf("decoded non-identical UnpairedVersion:\n\t(GOT): %#v\n\t(WNT): %#v", got, test.c) } } }) } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solver_inputs_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "io/ioutil" "log" "math/rand" "strconv" "strings" "testing" "github.com/golang/dep/gps/pkgtree" "github.com/golang/dep/internal/test" ) // TestBadSolveOpts exercises the different possible inputs to a solver that can // be determined as invalid in Prepare(), without any further work func TestBadSolveOpts(t *testing.T) { pn := strconv.FormatInt(rand.Int63(), 36) fix := basicFixtures["no dependencies"] fix.ds[0].n = ProjectRoot(pn) sm := newdepspecSM(fix.ds, nil) params := SolveParameters{ mkBridgeFn: overrideMkBridge, } _, err := Prepare(params, nil) if err == nil { t.Errorf("Prepare should have errored on nil SourceManager") } else if !strings.Contains(err.Error(), "non-nil SourceManager") { t.Error("Prepare should have given error on nil SourceManager, but gave:", err) } _, err = Prepare(params, sm) if err == nil { t.Errorf("Prepare should have errored without ProjectAnalyzer") } else if !strings.Contains(err.Error(), "must provide a ProjectAnalyzer") { t.Error("Prepare should have given error without ProjectAnalyzer, but gave:", err) } params.ProjectAnalyzer = naiveAnalyzer{} _, err = Prepare(params, sm) if err == nil { t.Errorf("Prepare should have errored on empty root") } else if !strings.Contains(err.Error(), "non-empty root directory") { t.Error("Prepare should have given error on empty root, but gave:", err) } params.RootDir = pn _, err = Prepare(params, sm) if err == nil { t.Errorf("Prepare should have errored on empty name") } else if !strings.Contains(err.Error(), "non-empty import root") { t.Error("Prepare should have given error on empty import root, but gave:", err) } params.RootPackageTree = pkgtree.PackageTree{ ImportRoot: pn, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Prepare should have errored on empty name") } else if !strings.Contains(err.Error(), "at 
least one package") { t.Error("Prepare should have given error on empty import root, but gave:", err) } params.RootPackageTree = pkgtree.PackageTree{ ImportRoot: pn, Packages: map[string]pkgtree.PackageOrErr{ pn: { P: pkgtree.Package{ ImportPath: pn, Name: pn, }, }, }, } params.TraceLogger = log.New(ioutil.Discard, "", 0) params.Manifest = simpleRootManifest{ ovr: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{}, }, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on override with empty ProjectProperties") } else if !strings.Contains(err.Error(), "foo, but without any non-zero properties") { t.Error("Prepare should have given error override with empty ProjectProperties, but gave:", err) } params.Manifest = simpleRootManifest{ ig: pkgtree.NewIgnoredRuleset([]string{"foo"}), req: map[string]bool{"foo": true}, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on pkg both ignored and required") } else if !strings.Contains(err.Error(), "was given as both a required and ignored package") { t.Error("Prepare should have given error with single ignore/require conflict error, but gave:", err) } params.Manifest = simpleRootManifest{ ig: pkgtree.NewIgnoredRuleset([]string{"foo", "bar"}), req: map[string]bool{"foo": true, "bar": true}, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on pkg both ignored and required") } else if !strings.Contains(err.Error(), "multiple packages given as both required and ignored:") { t.Error("Prepare should have given error with multiple ignore/require conflict error, but gave:", err) } params.Manifest = simpleRootManifest{ ig: pkgtree.NewIgnoredRuleset([]string{"foo*"}), req: map[string]bool{"foo/bar": true}, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on pkg both ignored (with wildcard) and required") } else if !strings.Contains(err.Error(), "was given as both a required and ignored package") { t.Error("Prepare should 
have given error with single ignore/require conflict error, but gave:", err) } params.Manifest = nil params.ToChange = []ProjectRoot{"foo"} _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on non-empty ToChange without a lock provided") } else if !strings.Contains(err.Error(), "update specifically requested for") { t.Error("Prepare should have given error on ToChange without Lock, but gave:", err) } params.Lock = safeLock{ p: []LockedProject{ NewLockedProject(mkPI("bar"), Revision("makebelieve"), nil), }, } _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on ToChange containing project not in lock") } else if !strings.Contains(err.Error(), "cannot update foo as it is not in the lock") { t.Error("Prepare should have given error on ToChange with item not present in Lock, but gave:", err) } params.Lock, params.ToChange = nil, nil _, err = Prepare(params, sm) if err != nil { t.Error("Basic conditions satisfied, prepare should have completed successfully, err as:", err) } // swap out the test mkBridge override temporarily, just to make sure we get // the right error params.mkBridgeFn = nil _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on nonexistent root") } else if !strings.Contains(err.Error(), "could not read project root") { t.Error("Prepare should have given error nonexistent project root dir, but gave:", err) } // Pointing it at a file should also be an err params.RootDir = "solve_test.go" _, err = Prepare(params, sm) if err == nil { t.Errorf("Should have errored on file for RootDir") } else if !strings.Contains(err.Error(), "is a file, not a directory") { t.Error("Prepare should have given error on file as RootDir, but gave:", err) } } func TestValidateParams(t *testing.T) { h := test.NewHelper(t) defer h.Cleanup() cacheDir := "gps-cache" h.TempDir(cacheDir) sm, err := NewSourceManager(SourceManagerConfig{ Cachedir: h.Path(cacheDir), Logger: log.New(test.Writer{TB: t}, "", 0), }) 
h.Must(err) defer sm.Release() h.TempDir("src") testcases := []struct { imports []string err bool }{ {[]string{"google.com/non-existing/package"}, true}, {[]string{"google.com/non-existing/package/subpkg"}, true}, {[]string{"github.com/sdboyer/testrepo"}, false}, {[]string{"github.com/sdboyer/testrepo/subpkg"}, false}, } params := SolveParameters{ ProjectAnalyzer: naiveAnalyzer{}, RootDir: h.Path("src"), RootPackageTree: pkgtree.PackageTree{ ImportRoot: "github.com/sdboyer/dep", }, } for _, tc := range testcases { params.RootPackageTree.Packages = map[string]pkgtree.PackageOrErr{ "github.com/sdboyer/dep": { P: pkgtree.Package{ Name: "github.com/sdboyer/dep", ImportPath: "github.com/sdboyer/dep", Imports: tc.imports, }, }, } err = ValidateParams(params, sm) if tc.err && err == nil { t.Fatalf("expected an error when deducing package fails, got none") } else if !tc.err && err != nil { t.Fatalf("deducing packges should have succeeded, got err: %#v", err) } } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solver.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "container/heap" "context" "fmt" "log" "sort" "strings" "sync" "sync/atomic" "github.com/armon/go-radix" "github.com/golang/dep/gps/paths" "github.com/golang/dep/gps/pkgtree" "github.com/pkg/errors" ) var rootRev = Revision("") // SolveParameters hold all arguments to a solver run. // // Only RootDir and RootPackageTree are absolutely required. A nil Manifest is // allowed, though it usually makes little sense. // // Of these properties, only the Manifest and RootPackageTree are (directly) // incorporated in memoization hashing. type SolveParameters struct { // The path to the root of the project on which the solver should operate. // This should point to the directory that should contain the vendor/ // directory. // // In general, it is wise for this to be under an active GOPATH, though it // is not (currently) required. // // A real path to a readable directory is required. RootDir string // The ProjectAnalyzer is responsible for extracting Manifest and // (optionally) Lock information from dependencies. The solver passes it // along to its SourceManager's GetManifestAndLock() method as needed. // // An analyzer is required. ProjectAnalyzer ProjectAnalyzer // The tree of packages that comprise the root project, as well as the // import path that should identify the root of that tree. // // In most situations, tools should simply pass the result of ListPackages() // directly through here. // // The ImportRoot property must be a non-empty string, and at least one // element must be present in the Packages map. RootPackageTree pkgtree.PackageTree // The root manifest. This contains all the dependency constraints // associated with normal Manifests, as well as the particular controls // afforded only to the root project. // // May be nil, but for most cases, that would be unwise. 
Manifest RootManifest // The root lock. Optional. Generally, this lock is the output of a previous // solve run. // // If provided, the solver will attempt to preserve the versions specified // in the lock, unless ToChange or ChangeAll settings indicate otherwise. Lock Lock // ToChange is a list of project names that should be changed - that is, any // versions specified for those projects in the root lock file should be // ignored. // // Passing ChangeAll has subtly different behavior from enumerating all // projects into ToChange. In general, ToChange should *only* be used if the // user expressly requested an upgrade for a specific project. ToChange []ProjectRoot // ChangeAll indicates that all projects should be changed - that is, any // versions specified in the root lock file should be ignored. ChangeAll bool // Downgrade indicates whether the solver will attempt to upgrade (false) or // downgrade (true) projects that are not locked, or are marked for change. // // Upgrading is, by far, the most typical case. The field is named // 'Downgrade' so that the bool's zero value corresponds to that most // typical case. Downgrade bool // TraceLogger is the logger to use for generating trace output. If set, the // solver will generate informative trace output as it moves through the // solving process. TraceLogger *log.Logger // stdLibFn is the function to use to recognize standard library import paths. // Only overridden for tests. Defaults to paths.IsStandardImportPath if nil. stdLibFn func(string) bool // mkBridgeFn is the function to use to create sourceBridges. // Only overridden for tests (so we can run with virtual RootDir). // Defaults to mkBridge if nil. mkBridgeFn func(*solver, SourceManager, bool) sourceBridge } // solver is a CDCL-style constraint solver with satisfiability conditions // hardcoded to the needs of the Go package management problem space. type solver struct { // The current number of attempts made over the course of this solve. 
This // number increments each time the algorithm completes a backtrack and // starts moving forward again. attempts int // Logger used exclusively for trace output, or nil to suppress. tl *log.Logger // The function to use to recognize standard library import paths. stdLibFn func(string) bool // A bridge to the standard SourceManager. The adapter does some local // caching of pre-sorted version lists, as well as translation between the // full-on ProjectIdentifiers that the solver deals with and the simplified // names a SourceManager operates on. b sourceBridge // A stack containing projects and packages that are currently "selected" - // that is, they have passed all satisfiability checks, and are part of the // current solution. // // The *selection type is mostly just a dumb data container; the solver // itself is responsible for maintaining that invariant. sel *selection // The current list of projects that we need to incorporate into the solution in // order for the solution to be complete. This list is implemented as a // priority queue that places projects least likely to induce errors at the // front, in order to minimize the amount of backtracking required to find a // solution. // // Entries are added to and removed from this list by the solver at the same // time that the selected queue is updated, either with an addition or // removal. unsel *unselected // A stack of all the currently active versionQueues in the solver. The set // of projects represented here corresponds closely to what's in s.sel, // although s.sel will always contain the root project, and s.vqs never // will. Also, s.vqs is only added to (or popped from during backtracking) // when a new project is selected; it is untouched when new packages are // added to an existing project. vqs []*versionQueue // Contains data and constraining information from the root project rd rootdata // metrics for the current solve run. mtr *metrics // Indicates whether the solver has been run. 
It is invalid to run this type // of solver more than once. hasrun int32 } func (params SolveParameters) toRootdata() (rootdata, error) { if params.ProjectAnalyzer == nil { return rootdata{}, badOptsFailure("must provide a ProjectAnalyzer") } if params.RootDir == "" { return rootdata{}, badOptsFailure("params must specify a non-empty root directory") } if params.RootPackageTree.ImportRoot == "" { return rootdata{}, badOptsFailure("params must include a non-empty import root") } if len(params.RootPackageTree.Packages) == 0 { return rootdata{}, badOptsFailure("at least one package must be present in the PackageTree") } if params.Lock == nil && len(params.ToChange) != 0 { return rootdata{}, badOptsFailure(fmt.Sprintf("update specifically requested for %s, but no lock was provided to upgrade from", params.ToChange)) } if params.Manifest == nil { params.Manifest = simpleRootManifest{} } rd := rootdata{ ir: params.Manifest.IgnoredPackages(), req: params.Manifest.RequiredPackages(), ovr: params.Manifest.Overrides(), rpt: params.RootPackageTree.Copy(), chng: make(map[ProjectRoot]struct{}), rlm: make(map[ProjectRoot]LockedProject), chngall: params.ChangeAll, dir: params.RootDir, an: params.ProjectAnalyzer, } // Ensure the required and overrides maps are at least initialized if rd.req == nil { rd.req = make(map[string]bool) } if rd.ovr == nil { rd.ovr = make(ProjectConstraints) } if rd.ir.Len() > 0 { var both []string for pkg := range params.Manifest.RequiredPackages() { if rd.ir.IsIgnored(pkg) { both = append(both, pkg) } } switch len(both) { case 0: break case 1: return rootdata{}, badOptsFailure(fmt.Sprintf("%q was given as both a required and ignored package", both[0])) default: return rootdata{}, badOptsFailure(fmt.Sprintf("multiple packages given as both required and ignored: %s", strings.Join(both, ", "))) } } // Validate no empties in the overrides map var eovr []string for pr, pp := range rd.ovr { if pp.Constraint == nil && pp.Source == "" { eovr = append(eovr, 
string(pr)) } } if eovr != nil { // Maybe it's a little nitpicky to do this (we COULD proceed; empty // overrides have no effect), but this errs on the side of letting the // tool/user know there's bad input. Purely as a principle, that seems // preferable to silently allowing progress with icky input. if len(eovr) > 1 { return rootdata{}, badOptsFailure(fmt.Sprintf("Overrides lacked any non-zero properties for multiple project roots: %s", strings.Join(eovr, " "))) } return rootdata{}, badOptsFailure(fmt.Sprintf("An override was declared for %s, but without any non-zero properties", eovr[0])) } // Prep safe, normalized versions of root manifest and lock data rd.rm = prepManifest(params.Manifest) if params.Lock != nil { for _, lp := range params.Lock.Projects() { rd.rlm[lp.Ident().ProjectRoot] = lp } // Also keep a prepped one, mostly for the bridge. This is probably // wasteful, but only minimally so, and yay symmetry rd.rl = prepLock(params.Lock) } for _, p := range params.ToChange { if _, exists := rd.rlm[p]; !exists { return rootdata{}, badOptsFailure(fmt.Sprintf("cannot update %s as it is not in the lock", p)) } rd.chng[p] = struct{}{} } return rd, nil } // Prepare readies a Solver for use. // // This function reads and validates the provided SolveParameters. If a problem // with the inputs is detected, an error is returned. Otherwise, a Solver is // returned, ready to hash and check inputs or perform a solving run. func Prepare(params SolveParameters, sm SourceManager) (Solver, error) { if sm == nil { return nil, badOptsFailure("must provide non-nil SourceManager") } rd, err := params.toRootdata() if err != nil { return nil, err } if params.stdLibFn == nil { params.stdLibFn = paths.IsStandardImportPath } s := &solver{ tl: params.TraceLogger, stdLibFn: params.stdLibFn, rd: rd, } // Set up the bridge and ensure the root dir is in good, working order // before doing anything else. 
if params.mkBridgeFn == nil { s.b = mkBridge(s, sm, params.Downgrade) } else { s.b = params.mkBridgeFn(s, sm, params.Downgrade) } err = s.b.verifyRootDir(params.RootDir) if err != nil { return nil, err } // Initialize stacks and queues s.sel = &selection{ deps: make(map[ProjectRoot][]dependency), foldRoots: make(map[string]ProjectRoot), } s.unsel = &unselected{ sl: make([]bimodalIdentifier, 0), cmp: s.unselectedComparator, } return s, nil } // A Solver is the main workhorse of gps: given a set of project inputs, it // performs a constraint solving analysis to develop a complete Solution, or // else fail with an informative error. // // If a Solution is found, an implementing tool may persist it - typically into // a "lock file" - and/or use it to write out a directory tree of dependencies, // suitable to be a vendor directory, via CreateVendorTree. type Solver interface { // Solve initiates a solving run. It will either abort due to a canceled // Context, complete successfully with a Solution, or fail with an // informative error. // // It is generally not allowed that this method be called twice for any // given solver. Solve(context.Context) (Solution, error) // Name returns a string identifying the particular solver backend. // // Different solvers likely have different invariants, and likely will not // have the same result sets for any particular inputs. Name() string // Version returns an int indicating the version of the solver of the given // Name(). Implementations should change their reported version ONLY when // the logic is changed in such a way that substantially changes the result // set that is possible for a substantial subset of likely inputs. // // "Substantial" is an imprecise term, and it is used intentionally. There // are no easy, general ways of subdividing constraint solving problems such // that one can know, a priori, the full impact that subtle algorithmic // changes will have on possible result sets. 
Consequently, we have to fall // back on coarser, intuition-based reasoning as to whether a change is // large enough that it is likely to be broadly user-visible. // // This is acceptable, because this value is not used programmatically by // the solver in any way. Rather, it is intend for implementing tools to // use as a coarse signal to users about compatibility between their tool's // version and the current data, typically via persistence to a Lock. // Changes to the version number reported should be weighed between // confusing teams by having two members' tools continuously rolling back // each others' chosen Solutions for no apparent reason, and annoying teams // by changing the number for changes so remote that warnings about solver // version mismatches become meaningless. // // Err on the side of caution. // // Chronology is the only implication of the ordering - that lower version // numbers were published before higher numbers. Version() int } func (s *solver) Name() string { return "gps-cdcl" } func (s *solver) Version() int { return 1 } // DeductionErrs maps package import path to errors occurring during deduction. type DeductionErrs map[string]error func (e DeductionErrs) Error() string { return "could not deduce external imports' project roots" } // ValidateParams validates the solver parameters to ensure solving can be completed. func ValidateParams(params SolveParameters, sm SourceManager) error { // Ensure that all packages are deducible without issues. 
var deducePkgsGroup sync.WaitGroup deductionErrs := make(DeductionErrs) var errsMut sync.Mutex rd, err := params.toRootdata() if err != nil { return err } deducePkg := func(ip string, sm SourceManager) { _, err := sm.DeduceProjectRoot(ip) if err != nil { errsMut.Lock() deductionErrs[ip] = err errsMut.Unlock() } deducePkgsGroup.Done() } for _, ip := range rd.externalImportList(paths.IsStandardImportPath) { deducePkgsGroup.Add(1) go deducePkg(ip, sm) } deducePkgsGroup.Wait() if len(deductionErrs) > 0 { return deductionErrs } return nil } // Solve attempts to find a dependency solution for the given project, as // represented by the SolveParameters with which this Solver was created. // // This is the entry point to the main gps workhorse. func (s *solver) Solve(ctx context.Context) (Solution, error) { // Solving can only be run once per solver. if !atomic.CompareAndSwapInt32(&s.hasrun, 0, 1) { return nil, errors.New("solve method can only be run once per instance") } // Make sure the bridge has the context before we start. //s.b.ctx = ctx // Set up a metrics object s.mtr = newMetrics() // Prime the queues with the root project if err := s.selectRoot(); err != nil { return nil, err } all, err := s.solve(ctx) s.mtr.pop() var soln solution if err == nil { soln = solution{ att: s.attempts, solv: s, } soln.analyzerInfo = s.rd.an.Info() soln.i = s.rd.externalImportList(s.stdLibFn) // Convert ProjectAtoms into LockedProjects soln.p = make([]LockedProject, 0, len(all)) for pa, pl := range all { lp := pa2lp(pa, pl) // Pass back the original inputlp directly if it Eqs what was // selected. if inputlp, has := s.rd.rlm[lp.Ident().ProjectRoot]; has && lp.Eq(inputlp) { lp = inputlp } soln.p = append(soln.p, lp) } } s.traceFinish(soln, err) if s.tl != nil { s.mtr.dump(s.tl) } return soln, err } // solve is the top-level loop for the solving process. 
func (s *solver) solve(ctx context.Context) (map[atom]map[string]struct{}, error) {
	// Pull out the donechan once up front so that we're not potentially
	// triggering mutex cycling and channel creation on each iteration.
	donechan := ctx.Done()

	// Main solving loop
	for {
		select {
		case <-donechan:
			return nil, ctx.Err()
		default:
		}

		bmi, has := s.nextUnselected()

		if !has {
			// no more packages to select - we're done.
			break
		}

		// This split is the heart of "bimodal solving": we follow different
		// satisfiability and selection paths depending on whether we've already
		// selected the base project/repo that came off the unselected queue.
		//
		// (If we've already selected the project, other parts of the algorithm
		// guarantee the bmi will contain at least one package from this project
		// that has yet to be selected.)
		if awp, is := s.sel.selected(bmi.id); !is {
			s.mtr.push("new-atom")
			// Analysis path for when we haven't selected the project yet - need
			// to create a version queue.
			queue, err := s.createVersionQueue(bmi)
			if err != nil {
				s.mtr.pop()
				// Err means a failure somewhere down the line; try backtracking.
				s.traceStartBacktrack(bmi, err, false)
				success, berr := s.backtrack(ctx)
				if berr != nil {
					err = berr
				} else if success {
					// backtracking succeeded, move to the next unselected id
					continue
				}
				return nil, err
			}

			if queue.current() == nil {
				panic("canary - queue is empty, but flow indicates success")
			}

			awp := atomWithPackages{
				a: atom{
					id: queue.id,
					v:  queue.current(),
				},
				pl: bmi.pl,
			}
			err = s.selectAtom(awp, false)
			s.mtr.pop()
			if err != nil {
				// Only a released SourceManager should be able to cause this.
				return nil, err
			}
			s.vqs = append(s.vqs, queue)
		} else {
			s.mtr.push("add-atom")
			// We're just trying to add packages to an already-selected project.
			// That means it's not OK to burn through the version queue for that
			// project as we do when first selecting a project, as doing so
			// would upend the guarantees on which all previous selections of
			// the project are based (both the initial one, and any package-only
			// ones).

			// Because we can only safely operate within the scope of the
			// single, currently selected version, we can skip looking for the
			// queue and just use the version given in what came back from
			// s.sel.selected().
			nawp := atomWithPackages{
				a: atom{
					id: bmi.id,
					v:  awp.a.v,
				},
				pl: bmi.pl,
			}

			s.traceCheckPkgs(bmi)
			err := s.check(nawp, true)
			if err != nil {
				s.mtr.pop()
				// Err means a failure somewhere down the line; try backtracking.
				s.traceStartBacktrack(bmi, err, true)
				success, berr := s.backtrack(ctx)
				if berr != nil {
					err = berr
				} else if success {
					// backtracking succeeded, move to the next unselected id
					continue
				}
				return nil, err
			}
			err = s.selectAtom(nawp, true)
			s.mtr.pop()
			if err != nil {
				// Only a released SourceManager should be able to cause this.
				return nil, err
			}
			// We don't add anything to the stack of version queues because the
			// backtracker knows not to pop the vqstack if it backtracks
			// across a pure-package addition.
		}
	}

	// Getting this far means we successfully found a solution. Combine the
	// selected projects and packages.
	projs := make(map[atom]map[string]struct{})

	// Skip the first project. It's always the root, and that shouldn't be
	// included in results.
	for _, sel := range s.sel.projects[1:] {
		pm, exists := projs[sel.a.a]
		if !exists {
			pm = make(map[string]struct{})
			projs[sel.a.a] = pm
		}

		for _, path := range sel.a.pl {
			pm[path] = struct{}{}
		}
	}
	return projs, nil
}

// selectRoot is a specialized selectAtom, used solely to initially
// populate the queues at the beginning of a solve run.
func (s *solver) selectRoot() error {
	s.mtr.push("select-root")
	// Push the root project onto the queue.
	awp := s.rd.rootAtom()
	s.sel.pushSelection(awp, false)

	// If we're looking for root's deps, get it from opts and local root
	// analysis, rather than having the sm do it.
	deps, err := s.intersectConstraintsWithImports(s.rd.combineConstraints(), s.rd.externalImportList(s.stdLibFn))
	if err != nil {
		if contextCanceledOrSMReleased(err) {
			return err
		}
		// TODO(sdboyer) this could well happen; handle it with a more graceful error
		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
	}

	for _, dep := range deps {
		// If we have no lock, or if this dep isn't in the lock, then prefetch
		// it. See longer explanation in selectAtom() for how we benefit from
		// parallelism here.
		if s.rd.needVersionsFor(dep.Ident.ProjectRoot) {
			go s.b.SyncSourceFor(dep.Ident)
		}

		s.sel.pushDep(dependency{depender: awp.a, dep: dep})
		// Add all to unselected queue
		heap.Push(s.unsel, bimodalIdentifier{id: dep.Ident, pl: dep.pl, fromRoot: true})
	}

	s.traceSelectRoot(s.rd.rpt, deps)
	s.mtr.pop()
	return nil
}

// getImportsAndConstraintsOf computes, for a (non-root) atom, the full list of
// packages implied by the atom's package list (expanded through the project's
// internal import graph) and the complete set of dependency constraints
// induced by those packages' external imports, with manifest constraints
// applied where available. Panics if called with the root project.
func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]string, []completeDep, error) {
	var err error

	if s.rd.isRoot(a.a.id.ProjectRoot) {
		panic("Should never need to recheck imports/constraints from root during solve")
	}

	// Work through the source manager to get project info and static analysis
	// information.
	m, _, err := s.b.GetManifestAndLock(a.a.id, a.a.v, s.rd.an)
	if err != nil {
		return nil, nil, err
	}

	ptree, err := s.b.ListPackages(a.a.id, a.a.v)
	if err != nil {
		return nil, nil, err
	}

	rm, em := ptree.ToReachMap(true, false, true, s.rd.ir)
	// Use maps to dedupe the unique internal and external packages.
	exmap, inmap := make(map[string]struct{}), make(map[string]struct{})

	for _, pkg := range a.pl {
		inmap[pkg] = struct{}{}
		for _, ipkg := range rm[pkg].Internal {
			inmap[ipkg] = struct{}{}
		}
	}

	var pl []string
	// If lens are the same, then the map must have the same contents as the
	// slice; no need to build a new one.
	if len(inmap) == len(a.pl) {
		pl = a.pl
	} else {
		pl = make([]string, 0, len(inmap))
		for pkg := range inmap {
			pl = append(pl, pkg)
		}
		sort.Strings(pl)
	}

	// Add to the list those packages that are reached by the packages
	// explicitly listed in the atom
	for _, pkg := range a.pl {
		// Skip ignored packages
		if s.rd.ir.IsIgnored(pkg) {
			continue
		}

		ie, exists := rm[pkg]
		if !exists {
			// Missing package here *should* only happen if the target pkg was
			// poisoned; check the errors map.
			if importErr, eexists := em[pkg]; eexists {
				return nil, nil, importErr
			}

			// Nope, it's actually full-on not there.
			return nil, nil, fmt.Errorf("package %s does not exist within project %s", pkg, a.a.id)
		}

		for _, ex := range ie.External {
			exmap[ex] = struct{}{}
		}
	}

	reach := make([]string, 0, len(exmap))
	for pkg := range exmap {
		reach = append(reach, pkg)
	}
	sort.Strings(reach)

	deps := s.rd.ovr.overrideAll(m.DependencyConstraints())
	cd, err := s.intersectConstraintsWithImports(deps, reach)
	return pl, cd, err
}

// intersectConstraintsWithImports takes a list of constraints and a list of
// externally reached packages, and creates a []completeDep that is guaranteed
// to include all packages named by import reach, using constraints where they
// are available, or Any() where they are not.
func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach []string) ([]completeDep, error) {
	// Create a radix tree with all the projects we know from the manifest
	xt := radix.New()
	for _, dep := range deps {
		xt.Insert(string(dep.Ident.ProjectRoot), dep)
	}

	// Step through the reached packages; if they have prefix matches in
	// the trie, assume (mostly) it's a correct correspondence.
	dmap := make(map[ProjectRoot]completeDep)
	for _, rp := range reach {
		// If it's a stdlib-shaped package, skip it.
		if s.stdLibFn(rp) {
			continue
		}

		// Look for a prefix match; it'll be the root project/repo containing
		// the reached package
		if pre, idep, match := xt.LongestPrefix(rp); match && isPathPrefixOrEqual(pre, rp) {
			// Match is valid; put it in the dmap, either creating a new
			// completeDep or appending it to the existing one for this base
			// project/prefix.
			dep := idep.(workingConstraint)
			if cdep, exists := dmap[dep.Ident.ProjectRoot]; exists {
				cdep.pl = append(cdep.pl, rp)
				dmap[dep.Ident.ProjectRoot] = cdep
			} else {
				dmap[dep.Ident.ProjectRoot] = completeDep{
					workingConstraint: dep,
					pl:                []string{rp},
				}
			}
			continue
		}

		// No match. Let the SourceManager try to figure out the root
		root, err := s.b.DeduceProjectRoot(rp)
		if err != nil {
			// Nothing we can do if we can't suss out a root
			return nil, err
		}

		// Make a new completeDep with an open constraint, respecting overrides
		pd := s.rd.ovr.override(root, ProjectProperties{Constraint: Any()})

		// Insert the pd into the trie so that further deps from this
		// project get caught by the prefix search
		xt.Insert(string(root), pd)
		// And also put the complete dep into the dmap
		dmap[root] = completeDep{
			workingConstraint: pd,
			pl:                []string{rp},
		}
	}

	// Dump all the deps from the map into the expected return slice
	cdeps := make([]completeDep, 0, len(dmap))
	for _, cdep := range dmap {
		cdeps = append(cdeps, cdep)
	}

	return cdeps, nil
}

// createVersionQueue builds a versionQueue for the project named by the given
// bimodalIdentifier, seeding it with the root lock's version (when valid) and
// any preferred version, then immediately searches the queue for a version
// that satisfies the solver's current constraints.
func (s *solver) createVersionQueue(bmi bimodalIdentifier) (*versionQueue, error) {
	id := bmi.id
	// If on the root package, there's no queue to make
	if s.rd.isRoot(id.ProjectRoot) {
		return newVersionQueue(id, nil, nil, s.b)
	}

	exists, err := s.b.SourceExists(id)
	if err != nil {
		return nil, err
	}
	if !exists {
		exists, err = s.b.vendorCodeExists(id)
		if err != nil {
			return nil, err
		}
		if exists {
			// Project exists only in vendor
			// FIXME(sdboyer) this just totally doesn't work at all right now
		} else {
			return nil, fmt.Errorf("project '%s' could not be located", id)
		}
	}

	var lockv Version
	if len(s.rd.rlm) > 0 {
		lockv, err = s.getLockVersionIfValid(id)
		if err != nil {
			// Can only get an error here if an upgrade was expressly requested on
			// code that exists only in vendor
			return nil, err
		}
	}

	var prefv Version
	if bmi.fromRoot {
		// If this bmi came from the root, then we want to search through things
		// with a dependency on it in order to see if any have a lock that might
		// express a prefv
		//
		// TODO(sdboyer) nested loop; prime candidate for a cache somewhere
		for _, dep := range s.sel.getDependenciesOn(bmi.id) {
			// Skip the root, of course
			if s.rd.isRoot(dep.depender.id.ProjectRoot) {
				continue
			}

			_, l, err := s.b.GetManifestAndLock(dep.depender.id, dep.depender.v, s.rd.an)
			if err != nil || l == nil {
				// err being non-nil really shouldn't be possible, but the lock
				// being nil is quite likely
				continue
			}

			for _, lp := range l.Projects() {
				if lp.Ident().eq(bmi.id) {
					prefv = lp.Version()
				}
			}
		}

		// OTHER APPROACH - WRONG, BUT MAYBE USEFUL FOR REFERENCE?
		// If this bmi came from the root, then we want to search the unselected
		// queue to see if anything *else* wants this ident, in which case we
		// pick up that prefv
		//for _, bmi2 := range s.unsel.sl {
		//// Take the first thing from the queue that's for the same ident,
		//// and has a non-nil prefv
		//if bmi.id.eq(bmi2.id) {
		//if bmi2.prefv != nil {
		//prefv = bmi2.prefv
		//}
		//}
		//}
	} else {
		// Otherwise, just use the preferred version expressed in the bmi
		prefv = bmi.prefv
	}

	q, err := newVersionQueue(id, lockv, prefv, s.b)
	if err != nil {
		// TODO(sdboyer) this particular err case needs to be improved to be ONLY for cases
		// where there's absolutely nothing findable about a given project name
		return nil, err
	}

	// Hack in support for revisions.
	//
	// By design, revs aren't returned from ListVersions(). Thus, if the dep in
	// the bmi has a rev constraint, it is (almost) guaranteed to fail, even
	// if that rev does exist in the repo. So, detect a rev and push it into the
	// vq here, instead.
	//
	// Happily, the solver maintains the invariant that constraints on a given
	// ident cannot be incompatible, so we know that if we find one rev, then
	// any other deps will have to also be on that rev (or Any).
	//
	// TODO(sdboyer) while this does work, it bypasses the interface-implied guarantees
	// of the version queue, and is therefore not a great strategy for API
	// coherency. Folding this into a formal interface would be better.
	if tc, ok := s.sel.getConstraint(bmi.id).(Revision); ok && q.pi[0] != tc {
		// We know this is the only thing that could possibly match, so put it
		// in at the front - if it isn't there already.
		// TODO(sdboyer) existence of the revision is guaranteed by checkRevisionExists(); restore that call.
		q.pi = append([]Version{tc}, q.pi...)
	}

	// Having assembled the queue, search it for a valid version.
	s.traceCheckQueue(q, bmi, false, 1)
	return q, s.findValidVersion(q, bmi.pl)
}

// findValidVersion walks through a versionQueue until it finds a version that
// satisfies the constraints held in the current state of the solver.
//
// The satisfiability checks triggered from here are constrained to operate only
// on those dependencies induced by the list of packages given in the second
// parameter.
func (s *solver) findValidVersion(q *versionQueue, pl []string) error {
	if nil == q.current() {
		// this case should not be reachable, but reflects improper solver state
		// if it is, so panic immediately
		panic("version queue is empty, should not happen: " + string(q.id.ProjectRoot) + " " + q.id.Source)
	}

	faillen := len(q.fails)

	for {
		cur := q.current()
		s.traceInfo("try %s@%s", q.id, cur)
		err := s.check(atomWithPackages{
			a: atom{
				id: q.id,
				v:  cur,
			},
			pl: pl,
		}, false)
		if err == nil {
			// we have a good version, can return safely
			return nil
		}

		if q.advance(err) != nil {
			// Error on advance, have to bail out
			break
		}
		if q.isExhausted() {
			// Queue is empty, bail with error
			break
		}
	}

	s.fail(s.sel.getDependenciesOn(q.id)[0].depender.id)

	// Return a compound error of all the new errors encountered during this
	// attempt to find a new, valid version
	return &noVersionError{
		pn:    q.id,
		fails: q.fails[faillen:],
	}
}

// getLockVersionIfValid finds an atom for the given ProjectIdentifier from the
// root lock, assuming:
//
// 1. A root lock was provided
// 2. The general flag to change all projects was not passed
// 3. A flag to change this particular ProjectIdentifier was not passed
//
// If any of these three conditions are true (or if the id cannot be found in
// the root lock), then no atom will be returned.
func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (Version, error) {
	// If the project is specifically marked for changes, then don't look for a
	// locked version.
	if _, explicit := s.rd.chng[id.ProjectRoot]; explicit || s.rd.chngall {
		// For projects with an upstream or cache repository, it's safe to
		// ignore what's in the lock, because there's presumably more versions
		// to be found and attempted in the repository. If it's only in vendor,
		// though, then we have to try to use what's in the lock, because that's
		// the only version we'll be able to get.
		if exist, _ := s.b.SourceExists(id); exist {
			// Upgrades mean breaking the lock
			s.b.breakLock()
			return nil, nil
		}

		// However, if a change was *expressly* requested for something that
		// exists only in vendor, then that guarantees we don't have enough
		// information to complete a solution. In that case, error out.
		if explicit {
			return nil, &missingSourceFailure{
				goal: id,
				prob: "Cannot upgrade %s, as no source repository could be found.",
			}
		}
	}

	lp, exists := s.rd.rlm[id.ProjectRoot]
	if !exists {
		return nil, nil
	}

	constraint := s.sel.getConstraint(id)
	v := lp.Version()
	if !constraint.Matches(v) {
		// No match found, which means we're going to be breaking the lock
		// Still return the invalid version so that is included in the trace
		s.b.breakLock()
	}

	return v, nil
}

// backtrack works backwards from the current failed solution to find the next
// solution to try. It returns true if it found a version that may allow
// solving to proceed, and false if every queued version has been exhausted.
func (s *solver) backtrack(ctx context.Context) (bool, error) {
	if len(s.vqs) == 0 {
		// nothing to backtrack to
		return false, nil
	}

	donechan := ctx.Done()
	s.mtr.push("backtrack")
	defer s.mtr.pop()
	for {
		for {
			select {
			case <-donechan:
				return false, ctx.Err()
			default:
			}

			if len(s.vqs) == 0 {
				// no more versions, nowhere further to backtrack
				return false, nil
			}
			if s.vqs[len(s.vqs)-1].failed {
				break
			}

			s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil

			// Pop selections off until we get to a project.
			var proj bool
			var awp atomWithPackages
			for !proj {
				var err error
				awp, proj, err = s.unselectLast()
				if err != nil {
					if !contextCanceledOrSMReleased(err) {
						panic(fmt.Sprintf("canary - should only have been able to get a context cancellation or SM release, got %T %s", err, err))
					}
					return false, err
				}
				s.traceBacktrack(awp.bmi(), !proj)
			}
		}

		// Grab the last versionQueue off the list of queues
		q := s.vqs[len(s.vqs)-1]

		// Walk back to the next project. This may entail walking through some
		// package-only selections.
		var proj bool
		var awp atomWithPackages
		for !proj {
			var err error
			awp, proj, err = s.unselectLast()
			if err != nil {
				if !contextCanceledOrSMReleased(err) {
					panic(fmt.Sprintf("canary - should only have been able to get a context cancellation or SM release, got %T %s", err, err))
				}
				return false, err
			}
			s.traceBacktrack(awp.bmi(), !proj)
		}

		if !q.id.eq(awp.a.id) {
			panic("canary - version queue stack and selected project stack are misaligned")
		}

		// Advance the queue past the current version, which we know is bad
		// TODO(sdboyer) is it feasible to make available the failure reason here?
		if q.advance(nil) == nil && !q.isExhausted() {
			// Search for another acceptable version of this failed dep in its queue
			s.traceCheckQueue(q, awp.bmi(), true, 0)
			if s.findValidVersion(q, awp.pl) == nil {
				// Found one! Put it back on the selected queue and stop
				// backtracking

				// reusing the old awp is fine
				awp.a.v = q.current()
				err := s.selectAtom(awp, false)
				if err != nil {
					if !contextCanceledOrSMReleased(err) {
						panic(fmt.Sprintf("canary - should only have been able to get a context cancellation or SM release, got %T %s", err, err))
					}
					return false, err
				}
				break
			}
		}

		s.traceBacktrack(awp.bmi(), false)

		// No solution found; continue backtracking after popping the queue
		// we just inspected off the list
		// GC-friendly pop pointer elem in slice
		s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil
	}

	// Backtracking was successful if loop ended before running out of versions
	if len(s.vqs) == 0 {
		return false, nil
	}

	s.attempts++
	return true, nil
}

// nextUnselected returns the bimodalIdentifier at the front of the unselected
// priority queue, and whether the queue held any items at all.
func (s *solver) nextUnselected() (bimodalIdentifier, bool) {
	if len(s.unsel.sl) > 0 {
		return s.unsel.sl[0], true
	}

	return bimodalIdentifier{}, false
}

// unselectedComparator reports whether the unselected-queue item at index i
// should be visited before the item at index j. Ordering: package-only
// additions to already-selected projects first, then projects named in the
// root lock, then projects with fewer available versions, then by name.
func (s *solver) unselectedComparator(i, j int) bool {
	ibmi, jbmi := s.unsel.sl[i], s.unsel.sl[j]
	iname, jname := ibmi.id, jbmi.id

	// Most important thing is pushing package additions ahead of project
	// additions. Package additions can't walk their version queue, so all they
	// do is narrow the possibility of success; better to find out early and
	// fast if they're going to fail than wait until after we've done real work
	// on a project and have to backtrack across it.

	// FIXME the impl here is currently O(n) in the number of selections; it
	// absolutely cannot stay in a hot sorting path like this
	// FIXME while other solver invariants probably protect us from it, this
	// call-out means that it's possible for external state change to invalidate
	// heap invariants.
	_, isel := s.sel.selected(iname)
	_, jsel := s.sel.selected(jname)

	if isel && !jsel {
		return true
	}
	if !isel && jsel {
		return false
	}

	if iname.eq(jname) {
		return false
	}

	_, ilock := s.rd.rlm[iname.ProjectRoot]
	_, jlock := s.rd.rlm[jname.ProjectRoot]

	switch {
	case ilock && !jlock:
		return true
	case !ilock && jlock:
		return false
	case ilock && jlock:
		return iname.Less(jname)
	}

	// Now, sort by number of available versions. This will trigger network
	// activity, but at this point we know that the project we're looking at
	// isn't locked by the root. And, because being locked by root is the only
	// way to avoid that call when making a version queue, we know we're gonna
	// have to pay that cost anyway.

	// We can safely ignore an err from listVersions here because, if there is
	// an actual problem, it'll be noted and handled somewhere else saner in the
	// solving algorithm.
	ivl, _ := s.b.listVersions(iname)
	jvl, _ := s.b.listVersions(jname)
	iv, jv := len(ivl), len(jvl)

	// Packages with fewer versions to pick from are less likely to benefit from
	// backtracking, so deal with them earlier in order to minimize the amount
	// of superfluous backtracking through them we do.
	switch {
	case iv == 0 && jv != 0:
		return true
	case iv != 0 && jv == 0:
		return false
	case iv != jv:
		return iv < jv
	}

	// Finally, if all else fails, fall back to comparing by name
	return iname.Less(jname)
}

// fail marks the (oldest) version queue for the given non-root project as
// failed, so the backtracker knows to stop there and try other versions.
func (s *solver) fail(id ProjectIdentifier) {
	// TODO(sdboyer) does this need updating, now that we have non-project package
	// selection?

	// skip if the root project
	if !s.rd.isRoot(id.ProjectRoot) {
		// just look for the first (oldest) one; the backtracker will necessarily
		// traverse through and pop off any earlier ones
		for _, vq := range s.vqs {
			if vq.id.eq(id) {
				vq.failed = true
				return
			}
		}
	}
}

// selectAtom pulls an atom into the selection stack, alongside some of
// its contained packages. New resultant dependency requirements are added to
// the unselected priority queue.
//
// Behavior is slightly different if pkgonly is true.
func (s *solver) selectAtom(a atomWithPackages, pkgonly bool) error {
	s.mtr.push("select-atom")
	s.unsel.remove(bimodalIdentifier{
		id: a.a.id,
		pl: a.pl,
	})

	pl, deps, err := s.getImportsAndConstraintsOf(a)
	if err != nil {
		if contextCanceledOrSMReleased(err) {
			return err
		}
		// This shouldn't be possible; other checks should have ensured all
		// packages and deps are present for any argument passed to this method.
		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
	}
	// Assign the new internal package list into the atom, then push it onto the
	// selection stack
	a.pl = pl
	s.sel.pushSelection(a, pkgonly)

	// If this atom has a lock, pull it out so that we can potentially inject
	// preferred versions into any bmis we enqueue
	//
	// TODO(sdboyer) making this call here could be the first thing to trigger
	// network activity...maybe? if so, can we mitigate by deferring the work to
	// queue consumption time?
	_, l, _ := s.b.GetManifestAndLock(a.a.id, a.a.v, s.rd.an)
	var lmap map[ProjectIdentifier]Version
	if l != nil {
		lmap = make(map[ProjectIdentifier]Version)
		for _, lp := range l.Projects() {
			lmap[lp.Ident()] = lp.Version()
		}
	}

	for _, dep := range deps {
		// Root can come back up here if there's a project-level cycle.
		// Satisfiability checks have already ensured invariants are maintained,
		// so we know we can just skip it here.
		if s.rd.isRoot(dep.Ident.ProjectRoot) {
			continue
		}

		// If this dep isn't in the lock, do some prefetching. (If it is, we
		// might be able to get away with zero network activity for it, so don't
		// prefetch). This provides an opportunity for some parallelism wins, on
		// two fronts:
		//
		// 1. Because this loop may have multiple deps in it, we could end up
		// simultaneously fetching both in the background while solving proceeds
		//
		// 2. Even if only one dep gets prefetched here, the worst case is that
		// that same dep comes out of the unselected queue next, and we gain a
		// few microseconds before blocking later. Best case, the dep doesn't
		// come up next, but some other dep comes up that wasn't prefetched, and
		// both fetches proceed in parallel.
		if s.rd.needVersionsFor(dep.Ident.ProjectRoot) {
			go s.b.SyncSourceFor(dep.Ident)
		}

		s.sel.pushDep(dependency{depender: a.a, dep: dep})
		// Go through all the packages introduced on this dep, selecting only
		// the ones where the only depper on them is what the preceding line just
		// pushed in. Then, put those into the unselected queue.
		rpm := s.sel.getRequiredPackagesIn(dep.Ident)
		var newp []string
		for _, pkg := range dep.pl {
			// Just one means that the dep we're visiting is the sole importer.
			if rpm[pkg] == 1 {
				newp = append(newp, pkg)
			}
		}

		if len(newp) > 0 {
			// If there was a previously-established alternate source for this
			// dependency, but the current atom did not express one (and getting
			// here means the atom passed the source hot-swapping check - see
			// checkIdentMatches()), then we have to create the new bmi with the
			// alternate source. Otherwise, we end up with two discrete project
			// entries for the project root in the final output, one with the
			// alternate source, and one without. See #969.
			id, _ := s.sel.getIdentFor(dep.Ident.ProjectRoot)
			bmi := bimodalIdentifier{
				id: id,
				pl: newp,
				// This puts in a preferred version if one's in the map, else
				// drops in the zero value (nil)
				prefv: lmap[dep.Ident],
			}
			heap.Push(s.unsel, bmi)
		}
	}

	s.traceSelect(a, pkgonly)
	s.mtr.pop()

	return nil
}

// unselectLast pops the most recent selection off the selection stack, returns
// it to the unselected queue, and retracts the dependencies that selection had
// introduced. The returned bool reports whether the popped selection was a
// full project selection, as opposed to a package-only addition (the flag
// comes from popSelection; backtrack loops on it until it reaches a project).
func (s *solver) unselectLast() (atomWithPackages, bool, error) {
	s.mtr.push("unselect")
	defer s.mtr.pop()

	awp, first := s.sel.popSelection()
	heap.Push(s.unsel, bimodalIdentifier{id: awp.a.id, pl: awp.pl})

	_, deps, err := s.getImportsAndConstraintsOf(awp)
	if err != nil {
		if contextCanceledOrSMReleased(err) {
			return atomWithPackages{}, false, err
		}
		// This shouldn't be possible; other checks should have ensured all
		// packages and deps are present for any argument passed to this method.
		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
	}

	for _, dep := range deps {
		// Skip popping if the dep is the root project, which can occur if
		// there's a project-level import cycle. (This occurs frequently with
		// e.g. kubernetes and docker)
		if s.rd.isRoot(dep.Ident.ProjectRoot) {
			continue
		}
		s.sel.popDep(dep.Ident)

		// if no parents/importers, remove from unselected queue
		if s.sel.depperCount(dep.Ident) == 0 {
			s.unsel.remove(bimodalIdentifier{id: dep.Ident, pl: dep.pl})
		}
	}

	return awp, first, nil
}

// pa2lp is a simple (temporary?) helper to convert an atom and its selected
// package set into a LockedProject, recording version and/or revision
// depending on the concrete Version type, with package paths made relative to
// the project root ("." for the root package itself).
func pa2lp(pa atom, pkgs map[string]struct{}) LockedProject {
	lp := lockedProject{
		pi: pa.id,
	}

	switch v := pa.v.(type) {
	case UnpairedVersion:
		lp.v = v
	case Revision:
		lp.r = v
	case versionPair:
		lp.v = v.v
		lp.r = v.r
	default:
		panic("unreachable")
	}

	lp.pkgs = make([]string, 0, len(pkgs))

	pr := string(pa.id.ProjectRoot)
	trim := pr + "/"
	for pkg := range pkgs {
		if pkg == string(pa.id.ProjectRoot) {
			lp.pkgs = append(lp.pkgs, ".")
		} else {
			lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, trim))
		}
	}
	sort.Strings(lp.pkgs)

	return lp
}

// contextCanceledOrSMReleased reports whether err is a context cancellation or
// deadline error, or the SourceManager-released sentinel - the set of errors
// the solver treats as a clean shutdown rather than a canary panic.
func contextCanceledOrSMReleased(err error) bool {
	return err == context.Canceled || err == context.DeadlineExceeded || err == ErrSourceManagerIsReleased
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/identifier.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"math/rand"
	"strconv"
)

// ProjectRoot is the topmost import path in a tree of other import paths - the
// root of the tree. In gps' current design, ProjectRoots have to correspond to
// a repository root (mostly), but their real purpose is to identify the root
// import path of a "project", logically encompassing all child packages.
//
// Projects are a crucial unit of operation in gps. Constraints are declared by
// a project's manifest, and apply to all packages in a ProjectRoot's tree.
// Solving itself mostly proceeds on a project-by-project basis.
//
// Aliasing string types is usually a bit of an anti-pattern. gps does it here
// as a means of clarifying API intent. This is important because Go's package
// management domain has lots of different path-ish strings floating around:
//
//  actual directories:
//   /home/sdboyer/go/src/github.com/sdboyer/gps/example
//  URLs:
//   https://github.com/sdboyer/gps
//  import paths:
//   github.com/sdboyer/gps/example
//  portions of import paths that refer to a package:
//   example
//  portions that could not possibly refer to anything sane:
//   github.com/sdboyer
//  portions that correspond to a repository root:
//   github.com/sdboyer/gps
//
// While not a panacea, having ProjectRoot allows gps to clearly indicate via
// the type system when a path-ish string must have particular semantics.
type ProjectRoot string

// A ProjectIdentifier provides the name and source location of a dependency. It
// is related to, but differs in two key ways from, a plain import path.
//
// First, ProjectIdentifiers do not identify a single package. Rather, they
// encompass the whole tree of packages, including the tree's root - the
// ProjectRoot. In gps' current design, this ProjectRoot almost always
// corresponds to the root of a repository.
//
// Second, ProjectIdentifiers can optionally carry a Source, which
// identifies where the underlying source code can be located on the network.
// These can be either a full URL, including protocol, or plain import paths.
// So, these are all valid data for Source:
//
//  github.com/sdboyer/gps
//  github.com/fork/gps
//  git@github.com:sdboyer/gps
//  https://github.com/sdboyer/gps
//
// With plain import paths, network addresses are derived purely through an
// algorithm. By having an explicit network name, it becomes possible to, for
// example, transparently substitute a fork for the original upstream source
// repository.
//
// NOTE(review): the "ImportRoot" mentioned below appears to refer to the
// ProjectRoot field - no field named ImportRoot exists on this type; confirm
// and update the terminology.
//
// Note that gps makes no guarantees about the actual import paths contained in
// a repository aligning with ImportRoot. If tools, or their users, specify an
// alternate Source that contains a repository with incompatible internal
// import paths, gps' solving operations will error. (gps does no import
// rewriting.)
//
// Also note that if different projects' manifests report a different
// Source for a given ImportRoot, it is a solve failure. Everyone has to
// agree on where a given import path should be sourced from.
//
// If Source is not explicitly set, gps will derive the network address from
// the ImportRoot using a similar algorithm to that utilized by `go get`.
type ProjectIdentifier struct {
	ProjectRoot ProjectRoot
	Source      string
}

// Less compares by ProjectRoot then normalized Source.
func (i ProjectIdentifier) Less(j ProjectIdentifier) bool { if i.ProjectRoot < j.ProjectRoot { return true } if j.ProjectRoot < i.ProjectRoot { return false } return i.normalizedSource() < j.normalizedSource() } func (i ProjectIdentifier) eq(j ProjectIdentifier) bool { if i.ProjectRoot != j.ProjectRoot { return false } if i.Source == j.Source { return true } if (i.Source == "" && j.Source == string(j.ProjectRoot)) || (j.Source == "" && i.Source == string(i.ProjectRoot)) { return true } return false } // equiv will check if the two identifiers are "equivalent," under special // rules. // // Given that the ProjectRoots are equal (==), equivalency occurs if: // // 1. The Sources are equal (==), OR // 2. The LEFT (the receiver) Source is non-empty, and the right // Source is empty. // // *This is asymmetry in this binary relation is intentional.* It facilitates // the case where we allow for a ProjectIdentifier with an explicit Source // to match one without. func (i ProjectIdentifier) equiv(j ProjectIdentifier) bool { if i.ProjectRoot != j.ProjectRoot { return false } if i.Source == j.Source { return true } if i.Source != "" && j.Source == "" { return true } return false } func (i ProjectIdentifier) normalizedSource() string { if i.Source == "" { return string(i.ProjectRoot) } return i.Source } func (i ProjectIdentifier) String() string { if i.Source == "" || i.Source == string(i.ProjectRoot) { return string(i.ProjectRoot) } return fmt.Sprintf("%s (from %s)", i.ProjectRoot, i.Source) } func (i ProjectIdentifier) normalize() ProjectIdentifier { if i.Source == "" { i.Source = string(i.ProjectRoot) } return i } // ProjectProperties comprise the properties that can be attached to a // ProjectRoot. // // In general, these are declared in the context of a map of ProjectRoot to its // ProjectProperties; they make little sense without their corresponding // ProjectRoot. 
type ProjectProperties struct {
	// Source optionally names where the project's code should be retrieved
	// from; empty means the location is derived from the ProjectRoot.
	Source string
	// Constraint bounds the acceptable versions of the project.
	Constraint Constraint
}

// bimodalIdentifiers are used to track work to be done in the unselected queue.
type bimodalIdentifier struct {
	id ProjectIdentifier
	// List of packages required within/under the ProjectIdentifier
	pl []string
	// prefv is used to indicate a 'preferred' version. This is expected to be
	// derived from a dep's lock data, or else is empty.
	prefv Version
	// Indicates that the bmi came from the root project originally
	fromRoot bool
}

// atom pairs a project identifier with a single concrete version of that
// project.
type atom struct {
	id ProjectIdentifier
	v  Version
}

// nilpa is the sentinel "no atom" value.
// With a random revision and no name, collisions are...unlikely
var nilpa = atom{
	v: Revision(strconv.FormatInt(rand.Int63(), 36)),
}

// atomWithPackages couples an atom with the list of packages needed from it.
type atomWithPackages struct {
	a  atom
	pl []string
}

// bmi converts an atomWithPackages into a bimodalIdentifier.
//
// This is mostly intended for (read-only) trace use, so the package list slice
// is not copied. It is the callers responsibility to not modify the pl slice,
// lest that backpropagate and cause inconsistencies.
func (awp atomWithPackages) bmi() bimodalIdentifier {
	return bimodalIdentifier{
		id: awp.a.id,
		pl: awp.pl,
	}
}

// completeDep (name hopefully to change) provides the whole picture of a
// dependency - the root (repo and project, since currently we assume the two
// are the same) name, a constraint, and the actual packages needed that are
// under that root.
type completeDep struct {
	// The base workingConstraint
	workingConstraint
	// The specific packages required from the ProjectDep
	pl []string
}

// dependency represents an incomplete edge in the depgraph. It has a
// fully-realized atom as the depender (the tail/source of the edge), and a set
// of requirements that any atom to be attached at the head/target must satisfy.
type dependency struct {
	depender atom
	dep      completeDep
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/version_queue.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"fmt"
	"strings"
)

// failedVersion records a version that was tried and rejected, together with
// the error that eliminated it.
type failedVersion struct {
	v Version
	f error
}

// versionQueue walks the candidate versions of a single project in preference
// order: the lock version first (if any), then a preferred version (if any),
// then everything reported by the source bridge.
type versionQueue struct {
	id           ProjectIdentifier // project whose versions are being queued
	pi           []Version         // remaining candidates; head is the current version
	lockv, prefv Version           // seeded lock/preferred versions, if provided
	fails        []failedVersion   // versions already tried and rejected
	b            sourceBridge      // used to lazily list the full version set
	failed       bool              // whether the current version has failed
	allLoaded    bool              // whether the full version list has been fetched
	adverr       error             // sticky error from a failed advance()
}

// newVersionQueue constructs a versionQueue for id. lockv and prefv, when
// non-nil, are placed at the front of the queue (lock before preferred). If
// neither is provided, the full version list is fetched from b immediately;
// otherwise that fetch is deferred until the seeded versions are exhausted.
func newVersionQueue(id ProjectIdentifier, lockv, prefv Version, b sourceBridge) (*versionQueue, error) {
	vq := &versionQueue{
		id: id,
		b:  b,
	}

	// Lock goes in first, if present
	if lockv != nil {
		vq.lockv = lockv
		vq.pi = append(vq.pi, lockv)
	}

	// Preferred version next
	if prefv != nil {
		vq.prefv = prefv
		vq.pi = append(vq.pi, prefv)
	}

	if len(vq.pi) == 0 {
		var err error
		vq.pi, err = vq.b.listVersions(vq.id)
		if err != nil {
			// TODO(sdboyer) pushing this error this early entails that we
			// unconditionally deep scan (e.g. vendor), as well as hitting the
			// network.
			return nil, err
		}
		vq.allLoaded = true
	}

	return vq, nil
}

// current returns the version at the head of the queue, or nil if the queue is
// empty.
func (vq *versionQueue) current() Version {
	if len(vq.pi) > 0 {
		return vq.pi[0]
	}

	return nil
}

// advance moves the versionQueue forward to the next available version,
// recording the failure that eliminated the current version.
func (vq *versionQueue) advance(fail error) error {
	// Nothing in the queue means...nothing in the queue, nicely enough
	if vq.adverr != nil || len(vq.pi) == 0 {
		// should be a redundant check, but just in case
		return vq.adverr
	}

	// Record the fail reason and pop the queue
	vq.fails = append(vq.fails, failedVersion{
		v: vq.pi[0],
		f: fail,
	})
	vq.pi = vq.pi[1:]

	// *now*, if the queue is empty, ensure all versions have been loaded
	if len(vq.pi) == 0 {
		if vq.allLoaded {
			// This branch gets hit when the queue is first fully exhausted,
			// after a previous advance() already called ListVersions().
			return nil
		}
		vq.allLoaded = true

		var vltmp []Version
		vltmp, vq.adverr = vq.b.listVersions(vq.id)
		if vq.adverr != nil {
			return vq.adverr
		}
		// defensive copy - calling listVersions here means slice contents may
		// be modified when removing prefv/lockv.
		vq.pi = make([]Version, len(vltmp))
		copy(vq.pi, vltmp)

		// search for and remove lockv and prefv, in a pointer GC-safe manner
		//
		// could use the version comparator for binary search here to avoid
		// O(n) each time...if it matters
		var delkeys []int
		for k, pi := range vq.pi {
			if pi == vq.lockv || pi == vq.prefv {
				delkeys = append(delkeys, k)
			}
		}

		for k, dk := range delkeys {
			// k earlier deletions have shifted this index down by k.
			dk -= k
			copy(vq.pi[dk:], vq.pi[dk+1:])
			// write nil to final position for GC safety
			vq.pi[len(vq.pi)-1] = nil
			vq.pi = vq.pi[:len(vq.pi)-1]
		}

		if len(vq.pi) == 0 {
			// If listing versions added nothing (new), then return now
			return nil
		}
	}

	// We're finally sure that there's something in the queue. Remove the
	// failure marker, as the current version may have failed, but the next one
	// hasn't yet
	vq.failed = false

	// If all have been loaded and the queue is empty, we're definitely out
	// of things to try. Return empty, though, because vq semantics dictate
	// that we don't explicitly indicate the end of the queue here.
	return nil
}

// isExhausted indicates whether or not the queue has definitely been exhausted,
// in which case it will return true.
//
// It may return false negatives - suggesting that there is more in the queue
// when a subsequent call to current() will be empty. Plan accordingly.
func (vq *versionQueue) isExhausted() bool {
	if !vq.allLoaded {
		return false
	}
	return len(vq.pi) == 0
}

// String renders the remaining queue contents as a bracketed, comma-separated
// list of version strings.
func (vq *versionQueue) String() string {
	var vs []string

	for _, v := range vq.pi {
		vs = append(vs, v.String())
	}
	return fmt.Sprintf("[%s]", strings.Join(vs, ", "))
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/vcs_repo.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "bytes" "context" "encoding/xml" "os" "path/filepath" "runtime" "strings" "time" "github.com/Masterminds/vcs" "github.com/pkg/errors" ) type ctxRepo interface { vcs.Repo get(context.Context) error fetch(context.Context) error updateVersion(context.Context, string) error //ping(context.Context) (bool, error) } // ensureCleaner is an optional extension of ctxRepo. type ensureCleaner interface { // ensureClean ensures a repository is clean and in working order, // or returns an error if the adaptive recovery attempts fail. ensureClean(context.Context) error } // original implementation of these methods come from // https://github.com/Masterminds/vcs type gitRepo struct { *vcs.GitRepo } func newVcsRemoteErrorOr(err error, args []string, out, msg string) error { if err == context.Canceled || err == context.DeadlineExceeded { return err } return vcs.NewRemoteError(msg, errors.Wrapf(err, "command failed: %v", args), out) } func newVcsLocalErrorOr(err error, args []string, out, msg string) error { if err == context.Canceled || err == context.DeadlineExceeded { return err } return vcs.NewLocalError(msg, errors.Wrapf(err, "command failed: %v", args), out) } func (r *gitRepo) get(ctx context.Context) error { cmd := commandContext( ctx, "git", "clone", "--recursive", "-v", "--progress", r.Remote(), r.LocalPath(), ) // Ensure no prompting for PWs cmd.SetEnv(append([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()...)) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to get repository") } return nil } func (r *gitRepo) fetch(ctx context.Context) error { cmd := commandContext( ctx, "git", "fetch", "--tags", "--prune", r.RemoteLocation, ) cmd.SetDir(r.LocalPath()) // Ensure no prompting for PWs 
cmd.SetEnv(append([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()...)) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to update repository") } return nil } func (r *gitRepo) updateVersion(ctx context.Context, v string) error { cmd := commandContext(ctx, "git", "checkout", v) cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to update checked out version") } return r.defendAgainstSubmodules(ctx) } // defendAgainstSubmodules tries to keep repo state sane in the event of // submodules. Or nested submodules. What a great idea, submodules. func (r *gitRepo) defendAgainstSubmodules(ctx context.Context) error { // First, update them to whatever they should be, if there should happen to be any. { cmd := commandContext( ctx, "git", "submodule", "update", "--init", "--recursive", ) cmd.SetDir(r.LocalPath()) // Ensure no prompting for PWs cmd.SetEnv(append([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()...)) if out, err := cmd.CombinedOutput(); err != nil { return newVcsLocalErrorOr(err, cmd.Args(), string(out), "unexpected error while defensively updating submodules") } } // Now, do a special extra-aggressive clean in case changing versions caused // one or more submodules to go away. { cmd := commandContext(ctx, "git", "clean", "-x", "-d", "-f", "-f") cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsLocalErrorOr(err, cmd.Args(), string(out), "unexpected error while defensively cleaning up after possible derelict submodule directories") } } // Then, repeat just in case there are any nested submodules that went away. 
{ cmd := commandContext( ctx, "git", "submodule", "foreach", "--recursive", "git clean -x -d -f -f", ) cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsLocalErrorOr(err, cmd.Args(), string(out), "unexpected error while defensively cleaning up after possible derelict nested submodule directories") } } return nil } func (r *gitRepo) ensureClean(ctx context.Context) error { cmd := commandContext( ctx, "git", "status", "--porcelain", ) cmd.SetDir(r.LocalPath()) out, err := cmd.CombinedOutput() if err != nil { // An error on simple git status indicates some aggressive repository // corruption, outside of the purview that we can deal with here. return err } if len(bytes.TrimSpace(out)) == 0 { // No output from status indicates a clean tree, without any modified or // untracked files - we're in good shape. return nil } // We could be more parsimonious about this, but it's probably not worth it // - it's a rare case to have to do any cleanup anyway, so when we do, we // might as well just throw the kitchen sink at it. cmd = commandContext( ctx, "git", "reset", "--hard", ) cmd.SetDir(r.LocalPath()) _, err = cmd.CombinedOutput() if err != nil { return err } // We also need to git clean -df; just reuse defendAgainstSubmodules here, // even though it's a bit layer-breaky. err = r.defendAgainstSubmodules(ctx) if err != nil { return err } // Check status one last time. If it's still not clean, give up. cmd = commandContext( ctx, "git", "status", "--porcelain", ) cmd.SetDir(r.LocalPath()) out, err = cmd.CombinedOutput() if err != nil { return err } if len(bytes.TrimSpace(out)) != 0 { return errors.Errorf("failed to clean up git repository at %s - dirty? corrupted? 
status output: \n%s", r.LocalPath(), string(out)) } return nil } type bzrRepo struct { *vcs.BzrRepo } func (r *bzrRepo) get(ctx context.Context) error { basePath := filepath.Dir(filepath.FromSlash(r.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { return newVcsLocalErrorOr(err, nil, "", "unable to create directory") } } cmd := commandContext(ctx, "bzr", "branch", r.Remote(), r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to get repository") } return nil } func (r *bzrRepo) fetch(ctx context.Context) error { cmd := commandContext(ctx, "bzr", "pull") cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to update repository") } return nil } func (r *bzrRepo) updateVersion(ctx context.Context, version string) error { cmd := commandContext(ctx, "bzr", "update", "-r", version) cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to update checked out version") } return nil } type hgRepo struct { *vcs.HgRepo } func (r *hgRepo) get(ctx context.Context) error { cmd := commandContext(ctx, "hg", "clone", r.Remote(), r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to get repository") } return nil } func (r *hgRepo) fetch(ctx context.Context) error { cmd := commandContext(ctx, "hg", "pull") cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to fetch latest changes") } return nil } func (r *hgRepo) updateVersion(ctx context.Context, version string) error { cmd := commandContext(ctx, "hg", "update", version) cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return 
newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to update checked out version") } return nil } type svnRepo struct { *vcs.SvnRepo } func (r *svnRepo) get(ctx context.Context) error { remote := r.Remote() if strings.HasPrefix(remote, "/") { remote = "file://" + remote } else if runtime.GOOS == "windows" && filepath.VolumeName(remote) != "" { remote = "file:///" + remote } cmd := commandContext(ctx, "svn", "checkout", remote, r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to get repository") } return nil } func (r *svnRepo) fetch(ctx context.Context) error { cmd := commandContext(ctx, "svn", "update") cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to update repository") } return nil } func (r *svnRepo) updateVersion(ctx context.Context, version string) error { cmd := commandContext(ctx, "svn", "update", "-r", version) cmd.SetDir(r.LocalPath()) if out, err := cmd.CombinedOutput(); err != nil { return newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to update checked out version") } return nil } func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { ctx := context.TODO() // There are cases where Svn log doesn't return anything for HEAD or BASE. // svn info does provide details for these but does not have elements like // the commit message. 
if id == "HEAD" || id == "BASE" { type commit struct { Revision string `xml:"revision,attr"` } type info struct { Commit commit `xml:"entry>commit"` } cmd := commandContext(ctx, "svn", "info", "-r", id, "--xml") cmd.SetDir(r.LocalPath()) out, err := cmd.CombinedOutput() if err != nil { return nil, newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to retrieve commit information") } infos := new(info) if err := xml.Unmarshal(out, &infos); err != nil { return nil, newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to retrieve commit information") } id = infos.Commit.Revision if id == "" { return nil, vcs.ErrRevisionUnavailable } } cmd := commandContext(ctx, "svn", "log", "-r", id, "--xml") cmd.SetDir(r.LocalPath()) out, err := cmd.CombinedOutput() if err != nil { return nil, newVcsRemoteErrorOr(err, cmd.Args(), string(out), "unable to retrieve commit information") } type logentry struct { Author string `xml:"author"` Date string `xml:"date"` Msg string `xml:"msg"` } type log struct { XMLName xml.Name `xml:"log"` Logs []logentry `xml:"logentry"` } logs := new(log) if err := xml.Unmarshal(out, &logs); err != nil { return nil, newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to retrieve commit information") } if len(logs.Logs) == 0 { return nil, vcs.ErrRevisionUnavailable } ci := &vcs.CommitInfo{ Commit: id, Author: logs.Logs[0].Author, Message: logs.Logs[0].Msg, } if len(logs.Logs[0].Date) > 0 { ci.Date, err = time.Parse(time.RFC3339Nano, logs.Logs[0].Date) if err != nil { return nil, newVcsLocalErrorOr(err, cmd.Args(), string(out), "unable to retrieve commit information") } } return ci, nil }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/version_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import "testing" func TestVersionSorts(t *testing.T) { rev := Revision("flooboofoobooo") v1 := NewBranch("master").Pair(rev) v2 := NewBranch("test").Pair(rev) v3 := NewVersion("1.0.0").Pair(rev) v4 := NewVersion("1.0.1").Pair(rev) v5 := NewVersion("v2.0.5").Pair(rev) v6 := NewVersion("2.0.5.2").Pair(rev) v7 := newDefaultBranch("unwrapped").Pair(rev) v8 := NewVersion("20.0.5.2").Pair(rev) v9 := NewVersion("v1.5.5-beta.4").Pair(rev) v10 := NewVersion("v3.0.1-alpha.1").Pair(rev) start := []Version{ v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, rev, } down := make([]Version, len(start)) copy(down, start) up := make([]Version, len(start)) copy(up, start) edown := []Version{ v3, v4, v5, // semvers v9, v10, // prerelease semver v7, v1, v2, // floating/branches v6, v8, // plain versions rev, // revs } eup := []Version{ v5, v4, v3, // semvers v10, v9, // prerelease semver v7, v1, v2, // floating/branches v6, v8, // plain versions rev, // revs } SortForUpgrade(up) var wrong []int for k, v := range up { if eup[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on upgrade sort, but got %s", eup[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Upgrade sort positions with wrong versions: %v", wrong) } SortForDowngrade(down) wrong = wrong[:0] for k, v := range down { if edown[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on downgrade sort, but got %s", edown[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Downgrade sort positions with wrong versions: %v", wrong) } // Now make sure we sort back the other way correctly...just because SortForUpgrade(down) wrong = wrong[:0] for k, v := range down { if eup[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on 
down-then-upgrade sort, but got %s", eup[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Down-then-upgrade sort positions with wrong versions: %v", wrong) } // Now make sure we sort back the other way correctly...just because SortForDowngrade(up) wrong = wrong[:0] for k, v := range up { if edown[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", edown[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Fatalf("Up-then-downgrade sort positions with wrong versions: %v", wrong) } /////////// // Repeat for PairedVersion slices & sorts pdown, pup := make([]PairedVersion, 0, len(start)), make([]PairedVersion, 0, len(start)) for _, v := range start { if _, ok := v.(Revision); ok { continue } pdown = append(pdown, v.(PairedVersion)) pup = append(pup, v.(PairedVersion)) } pedown, peup := make([]PairedVersion, 0, len(edown)), make([]PairedVersion, 0, len(eup)) for _, v := range edown { if _, ok := v.(Revision); ok { continue } pedown = append(pedown, v.(PairedVersion)) } for _, v := range eup { if _, ok := v.(Revision); ok { continue } peup = append(peup, v.(PairedVersion)) } SortPairedForUpgrade(pup) for k, v := range pup { if peup[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on upgrade sort, but got %s", peup[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Upgrade sort positions with wrong versions: %v", wrong) } SortPairedForDowngrade(pdown) wrong = wrong[:0] for k, v := range pdown { if pedown[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on downgrade sort, but got %s", pedown[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Downgrade sort positions with wrong versions: %v", wrong) } // Now make sure we sort back the other way correctly...just because SortPairedForUpgrade(pdown) wrong = wrong[:0] for k, v := range 
pdown { if peup[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on down-then-upgrade sort, but got %s", peup[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Down-then-upgrade sort positions with wrong versions: %v", wrong) } // Now make sure we sort back the other way correctly...just because SortPairedForDowngrade(pup) wrong = wrong[:0] for k, v := range pup { if pedown[k] != v { wrong = append(wrong, k) t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", pedown[k], k, v) } } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Up-then-downgrade sort positions with wrong versions: %v", wrong) } }
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/solve_basic_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "context" "fmt" "net/url" "regexp" "strings" "github.com/Masterminds/semver" "github.com/golang/dep/gps/pkgtree" ) var regfrom = regexp.MustCompile(`^(\w*) from (\w*) ([0-9\.\*]*)`) // nvSplit splits an "info" string on " " into the pair of name and // version/constraint, and returns each individually. // // This is for narrow use - panics if there are less than two resulting items in // the slice. func nvSplit(info string) (id ProjectIdentifier, version string) { if strings.Contains(info, " from ") { parts := regfrom.FindStringSubmatch(info) info = parts[1] + " " + parts[3] id.Source = parts[2] } s := strings.SplitN(info, " ", 2) if len(s) < 2 { panic(fmt.Sprintf("Malformed name/version info string '%s'", info)) } id.ProjectRoot, version = ProjectRoot(s[0]), s[1] return } // nvrSplit splits an "info" string on " " into the triplet of name, // version/constraint, and revision, and returns each individually. // // It will work fine if only name and version/constraint are provided. // // This is for narrow use - panics if there are less than two resulting items in // the slice. func nvrSplit(info string) (id ProjectIdentifier, version string, revision Revision) { if strings.Contains(info, " from ") { parts := regfrom.FindStringSubmatch(info) info = fmt.Sprintf("%s %s", parts[1], parts[3]) id.Source = parts[2] } s := strings.SplitN(info, " ", 3) if len(s) < 2 { panic(fmt.Sprintf("Malformed name/version info string '%s'", info)) } id.ProjectRoot, version = ProjectRoot(s[0]), s[1] if len(s) == 3 { revision = Revision(s[2]) } return } // mkAtom splits the input string on a space, and uses the first two elements as // the project identifier and version, respectively. 
// // The version segment may have a leading character indicating the type of // version to create: // // p: create a "plain" (non-semver) version. // b: create a branch version. // r: create a revision. // // No prefix is assumed to indicate a semver version. // // If a third space-delimited element is provided, it will be interepreted as a // revision, and used as the underlying version in a PairedVersion. No prefix // should be provided in this case. It is an error (and will panic) to try to // pass a revision with an underlying revision. func mkAtom(info string) atom { // if info is "root", special case it to use the root "version" if info == "root" { return atom{ id: ProjectIdentifier{ ProjectRoot: ProjectRoot("root"), }, v: rootRev, } } id, ver, rev := nvrSplit(info) var v Version switch ver[0] { case 'r': if rev != "" { panic("Cannot pair a revision with a revision") } v = Revision(ver[1:]) case 'p': v = NewVersion(ver[1:]) case 'b': v = NewBranch(ver[1:]) default: _, err := semver.NewVersion(ver) if err != nil { // don't want to allow bad test data at this level, so just panic panic(fmt.Sprintf("Error when converting '%s' into semver: %s", ver, err)) } v = NewVersion(ver) } if rev != "" { v = v.(UnpairedVersion).Pair(rev) } return atom{ id: id, v: v, } } // mkPCstrnt splits the input string on a space, and uses the first two elements // as the project identifier and constraint body, respectively. // // The constraint body may have a leading character indicating the type of // version to create: // // p: create a "plain" (non-semver) version. // b: create a branch version. // r: create a revision. // // If no leading character is used, a semver constraint is assumed. 
func mkPCstrnt(info string) ProjectConstraint { id, ver, rev := nvrSplit(info) var c Constraint switch ver[0] { case 'r': c = Revision(ver[1:]) case 'p': c = NewVersion(ver[1:]) case 'b': c = NewBranch(ver[1:]) default: // Without one of those leading characters, we know it's a proper semver // expression, so use the other parser that doesn't look for a rev rev = "" id, ver = nvSplit(info) var err error c, err = NewSemverConstraint(ver) if err != nil { // don't want bad test data at this level, so just panic panic(fmt.Sprintf("Error when converting '%s' into semver constraint: %s (full info: %s)", ver, err, info)) } } // There's no practical reason that a real tool would need to produce a // constraint that's a PairedVersion, but it is a possibility admitted by the // system, so we at least allow for it in our testing harness. if rev != "" { // Of course, this *will* panic if the predicate is a revision or a // semver constraint, neither of which implement UnpairedVersion. This // is as intended, to prevent bad data from entering the system. c = c.(UnpairedVersion).Pair(rev) } return ProjectConstraint{ Ident: id, Constraint: c, } } // mkCDep composes a completeDep struct from the inputs. // // The only real work here is passing the initial string to mkPDep. All the // other args are taken as package names. func mkCDep(pdep string, pl ...string) completeDep { pc := mkPCstrnt(pdep) return completeDep{ workingConstraint: workingConstraint{ Ident: pc.Ident, Constraint: pc.Constraint, }, pl: pl, } } // A depspec is a fixture representing all the information a SourceManager would // ordinarily glean directly from interrogating a repository. type depspec struct { n ProjectRoot v Version deps []ProjectConstraint pkgs []tpkg } // mkDepspec creates a depspec by processing a series of strings, each of which // contains an identiifer and version information. 
// // The first string is broken out into the name and version of the package being // described - see the docs on mkAtom for details. subsequent strings are // interpreted as dep constraints of that dep at that version. See the docs on // mkPDep for details. func mkDepspec(pi string, deps ...string) depspec { pa := mkAtom(pi) if string(pa.id.ProjectRoot) != pa.id.Source && pa.id.Source != "" { panic("alternate source on self makes no sense") } ds := depspec{ n: pa.id.ProjectRoot, v: pa.v, } for _, dep := range deps { ds.deps = append(ds.deps, mkPCstrnt(dep)) } return ds } func mkDep(atom, pdep string, pl ...string) dependency { return dependency{ depender: mkAtom(atom), dep: mkCDep(pdep, pl...), } } func mkADep(atom, pdep string, c Constraint, pl ...string) dependency { return dependency{ depender: mkAtom(atom), dep: completeDep{ workingConstraint: workingConstraint{ Ident: ProjectIdentifier{ ProjectRoot: ProjectRoot(pdep), }, Constraint: c, }, pl: pl, }, } } // mkPI creates a ProjectIdentifier with the ProjectRoot as the provided // string, and the Source unset. // // Call normalize() on the returned value if you need the Source to be be // equal to the ProjectRoot. func mkPI(root string) ProjectIdentifier { return ProjectIdentifier{ ProjectRoot: ProjectRoot(root), } } // mkSVC creates a new semver constraint, panicking if an error is returned. 
func mkSVC(body string) Constraint { c, err := NewSemverConstraint(body) if err != nil { panic(fmt.Sprintf("Error while trying to create semver constraint from %s: %s", body, err.Error())) } return c } // mklock makes a fixLock, suitable to act as a lock file func mklock(pairs ...string) fixLock { l := make(fixLock, 0) for _, s := range pairs { pa := mkAtom(s) l = append(l, NewLockedProject(pa.id, pa.v, nil)) } return l } // mkrevlock makes a fixLock, suitable to act as a lock file, with only a name // and a rev func mkrevlock(pairs ...string) fixLock { l := make(fixLock, 0) for _, s := range pairs { pa := mkAtom(s) l = append(l, NewLockedProject(pa.id, pa.v.(PairedVersion).Revision(), nil)) } return l } // mksolution creates a map of project identifiers to their LockedProject // result, which is sufficient to act as a solution fixture for the purposes of // most tests. // // Either strings or LockedProjects can be provided. If a string is provided, it // is assumed that we're in the default, "basic" case where there is exactly one // package in a project, and it is the root of the project - meaning that only // the "." package should be listed. If a LockedProject is provided (e.g. as // returned from mklp()), then it's incorporated directly. // // If any other type is provided, the func will panic. func mksolution(inputs ...interface{}) map[ProjectIdentifier]LockedProject { m := make(map[ProjectIdentifier]LockedProject) for _, in := range inputs { switch t := in.(type) { case string: a := mkAtom(t) m[a.id] = NewLockedProject(a.id, a.v, []string{"."}) case LockedProject: m[t.Ident()] = t default: panic(fmt.Sprintf("unexpected input to mksolution: %T %s", in, in)) } } return m } // mklp creates a LockedProject from string inputs func mklp(pair string, pkgs ...string) LockedProject { a := mkAtom(pair) return NewLockedProject(a.id, a.v, pkgs) } // computeBasicReachMap takes a depspec and computes a reach map which is // identical to the explicit depgraph. 
// // Using a reachMap here is overkill for what the basic fixtures actually need, // but we use it anyway for congruence with the more general cases. func computeBasicReachMap(ds []depspec) reachMap { rm := make(reachMap) for k, d := range ds { n := string(d.n) lm := map[string][]string{ n: nil, } v := d.v if k == 0 { // Put the root in with a nil rev, to accommodate the solver v = nil } rm[pident{n: d.n, v: v}] = lm for _, dep := range d.deps { lm[n] = append(lm[n], string(dep.Ident.ProjectRoot)) } } return rm } type pident struct { n ProjectRoot v Version } type specfix interface { name() string rootmanifest() RootManifest rootTree() pkgtree.PackageTree specs() []depspec maxTries() int solution() map[ProjectIdentifier]LockedProject failure() error } // A basicFixture is a declarative test fixture that can cover a wide variety of // solver cases. All cases, however, maintain one invariant: package == project. // There are no subpackages, and so it is impossible for them to trigger or // require bimodal solving. // // This type is separate from bimodalFixture in part for legacy reasons - many // of these were adapted from similar tests in dart's pub lib, where there is no // such thing as "bimodal solving". // // But it's also useful to keep them separate because bimodal solving involves // considerably more complexity than simple solving, both in terms of fixture // declaration and actual solving mechanics. Thus, we gain a lot of value for // contributors and maintainers by keeping comprehension costs relatively low // while still covering important cases. type basicFixture struct { // name of this fixture datum n string // depspecs. always treat first as root ds []depspec // results; map of name/atom pairs r map[ProjectIdentifier]LockedProject // max attempts the solver should need to find solution. 
0 means no limit maxAttempts int // Use downgrade instead of default upgrade sorter downgrade bool // lock file simulator, if one's to be used at all l fixLock // solve failure expected, if any fail error // overrides, if any ovr ProjectConstraints // request up/downgrade to all projects changeall bool // individual projects to change changelist []ProjectRoot // if the fixture is currently broken/expected to fail, this has a message // recording why broken string } func (f basicFixture) name() string { return f.n } func (f basicFixture) specs() []depspec { return f.ds } func (f basicFixture) maxTries() int { return f.maxAttempts } func (f basicFixture) solution() map[ProjectIdentifier]LockedProject { return f.r } func (f basicFixture) rootmanifest() RootManifest { return simpleRootManifest{ c: pcSliceToMap(f.ds[0].deps), ovr: f.ovr, } } func (f basicFixture) rootTree() pkgtree.PackageTree { var imp []string for _, dep := range f.ds[0].deps { imp = append(imp, string(dep.Ident.ProjectRoot)) } n := string(f.ds[0].n) pt := pkgtree.PackageTree{ ImportRoot: n, Packages: map[string]pkgtree.PackageOrErr{ string(n): { P: pkgtree.Package{ ImportPath: n, Name: n, Imports: imp, }, }, }, } return pt } func (f basicFixture) failure() error { return f.fail } // A table of basicFixtures, used in the basic solving test set. 
var basicFixtures = map[string]basicFixture{ // basic fixtures "no dependencies": { ds: []depspec{ mkDepspec("root 0.0.0"), }, r: mksolution(), }, "simple dependency tree": { ds: []depspec{ mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"), mkDepspec("a 1.0.0", "aa 1.0.0", "ab 1.0.0"), mkDepspec("aa 1.0.0"), mkDepspec("ab 1.0.0"), mkDepspec("b 1.0.0", "ba 1.0.0", "bb 1.0.0"), mkDepspec("ba 1.0.0"), mkDepspec("bb 1.0.0"), }, r: mksolution( "a 1.0.0", "aa 1.0.0", "ab 1.0.0", "b 1.0.0", "ba 1.0.0", "bb 1.0.0", ), }, "shared dependency with overlapping constraints": { ds: []depspec{ mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"), mkDepspec("a 1.0.0", "shared >=2.0.0, <4.0.0"), mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"), mkDepspec("shared 2.0.0"), mkDepspec("shared 3.0.0"), mkDepspec("shared 3.6.9"), mkDepspec("shared 4.0.0"), mkDepspec("shared 5.0.0"), }, r: mksolution( "a 1.0.0", "b 1.0.0", "shared 3.6.9", ), }, "downgrade on overlapping constraints": { ds: []depspec{ mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"), mkDepspec("a 1.0.0", "shared >=2.0.0, <=4.0.0"), mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"), mkDepspec("shared 2.0.0"), mkDepspec("shared 3.0.0"), mkDepspec("shared 3.6.9"), mkDepspec("shared 4.0.0"), mkDepspec("shared 5.0.0"), }, r: mksolution( "a 1.0.0", "b 1.0.0", "shared 3.0.0", ), downgrade: true, }, "shared dependency where dependent version in turn affects other dependencies": { ds: []depspec{ mkDepspec("root 0.0.0", "foo <=1.0.2", "bar 1.0.0"), mkDepspec("foo 1.0.0"), mkDepspec("foo 1.0.1", "bang 1.0.0"), mkDepspec("foo 1.0.2", "whoop 1.0.0"), mkDepspec("foo 1.0.3", "zoop 1.0.0"), mkDepspec("bar 1.0.0", "foo <=1.0.1"), mkDepspec("bang 1.0.0"), mkDepspec("whoop 1.0.0"), mkDepspec("zoop 1.0.0"), }, r: mksolution( "foo 1.0.1", "bar 1.0.0", "bang 1.0.0", ), }, "removed dependency": { ds: []depspec{ mkDepspec("root 1.0.0", "foo 1.0.0", "bar *"), mkDepspec("foo 1.0.0"), mkDepspec("foo 2.0.0"), mkDepspec("bar 1.0.0"), mkDepspec("bar 2.0.0", "baz 1.0.0"), 
mkDepspec("baz 1.0.0", "foo 2.0.0"), }, r: mksolution( "foo 1.0.0", "bar 1.0.0", ), maxAttempts: 2, }, // fixtures with locks "with compatible locked dependency": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.1", "bar 1.0.1", ), }, "upgrade through lock": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", ), changeall: true, }, "downgrade through lock": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.0", "bar 1.0.0", ), changeall: true, downgrade: true, }, "update one with only one": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0"), mkDepspec("foo 1.0.1"), mkDepspec("foo 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.2", ), changelist: []ProjectRoot{"foo"}, }, "update one of multi": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *"), mkDepspec("foo 1.0.0"), mkDepspec("foo 1.0.1"), mkDepspec("foo 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", "bar 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.1", ), changelist: []ProjectRoot{"foo"}, }, "update both of multi": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *"), mkDepspec("foo 1.0.0"), mkDepspec("foo 1.0.1"), mkDepspec("foo 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 
1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", "bar 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", ), changelist: []ProjectRoot{"foo", "bar"}, }, "update two of more": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *", "baz *"), mkDepspec("foo 1.0.0"), mkDepspec("foo 1.0.1"), mkDepspec("foo 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), mkDepspec("baz 1.0.0"), mkDepspec("baz 1.0.1"), mkDepspec("baz 1.0.2"), }, l: mklock( "foo 1.0.1", "bar 1.0.1", "baz 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", "baz 1.0.1", ), changelist: []ProjectRoot{"foo", "bar"}, }, "break other lock with targeted update": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "baz *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), mkDepspec("baz 1.0.0"), mkDepspec("baz 1.0.1"), mkDepspec("baz 1.0.2"), }, l: mklock( "foo 1.0.1", "bar 1.0.1", "baz 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", "baz 1.0.1", ), changelist: []ProjectRoot{"foo", "bar"}, }, "with incompatible locked dependency": { ds: []depspec{ mkDepspec("root 0.0.0", "foo >1.0.1"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mklock( "foo 1.0.1", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", ), }, "with unrelated locked dependency": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), mkDepspec("baz 1.0.0 bazrev"), }, l: mklock( "baz 1.0.0 bazrev", ), r: mksolution( "foo 1.0.2", "bar 1.0.2", ), }, "unlocks dependencies if necessary to ensure that a new dependency is satisfied": { ds: []depspec{ mkDepspec("root 
0.0.0", "foo *", "newdep *"), mkDepspec("foo 1.0.0 foorev", "bar <2.0.0"), mkDepspec("bar 1.0.0 barrev", "baz <2.0.0"), mkDepspec("baz 1.0.0 bazrev", "qux <2.0.0"), mkDepspec("qux 1.0.0 quxrev"), mkDepspec("foo 2.0.0", "bar <3.0.0"), mkDepspec("bar 2.0.0", "baz <3.0.0"), mkDepspec("baz 2.0.0", "qux <3.0.0"), mkDepspec("qux 2.0.0"), mkDepspec("newdep 2.0.0", "baz >=1.5.0"), }, l: mklock( "foo 1.0.0 foorev", "bar 1.0.0 barrev", "baz 1.0.0 bazrev", "qux 1.0.0 quxrev", ), r: mksolution( "foo 2.0.0", "bar 2.0.0", "baz 2.0.0", "qux 1.0.0 quxrev", "newdep 2.0.0", ), maxAttempts: 4, }, "break lock when only the deps necessitate it": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *"), mkDepspec("foo 1.0.0 foorev", "bar <2.0.0"), mkDepspec("foo 2.0.0", "bar <3.0.0"), mkDepspec("bar 2.0.0", "baz <3.0.0"), mkDepspec("baz 2.0.0", "foo >1.0.0"), }, l: mklock( "foo 1.0.0 foorev", ), r: mksolution( "foo 2.0.0", "bar 2.0.0", "baz 2.0.0", ), maxAttempts: 4, }, "locked atoms are matched on both local and net name": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0 foorev"), mkDepspec("foo 2.0.0 foorev2"), }, l: mklock( "foo from baz 1.0.0 foorev", ), r: mksolution( "foo 2.0.0 foorev2", ), }, // This fixture describes a situation that should be impossible with a // real-world VCS (contents of dep at same rev are different, as indicated // by different constraints on bar). But, that's not the SUT here, so it's // OK. 
"pairs bare revs in lock with all versions": { ds: []depspec{ mkDepspec("root 0.0.0", "foo ~1.0.1"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"), mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mkrevlock( "foo 1.0.1 foorev", // mkrevlock drops the 1.0.1 ), r: mksolution( "foo 1.0.2 foorev", "bar 1.0.2", ), }, "does not pair bare revs in manifest with unpaired lock version": { ds: []depspec{ mkDepspec("root 0.0.0", "foo ~1.0.1"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"), mkDepspec("foo 1.0.2", "bar 1.0.2"), mkDepspec("bar 1.0.0"), mkDepspec("bar 1.0.1"), mkDepspec("bar 1.0.2"), }, l: mkrevlock( "foo 1.0.1 foorev", // mkrevlock drops the 1.0.1 ), r: mksolution( "foo 1.0.2", "bar 1.0.2", ), }, "lock to branch on old rev keeps old rev": { ds: []depspec{ mkDepspec("root 0.0.0", "foo bmaster"), mkDepspec("foo bmaster newrev"), }, l: mklock( "foo bmaster oldrev", ), r: mksolution( "foo bmaster oldrev", ), }, // Whereas this is a normal situation for a branch, when it occurs for a // tag, it means someone's been naughty upstream. Still, though, the outcome // is the same. 
// // TODO(sdboyer) this needs to generate a warning, once we start doing that "lock to now-moved tag on old rev keeps old rev": { ds: []depspec{ mkDepspec("root 0.0.0", "foo ptaggerino"), mkDepspec("foo ptaggerino newrev"), }, l: mklock( "foo ptaggerino oldrev", ), r: mksolution( "foo ptaggerino oldrev", ), }, "no version that matches requirement": { ds: []depspec{ mkDepspec("root 0.0.0", "foo ^1.0.0"), mkDepspec("foo 2.0.0"), mkDepspec("foo 2.1.3"), }, fail: &noVersionError{ pn: mkPI("foo"), fails: []failedVersion{ { v: NewVersion("2.1.3"), f: &versionNotAllowedFailure{ goal: mkAtom("foo 2.1.3"), failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")}, c: mkSVC("^1.0.0"), }, }, { v: NewVersion("2.0.0"), f: &versionNotAllowedFailure{ goal: mkAtom("foo 2.0.0"), failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")}, c: mkSVC("^1.0.0"), }, }, }, }, }, "no version that matches combined constraint": { ds: []depspec{ mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.0", "shared >=2.0.0, <3.0.0"), mkDepspec("bar 1.0.0", "shared >=2.9.0, <4.0.0"), mkDepspec("shared 2.5.0"), mkDepspec("shared 3.5.0"), }, fail: &noVersionError{ pn: mkPI("shared"), fails: []failedVersion{ { v: NewVersion("3.5.0"), f: &versionNotAllowedFailure{ goal: mkAtom("shared 3.5.0"), failparent: []dependency{mkDep("foo 1.0.0", "shared >=2.0.0, <3.0.0", "shared")}, c: mkSVC(">=2.9.0, <3.0.0"), }, }, { v: NewVersion("2.5.0"), f: &versionNotAllowedFailure{ goal: mkAtom("shared 2.5.0"), failparent: []dependency{mkDep("bar 1.0.0", "shared >=2.9.0, <4.0.0", "shared")}, c: mkSVC(">=2.9.0, <3.0.0"), }, }, }, }, }, "disjoint constraints": { ds: []depspec{ mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.0", "shared <=2.0.0"), mkDepspec("bar 1.0.0", "shared >3.0.0"), mkDepspec("shared 2.0.0"), mkDepspec("shared 4.0.0"), }, fail: &noVersionError{ pn: mkPI("foo"), fails: []failedVersion{ { v: NewVersion("1.0.0"), f: &disjointConstraintFailure{ goal: mkDep("foo 
1.0.0", "shared <=2.0.0", "shared"), failsib: []dependency{mkDep("bar 1.0.0", "shared >3.0.0", "shared")}, nofailsib: nil, c: mkSVC(">3.0.0"), }, }, }, }, }, "no valid solution": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "b *"), mkDepspec("a 1.0.0", "b 1.0.0"), mkDepspec("a 2.0.0", "b 2.0.0"), mkDepspec("b 1.0.0", "a 2.0.0"), mkDepspec("b 2.0.0", "a 1.0.0"), }, fail: &noVersionError{ pn: mkPI("b"), fails: []failedVersion{ { v: NewVersion("2.0.0"), f: &versionNotAllowedFailure{ goal: mkAtom("b 2.0.0"), failparent: []dependency{mkDep("a 1.0.0", "b 1.0.0", "b")}, c: mkSVC("1.0.0"), }, }, { v: NewVersion("1.0.0"), f: &constraintNotAllowedFailure{ goal: mkDep("b 1.0.0", "a 2.0.0", "a"), v: NewVersion("1.0.0"), }, }, }, }, }, "no version that matches while backtracking": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "b >1.0.0"), mkDepspec("a 1.0.0"), mkDepspec("b 1.0.0"), }, fail: &noVersionError{ pn: mkPI("b"), fails: []failedVersion{ { v: NewVersion("1.0.0"), f: &versionNotAllowedFailure{ goal: mkAtom("b 1.0.0"), failparent: []dependency{mkDep("root", "b >1.0.0", "b")}, c: mkSVC(">1.0.0"), }, }, }, }, }, // The latest versions of a and b disagree on c. An older version of either // will resolve the problem. This test validates that b, which is farther // in the dependency graph from myapp is downgraded first. "rolls back leaf versions first": { ds: []depspec{ mkDepspec("root 0.0.0", "a *"), mkDepspec("a 1.0.0", "b *"), mkDepspec("a 2.0.0", "b *", "c 2.0.0"), mkDepspec("b 1.0.0"), mkDepspec("b 2.0.0", "c 1.0.0"), mkDepspec("c 1.0.0"), mkDepspec("c 2.0.0"), }, r: mksolution( "a 2.0.0", "b 1.0.0", "c 2.0.0", ), maxAttempts: 2, }, // Only one version of baz, so foo and bar will have to downgrade until they // reach it. 
"mutual downgrading": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *"), mkDepspec("foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 2.0.0", "bar 2.0.0"), mkDepspec("foo 3.0.0", "bar 3.0.0"), mkDepspec("bar 1.0.0", "baz *"), mkDepspec("bar 2.0.0", "baz 2.0.0"), mkDepspec("bar 3.0.0", "baz 3.0.0"), mkDepspec("baz 1.0.0"), }, r: mksolution( "foo 1.0.0", "bar 1.0.0", "baz 1.0.0", ), maxAttempts: 3, }, // Ensures the solver doesn't exhaustively search all versions of b when // it's a-2.0.0 whose dependency on c-2.0.0-nonexistent led to the // problem. We make sure b has more versions than a so that the solver // tries a first since it sorts sibling dependencies by number of // versions. "search real failer": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "b *"), mkDepspec("a 1.0.0", "c 1.0.0"), mkDepspec("a 2.0.0", "c 2.0.0"), mkDepspec("b 1.0.0"), mkDepspec("b 2.0.0"), mkDepspec("b 3.0.0"), mkDepspec("c 1.0.0"), }, r: mksolution( "a 1.0.0", "b 3.0.0", "c 1.0.0", ), maxAttempts: 2, }, // Dependencies are ordered so that packages with fewer versions are tried // first. Here, there are two valid solutions (either a or b must be // downgraded once). The chosen one depends on which dep is traversed first. // Since b has fewer versions, it will be traversed first, which means a // will come later. Since later selections are revised first, a gets // downgraded. "traverse into package with fewer versions first": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "b *"), mkDepspec("a 1.0.0", "c *"), mkDepspec("a 2.0.0", "c *"), mkDepspec("a 3.0.0", "c *"), mkDepspec("a 4.0.0", "c *"), mkDepspec("a 5.0.0", "c 1.0.0"), mkDepspec("b 1.0.0", "c *"), mkDepspec("b 2.0.0", "c *"), mkDepspec("b 3.0.0", "c *"), mkDepspec("b 4.0.0", "c 2.0.0"), mkDepspec("c 1.0.0"), mkDepspec("c 2.0.0"), }, r: mksolution( "a 4.0.0", "b 4.0.0", "c 2.0.0", ), maxAttempts: 2, }, // This is similar to the preceding fixture. 
When getting the number of // versions of a package to determine which to traverse first, versions that // are disallowed by the root package's constraints should not be // considered. Here, foo has more versions than bar in total (4), but fewer // that meet myapp"s constraints (only 2). There is no solution, but we will // do less backtracking if foo is tested first. "root constraints pre-eliminate versions": { ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *"), mkDepspec("foo 1.0.0", "none 2.0.0"), mkDepspec("foo 2.0.0", "none 2.0.0"), mkDepspec("foo 3.0.0", "none 2.0.0"), mkDepspec("foo 4.0.0", "none 2.0.0"), mkDepspec("bar 1.0.0"), mkDepspec("bar 2.0.0"), mkDepspec("bar 3.0.0"), mkDepspec("none 1.0.0"), }, fail: &noVersionError{ pn: mkPI("none"), fails: []failedVersion{ { v: NewVersion("1.0.0"), f: &versionNotAllowedFailure{ goal: mkAtom("none 1.0.0"), failparent: []dependency{mkDep("foo 1.0.0", "none 2.0.0", "none")}, c: mkSVC("2.0.0"), }, }, }, }, }, // If there"s a disjoint constraint on a package, then selecting other // versions of it is a waste of time: no possible versions can match. We // need to jump past it to the most recent package that affected the // constraint. 
"backjump past failed package on disjoint constraint": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "foo *"), mkDepspec("a 1.0.0", "foo *"), mkDepspec("a 2.0.0", "foo <1.0.0"), mkDepspec("foo 2.0.0"), mkDepspec("foo 2.0.1"), mkDepspec("foo 2.0.2"), mkDepspec("foo 2.0.3"), mkDepspec("foo 2.0.4"), mkDepspec("none 1.0.0"), }, r: mksolution( "a 1.0.0", "foo 2.0.4", ), maxAttempts: 2, }, // Revision enters vqueue if a dep has a constraint on that revision "revision injected into vqueue": { ds: []depspec{ mkDepspec("root 0.0.0", "foo r123abc"), mkDepspec("foo r123abc"), mkDepspec("foo 1.0.0 foorev"), mkDepspec("foo 2.0.0 foorev2"), }, r: mksolution( "foo r123abc", ), }, // Some basic override checks "override root's own constraint": { ds: []depspec{ mkDepspec("root 0.0.0", "a *", "b *"), mkDepspec("a 1.0.0", "b 1.0.0"), mkDepspec("a 2.0.0", "b 1.0.0"), mkDepspec("b 1.0.0"), }, ovr: ProjectConstraints{ ProjectRoot("a"): ProjectProperties{ Constraint: NewVersion("1.0.0"), }, }, r: mksolution( "a 1.0.0", "b 1.0.0", ), }, "override dep's constraint": { ds: []depspec{ mkDepspec("root 0.0.0", "a *"), mkDepspec("a 1.0.0", "b 1.0.0"), mkDepspec("a 2.0.0", "b 1.0.0"), mkDepspec("b 1.0.0"), mkDepspec("b 2.0.0"), }, ovr: ProjectConstraints{ ProjectRoot("b"): ProjectProperties{ Constraint: NewVersion("2.0.0"), }, }, r: mksolution( "a 2.0.0", "b 2.0.0", ), }, "overridden mismatched net addrs, alt in dep, back to default": { ds: []depspec{ mkDepspec("root 1.0.0", "foo 1.0.0", "bar 1.0.0"), mkDepspec("foo 1.0.0", "bar from baz 1.0.0"), mkDepspec("bar 1.0.0"), }, ovr: ProjectConstraints{ ProjectRoot("bar"): ProjectProperties{ Source: "bar", }, }, r: mksolution( "foo 1.0.0", "bar from bar 1.0.0", ), }, // TODO(sdboyer) decide how to refactor the solver in order to re-enable these. // Checking for revision existence is important...but kinda obnoxious. 
//{ //// Solve fails if revision constraint calls for a nonexistent revision //n: "fail on missing revision", //ds: []depspec{ //mkDepspec("root 0.0.0", "bar *"), //mkDepspec("bar 1.0.0", "foo r123abc"), //mkDepspec("foo r123nomatch"), //mkDepspec("foo 1.0.0"), //mkDepspec("foo 2.0.0"), //}, //errp: []string{"bar", "foo", "bar"}, //}, //{ //// Solve fails if revision constraint calls for a nonexistent revision, //// even if rev constraint is specified by root //n: "fail on missing revision from root", //ds: []depspec{ //mkDepspec("root 0.0.0", "foo r123nomatch"), //mkDepspec("foo r123abc"), //mkDepspec("foo 1.0.0"), //mkDepspec("foo 2.0.0"), //}, //errp: []string{"foo", "root", "foo"}, //}, // TODO(sdboyer) add fixture that tests proper handling of loops via aliases (where // a project that wouldn't be a loop is aliased to a project that is a loop) } func init() { // This sets up a hundred versions of foo and bar, 0.0.0 through 9.9.0. Each // version of foo depends on a baz with the same major version. Each version // of bar depends on a baz with the same minor version. There is only one // version of baz, 0.0.0, so only older versions of foo and bar will // satisfy it. fix := basicFixture{ ds: []depspec{ mkDepspec("root 0.0.0", "foo *", "bar *"), mkDepspec("baz 0.0.0"), }, r: mksolution( "foo 0.9.0", "bar 9.0.0", "baz 0.0.0", ), maxAttempts: 10, } for i := 0; i < 10; i++ { for j := 0; j < 10; j++ { fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("foo %v.%v.0", i, j), fmt.Sprintf("baz %v.0.0", i))) fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("bar %v.%v.0", i, j), fmt.Sprintf("baz 0.%v.0", j))) } } basicFixtures["complex backtrack"] = fix for k, fix := range basicFixtures { // Assign the name into the fixture itself fix.n = k basicFixtures[k] = fix } } // reachMaps contain externalReach()-type data for a given depspec fixture's // universe of projects, packages, and versions. 
// reachMap maps a project/version pair to the imports reachable from each of
// that project's packages.
type reachMap map[pident]map[string][]string

// depspecSourceManager is an in-memory SourceManager implementation backed by
// a fixture's depspecs; it performs no VCS or network operations.
type depspecSourceManager struct {
	specs []depspec       // fixture depspecs; first is the root
	rm    reachMap        // precomputed reach data for all specs
	ig    map[string]bool // set of import paths to ignore
}

// fixSM extends SourceManager with accessors the test harness needs to
// introspect a fixture-backed source manager.
type fixSM interface {
	SourceManager
	rootSpec() depspec
	allSpecs() []depspec
	ignore() map[string]bool
}

var _ fixSM = &depspecSourceManager{}

// newdepspecSM constructs a depspecSourceManager from the given depspecs and
// a list of import paths to ignore.
func newdepspecSM(ds []depspec, ignore []string) *depspecSourceManager {
	ig := make(map[string]bool)
	if len(ignore) > 0 {
		for _, pkg := range ignore {
			ig[pkg] = true
		}
	}

	return &depspecSourceManager{
		specs: ds,
		rm:    computeBasicReachMap(ds),
		ig:    ig,
	}
}

// GetManifestAndLock returns the depspec matching id and v as the Manifest,
// paired with a dummyLock.
func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) {
	// If the input version is a PairedVersion, look only at its top version,
	// not the underlying. This is generally consistent with the idea that, for
	// this class of lookup, the rev probably DOES exist, but upstream changed
	// it (typically a branch). For the purposes of tests, then, that's an OK
	// scenario, because otherwise we'd have to enumerate all the revs in the
	// fixture declarations, which would screw up other things.
	if pv, ok := v.(PairedVersion); ok {
		v = pv.Unpair()
	}

	src := toFold(id.normalizedSource())
	for _, ds := range sm.specs {
		if src == string(ds.n) && v.Matches(ds.v) {
			return ds, dummyLock{}, nil
		}
	}

	return nil, nil, fmt.Errorf("project %s at version %s could not be found", id, v)
}

// ListPackages returns the single-package PackageTree recorded in the reach
// map for id at version v, falling back to a top-version match for paired
// versions.
func (sm *depspecSourceManager) ListPackages(id ProjectIdentifier, v Version) (pkgtree.PackageTree, error) {
	pid := pident{n: ProjectRoot(toFold(id.normalizedSource())), v: v}
	if pv, ok := v.(PairedVersion); ok && pv.Revision() == "FAKEREV" {
		// An empty rev may come in here because that's what we produce in
		// ListVersions(). If that's what we see, then just pretend like we have
		// an unpaired.
		pid.v = pv.Unpair()
	}

	// First, try an exact match on the project/version pair.
	if r, exists := sm.rm[pid]; exists {
		return pkgtree.PackageTree{
			ImportRoot: id.normalizedSource(),
			Packages: map[string]pkgtree.PackageOrErr{
				string(pid.n): {
					P: pkgtree.Package{
						ImportPath: string(pid.n),
						Name:       string(pid.n),
						Imports:    r[string(pid.n)],
					},
				},
			},
		}, nil
	}

	// if incoming version was paired, walk the map and search for a match on
	// top-only version
	if pv, ok := v.(PairedVersion); ok {
		uv := pv.Unpair()
		for pid, r := range sm.rm {
			if uv.Matches(pid.v) {
				return pkgtree.PackageTree{
					ImportRoot: id.normalizedSource(),
					Packages: map[string]pkgtree.PackageOrErr{
						string(pid.n): {
							P: pkgtree.Package{
								ImportPath: string(pid.n),
								Name:       string(pid.n),
								Imports:    r[string(pid.n)],
							},
						},
					},
				}, nil
			}
		}
	}

	return pkgtree.PackageTree{}, fmt.Errorf("project %s at version %s could not be found", pid.n, v)
}

// ListVersions returns all fixture versions for id as PairedVersions,
// attaching a fake revision to any unpaired version.
func (sm *depspecSourceManager) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) {
	var pvl []PairedVersion
	src := toFold(id.normalizedSource())
	for _, ds := range sm.specs {
		if src != string(ds.n) {
			continue
		}

		switch tv := ds.v.(type) {
		case Revision:
			// To simulate the behavior of the real SourceManager, we do not return
			// raw revisions from listVersions().
		case PairedVersion:
			pvl = append(pvl, tv)
		case UnpairedVersion:
			// Dummy revision; if the fixture doesn't provide it, we know
			// the test doesn't need revision info, anyway.
			pvl = append(pvl, tv.Pair(Revision("FAKEREV")))
		default:
			panic(fmt.Sprintf("unreachable: type of version was %#v for spec %s", ds.v, id))
		}
	}

	if len(pvl) == 0 {
		return nil, fmt.Errorf("project %s could not be found", id)
	}
	return pvl, nil
}

// RevisionPresentIn reports whether revision r appears as a version of id in
// the fixture data.
func (sm *depspecSourceManager) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
	src := toFold(id.normalizedSource())
	for _, ds := range sm.specs {
		if src == string(ds.n) && r == ds.v {
			return true, nil
		}
	}
	return false, fmt.Errorf("project %s has no revision %s", id, r)
}

// SourceExists reports whether any fixture depspec matches id.
func (sm *depspecSourceManager) SourceExists(id ProjectIdentifier) (bool, error) {
	src := toFold(id.normalizedSource())
	for _, ds := range sm.specs {
		if src == string(ds.n) {
			return true, nil
		}
	}
	return false, nil
}

// SyncSourceFor is a no-op beyond verifying that the source exists.
func (sm *depspecSourceManager) SyncSourceFor(id ProjectIdentifier) error {
	// Ignore err because it can't happen
	if exist, _ := sm.SourceExists(id); !exist {
		return fmt.Errorf("source %s does not exist", id)
	}
	return nil
}

// Release is a no-op; there are no resources to free.
func (sm *depspecSourceManager) Release() {}

// ExportProject always fails; the fixture source manager has nothing on disk
// to export.
func (sm *depspecSourceManager) ExportProject(context.Context, ProjectIdentifier, Version, string) error {
	return fmt.Errorf("dummy sm doesn't support exporting")
}

// ExportPrunedProject always fails, for the same reason as ExportProject.
func (sm *depspecSourceManager) ExportPrunedProject(context.Context, LockedProject, PruneOptions, string) error {
	return fmt.Errorf("dummy sm doesn't support exporting")
}

// DeduceProjectRoot finds the fixture depspec whose name is ip or a path
// prefix of ip, and returns the corresponding prefix of ip as the root.
func (sm *depspecSourceManager) DeduceProjectRoot(ip string) (ProjectRoot, error) {
	fip := toFold(ip)
	for _, ds := range sm.allSpecs() {
		n := string(ds.n)
		if fip == n || strings.HasPrefix(fip, n+"/") {
			// Slice the original (unfolded) input so the caller's casing is
			// preserved in the returned root.
			return ProjectRoot(ip[:len(n)]), nil
		}
	}
	return "", fmt.Errorf("could not find %s, or any parent, in list of known fixtures", ip)
}

// SourceURLsForPath is unimplemented for the fixture source manager.
func (sm *depspecSourceManager) SourceURLsForPath(ip string) ([]*url.URL, error) {
	return nil, fmt.Errorf("dummy sm doesn't implement SourceURLsForPath")
}

// rootSpec returns the root depspec (always the first in the fixture list).
func (sm *depspecSourceManager) rootSpec() depspec {
	return sm.specs[0]
}

// allSpecs returns every depspec in the fixture.
func (sm *depspecSourceManager) allSpecs() []depspec {
	return sm.specs
}

// ignore returns the set of import paths to ignore.
func (sm *depspecSourceManager) ignore() map[string]bool {
	return sm.ig
}

// InferConstraint tries to puzzle out what kind of version is given in a string -
// semver, a revision, or as a fallback, a plain tag. This current implementation
// is a panic because there's no current circumstance under which the depspecSourceManager
// is useful outside of the gps solving tests, and it shouldn't be used anywhere else without a conscious and intentional
// expansion of its semantics.
func (sm *depspecSourceManager) InferConstraint(s string, pi ProjectIdentifier) (Constraint, error) {
	panic("depsecSourceManager is only for gps solving tests")
}

// depspecBridge wraps the solver's bridge to route version and package
// lookups through the fixture source manager.
type depspecBridge struct {
	*bridge
}

// listVersions returns the (memoized) sorted version list for id, stripping
// the fake revision from any paired versions that carry it.
func (b *depspecBridge) listVersions(id ProjectIdentifier) ([]Version, error) {
	if vl, exists := b.vlists[id]; exists {
		return vl, nil
	}

	pvl, err := b.sm.ListVersions(id)
	if err != nil {
		return nil, err
	}

	// Construct a []Version slice. If any paired versions use the fake rev,
	// remove the underlying component.
	vl := make([]Version, 0, len(pvl))
	for _, v := range pvl {
		if v.Revision() == "FAKEREV" {
			vl = append(vl, v.Unpair())
		} else {
			vl = append(vl, v)
		}
	}

	if b.down {
		SortForDowngrade(vl)
	} else {
		SortForUpgrade(vl)
	}

	b.vlists[id] = vl
	return vl, nil
}

// override verifyRoot() on bridge to prevent any filesystem interaction
func (b *depspecBridge) verifyRootDir(path string) error {
	root := b.sm.(fixSM).rootSpec()
	if string(root.n) != path {
		return fmt.Errorf("expected only root project %q to verifyRootDir(), got %q", root.n, path)
	}

	return nil
}

// ListPackages delegates directly to the fixture source manager.
func (b *depspecBridge) ListPackages(id ProjectIdentifier, v Version) (pkgtree.PackageTree, error) {
	return b.sm.(fixSM).ListPackages(id, v)
}

// vendorCodeExists always reports false; fixtures never have vendored code.
func (b *depspecBridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
	return false, nil
}

// enforce interfaces
var _ Manifest = depspec{}
var _ Lock = dummyLock{}
var _ Lock = fixLock{}

// impl Spec interface
func (ds depspec) DependencyConstraints() ProjectConstraints {
	return pcSliceToMap(ds.deps)
}
// fixLock is a fixture implementation of Lock, backed directly by a slice of
// LockedProjects.
type fixLock []LockedProject

// Projects implements the Lock interface, returning the backing slice as-is.
func (fl fixLock) Projects() []LockedProject {
	return fl
}

// InputImports implements the Lock interface; fixtures record no input
// imports.
func (fixLock) InputImports() []string {
	return nil
}

// dummyLock is a no-op Lock implementation carrying no projects and no
// imports.
type dummyLock struct{}

// Projects implements the Lock interface with an empty project list.
func (dummyLock) Projects() []LockedProject {
	return nil
}

// InputImports implements the Lock interface with an empty import list.
func (dummyLock) InputImports() []string {
	return nil
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_test.go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gps

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"testing"

	"github.com/golang/dep/gps/pkgtree"
	"github.com/golang/dep/internal/test"
)

// testSourceGateway exercises a sourceGateway end-to-end against a real git
// repository, verifying the srcState bitmask transitions as upstream checks,
// local syncing, and version listing occur, then checking version, revision,
// and package-tree lookups (including error behavior for bad versions).
//
// Executed in parallel by TestSlowVcs
func testSourceGateway(t *testing.T) {
	t.Parallel()

	if testing.Short() {
		t.Skip("Skipping gateway testing in short mode")
	}
	requiresBins(t, "git")

	cachedir, err := ioutil.TempDir("", "smcache")
	if err != nil {
		t.Fatalf("failed to create temp dir: %s", err)
	}
	bgc := context.Background()
	ctx, cancelFunc := context.WithCancel(bgc)
	defer func() {
		os.RemoveAll(cachedir)
		cancelFunc()
	}()
	os.Mkdir(filepath.Join(cachedir, "sources"), 0777)

	// do returns a subtest that expects the gateway's initial state to be
	// wantstate, then walks it through its full lifecycle.
	do := func(wantstate sourceState) func(t *testing.T) {
		return func(t *testing.T) {
			superv := newSupervisor(ctx)
			deducer := newDeductionCoordinator(superv)
			logger := log.New(test.Writer{TB: t}, "", 0)
			sc := newSourceCoordinator(superv, deducer, cachedir, nil, logger)
			defer sc.close()

			id := mkPI("github.com/sdboyer/deptest")
			sg, err := sc.getSourceGatewayFor(ctx, id)
			if err != nil {
				t.Fatal(err)
			}

			if sg.srcState != wantstate {
				t.Fatalf("expected state to be %q, got %q", wantstate, sg.srcState)
			}

			if err := sg.existsUpstream(ctx); err != nil {
				t.Fatalf("failed to verify upstream source: %s", err)
			}

			// Upstream checks may also populate the version list, depending
			// on the source implementation.
			wantstate |= sourceExistsUpstream
			if sg.src.existsCallsListVersions() {
				wantstate |= sourceHasLatestVersionList
			}
			if sg.srcState != wantstate {
				t.Fatalf("expected state to be %q, got %q", wantstate, sg.srcState)
			}

			if err := sg.syncLocal(ctx); err != nil {
				t.Fatalf("error on cloning git repo: %s", err)
			}

			wantstate |= sourceExistsLocally | sourceHasLatestLocally
			if sg.srcState != wantstate {
				t.Fatalf("expected state to be %q, got %q", wantstate, sg.srcState)
			}

			if _, ok := sg.src.(*gitSource); !ok {
				t.Fatalf("Expected a gitSource, got a %T", sg.src)
			}

			vlist, err := sg.listVersions(ctx)
			if err != nil {
				t.Fatalf("Unexpected error getting version pairs from git repo: %s", err)
			}

			wantstate |= sourceHasLatestVersionList
			if sg.srcState != wantstate {
				t.Fatalf("expected state to be %q, got %q", wantstate, sg.srcState)
			}

			// The deptest repo is expected to expose exactly these four
			// versions, in upgrade-sorted order.
			if len(vlist) != 4 {
				t.Fatalf("git test repo should've produced four versions, got %v: vlist was %s", len(vlist), vlist)
			} else {
				SortPairedForUpgrade(vlist)
				evl := []PairedVersion{
					NewVersion("v1.0.0").Pair(Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf")),
					NewVersion("v0.8.1").Pair(Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")),
					NewVersion("v0.8.0").Pair(Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf")),
					newDefaultBranch("master").Pair(Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")),
				}
				if len(evl) != len(vlist) {
					t.Errorf("expected %d versions but got %d", len(evl), len(vlist))
				} else {
					for i := range evl {
						if !evl[i].identical(vlist[i]) {
							t.Errorf("index %d: expected version identical to %#v but got %#v", i, evl[i], vlist[i])
						}
					}
				}
			}

			rev := Revision("c575196502940c07bf89fd6d95e83b999162e051")
			// check that an expected rev is not in cache
			_, has := sg.cache.getVersionsFor(rev)
			if has {
				t.Fatal("shouldn't have bare revs in cache without specifically requesting them")
			}

			is, err := sg.revisionPresentIn(ctx, rev)
			if err != nil {
				t.Fatalf("unexpected error while checking revision presence: %s", err)
			} else if !is {
				t.Fatalf("revision that should exist was not present")
			}

			// check that the rev IS in cache now that it was explicitly
			// requested via revisionPresentIn
			_, has = sg.cache.getVersionsFor(rev)
			if !has {
				t.Fatal("bare rev should be in cache after specific request for it")
			}

			// Ensure that a bad rev doesn't work on any method that takes
			// versions
			badver := NewVersion("notexist")
			wanterr := fmt.Errorf("version %q does not exist in source", badver)

			_, _, err = sg.getManifestAndLock(ctx, ProjectRoot("github.com/sdboyer/deptest"), badver, naiveAnalyzer{})
			if err == nil {
				t.Fatal("wanted err on nonexistent version")
			} else if err.Error() != wanterr.Error() {
				t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err)
			}

			_, err = sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), badver)
			if err == nil {
				t.Fatal("wanted err on nonexistent version")
			} else if err.Error() != wanterr.Error() {
				t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err)
			}

			err = sg.exportVersionTo(ctx, badver, cachedir)
			if err == nil {
				t.Fatal("wanted err on nonexistent version")
			} else if err.Error() != wanterr.Error() {
				t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err)
			}

			// The same package tree should come back whether looked up by
			// bare revision or by the (unpaired) tag pointing at it.
			wantptree := pkgtree.PackageTree{
				ImportRoot: "github.com/sdboyer/deptest",
				Packages: map[string]pkgtree.PackageOrErr{
					"github.com/sdboyer/deptest": {
						P: pkgtree.Package{
							ImportPath: "github.com/sdboyer/deptest",
							Name:       "deptest",
							Imports:    []string{},
						},
					},
				},
			}

			ptree, err := sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf"))
			if err != nil {
				t.Fatalf("unexpected err when getting package tree with known rev: %s", err)
			}
			comparePackageTree(t, wantptree, ptree)

			ptree, err = sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), NewVersion("v1.0.0"))
			if err != nil {
				t.Fatalf("unexpected err when getting package tree with unpaired good version: %s", err)
			}
			comparePackageTree(t, wantptree, ptree)
		}
	}

	// Run test twice so that we cover both the existing and non-existing case.
	t.Run("empty", do(sourceExistsUpstream|sourceHasLatestVersionList))
	t.Run("exists", do(sourceExistsLocally))
}
gps
/home/linuxreitt/Michinereitt/Tuning/Workshop_Scripts/hf-codegen/data/golang_public_repos/dep/gps/source_cache_bolt_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gps import ( "io/ioutil" "log" "path" "testing" "time" "github.com/golang/dep/gps/pkgtree" "github.com/golang/dep/internal/test" ) func TestBoltCacheTimeout(t *testing.T) { const root = "example.com/test" cpath, err := ioutil.TempDir("", "singlesourcecache") if err != nil { t.Fatalf("Failed to create temp cache dir: %s", err) } pi := ProjectIdentifier{ProjectRoot: root} logger := log.New(test.Writer{TB: t}, "", 0) start := time.Now() bc, err := newBoltCache(cpath, start.Unix(), logger) if err != nil { t.Fatal(err) } defer bc.close() c := bc.newSingleSourceCache(pi) rev := Revision("test") ai := ProjectAnalyzerInfo{Name: "name", Version: 42} manifest := &simpleRootManifest{ c: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{ Constraint: Any(), }, ProjectRoot("bar"): ProjectProperties{ Source: "whatever", Constraint: testSemverConstraint(t, "> 1.3"), }, }, ovr: ProjectConstraints{ ProjectRoot("b"): ProjectProperties{ Constraint: testSemverConstraint(t, "2.0.0"), }, }, } lock := &safeLock{ p: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Pair("foo"), []string{"gps"}), NewLockedProject(mkPI("github.com/sdboyer/gps2"), NewVersion("v0.10.0").Pair("bar"), nil), NewLockedProject(mkPI("github.com/sdboyer/gps3"), NewVersion("v0.10.0").Pair("baz"), []string{"gps", "flugle"}), NewLockedProject(mkPI("foo"), NewVersion("nada").Pair("zero"), []string{"foo"}), NewLockedProject(mkPI("github.com/sdboyer/gps4"), NewVersion("v0.10.0").Pair("qux"), []string{"flugle", "gps"}), }, } ptree := pkgtree.PackageTree{ ImportRoot: root, Packages: map[string]pkgtree.PackageOrErr{ root: { P: pkgtree.Package{ ImportPath: root, CommentPath: "comment", Name: "test", Imports: []string{ "sort", }, }, }, path.Join(root, "simple"): { P: pkgtree.Package{ ImportPath: path.Join(root, 
"simple"), CommentPath: "comment", Name: "simple", Imports: []string{ "github.com/golang/dep/gps", "sort", }, }, }, path.Join(root, "m1p"): { P: pkgtree.Package{ ImportPath: path.Join(root, "m1p"), CommentPath: "", Name: "m1p", Imports: []string{ "github.com/golang/dep/gps", "os", "sort", }, }, }, }, } pvs := []PairedVersion{ NewBranch("originalbranch").Pair("rev1"), NewVersion("originalver").Pair("rev2"), } // Write values timestamped > `start`. { c.setManifestAndLock(rev, ai, manifest, lock) c.setPackageTree(rev, ptree) c.setVersionMap(pvs) } // Read back values timestamped > `start`. { gotM, gotL, ok := c.getManifestAndLock(rev, ai) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, manifest, gotM) // TODO(sdboyer) use DiffLocks after refactoring to avoid import cycles if !locksAreEq(lock, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", lock, gotL) } got, ok := c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", ptree) } comparePackageTree(t, ptree, got) gotV, ok := c.getAllVersions() if !ok || len(gotV) != len(pvs) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) } else { SortPairedForDowngrade(gotV) for i := range pvs { if !pvs[i].identical(gotV[i]) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) break } } } } if err := bc.close(); err != nil { t.Fatal("failed to close cache:", err) } // Read with a later epoch. Expect no *timestamped* values, since all were < `after`. 
{ after := start.Add(1000 * time.Hour) bc, err = newBoltCache(cpath, after.Unix(), logger) if err != nil { t.Fatal(err) } c = bc.newSingleSourceCache(pi) gotM, gotL, ok := c.getManifestAndLock(rev, ai) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, manifest, gotM) // TODO(sdboyer) use DiffLocks after refactoring to avoid import cycles if !locksAreEq(lock, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", lock, gotL) } gotPtree, ok := c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", ptree) } comparePackageTree(t, ptree, gotPtree) pvs, ok := c.getAllVersions() if ok || len(pvs) > 0 { t.Errorf("expected no cached versions, but got:\n\t%#v", pvs) } } if err := bc.close(); err != nil { t.Fatal("failed to close cache:", err) } // Re-connect with the original epoch. bc, err = newBoltCache(cpath, start.Unix(), logger) if err != nil { t.Fatal(err) } c = bc.newSingleSourceCache(pi) // Read values timestamped > `start`. { gotM, gotL, ok := c.getManifestAndLock(rev, ai) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, manifest, gotM) // TODO(sdboyer) use DiffLocks after refactoring to avoid import cycles if !locksAreEq(lock, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", lock, gotL) } got, ok := c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", ptree) } comparePackageTree(t, ptree, got) gotV, ok := c.getAllVersions() if !ok || len(gotV) != len(pvs) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) } else { SortPairedForDowngrade(gotV) for i := range pvs { if !pvs[i].identical(gotV[i]) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, pvs) break } } } } // New values. 
newManifest := &simpleRootManifest{ c: ProjectConstraints{ ProjectRoot("foo"): ProjectProperties{ Constraint: NewBranch("master"), }, ProjectRoot("bar"): ProjectProperties{ Source: "whatever", Constraint: testSemverConstraint(t, "> 1.5"), }, }, } newLock := &safeLock{ p: []LockedProject{ NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v1").Pair("rev1"), []string{"gps"}), }, i: []string{"foo", "bar"}, } newPtree := pkgtree.PackageTree{ ImportRoot: root, Packages: map[string]pkgtree.PackageOrErr{ path.Join(root, "simple"): { P: pkgtree.Package{ ImportPath: path.Join(root, "simple"), CommentPath: "newcomment", Name: "simple", Imports: []string{ "github.com/golang/dep/gps42", "test", }, }, }, path.Join(root, "m1p"): { P: pkgtree.Package{ ImportPath: path.Join(root, "m1p"), CommentPath: "", Name: "m1p", Imports: []string{ "os", }, }, }, }, } newPVS := []PairedVersion{ NewBranch("newbranch").Pair("revA"), NewVersion("newver").Pair("revB"), } // Overwrite with new values, and with timestamps > `after`. { c.setManifestAndLock(rev, ai, newManifest, newLock) c.setPackageTree(rev, newPtree) c.setVersionMap(newPVS) } // Read new values. { gotM, gotL, ok := c.getManifestAndLock(rev, ai) if !ok { t.Error("no manifest and lock found for revision") } compareManifests(t, newManifest, gotM) // TODO(sdboyer) use DiffLocks after refactoring to avoid import cycles if !locksAreEq(newLock, gotL) { t.Errorf("locks are different:\n\t(GOT): %s\n\t(WNT): %s", newLock, gotL) } got, ok := c.getPackageTree(rev, root) if !ok { t.Errorf("no package tree found:\n\t(WNT): %#v", newPtree) } comparePackageTree(t, newPtree, got) gotV, ok := c.getAllVersions() if !ok || len(gotV) != len(newPVS) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, newPVS) } else { SortPairedForDowngrade(gotV) for i := range newPVS { if !newPVS[i].identical(gotV[i]) { t.Errorf("unexpected versions:\n\t(GOT): %#v\n\t(WNT): %#v", gotV, newPVS) break } } } } }