repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/vagrant/spec/live/storage/dropbox_spec.rb | vagrant/spec/live/storage/dropbox_spec.rb | require File.expand_path('../../../spec_helper', __FILE__)
# To run these tests, you need to setup your Dropbox credentials in
# /vagrant/spec/live.yml
# You also need to have a cached authorization file in
# /vagrant/spec/live/.cache/
# If you already have one, you can simply copy it there.
# If not, change a test from :live to :focus and run it to generate one.
module Backup
describe Storage::Dropbox,
:if => BackupSpec::LIVE['storage']['dropbox']['specs_enabled'] == true do
# Note that the remote will only be cleaned after successful tests,
# but it will clean files uploaded by all previous failed tests.
before do
# Each archive is 1.09 MB (1,090,000).
# With Splitter set to 2 MiB, package files will be 2,097,152 and 1,172,848.
# With #chunk_size set to 1, the chunked uploader will upload 1 MiB per request.
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
split_into_chunks_of 2 # MiB
archive :archive_a do |archive|
archive.add '~/test_data'
end
archive :archive_b do |archive|
archive.add '~/test_data'
end
archive :archive_c do |archive|
archive.add '~/test_data'
end
config = BackupSpec::LIVE['storage']['dropbox']
store_with Dropbox do |db|
db.api_key = config['api_key']
db.api_secret = config['api_secret']
db.cache_path = '/vagrant/spec/live/.cache'
db.access_type = config['access_type']
db.path = config['path']
db.chunk_size = 1 # MiB
db.keep = 2
end
end
EOS
end
it 'stores package files', :live do
job = backup_perform :my_backup
files_sent = files_sent_for(job)
expect( files_sent.count ).to be(2)
expect( files_on_remote_for(job) ).to eq files_sent
clean_remote(job)
end
it 'cycles stored packages', :live do
job_a = backup_perform :my_backup
job_b = backup_perform :my_backup
# package files for job_a should be on the remote
files_sent = files_sent_for(job_a)
expect( files_sent.count ).to be(2)
expect( files_on_remote_for(job_a) ).to eq files_sent
# package files for job_b should be on the remote
files_sent = files_sent_for(job_b)
expect( files_sent.count ).to be(2)
expect( files_on_remote_for(job_b) ).to eq files_sent
job_c = backup_perform :my_backup
# package files for job_b should still be on the remote
files_sent = files_sent_for(job_b)
expect( files_sent.count ).to be(2)
expect( files_on_remote_for(job_b) ).to eq files_sent
# package files for job_c should be on the remote
files_sent = files_sent_for(job_c)
expect( files_sent.count ).to be(2)
expect( files_on_remote_for(job_c) ).to eq files_sent
# package files for job_a should be gone
expect( files_on_remote_for(job_a) ).to be_empty
clean_remote(job_a) # will clean up after all jobs
end
private
def files_sent_for(job)
job.model.package.filenames.map {|name|
File.join('/', remote_path_for(job), name)
}.sort
end
def remote_path_for(job)
path = BackupSpec::LIVE['storage']['dropbox']['path']
package = job.model.package
File.join(path, package.trigger, package.time)
end
# files_on_remote_for(job) should match #files_sent_for(job).
# If the files do not exist, or were removed by cycling, this will return [].
def files_on_remote_for(job)
storage = job.model.storages.first
# search(dir_to_search, query) => metadata for each entry
# entry['path'] will start with '/'
storage.send(:connection).search(remote_path_for(job), job.model.trigger).
map {|entry| entry['path'] }.sort
end
def clean_remote(job)
storage = job.model.storages.first
path = BackupSpec::LIVE['storage']['dropbox']['path']
storage.send(:connection).file_delete(path)
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/vagrant/spec/live/storage/cloud_files_spec.rb | vagrant/spec/live/storage/cloud_files_spec.rb | require File.expand_path('../../../spec_helper', __FILE__)
# To run these tests, you need to setup your Cloudfiles credentials in
# /vagrant/spec/live.yml
#
# It's recommended you use dedicated Containers for this, like:
# backup.testing.container
# backup.testing.segments.container
#
module Backup
describe Storage::CloudFiles,
:if => BackupSpec::LIVE['storage']['cloudfiles']['specs_enabled'] == true do
before { clean_remote }
after { clean_remote }
# Each archive is 1.09 MB (1,090,000).
# Each job here will create 2 package files (6,291,456 + 248,544).
# With :segment_size at 2 MiB, the first package file will be stored
# as a SLO with 3 segments. The second package file will use put_object.
it 'stores package', :live do
create_model :my_backup, %q{
Backup::Model.new(:my_backup, 'a description') do
split_into_chunks_of 6 # MiB
6.times do |n|
archive "archive_#{ n }" do |archive|
archive.add '~/test_data'
end
end
config = BackupSpec::LIVE['storage']['cloudfiles']
store_with CloudFiles do |cf|
cf.username = config['username']
cf.api_key = config['api_key']
cf.auth_url = config['auth_url']
cf.region = config['region']
cf.servicenet = config['servicenet']
cf.container = config['container']
cf.segments_container = config['segments_container']
cf.path = config['path']
cf.max_retries = 3
cf.retry_waitsec = 5
cf.segment_size = 2 # MiB
cf.days_to_keep = 1
end
end
}
job = backup_perform :my_backup
files_sent = files_sent_for(job)
expect( files_sent.count ).to be(2)
objects_on_remote = objects_on_remote_for(job)
expect( objects_on_remote.map(&:name) ).to eq files_sent
expect( objects_on_remote.all?(&:marked_for_deletion?) ).to be(true)
segments_on_remote = segments_on_remote_for(job)
expect( segments_on_remote.count ).to be(3)
expect( segments_on_remote.all?(&:marked_for_deletion?) ).to be(true)
end
it 'cycles package', :live do
create_model :my_backup, %q{
Backup::Model.new(:my_backup, 'a description') do
split_into_chunks_of 6 # MiB
6.times do |n|
archive "archive_#{ n }" do |archive|
archive.add '~/test_data'
end
end
config = BackupSpec::LIVE['storage']['cloudfiles']
store_with CloudFiles do |cf|
cf.username = config['username']
cf.api_key = config['api_key']
cf.auth_url = config['auth_url']
cf.region = config['region']
cf.servicenet = config['servicenet']
cf.container = config['container']
cf.segments_container = config['segments_container']
cf.path = config['path']
cf.max_retries = 3
cf.retry_waitsec = 5
cf.segment_size = 2 # MiB
cf.keep = 2
end
end
}
job_a = backup_perform :my_backup
job_b = backup_perform :my_backup
# package files for job_a should be on the remote
files_sent = files_sent_for(job_a)
expect( files_sent.count ).to be(2)
objects_on_remote = objects_on_remote_for(job_a)
expect( objects_on_remote.map(&:name) ).to eq files_sent
expect( objects_on_remote.any?(&:marked_for_deletion?) ).to be(false)
segments_on_remote = segments_on_remote_for(job_a)
expect( segments_on_remote.count ).to be(3)
expect( segments_on_remote.any?(&:marked_for_deletion?) ).to be(false)
# package files for job_b should be on the remote
files_sent = files_sent_for(job_b)
expect( files_sent.count ).to be(2)
objects_on_remote = objects_on_remote_for(job_b)
expect( objects_on_remote.map(&:name) ).to eq files_sent
expect( objects_on_remote.any?(&:marked_for_deletion?) ).to be(false)
segments_on_remote = segments_on_remote_for(job_b)
expect( segments_on_remote.count ).to be(3)
expect( segments_on_remote.any?(&:marked_for_deletion?) ).to be(false)
job_c = backup_perform :my_backup
# package files for job_b should still be on the remote
files_sent = files_sent_for(job_b)
expect( files_sent.count ).to be(2)
expect( objects_on_remote_for(job_b).map(&:name) ).to eq files_sent
expect( segments_on_remote_for(job_b).count ).to be(3)
# package files for job_c should be on the remote
files_sent = files_sent_for(job_c)
expect( files_sent.count ).to be(2)
objects_on_remote = objects_on_remote_for(job_c)
expect( objects_on_remote.map(&:name) ).to eq files_sent
expect( objects_on_remote.any?(&:marked_for_deletion?) ).to be(false)
segments_on_remote = segments_on_remote_for(job_c)
expect( segments_on_remote.count ).to be(3)
expect( segments_on_remote.any?(&:marked_for_deletion?) ).to be(false)
# package files for job_a should be gone
expect( objects_on_remote_for(job_a) ).to be_empty
expect( segments_on_remote_for(job_a) ).to be_empty
end
private
def config
config = BackupSpec::LIVE['storage']['cloudfiles']
@config ||= {
:username => config['username'],
:api_key => config['api_key'],
:auth_url => config['auth_url'],
:region => config['region'],
:servicenet => config['servicenet'],
:container => config['container'],
:segments_container => config['segments_container'],
:segment_size => 0,
:max_retries => 3,
:retry_waitsec => 5
}
end
def cloud_io
@cloud_io ||= CloudIO::CloudFiles.new(config)
end
def segments_cloud_io
@segments_cloud_io ||= CloudIO::CloudFiles.new(
config.merge(:container => config[:segments_container])
)
end
def files_sent_for(job)
job.model.package.filenames.map {|name|
File.join(remote_path_for(job), name)
}.sort
end
def remote_path_for(job)
path = BackupSpec::LIVE['storage']['cloudfiles']['path']
package = job.model.package
File.join(path, package.trigger, package.time)
end
# objects_on_remote_for(job).map(&:name) should match #files_sent_for(job).
# If the files do not exist, or were removed by cycling, this will return [].
def objects_on_remote_for(job)
cloud_io.objects(remote_path_for(job)).sort_by(&:name)
end
def segments_on_remote_for(job)
segments_cloud_io.objects(remote_path_for(job))
end
def clean_remote
path = BackupSpec::LIVE['storage']['cloudfiles']['path']
objects = cloud_io.objects(path)
unless objects.empty?
slo_objects, objects = objects.partition(&:slo?)
cloud_io.delete_slo(slo_objects)
cloud_io.delete(objects)
end
# in case segments are uploaded, but the manifest isn't
objects = segments_cloud_io.objects(path)
segments_cloud_io.delete(objects) unless objects.empty?
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/vagrant/spec/live/notifier/mail_spec.rb | vagrant/spec/live/notifier/mail_spec.rb | require File.expand_path('../../../spec_helper', __FILE__)
# To run these tests, you need to setup your Mail credentials in
# /vagrant/spec/live.yml
#
module Backup
describe Notifier::Mail,
:if => BackupSpec::LIVE['notifier']['mail']['specs_enabled'] == true do
# These tests send actual emails. Check your mail to verify success.
context 'when using :smtp delivery method' do
it 'sends a success email', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['notifier']['mail']
notify_by Mail do |mail|
mail.delivery_method = :smtp
mail.from = config['from']
mail.to = config['to']
mail.address = config['address']
mail.port = config['port']
mail.user_name = config['user_name']
mail.password = config['password']
mail.authentication = config['authentication']
mail.encryption = config['encryption']
mail.openssl_verify_mode = config['openssl_verify_mode']
end
end
EOS
backup_perform :my_backup
end
it 'sends a warning email', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['notifier']['mail']
notify_by Mail do |mail|
mail.delivery_method = :smtp
mail.from = config['from']
mail.to = config['to']
mail.address = config['address']
mail.port = config['port']
mail.user_name = config['user_name']
mail.password = config['password']
mail.authentication = config['authentication']
mail.encryption = config['encryption']
mail.openssl_verify_mode = config['openssl_verify_mode']
end
# log a warning
Backup::Logger.warn 'test warning'
end
EOS
backup_perform :my_backup, :exit_status => 1
end
it 'sends a failure email (non-fatal)', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['notifier']['mail']
notify_by Mail do |mail|
mail.delivery_method = :smtp
mail.from = config['from']
mail.to = config['to']
mail.address = config['address']
mail.port = config['port']
mail.user_name = config['user_name']
mail.password = config['password']
mail.authentication = config['authentication']
mail.encryption = config['encryption']
mail.openssl_verify_mode = config['openssl_verify_mode']
end
archive :my_archive do |archive|
archive.add '~/test_data/dir_a/file_a'
end
end
EOS
Archive.any_instance.should_receive(:perform!).
and_raise('a non-fatal error')
backup_perform :my_backup, :exit_status => 2
end
it 'sends a failure email (fatal)', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['notifier']['mail']
notify_by Mail do |mail|
mail.delivery_method = :smtp
mail.from = config['from']
mail.to = config['to']
mail.address = config['address']
mail.port = config['port']
mail.user_name = config['user_name']
mail.password = config['password']
mail.authentication = config['authentication']
mail.encryption = config['encryption']
mail.openssl_verify_mode = config['openssl_verify_mode']
end
archive :my_archive do |archive|
archive.add '~/test_data/dir_a/file_a'
end
end
EOS
Archive.any_instance.should_receive(:perform!).
and_raise(Exception.new('a fatal error'))
backup_perform :my_backup, :exit_status => 3
end
end # context 'when using :smtp delivery method'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/vagrant/spec/live/syncer/s3_spec.rb | vagrant/spec/live/syncer/s3_spec.rb | require File.expand_path('../../../spec_helper', __FILE__)
# To run these tests, you need to setup your AWS S3 credentials in
# /vagrant/spec/live.yml
#
# It's recommended you use a dedicated Bucket for this, like:
# <aws_username>-backup-testing
#
# Note: The S3 Bucket you use should have read-after-write consistency.
# So don't use the US Standard region.
module Backup
describe Syncer::Cloud::S3,
:if => BackupSpec::LIVE['syncer']['cloud']['s3']['specs_enabled'] == true do
before { prepare_local_sync_files; clean_remote }
after { clean_remote }
shared_examples 'sync test (s3)' do
it 'works' do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['syncer']['cloud']['s3']
sync_with Cloud::S3 do |s3|
s3.access_key_id = config['access_key_id']
s3.secret_access_key = config['secret_access_key']
s3.region = config['region']
s3.bucket = config['bucket']
s3.path = config['path']
s3.thread_count = #{ use_threads ? 2 : 0 }
s3.mirror = #{ mirror }
s3.directories do
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_a')
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_b')
end
end
end
EOS
job = backup_perform :my_backup, :exit_status => 1
expect(
objects_on_remote.map {|obj| [obj.key, obj.etag] }
).to eq(
expected_on_remote(:before_update, mirror)
)
expect( skipped_file_logged?(job) ).to be_true
update_local_sync_files
job = backup_perform :my_backup, :exit_status => 1
objects = objects_on_remote
expect(
objects.map {|obj| [obj.key, obj.etag] }
).to eq(
expected_on_remote(:after_update, mirror)
)
expect( skipped_file_logged?(job) ).to be_true
expect(
objects.all? {|obj| obj.storage_class == 'STANDARD' }
).to be(true)
expect(
objects.all? {|obj| obj.encryption.nil? }
).to be(true)
end
end # shared_examples 'sync test (s3)'
context 'with threads', :live do
let(:use_threads) { true }
context 'with mirroring' do
let(:mirror) { true }
include_examples 'sync test (s3)'
end
context 'without mirroring' do
let(:mirror) { false }
include_examples 'sync test (s3)'
end
end
context 'without threads', :live do
let(:use_threads) { false }
context 'with mirroring' do
let(:mirror) { true }
include_examples 'sync test (s3)'
end
context 'without mirroring' do
let(:mirror) { false }
include_examples 'sync test (s3)'
end
end
it 'uses :storage_class and :encryption', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['syncer']['cloud']['s3']
sync_with Cloud::S3 do |s3|
s3.access_key_id = config['access_key_id']
s3.secret_access_key = config['secret_access_key']
s3.region = config['region']
s3.bucket = config['bucket']
s3.path = config['path']
s3.storage_class = :reduced_redundancy
s3.encryption = :aes256
s3.directories do
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_a')
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_b')
end
end
end
EOS
backup_perform :my_backup, :exit_status => 1
objects = objects_on_remote
expect(
objects.all? {|obj| obj.storage_class == 'REDUCED_REDUNDANCY' }
).to be(true)
expect(
objects.all? {|obj| obj.encryption == 'AES256' }
).to be_true
end
it 'excludes files', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['syncer']['cloud']['s3']
sync_with Cloud::S3 do |s3|
s3.access_key_id = config['access_key_id']
s3.secret_access_key = config['secret_access_key']
s3.region = config['region']
s3.bucket = config['bucket']
s3.path = config['path']
s3.directories do
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_a')
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_b')
exclude '**/two.*'
exclude /three\.file$/
end
end
end
EOS
backup_perform :my_backup, :exit_status => 1
expect(
objects_on_remote.map {|obj| [obj.key, obj.etag] }
).to eq([
[File.join(remote_path, 'dir_a/one.file'), 'd3b07384d113edec49eaa6238ad5ff00']
])
end
private
def cloud_io
config = BackupSpec::LIVE['syncer']['cloud']['s3']
@cloud_io ||= CloudIO::S3.new(
:access_key_id => config['access_key_id'],
:secret_access_key => config['secret_access_key'],
:region => config['region'],
:bucket => config['bucket'],
:max_retries => 3,
:retry_waitsec => 5,
# Syncers can not use multipart upload.
:chunk_size => 0
)
end
def remote_path
BackupSpec::LIVE['syncer']['cloud']['s3']['path']
end
def objects_on_remote
cloud_io.objects(remote_path).sort_by(&:key)
end
def clean_remote
cloud_io.delete(objects_on_remote)
end
def expected_on_remote(state, mirror)
case state
when :before_update
files = [['dir_a/one.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/dir_c/three.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/two.file', 'd3b07384d113edec49eaa6238ad5ff00']]
when :after_update
files = [['dir_a/dir_d/two.new', '14758f1afd44c09b7992073ccf00b43d'],
['dir_a/one.file', '14758f1afd44c09b7992073ccf00b43d'],
['dir_b/dir_c/three.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/one.new', '14758f1afd44c09b7992073ccf00b43d']]
files << ['dir_b/two.file', 'd3b07384d113edec49eaa6238ad5ff00'] unless mirror
end
files.map {|path, md5| [File.join(remote_path, path), md5] }.sort_by(&:first)
end
def skipped_file_logged?(job)
messages = job.logger.messages.map {|m| m.formatted_lines }.flatten
file = File.join(BackupSpec::LOCAL_SYNC_PATH, "dir_b/bad\uFFFDfile")
messages.any? {|line| line.include? "[warn] [skipping] #{ file }" }
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/vagrant/spec/live/syncer/cloud_files_spec.rb | vagrant/spec/live/syncer/cloud_files_spec.rb | require File.expand_path('../../../spec_helper', __FILE__)
# To run these tests, you need to setup your Cloudfiles credentials in
# /vagrant/spec/live.yml
#
# It's recommended you use a dedicated Container for this, like:
# backup.testing.container
#
# Note: Expectations will occasionally fail due to eventual consistency.
module Backup
describe Syncer::Cloud::CloudFiles,
:if => BackupSpec::LIVE['syncer']['cloud']['cloudfiles']['specs_enabled'] == true do
before { prepare_local_sync_files; clean_remote }
after { clean_remote }
shared_examples 'sync test (cf)' do
it 'works' do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['syncer']['cloud']['cloudfiles']
sync_with Cloud::CloudFiles do |cf|
cf.username = config['username']
cf.api_key = config['api_key']
cf.auth_url = config['auth_url']
cf.region = config['region']
cf.servicenet = config['servicenet']
cf.container = config['container']
cf.path = config['path']
cf.thread_count = #{ use_threads ? 2 : 0 }
cf.mirror = #{ mirror }
cf.directories do
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_a')
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_b')
end
end
end
EOS
job = backup_perform :my_backup, :exit_status => 1
expect(
objects_on_remote.map {|obj| [obj.name, obj.hash] }
).to eq(
expected_on_remote(:before_update, mirror)
)
expect( skipped_file_logged?(job) ).to be_true
update_local_sync_files
job = backup_perform :my_backup, :exit_status => 1
expect(
objects_on_remote.map {|obj| [obj.name, obj.hash] }
).to eq(
expected_on_remote(:after_update, mirror)
)
expect( skipped_file_logged?(job) ).to be_true
end
end # shared_examples 'sync test (cf)'
context 'with threads', :live do
let(:use_threads) { true }
context 'with mirroring' do
let(:mirror) { true }
include_examples 'sync test (cf)'
end
context 'without mirroring' do
let(:mirror) { false }
include_examples 'sync test (cf)'
end
end
context 'without threads', :live do
let(:use_threads) { false }
context 'with mirroring' do
let(:mirror) { true }
include_examples 'sync test (cf)'
end
context 'without mirroring' do
let(:mirror) { false }
include_examples 'sync test (cf)'
end
end
it 'excludes files', :live do
create_model :my_backup, <<-EOS
Backup::Model.new(:my_backup, 'a description') do
config = BackupSpec::LIVE['syncer']['cloud']['cloudfiles']
sync_with Cloud::CloudFiles do |cf|
cf.username = config['username']
cf.api_key = config['api_key']
cf.auth_url = config['auth_url']
cf.region = config['region']
cf.servicenet = config['servicenet']
cf.container = config['container']
cf.path = config['path']
cf.directories do
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_a')
add File.join(BackupSpec::LOCAL_SYNC_PATH, 'dir_b')
exclude '**/two.*'
exclude /three\.file$/
end
end
end
EOS
backup_perform :my_backup, :exit_status => 1
expect(
objects_on_remote.map {|obj| [obj.name, obj.hash] }
).to eq([
[File.join(remote_path, 'dir_a/one.file'), 'd3b07384d113edec49eaa6238ad5ff00']
])
end
private
def cloud_io
config = BackupSpec::LIVE['syncer']['cloud']['cloudfiles']
@cloud_io ||= CloudIO::CloudFiles.new(
:username => config['username'],
:api_key => config['api_key'],
:auth_url => config['auth_url'],
:region => config['region'],
:servicenet => config['servicenet'],
:container => config['container'],
:max_retries => 3,
:retry_waitsec => 5,
# Syncers can not use multipart upload.
:segments_container => nil,
:segment_size => 0
)
end
def remote_path
BackupSpec::LIVE['syncer']['cloud']['cloudfiles']['path']
end
def objects_on_remote
cloud_io.objects(remote_path).sort_by(&:name)
end
def clean_remote
cloud_io.delete(objects_on_remote)
end
def expected_on_remote(state, mirror)
case state
when :before_update
files = [['dir_a/one.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/dir_c/three.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/two.file', 'd3b07384d113edec49eaa6238ad5ff00']]
when :after_update
files = [['dir_a/dir_d/two.new', '14758f1afd44c09b7992073ccf00b43d'],
['dir_a/one.file', '14758f1afd44c09b7992073ccf00b43d'],
['dir_b/dir_c/three.file', 'd3b07384d113edec49eaa6238ad5ff00'],
['dir_b/one.new', '14758f1afd44c09b7992073ccf00b43d']]
files << ['dir_b/two.file', 'd3b07384d113edec49eaa6238ad5ff00'] unless mirror
end
files.map {|path, md5| [File.join(remote_path, path), md5] }.sort_by(&:first)
end
def skipped_file_logged?(job)
messages = job.logger.messages.map {|m| m.formatted_lines }.flatten
file = File.join(BackupSpec::LOCAL_SYNC_PATH, "dir_b/bad\uFFFDfile")
messages.any? {|line| line.include? "[warn] [skipping] #{ file }" }
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/archive_spec.rb | spec/archive_spec.rb | require "spec_helper"
module Backup
describe Archive do
let(:model) { Model.new(:test_trigger, "test model") }
describe "#initialize" do
it "sets default values" do
archive = Archive.new(model, :test_archive) {}
expect(archive.name).to eq "test_archive"
expect(archive.options[:sudo]).to be(false)
expect(archive.options[:root]).to be(false)
expect(archive.options[:paths]).to eq []
expect(archive.options[:excludes]).to eq []
expect(archive.options[:tar_options]).to eq ""
end
it "sets configured values" do
archive = Archive.new(model, :test_archive) do |a|
a.use_sudo
a.root "root/path"
a.add "a/path"
a.add "/another/path"
a.exclude "excluded/path"
a.exclude "/another/excluded/path"
a.tar_options "-h --xattrs"
end
expect(archive.name).to eq "test_archive"
expect(archive.options[:sudo]).to be(true)
expect(archive.options[:root]).to eq "root/path"
expect(archive.options[:paths]).to eq(
["a/path", "/another/path"]
)
expect(archive.options[:excludes]).to eq(
["excluded/path", "/another/excluded/path"]
)
expect(archive.options[:tar_options]).to eq "-h --xattrs"
end
end # describe '#initialize'
describe "#perform!" do
before do
allow_any_instance_of(Archive).to receive(:utility).with(:tar).and_return("tar")
allow_any_instance_of(Archive).to receive(:utility).with(:cat).and_return("cat")
allow_any_instance_of(Archive).to receive(:utility).with(:sudo).and_return("sudo")
allow_any_instance_of(Archive).to receive(:with_files_from).and_yield("")
allow(Config).to receive(:tmp_path).and_return("/tmp/path")
allow_any_instance_of(Pipeline).to receive(:success?).and_return(true)
end
describe "success/failure messages" do
let(:archive) { Archive.new(model, :my_archive) {} }
it "logs info messages on success" do
expect(Logger).to receive(:info).with("Creating Archive 'my_archive'...")
expect(Logger).to receive(:info).with("Archive 'my_archive' Complete!")
archive.perform!
end
it "raises error on failure" do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
expect(Logger).to receive(:info).with("Creating Archive 'my_archive'...")
expect(Logger).to receive(:info).with("Archive 'my_archive' Complete!").never
expect do
archive.perform!
end.to raise_error(Archive::Error) { |err|
expect(err.message).to eq(
"Archive::Error: Failed to Create Archive 'my_archive'\n" \
" error messages"
)
}
end
end
describe "using GNU tar" do
before do
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
end
it "returns GNU tar options" do
archive = Archive.new(model, :my_archive) {}
expect_any_instance_of(Pipeline).to receive(:add).with(
"tar --ignore-failed-read -cPf - ", [0, 1]
)
archive.perform!
end
it "prepends GNU tar options" do
archive = Archive.new(model, :my_archive) do |a|
a.tar_options "-h --xattrs"
end
expect_any_instance_of(Pipeline).to receive(:add).with(
"tar --ignore-failed-read -h --xattrs -cPf - ", [0, 1]
)
archive.perform!
end
end
describe "using BSD tar" do
before do
allow_any_instance_of(Archive).to receive(:gnu_tar?).and_return(false)
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
end
it "returns no GNU options" do
archive = Archive.new(model, :my_archive) {}
expect_any_instance_of(Pipeline).to receive(:add).with("tar -cPf - ", [0])
archive.perform!
end
it "returns only the configured options" do
archive = Archive.new(model, :my_archive) do |a|
a.tar_options "-h --xattrs"
end
expect_any_instance_of(Pipeline).to receive(:add).with("tar -h --xattrs -cPf - ", [0])
archive.perform!
end
end
describe "root path option" do
context "when a root path is given" do
it "changes directories to create relative path archives" do
archive = Archive.new(model, :my_archive) do |a|
a.root "root/path"
a.add "this/path"
a.add "/that/path"
a.exclude "other/path"
a.exclude "/another/path"
end
expect(archive).to receive(:with_files_from).with(
["this/path", "/that/path"]
).and_yield("-T '/path/to/tmpfile'")
expect_any_instance_of(Pipeline).to receive(:add).with(
"tar --ignore-failed-read -cPf - " \
"-C '#{File.expand_path("root/path")}' " \
"--exclude='other/path' --exclude='/another/path' " \
"-T '/path/to/tmpfile'",
[0, 1]
)
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
archive.perform!
end
end
context "when no root path is given" do
it "creates archives with expanded paths" do
archive = Archive.new(model, :my_archive) do |a|
a.add "this/path"
a.add "/that/path"
a.exclude "other/path"
a.exclude "/another/path"
end
expect(archive).to receive(:with_files_from).with(
[File.expand_path("this/path"), "/that/path"]
).and_yield("-T '/path/to/tmpfile'")
expect_any_instance_of(Pipeline).to receive(:add).with(
"tar --ignore-failed-read -cPf - " \
"--exclude='#{File.expand_path("other/path")}' " \
"--exclude='/another/path' " \
"-T '/path/to/tmpfile'",
[0, 1]
)
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
archive.perform!
end
end
end # describe 'root path option'
describe "compressor usage" do
let(:archive) { Archive.new(model, :my_archive) {} }
it "creates a compressed archive" do
compressor = double
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("comp_command", ".comp_ext")
expect_any_instance_of(Pipeline).to receive(:<<).with("comp_command")
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar.comp_ext'"
)
archive.perform!
end
it "creates an uncompressed archive" do
expect_any_instance_of(Pipeline).to receive(:<<).with("comp_command").never
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
archive.perform!
end
end
specify "may use sudo" do
archive = Archive.new(model, :my_archive, &:use_sudo)
expect_any_instance_of(Pipeline).to receive(:add).with(
"sudo -n tar --ignore-failed-read -cPf - ", [0, 1]
)
expect_any_instance_of(Pipeline).to receive(:<<).with(
"cat > '/tmp/path/test_trigger/archives/my_archive.tar'"
)
archive.perform!
end
end # describe '#perform!'
describe "#with_files_from" do
let(:archive) { Archive.new(model, :test_archive) {} }
let(:s) { sequence "" }
let(:tmpfile) { double(File, path: "/path/to/tmpfile") }
let(:paths) { ["this/path", "/that/path"] }
# -T is used for BSD compatibility
it "yields the tar --files-from option" do
expect(Tempfile).to receive(:new).ordered.and_return(tmpfile)
expect(tmpfile).to receive(:puts).ordered.with("this/path")
expect(tmpfile).to receive(:puts).ordered.with("/that/path")
expect(tmpfile).to receive(:close).ordered
expect(tmpfile).to receive(:delete).ordered
archive.send(:with_files_from, paths) do |files_from|
expect(files_from).to eq "-T '/path/to/tmpfile'"
end
end
it "ensures the tmpfile is removed" do
expect(Tempfile).to receive(:new).and_return(tmpfile)
expect(tmpfile).to receive(:close)
expect(tmpfile).to receive(:delete)
expect do
archive.send(:with_files_from, []) { raise "foo" }
end.to raise_error("foo")
end
it "writes the given paths to a tempfile" do
archive.send(:with_files_from, paths) do |files_from|
path = files_from.match(/-T '(.*)'/)[1]
expect(File.read(path)).to eq "this/path\n/that/path\n"
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/package_spec.rb | spec/package_spec.rb | require "spec_helper"
# Specs for Backup::Package -- the value object describing a backup
# package's trigger, extension, split-chunk suffixes and cycle flag.
module Backup
  describe Package do
    let(:model) { Model.new(:test_trigger, "test label") }
    let(:package) { Package.new(model) }

    describe "#initialize" do
      it "sets defaults" do
        expect(package.time).to be_nil
        expect(package.trigger).to eq "test_trigger"
        expect(package.extension).to eq "tar"
        expect(package.chunk_suffixes).to eq []
        expect(package.no_cycle).to be(false)
        expect(package.version).to eq VERSION
      end
    end

    it "allows time to be set" do
      package.time = "foo"
      expect(package.time).to eq "foo"
    end

    it "allows chunk_suffixes to be set" do
      package.chunk_suffixes = "foo"
      expect(package.chunk_suffixes).to eq "foo"
    end

    it "allows extension to be updated" do
      # Both in-place mutation (<<) and reassignment are supported.
      package.extension << ".foo"
      expect(package.extension).to eq "tar.foo"
      package.extension = "foo"
      expect(package.extension).to eq "foo"
    end

    it "allows no_cycle to be set" do
      package.no_cycle = true
      expect(package.no_cycle).to be(true)
    end

    describe "#filenames" do
      context "when the package files were not split" do
        it "returns an array with the single package filename" do
          expect(package.filenames).to eq ["test_trigger.tar"]
        end

        it "reflects changes in the extension" do
          package.extension << ".enc"
          expect(package.filenames).to eq ["test_trigger.tar.enc"]
        end
      end

      context "when the package files were split" do
        before { package.chunk_suffixes = ["aa", "ab"] }

        it "returns an array of the package filenames" do
          # Chunk suffixes are appended after the extension, `-<suffix>`.
          expect(package.filenames).to eq(
            ["test_trigger.tar-aa", "test_trigger.tar-ab"]
          )
        end

        it "reflects changes in the extension" do
          package.extension << ".enc"
          expect(package.filenames).to eq(
            ["test_trigger.tar.enc-aa", "test_trigger.tar.enc-ab"]
          )
        end
      end
    end

    describe "#basename" do
      it "returns the base filename for the package" do
        expect(package.basename).to eq "test_trigger.tar"
      end

      it "reflects changes in the extension" do
        package.extension << ".enc"
        expect(package.basename).to eq "test_trigger.tar.enc"
      end
    end

    describe "#time_as_object" do
      it "returns Time object from string" do
        # #time stores a "%Y.%m.%d.%H.%M.%S" formatted string.
        package.time = dummy_time = "2015.12.30.20.45.59"
        expect(package.time_as_object)
          .to eq Time.strptime(dummy_time, "%Y.%m.%d.%H.%M.%S")
      end
    end
  end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/cleaner_spec.rb | spec/cleaner_spec.rb | require "spec_helper"
module Backup
describe Cleaner do
let(:model) { Model.new(:test_trigger, "test label") }
describe "#prepare" do
let(:error_tail) do
<<-EOS.gsub(/^ +/, " ")
Please check the log for messages and/or your notifications
concerning this backup: 'test label (test_trigger)'
The temporary files which had to be removed should not have existed.
EOS
end
context "when no temporary packaging folder or package files exist" do
it "does nothing" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(false)
expect(Cleaner).to receive(:package_files_for).with("test_trigger").and_return([])
expect(FileUtils).to receive(:rm_rf).never
expect(FileUtils).to receive(:rm_f).never
expect(Logger).to receive(:warn).never
Cleaner.prepare(model)
end
end
context "when a temporary packaging folder exists" do
it "removes the folder and logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(true)
expect(Cleaner).to receive(:package_files_for).with("test_trigger").and_return([])
expect(FileUtils).to receive(:rm_rf)
.with(File.join(Config.tmp_path, "test_trigger"))
expect(FileUtils).to receive(:rm_f).never
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary packaging folder still exists!
'#{File.join(Config.tmp_path, "test_trigger")}'
It will now be removed.
#{error_tail}
EOS
end
Cleaner.prepare(model)
end
end
context "when package files exist" do
it "removes the files and logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(false)
expect(Cleaner).to receive(:package_files_for)
.with("test_trigger")
.and_return(["file1", "file2"])
expect(FileUtils).to receive(:rm_rf).never
expect(FileUtils).to receive(:rm_f).with("file1")
expect(FileUtils).to receive(:rm_f).with("file2")
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary backup folder '#{Config.tmp_path}'
appears to contain the package files from the previous backup!
file1
file2
These files will now be removed.
#{error_tail}
EOS
end
Cleaner.prepare(model)
end
end
context "when both the temporary packaging folder and package files exist" do
it "removes both and logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(true)
expect(Cleaner).to receive(:package_files_for)
.with("test_trigger")
.and_return(["file1", "file2"])
expect(FileUtils).to receive(:rm_rf)
.with(File.join(Config.tmp_path, "test_trigger"))
expect(FileUtils).to receive(:rm_f).with("file1")
expect(FileUtils).to receive(:rm_f).with("file2")
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary packaging folder still exists!
'#{File.join(Config.tmp_path, "test_trigger")}'
It will now be removed.
#{"\n #{"-" * 74}"}
The temporary backup folder '#{Config.tmp_path}'
appears to contain the package files from the previous backup!
file1
file2
These files will now be removed.
#{error_tail}
EOS
end
Cleaner.prepare(model)
end
end
end # describe '#prepare'
describe "#remove_packaging" do
it "removes the packaging directory" do
expect(Logger).to receive(:info).with("Cleaning up the temporary files...")
expect(FileUtils).to receive(:rm_rf)
.with(File.join(Config.tmp_path, "test_trigger"))
Cleaner.remove_packaging(model)
end
end
describe "#remove_package" do
it "removes the package files" do
package = double(Backup::Package, filenames: ["file1", "file2"])
expect(Backup::Logger).to receive(:info).with("Cleaning up the package files...")
expect(FileUtils).to receive(:rm_f).with(File.join(Config.tmp_path, "file1"))
expect(FileUtils).to receive(:rm_f).with(File.join(Config.tmp_path, "file2"))
Cleaner.remove_package(package)
end
end
describe "#warnings" do
let(:error_tail) do
<<-EOS.gsub(/^ +/, " ")
Make sure you check these files before the next scheduled backup for
'test label (test_trigger)'
These files will be removed at that time!
EOS
end
context "when no temporary packaging folder or package files exist" do
it "does nothing" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(false)
expect(Cleaner).to receive(:package_files_for).with("test_trigger").and_return([])
expect(Logger).to receive(:warn).never
Cleaner.warnings(model)
end
end
context "when a temporary packaging folder exists" do
it "logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(true)
expect(Cleaner).to receive(:package_files_for).with("test_trigger").and_return([])
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary packaging folder still exists!
'#{File.join(Config.tmp_path, "test_trigger")}'
This folder may contain completed Archives and/or Database backups.
#{error_tail}
EOS
end
Cleaner.warnings(model)
end
end
context "when package files exist" do
it "logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(false)
expect(Cleaner).to receive(:package_files_for)
.with("test_trigger")
.and_return(["file1", "file2"])
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary backup folder '#{Config.tmp_path}'
appears to contain the backup files which were to be stored:
file1
file2
#{error_tail}
EOS
end
Cleaner.warnings(model)
end
end
context "when both the temporary packaging folder and package files exist" do
it "logs a warning" do
expect(File).to receive(:exist?)
.with(File.join(Config.tmp_path, "test_trigger"))
.and_return(true)
expect(Cleaner).to receive(:package_files_for)
.with("test_trigger")
.and_return(["file1", "file2"])
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Cleaner::Error
expect(err.message).to eq(<<-EOS.gsub(/^ +/, " ").strip)
Cleaner::Error: Cleanup Warning
The temporary packaging folder still exists!
'#{File.join(Config.tmp_path, "test_trigger")}'
This folder may contain completed Archives and/or Database backups.
#{"\n #{"-" * 74}"}
The temporary backup folder '#{Config.tmp_path}'
appears to contain the backup files which were to be stored:
file1
file2
#{error_tail}
EOS
end
Cleaner.warnings(model)
end
end
end # describe '#warnings'
describe "#package_files_for" do
before do
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
Config.send(:update, root_path: @tmpdir)
FileUtils.mkdir_p(Config.tmp_path)
end
after { FileUtils.rm_r(@tmpdir, force: true, secure: true) }
context "when package files exist" do
it "returns the package files for the given trigger" do
package_files = [
"test_trigger.tar",
"test_trigger.tar-aa",
"test_trigger.tar.enc",
"test_trigger.tar.enc-aa"
].map { |f| File.join(Config.tmp_path, f) }
other_files = [
"test_trigger.target.tar",
"other_trigger.tar",
"foo.tar"
].map { |f| File.join(Config.tmp_path, f) }
FileUtils.touch(package_files + other_files)
expect(Dir[File.join(Config.tmp_path, "*")].count).to eq 7
expect(Cleaner.send(:package_files_for, "test_trigger").sort)
.to eq package_files
end
end
context "when no packaging files exist" do
it "returns an empty array" do
other_files = [
"test_trigger.target.tar",
"other_trigger.tar",
"foo.tar"
].map { |f| File.join(Config.tmp_path, f) }
FileUtils.touch(other_files)
expect(Dir[File.join(Config.tmp_path, "*")].count).to be 3
expect(Cleaner.send(:package_files_for, "test_trigger")).to eq []
end
end
end # describe '#package_files_for'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/pipeline_spec.rb | spec/pipeline_spec.rb | require "spec_helper"
describe "Backup::Pipeline" do
let(:pipeline) { Backup::Pipeline.new }
it "should include Utilities::Helpers" do
expect(Backup::Pipeline
.include?(Backup::Utilities::Helpers)).to eq(true)
end
describe "#initialize" do
it "should create a new pipeline" do
expect(pipeline.instance_variable_get(:@commands)).to eq([])
expect(pipeline.instance_variable_get(:@success_codes)).to eq([])
expect(pipeline.errors).to eq([])
expect(pipeline.stderr).to eq("")
end
end
describe "#add" do
it "should add a command with the given successful exit codes" do
pipeline.add "a command", [0]
expect(pipeline.instance_variable_get(:@commands)).to eq(["a command"])
expect(pipeline.instance_variable_get(:@success_codes)).to eq([[0]])
pipeline.add "another command", [1, 3]
expect(pipeline.instance_variable_get(:@commands))
.to eq(["a command", "another command"])
expect(pipeline.instance_variable_get(:@success_codes))
.to eq([[0], [1, 3]])
end
end
describe "#<<" do
it "should add a command with the default successful exit code (0)" do
expect(pipeline).to receive(:add).with("a command", [0])
pipeline << "a command"
end
end
describe "#run" do
let(:stdout) { double }
let(:stderr) { double }
before do
allow_any_instance_of(Backup::Pipeline).to receive(:run).and_call_original
expect(pipeline).to receive(:pipeline).and_return("foo")
# stub Utilities::Helpers#command_name so it simply returns what it's passed
pipeline.class.send(:define_method, :command_name, ->(arg) { arg })
end
context "when pipeline command is successfully executed" do
before do
expect(Open4).to receive(:popen4).with("foo").and_yield(nil, nil, stdout, stderr)
end
context "when all commands within the pipeline are successful" do
before do
pipeline.instance_variable_set(:@success_codes, [[0], [0, 3]])
expect(stdout).to receive(:read).and_return("0|0:1|3:\n")
end
context "when commands output no stderr messages" do
before do
expect(stderr).to receive(:read).and_return("")
allow(pipeline).to receive(:stderr_messages).and_return(false)
end
it "should process the returned stdout/stderr and report no errors" do
expect(Backup::Logger).to receive(:warn).never
pipeline.run
expect(pipeline.stderr).to eq("")
expect(pipeline.errors).to eq([])
end
end
context "when successful commands output messages on stderr" do
before do
expect(stderr).to receive(:read).and_return("stderr output\n")
allow(pipeline).to receive(:stderr_messages).and_return("stderr_messages_output")
end
it "should log a warning with the stderr messages" do
expect(Backup::Logger).to receive(:warn).with("stderr_messages_output")
pipeline.run
expect(pipeline.stderr).to eq("stderr output")
expect(pipeline.errors).to eq([])
end
end
end # context 'when all commands within the pipeline are successful'
context "when commands within the pipeline are not successful" do
before do
pipeline.instance_variable_set(:@commands, ["first", "second", "third"])
pipeline.instance_variable_set(:@success_codes, [[0, 1], [0, 3], [0]])
expect(stderr).to receive(:read).and_return("stderr output\n")
allow(pipeline).to receive(:stderr_messages).and_return("success? should be false")
end
context "when the commands return in sequence" do
before do
expect(stdout).to receive(:read).and_return("0|1:1|1:2|0:\n")
end
it "should set @errors and @stderr without logging warnings" do
expect(Backup::Logger).to receive(:warn).never
pipeline.run
expect(pipeline.stderr).to eq("stderr output")
expect(pipeline.errors.count).to be(1)
expect(pipeline.errors.first).to be_a_kind_of SystemCallError
expect(pipeline.errors.first.errno).to be(1)
expect(pipeline.errors.first.message).to match(
"'second' returned exit code: 1"
)
end
end # context 'when the commands return in sequence'
context "when the commands return out of sequence" do
before do
expect(stdout).to receive(:read).and_return("1|3:2|4:0|1:\n")
end
it "should properly associate the exitstatus for each command" do
expect(Backup::Logger).to receive(:warn).never
pipeline.run
expect(pipeline.stderr).to eq("stderr output")
expect(pipeline.errors.count).to be(1)
expect(pipeline.errors.first).to be_a_kind_of SystemCallError
expect(pipeline.errors.first.errno).to be(4)
expect(pipeline.errors.first.message).to match(
"'third' returned exit code: 4"
)
end
end # context 'when the commands return out of sequence'
context "when multiple commands fail (out of sequence)" do
before do
expect(stdout).to receive(:read).and_return("1|1:2|0:0|3:\n")
end
it "should properly associate the exitstatus for each command" do
expect(Backup::Logger).to receive(:warn).never
pipeline.run
expect(pipeline.stderr).to eq("stderr output")
expect(pipeline.errors.count).to be(2)
pipeline.errors.each { |err| expect(err).to be_a_kind_of SystemCallError }
expect(pipeline.errors[0].errno).to be(3)
expect(pipeline.errors[0].message).to match(
"'first' returned exit code: 3"
)
expect(pipeline.errors[1].errno).to be(1)
expect(pipeline.errors[1].message).to match(
"'second' returned exit code: 1"
)
end
end # context 'when the commands return (out of sequence)'
end # context 'when commands within the pipeline are not successful'
end # context 'when pipeline command is successfully executed'
context "when pipeline command fails to execute" do
before do
expect(Open4).to receive(:popen4).with("foo").and_raise("exec failed")
end
it "should raise an error" do
expect do
pipeline.run
end.to raise_error(Backup::Pipeline::Error) { |err|
expect(err.message).to eq(
"Pipeline::Error: Pipeline failed to execute\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: exec failed"
)
}
end
end # context 'when pipeline command fails to execute'
end # describe '#run'
describe "#success?" do
it "returns true when @errors is empty" do
expect(pipeline.success?).to eq(true)
end
it "returns false when @errors is not empty" do
pipeline.instance_variable_set(:@errors, ["foo"])
expect(pipeline.success?).to eq(false)
end
end # describe '#success?'
describe "#error_messages" do
let(:sys_err) { RUBY_VERSION < "1.9" ? "SystemCallError" : "Errno::NOERROR" }
before do
# use 0 since others may be platform-dependent
pipeline.instance_variable_set(
:@errors, [
SystemCallError.new("first error", 0),
SystemCallError.new("second error", 0)
]
)
end
context "when #stderr_messages has messages" do
before do
expect(pipeline).to receive(:stderr_messages).and_return("stderr messages\n")
end
it "should output #stderr_messages and formatted system error messages" do
expect(pipeline.error_messages).to match(/
stderr\smessages\n
The\sfollowing\ssystem\serrors\swere\sreturned:\n
#{ sys_err }:\s(.*?)\sfirst\serror\n
#{ sys_err }:\s(.*?)\ssecond\serror
/x)
end
end
context "when #stderr_messages has no messages" do
before do
expect(pipeline).to receive(:stderr_messages).and_return("stderr messages\n")
end
it "should only output the formatted system error messages" do
expect(pipeline.error_messages).to match(/
stderr\smessages\n
The\sfollowing\ssystem\serrors\swere\sreturned:\n
#{ sys_err }:\s(.*?)\sfirst\serror\n
#{ sys_err }:\s(.*?)\ssecond\serror
/x)
end
end
end # describe '#error_messages'
describe "#pipeline" do
context "when there are multiple system commands to execute" do
before do
pipeline.instance_variable_set(:@commands, %w[one two three])
end
it "should build a pipeline with redirected/collected exit codes" do
expect(pipeline.send(:pipeline)).to eq(
'{ { one 2>&4 ; echo "0|$?:" >&3 ; } | ' \
'{ two 2>&4 ; echo "1|$?:" >&3 ; } | ' \
'{ three 2>&4 ; echo "2|$?:" >&3 ; } } 3>&1 1>&2 4>&2'
)
end
end
context "when there is only one system command to execute" do
before do
pipeline.instance_variable_set(:@commands, ["foo"])
end
it "should build the command line in the same manner, but without pipes" do
expect(pipeline.send(:pipeline)).to eq(
'{ { foo 2>&4 ; echo "0|$?:" >&3 ; } } 3>&1 1>&2 4>&2'
)
end
end
end # describe '#pipeline'
describe "#stderr_message" do
context "when @stderr has messages" do
before do
pipeline.instance_variable_set(:@stderr, "stderr message\n output")
end
it "should return a formatted message with the @stderr messages" do
expect(pipeline.send(:stderr_messages)).to eq(
" Pipeline STDERR Messages:\n" \
" (Note: may be interleaved if multiple commands returned error messages)\n" \
"\n" \
" stderr message\n" \
" output\n"
)
end
end
context "when @stderr is empty" do
it "should return false" do
expect(pipeline.send(:stderr_messages)).to eq(false)
end
end
end # describe '#stderr_message'
end # describe 'Backup::Pipeline'
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/cli_spec.rb | spec/cli_spec.rb | require "spec_helper"
require "rubygems/dependency_installer"
describe "Backup::CLI" do
let(:cli) { Backup::CLI }
let(:s) { sequence "" }
before { @argv_save = ARGV }
after { ARGV.replace(@argv_save) }
describe "#perform" do
let(:model_a) { Backup::Model.new(:test_trigger_a, "test label a") }
let(:model_b) { Backup::Model.new(:test_trigger_b, "test label b") }
let(:s) { sequence "" }
after { Backup::Model.send(:reset!) }
describe "setting logger options" do
let(:logger_options) { Backup::Logger.instance_variable_get(:@config).dsl }
before do
expect(Backup::Config).to receive(:load).ordered
expect(Backup::Logger).to receive(:start!).ordered
expect(model_a).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
end
it "configures console and logfile loggers by default" do
ARGV.replace(["perform", "-t", "test_trigger_a,test_trigger_b"])
cli.start
expect(logger_options.console.quiet).to eq(false)
expect(logger_options.logfile.enabled).to eq(true)
expect(logger_options.logfile.log_path).to eq("")
expect(logger_options.syslog.enabled).to eq(false)
end
it "configures only the syslog" do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b",
"--quiet", "--no-logfile", "--syslog"]
)
cli.start
expect(logger_options.console.quiet).to eq(true)
expect(logger_options.logfile.enabled).to eq(false)
expect(logger_options.logfile.log_path).to eq("")
expect(logger_options.syslog.enabled).to eq(true)
end
it "forces console logging" do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b", "--no-quiet"]
)
cli.start
expect(logger_options.console.quiet).to eq(false)
expect(logger_options.logfile.enabled).to eq(true)
expect(logger_options.logfile.log_path).to eq("")
expect(logger_options.syslog.enabled).to eq(false)
end
it "forces the logfile and syslog to be disabled" do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b",
"--no-logfile", "--no-syslog"]
)
cli.start
expect(logger_options.console.quiet).to eq(false)
expect(logger_options.logfile.enabled).to eq(false)
expect(logger_options.logfile.log_path).to eq("")
expect(logger_options.syslog.enabled).to eq(false)
end
it "configures the log_path" do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b",
"--log-path", "my/log/path"]
)
cli.start
expect(logger_options.console.quiet).to eq(false)
expect(logger_options.logfile.enabled).to eq(true)
expect(logger_options.logfile.log_path).to eq("my/log/path")
expect(logger_options.syslog.enabled).to eq(false)
end
end # describe 'setting logger options'
describe "setting triggers" do
let(:model_c) { Backup::Model.new(:test_trigger_c, "test label c") }
before do
expect(Backup::Logger).to receive(:configure).ordered
expect(Backup::Config).to receive(:load).ordered
expect(Backup::Logger).to receive(:start!).ordered
end
it "performs a given trigger" do
expect(model_a).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).never
ARGV.replace(
["perform", "-t", "test_trigger_a"]
)
cli.start
end
it "performs multiple triggers" do
expect(model_a).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end
it "performs multiple models that share a trigger name" do
expect(model_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
model_d = Backup::Model.new(:test_trigger_c, "test label d")
expect(model_d).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
ARGV.replace(
["perform", "-t", "test_trigger_c"]
)
cli.start
end
it "performs unique models only once, in the order first found" do
expect(model_a).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b,test_trigger_c,test_trigger_b"]
)
cli.start
end
it "performs unique models only once, in the order first found (wildcard)" do
expect(model_a).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
ARGV.replace(
["perform", "-t", "test_trigger_*"]
)
cli.start
end
end # describe 'setting triggers'
describe "failure to prepare for backups" do
before do
expect(Backup::Logger).to receive(:configure).ordered
expect(Backup::Logger).to receive(:start!).never
expect(model_a).to receive(:perform!).never
expect(model_b).to receive(:perform!).never
expect(Backup::Logger).to receive(:clear!).never
end
describe "when errors are raised while loading config.rb" do
before do
expect(Backup::Config).to receive(:load).ordered.and_raise("config load error")
end
it "aborts with status code 3 and logs messages to the console only" do
expectations = [
proc do |err|
expect(err).to be_a(Backup::CLI::Error)
expect(err.message).to match(/config load error/)
end,
proc { |err| expect(err).to be_a(String) }
]
expect(Backup::Logger).to receive(:error).ordered.exactly(2).times do |err|
expectation = expectations.shift
expectation.call(err) if expectation
end
expect(Backup::Logger).to receive(:abort!).ordered
expect do
ARGV.replace(
["perform", "-t", "test_trigger_a"]
)
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(3) }
end
end
describe "when no models are found for the given triggers" do
before do
expect(Backup::Config).to receive(:load).ordered
end
it "aborts and logs messages to the console only" do
expect(Backup::Logger).to receive(:error).ordered do |err|
expect(err).to be_a(Backup::CLI::Error)
expect(err.message).to match(
/No Models found for trigger\(s\) 'test_trigger_foo'/
)
end
expect(Backup::Logger).to receive(:abort!).ordered
expect do
ARGV.replace(
["perform", "-t", "test_trigger_foo"]
)
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(3) }
end
end
end # describe 'failure to prepare for backups'
describe "exit codes and notifications" do
let(:notifier_a) { double }
let(:notifier_b) { double }
let(:notifier_c) { double }
let(:notifier_d) { double }
before do
allow(Backup::Config).to receive(:load)
allow(Backup::Logger).to receive(:start!)
allow(model_a).to receive(:notifiers).and_return([notifier_a, notifier_c])
allow(model_b).to receive(:notifiers).and_return([notifier_b, notifier_d])
end
specify "when jobs are all successful" do
allow(model_a).to receive(:exit_status).and_return(0)
allow(model_b).to receive(:exit_status).and_return(0)
expect(model_a).to receive(:perform!).ordered
expect(notifier_a).to receive(:perform!).ordered
expect(notifier_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(notifier_b).to receive(:perform!).ordered
expect(notifier_d).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end
specify "when a job has warnings" do
allow(model_a).to receive(:exit_status).and_return(1)
allow(model_b).to receive(:exit_status).and_return(0)
expect(model_a).to receive(:perform!).ordered
expect(notifier_a).to receive(:perform!).ordered
expect(notifier_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(notifier_b).to receive(:perform!).ordered
expect(notifier_d).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end.to raise_error(SystemExit) { |err| expect(err.status).to be(1) }
end
specify "when a job has non-fatal errors" do
allow(model_a).to receive(:exit_status).and_return(2)
allow(model_b).to receive(:exit_status).and_return(0)
expect(model_a).to receive(:perform!).ordered
expect(notifier_a).to receive(:perform!).ordered
expect(notifier_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(notifier_b).to receive(:perform!).ordered
expect(notifier_d).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end.to raise_error(SystemExit) { |err| expect(err.status).to be(2) }
end
specify "when a job has fatal errors" do
allow(model_a).to receive(:exit_status).and_return(3)
allow(model_b).to receive(:exit_status).and_return(0)
expect(model_a).to receive(:perform!).ordered
expect(notifier_a).to receive(:perform!).ordered
expect(notifier_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).never
expect(model_b).to receive(:perform!).never
expect do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end.to raise_error(SystemExit) { |err| expect(err.status).to be(3) }
end
specify "when jobs have errors and warnings" do
allow(model_a).to receive(:exit_status).and_return(2)
allow(model_b).to receive(:exit_status).and_return(1)
expect(model_a).to receive(:perform!).ordered
expect(notifier_a).to receive(:perform!).ordered
expect(notifier_c).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect(model_b).to receive(:perform!).ordered
expect(notifier_b).to receive(:perform!).ordered
expect(notifier_d).to receive(:perform!).ordered
expect(Backup::Logger).to receive(:clear!).ordered
expect do
ARGV.replace(
["perform", "-t", "test_trigger_a,test_trigger_b"]
)
cli.start
end.to raise_error(SystemExit) { |err| expect(err.status).to be(2) }
end
end # describe 'exit codes and notifications'
describe "--check" do
it "runs the check command" do
# RSpec aliases old check method to __check_without_any_instance__,
# and thor does not like it, rendering a warning message about the lack
# of description. Here we define a description before stubbing the method.
cli.desc "check", "RSpec Check Command"
expect_any_instance_of(cli).to receive(:check).and_raise(SystemExit)
expect do
ARGV.replace(
["perform", "-t", "test_trigger_foo", "--check"]
)
cli.start
end.to raise_error(SystemExit)
end
end # describe '--check'
end # describe '#perform'
describe "#check" do
it "fails if errors are raised" do
allow(Backup::Config).to receive(:load).and_raise("an error")
out, err = capture_io do
ARGV.replace(["check"])
expect do
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(1) }
end
expect(err).to match(/RuntimeError: an error/)
expect(err).to match(/\[error\] Configuration Check Failed/)
expect(out).to be_empty
end
it "fails if warnings are issued" do
allow(Backup::Config).to receive(:load) do
Backup::Logger.warn "warning message"
end
out, err = capture_io do
ARGV.replace(["check"])
expect do
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(1) }
end
expect(err).to match(/\[warn\] warning message/)
expect(err).to match(/\[error\] Configuration Check Failed/)
expect(out).to be_empty
end
it "succeeds if there are no errors or warnings" do
allow(Backup::Config).to receive(:load)
out, err = capture_io do
ARGV.replace(["check"])
expect do
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(0) }
end
expect(err).to be_empty
expect(out).to match(/\[info\] Configuration Check Succeeded/)
end
it "uses --config-file if given" do
# Note: Thor#options is returning a HashWithIndifferentAccess.
expect(Backup::Config).to receive(:load) do |options|
options[:config_file] == "/my/config.rb"
end
allow(Backup::Logger).to receive(:abort!) # suppress output
ARGV.replace(["check", "--config-file", "/my/config.rb"])
expect do
cli.start
end.to raise_error(SystemExit) { |exit| expect(exit.status).to be(0) }
end
end # describe '#check'
describe "#generate:model" do
before do
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
end
after do
FileUtils.rm_r(@tmpdir, force: true, secure: true)
end
context "when given a --config-file" do
context "when no config file exists" do
it "should create both a config and a model under the given path" do
Dir.chdir(@tmpdir) do |path|
model_file = File.join(path, "custom", "models", "my_test_trigger.rb")
config_file = File.join(path, "custom", "config.rb")
out, err = capture_io do
ARGV.replace([
"generate:model",
"--config-file", config_file,
"--trigger",
"my test#trigger"
])
cli.start
end
expect(err).to be_empty
expect(out).to eq("Generated configuration file: '#{config_file}'.\n" \
"Generated model file: '#{model_file}'.\n")
expect(File.exist?(model_file)).to eq(true)
expect(File.exist?(config_file)).to eq(true)
end
end
end
context "when a config file already exists" do
it "should only create a model under the given path" do
Dir.chdir(@tmpdir) do |path|
model_file = File.join(path, "custom", "models", "my_test_trigger.rb")
config_file = File.join(path, "custom", "config.rb")
FileUtils.mkdir_p(File.join(path, "custom"))
FileUtils.touch(config_file)
expect(cli::Helpers).to receive(:overwrite?).with(config_file).never
expect(cli::Helpers).to receive(:overwrite?).with(model_file).and_return(true)
out, err = capture_io do
ARGV.replace([
"generate:model",
"--config-file", config_file,
"--trigger",
"my+test@trigger"
])
cli.start
end
expect(err).to be_empty
expect(out).to eq("Generated model file: '#{model_file}'.\n")
expect(File.exist?(model_file)).to eq(true)
end
end
end
context "when a model file already exists" do
it "should prompt to overwrite the model under the given path" do
Dir.chdir(@tmpdir) do |path|
model_file = File.join(path, "models", "test_trigger.rb")
config_file = File.join(path, "config.rb")
FileUtils.mkdir_p(File.dirname(model_file))
FileUtils.touch(model_file)
expect($stdin).to receive(:gets).and_return("n")
out, err = capture_io do
ARGV.replace([
"generate:model",
"--config-file", config_file,
"--trigger",
"test_trigger"
])
cli.start
end
expect(err).to include("Do you want to overwrite?")
expect(out).to eq("Generated configuration file: '#{config_file}'.\n")
expect(File.exist?(config_file)).to eq(true)
end
end
end
end # context 'when given a --config-file'
context "when not given a --config-file" do
it "should create both a config and a model under the root path" do
Dir.chdir(@tmpdir) do |path|
Backup::Config.send(:update, root_path: path)
model_file = File.join(path, "models", "test_trigger.rb")
config_file = File.join(path, "config.rb")
out, err = capture_io do
ARGV.replace(["generate:model", "--trigger", "test_trigger"])
cli.start
end
expect(err).to be_empty
expect(out).to eq("Generated configuration file: '#{config_file}'.\n" \
"Generated model file: '#{model_file}'.\n")
expect(File.exist?(model_file)).to eq(true)
expect(File.exist?(config_file)).to eq(true)
end
end
end
it "should include the correct option values" do
options = <<-EOS.lines.to_a.map(&:strip).map { |l| l.partition(" ") }
databases (mongodb, mysql, openldap, postgresql, redis, riak)
storages (cloud_files, dropbox, ftp, local, qiniu, rsync, s3, scp, sftp)
syncers (cloud_files, rsync_local, rsync_pull, rsync_push, s3)
encryptor (gpg, openssl)
compressor (bzip2, custom, gzip)
notifiers (campfire, command, datadog, flowdock, hipchat, http_post, mail, nagios, pagerduty, prowl, pushover, ses, slack, twitter)
EOS
out, err = capture_io do
ARGV.replace(["help", "generate:model"])
cli.start
end
expect(err).to be_empty
options.each do |option|
expect(out).to match(/#{ option[0] }.*#{ option[2] }/)
end
end
end # describe '#generate:model'
describe "#generate:config" do
before do
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
end
after do
FileUtils.rm_r(@tmpdir, force: true, secure: true)
end
context "when given a --config-file" do
it "should create a config file in the given path" do
Dir.chdir(@tmpdir) do |path|
config_file = File.join(path, "custom", "my_config.rb")
out, err = capture_io do
ARGV.replace(["generate:config",
"--config-file", config_file])
cli.start
end
expect(err).to be_empty
expect(out).to eq("Generated configuration file: '#{config_file}'.\n")
expect(File.exist?(config_file)).to eq(true)
end
end
end
context "when not given a --config-file" do
it "should create a config file in the root path" do
Dir.chdir(@tmpdir) do |path|
Backup::Config.send(:update, root_path: path)
config_file = File.join(path, "config.rb")
out, err = capture_io do
ARGV.replace(["generate:config"])
cli.start
end
expect(err).to be_empty
expect(out).to eq("Generated configuration file: '#{config_file}'.\n")
expect(File.exist?(config_file)).to eq(true)
end
end
end
context "when a config file already exists" do
it "should prompt to overwrite the config file" do
Dir.chdir(@tmpdir) do |path|
Backup::Config.send(:update, root_path: path)
config_file = File.join(path, "config.rb")
FileUtils.mkdir_p(File.dirname(config_file))
FileUtils.touch(config_file)
expect($stdin).to receive(:gets).and_return("n")
out, err = capture_io do
ARGV.replace(["generate:config"])
cli.start
end
expect(err).to include("Do you want to overwrite?")
expect(out).to be_empty
end
end
end
end # describe '#generate:config'
describe "#version" do
specify "using `backup version`" do
ARGV.replace ["version"]
out, err = capture_io do
cli.start
end
expect(err).to be_empty
expect(out).to eq("Backup #{Backup::VERSION}\n")
end
specify "using `backup -v`" do
ARGV.replace ["-v"]
out, err = capture_io do
cli.start
end
expect(err).to be_empty
expect(out).to eq("Backup #{Backup::VERSION}\n")
end
end
describe "Helpers" do
let(:helpers) { Backup::CLI::Helpers }
describe "#overwrite?" do
it "prompts user and accepts confirmation" do
expect(File).to receive(:exist?).with("a/path").and_return(true)
expect($stderr).to receive(:print).with(
"A file already exists at 'a/path'.\nDo you want to overwrite? [y/n] "
)
expect($stdin).to receive(:gets).and_return("yes\n")
expect(helpers.overwrite?("a/path")).to be_truthy
end
it "prompts user and accepts cancelation" do
expect(File).to receive(:exist?).with("a/path").and_return(true)
expect($stderr).to receive(:print).with(
"A file already exists at 'a/path'.\nDo you want to overwrite? [y/n] "
)
expect($stdin).to receive(:gets).and_return("no\n")
expect(helpers.overwrite?("a/path")).to be_falsy
end
it "returns true if path does not exist" do
expect(File).to receive(:exist?).with("a/path").and_return(false)
expect($stderr).to receive(:print).never
expect(helpers.overwrite?("a/path")).to eq(true)
end
end
end # describe 'Helpers'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/model_spec.rb | spec/model_spec.rb | require "spec_helper"
describe "Backup::Model" do
let(:model) { Backup::Model.new(:test_trigger, "test label") }
let(:s) { sequence "" }
before { Backup::Model.send(:reset!) }
after { Backup::Model.send(:reset!) }
describe ".all" do
it "should be an empty array by default" do
expect(Backup::Model.all).to eq([])
end
end
describe ".find_by_trigger" do
before do
[:one, :two, :three, :one].each_with_index do |sym, i|
Backup::Model.new("trigger_#{sym}", "label#{i + 1}")
end
end
it "should return an array of all models matching the trigger" do
models = Backup::Model.find_by_trigger("trigger_one")
expect(models).to be_a(Array)
expect(models.count).to be(2)
expect(models[0].label).to eq("label1")
expect(models[1].label).to eq("label4")
end
it "should return an array of all models matching a wildcard trigger" do
models = Backup::Model.find_by_trigger("trigger_t*")
expect(models.count).to be(2)
expect(models[0].label).to eq("label2")
expect(models[1].label).to eq("label3")
models = Backup::Model.find_by_trigger("trig*ne")
expect(models.count).to be(2)
expect(models[0].label).to eq("label1")
expect(models[1].label).to eq("label4")
expect(Backup::Model.find_by_trigger("trigg*").count).to be(4)
end
it "should accept a symbol" do
models = Backup::Model.find_by_trigger(:trigger_two)
expect(models.count).to be(1)
expect(models[0].label).to eq("label2")
end
it "should return an empty array if no matches are found" do
expect(Backup::Model.find_by_trigger("foo*")).to eq([])
end
end # describe '.find_by_trigger'
describe ".preconfigure" do
it "returns preconfiguration block if set" do
block = proc {}
expect(Backup::Model.preconfigure).to be_nil
Backup::Model.preconfigure(&block)
expect(Backup::Model.preconfigure).to be(block)
end
it "stores preconfiguration for each subclass" do
klass_a = Class.new(Backup::Model)
klass_b = Class.new(Backup::Model)
block_a = proc {}
block_b = proc {}
klass_a.preconfigure(&block_a)
klass_b.preconfigure(&block_b)
expect(klass_a.preconfigure).to be(block_a)
expect(klass_b.preconfigure).to be(block_b)
end
end
describe "subclassing Model" do
specify "custom model triggers can be found" do
klass = Class.new(Backup::Model)
model_a = klass.new(:model_a, "Model A")
model_b = Backup::Model.new(:model_b, "Mowel B")
model_c = klass.new(:model_c, "Model C")
expect(Backup::Model.all).to eq([model_a, model_b, model_c])
expect(Backup::Model.find_by_trigger(:model_c).first).to be(model_c)
end
end
describe "#initialize" do
it "sets default values" do
expect(model.trigger).to eq("test_trigger")
expect(model.label).to eq("test label")
expect(model.package).to be_an_instance_of Backup::Package
expect(model.time).to be_nil
expect(model.databases).to eq([])
expect(model.archives).to eq([])
expect(model.storages).to eq([])
expect(model.notifiers).to eq([])
expect(model.syncers).to eq([])
expect(model.compressor).to be_nil
expect(model.encryptor).to be_nil
expect(model.splitter).to be_nil
expect(model.exit_status).to be_nil
expect(model.exception).to be_nil
end
it "should convert trigger to a string" do
expect(Backup::Model.new(:foo, :bar).trigger).to eq("foo")
end
it "should convert label to a string" do
expect(Backup::Model.new(:foo, :bar).label).to eq("bar")
end
it "should accept and instance_eval a block" do
before_block = proc {}
block = proc do
before(&before_block)
end
model = Backup::Model.new(:foo, "", &block)
expect(model.before).to be(before_block)
end
it "should instance_eval the preconfiguration block" do
model_config_block = ->(_) { throw(:block_called, :model_config) }
pre_config_block = ->(_) { throw(:block_called, :pre_config) }
caught = catch(:block_called) do
Backup::Model.preconfigure(&pre_config_block)
Backup::Model.new("foo", "", &model_config_block)
end
expect(caught).to eq(:pre_config)
end
it "should add itself to Model.all" do
expect(Backup::Model.all).to eq([model])
end
# see also: spec/support/shared_examples/database.rb
it "triggers each database to generate it's #dump_filename" do
db1 = double
db2 = double
expect(db1).to receive(:dump_filename)
expect(db2).to receive(:dump_filename)
Backup::Model.new(:test_trigger, "test label") do
databases << db1
databases << db2
end
end
end # describe '#initialize'
describe "DSL Methods" do
module Fake
module NoArg
class Base
attr_accessor :block_arg
def initialize(&block)
instance_eval(&block) if block_given?
end
end
end
module OneArg
class Base
attr_accessor :arg1, :block_arg
def initialize(arg1, &block)
@arg1 = arg1
instance_eval(&block) if block_given?
end
end
end
module TwoArgs
class Base
attr_accessor :arg1, :arg2, :block_arg
def initialize(arg1, arg2, &block)
@arg1 = arg1
@arg2 = arg2
instance_eval(&block) if block_given?
end
end
end
module ThreeArgs
class Base
attr_accessor :arg1, :arg2, :arg3, :block_arg
def initialize(arg1, arg2, arg3, &block)
@arg1 = arg1
@arg2 = arg2
@arg3 = arg3
instance_eval(&block) if block_given?
end
end
end
end
# Set +const+ to +replacement+ for the calling block
def using_fake(const, replacement)
orig = Backup.const_get(const)
Backup.send(:remove_const, const)
Backup.const_set(const, replacement)
yield
Backup.send(:remove_const, const)
Backup.const_set(const, orig)
end
describe "#archive" do
it "should add archives" do
using_fake("Archive", Fake::TwoArgs::Base) do
model.archive("foo") { |a| a.block_arg = :foo }
model.archive("bar") { |a| a.block_arg = :bar }
expect(model.archives.count).to eq(2)
a1, a2 = model.archives
expect(a1.arg1).to be(model)
expect(a1.arg2).to eq("foo")
expect(a1.block_arg).to eq(:foo)
expect(a2.arg1).to be(model)
expect(a2.arg2).to eq("bar")
expect(a2.block_arg).to eq(:bar)
end
end
end
describe "#database" do
it "should add databases" do
using_fake("Database", Fake::TwoArgs) do
model.database("Base", "foo") { |a| a.block_arg = :foo }
# second arg is optional
model.database("Base") { |a| a.block_arg = :bar }
expect(model.databases.count).to be(2)
d1, d2 = model.databases
expect(d1.arg1).to be(model)
expect(d1.arg2).to eq("foo")
expect(d1.block_arg).to eq(:foo)
expect(d2.arg1).to be(model)
expect(d2.arg2).to be_nil
expect(d2.block_arg).to eq(:bar)
end
end
it "should accept a nested class name" do
using_fake("Database", Fake) do
model.database("TwoArgs::Base")
expect(model.databases.first).to be_an_instance_of Fake::TwoArgs::Base
end
end
end
describe "#store_with" do
it "should add storages" do
using_fake("Storage", Fake::TwoArgs) do
model.store_with("Base", "foo") { |a| a.block_arg = :foo }
# second arg is optional
model.store_with("Base") { |a| a.block_arg = :bar }
expect(model.storages.count).to be(2)
s1, s2 = model.storages
expect(s1.arg1).to be(model)
expect(s1.arg2).to eq("foo")
expect(s1.block_arg).to eq(:foo)
expect(s2.arg1).to be(model)
expect(s2.arg2).to be_nil
expect(s2.block_arg).to eq(:bar)
end
end
it "should accept a nested class name" do
using_fake("Storage", Fake) do
model.store_with("TwoArgs::Base")
expect(model.storages.first).to be_an_instance_of Fake::TwoArgs::Base
end
end
end
describe "#sync_with" do
it "should add syncers" do
using_fake("Syncer", Fake::OneArg) do
model.sync_with("Base", "foo") { |a| a.block_arg = :foo }
# second arg is optional
model.sync_with("Base") { |a| a.block_arg = :bar }
expect(model.syncers.count).to be(2)
s1, s2 = model.syncers
expect(s1.arg1).to eq("foo")
expect(s1.block_arg).to eq(:foo)
expect(s2.arg1).to be_nil
expect(s2.block_arg).to eq(:bar)
end
end
it "should accept a nested class name" do
using_fake("Syncer", Fake) do
model.sync_with("OneArg::Base")
expect(model.syncers.first).to be_an_instance_of Fake::OneArg::Base
end
end
end
describe "#notify_by" do
it "should add notifiers" do
using_fake("Notifier", Fake::OneArg) do
model.notify_by("Base") { |a| a.block_arg = :foo }
model.notify_by("Base") { |a| a.block_arg = :bar }
expect(model.notifiers.count).to be(2)
n1, n2 = model.notifiers
expect(n1.arg1).to be(model)
expect(n1.block_arg).to eq(:foo)
expect(n2.arg1).to be(model)
expect(n2.block_arg).to eq(:bar)
end
end
it "should accept a nested class name" do
using_fake("Notifier", Fake) do
model.notify_by("OneArg::Base")
expect(model.notifiers.first).to be_an_instance_of Fake::OneArg::Base
end
end
end
describe "#encrypt_with" do
it "should add an encryptor" do
using_fake("Encryptor", Fake::NoArg) do
model.encrypt_with("Base") { |a| a.block_arg = :foo }
expect(model.encryptor).to be_an_instance_of Fake::NoArg::Base
expect(model.encryptor.block_arg).to eq(:foo)
end
end
it "should accept a nested class name" do
using_fake("Encryptor", Fake) do
model.encrypt_with("NoArg::Base")
expect(model.encryptor).to be_an_instance_of Fake::NoArg::Base
end
end
end
describe "#compress_with" do
it "should add a compressor" do
using_fake("Compressor", Fake::NoArg) do
model.compress_with("Base") { |a| a.block_arg = :foo }
expect(model.compressor).to be_an_instance_of Fake::NoArg::Base
expect(model.compressor.block_arg).to eq(:foo)
end
end
it "should accept a nested class name" do
using_fake("Compressor", Fake) do
model.compress_with("NoArg::Base")
expect(model.compressor).to be_an_instance_of Fake::NoArg::Base
end
end
end
describe "#split_into_chunks_of" do
it "should add a splitter" do
using_fake("Splitter", Fake::ThreeArgs::Base) do
model.split_into_chunks_of(123, 2)
expect(model.splitter).to be_an_instance_of Fake::ThreeArgs::Base
expect(model.splitter.arg1).to be(model)
expect(model.splitter.arg2).to eq(123)
expect(model.splitter.arg3).to eq(2)
end
end
it "should raise an error if chunk_size is not an Integer" do
expect do
model.split_into_chunks_of("345", 2)
end.to raise_error Backup::Model::Error, /must be Integers/
end
it "should raise an error if suffix_size is not an Integer" do
expect do
model.split_into_chunks_of(345, "2")
end.to raise_error Backup::Model::Error, /must be Integers/
end
end
end # describe 'DSL Methods'
describe "#perform!" do
let(:procedure_a) { -> {} }
let(:procedure_b) { double }
let(:procedure_c) { double }
let(:syncer_a) { double }
let(:syncer_b) { double }
it "sets started_at, time, package.time and finished_at" do
Timecop.freeze
started_at = Time.now.utc
time = started_at.strftime("%Y.%m.%d.%H.%M.%S")
finished_at = started_at + 5
model.before { Timecop.freeze(finished_at) }
model.perform!
Timecop.return
expect(model.started_at).to eq(started_at)
expect(model.time).to eq(time)
expect(model.package.time).to eq(time)
expect(model.finished_at).to eq(finished_at)
end
it "performs all procedures" do
allow(model).to receive(:procedures).and_return([procedure_a, [procedure_b, procedure_c]])
allow(model).to receive(:syncers).and_return([syncer_a, syncer_b])
expect(model).to receive(:log!).ordered.with(:started)
expect(procedure_a).to receive(:call).ordered
expect(procedure_b).to receive(:perform!).ordered
expect(procedure_c).to receive(:perform!).ordered
expect(syncer_a).to receive(:perform!).ordered
expect(syncer_b).to receive(:perform!).ordered
expect(model).to receive(:log!).ordered.with(:finished)
model.perform!
expect(model.exception).to be_nil
expect(model.exit_status).to be 0
end
describe "exit status" do
it "sets exit_status to 0 when successful" do
model.perform!
expect(model.exception).to be_nil
expect(model.exit_status).to be 0
end
it "sets exit_status to 1 when warnings are logged" do
allow(model).to receive(:procedures).and_return([-> { Backup::Logger.warn "foo" }])
model.perform!
expect(model.exception).to be_nil
expect(model.exit_status).to be 1
end
it "sets exit_status 2 for a StandardError" do
err = StandardError.new "non-fatal error"
allow(model).to receive(:procedures).and_return([-> { raise err }])
model.perform!
expect(model.exception).to eq(err)
expect(model.exit_status).to be 2
end
it "sets exit_status 3 for an Exception" do
err = Exception.new "fatal error"
allow(model).to receive(:procedures).and_return([-> { raise err }])
model.perform!
expect(model.exception).to eq(err)
expect(model.exit_status).to be 3
end
end # context 'when errors occur'
describe "before/after hooks" do
specify "both are called" do
before_called = nil
procedure_called = nil
after_called_with = nil
model.before { before_called = true }
allow(model).to receive(:procedures).and_return([-> { procedure_called = true }])
model.after { |status| after_called_with = status }
model.perform!
expect(before_called).to be_truthy
expect(procedure_called).to be_truthy
expect(after_called_with).to be 0
end
specify "before hook may log warnings" do
procedure_called = nil
after_called_with = nil
model.before { Backup::Logger.warn "foo" }
allow(model).to receive(:procedures).and_return([-> { procedure_called = true }])
model.after { |status| after_called_with = status }
model.perform!
expect(model.exit_status).to be 1
expect(procedure_called).to be_truthy
expect(after_called_with).to be 1
end
specify "before hook may abort model with non-fatal exception" do
procedure_called = false
after_called = false
model.before { raise StandardError }
allow(model).to receive(:procedures).and_return([-> { procedure_called = true }])
model.after { after_called = true }
model.perform!
expect(model.exit_status).to be 2
expect(procedure_called).to eq(false)
expect(after_called).to eq(false)
end
specify "before hook may abort backup with fatal exception" do
procedure_called = false
after_called = false
model.before { raise Exception }
allow(model).to receive(:procedures).and_return([-> { procedure_called = true }])
model.after { after_called = true }
model.perform!
expect(model.exit_status).to be 3
expect(procedure_called).to eq(false)
expect(after_called).to eq(false)
end
specify "after hook is called when procedure raises non-fatal exception" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { raise StandardError }])
model.after { |status| after_called_with = status }
model.perform!
expect(model.exit_status).to be 2
expect(after_called_with).to be 2
end
specify "after hook is called when procedure raises fatal exception" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { raise Exception }])
model.after { |status| after_called_with = status }
model.perform!
expect(model.exit_status).to be 3
expect(after_called_with).to be 3
end
specify "after hook may log warnings" do
after_called_with = nil
model.after do |status|
after_called_with = status
Backup::Logger.warn "foo"
end
model.perform!
expect(model.exit_status).to be 1
expect(after_called_with).to be 0
end
specify "after hook warnings will not decrease exit_status" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { raise StandardError }])
model.after do |status|
after_called_with = status
Backup::Logger.warn "foo"
end
model.perform!
expect(model.exit_status).to be 2
expect(after_called_with).to be 2
expect(Backup::Logger.has_warnings?).to be_truthy
end
specify "after hook may fail model with non-fatal exceptions" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { Backup::Logger.warn "foo" }])
model.after do |status|
after_called_with = status
raise StandardError
end
model.perform!
expect(model.exit_status).to be 2
expect(after_called_with).to be 1
end
specify "after hook exception will not decrease exit_status" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { raise Exception }])
model.after do |status|
after_called_with = status
raise StandardError
end
model.perform!
expect(model.exit_status).to be 3
expect(after_called_with).to be 3
end
specify "after hook may abort backup with fatal exceptions" do
after_called_with = nil
allow(model).to receive(:procedures).and_return([-> { raise StandardError }])
model.after do |status|
after_called_with = status
raise Exception
end
model.perform!
expect(model.exit_status).to be 3
expect(after_called_with).to be 2
end
specify "hooks may be overridden" do
block_a = proc {}
block_b = proc {}
model.before(&block_a)
expect(model.before).to be(block_a)
model.before(&block_b)
expect(model.before).to be(block_b)
end
end # describe 'hooks'
end # describe '#perform!'
describe "#duration" do
it "returns a string representing the elapsed time" do
Timecop.freeze do
allow(model).to receive(:finished_at).and_return(Time.now)
{ 0 => "00:00:00", 1 => "00:00:01", 59 => "00:00:59",
60 => "00:01:00", 61 => "00:01:01", 119 => "00:01:59",
3540 => "00:59:00", 3541 => "00:59:01", 3599 => "00:59:59",
3600 => "01:00:00", 3601 => "01:00:01", 3659 => "01:00:59",
3660 => "01:01:00", 3661 => "01:01:01", 3719 => "01:01:59",
7140 => "01:59:00", 7141 => "01:59:01", 7199 => "01:59:59",
212_400 => "59:00:00", 212_401 => "59:00:01", 212_459 => "59:00:59",
212_460 => "59:01:00", 212_461 => "59:01:01", 212_519 => "59:01:59",
215_940 => "59:59:00", 215_941 => "59:59:01", 215_999 => "59:59:59" }.each do |duration, expected|
allow(model).to receive(:started_at).and_return(Time.now - duration)
expect(model.duration).to eq(expected)
end
end
end
it "returns nil if job has not finished" do
allow(model).to receive(:started_at).and_return(Time.now)
expect(model.duration).to be_nil
end
end # describe '#duration'
describe "#procedures" do
before do
allow(model).to receive(:prepare!).and_return(:prepare)
allow(model).to receive(:package!).and_return(:package)
allow(model).to receive(:store!).and_return([:storage])
allow(model).to receive(:clean!).and_return(:clean)
end
context "when no databases or archives are configured" do
it "returns an empty array" do
expect(model.send(:procedures)).to eq([])
end
end
context "when databases are configured" do
before do
allow(model).to receive(:databases).and_return([:database])
end
it "returns all procedures" do
one, two, three, four, five, six = model.send(:procedures)
expect(one.call).to eq(:prepare)
expect(two).to eq([:database])
expect(three).to eq([])
expect(four.call).to eq(:package)
expect(five.call).to eq([:storage])
expect(six.call).to eq(:clean)
end
end
context "when archives are configured" do
before do
allow(model).to receive(:archives).and_return([:archive])
end
it "returns all procedures" do
one, two, three, four, five, six = model.send(:procedures)
expect(one.call).to eq(:prepare)
expect(two).to eq([])
expect(three).to eq([:archive])
expect(four.call).to eq(:package)
expect(five.call).to eq([:storage])
expect(six.call).to eq(:clean)
end
end
end # describe '#procedures'
describe "#prepare!" do
it "should prepare for the backup" do
expect(Backup::Cleaner).to receive(:prepare).with(model)
model.send(:prepare!)
end
end
describe "#package!" do
it "should package the backup" do
expect(Backup::Packager).to receive(:package!).ordered.with(model)
expect(Backup::Cleaner).to receive(:remove_packaging).ordered.with(model)
model.send(:package!)
end
end
describe "#store!" do
context "when no storages are configured" do
before do
allow(model).to receive(:storages).and_return([])
end
it "should return true" do
expect(model.send(:store!)).to eq true
end
end
context "when multiple storages are configured" do
let(:storage_one) { double }
let(:storage_two) { double }
before do
allow(model).to receive(:storages).and_return([storage_one, storage_two])
end
it "should call storages in sequence and return true if all succeed" do
expect(storage_one).to receive(:perform!).ordered.and_return(true)
expect(storage_two).to receive(:perform!).ordered.and_return(true)
expect(model.send(:store!)).to eq true
end
it "should call storages in sequence and re-raise the first exception that occours" do
expect(storage_one).to receive(:perform!).ordered.and_raise "Storage error"
expect(storage_two).to receive(:perform!).ordered.and_return(true)
expect { model.send(:store!) }.to raise_error StandardError, "Storage error"
end
context "and multiple storages fail" do
let(:storage_three) { double }
before do
allow(model).to receive(:storages).and_return([storage_one, storage_two, storage_three])
end
it "should log the exceptions that are not re-raised" do
expect(storage_one).to receive(:perform!).and_raise "Storage error"
expect(storage_two).to receive(:perform!).and_raise "Different error"
expect(storage_three).to receive(:perform!).and_raise "Another error"
expected_messages = [/\ADifferent error\z/, /.*/, /\AAnother error\z/, /.*/] # every other invocation contains a stack trace
expect(Backup::Logger).to receive(:error).ordered.exactly(4).times do |err|
err.to_s =~ expected_messages.shift
end
expect { model.send(:store!) }.to raise_error StandardError, "Storage error"
end
end
end
end
describe "#clean!" do
it "should remove the final packaged files" do
expect(Backup::Cleaner).to receive(:remove_package).with(model.package)
model.send(:clean!)
end
end
describe "#get_class_from_scope" do
module Fake
module TestScope
class TestKlass; end
end
end
module TestScope
module TestKlass; end
end
context "when name is given as a string" do
it "should return the constant for the given scope and name" do
result = model.send(:get_class_from_scope, Fake, "TestScope")
expect(result).to eq(Fake::TestScope)
end
it "should accept a nested class name" do
result = model.send(:get_class_from_scope, Fake, "TestScope::TestKlass")
expect(result).to eq(Fake::TestScope::TestKlass)
end
end
context "when name is given as a module" do
it "should return the constant for the given scope and name" do
result = model.send(:get_class_from_scope, Fake, TestScope)
expect(result).to eq(Fake::TestScope)
end
it "should accept a nested class name" do
result = model.send(:get_class_from_scope, Fake, TestScope::TestKlass)
expect(result).to eq(Fake::TestScope::TestKlass)
end
end
context "when name is given as a module defined under Backup::Config::DSL" do
# this is necessary since the specs in spec/config/dsl_spec.rb
# remove all the constants from Backup::Config::DSL as part of those tests.
before(:context) do
class Backup::Config::DSL
module TestScope
module TestKlass; end
end
end
end
it "should return the constant for the given scope and name" do
result = model.send(
:get_class_from_scope,
Fake,
Backup::Config::DSL::TestScope
)
expect(result).to eq(Fake::TestScope)
end
it "should accept a nested class name" do
result = model.send(
:get_class_from_scope,
Fake,
Backup::Config::DSL::TestScope::TestKlass
)
expect(result).to eq(Fake::TestScope::TestKlass)
end
end
end # describe '#get_class_from_scope'
describe "#set_exit_status" do
context "when the model completed successfully without warnings" do
it "sets exit status to 0" do
model.send(:set_exit_status)
expect(model.exit_status).to be(0)
end
end
context "when the model completed successfully with warnings" do
before { allow(Backup::Logger).to receive(:has_warnings?).and_return(true) }
it "sets exit status to 1" do
model.send(:set_exit_status)
expect(model.exit_status).to be(1)
end
end
context "when the model failed with a non-fatal exception" do
before { allow(model).to receive(:exception).and_return(StandardError.new("non-fatal")) }
it "sets exit status to 2" do
model.send(:set_exit_status)
expect(model.exit_status).to be(2)
end
end
context "when the model failed with a fatal exception" do
before { allow(model).to receive(:exception).and_return(Exception.new("fatal")) }
it "sets exit status to 3" do
model.send(:set_exit_status)
expect(model.exit_status).to be(3)
end
end
end # describe '#set_exit_status'
describe "#log!" do
context "when action is :started" do
it "logs that the backup has started" do
expect(Backup::Logger).to receive(:info).with(
"Performing Backup for 'test label (test_trigger)'!\n" \
"[ backup #{Backup::VERSION} : #{RUBY_DESCRIPTION} ]"
)
model.send(:log!, :started)
end
end
context "when action is :finished" do
before { allow(model).to receive(:duration).and_return("01:02:03") }
context "when #exit_status is 0" do
before { allow(model).to receive(:exit_status).and_return(0) }
it "logs that the backup completed successfully" do
expect(Backup::Logger).to receive(:info).with(
"Backup for 'test label (test_trigger)' " \
"Completed Successfully in 01:02:03"
)
model.send(:log!, :finished)
end
end
context "when #exit_status is 1" do
before { allow(model).to receive(:exit_status).and_return(1) }
it "logs that the backup completed successfully with warnings" do
expect(Backup::Logger).to receive(:warn).with(
"Backup for 'test label (test_trigger)' " \
"Completed Successfully (with Warnings) in 01:02:03"
)
model.send(:log!, :finished)
end
end
context "when #exit_status is 2" do
let(:error_a) { double }
before do
allow(model).to receive(:exit_status).and_return(2)
allow(model).to receive(:exception).and_return(StandardError.new("non-fatal error"))
allow(error_a).to receive(:backtrace).and_return(["many", "backtrace", "lines"])
end
it "logs that the backup failed with a non-fatal exception" do
expect(Backup::Model::Error).to receive(:wrap).ordered do |err, msg|
expect(err.message).to eq("non-fatal error")
expect(msg).to match(/Backup for test label \(test_trigger\) Failed!/)
end.and_return(error_a)
expect(Backup::Logger).to receive(:error).ordered.with(error_a)
expect(Backup::Logger).to receive(:error).ordered.with(
"\nBacktrace:\n\s\smany\n\s\sbacktrace\n\s\slines\n\n"
)
expect(Backup::Cleaner).to receive(:warnings).ordered.with(model)
model.send(:log!, :finished)
end
end
context "when #exit_status is 3" do
let(:error_a) { double }
before do
allow(model).to receive(:exit_status).and_return(3)
allow(model).to receive(:exception).and_return(Exception.new("fatal error"))
allow(error_a).to receive(:backtrace).and_return(["many", "backtrace", "lines"])
end
it "logs that the backup failed with a fatal exception" do
expect(Backup::Model::FatalError).to receive(:wrap).ordered do |err, msg|
expect(err.message).to eq("fatal error")
expect(msg).to match(/Backup for test label \(test_trigger\) Failed!/)
end.and_return(error_a)
expect(Backup::Logger).to receive(:error).ordered.with(error_a)
expect(Backup::Logger).to receive(:error).ordered.with(
"\nBacktrace:\n\s\smany\n\s\sbacktrace\n\s\slines\n\n"
)
expect(Backup::Cleaner).to receive(:warnings).ordered.with(model)
model.send(:log!, :finished)
end
end
end
end # describe '#log!'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/logger_spec.rb | spec/logger_spec.rb | require "spec_helper"
module Backup
describe Logger do
let(:console_logger) { double("Console Logger") }
let(:logfile_logger) { double("Logfile Logger") }
let(:syslog_logger) { double("Syslog Logger") }
let(:default_loggers) { [console_logger, logfile_logger] }
# Note: spec_helper calls Logger.reset! before each example
before do
allow(Logger::Console).to receive(:new)
.with(kind_of(Logger::Console::Options))
.and_return(console_logger)
allow(Logger::Logfile).to receive(:new)
.with(kind_of(Logger::Logfile::Options))
.and_return(logfile_logger)
allow(Logger::Syslog).to receive(:new)
.with(kind_of(Logger::Syslog::Options))
.and_return(syslog_logger)
end
describe Logger::Message do
describe "#initialize" do
it "returns a new message object" do
Timecop.freeze do
msg = Logger::Message.new(Time.now, :log_level, ["message", "lines"])
expect(msg.time).to eq(Time.now)
expect(msg.level).to eq(:log_level)
expect(msg.lines).to eq(["message", "lines"])
end
end
end
describe "#formatted_lines" do
it "returns the message lines formatted" do
Timecop.freeze do
timestamp = Time.now.strftime("%Y/%m/%d %H:%M:%S")
msg = Logger::Message.new(Time.now, :log_level, ["message", "lines"])
expect(msg.formatted_lines).to eq([
"[#{timestamp}][log_level] message",
"[#{timestamp}][log_level] lines"
])
end
end
it "preserves blank lines in messages" do
Timecop.freeze do
timestamp = Time.now.strftime("%Y/%m/%d %H:%M:%S")
msg = Logger::Message.new(Time.now, :log_level, ["message", "", "lines"])
expect(msg.formatted_lines).to eq([
"[#{timestamp}][log_level] message",
"[#{timestamp}][log_level] ",
"[#{timestamp}][log_level] lines"
])
end
end
end
describe "#matches?" do
let(:message) do
Logger::Message.new(
:foo, :foo, ["line one of message", "line two of message"]
)
end
it "returns true if message lines match the given matchers" do
expect(message.matches?(["not", "one of"])).to be(true)
expect(message.matches?(["not", "message\nline two"])).to be(true)
expect(message.matches?(["not", /^line one/])).to be(true)
expect(message.matches?(["not", /two \w+ message$/])).to be(true)
end
it "returns false if no match is found" do
expect(message.matches?(["not", "three"])).to be(false)
expect(message.matches?(["not", /three/])).to be(false)
end
end
end # describe Logger::Message
describe ".configure" do
context "when the console and logfile loggers are enabled" do
before do
expect(Logger::Syslog).to receive(:new).never
Logger.info "line 1\nline 2"
Logger.configure do
console.quiet = false
logfile.enabled = true
syslog.enabled = false
end
end
it "sends messages to only the enabled loggers" do
expect(console_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
expect(logfile_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
expect(syslog_logger).to receive(:log).never
Logger.start!
end
end
context "when the logfile and syslog loggers are enabled" do
before do
expect(Logger::Console).to receive(:new).never
Logger.info "line 1\nline 2"
Logger.configure do
console.quiet = true
logfile.enabled = true
syslog.enabled = true
end
end
it "sends messages to only the enabled loggers" do
expect(console_logger).to receive(:log).never
expect(logfile_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
expect(syslog_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
Logger.start!
end
end
context "when the console and syslog loggers are enabled" do
before do
expect(Logger::Logfile).to receive(:new).never
Logger.info "line 1\nline 2"
Logger.configure do
console.quiet = false
logfile.enabled = false
syslog.enabled = true
end
end
it "sends messages to only the enabled loggers" do
expect(console_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
expect(logfile_logger).to receive(:log).never
expect(syslog_logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["line 1", "line 2"])
end
Logger.start!
end
end
# Note that this will only work for :warn messages
# sent *after* the Logger has been configured.
context "when warnings are ignored" do
before do
Logger.configure do
ignore_warning "one\nline two"
ignore_warning(/line\nline two/)
end
end
it "converts ignored :warn messages to :info messages" do
Logger.warn "message line one\nline two"
Logger.warn "first line\nline two of message"
Logger.warn "first line\nsecond line"
Logger.error "one of"
m1, m2, m3, m4 = Logger.messages
expect(m1.level).to be(:info)
expect(m2.level).to be(:info)
expect(m3.level).to be(:warn)
expect(m4.level).to be(:error)
expect(Logger.has_warnings?).to be(true)
expect(Logger.has_errors?).to be(true)
end
it "does not flag logger as having warnings" do
Logger.warn "message line one\nline two"
Logger.warn "first line\nline two of message"
expect(Logger.has_warnings?).to be(false)
end
end
end # describe '.configure'
describe ".start!" do
context "before the Logger is started" do
it "only stores the messages to be sent" do
default_loggers.each { |logger| expect(logger).to receive(:log).never }
Logger.info "a message"
expect(Logger.messages.first.lines).to eq(["a message"])
end
it "does not instantiate any loggers" do
expect(Logger::Console).to receive(:new).never
expect(Logger::Logfile).to receive(:new).never
expect(Logger::Syslog).to receive(:new).never
Logger.info "a message"
expect(Logger.send(:logger).instance_variable_get(:@loggers)).to be_empty
end
end
context "when Logger is started" do
before do
Logger.info "info message"
Logger.warn "warn message"
Logger.error "error message"
end
it "sends all messages sent before being started" do
Logger.messages.each do |msg|
default_loggers.each do |logger|
expect(logger).to receive(:log).ordered.with(msg)
end
end
Logger.start!
end
end
context "after the Logger is started" do
it "stores and sends messages" do
default_loggers.each do |logger|
expect(logger).to receive(:log) do |msg|
expect(msg.lines).to eq(["a message"])
end
end
Logger.start!
Logger.info "a message"
expect(Logger.messages.first.lines).to eq(["a message"])
end
it "instantiates all enabled loggers" do
Logger.start!
expect(Logger.send(:logger).instance_variable_get(:@loggers))
.to eq(default_loggers)
end
end
end # describe '.start!'
describe "log messaging methods" do
before do
expect(Logger::MUTEX).to receive(:synchronize).and_yield
end
describe ".info" do
it "sends messages with log level :info" do
Logger.info "info message"
msg = Logger.messages.last
expect(msg.level).to eq(:info)
expect(msg.lines).to eq(["info message"])
default_loggers.each { |logger| expect(logger).to receive(:log).with(msg) }
Logger.start!
end
end
describe ".warn" do
it "sends messages with log level :warn" do
Logger.warn "warn message"
msg = Logger.messages.last
expect(msg.level).to eq(:warn)
expect(msg.lines).to eq(["warn message"])
default_loggers.each { |logger| expect(logger).to receive(:log).with(msg) }
Logger.start!
end
end
describe ".error" do
it "sends messages with log level :error" do
Logger.error "error message"
msg = Logger.messages.last
expect(msg.level).to eq(:error)
expect(msg.lines).to eq(["error message"])
default_loggers.each { |logger| expect(logger).to receive(:log).with(msg) }
Logger.start!
end
end
it "accepts objects responding to #to_s" do
Logger.info StandardError.new("message")
msg = Logger.messages.last
expect(msg.level).to eq(:info)
expect(msg.lines).to eq(["message"])
end
it "preserves blank lines in messages" do
Logger.info "line one\n\nline two"
msg = Logger.messages.last
expect(msg.level).to eq(:info)
expect(msg.lines).to eq(["line one", "", "line two"])
end
it "logs messages with UTC time" do
Logger.info "message"
msg = Logger.messages.last
expect(msg.time).to be_utc
end
end # describe 'log messaging methods'
describe ".has_warnings?" do
context "when messages with :warn log level are sent" do
it "returns true" do
Logger.warn "warn message"
expect(Logger.has_warnings?).to eq(true)
end
end
context "when no messages with :warn log level are sent" do
it "returns false" do
Logger.info "info message"
Logger.error "error message"
expect(Logger.has_warnings?).to eq(false)
end
end
end
describe ".has_errors?" do
context "when messages with :error log level are sent" do
it "returns true" do
Logger.error "error message"
expect(Logger.has_errors?).to eq(true)
end
end
context "when no messages with :warn log level are sent" do
it "returns false" do
Logger.info "info message"
Logger.warn "warn message"
expect(Logger.has_errors?).to eq(false)
end
end
end
describe ".clear!" do
before do
Logger.info "info message"
Logger.warn "warn message"
Logger.error "error message"
expect(Logger.messages.count).to be(3)
expect(Logger.has_warnings?).to eq(true)
expect(Logger.has_errors?).to eq(true)
@initial_logger = Logger.instance_variable_get(:@logger)
Logger.clear!
@current_logger = Logger.instance_variable_get(:@logger)
end
it "clears all stored messages" do
expect(Logger.messages).to be_empty
end
it "resets has_warnings? to false" do
expect(Logger.has_warnings?).to eq(false)
end
it "resets has_errors? to false" do
expect(Logger.has_errors?).to eq(false)
end
it "replaces the logger" do
expect(@current_logger).to be_a(Backup::Logger)
expect(@current_logger).to_not be(@initial_logger)
end
it "starts the new logger" do
expect(@current_logger.instance_variable_get(:@loggers)).to eq(default_loggers)
end
end
describe ".abort!" do
before do
allow(Logger::Console).to receive(:new)
.with(no_args)
.and_return(console_logger)
expect(Logger::Logfile).to receive(:new).never
expect(Logger::Syslog).to receive(:new).never
Logger.info "info message"
Logger.warn "warn message"
Logger.error "error message"
end
it "dumps all messages via a new console logger" do
expect(logfile_logger).to receive(:log).never
expect(console_logger).to receive(:log).exactly(3).times
Logger.abort!
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/splitter_spec.rb | spec/splitter_spec.rb | require "spec_helper"
module Backup
describe Splitter do
let(:model) { Model.new(:test_trigger, "test label") }
let(:package) { model.package }
let(:splitter) { Splitter.new(model, 250, 2) }
let(:splitter_long_suffix) { Splitter.new(model, 250, 3) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Splitter).to receive(:utility).with(:split).and_return("split")
end
# Note: BSD split will not accept a 'M' suffix for the byte size
# e.g. split -a 2 -b 250M
describe "#initialize" do
it "sets instance variables" do
expect(splitter.package).to be package
expect(splitter.chunk_size).to be 250
expect(splitter.suffix_length).to be 2
expect(splitter_long_suffix.package).to be package
expect(splitter_long_suffix.chunk_size).to be 250
expect(splitter_long_suffix.suffix_length).to be 3
end
end
describe "#split_with" do
let(:given_block) { double }
let(:block) { ->(arg) { given_block.got(arg) } }
shared_examples "split suffix handling" do
context "when final package was larger than chunk_size" do
it "updates chunk_suffixes for the package" do
suffixes = ["a" * splitter.suffix_length] * 2
suffixes.last.next!
allow(splitter).to receive(:chunks).and_return(
suffixes.map { |s| "/tmp/test_trigger.tar-#{s}" }
)
expect(given_block).to receive(:got).ordered.with(
"split -a #{splitter.suffix_length} -b 250m - " \
"'#{File.join(Config.tmp_path, "test_trigger.tar-")}'"
)
expect(FileUtils).to receive(:mv).never
splitter.split_with(&block)
expect(package.chunk_suffixes).to eq suffixes
end
end
context "when final package was not larger than chunk_size" do
it "removes the suffix from the single file output by split" do
suffix = "a" * splitter.suffix_length
allow(splitter).to receive(:chunks).and_return(["/tmp/test_trigger.tar-#{suffix}"])
expect(given_block).to receive(:got).ordered.with(
"split -a #{splitter.suffix_length} -b 250m - " \
"'#{File.join(Config.tmp_path, "test_trigger.tar-")}'"
)
expect(FileUtils).to receive(:mv).ordered.with(
File.join(Config.tmp_path, "test_trigger.tar-#{suffix}"),
File.join(Config.tmp_path, "test_trigger.tar")
)
splitter.split_with(&block)
expect(package.chunk_suffixes).to eq []
end
end
end
context "with suffix_length of 2" do
let(:splitter) { Splitter.new(model, 250, 2) }
include_examples "split suffix handling"
end
context "with suffix_length of 3" do
let(:splitter) { Splitter.new(model, 250, 3) }
include_examples "split suffix handling"
end
end # describe '#split_with'
describe "#chunks" do
before do
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
Config.send(:update, root_path: @tmpdir)
FileUtils.mkdir_p(Config.tmp_path)
end
after do
FileUtils.rm_r(@tmpdir, force: true, secure: true)
end
it "should return a sorted array of chunked file paths" do
files = [
"test_trigger.tar-aa",
"test_trigger.tar-ab",
"other_trigger.tar-aa"
].map { |name| File.join(Config.tmp_path, name) }
FileUtils.touch(files)
expect(splitter.send(:chunks)).to eq files[0..1]
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/utilities_spec.rb | spec/utilities_spec.rb | require "spec_helper"
describe Backup::Utilities do
let(:utilities) { Backup::Utilities }
let(:helpers) { Module.new.extend(Backup::Utilities::Helpers) }
# Note: spec_helper resets Utilities before each example
describe ".configure" do
before do
allow(File).to receive(:executable?).and_return(true)
allow(utilities).to receive(:gnu_tar?).and_call_original
allow(utilities).to receive(:utility).and_call_original
utilities.configure do
# General Utilites
tar "/path/to/tar"
tar_dist :gnu # or :bsd
cat "/path/to/cat"
split "/path/to/split"
sudo "/path/to/sudo"
chown "/path/to/chown"
hostname "/path/to/hostname"
# Compressors
gzip "/path/to/gzip"
bzip2 "/path/to/bzip2"
# Database Utilities
mongo "/path/to/mongo"
mongodump "/path/to/mongodump"
mysqldump "/path/to/mysqldump"
pg_dump "/path/to/pg_dump"
pg_dumpall "/path/to/pg_dumpall"
redis_cli "/path/to/redis-cli"
riak_admin "/path/to/riak-admin"
innobackupex "/path/to/innobackupex"
# Encryptors
gpg "/path/to/gpg"
openssl "/path/to/openssl"
# Syncer and Storage
rsync "/path/to/rsync"
ssh "/path/to/ssh"
# Notifiers
sendmail "/path/to/sendmail"
exim "/path/to/exim"
send_nsca "/path/to/send_nsca"
zabbix_sender "/path/to/zabbix_sender"
end
end
it "allows utilities to be configured" do
utilities::UTILITIES_NAMES.each do |name|
expect(helpers.send(:utility, name)).to eq("/path/to/#{name}")
end
end
it "presets gnu_tar? value to true" do
expect(utilities).to_not receive(:run)
expect(utilities.gnu_tar?).to be(true)
expect(helpers.send(:gnu_tar?)).to be(true)
end
it "presets gnu_tar? value to false" do
utilities.configure do
tar_dist :bsd
end
expect(utilities).to_not receive(:run)
expect(utilities.gnu_tar?).to be(false)
expect(helpers.send(:gnu_tar?)).to be(false)
end
it "expands relative paths" do
utilities.configure do
tar "my_tar"
end
path = File.expand_path("my_tar")
expect(utilities.utilities["tar"]).to eq(path)
expect(helpers.send(:utility, :tar)).to eq(path)
end
it "raises Error if utility is not found or executable" do
allow(File).to receive(:executable?).and_return(false)
expect do
utilities.configure do
tar "not_found"
end
end.to raise_error(Backup::Utilities::Error)
end
end # describe '.configure'
describe ".gnu_tar?" do
before do
allow(utilities).to receive(:gnu_tar?).and_call_original
end
it "determines when tar is GNU tar" do
expect(utilities).to receive(:utility).with(:tar).and_return("tar")
expect(utilities).to receive(:run).with("tar --version").and_return(
'tar (GNU tar) 1.26\nCopyright (C) 2011 Free Software Foundation, Inc.'
)
expect(utilities.gnu_tar?).to be(true)
expect(utilities.instance_variable_get(:@gnu_tar)).to be(true)
end
it "determines when tar is BSD tar" do
expect(utilities).to receive(:utility).with(:tar).and_return("tar")
expect(utilities).to receive(:run).with("tar --version").and_return(
"bsdtar 3.0.4 - libarchive 3.0.4"
)
expect(utilities.gnu_tar?).to be(false)
expect(utilities.instance_variable_get(:@gnu_tar)).to be(false)
end
it "returns cached true value" do
utilities.instance_variable_set(:@gnu_tar, true)
expect(utilities).to_not receive(:run)
expect(utilities.gnu_tar?).to be(true)
end
it "returns cached false value" do
utilities.instance_variable_set(:@gnu_tar, false)
expect(utilities).to_not receive(:run)
expect(utilities.gnu_tar?).to be(false)
end
end
end # describe Backup::Utilities
describe Backup::Utilities::Helpers do
let(:helpers) { Module.new.extend(Backup::Utilities::Helpers) }
let(:utilities) { Backup::Utilities }
describe "#utility" do
before do
allow(utilities).to receive(:utility).and_call_original
end
context "when a system path for the utility is available" do
it "should return the system path with newline removed" do
expect(utilities).to receive(:`).with("which 'foo' 2>/dev/null").and_return("system_path\n")
expect(helpers.send(:utility, :foo)).to eq("system_path")
end
it "should cache the returned path" do
expect(utilities).to receive(:`).once.with("which 'cache_me' 2>/dev/null")
.and_return("cached_path\n")
expect(helpers.send(:utility, :cache_me)).to eq("cached_path")
expect(helpers.send(:utility, :cache_me)).to eq("cached_path")
end
it "should return a mutable copy of the path" do
expect(utilities).to receive(:`).once.with("which 'cache_me' 2>/dev/null")
.and_return("cached_path\n")
helpers.send(:utility, :cache_me) << "foo"
expect(helpers.send(:utility, :cache_me)).to eq("cached_path")
end
it "should cache the value for all extended objects" do
expect(utilities).to receive(:`).once.with("which 'once_only' 2>/dev/null")
.and_return("cached_path\n")
expect(helpers.send(:utility, :once_only)).to eq("cached_path")
result = Class.new.extend(Backup::Utilities::Helpers).send(
:utility, :once_only
)
expect(result).to eq("cached_path")
end
end
it "should raise an error if the utiilty is not found" do
expect(utilities).to receive(:`).with("which 'unknown' 2>/dev/null").and_return("\n")
expect do
helpers.send(:utility, :unknown)
end.to raise_error(Backup::Utilities::Error, /Could not locate 'unknown'/)
end
it "should raise an error if name is nil" do
expect(utilities).to_not receive(:`)
expect do
helpers.send(:utility, nil)
end.to raise_error(Backup::Utilities::Error, "Utilities::Error: Utility Name Empty")
end
it "should raise an error if name is empty" do
expect(utilities).to_not receive(:`)
expect do
helpers.send(:utility, " ")
end.to raise_error(Backup::Utilities::Error, "Utilities::Error: Utility Name Empty")
end
end # describe '#utility'
describe "#command_name" do
it "returns the base command name" do
cmd = "/path/to/a/command"
expect(helpers.send(:command_name, cmd)).to eq "command"
cmd = "/path/to/a/command with_args"
expect(helpers.send(:command_name, cmd)).to eq "command"
cmd = "/path/to/a/command with multiple args"
expect(helpers.send(:command_name, cmd)).to eq "command"
# should not happen, but should handle it
cmd = "command args"
expect(helpers.send(:command_name, cmd)).to eq "command"
cmd = "command"
expect(helpers.send(:command_name, cmd)).to eq "command"
end
it "returns command name run with sudo" do
cmd = "/path/to/sudo -n /path/to/command args"
expect(helpers.send(:command_name, cmd))
.to eq "sudo -n command"
cmd = "/path/to/sudo -n -u username /path/to/command args"
expect(helpers.send(:command_name, cmd))
.to eq "sudo -n -u username command"
# should not happen, but should handle it
cmd = "/path/to/sudo -n -u username command args"
expect(helpers.send(:command_name, cmd))
.to eq "sudo -n -u username command args"
end
it "strips environment variables" do
cmd = "FOO='bar' BAR=foo /path/to/a/command with_args"
expect(helpers.send(:command_name, cmd)).to eq "command"
end
end # describe '#command_name'
describe "#run" do
let(:stdout_io) { double(IO, read: stdout_messages) }
let(:stderr_io) { double(IO, read: stderr_messages) }
let(:stdin_io) { double(IO, close: nil) }
let(:process_status) { double(Process::Status, success?: process_success) }
let(:command) { "/path/to/cmd_name arg1 arg2" }
before do
allow(utilities).to receive(:run).and_call_original
end
context "when the command is successful" do
let(:process_success) { true }
before do
expect(Backup::Logger).to receive(:info).with(
"Running system utility 'cmd_name'..."
)
expect(Open4).to receive(:popen4).with(command).and_yield(
nil, stdin_io, stdout_io, stderr_io
).and_return(process_status)
end
context "and generates no messages" do
let(:stdout_messages) { "" }
let(:stderr_messages) { "" }
it "should return stdout and generate no additional log messages" do
expect(helpers.send(:run, command)).to eq("")
end
end
context "and generates only stdout messages" do
let(:stdout_messages) { "out line1\nout line2\n" }
let(:stderr_messages) { "" }
it "should return stdout and log the stdout messages" do
expect(Backup::Logger).to receive(:info).with(
"cmd_name:STDOUT: out line1\ncmd_name:STDOUT: out line2"
)
expect(helpers.send(:run, command)).to eq(stdout_messages.strip)
end
end
context "and generates only stderr messages" do
let(:stdout_messages) { "" }
let(:stderr_messages) { "err line1\nerr line2\n" }
it "should return stdout and log the stderr messages" do
expect(Backup::Logger).to receive(:warn).with(
"cmd_name:STDERR: err line1\ncmd_name:STDERR: err line2"
)
expect(helpers.send(:run, command)).to eq("")
end
end
context "and generates messages on both stdout and stderr" do
let(:stdout_messages) { "out line1\nout line2\n" }
let(:stderr_messages) { "err line1\nerr line2\n" }
it "should return stdout and log both stdout and stderr messages" do
expect(Backup::Logger).to receive(:info).with(
"cmd_name:STDOUT: out line1\ncmd_name:STDOUT: out line2"
)
expect(Backup::Logger).to receive(:warn).with(
"cmd_name:STDERR: err line1\ncmd_name:STDERR: err line2"
)
expect(helpers.send(:run, command)).to eq(stdout_messages.strip)
end
end
end # context 'when the command is successful'
context "when the command is not successful" do
let(:process_success) { false }
let(:message_head) do
"Utilities::Error: 'cmd_name' failed with exit status: 1\n"
end
before do
expect(Backup::Logger).to receive(:info).with(
"Running system utility 'cmd_name'..."
)
expect(Open4).to receive(:popen4).with(command).and_yield(
nil, stdin_io, stdout_io, stderr_io
).and_return(process_status)
allow(process_status).to receive(:exitstatus).and_return(1)
end
context "and generates no messages" do
let(:stdout_messages) { "" }
let(:stderr_messages) { "" }
it "should raise an error reporting no messages" do
expect do
helpers.send(:run, command)
end.to raise_error StandardError, "#{message_head}"\
" STDOUT Messages: None\n" \
" STDERR Messages: None"
end
end
context "and generates only stdout messages" do
let(:stdout_messages) { "out line1\nout line2\n" }
let(:stderr_messages) { "" }
it "should raise an error and report the stdout messages" do
expect do
helpers.send(:run, command)
end.to raise_error StandardError, "#{message_head}" \
" STDOUT Messages: \n" \
" out line1\n" \
" out line2\n" \
" STDERR Messages: None"
end
end
context "and generates only stderr messages" do
let(:stdout_messages) { "" }
let(:stderr_messages) { "err line1\nerr line2\n" }
it "should raise an error and report the stderr messages" do
expect do
helpers.send(:run, command)
end.to raise_error StandardError, "#{message_head}" \
" STDOUT Messages: None\n" \
" STDERR Messages: \n" \
" err line1\n" \
" err line2"
end
end
context "and generates messages on both stdout and stderr" do
let(:stdout_messages) { "out line1\nout line2\n" }
let(:stderr_messages) { "err line1\nerr line2\n" }
it "should raise an error and report the stdout and stderr messages" do
expect do
helpers.send(:run, command)
end.to raise_error StandardError, "#{message_head}" \
" STDOUT Messages: \n" \
" out line1\n" \
" out line2\n" \
" STDERR Messages: \n" \
" err line1\n" \
" err line2"
end
end
end # context 'when the command is not successful'
context "when the system fails to execute the command" do
before do
expect(Backup::Logger).to receive(:info).with(
"Running system utility 'cmd_name'..."
)
expect(Open4).to receive(:popen4).and_raise("exec call failed")
end
it "should raise an error wrapping the system error raised" do
expect do
helpers.send(:run, command)
end.to raise_error(Backup::Utilities::Error) { |err|
expect(err.message).to match("Failed to execute 'cmd_name'")
expect(err.message).to match("RuntimeError: exec call failed")
}
end
end # context 'when the system fails to execute the command'
end # describe '#run'
describe "gnu_tar?" do
it "returns true if tar_dist is gnu" do
expect(Backup::Utilities).to receive(:gnu_tar?).and_return(true)
expect(helpers.send(:gnu_tar?)).to be(true)
end
it "returns false if tar_dist is bsd" do
expect(Backup::Utilities).to receive(:gnu_tar?).and_return(false)
expect(helpers.send(:gnu_tar?)).to be(false)
end
end
end # describe Backup::Utilities::Helpers
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/packager_spec.rb | spec/packager_spec.rb | require "spec_helper"
describe "Backup::Packager" do
let(:packager) { Backup::Packager }
it "should include Utilities::Helpers" do
expect(packager.instance_eval("class << self; self; end")
.include?(Backup::Utilities::Helpers)).to eq(true)
end
describe "#package!" do
let(:model) { double }
let(:package) { double }
let(:encryptor) { double }
let(:splitter) { double }
let(:pipeline) { double }
let(:procedure) { double }
let(:s) { sequence "" }
context "when pipeline command is successful" do
it "should setup variables and perform packaging procedures" do
expect(model).to receive(:package).ordered.and_return(package)
expect(model).to receive(:encryptor).ordered.and_return(encryptor)
expect(model).to receive(:splitter).ordered.and_return(splitter)
expect(Backup::Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(Backup::Logger).to receive(:info).ordered.with(
"Packaging the backup files..."
)
expect(packager).to receive(:procedure).ordered.and_return(procedure)
expect(procedure).to receive(:call).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(Backup::Logger).to receive(:info).ordered.with(
"Packaging Complete!"
)
packager.package!(model)
expect(packager.instance_variable_get(:@package)).to be(package)
expect(packager.instance_variable_get(:@encryptor)).to be(encryptor)
expect(packager.instance_variable_get(:@splitter)).to be(splitter)
expect(packager.instance_variable_get(:@pipeline)).to be(pipeline)
end
end # context 'when pipeline command is successful'
context "when pipeline command is not successful" do
it "should raise an error" do
expect(model).to receive(:package).ordered.and_return(package)
expect(model).to receive(:encryptor).ordered.and_return(encryptor)
expect(model).to receive(:splitter).ordered.and_return(splitter)
expect(Backup::Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(Backup::Logger).to receive(:info).ordered.with(
"Packaging the backup files..."
)
expect(packager).to receive(:procedure).ordered.and_return(procedure)
expect(procedure).to receive(:call).ordered
expect(pipeline).to receive(:success?).ordered.and_return(false)
expect(pipeline).to receive(:error_messages).ordered.and_return("pipeline_errors")
expect do
packager.package!(model)
end.to raise_error(
Backup::Packager::Error,
"Packager::Error: Failed to Create Backup Package\n" \
" pipeline_errors"
)
expect(packager.instance_variable_get(:@package)).to be(package)
expect(packager.instance_variable_get(:@encryptor)).to be(encryptor)
expect(packager.instance_variable_get(:@splitter)).to be(splitter)
expect(packager.instance_variable_get(:@pipeline)).to be(pipeline)
end
end # context 'when pipeline command is successful'
end # describe '#package!'
describe "#procedure" do
module Fake
def self.stack_trace
@stack ||= []
end
class Encryptor
def encrypt_with
Fake.stack_trace << :encryptor_before
yield "encryption_command", ".enc"
Fake.stack_trace << :encryptor_after
end
end
class Splitter
def split_with
Fake.stack_trace << :splitter_before
yield "splitter_command"
Fake.stack_trace << :splitter_after
end
end
class Package
attr_accessor :trigger, :extension
def basename
"base_filename." + extension
end
end
end
let(:package) { Fake::Package.new }
let(:encryptor) { Fake::Encryptor.new }
let(:splitter) { Fake::Splitter.new }
let(:pipeline) { double }
let(:s) { sequence "" }
before do
Fake.stack_trace.clear
expect(packager).to receive(:utility).with(:tar).and_return("tar")
packager.instance_variable_set(:@package, package)
packager.instance_variable_set(:@pipeline, pipeline)
package.trigger = "model_trigger"
package.extension = "tar"
end
context "when no encryptor or splitter are defined" do
it "should package the backup without encryption into a single file" do
expect(packager).to receive(:utility).with(:cat).and_return("cat")
packager.instance_variable_set(:@encryptor, nil)
packager.instance_variable_set(:@splitter, nil)
expect(pipeline).to receive(:add).ordered.with(
"tar -cf - -C '#{Backup::Config.tmp_path}' 'model_trigger'", [0, 1]
)
expect(pipeline).to receive(:<<).ordered.with(
"cat > #{File.join(Backup::Config.tmp_path, "base_filename.tar")}"
)
expect(pipeline).to receive(:run).ordered
packager.send(:procedure).call
end
end
context "when only an encryptor is configured" do
it "should package the backup with encryption" do
expect(packager).to receive(:utility).with(:cat).and_return("cat")
packager.instance_variable_set(:@encryptor, encryptor)
packager.instance_variable_set(:@splitter, nil)
expect(pipeline).to receive(:add).ordered.with(
"tar -cf - -C '#{Backup::Config.tmp_path}' 'model_trigger'", [0, 1]
)
expect(pipeline).to receive(:<<).ordered.with("encryption_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > #{File.join(Backup::Config.tmp_path, "base_filename.tar.enc")}"
)
expect(pipeline).to receive(:run).ordered do
Fake.stack_trace << :command_executed
true
end
packager.send(:procedure).call
expect(Fake.stack_trace).to eq([
:encryptor_before, :command_executed, :encryptor_after
])
end
end
context "when only a splitter is configured" do
it "should package the backup without encryption through the splitter" do
expect(packager).to receive(:utility).with(:cat).never
packager.instance_variable_set(:@encryptor, nil)
packager.instance_variable_set(:@splitter, splitter)
expect(pipeline).to receive(:add).ordered.with(
"tar -cf - -C '#{Backup::Config.tmp_path}' 'model_trigger'", [0, 1]
)
expect(pipeline).to receive(:<<).ordered.with("splitter_command")
expect(pipeline).to receive(:run).ordered do
Fake.stack_trace << :command_executed
true
end
packager.send(:procedure).call
expect(Fake.stack_trace).to eq([
:splitter_before, :command_executed, :splitter_after
])
end
end
context "when both an encryptor and a splitter are configured" do
it "should package the backup with encryption through the splitter" do
expect(packager).to receive(:utility).with(:cat).never
packager.instance_variable_set(:@encryptor, encryptor)
packager.instance_variable_set(:@splitter, splitter)
expect(pipeline).to receive(:add).ordered.with(
"tar -cf - -C '#{Backup::Config.tmp_path}' 'model_trigger'", [0, 1]
)
expect(pipeline).to receive(:<<).ordered.with("encryption_command")
expect(pipeline).to receive(:<<).ordered.with("splitter_command")
expect(pipeline).to receive(:run).ordered do
Fake.stack_trace << :command_executed
true
end
packager.send(:procedure).call
expect(Fake.stack_trace).to eq([
:encryptor_before, :splitter_before,
:command_executed,
:splitter_after, :encryptor_after
])
expect(package.extension).to eq("tar.enc")
end
end
end # describe '#procedure'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/errors_spec.rb | spec/errors_spec.rb | require "spec_helper"
module Backup
describe "Backup Errors" do
shared_examples "a nested exception" do
let(:class_name) { described_class.name.sub(/^Backup::/, "") }
context "with stubbed constants" do
before do
ErrorA = Class.new(described_class)
ErrorB = Class.new(described_class)
ErrorC = Class.new(described_class)
end
after do
Backup.send(:remove_const, :ErrorA)
Backup.send(:remove_const, :ErrorB)
Backup.send(:remove_const, :ErrorC)
end
it "allows errors to cascade through the system" do
expect do
begin
begin
begin
raise StandardError, "error message"
rescue => err
raise ErrorA.wrap(err), <<-EOS
an error occurred in Zone A
the following error should give a reason
EOS
end
rescue Exception => err
raise ErrorB.wrap(err)
end
rescue Exception => err
raise ErrorC.wrap(err), "an error occurred in Zone C"
end
end.to raise_error described_class, "ErrorC: an error occurred in Zone C\n" \
"--- Wrapped Exception ---\n" \
"ErrorB\n" \
"--- Wrapped Exception ---\n" \
"ErrorA: an error occurred in Zone A\n" \
" \n" \
" the following error should give a reason\n" \
"--- Wrapped Exception ---\n" \
"StandardError: error message"
end
end
context "with no wrapped exception" do
describe "#initialize" do
it "sets message to class name when not given" do
err = described_class.new
expect(err.message).to eq class_name
end
it "prefixes given message with class name" do
err = described_class.new("a message")
expect(err.message).to eq class_name + ": a message"
end
it "formats message" do
err = described_class.new(<<-EOS)
error message
this is a multi-line message
the above blank line will remain
the blank line below will not
EOS
expect(err.message).to eq(
"#{class_name}: error message\n" \
" this is a multi-line message\n" \
" \n" \
" the above blank line will remain\n" \
" the blank line below will not"
)
end
# This usage wouldn't be expected if using this Error class,
# since you would typically use .wrap, but this is the default
# behavior for Ruby if you want to raise an exception that takes
# it's message from another exception.
#
# begin
# ...code...
# rescue => other_error
# raise MyError, other_error
# end
#
# Under 1.8.7/1.9.2, the message is the result of other_err.inspect,
# but under 1.9.3 you get other_err.message.
# This Error class uses other_error.message under all versions.
# Note that this will format the message.
it "accepts message from another error" do
other_err = StandardError.new " error\nmessage "
err = described_class.new(other_err)
expect(err.message).to eq class_name + ": error\n message"
end
end # describe '#initialize'
# i.e. use of raise with Error class
describe ".exception" do
it "sets message to class name when not given" do
expect do
raise described_class
end.to raise_error described_class, class_name
end
it "prefixes given message with class name" do
expect do
raise described_class, "a message"
end.to raise_error described_class, "#{class_name}: a message"
end
it "formats message" do
expect do
raise described_class, <<-EOS
error message
this is a multi-line message
the above blank line will remain
the blank line below will not
EOS
end.to raise_error described_class, "#{class_name}: error message\n" \
" this is a multi-line message\n" \
" \n" \
" the above blank line will remain\n" \
" the blank line below will not"
end
# see note under '#initialize'
it "accepts message from another error" do
expect do
begin
raise StandardError, " wrapped error\nmessage "
rescue => err
raise described_class, err
end
end.to raise_error described_class, "#{class_name}: wrapped error\n message"
end
it "allows backtrace to be set (with message)" do
expect do
raise described_class, "error message", ["bt"]
end.to raise_error { |err|
expect(err.message).to eq class_name + ": error message"
expect(err.backtrace).to eq ["bt"]
}
end
it "allows backtrace to be set (without message)" do
expect do
raise described_class, nil, ["bt"]
end.to raise_error { |err|
expect(err.message).to eq class_name
expect(err.backtrace).to eq ["bt"]
}
end
end # describe '.exception'
# i.e. use of raise with an instance of Error
describe "#exception" do
it "sets message to class name when not given" do
expect do
err = described_class.new
raise err
end.to raise_error { |err|
expect(err.message).to eq class_name
}
end
it "prefixes given message with class name" do
expect do
err = described_class.new "a message"
raise err
end.to raise_error { |err|
expect(err.message).to eq class_name + ": a message"
}
end
it "formats message" do
expect do
err = described_class.new(<<-EOS)
error message
this is a multi-line message
the above blank line will remain
the blank line below will not
EOS
raise err
end.to raise_error { |err|
expect(err.message).to eq(
"#{class_name}: error message\n" \
" this is a multi-line message\n" \
" \n" \
" the above blank line will remain\n" \
" the blank line below will not"
)
}
end
it "allows message to be overridden" do
expect do
err = described_class.new "error message"
raise err, "new message"
end.to raise_error { |err|
expect(err.message).to eq class_name + ": new message"
}
end
# see note under '#initialize'
it "accepts message from another error" do
expect do
begin
raise StandardError, " wrapped error\nmessage "
rescue => err
err2 = described_class.new "message to be replaced"
raise err2, err
end
end.to raise_error { |err|
expect(err.message).to eq(
"#{class_name}: wrapped error\n" \
" message"
)
}
end
it "allows backtrace to be set (with new message)" do
initial_error = nil
expect do
err = described_class.new "error message"
initial_error = err
raise err, "new message", ["bt"]
end.to raise_error { |err|
expect(err.message).to eq class_name + ": new message"
expect(err.backtrace).to eq ["bt"]
# when a message is given, a new error is returned
expect(err).not_to be initial_error
}
end
it "allows backtrace to be set (without new message)" do
initial_error = nil
expect do
err = described_class.new "error message"
initial_error = err
raise err, nil, ["bt"]
end.to raise_error { |err|
expect(err.backtrace).to eq ["bt"]
expect(err.message).to eq class_name + ": error message"
# when no message is given, returns self
expect(err).to be initial_error
}
end
it "retains backtrace (with message given)" do
initial_error = nil
expect do
begin
raise described_class, "foo", ["bt"]
rescue Exception => err
initial_error = err
raise err, "bar"
end
end.to raise_error { |err|
expect(err.backtrace).to eq ["bt"]
expect(err.message).to eq class_name + ": bar"
# when a message is given, a new error is returned
expect(err).not_to be initial_error
}
end
it "retains backtrace (without message given)" do
initial_error = nil
expect do
begin
raise described_class, "foo", ["bt"]
rescue Exception => err
initial_error = err
raise err
end
end.to raise_error { |err|
expect(err.backtrace).to eq ["bt"]
# when no message is given, returns self
expect(err).to be initial_error
}
end
end # describe '#exception'
end # context 'with no wrapped exception'
context "with a wrapped exception" do
describe ".wrap" do
it "wraps #initialize to reverse parameters" do
ex = double
expect(described_class).to receive(:new).with(nil, ex)
expect(described_class).to receive(:new).with("error message", ex)
described_class.wrap(ex)
described_class.wrap(ex, "error message")
end
it "appends wrapped error message" do
orig_err = StandardError.new "wrapped error message"
err = described_class.wrap(orig_err, "error message")
expect(err.message).to eq(
"#{class_name}: error message\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error message"
)
end
it "leaves wrapped error message formatting as-is" do
orig_err = StandardError.new " wrapped error\nmessage "
err = described_class.wrap(orig_err, <<-EOS)
error message
this error is wrapping another error
EOS
expect(err.message).to eq(
"#{class_name}: error message\n" \
" \n" \
" this error is wrapping another error\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error\n" \
"message "
)
end
end # describe '.wrap'
# i.e. use of raise with an instance of Error
describe "#exception" do
it "appends wrapped error message" do
expect do
begin
raise StandardError, " wrapped error\nmessage "
rescue => err
raise described_class.wrap(err), <<-EOS
error message
this error is wrapping another error
EOS
end
end.to raise_error described_class, "#{class_name}: error message\n" \
" \n" \
" this error is wrapping another error\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error\n" \
"message "
end
# see note under '#initialize'
it "accepts message from another error" do
expect do
begin
raise StandardError, " wrapped error\nmessage "
rescue => err
raise described_class.wrap(err), err
end
end.to raise_error described_class, "#{class_name}: wrapped error\n" \
" message\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error\n" \
"message "
end
it "uses backtrace from wrapped exception" do
expect do
begin
raise StandardError, "wrapped error message", ["bt"]
rescue => err
raise described_class.wrap(err), "error message"
end
end.to raise_error { |err|
expect(err.message).to eq(
"#{class_name}: error message\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error message"
)
expect(err.backtrace).to eq ["bt"]
}
end
it "allows wrapped error backtrace to be overridden" do
expect do
begin
raise StandardError, "wrapped error message", ["bt"]
rescue => err
raise described_class.wrap(err), "error message", ["new bt"]
end
end.to raise_error { |err|
expect(err.message).to eq(
"#{class_name}: error message\n" \
"--- Wrapped Exception ---\n" \
"StandardError: wrapped error message"
)
expect(err.backtrace).to eq ["new bt"]
}
end
# Since a new message is given, a new error will be created
# which would take the bt from the wrapped exception (nil).
# So, the existing bt is set on the new error in this case.
# With no message given (a simple re-raise), #exception would simply
# return self, in which case the bt set by raise would remain.
# It would be rare for a wrapped exception not to have a bt.
it "retains backtrace if wrapped error has none" do
expect do
begin
err = StandardError.new "foo"
raise described_class.wrap(err), nil, ["bt"]
rescue Exception => err2
raise err2, "bar"
end
end.to raise_error { |err|
expect(err.backtrace).to eq ["bt"]
}
end
end # describe '#exception'
end # context 'with a wrapped exception'
end # shared_examples 'a nested exception'
describe Error do
it_behaves_like "a nested exception"
end
describe FatalError do
it_behaves_like "a nested exception"
end
end # describe 'Backup Errors'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/config_spec.rb | spec/config_spec.rb | require "spec_helper"
module Backup
describe Config do
let(:config) { Config }
let(:major_gem_version) { Gem::Version.new(Backup::VERSION).segments.first }
# Note: spec_helper resets Config before each example
describe "#load" do
it "loads config.rb and models" do
allow(File).to receive(:exist?).and_return(true)
allow(File).to receive(:read).and_return("# Backup v#{major_gem_version}.x Configuration\n@loaded << :config")
allow(File).to receive(:directory?).and_return(true)
allow(Dir).to receive(:[]).and_return(["model_a", "model_b"])
expect(File).to receive(:read).with("model_a").and_return("@loaded << :model_a")
expect(File).to receive(:read).with("model_b").and_return("@loaded << :model_b")
dsl = config::DSL.new
dsl.instance_variable_set(:@loaded, [])
allow(config::DSL).to receive(:new).and_return(dsl)
config.load
expect(dsl.instance_variable_get(:@loaded)).to eq(
[:config, :model_a, :model_b]
)
end
it "raises an error if config_file does not exist" do
config_file = File.expand_path("foo")
expect do
config.load(config_file: config_file)
end.to raise_error config::Error, /Could not find configuration file: '#{ config_file }'/
end
it "raises an error if config file version is invalid" do
allow(File).to receive(:exist?).and_return(true)
allow(File).to receive(:read).and_return("# Backup v3.x Configuration")
allow(File).to receive(:directory?).and_return(true)
allow(Dir).to receive(:[]).and_return([])
expect do
config.load(config_file: "/foo")
end.to raise_error config::Error, /Invalid Configuration File/
end
describe "setting config paths from command line options" do
let(:default_root_path) do
File.join(File.expand_path(ENV["HOME"] || ""), "Backup")
end
before do
allow(File).to receive(:exist?).and_return(true)
allow(File).to receive(:read).and_return("# Backup v#{major_gem_version}.x Configuration")
allow(File).to receive(:directory?).and_return(true)
allow(Dir).to receive(:[]).and_return([])
end
context "when no options are given" do
it "uses defaults" do
config.load
config::DEFAULTS.each do |attr, ending|
expect(config.send(attr)).to eq File.join(default_root_path, ending)
end
end
end
context "when no root_path is given" do
it "updates the given paths" do
options = { data_path: "/my/data" }
config.load(options)
expect(config.root_path).to eq default_root_path
expect(config.tmp_path).to eq(
File.join(default_root_path, config::DEFAULTS[:tmp_path])
)
expect(config.data_path).to eq "/my/data"
end
it "expands relative paths using PWD" do
options = {
tmp_path: "my_tmp",
data_path: "/my/data"
}
config.load(options)
expect(config.root_path).to eq default_root_path
expect(config.tmp_path).to eq File.expand_path("my_tmp")
expect(config.data_path).to eq "/my/data"
end
it "overrides config.rb settings only for the paths given" do
expect_any_instance_of(config::DSL).to receive(:_config_options).and_return(
root_path: "/orig/root",
tmp_path: "/orig/root/my_tmp",
data_path: "/orig/root/my_data"
)
options = { tmp_path: "new_tmp" }
config.load(options)
expect(config.root_path).to eq "/orig/root"
# the root_path set in config.rb will not apply
# to relative paths given on the command line.
expect(config.tmp_path).to eq File.expand_path("new_tmp")
expect(config.data_path).to eq "/orig/root/my_data"
end
end
context "when a root_path is given" do
it "updates all paths" do
options = {
root_path: "/my/root",
tmp_path: "my_tmp",
data_path: "/my/data"
}
config.load(options)
expect(config.root_path).to eq "/my/root"
expect(config.tmp_path).to eq "/my/root/my_tmp"
expect(config.data_path).to eq "/my/data"
end
it "uses root_path to update defaults" do
config.load(root_path: "/my/root")
config::DEFAULTS.each do |attr, ending|
expect(config.send(attr)).to eq File.join("/my/root", ending)
end
end
it "overrides all config.rb settings" do
expect_any_instance_of(config::DSL).to receive(:_config_options).and_return(
root_path: "/orig/root",
tmp_path: "/orig/root/my_tmp",
data_path: "/orig/root/my_data"
)
options = { root_path: "/new/root", tmp_path: "new_tmp" }
config.load(options)
expect(config.root_path).to eq "/new/root"
expect(config.tmp_path).to eq "/new/root/new_tmp"
# paths not given on the command line will be updated to their
# default location (relative to the new root)
expect(config.data_path).to eq(
File.join("/new/root", config::DEFAULTS[:data_path])
)
end
end
end
end # describe '#load'
describe "#hostname" do
before do
config.instance_variable_set(:@hostname, nil)
allow(Utilities).to receive(:utility).with(:hostname).and_return("/path/to/hostname")
end
it "caches the hostname" do
expect(Utilities).to receive(:run).once.with("/path/to/hostname").and_return("my_hostname")
expect(config.hostname).to eq("my_hostname")
expect(config.hostname).to eq("my_hostname")
end
end
describe "#set_root_path" do
context "when the given path == @root_path" do
it "should return @root_path without requiring the path to exist" do
expect(File).to receive(:directory?).never
expect(config.send(:set_root_path, config.root_path)).to eq(config.root_path)
end
end
context "when the given path exists" do
it "should set and return the @root_path" do
expect(config.send(:set_root_path, Dir.pwd)).to eq(Dir.pwd)
expect(config.root_path).to eq(Dir.pwd)
end
it "should expand relative paths" do
expect(config.send(:set_root_path, "")).to eq(Dir.pwd)
expect(config.root_path).to eq(Dir.pwd)
end
end
context "when the given path does not exist" do
it "should raise an error" do
path = File.expand_path("foo")
expect do
config.send(:set_root_path, "foo")
end.to raise_error(proc do |err|
expect(err).to be_an_instance_of config::Error
expect(err.message).to match(/Root Path Not Found/)
expect(err.message).to match(/Path was: #{ path }/)
end)
end
end
end # describe '#set_root_path'
describe "#set_path_variable" do
after do
if config.instance_variable_defined?(:@var)
config.send(:remove_instance_variable, :@var)
end
end
context "when a path is given" do
context "when the given path is an absolute path" do
it "should always use the given path" do
path = File.expand_path("foo")
config.send(:set_path_variable, "var", path, "none", "/root/path")
expect(config.instance_variable_get(:@var)).to eq(path)
config.send(:set_path_variable, "var", path, "none", nil)
expect(config.instance_variable_get(:@var)).to eq(path)
end
end
context "when the given path is a relative path" do
context "when a root_path is given" do
it "should append the path to the root_path" do
config.send(:set_path_variable, "var", "foo", "none", "/root/path")
expect(config.instance_variable_get(:@var)).to eq("/root/path/foo")
end
end
context "when a root_path is not given" do
it "should expand the path" do
path = File.expand_path("foo")
config.send(:set_path_variable, "var", "foo", "none", false)
expect(config.instance_variable_get(:@var)).to eq(path)
end
end
end
end # context 'when a path is given'
context "when no path is given" do
context "when a root_path is given" do
it "should use the root_path with the given ending" do
config.send(:set_path_variable, "var", nil, "ending", "/root/path")
expect(config.instance_variable_get(:@var)).to eq("/root/path/ending")
end
end
context "when a root_path is not given" do
it "should do nothing" do
config.send(:set_path_variable, "var", nil, "ending", false)
expect(config.instance_variable_defined?(:@var)).to eq(false)
end
end
end # context 'when no path is given'
end # describe '#set_path_variable'
describe "#reset!" do
before do
@env_user = ENV["USER"]
@env_home = ENV["HOME"]
end
after do
ENV["USER"] = @env_user
ENV["HOME"] = @env_home
end
it "should be called to set variables when module is loaded" do
# just to avoid 'already initialized constant' warnings
config.send(:remove_const, "DEFAULTS")
expected = config.instance_variables.sort.map(&:to_sym) - [:@hostname, :@mocha]
config.instance_variables.each do |var|
config.send(:remove_instance_variable, var)
end
expect(config.instance_variables).to be_empty
load File.expand_path("../../lib/backup/config.rb", __FILE__)
expect(config.instance_variables.sort.map(&:to_sym)).to eq(expected)
end
context "when setting @user" do
context 'when ENV["USER"] is set' do
before { ENV["USER"] = "test" }
it 'should set value for @user to ENV["USER"]' do
config.send(:reset!)
expect(config.user).to eq("test")
end
end
context 'when ENV["USER"] is not set' do
before { ENV.delete("USER") }
it "should set value using the user login name" do
config.send(:reset!)
expect(config.user).to eq(Etc.getpwuid.name)
end
end
end # context 'when setting @user'
context "when setting @root_path" do
context 'when ENV["HOME"] is set' do
before { ENV["HOME"] = "test/home/dir" }
it 'should set value using ENV["HOME"]' do
config.send(:reset!)
expect(config.root_path).to eq(
File.join(File.expand_path("test/home/dir"), "Backup")
)
end
end
context 'when ENV["HOME"] is not set' do
before { ENV.delete("HOME") }
it "should set value using $PWD" do
config.send(:reset!)
expect(config.root_path).to eq(File.expand_path("Backup"))
end
end
end # context 'when setting @root_path'
context "when setting other path variables" do
before { ENV["HOME"] = "test/home/dir" }
it "should use #update" do
expect(config).to receive(:update).with(
root_path: File.join(File.expand_path("test/home/dir"), "Backup")
)
config.send(:reset!)
end
end
end # describe '#reset!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/spec_helper.rb | spec/spec_helper.rb | require "rubygems" if RUBY_VERSION < "1.9"
require "bundler/setup"
require "backup"
require "timecop"
Dir[File.expand_path("../support/**/*.rb", __FILE__)].each { |f| require f }
module Backup
module ExampleHelpers
# ripped from MiniTest :)
# RSpec doesn't have a method for this? Am I missing something?
def capture_io
require "stringio"
orig_stdout = $stdout
orig_stderr = $stderr
captured_stdout = StringIO.new
captured_stderr = StringIO.new
$stdout = captured_stdout
$stderr = captured_stderr
yield
return captured_stdout.string, captured_stderr.string
ensure
$stdout = orig_stdout
$stderr = orig_stderr
end
end
end
RSpec.configure do |config|
##
# Example Helpers
config.include Backup::ExampleHelpers
config.filter_run focus: true
config.run_all_when_everything_filtered = true
config.before(:suite) do
# Initializes SandboxFileUtils so the first call to deactivate!(:noop)
# will set ::FileUtils to FileUtils::NoWrite
SandboxFileUtils.activate!
end
config.before(:example) do
# ::FileUtils will always be either SandboxFileUtils or FileUtils::NoWrite.
SandboxFileUtils.deactivate!(:noop)
# prevent system calls
allow(Backup::Utilities).to receive(:gnu_tar?).and_return(true)
allow(Backup::Utilities).to receive(:utility)
allow(Backup::Utilities).to receive(:run)
allow_any_instance_of(Backup::Pipeline).to receive(:run)
Backup::Utilities.send(:reset!)
Backup::Config.send(:reset!)
# Logger only queues messages received until Logger.start! is called.
Backup::Logger.send(:reset!)
end
end
puts "\nRuby version: #{RUBY_DESCRIPTION}\n\n"
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/s3_spec.rb | spec/storage/s3_spec.rb | require "spec_helper"
module Backup
describe Storage::S3 do
let(:model) { Model.new(:test_trigger, "test label") }
let(:required_config) do
proc do |s3|
s3.access_key_id = "my_access_key_id"
s3.secret_access_key = "my_secret_access_key"
s3.bucket = "my_bucket"
end
end
let(:required_iam_config) do
proc do |s3|
s3.use_iam_profile = true
s3.bucket = "my_bucket"
end
end
let(:storage) { Storage::S3.new(model, &required_config) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers" do
let(:default_overrides) do
{ "chunk_size" => 15,
"encryption" => :aes256,
"storage_class" => :reduced_redundancy }
end
let(:new_overrides) do
{ "chunk_size" => 20,
"encryption" => "aes256",
"storage_class" => "standard" }
end
end
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
# required
expect(storage.bucket).to eq "my_bucket"
# required unless using IAM profile
expect(storage.access_key_id).to eq "my_access_key_id"
expect(storage.secret_access_key).to eq "my_secret_access_key"
# defaults
expect(storage.use_iam_profile).to be_nil
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.region).to be_nil
expect(storage.path).to eq "backups"
expect(storage.chunk_size).to be 5
expect(storage.max_retries).to be 10
expect(storage.retry_waitsec).to be 30
expect(storage.encryption).to be_nil
expect(storage.storage_class).to be :standard
expect(storage.fog_options).to be_nil
end
it "configures the storage" do
storage = Storage::S3.new(model, :my_id) do |s3|
s3.keep = 2
s3.access_key_id = "my_access_key_id"
s3.secret_access_key = "my_secret_access_key"
s3.bucket = "my_bucket"
s3.region = "my_region"
s3.path = "my/path"
s3.chunk_size = 10
s3.max_retries = 5
s3.retry_waitsec = 60
s3.encryption = "aes256"
s3.storage_class = :reduced_redundancy
s3.fog_options = { my_key: "my_value" }
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.access_key_id).to eq "my_access_key_id"
expect(storage.secret_access_key).to eq "my_secret_access_key"
expect(storage.use_iam_profile).to be_nil
expect(storage.bucket).to eq "my_bucket"
expect(storage.region).to eq "my_region"
expect(storage.path).to eq "my/path"
expect(storage.chunk_size).to be 10
expect(storage.max_retries).to be 5
expect(storage.retry_waitsec).to be 60
expect(storage.encryption).to eq "aes256"
expect(storage.storage_class).to eq :reduced_redundancy
expect(storage.fog_options).to eq my_key: "my_value"
end
it "configures the storage with values passed as frozen strings" do
storage = Storage::S3.new(model, :my_id) do |s3|
s3.access_key_id = "my_access_key_id".freeze
s3.secret_access_key = "my_secret_access_key".freeze
s3.bucket = "my_bucket".freeze
s3.region = "my_region".freeze
s3.path = "my/path".freeze
s3.encryption = "aes256".freeze
s3.fog_options = { my_key: "my_value".freeze }
end
expect(storage.storage_id).to eq "my_id"
expect(storage.access_key_id).to eq "my_access_key_id"
expect(storage.secret_access_key).to eq "my_secret_access_key"
expect(storage.bucket).to eq "my_bucket"
expect(storage.region).to eq "my_region"
expect(storage.path).to eq "my/path"
expect(storage.encryption).to eq "aes256"
expect(storage.fog_options).to eq my_key: "my_value"
end
it "requires bucket" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.bucket = nil
end
end.to raise_error StandardError, /are all required/
end
context "when using AWS IAM profile" do
it "does not require access_key_id or secret_access_key" do
pre_config = required_iam_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
end
end.not_to raise_error
end
end
context "when using AWS access keys" do
it "requires access_key_id" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.access_key_id = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires secret_access_key" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.secret_access_key = nil
end
end.to raise_error StandardError, /are all required/
end
end
it "strips leading path separator" do
pre_config = required_config
storage = Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.path = "/this/path"
end
expect(storage.path).to eq "this/path"
end
it "allows chunk_size 0" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.chunk_size = 0
end
end.not_to raise_error
end
it "validates chunk_size minimum" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.chunk_size = 4
end
end.to raise_error StandardError, /must be between 5 and 5120/
end
it "validates chunk_size maximum" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.chunk_size = 5121
end
end.to raise_error StandardError, /must be between 5 and 5120/
end
it "validates encryption" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.encryption = :aes512
end
end.to raise_error StandardError, /must be :aes256 or nil/
end
it "validates storage_class" do
pre_config = required_config
expect do
Storage::S3.new(model) do |s3|
pre_config.call(s3)
s3.storage_class = :glacier
end
end.to raise_error StandardError, /must be :standard or :standard_ia or :reduced_redundancy/
end
end # describe '#initialize'
describe "#cloud_io" do
specify "when using AWS access keys" do
expect(CloudIO::S3).to receive(:new).once.with(
access_key_id: "my_access_key_id",
secret_access_key: "my_secret_access_key",
use_iam_profile: nil,
region: nil,
bucket: "my_bucket",
encryption: nil,
storage_class: :standard,
max_retries: 10,
retry_waitsec: 30,
chunk_size: 5,
fog_options: nil
).and_return(:cloud_io)
storage = Storage::S3.new(model, &required_config)
expect(storage.send(:cloud_io)).to eq :cloud_io
expect(storage.send(:cloud_io)).to eq :cloud_io
end
specify "when using AWS IAM profile" do
expect(CloudIO::S3).to receive(:new).once.with(
access_key_id: nil,
secret_access_key: nil,
use_iam_profile: true,
region: nil,
bucket: "my_bucket",
encryption: nil,
storage_class: :standard,
max_retries: 10,
retry_waitsec: 30,
chunk_size: 5,
fog_options: nil
).and_return(:cloud_io)
storage = Storage::S3.new(model, &required_iam_config)
expect(storage.send(:cloud_io)).to eq :cloud_io
expect(storage.send(:cloud_io)).to eq :cloud_io
end
end # describe '#cloud_io'
describe "#transfer!" do
let(:cloud_io) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
allow(storage).to receive(:cloud_io).and_return(cloud_io)
storage.bucket = "my_bucket"
storage.path = "my/path"
end
after { Timecop.return }
it "transfers the package files" do
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with("Storing 'my_bucket/#{dest}'...")
expect(cloud_io).to receive(:upload).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with("Storing 'my_bucket/#{dest}'...")
expect(cloud_io).to receive(:upload).ordered.with(src, dest)
storage.send(:transfer!)
end
end # describe '#transfer!'
describe "#remove!" do
let(:cloud_io) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp
)
end
before do
Timecop.freeze
allow(storage).to receive(:cloud_io).and_return(cloud_io)
storage.bucket = "my_bucket"
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).with("Removing backup package dated #{timestamp}...")
objects = ["some objects"]
expect(cloud_io).to receive(:objects).with(remote_path).and_return(objects)
expect(cloud_io).to receive(:delete).with(objects)
storage.send(:remove!, package)
end
it "raises an error if remote package is missing" do
objects = []
expect(cloud_io).to receive(:objects).with(remote_path).and_return(objects)
expect(cloud_io).to receive(:delete).never
expect do
storage.send(:remove!, package)
end.to raise_error(
Storage::S3::Error,
"Storage::S3::Error: Package at '#{remote_path}' not found"
)
end
end # describe '#remove!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/dropbox_spec.rb | spec/storage/dropbox_spec.rb | require "spec_helper"
module Backup
describe Storage::Dropbox do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::Dropbox.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.api_key).to be_nil
expect(storage.api_secret).to be_nil
expect(storage.cache_path).to eq ".cache"
expect(storage.chunk_size).to be 4
expect(storage.max_retries).to be 10
expect(storage.retry_waitsec).to be 30
expect(storage.path).to eq "backups"
end
it "configures the storage" do
storage = Storage::Dropbox.new(model, :my_id) do |db|
db.keep = 2
db.api_key = "my_api_key"
db.api_secret = "my_api_secret"
db.cache_path = ".my_cache"
db.chunk_size = 10
db.max_retries = 15
db.retry_waitsec = 45
db.path = "my/path"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.api_key).to eq "my_api_key"
expect(storage.api_secret).to eq "my_api_secret"
expect(storage.cache_path).to eq ".my_cache"
expect(storage.chunk_size).to eq 10
expect(storage.max_retries).to eq 15
expect(storage.retry_waitsec).to eq 45
expect(storage.path).to eq "my/path"
end
it "strips leading path separator" do
storage = Storage::Dropbox.new(model) do |s3|
s3.path = "/this/path"
end
expect(storage.path).to eq "this/path"
end
end # describe '#initialize'
describe "#connection" do
let(:session) { double }
let(:client) { double }
context "when a cached session exists" do
before do
allow(storage).to receive(:cached_session).and_return(session)
expect(storage).to receive(:create_write_and_return_new_session!).never
expect(DropboxClient).to receive(:new).once.with(session, :app_folder).and_return(client)
end
it "uses the cached session to create the client" do
expect(storage.send(:connection)).to be(client)
end
it "returns an already existing client" do
expect(storage.send(:connection)).to be(client)
expect(storage.send(:connection)).to be(client)
end
end
context "when a cached session does not exist" do
before do
allow(storage).to receive(:cached_session).and_return(false)
expect(Logger).to receive(:info).with("Creating a new session!")
expect(storage).to receive(:create_write_and_return_new_session!).and_return(session)
expect(DropboxClient).to receive(:new).once.with(session, :app_folder).and_return(client)
end
it "creates a new session and returns the client" do
expect(storage.send(:connection)).to be(client)
end
it "returns an already existing client" do
expect(storage.send(:connection)).to be(client)
expect(storage.send(:connection)).to be(client)
end
end
context "when an error is raised creating a client for the session" do
it "raises an error" do
allow(storage).to receive(:cached_session).and_return(true)
expect(DropboxClient).to receive(:new).and_raise("error")
expect do
storage.send(:connection)
end.to raise_error(Storage::Dropbox::Error) { |err|
expect(err.message).to eq(
"Storage::Dropbox::Error: Authorization Failed\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error"
)
}
end
end
end # describe '#connection'
describe "#cached_session" do
let(:session) { double }
let(:cached_file) { storage.send(:cached_file) }
before do
storage.api_key = "my_api_key"
storage.api_secret = "my_api_secret"
end
it "returns the cached session if one exists" do
expect(File).to receive(:exist?).with(cached_file).and_return(true)
expect(File).to receive(:read).with(cached_file).and_return("yaml_data")
expect(DropboxSession).to receive(:deserialize).with("yaml_data").and_return(session)
expect(Backup::Logger).to receive(:info).with("Session data loaded from cache!")
expect(storage.send(:cached_session)).to be(session)
end
it "returns false when no cached session file exists" do
expect(File).to receive(:exist?).with(cached_file).and_return(false)
expect(storage.send(:cached_session)).to be false
end
context "when errors occur loading the session" do
it "logs a warning and return false" do
expect(File).to receive(:exist?).with(cached_file).and_return(true)
expect(File).to receive(:read).with(cached_file).and_return("yaml_data")
expect(DropboxSession).to receive(:deserialize).with("yaml_data")
.and_raise("error message")
expect(Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of(Storage::Dropbox::Error)
expect(err.message).to match(
"Could not read session data from cache.\n" \
" Cache data might be corrupt."
)
expect(err.message).to match("RuntimeError: error message")
end
expect do
expect(storage.send(:cached_session)).to be false
end.not_to raise_error
end
end
end # describe '#cached_session'
describe "#transfer!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:file) { double }
let(:uploader) { double }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage).to receive(:connection).and_return(connection)
allow(file).to receive(:stat).and_return(double(File::Stat, size: 6_291_456))
allow(uploader).to receive(:total_size).and_return(6_291_456)
allow(uploader).to receive(:offset).and_return(
0, 2_097_152, 4_194_304, 6_291_456,
0, 2_097_152, 4_194_304, 6_291_456
)
storage.path = "my/path"
storage.chunk_size = 2
end
after { Timecop.return }
it "transfers the package files" do
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
# first file
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(File).to receive(:open).ordered.with(src, "r").and_yield(file)
expect(connection).to receive(:get_chunked_uploader).ordered
.with(file, 6_291_456).and_return(uploader)
expect(uploader).to receive(:upload).ordered.exactly(3).times.with(2_097_152)
expect(uploader).to receive(:finish).ordered.with(dest)
# second file
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(File).to receive(:open).ordered.with(src, "r").and_yield(file)
expect(connection).to receive(:get_chunked_uploader).ordered
.with(file, 6_291_456).and_return(uploader)
expect(uploader).to receive(:upload).ordered.exactly(3).times.with(2_097_152)
expect(uploader).to receive(:finish).ordered.with(dest)
storage.send(:transfer!)
end
it "retries on errors" do
storage.max_retries = 1
allow(storage.package).to receive(:filenames).and_return(["test_trigger.tar"])
src = File.join(Config.tmp_path, "test_trigger.tar")
dest = File.join(remote_path, "test_trigger.tar")
@logger_calls = 0
expect(Logger).to receive(:info).exactly(3).times do |arg|
@logger_calls += 1
case @logger_calls
when 1
expect(arg).to eq "Storing '#{dest}'..."
when 2
expect(arg).to be_an_instance_of Storage::Dropbox::Error
expect(arg.message).to match(
"Storage::Dropbox::Error: Retry #1 of 1."
)
expect(arg.message).to match("RuntimeError: chunk failed")
when 3
expect(arg).to be_an_instance_of Storage::Dropbox::Error
expect(arg.message).to match(
"Storage::Dropbox::Error: Retry #1 of 1."
)
expect(arg.message).to match("RuntimeError: finish failed")
end
end
expect(File).to receive(:open).ordered.with(src, "r").and_yield(file)
expect(connection).to receive(:get_chunked_uploader).ordered
.with(file, 6_291_456).and_return(uploader)
expect(uploader).to receive(:upload).ordered.and_raise("chunk failed")
expect(storage).to receive(:sleep).ordered.with(30)
expect(uploader).to receive(:upload).ordered.exactly(3).times.with(2_097_152)
expect(uploader).to receive(:finish).ordered.with(dest).and_raise("finish failed")
expect(storage).to receive(:sleep).ordered.with(30)
expect(uploader).to receive(:finish).ordered.with(dest)
storage.send(:transfer!)
end
it "fails when retries are exceeded" do
storage.max_retries = 2
allow(storage.package).to receive(:filenames).and_return(["test_trigger.tar"])
src = File.join(Config.tmp_path, "test_trigger.tar")
dest = File.join(remote_path, "test_trigger.tar")
@logger_calls = 0
expect(Logger).to receive(:info).exactly(3).times do |arg|
@logger_calls += 1
case @logger_calls
when 1
expect(arg).to eq "Storing '#{dest}'..."
when 2
expect(arg).to be_an_instance_of Storage::Dropbox::Error
expect(arg.message).to match(
"Storage::Dropbox::Error: Retry #1 of 2."
)
expect(arg.message).to match("RuntimeError: chunk failed")
when 3
expect(arg).to be_an_instance_of Storage::Dropbox::Error
expect(arg.message).to match(
"Storage::Dropbox::Error: Retry #2 of 2."
)
expect(arg.message).to match("RuntimeError: chunk failed again")
end
end
expect(File).to receive(:open).ordered.with(src, "r").and_yield(file)
expect(connection).to receive(:get_chunked_uploader).ordered
.with(file, 6_291_456).and_return(uploader)
expect(uploader).to receive(:upload).ordered.and_raise("chunk failed")
expect(storage).to receive(:sleep).ordered.with(30)
expect(uploader).to receive(:upload).ordered.and_raise("chunk failed again")
expect(storage).to receive(:sleep).ordered.with(30)
expect(uploader).to receive(:upload).ordered.and_raise("strike three")
expect(uploader).to receive(:finish).never
expect do
storage.send(:transfer!)
end.to raise_error(Storage::Dropbox::Error) { |err|
expect(err.message).to match("Upload Failed!")
expect(err.message).to match("RuntimeError: strike three")
}
end
end # describe '#transfer!'
describe "#remove!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp
)
end
before do
Timecop.freeze
allow(storage).to receive(:connection).and_return(connection)
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(connection).to receive(:file_delete).with(remote_path)
storage.send(:remove!, package)
end
end # describe '#remove!'
describe "#cached_file" do
before do
storage.api_key = "my_api_key"
storage.api_secret = "my_api_secret"
end
context "with default root_path" do
specify "using default cache_path" do
expect(storage.send(:cached_file)).to eq(
File.join(Config.root_path, ".cache", "my_api_keymy_api_secret")
)
end
specify "using relative cache_path" do
storage.cache_path = ".my_cache"
expect(storage.send(:cached_file)).to eq(
File.join(Config.root_path, ".my_cache", "my_api_keymy_api_secret")
)
end
specify "using absolute cache_path" do
storage.cache_path = "/my/.cache"
expect(storage.send(:cached_file)).to eq(
"/my/.cache/my_api_keymy_api_secret"
)
end
end
context "with custom root_path" do
before do
allow(File).to receive(:directory?).and_return(true)
Config.send(:update, root_path: "/my_root")
end
specify "using default cache_path" do
expect(storage.send(:cached_file)).to eq(
"/my_root/.cache/my_api_keymy_api_secret"
)
end
specify "using relative cache_path" do
storage.cache_path = ".my_cache"
expect(storage.send(:cached_file)).to eq(
"/my_root/.my_cache/my_api_keymy_api_secret"
)
end
specify "using absolute cache_path" do
storage.cache_path = "/my/.cache"
expect(storage.send(:cached_file)).to eq(
"/my/.cache/my_api_keymy_api_secret"
)
end
end
end # describe '#cached_file'
describe "#write_cache!" do
let(:session) { double }
let(:cached_file) { storage.send(:cached_file) }
let(:file) { double }
before do
storage.api_key = "my_api_key"
storage.api_secret = "my_api_secret"
allow(session).to receive(:serialize).and_return("serialized_data")
end
it "should write a serialized session to file" do
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(cached_file))
expect(File).to receive(:open).with(cached_file, "w").and_yield(file)
expect(file).to receive(:write).with("serialized_data")
storage.send(:write_cache!, session)
end
end # describe '#write_cache!'
describe "#create_write_and_return_new_session!" do
let(:session) { double }
let(:template) { double }
let(:cached_file) { storage.send(:cached_file) }
before do
storage.api_key = "my_api_key"
storage.api_secret = "my_api_secret"
expect(DropboxSession).to receive(:new).ordered
.with("my_api_key", "my_api_secret").and_return(session)
expect(session).to receive(:get_request_token).ordered
expect(Template).to receive(:new).ordered.with(
session: session, cached_file: cached_file
).and_return(template)
expect(template).to receive(:render).ordered.with(
"storage/dropbox/authorization_url.erb"
)
expect(Timeout).to receive(:timeout).ordered.with(180).and_yield
expect(STDIN).to receive(:gets).ordered
end
context "when session is authenticated" do
before do
expect(session).to receive(:get_access_token).ordered
end
it "caches and returns the new session" do
expect(template).to receive(:render).ordered.with(
"storage/dropbox/authorized.erb"
)
expect(storage).to receive(:write_cache!).ordered.with(session)
expect(template).to receive(:render).ordered.with(
"storage/dropbox/cache_file_written.erb"
)
expect(storage.send(:create_write_and_return_new_session!)).to be(session)
end
end
context "when session is not authenticated" do
before do
expect(session).to receive(:get_access_token).ordered.and_raise("error message")
end
it "raises an error" do
expect(template).to receive(:render).with("storage/dropbox/authorized.erb").never
expect(storage).to receive(:write_cache!).never
expect(template).to receive(:render).with("storage/dropbox/cache_file_written.erb").never
expect do
storage.send(:create_write_and_return_new_session!)
end.to raise_error(Storage::Dropbox::Error) { |err|
expect(err.message).to match(
"Could not create or authenticate a new session"
)
expect(err.message).to match("RuntimeError: error message")
}
end
end
end # describe '#create_write_and_return_new_session!' do
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/ftp_spec.rb | spec/storage/ftp_spec.rb | require "spec_helper"
module Backup
describe Storage::FTP do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::FTP.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.username).to be_nil
expect(storage.password).to be_nil
expect(storage.ip).to be_nil
expect(storage.port).to be 21
expect(storage.passive_mode).to be false
expect(storage.timeout).to be nil
expect(storage.path).to eq "backups"
end
it "configures the storage" do
storage = Storage::FTP.new(model, :my_id) do |ftp|
ftp.keep = 2
ftp.username = "my_username"
ftp.password = "my_password"
ftp.ip = "my_host"
ftp.port = 123
ftp.passive_mode = true
ftp.timeout = 10
ftp.path = "my/path"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.username).to eq "my_username"
expect(storage.password).to eq "my_password"
expect(storage.ip).to eq "my_host"
expect(storage.port).to be 123
expect(storage.passive_mode).to be true
expect(storage.timeout).to be 10
expect(storage.path).to eq "my/path"
end
it "converts a tilde path to a relative path" do
storage = Storage::FTP.new(model) do |scp|
scp.path = "~/my/path"
end
expect(storage.path).to eq "my/path"
end
it "does not alter an absolute path" do
storage = Storage::FTP.new(model) do |scp|
scp.path = "/my/path"
end
expect(storage.path).to eq "/my/path"
end
end # describe '#initialize'
describe "#connection" do
let(:connection) { double }
before do
@ftp_port = Net::FTP::FTP_PORT
storage.ip = "123.45.678.90"
storage.username = "my_user"
storage.password = "my_pass"
end
after do
Net::FTP.send(:remove_const, :FTP_PORT)
Net::FTP.send(:const_set, :FTP_PORT, @ftp_port)
end
it "yields a connection to the remote server" do
expect(Net::FTP).to receive(:open).with(
"123.45.678.90", "my_user", "my_pass"
).and_yield(connection)
storage.send(:connection) do |ftp|
expect(ftp).to be connection
end
end
it "sets the FTP_PORT" do
storage = Storage::FTP.new(model) do |ftp|
ftp.port = 123
end
allow(Net::FTP).to receive(:open)
storage.send(:connection)
expect(Net::FTP::FTP_PORT).to be 123
end
# there's no way to really test this without making a connection,
# since an error will be raised if no connection can be made.
it "sets passive mode true if specified" do
storage.passive_mode = true
expect(Net::FTP).to receive(:open).with(
"123.45.678.90", "my_user", "my_pass"
).and_yield(connection)
expect(connection).to receive(:passive=).with(true)
storage.send(:connection) {}
end
it "sets timeout if specified" do
storage.timeout = 10
expect(Net::FTP).to receive(:open).with(
"123.45.678.90", "my_user", "my_pass"
).and_yield(connection)
expect(connection).to receive(:open_timeout=).with(10)
expect(connection).to receive(:read_timeout=).with(10)
storage.send(:connection) {}
end
end # describe '#connection'
describe "#transfer!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
storage.ip = "123.45.678.90"
storage.path = "my/path"
end
after { Timecop.return }
it "transfers the package files" do
expect(storage).to receive(:connection).ordered.and_yield(connection)
expect(storage).to receive(:create_remote_path).ordered.with(connection)
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(connection).to receive(:put).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(connection).to receive(:put).ordered.with(src, dest)
storage.send(:transfer!)
end
end # describe '#transfer!'
describe "#remove!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp,
filenames: ["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
end
before do
Timecop.freeze
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(storage).to receive(:connection).ordered.and_yield(connection)
target = File.join(remote_path, "test_trigger.tar-aa")
expect(connection).to receive(:delete).ordered.with(target)
target = File.join(remote_path, "test_trigger.tar-ab")
expect(connection).to receive(:delete).ordered.with(target)
expect(connection).to receive(:rmdir).ordered.with(remote_path)
storage.send(:remove!, package)
end
end # describe '#remove!'
describe "#create_remote_path" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
storage.path = "my/path"
end
after { Timecop.return }
context "while properly creating remote directories one by one" do
it "should rescue any SFTP::StatusException and continue" do
expect(connection).to receive(:mkdir).ordered
.with("my")
expect(connection).to receive(:mkdir).ordered
.with("my/path").and_raise(Net::FTPPermError)
expect(connection).to receive(:mkdir).ordered
.with("my/path/test_trigger")
expect(connection).to receive(:mkdir).ordered
.with("my/path/test_trigger/#{timestamp}")
expect do
storage.send(:create_remote_path, connection)
end.not_to raise_error
end
end
end # describe '#create_remote_path'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/scp_spec.rb | spec/storage/scp_spec.rb | require "spec_helper"
module Backup
describe Storage::SCP do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::SCP.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.username).to be_nil
expect(storage.password).to be_nil
expect(storage.ssh_options).to eq({})
expect(storage.ip).to be_nil
expect(storage.port).to be 22
expect(storage.path).to eq "backups"
end
it "configures the storage" do
storage = Storage::SCP.new(model, :my_id) do |scp|
scp.keep = 2
scp.username = "my_username"
scp.password = "my_password"
scp.ssh_options = { keys: ["my/key"] }
scp.ip = "my_host"
scp.port = 123
scp.path = "my/path"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.username).to eq "my_username"
expect(storage.password).to eq "my_password"
expect(storage.ssh_options).to eq keys: ["my/key"]
expect(storage.ip).to eq "my_host"
expect(storage.port).to be 123
expect(storage.path).to eq "my/path"
end
it "converts a tilde path to a relative path" do
storage = Storage::SCP.new(model) do |scp|
scp.path = "~/my/path"
end
expect(storage.path).to eq "my/path"
end
it "does not alter an absolute path" do
storage = Storage::SCP.new(model) do |scp|
scp.path = "/my/path"
end
expect(storage.path).to eq "/my/path"
end
end # describe '#initialize'
describe "#connection" do
let(:connection) { double }
before do
storage.ip = "123.45.678.90"
storage.username = "my_user"
storage.password = "my_pass"
storage.ssh_options = { keys: ["my/key"] }
end
it "yields a connection to the remote server" do
expect(Net::SSH).to receive(:start).with(
"123.45.678.90", "my_user", password: "my_pass", port: 22,
keys: ["my/key"]
).and_yield(connection)
storage.send(:connection) do |scp|
expect(scp).to be connection
end
end
end # describe '#connection'
describe "#transfer!" do
let(:connection) { double }
let(:scp) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
storage.ip = "123.45.678.90"
storage.path = "my/path"
allow(connection).to receive(:scp).and_return(scp)
end
after { Timecop.return }
it "transfers the package files" do
expect(storage).to receive(:connection).ordered.and_yield(connection)
expect(connection).to receive(:exec!).ordered.with(
"mkdir -p '#{remote_path}'"
)
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(scp).to receive(:upload!).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(scp).to receive(:upload!).ordered.with(src, dest)
storage.send(:transfer!)
end
end # describe '#transfer!'
describe "#remove!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp
)
end
before do
Timecop.freeze
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(storage).to receive(:connection).ordered.and_yield(connection)
expect(connection).to receive(:exec!).ordered
.with("rm -r '#{remote_path}'")
storage.send(:remove!, package)
end
context "when the ssh connection reports errors" do
it "raises an error reporting the errors" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(storage).to receive(:connection).ordered.and_yield(connection)
expect(connection).to receive(:exec!).ordered
.with("rm -r '#{remote_path}'")
.and_yield(:ch, :stderr, "path not found")
expect do
storage.send(:remove!, package)
end.to raise_error Storage::SCP::Error, "Storage::SCP::Error: " \
"Net::SSH reported the following errors:\n" \
" path not found"
end
end
end # describe '#remove!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/sftp_spec.rb | spec/storage/sftp_spec.rb | require "spec_helper"
module Backup
describe Storage::SFTP do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::SFTP.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.username).to be_nil
expect(storage.password).to be_nil
expect(storage.ssh_options).to eq({})
expect(storage.ip).to be_nil
expect(storage.port).to be 22
expect(storage.path).to eq "backups"
end
it "configures the storage" do
storage = Storage::SFTP.new(model, :my_id) do |sftp|
sftp.keep = 2
sftp.username = "my_username"
sftp.password = "my_password"
sftp.ssh_options = { keys: ["my/key"] }
sftp.ip = "my_host"
sftp.port = 123
sftp.path = "my/path"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.username).to eq "my_username"
expect(storage.password).to eq "my_password"
expect(storage.ssh_options).to eq keys: ["my/key"]
expect(storage.ip).to eq "my_host"
expect(storage.port).to be 123
expect(storage.path).to eq "my/path"
end
it "converts a tilde path to a relative path" do
storage = Storage::SFTP.new(model) do |sftp|
sftp.path = "~/my/path"
end
expect(storage.path).to eq "my/path"
end
it "does not alter an absolute path" do
storage = Storage::SFTP.new(model) do |sftp|
sftp.path = "/my/path"
end
expect(storage.path).to eq "/my/path"
end
end # describe '#initialize'
describe "#connection" do
let(:connection) { double }
before do
storage.ip = "123.45.678.90"
storage.username = "my_user"
storage.password = "my_pass"
storage.ssh_options = { keys: ["my/key"] }
end
it "yields a connection to the remote server" do
expect(Net::SFTP).to receive(:start).with(
"123.45.678.90", "my_user", password: "my_pass", port: 22,
keys: ["my/key"]
).and_yield(connection)
storage.send(:connection) do |sftp|
expect(sftp).to be connection
end
end
end # describe '#connection'
describe "#transfer!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
storage.ip = "123.45.678.90"
storage.path = "my/path"
end
after { Timecop.return }
it "transfers the package files" do
expect(storage).to receive(:connection).ordered.and_yield(connection)
expect(storage).to receive(:create_remote_path).ordered.with(connection)
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(connection).to receive(:upload!).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered
.with("Storing '123.45.678.90:#{dest}'...")
expect(connection).to receive(:upload!).ordered.with(src, dest)
storage.send(:transfer!)
end
end # describe '#transfer!'
describe "#remove!" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp,
filenames: ["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
end
before do
Timecop.freeze
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(storage).to receive(:connection).ordered.and_yield(connection)
target = File.join(remote_path, "test_trigger.tar-aa")
expect(connection).to receive(:remove!).ordered.with(target)
target = File.join(remote_path, "test_trigger.tar-ab")
expect(connection).to receive(:remove!).ordered.with(target)
expect(connection).to receive(:rmdir!).ordered.with(remote_path)
storage.send(:remove!, package)
end
end # describe '#remove!'
describe "#create_remote_path" do
let(:connection) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:sftp_response) { double(File::Stat, code: 11, message: nil) }
let(:sftp_status_exception) { Net::SFTP::StatusException.new(sftp_response) }
before do
Timecop.freeze
storage.package.time = timestamp
storage.path = "my/path"
end
after { Timecop.return }
context "while properly creating remote directories one by one" do
it "should rescue any SFTP::StatusException and continue" do
expect(connection).to receive(:mkdir!).ordered
.with("my")
expect(connection).to receive(:mkdir!).ordered
.with("my/path").and_raise(sftp_status_exception)
expect(connection).to receive(:mkdir!).ordered
.with("my/path/test_trigger")
expect(connection).to receive(:mkdir!).ordered
.with("my/path/test_trigger/#{timestamp}")
expect do
storage.send(:create_remote_path, connection)
end.not_to raise_error
end
end
end # describe '#create_remote_path'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/cloud_files_spec.rb | spec/storage/cloud_files_spec.rb | require "spec_helper"
module Backup
describe Storage::CloudFiles do
let(:model) { Model.new(:test_trigger, "test label") }
let(:required_config) do
proc do |cf|
cf.username = "my_username"
cf.api_key = "my_api_key"
cf.container = "my_container"
end
end
let(:storage) { Storage::CloudFiles.new(model, &required_config) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers" do
let(:default_overrides) { { "segment_size" => 15 } }
let(:new_overrides) { { "segment_size" => 20 } }
end
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
# required
expect(storage.username).to eq "my_username"
expect(storage.api_key).to eq "my_api_key"
expect(storage.container).to eq "my_container"
# defaults
expect(storage.storage_id).to be_nil
expect(storage.auth_url).to be_nil
expect(storage.region).to be_nil
expect(storage.servicenet).to be false
expect(storage.segments_container).to be_nil
expect(storage.segment_size).to be 0
expect(storage.days_to_keep).to be_nil
expect(storage.max_retries).to be 10
expect(storage.retry_waitsec).to be 30
expect(storage.fog_options).to be_nil
expect(storage.path).to eq "backups"
expect(storage.keep).to be_nil
end
it "configures the storage" do
storage = Storage::CloudFiles.new(model, :my_id) do |cf|
cf.username = "my_username"
cf.api_key = "my_api_key"
cf.auth_url = "my_auth_url"
cf.region = "my_region"
cf.servicenet = true
cf.container = "my_container"
cf.segments_container = "my_segments_container"
cf.segment_size = 5
cf.days_to_keep = 90
cf.max_retries = 15
cf.retry_waitsec = 45
cf.fog_options = { my_key: "my_value" }
cf.path = "my/path"
cf.keep = 2
end
expect(storage.storage_id).to eq "my_id"
expect(storage.username).to eq "my_username"
expect(storage.api_key).to eq "my_api_key"
expect(storage.auth_url).to eq "my_auth_url"
expect(storage.region).to eq "my_region"
expect(storage.servicenet).to be true
expect(storage.container).to eq "my_container"
expect(storage.segments_container).to eq "my_segments_container"
expect(storage.segment_size).to be 5
expect(storage.days_to_keep).to be 90
expect(storage.max_retries).to be 15
expect(storage.fog_options).to eq my_key: "my_value"
expect(storage.retry_waitsec).to be 45
expect(storage.path).to eq "my/path"
expect(storage.keep).to be 2
end
it "strips leading path separator" do
pre_config = required_config
storage = Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.path = "/this/path"
end
expect(storage.path).to eq "this/path"
end
it "requires username" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.username = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires api_key" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.api_key = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires container" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.container = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires segments_container if segment_size > 0" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.segment_size = 1
end
end.to raise_error StandardError, /segments_container is required/
end
it "requires container and segments_container be different" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.segments_container = "my_container"
cf.segment_size = 1
end
end.to raise_error StandardError, /segments_container must not be the same/
end
it "requires segments_size be <= 5120" do
pre_config = required_config
expect do
Storage::CloudFiles.new(model) do |cf|
pre_config.call(cf)
cf.segments_container = "my_segments_container"
cf.segment_size = 5121
end
end.to raise_error StandardError, /segment_size is too large/
end
end # describe '#initialize'
describe "#cloud_io" do
it "caches a new CloudIO instance" do
expect(CloudIO::CloudFiles).to receive(:new).once.with(
username: "my_username",
api_key: "my_api_key",
auth_url: nil,
region: nil,
servicenet: false,
container: "my_container",
segments_container: nil,
segment_size: 0,
days_to_keep: nil,
max_retries: 10,
retry_waitsec: 30,
fog_options: nil
).and_return(:cloud_io)
expect(storage.send(:cloud_io)).to eq :cloud_io
expect(storage.send(:cloud_io)).to eq :cloud_io
end
end # describe '#cloud_io'
describe "#transfer!" do
let(:cloud_io) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
allow(storage).to receive(:cloud_io).and_return(cloud_io)
storage.path = "my/path"
end
after { Timecop.return }
it "transfers the package files" do
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered
.with("Storing 'my_container/#{dest}'...")
expect(cloud_io).to receive(:upload).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered
.with("Storing 'my_container/#{dest}'...")
expect(cloud_io).to receive(:upload).ordered.with(src, dest)
storage.send(:transfer!)
expect(storage.package.no_cycle).to eq(false)
end
context "when days_to_keep is set" do
before { storage.days_to_keep = 1 }
it "marks package so the cycler will not attempt to remove it" do
allow(cloud_io).to receive(:upload)
storage.send(:transfer!)
expect(storage.package.no_cycle).to eq(true)
end
end
end # describe '#transfer!'
describe "#remove!" do
let(:cloud_io) { double }
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp
)
end
let(:package_file_a) do
double(CloudIO::CloudFiles::Object, marked_for_deletion?: false, slo?: true)
end
let(:package_file_b) do
double(CloudIO::CloudFiles::Object, marked_for_deletion?: false, slo?: false)
end
before do
Timecop.freeze
allow(storage).to receive(:cloud_io).and_return(cloud_io)
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).with("Removing backup package dated #{timestamp}...")
objects = [package_file_a, package_file_b]
expect(cloud_io).to receive(:objects).with(remote_path).and_return(objects)
expect(cloud_io).to receive(:delete_slo).with([package_file_a])
expect(cloud_io).to receive(:delete).with([package_file_b])
storage.send(:remove!, package)
end
it "raises an error if remote package is missing" do
objects = []
expect(cloud_io).to receive(:objects).with(remote_path).and_return(objects)
expect(cloud_io).to receive(:delete_slo).never
expect(cloud_io).to receive(:delete).never
expect do
storage.send(:remove!, package)
end.to raise_error(
Storage::CloudFiles::Error,
"Storage::CloudFiles::Error: Package at '#{remote_path}' not found"
)
end
end # describe '#remove!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/rsync_spec.rb | spec/storage/rsync_spec.rb | require "spec_helper"
module Backup
describe Storage::RSync do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::RSync.new(model) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Storage::RSync).to \
receive(:utility).with(:rsync).and_return("rsync")
allow_any_instance_of(Storage::RSync).to \
receive(:utility).with(:ssh).and_return("ssh")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.mode).to eq :ssh
expect(storage.host).to be_nil
expect(storage.port).to be 22
expect(storage.ssh_user).to be_nil
expect(storage.rsync_user).to be_nil
expect(storage.rsync_password).to be_nil
expect(storage.rsync_password_file).to be_nil
expect(storage.compress).to be(false)
expect(storage.path).to eq "~/backups"
expect(storage.additional_ssh_options).to be_nil
expect(storage.additional_rsync_options).to be_nil
# this storage doesn't support cycling, but `keep` is still inherited
expect(storage.keep).to be_nil
end
it "configures the storage" do
storage = Storage::RSync.new(model, :my_id) do |rsync|
rsync.mode = :valid_mode
rsync.host = "123.45.678.90"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.rsync_user = "rsync_username"
rsync.rsync_password = "rsync_password"
rsync.rsync_password_file = "/my/rsync_password"
rsync.compress = true
rsync.path = "~/my_backups/"
rsync.additional_ssh_options = "ssh options"
rsync.additional_rsync_options = "rsync options"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.mode).to eq :valid_mode
expect(storage.host).to eq "123.45.678.90"
expect(storage.port).to be 123
expect(storage.ssh_user).to eq "ssh_username"
expect(storage.rsync_user).to eq "rsync_username"
expect(storage.rsync_password).to eq "rsync_password"
expect(storage.rsync_password_file).to eq "/my/rsync_password"
expect(storage.compress).to be true
expect(storage.path).to eq "~/my_backups/"
expect(storage.additional_ssh_options).to eq "ssh options"
expect(storage.additional_rsync_options).to eq "rsync options"
end
it "uses default port 22 for :ssh_daemon mode" do
storage = Storage::RSync.new(model) do |s|
s.mode = :ssh_daemon
end
expect(storage.mode).to eq :ssh_daemon
expect(storage.port).to be 22
end
it "uses default port 873 for :rsync_daemon mode" do
storage = Storage::RSync.new(model) do |s|
s.mode = :rsync_daemon
end
expect(storage.mode).to eq :rsync_daemon
expect(storage.port).to be 873
end
end # describe '#initialize'
describe "#transfer!" do
let(:package_files) do
# source paths for package files never change
["test_trigger.tar-aa", "test_trigger.tar-ab"].map do |name|
File.join(Config.tmp_path, name)
end
end
before do
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
end
context "local transfer" do
it "performs transfer with default values" do
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/backups"))
# First Package File
dest = File.join(File.expand_path("~/backups"), "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with(
"Syncing to '#{dest}'..."
)
expect(storage).to receive(:run).ordered.with(
"rsync --archive '#{package_files[0]}' '#{dest}'"
)
# Second Package File
dest = File.join(File.expand_path("~/backups"), "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with(
"Syncing to '#{dest}'..."
)
expect(storage).to receive(:run).ordered.with(
"rsync --archive '#{package_files[1]}' '#{dest}'"
)
storage.send(:transfer!)
end
it "uses given path, storage id and additional_rsync_options" do
storage = Storage::RSync.new(model, "my storage") do |rsync|
rsync.path = "/my/backups"
rsync.additional_rsync_options = ["--arg1", "--arg2"]
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path
expect(FileUtils).to receive(:mkdir_p).with("/my/backups")
# First Package File
dest = "/my/backups/test_trigger.tar-aa"
expect(Logger).to receive(:info).ordered.with(
"Syncing to '#{dest}'..."
)
expect(storage).to receive(:run).ordered.with(
"rsync --archive --arg1 --arg2 '#{package_files[0]}' '#{dest}'"
)
# Second Package File
dest = "/my/backups/test_trigger.tar-ab"
expect(Logger).to receive(:info).ordered.with(
"Syncing to '#{dest}'..."
)
expect(storage).to receive(:run).ordered.with(
"rsync --archive --arg1 --arg2 '#{package_files[1]}' '#{dest}'"
)
storage.send(:transfer!)
end
end # context 'local transfer'
context "remote transfer in :ssh mode" do
it "performs the transfer" do
storage = Storage::RSync.new(model) do |rsync|
rsync.host = "host.name"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path
expect(storage).to receive(:run).ordered.with(
%(ssh -p 22 host.name "mkdir -p 'backups'")
)
# First Package File
dest = "host.name:'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
%(rsync --archive -e "ssh -p 22" '#{package_files[0]}' #{dest})
)
# Second Package File
dest = "host.name:'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
%(rsync --archive -e "ssh -p 22" '#{package_files[1]}' #{dest})
)
storage.send(:transfer!)
end
it "uses additional options" do
storage = Storage::RSync.new(model) do |rsync|
rsync.host = "host.name"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.additional_ssh_options = "-i '/my/id_rsa'"
rsync.compress = true
rsync.additional_rsync_options = "--opt1"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path
expect(storage).to receive(:run).ordered.with(
"ssh -p 123 -l ssh_username -i '/my/id_rsa' " +
%(host.name "mkdir -p 'backups'")
)
# First Package File
dest = "host.name:'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "host.name:'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[1]}' #{dest}"
)
storage.send(:transfer!)
end
end # context 'remote transfer in :ssh mode'
context "remote transfer in :ssh_daemon mode" do
it "performs the transfer" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :ssh_daemon
rsync.host = "host.name"
rsync.path = "module/path"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path does nothing
# (a call to #run would be an unexpected expectation)
expect(FileUtils).to receive(:mkdir_p).never
# First Package File
dest = "host.name::'module/path/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
%(rsync --archive -e "ssh -p 22" '#{package_files[0]}' #{dest})
)
# Second Package File
dest = "host.name::'module/path/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
%(rsync --archive -e "ssh -p 22" '#{package_files[1]}' #{dest})
)
storage.send(:transfer!)
end
it "uses additional options, with password" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :ssh_daemon
rsync.host = "host.name"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.additional_ssh_options = "-i '/my/id_rsa'"
rsync.rsync_user = "rsync_username"
rsync.rsync_password = "secret"
rsync.compress = true
rsync.additional_rsync_options = "--opt1"
end
# write_password_file
password_file = double(File, path: "/path/to/password_file")
expect(Tempfile).to receive(:new).ordered
.with("backup-rsync-password").and_return(password_file)
expect(password_file).to receive(:write).ordered.with("secret")
expect(password_file).to receive(:close).ordered
# create_remote_path does nothing
# First Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='/path/to/password_file' " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='/path/to/password_file' " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[1]}' #{dest}"
)
# remove_password_file
expect(password_file).to receive(:delete).ordered
storage.send(:transfer!)
end
it "ensures temporary password file is removed" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :ssh_daemon
rsync.host = "host.name"
rsync.rsync_password = "secret"
end
# write_password_file
password_file = double(File, path: "/path/to/password_file")
expect(Tempfile).to receive(:new).ordered
.with("backup-rsync-password").and_return(password_file)
expect(password_file).to receive(:write).ordered.with("secret")
expect(password_file).to receive(:close).ordered
# create_remote_path does nothing
# First Package File (fails)
dest = "host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive " \
"--password-file='/path/to/password_file' " +
%(-e "ssh -p 22" ) +
"'#{package_files[0]}' #{dest}"
).and_raise("an error")
# remove_password_file
expect(password_file).to receive(:delete).ordered
expect do
storage.send(:transfer!)
end.to raise_error("an error")
end
it "uses additional options, with password_file" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :ssh_daemon
rsync.host = "host.name"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.additional_ssh_options = "-i '/my/id_rsa'"
rsync.rsync_user = "rsync_username"
rsync.rsync_password_file = "my/pwd_file"
rsync.compress = true
rsync.additional_rsync_options = "--opt1"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path does nothing
# First Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='#{File.expand_path("my/pwd_file")}' " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='#{File.expand_path("my/pwd_file")}' " +
%(-e "ssh -p 123 -l ssh_username -i '/my/id_rsa'" ) +
"'#{package_files[1]}' #{dest}"
)
storage.send(:transfer!)
end
end # context 'remote transfer in :ssh_daemon mode'
context "remote transfer in :rsync_daemon mode" do
it "performs the transfer" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :rsync_daemon
rsync.host = "host.name"
rsync.path = "module/path"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path does nothing
# First Package File
dest = "host.name::'module/path/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --port 873 '#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "host.name::'module/path/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --port 873 '#{package_files[1]}' #{dest}"
)
storage.send(:transfer!)
end
it "uses additional options, with password" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :rsync_daemon
rsync.host = "host.name"
rsync.port = 123
rsync.rsync_user = "rsync_username"
rsync.rsync_password = "secret"
rsync.compress = true
rsync.additional_rsync_options = "--opt1"
end
# write_password_file
password_file = double(File, path: "/path/to/password_file")
expect(Tempfile).to receive(:new).ordered
.with("backup-rsync-password").and_return(password_file)
expect(password_file).to receive(:write).ordered.with("secret")
expect(password_file).to receive(:close).ordered
# create_remote_path does nothing
# First Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='/path/to/password_file' --port 123 " \
"'#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='/path/to/password_file' --port 123 " \
"'#{package_files[1]}' #{dest}"
)
# remove_password_file!
expect(password_file).to receive(:delete).ordered
storage.send(:transfer!)
end
it "ensures temporary password file is removed" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :rsync_daemon
rsync.host = "host.name"
rsync.rsync_password = "secret"
end
# write_password_file
password_file = double(File, path: "/path/to/password_file")
expect(Tempfile).to receive(:new).ordered
.with("backup-rsync-password").and_return(password_file)
expect(password_file).to receive(:write).ordered.with("secret")
expect(password_file).to receive(:close).ordered
# create_remote_path does nothing
# First Package File (fails)
dest = "host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive " \
"--password-file='/path/to/password_file' --port 873 " \
"'#{package_files[0]}' #{dest}"
).and_raise("an error")
# remove_password_file
expect(password_file).to receive(:delete).ordered
expect do
storage.send(:transfer!)
end.to raise_error("an error")
end
it "uses additional options, with password_file" do
storage = Storage::RSync.new(model) do |rsync|
rsync.mode = :rsync_daemon
rsync.host = "host.name"
rsync.port = 123
rsync.rsync_user = "rsync_username"
rsync.rsync_password_file = "my/pwd_file"
rsync.compress = true
rsync.additional_rsync_options = "--opt1"
end
# write_password_file does nothing
expect(Tempfile).to receive(:new).never
# create_remote_path does nothing
# First Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-aa'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='#{File.expand_path("my/pwd_file")}' --port 123 " \
"'#{package_files[0]}' #{dest}"
)
# Second Package File
dest = "rsync_username@host.name::'backups/test_trigger.tar-ab'"
expect(storage).to receive(:run).ordered.with(
"rsync --archive --opt1 --compress " \
"--password-file='#{File.expand_path("my/pwd_file")}' --port 123 " \
"'#{package_files[1]}' #{dest}"
)
storage.send(:transfer!)
end
end # context 'remote transfer in :rsync_daemon mode'
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/local_spec.rb | spec/storage/local_spec.rb | require "spec_helper"
module Backup
describe Storage::Local do
let(:model) { Model.new(:test_trigger, "test label") }
let(:storage) { Storage::Local.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Storage::Base"
it_behaves_like "a storage that cycles"
describe "#initialize" do
it "provides default values" do
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.path).to eq "~/backups"
end
it "configures the storage" do
storage = Storage::Local.new(model, :my_id) do |local|
local.keep = 2
local.path = "/my/path"
end
expect(storage.storage_id).to eq "my_id"
expect(storage.keep).to be 2
expect(storage.path).to eq "/my/path"
end
end # describe '#initialize'
describe "#transfer!" do
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) do
File.expand_path(File.join("my/path/test_trigger", timestamp))
end
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
storage.path = "my/path"
end
after { Timecop.return }
context "when the storage is the last for the model" do
before do
model.storages << storage
end
it "moves the package files to their destination" do
expect(FileUtils).to receive(:mkdir_p).ordered.with(remote_path)
expect(Logger).to receive(:warn).never
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(FileUtils).to receive(:mv).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(FileUtils).to receive(:mv).ordered.with(src, dest)
storage.send(:transfer!)
end
end
context "when the storage is not the last for the model" do
before do
model.storages << storage
model.storages << Storage::Local.new(model)
end
it "logs a warning and copies the package files to their destination" do
expect(FileUtils).to receive(:mkdir_p).ordered.with(remote_path)
expect(Logger).to receive(:warn).ordered do |err|
expect(err).to be_an_instance_of Storage::Local::Error
expect(err.message).to eq <<-EOS.gsub(/^ +/, " ").strip
Storage::Local::Error: Local File Copy Warning!
The final backup file(s) for 'test label' (test_trigger)
will be *copied* to '#{remote_path}'
To avoid this, when using more than one Storage, the 'Local' Storage
should be added *last* so the files may be *moved* to their destination.
EOS
end
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(FileUtils).to receive(:cp).ordered.with(src, dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(FileUtils).to receive(:cp).ordered.with(src, dest)
storage.send(:transfer!)
end
end
end # describe '#transfer!'
describe "#remove!" do
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) do
File.expand_path(File.join("my/path/test_trigger", timestamp))
end
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp
)
end
before do
Timecop.freeze
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered
.with("Removing backup package dated #{timestamp}...")
expect(FileUtils).to receive(:rm_r).ordered.with(remote_path)
storage.send(:remove!, package)
end
end # describe '#remove!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/storage/qiniu_spec.rb | spec/storage/qiniu_spec.rb | require "spec_helper"
module Backup
describe Storage::Qiniu do
let(:model) { Model.new(:test_trigger, "test label") }
let(:required_config) do
proc do |s3|
s3.access_key = "my_access_key"
s3.secret_key = "my_secret_key"
s3.bucket = "my_bucket"
end
end
let(:storage) { Storage::Qiniu.new(model, &required_config) }
let(:s) { sequence "" }
describe "#initialize" do
it "provides default values" do
# required
expect(storage.bucket).to eq "my_bucket"
expect(storage.access_key).to eq "my_access_key"
expect(storage.secret_key).to eq "my_secret_key"
# defaults
expect(storage.storage_id).to be_nil
expect(storage.keep).to be_nil
expect(storage.path).to eq "backups"
end
it "requires access_key secret_key and bucket" do
expect do
Storage::Qiniu.new(model)
end.to raise_error StandardError, /#access_key, #secret_key, #bucket are all required/
end
it "establishes connection" do
expect(::Qiniu).to receive(:establish_connection!).with(access_key: "my_access_key", secret_key: "my_secret_key")
pre_config = required_config
Storage::Qiniu.new(model) do |qiniu|
pre_config.call(qiniu)
end
end
end
describe "#transfer!" do
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:uptoken) { "uptoken" }
before do
Timecop.freeze
storage.package.time = timestamp
allow(storage.package).to receive(:filenames).and_return(
["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
storage.path = "my/path"
allow(::Qiniu).to receive(:generate_upload_token).and_return(uptoken)
end
after { Timecop.return }
it "transfers the package files" do
src = File.join(Config.tmp_path, "test_trigger.tar-aa")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(::Qiniu).to receive(:upload_file).ordered.with(uptoken: uptoken,
bucket: "my_bucket",
file: src,
key: dest)
src = File.join(Config.tmp_path, "test_trigger.tar-ab")
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(Logger).to receive(:info).ordered.with("Storing '#{dest}'...")
expect(::Qiniu).to receive(:upload_file).ordered.with(uptoken: uptoken,
bucket: "my_bucket",
file: src,
key: dest)
storage.send(:transfer!)
end
end
describe "#remove" do
let(:timestamp) { Time.now.strftime("%Y.%m.%d.%H.%M.%S") }
let(:remote_path) { File.join("my/path/test_trigger", timestamp) }
let(:uptoken) { "uptoken" }
let(:package) do
double(
Package, # loaded from YAML storage file
trigger: "test_trigger",
time: timestamp,
filenames: ["test_trigger.tar-aa", "test_trigger.tar-ab"]
)
end
before do
Timecop.freeze
storage.path = "my/path"
end
after { Timecop.return }
it "removes the given package from the remote" do
expect(Logger).to receive(:info).ordered.with("Removing backup package dated #{timestamp}...")
dest = File.join(remote_path, "test_trigger.tar-aa")
expect(::Qiniu).to receive(:delete).ordered.with("my_bucket", dest)
dest = File.join(remote_path, "test_trigger.tar-ab")
expect(::Qiniu).to receive(:delete).ordered.with("my_bucket", dest)
storage.send(:remove!, package)
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/sandbox_file_utils.rb | spec/support/sandbox_file_utils.rb | #
# Provides the ability to perform +FileUtils+ actions, while restricting
# any destructive actions outside of the specified +sandbox_path+.
#
# == Usage
#
# To enable protection:
#
# require 'sandbox_file_utils'
#
# SandboxFileUtils.activate!
# SandboxFileUtils.sandbox_path = 'my_sandbox'
# # or
# # SandboxFileUtils.activate! 'my_sandbox'
#
# FileUtils.touch 'my_sandbox/file' # => OK
# FileUtils.touch 'file' # => Error
#
# To disable protection:
#
# SandboxFileUtils.deactivate!
# FileUtils.touch 'my_sandbox/file' # => OK
# FileUtils.touch 'file' # => OK
#
# # When re-activating, the currently set +sandbox_path+ will still be in effect.
# SandboxFileUtils.activate!
# FileUtils.touch 'my_sandbox/file' # => OK
# FileUtils.touch 'file' # => Error
#
# When disabling protection, you may also pass +:noop+ which will restore
# +::FileUtils+ to +FileUtils::NoWrite+.
#
# SandboxFileUtils.deactivate!(:noop)
# FileUtils.touch 'file' # => OK
# File.exist? 'file' # => false
#
# The +sandbox_path+ may be changed at any time.
#
# require 'sandbox_file_utils'
#
# SandboxFileUtils.activate! 'my_sandbox'
# FileUtils.touch 'my_sandbox/file' # => OK
# FileUtils.touch 'other_path/file' # => Error
#
# SandboxFileUtils.sandbox_path = 'other_path'
# FileUtils.touch 'other_path/file' # => OK
# FileUtils.touch 'my_sandbox/file' # => Error
#
# This module may also be used directly, with no activation required.
#
# require 'sandbox_file_utils'
#
# SandboxFileUtils.sandbox_path = 'my_sandbox'
# SandboxFileUtils.touch 'my_sandbox/file' # => OK
# SandboxFileUtils.touch 'other_path/file' # => Error
#
# == Module Functions
#
# The following are accessible and operate without restriction:
#
# pwd (alias: getwd)
# cd (alias: chdir)
# uptodate?
# compare_file (alias: identical? cmp)
# compare_stream
#
# The following are accessible, but will not allow operations on files or
# directories outside of the +sandbox_path+.
#
# No links may be created within the +sandbox_path+ to outside files or
# directories. Files may be copied from outside into the +sandbox_path+.
#
# Operations not permitted will raise an +Error+.
#
# mkdir
# mkdir_p (alias: makedirs mkpath)
# rmdir
# ln (alias: link)
# ln_s (alias: symlink)
# ln_sf
# cp (alias: copy)
# cp_r
# mv (alias: move)
# rm (alias: remove)
# rm_f (alias: safe_unlink)
# rm_r
# rm_rf (alias: rmtree)
# install
# chmod
# chmod_R
# chown
# chown_R
# touch
#
# The following low-level methods, normally available through +FileUtils+,
# will remain private and not be available:
#
# copy_entry
# copy_file
# copy_stream
# remove_entry_secure
# remove_entry
# remove_file
# remove_dir
#
require "fileutils"
module SandboxFileUtils
  class Error < StandardError; end

  class << self
    include FileUtils

    # Reference to the real FileUtils, kept so deactivate! can restore
    # ::FileUtils even while that constant points at this module.
    RealFileUtils = FileUtils

    # Sets the root path where restricted operations will be allowed.
    #
    # This is evaluated at the time of each method call, so it may be
    # changed at any time. It may be a relative or absolute path; if
    # relative, it is resolved against the current working directory.
    # The +sandbox_path+ itself may be created or removed by this module.
    #
    # An +Error+ is raised by any protected operation if this is not set.
    attr_accessor :sandbox_path

    # Returns whether or not SandboxFileUtils protection for +::FileUtils+
    # is active (i.e. ::FileUtils currently refers to this module).
    def activated?
      ::FileUtils == self
    end

    # Enables this module so that all calls to +::FileUtils+ are protected.
    #
    # If +path+ is given, it sets +sandbox_path+ - regardless of whether
    # this call returns +true+ or +false+.
    #
    # Returns +true+ if activation occurs.
    # Returns +false+ if +activated?+ is already +true+.
    def activate!(path = nil)
      path = path.to_s
      self.sandbox_path = path unless path.empty?
      return false if activated?
      Object.send(:remove_const, :FileUtils)
      Object.const_set(:FileUtils, self)
      true
    end

    # Disables this module by restoring +::FileUtils+ to the real
    # +FileUtils+ module, or to +FileUtils::NoWrite+ when +type+ is +:noop+.
    #
    # When deactivated, +sandbox_path+ retains its current value, so if
    # +activate!+ is called again +sandbox_path+ will still be set.
    #
    # Returns +true+ if deactivation occurs.
    # Returns +false+ if +activated?+ is already +false+.
    def deactivate!(type = :real)
      return false unless activated?
      Object.send(:remove_const, :FileUtils)
      if type == :noop
        Object.const_set(:FileUtils, RealFileUtils::NoWrite)
      else
        Object.const_set(:FileUtils, RealFileUtils)
      end
      true
    end

    # Read-only / query operations are allowed without restriction.
    # (FileUtils methods are mixed in as private; these are re-exposed.)
    %w[
      pwd getwd cd chdir uptodate? compare_file identical? cmp
      compare_stream
    ].each do |name|
      public :"#{ name }"
    end

    # Operations that take a single path list: every path in +list+ must
    # be inside the sandbox.
    %w[
      mkdir mkdir_p makedirs mkpath rmdir rm remove rm_f safe_unlink rm_r
      rm_rf rmtree touch
    ].each do |name|
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}(list, **options)
          protect!(list)
          super
        end
      EOS
    end

    # Copy-style operations: only the destination is restricted, so files
    # may be copied from outside into the sandbox.
    %w[cp copy cp_r install].each do |name|
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}(src, dest, **options)
          protect!(dest)
          super
        end
      EOS
    end

    # Link and move operations: BOTH ends are restricted. Checking only
    # +src+ (as previously done) allowed a sandboxed file to be moved, or
    # a link to be created, *outside* of the sandbox - a write outside
    # +sandbox_path+, which violates this module's contract.
    %w[ln link ln_s symlink ln_sf mv move].each do |name|
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}(src, dest, **options)
          protect!(src)
          protect!(dest)
          super
        end
      EOS
    end

    # Permission changes: every path in +list+ must be inside the sandbox.
    %w[chmod chmod_R].each do |name|
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}(mode, list, **options)
          protect!(list)
          super
        end
      EOS
    end

    # Ownership changes: every path in +list+ must be inside the sandbox.
    %w[chown chown_R].each do |name|
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}(user, group, list, **options)
          protect!(list)
          super
        end
      EOS
    end

    private

    # Raises Error unless every path in +list+ is the sandbox root itself
    # or lies underneath it. Paths are expanded first, so relative paths
    # and '..' segments cannot escape the check.
    def protect!(list)
      list = Array(list).flatten.map { |p| File.expand_path(p) }
      path = current_sandbox_path + "/"
      unless list.all? { |p| p.start_with?(path) || p == path.chomp("/") }
        raise Error, <<-EOS.gsub(/^ +/, ""), caller(1)
          path(s) outside of the current sandbox path were detected.
          sandbox_path: #{path}
          path(s) for the current operation:
          #{list.join($/)}
        EOS
      end
    end

    # Expanded +sandbox_path+ without a trailing slash.
    # Raises Error if +sandbox_path+ has not been set.
    def current_sandbox_path
      path = sandbox_path.to_s.chomp("/")
      raise Error, "sandbox_path must be set" if path.empty?
      File.expand_path(path)
    end
  end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/very_specific_error.rb | spec/support/very_specific_error.rb | class VerySpecificError < StandardError; end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/shared_examples/storage.rb | spec/support/shared_examples/storage.rb | shared_examples "a subclass of Storage::Base" do
let(:storage_name) { described_class.name.sub("Backup::", "") }
# Verifies every Storage::Base subclass keeps references to its model and
# package, and sanitizes the given storage_id for use in filenames.
describe "#initialize" do
it "sets a reference to the model" do
expect(storage.model).to be model
end
it "sets a reference to the package" do
expect(storage.package).to be model.package
end
# storage_id is used in YAML cycle-data filenames, so non-word
# characters are replaced with underscores.
it "cleans storage_id for filename use" do
block = respond_to?(:required_config) ? required_config : proc {}
storage = described_class.new(model, :my_id, &block)
expect(storage.storage_id).to eq "my_id"
storage = described_class.new(model, "My #1 ID", &block)
expect(storage.storage_id).to eq "My__1_ID"
end
end # describe '#initialize'
# Verifies #perform! logs start/finish (with the storage_id when given)
# and only cycles stored packages when `keep` is configured.
describe "#perform!" do
# Note that using expect(`storage).to receive(:cycle!).never` will cause
# respond_to?(:cycle!) to return true in Storage#perform! for RSync.
specify "does not cycle if keep is not set" do
expect(Backup::Logger).to receive(:info).with("#{storage_name} Started...")
expect(storage).to receive(:transfer!)
expect(storage).to receive(:cycle!).never
expect(Backup::Logger).to receive(:info).with("#{storage_name} Finished!")
storage.perform!
end
context "when a storage_id is given" do
specify "it is used in the log messages" do
block = respond_to?(:required_config) ? required_config : proc {}
storage = described_class.new(model, :my_id, &block)
expect(Backup::Logger).to receive(:info).with("#{storage_name} (my_id) Started...")
expect(storage).to receive(:transfer!)
expect(Backup::Logger).to receive(:info).with("#{storage_name} (my_id) Finished!")
storage.perform!
end
end
end # describe '#perform!'
end
# Shared examples for storages that support cycling: older packages beyond
# the `keep` setting are removed, cycle data is persisted to a YAML file
# under Config.data_path, and removal failures are logged as warnings.
shared_examples "a storage that cycles" do
let(:storage_name) { described_class.name.sub("Backup::", "") }
shared_examples "storage cycling" do
let(:pkg_a) { Backup::Package.new(model) }
let(:pkg_b) { Backup::Package.new(model) }
let(:pkg_c) { Backup::Package.new(model) }
before do
# Three previously-stored packages, oldest last; the current package
# is newest. Times are serialized in the storage's timestamp format.
storage.package.time = Time.now
pkg_a.time = Time.now - 10
pkg_b.time = Time.now - 20
pkg_c.time = Time.now - 30
stored_packages = [pkg_a, pkg_b, pkg_c]
(stored_packages + [storage.package]).each do |pkg|
pkg.time = pkg.time.strftime("%Y.%m.%d.%H.%M.%S")
end
expect(File).to receive(:exist?).with(yaml_file).and_return(true)
expect(File).to receive(:zero?).with(yaml_file).and_return(false)
# Psych 4 compatibility: newer rubies load cycle data with
# safe_load_file and must permit the Package class.
if YAML.respond_to? :safe_load_file
expect(YAML).to receive(:safe_load_file)
.with(yaml_file, permitted_classes: [Backup::Package])
else
expect(YAML).to receive(:load_file).with(yaml_file)
end.and_return(stored_packages)
allow(storage).to receive(:transfer!)
end
it "cycles packages" do
expect(storage).to receive(:remove!).with(pkg_b)
expect(storage).to receive(:remove!).with(pkg_c)
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(yaml_file))
file = double
expect(File).to receive(:open).with(yaml_file, "w").and_yield(file)
saved_packages = [storage.package, pkg_a]
expect(file).to receive(:write).with(saved_packages.to_yaml)
storage.perform!
end
it "cycles but does not remove packages marked :no_cycle" do
pkg_b.no_cycle = true
expect(storage).to receive(:remove!).with(pkg_b).never
expect(storage).to receive(:remove!).with(pkg_c)
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(yaml_file))
file = double
expect(File).to receive(:open).with(yaml_file, "w").and_yield(file)
saved_packages = [storage.package, pkg_a]
expect(file).to receive(:write).with(saved_packages.to_yaml)
storage.perform!
end
it "does cycle when the available packages are more than the keep setting" do
expect(storage).to receive(:remove!).with(pkg_a).never
expect(storage).to receive(:remove!).with(pkg_b)
expect(storage).to receive(:remove!).with(pkg_c)
storage.keep = 2
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(yaml_file))
file = double
expect(File).to receive(:open).with(yaml_file, "w").and_yield(file)
saved_packages = [storage.package, pkg_a]
expect(file).to receive(:write).with(saved_packages.to_yaml)
storage.perform!
end
it "does not cycle when the available packages are less than the keep setting" do
expect(storage).to receive(:remove!).with(pkg_a).never
expect(storage).to receive(:remove!).with(pkg_b).never
expect(storage).to receive(:remove!).with(pkg_c).never
storage.keep = 5
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(yaml_file))
file = double
expect(File).to receive(:open).with(yaml_file, "w").and_yield(file)
saved_packages = [storage.package, pkg_a, pkg_b, pkg_c]
expect(file).to receive(:write).with(saved_packages.to_yaml)
storage.perform!
end
# A failed remove! is logged as a Cycler::Error warning, but the
# package is still dropped from the saved cycle data.
it "warns if remove fails" do
expect(storage).to receive(:remove!).with(pkg_b).and_raise("error message")
expect(storage).to receive(:remove!).with(pkg_c)
allow(pkg_b).to receive(:filenames).and_return(["file1", "file2"])
expect(Backup::Logger).to receive(:warn) do |err|
expect(err).to be_an_instance_of Backup::Storage::Cycler::Error
expect(err.message).to include(
"There was a problem removing the following package:\n" \
" Trigger: test_trigger :: Dated: #{pkg_b.time}\n" \
" Package included the following 2 file(s):\n" \
" file1\n" \
" file2"
)
expect(err.message).to match("RuntimeError: error message")
end
expect(FileUtils).to receive(:mkdir_p).with(File.dirname(yaml_file))
file = double
expect(File).to receive(:open).with(yaml_file, "w").and_yield(file)
saved_packages = [storage.package, pkg_a]
expect(file).to receive(:write).with(saved_packages.to_yaml)
storage.perform!
end
end
# The storage_id, when present, is appended to the YAML filename.
context "with a storage_id" do
let(:storage) do
block = respond_to?(:required_config) ? required_config : proc {}
described_class.new(model, :my_id, &block)
end
let(:yaml_file) do
File.join(Backup::Config.data_path, "test_trigger",
"#{storage_name.split("::").last}-my_id.yml")
end
before { storage.keep = "2" } # value is typecast
include_examples "storage cycling"
end
context "without a storage_id" do
let(:yaml_file) do
File.join(Backup::Config.data_path, "test_trigger",
"#{storage_name.split("::").last}.yml")
end
before { storage.keep = 2 }
include_examples "storage cycling"
end
# `keep` may also be a Time; packages older than it are removed.
context "keep as a Time" do
let(:yaml_file) do
File.join(Backup::Config.data_path, "test_trigger",
"#{storage_name.split("::").last}.yml")
end
before { storage.keep = Time.now - 11 }
include_examples "storage cycling"
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/shared_examples/config_defaults.rb | spec/support/shared_examples/config_defaults.rb | shared_examples "a class that includes Config::Helpers" do
# Verifies Config::Helpers' `.defaults` DSL: every writer accessor on the
# class can be given a default value, which new instances pick up and
# which a configuration block may override per-instance.
describe "setting defaults" do
# All writer methods defined on the class itself (i.e. its accessors).
let(:accessor_names) do
(described_class.instance_methods - Class.methods)
.select { |method| method.to_s.end_with?("=") }
.map { |name| name.to_s.chomp("=") }
end
before do
# `default_overrides` lets including specs substitute values for
# accessors that typecast their input (e.g. keep = "2").
overrides = respond_to?(:default_overrides) ? default_overrides : {}
names = accessor_names
described_class.defaults do |klass|
names.each do |name|
val = overrides[name] || "default_#{name}"
klass.send("#{name}=", val)
end
end
end
after { described_class.clear_defaults! }
it "allows accessors to be configured with default values" do
overrides = respond_to?(:default_overrides) ? default_overrides : {}
klass = respond_to?(:model) ?
described_class.new(model) : described_class.new
accessor_names.each do |name|
expected = overrides[name] || "default_#{name}"
expect(klass.send(name)).to eq expected
end
end
it "allows defaults to be overridden" do
overrides = respond_to?(:new_overrides) ? new_overrides : {}
names = accessor_names
block = proc do |klass|
names.each do |name|
val = overrides[name] || "new_#{name}"
klass.send("#{name}=", val)
end
end
klass = respond_to?(:model) ?
described_class.new(model, &block) : described_class.new(&block)
names.each do |name|
expected = overrides[name] || "new_#{name}"
expect(klass.send(name)).to eq expected
end
end
end # describe 'setting defaults'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/shared_examples/notifier.rb | spec/support/shared_examples/notifier.rb | shared_examples "a subclass of Notifier::Base" do
let(:notifier) { described_class.new(model) }
let(:notifier_name) { described_class.name.sub("Backup::", "") }
# Verifies Notifier::Base#perform! dispatches notify!(:success/:warning/
# :failure) according to the model's exit_status and the on_success/
# on_warning/on_failure flags, logs (never raises) notify! exceptions,
# and retries failed attempts up to max_retries.
describe "#perform" do
# exit_status 0: success, no warnings.
context "when the model succeeded without warnings" do
before { allow(model).to receive(:exit_status).and_return(0) }
context "when notify_on_success is true" do
before do
notifier.on_success = true
notifier.on_warning = false
notifier.on_failure = false
end
it "sends a notification" do
expect(Backup::Logger).to receive(:info).with(
"Sending notification using #{notifier_name}..."
)
expect(notifier).to receive(:notify!).with(:success)
notifier.perform!
end
end
context "when notify_on_success is false" do
before do
notifier.on_success = false
notifier.on_warning = true
notifier.on_failure = true
end
it "does nothing" do
expect(Backup::Logger).to receive(:info).never
expect(notifier).to receive(:notify!).never
notifier.perform!
end
end
end
# exit_status 1: success with warnings - either on_success or
# on_warning triggers a :warning notification.
context "when the model succeeded with warnings" do
before { allow(model).to receive(:exit_status).and_return(1) }
context "when notify_on_success is true" do
before do
notifier.on_success = true
notifier.on_warning = false
notifier.on_failure = false
end
it "sends a notification" do
expect(Backup::Logger).to receive(:info).with(
"Sending notification using #{notifier_name}..."
)
expect(notifier).to receive(:notify!).with(:warning)
notifier.perform!
end
end
context "when notify_on_warning is true" do
before do
notifier.on_success = false
notifier.on_warning = true
notifier.on_failure = false
end
it "sends a notification" do
expect(Backup::Logger).to receive(:info).with(
"Sending notification using #{notifier_name}..."
)
expect(notifier).to receive(:notify!).with(:warning)
notifier.perform!
end
end
context "when notify_on_success and notify_on_warning are false" do
before do
notifier.on_success = false
notifier.on_warning = false
notifier.on_failure = true
end
it "does nothing" do
expect(Backup::Logger).to receive(:info).never
expect(notifier).to receive(:notify!).never
notifier.perform!
end
end
end
# exit_status 2: non-fatal failure - only on_failure matters.
context "when the model failed (non-fatal)" do
before { allow(model).to receive(:exit_status).and_return(2) }
context "when notify_on_failure is true" do
before do
notifier.on_success = false
notifier.on_warning = false
notifier.on_failure = true
end
it "sends a notification" do
expect(Backup::Logger).to receive(:info).with(
"Sending notification using #{notifier_name}..."
)
expect(notifier).to receive(:notify!).with(:failure)
notifier.perform!
end
end
context "when notify_on_failure is false" do
before do
notifier.on_success = true
notifier.on_warning = true
notifier.on_failure = false
end
it "does nothing" do
expect(Backup::Logger).to receive(:info).never
expect(notifier).to receive(:notify!).never
notifier.perform!
end
end
end
# exit_status 3: fatal failure - same dispatch as non-fatal.
context "when the model failed (fatal)" do
before { allow(model).to receive(:exit_status).and_return(3) }
context "when notify_on_failure is true" do
before do
notifier.on_success = false
notifier.on_warning = false
notifier.on_failure = true
end
it "sends a notification" do
expect(Backup::Logger).to receive(:info).with(
"Sending notification using #{notifier_name}..."
)
expect(notifier).to receive(:notify!).with(:failure)
notifier.perform!
end
end
context "when notify_on_failure is false" do
before do
notifier.on_success = true
notifier.on_warning = true
notifier.on_failure = false
end
it "does nothing" do
expect(Backup::Logger).to receive(:info).never
expect(notifier).to receive(:notify!).never
notifier.perform!
end
end
end
# notify! exceptions (even Exception) must never propagate out of
# perform!; they are wrapped in Notifier::Error and logged.
specify "only logs exceptions" do
allow(model).to receive(:exit_status).and_return(0)
expect(notifier).to receive(:notify!).with(:success)
.and_raise(Exception.new("error message"))
expect(Backup::Logger).to receive(:error) do |err|
expect(err).to be_an_instance_of Backup::Notifier::Error
expect(err.message).to match(/#{ notifier_name } Failed!/)
expect(err.message).to match(/error message/)
end
notifier.perform!
end
# Each retry waits retry_waitsec and logs "Retry #n of max"; after
# max_retries the final failure is logged as an error.
specify "retries failed attempts" do
allow(model).to receive(:exit_status).and_return(0)
notifier.max_retries = 2
logger_calls = 0
expect(Backup::Logger).to receive(:info).exactly(3).times do |arg|
logger_calls += 1
case logger_calls
when 1
expect(arg).to eq "Sending notification using #{notifier_name}..."
when 2
expect(arg).to be_an_instance_of Backup::Notifier::Error
expect(arg.message).to match("RuntimeError: standard error")
expect(arg.message).to match("Retry #1 of 2.")
when 3
expect(arg).to be_an_instance_of Backup::Notifier::Error
expect(arg.message).to match("Timeout::Error")
expect(arg.message).to match("Retry #2 of 2.")
end
end
expect(notifier).to receive(:sleep).with(30).twice
expect(notifier).to receive(:notify!).ordered.and_raise("standard error")
expect(notifier).to receive(:notify!).ordered.and_raise(Timeout::Error.new)
expect(notifier).to receive(:notify!).ordered.and_raise("final error")
expect(Backup::Logger).to receive(:error).ordered do |err|
expect(err).to be_an_instance_of Backup::Notifier::Error
expect(err.message).to match(/#{ notifier_name } Failed!/)
expect(err.message).to match(/final error/)
end
notifier.perform!
end
end # describe '#perform'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/shared_examples/database.rb | spec/support/shared_examples/database.rb | shared_examples "a subclass of Database::Base" do
# Verifies Database::Base subclasses keep a model reference, sanitize the
# database_id for filename use, and derive dump_path under tmp_path.
describe "#initialize" do
it "sets a reference to the model" do
expect(db.model).to be(model)
end
# database_id is used in dump filenames, so non-word characters are
# replaced with underscores.
it "cleans database_id for filename use" do
block = respond_to?(:required_config) ? required_config : proc {}
db = described_class.new(model, :my_id, &block)
expect(db.database_id).to eq "my_id"
db = described_class.new(model, "My #1 ID", &block)
expect(db.database_id).to eq "My__1_ID"
end
it "sets the dump_path" do
expect(db.dump_path).to eq(
File.join(Backup::Config.tmp_path, "test_trigger", "databases")
)
end
end # describe '#initialize'
# #prepare! must create the dump_path directory tree before dumping.
describe "#prepare!" do
it "creates the dump_path" do
expect(FileUtils).to receive(:mkdir_p).with(db.dump_path)
db.send(:prepare!)
end
end
# Verifies #dump_filename: when multiple databases of the same class are
# added to one model without explicit ids, a warning is logged and a
# random 5-digit id suffix is generated to keep dump filenames unique.
describe "#dump_filename" do
let(:klass_name) { described_class.name.split("::").last }
before do
allow_any_instance_of(described_class).to receive(:sleep)
end
it "logs warning when model is created if database_id is needed" do
expect(Backup::Logger).to receive(:warn) do |err|
expect(err)
.to be_an_instance_of Backup::Database::Error
end
klass = described_class
block = respond_to?(:required_config) ? required_config : proc {}
Backup::Model.new(:test_model, "test model") do
database klass, nil, &block
database klass, :my_id, &block
end
end
it "auto-generates a database_id if needed" do
klass = described_class
block = respond_to?(:required_config) ? required_config : proc {}
test_model = Backup::Model.new(:test_model, "test model") do
database klass, nil, &block
database klass, :my_id, &block
end
db1, db2 = test_model.databases
expect(db1.send(:dump_filename)).to match(/#{ klass_name }-\d{5}/)
expect(db2.send(:dump_filename)).to eq "#{klass_name}-my_id"
end
# A single database of a class needs no id; the bare class name is used.
it "does not warn or auto-generate database_id if only one class defined" do
expect(Backup::Logger).to receive(:warn).never
klass = described_class
block = respond_to?(:required_config) ? required_config : proc {}
test_model = Backup::Model.new(:test_model, "test model") do
database klass, nil, &block
end
db = test_model.databases.first
expect(db.send(:dump_filename)).to eq klass_name
end
end # describe '#dump_filename'
# Verifies log! messages include the database_id (in parentheses) when
# one was given, and omit it otherwise.
describe "log!" do
let(:klass_name) { described_class.name.to_s.sub("Backup::", "") }
specify "with a database_id" do
block = respond_to?(:required_config) ? required_config : proc {}
db = described_class.new(model, :my_id, &block)
expect(Backup::Logger).to receive(:info).with("#{klass_name} (my_id) Started...")
db.send(:log!, :started)
expect(Backup::Logger).to receive(:info).with("#{klass_name} (my_id) Finished!")
db.send(:log!, :finished)
end
specify "without a database_id" do
expect(Backup::Logger).to receive(:info).with("#{klass_name} Started...")
db.send(:log!, :started)
expect(Backup::Logger).to receive(:info).with("#{klass_name} Finished!")
db.send(:log!, :finished)
end
end # describe 'log!'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/support/shared_examples/syncer/cloud.rb | spec/support/shared_examples/syncer/cloud.rb | shared_examples "a subclass of Syncer::Cloud::Base" do
let(:syncer_name) { described_class.name.sub("Backup::", "") }
let(:s) { sequence "" }
# Remote paths are always relative; a leading '/' on #path is stripped.
describe "#initialize" do
it "strips leading path separator" do
pre_config = required_config
klass = described_class.new do |syncer|
pre_config.call(syncer)
syncer.path = "/this/path"
end
expect(klass.path).to eq "this/path"
end
end # describe '#initialize'
# Verifies Syncer::Cloud::Base#perform!: local files are compared to the
# remote by MD5; changed/missing files are transferred, orphans are left
# or removed (mirror mode), oversized files are skipped with a warning,
# and upload failures abort with Syncer::Cloud::Error. Exercised both
# single-threaded and with a thread pool.
describe "#perform" do
let(:syncer) { described_class.new(&required_config) }
let(:cloud_io) { double }
# Five local files: two unchanged, two changed, one missing remotely.
let(:find_md5_data) do
[
["/local/path/sync_dir/unchanged_01", "unchanged_01_md5"],
["/local/path/sync_dir/sub_dir/unchanged_02", "unchanged_02_md5"],
["/local/path/sync_dir/changed_01", "changed_01_md5"],
["/local/path/sync_dir/sub_dir/changed_02", "changed_02_md5"],
["/local/path/sync_dir/missing_01", "missing_01_md5"]
].map do |path, md5|
file = Backup::Syncer::Cloud::LocalFile.new(path)
file.md5 = md5
file
end
end
# Remote listing keyed by relative path; includes two orphans with no
# local counterpart.
let(:remote_files_data) do
{
"unchanged_01" => "unchanged_01_md5",
"sub_dir/unchanged_02" => "unchanged_02_md5",
"changed_01" => "changed_01_md5_old",
"sub_dir/changed_02" => "changed_02_md5_old",
"orphan_01" => "orphan_01_md5",
"sub_dir/orphan_02" => "orphan_02_md5"
}
end
before do
syncer.path = "my_backups"
syncer.directories { add "/local/path/sync_dir" }
allow(syncer).to receive(:cloud_io).and_return(cloud_io)
allow(cloud_io).to receive(:upload)
allow(cloud_io).to receive(:delete)
allow(File).to receive(:exist?).and_return(true)
end
context "when no local or remote files are found" do
before do
allow(syncer).to receive(:get_remote_files)
.with("my_backups/sync_dir").and_return({})
expect(Backup::Syncer::Cloud::LocalFile).to receive(:find_md5)
.with("/local/path/sync_dir", []).and_return([])
end
it "does not attempt to sync" do
expected_messages = <<-EOS.gsub(/^ +/, "").chomp
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
No local or remote files found
Summary:
Transferred Files: 0
Orphaned Files: 0
Unchanged Files: 0
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(false)
expect(
Backup::Logger.messages.map(&:lines).flatten.map(&:strip).join("\n")
).to eq expected_messages
end
end
context "without threads" do
before do
allow(syncer).to receive(:get_remote_files)
.with("my_backups/sync_dir").and_return(remote_files_data)
expect(Backup::Syncer::Cloud::LocalFile).to receive(:find_md5)
.with("/local/path/sync_dir", []).and_return(find_md5_data)
end
context "without mirror" do
it "leaves orphaned files" do
expected_messages = <<-EOS.gsub(/^ +/, "").chomp
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
[transferring] 'my_backups/sync_dir/changed_01'
[transferring] 'my_backups/sync_dir/missing_01'
[transferring] 'my_backups/sync_dir/sub_dir/changed_02'
[orphaned] 'my_backups/sync_dir/orphan_01'
[orphaned] 'my_backups/sync_dir/sub_dir/orphan_02'
Summary:
Transferred Files: 3
Orphaned Files: 2
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(false)
expect(
Backup::Logger.messages.map(&:lines).flatten.map(&:strip).join("\n")
).to eq expected_messages
end
end # context 'without mirror'
context "with mirror" do
before { syncer.mirror = true }
it "deletes orphaned files" do
expected_messages = <<-EOS.gsub(/^ +/, "").chomp
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
[transferring] 'my_backups/sync_dir/changed_01'
[transferring] 'my_backups/sync_dir/missing_01'
[transferring] 'my_backups/sync_dir/sub_dir/changed_02'
[removing] 'my_backups/sync_dir/orphan_01'
[removing] 'my_backups/sync_dir/sub_dir/orphan_02'
Summary:
Transferred Files: 3
Deleted Files: 2
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(false)
expect(
Backup::Logger.messages.map(&:lines).flatten.map(&:strip).join("\n")
).to eq expected_messages
end
it "warns if delete fails" do
allow(cloud_io).to receive(:delete).and_raise("Delete Error")
expected_messages = <<-EOS.gsub(/^ +/, "").chomp
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
[transferring] 'my_backups/sync_dir/changed_01'
[transferring] 'my_backups/sync_dir/missing_01'
[transferring] 'my_backups/sync_dir/sub_dir/changed_02'
[removing] 'my_backups/sync_dir/orphan_01'
[removing] 'my_backups/sync_dir/sub_dir/orphan_02'
Syncer::Cloud::Error: Delete Operation Failed
--- Wrapped Exception ---
RuntimeError: Delete Error
Summary:
Transferred Files: 3
Attempted to Delete: 2 (See log messages for actual results)
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(true)
expect(
Backup::Logger.messages.map(&:lines).flatten.map(&:strip).join("\n")
).to eq expected_messages
end
end # context 'with mirror'
# A CloudIO::FileSizeError skips just that file and warns.
it "skips files that are too large" do
allow(cloud_io).to receive(:upload).with(
"/local/path/sync_dir/changed_01", "my_backups/sync_dir/changed_01"
).and_raise(Backup::CloudIO::FileSizeError)
expected_messages = <<-EOS.gsub(/^ +/, "").chomp
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
[transferring] 'my_backups/sync_dir/changed_01'
Syncer::Cloud::Error: Skipping 'my_backups/sync_dir/changed_01'
--- Wrapped Exception ---
CloudIO::FileSizeError
[transferring] 'my_backups/sync_dir/missing_01'
[transferring] 'my_backups/sync_dir/sub_dir/changed_02'
[orphaned] 'my_backups/sync_dir/orphan_01'
[orphaned] 'my_backups/sync_dir/sub_dir/orphan_02'
Summary:
Transferred Files: 2
Orphaned Files: 2
Unchanged Files: 2
Skipped Files: 1
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(true)
expect(
Backup::Logger.messages.map(&:lines).flatten.map(&:strip).join("\n")
).to eq expected_messages
end
it "logs and raises error on upload failure" do
allow(cloud_io).to receive(:upload).and_raise("upload failure")
expect(Backup::Logger).to receive(:error) do |err|
expect(err.message).to eq "upload failure"
end
expect do
syncer.perform!
end.to raise_error(Backup::Syncer::Cloud::Error)
end
end # context 'without threads'
# Same scenarios with a thread pool; log ordering is nondeterministic,
# so only the head and tail of the output are asserted.
context "with threads" do
before do
allow(syncer).to receive(:get_remote_files)
.with("my_backups/sync_dir").and_return(remote_files_data)
expect(Backup::Syncer::Cloud::LocalFile).to receive(:find_md5)
.with("/local/path/sync_dir", []).and_return(find_md5_data)
syncer.thread_count = 20
allow(syncer).to receive(:sleep) # quicker tests
end
context "without mirror" do
it "leaves orphaned files" do
expected_head = <<-EOS.gsub(/^ +/, "")
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
Using 7 Threads
EOS
expected_tail = <<-EOS.gsub(/^ +/, "").chomp
[orphaned] 'my_backups/sync_dir/orphan_01'
[orphaned] 'my_backups/sync_dir/sub_dir/orphan_02'
Summary:
Transferred Files: 3
Orphaned Files: 2
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.mirror = false
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(false)
messages = Backup::Logger.messages
.map(&:lines).flatten.map(&:strip).join("\n")
expect(messages).to start_with expected_head
expect(messages).to end_with expected_tail
end
end # context 'without mirror'
context "with mirror" do
before { syncer.mirror = true }
it "deletes orphaned files" do
expected_head = <<-EOS.gsub(/^ +/, "")
#{syncer_name} Started...
Gathering remote data for 'my_backups/sync_dir'...
Gathering local data for '/local/path/sync_dir'...
Syncing...
Using 7 Threads
EOS
expected_tail = <<-EOS.gsub(/^ +/, "").chomp
[removing] 'my_backups/sync_dir/orphan_01'
[removing] 'my_backups/sync_dir/sub_dir/orphan_02'
Summary:
Transferred Files: 3
Deleted Files: 2
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(false)
messages = Backup::Logger.messages
.map(&:lines).flatten.map(&:strip).join("\n")
expect(messages).to start_with expected_head
expect(messages).to end_with expected_tail
end
it "warns if delete fails" do
allow(cloud_io).to receive(:delete).and_raise("Delete Error")
expected_tail = <<-EOS.gsub(/^ +/, "").chomp
Summary:
Transferred Files: 3
Attempted to Delete: 2 (See log messages for actual results)
Unchanged Files: 2
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(true)
messages = Backup::Logger.messages
.map(&:lines).flatten.map(&:strip).join("\n")
expect(messages).to end_with expected_tail
expect(messages).to include(<<-EOS.gsub(/^ +/, ""))
Syncer::Cloud::Error: Delete Operation Failed
--- Wrapped Exception ---
RuntimeError: Delete Error
EOS
end
end # context 'with mirror'
it "skips files that are too large" do
allow(cloud_io).to receive(:upload).with(
"/local/path/sync_dir/changed_01", "my_backups/sync_dir/changed_01"
).and_raise(Backup::CloudIO::FileSizeError)
expected_tail = <<-EOS.gsub(/^ +/, "").chomp
Summary:
Transferred Files: 2
Orphaned Files: 2
Unchanged Files: 2
Skipped Files: 1
#{syncer_name} Finished!
EOS
syncer.perform!
expect(Backup::Logger.has_warnings?).to be(true)
messages = Backup::Logger.messages
.map(&:lines).flatten.map(&:strip).join("\n")
expect(messages).to end_with expected_tail
expect(messages).to include(<<-EOS.gsub(/^ +/, ""))
Syncer::Cloud::Error: Skipping 'my_backups/sync_dir/changed_01'
--- Wrapped Exception ---
CloudIO::FileSizeError
EOS
end
it "logs and raises error on upload failure" do
allow(cloud_io).to receive(:upload).and_raise("upload failure")
expect(Backup::Logger).to receive(:error).at_least(1).times do |err|
expect(err.message).to eq "upload failure"
end
expect do
syncer.perform!
end.to raise_error(Backup::Syncer::Cloud::Error)
end
end # context 'with threads'
end # describe '#perform'
end # shared_examples 'a subclass of Syncer::Cloud::Base'
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/mail_spec.rb | spec/notifier/mail_spec.rb | require "spec_helper"
module Backup
describe Notifier::Mail do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Mail.new(model) }
before do
allow_any_instance_of(Notifier::Mail).to receive(:utility)
.with(:sendmail).and_return("/path/to/sendmail")
allow_any_instance_of(Notifier::Mail).to receive(:utility)
.with(:exim).and_return("/path/to/exim")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.delivery_method).to be_nil
expect(notifier.to).to be_nil
expect(notifier.from).to be_nil
expect(notifier.cc).to be_nil
expect(notifier.bcc).to be_nil
expect(notifier.reply_to).to be_nil
expect(notifier.address).to be_nil
expect(notifier.port).to be_nil
expect(notifier.domain).to be_nil
expect(notifier.user_name).to be_nil
expect(notifier.password).to be_nil
expect(notifier.authentication).to be_nil
expect(notifier.encryption).to eq :starttls
expect(notifier.openssl_verify_mode).to be_nil
expect(notifier.sendmail_args).to be_nil
expect(notifier.exim_args).to be_nil
expect(notifier.mail_folder).to be_nil
expect(notifier.send_log_on).to eq [:warning, :failure]
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Mail.new(model) do |mail|
mail.delivery_method = :smtp
mail.to = "my.receiver.email@gmail.com"
mail.from = "my.sender.email@gmail.com"
mail.cc = "my.cc.email@gmail.com"
mail.bcc = "my.bcc.email@gmail.com"
mail.reply_to = "my.reply_to.email@gmail.com"
mail.address = "smtp.gmail.com"
mail.port = 587
mail.domain = "your.host.name"
mail.user_name = "user"
mail.password = "secret"
mail.authentication = "plain"
mail.encryption = :none
mail.openssl_verify_mode = :none
mail.sendmail_args = "-i -t -X/tmp/traffic.log"
mail.exim_args = "-i -t -X/tmp/traffic.log"
mail.mail_folder = "/path/to/backup/mails"
mail.send_log_on = [:success, :warning, :failure]
mail.on_success = false
mail.on_warning = false
mail.on_failure = false
mail.max_retries = 5
mail.retry_waitsec = 10
end
expect(notifier.delivery_method).to eq :smtp
expect(notifier.to).to eq "my.receiver.email@gmail.com"
expect(notifier.from).to eq "my.sender.email@gmail.com"
expect(notifier.cc).to eq "my.cc.email@gmail.com"
expect(notifier.bcc).to eq "my.bcc.email@gmail.com"
expect(notifier.reply_to).to eq "my.reply_to.email@gmail.com"
expect(notifier.address).to eq "smtp.gmail.com"
expect(notifier.port).to eq 587
expect(notifier.domain).to eq "your.host.name"
expect(notifier.user_name).to eq "user"
expect(notifier.password).to eq "secret"
expect(notifier.authentication).to eq "plain"
expect(notifier.encryption).to eq :none
expect(notifier.openssl_verify_mode).to eq :none
expect(notifier.sendmail_args).to eq "-i -t -X/tmp/traffic.log"
expect(notifier.exim_args).to eq "-i -t -X/tmp/traffic.log"
expect(notifier.mail_folder).to eq "/path/to/backup/mails"
expect(notifier.send_log_on).to eq [:success, :warning, :failure]
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:message) { "[Backup::%s] test label (test_trigger)" }
before do
notifier.delivery_method = :test
notifier.to = "to@email"
notifier.from = "from@email"
::Mail::TestMailer.deliveries.clear
allow(Logger).to receive(:messages).and_return([
double(Logger::Message, formatted_lines: ["line 1", "line 2"]),
double(Logger::Message, formatted_lines: ["line 3"])
])
time = Time.now
allow(model).to receive(:time).and_return(time.strftime("%Y.%m.%d.%H.%M.%S"))
allow(model).to receive(:started_at).and_return(time)
allow(model).to receive(:finished_at).and_return(time + 5)
end
context "when status is :success" do
context "when send_log_on includes :success" do
before { notifier.send_log_on = [:success, :warning, :failure] }
it "sends a Success email with an attached log" do
notifier.send(:notify!, :success)
sent_message = ::Mail::TestMailer.deliveries.first
filename = "#{model.time}.#{model.trigger}.log"
expect(sent_message.subject).to eq message % "Success"
expect(sent_message.body.multipart?).to eq(true)
expect(sent_message.attachments[filename].read)
.to eq "line 1\nline 2\nline 3"
expect(sent_message.text_part).to be_an_instance_of ::Mail::Part
expect(sent_message.text_part.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Completed Successfully!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
See the attached backup log for details.
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
context "when send_log_on does not include :success" do
it "sends a Success email with no log attached" do
notifier.send(:notify!, :success)
sent_message = ::Mail::TestMailer.deliveries.first
expect(sent_message.subject).to eq message % "Success"
expect(sent_message.multipart?).to eq(false)
expect(sent_message.has_attachments?).to eq(false)
expect(sent_message.body).to be_an_instance_of ::Mail::Body
expect(sent_message.body.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Completed Successfully!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
end # context 'when status is :success'
context "when status is :warning" do
context "when send_log_on includes :warning" do
it "sends a Warning email with an attached log" do
notifier.send(:notify!, :warning)
sent_message = ::Mail::TestMailer.deliveries.first
filename = "#{model.time}.#{model.trigger}.log"
expect(sent_message.subject).to eq message % "Warning"
expect(sent_message.body.multipart?).to eq(true)
expect(sent_message.attachments[filename].read)
.to eq "line 1\nline 2\nline 3"
expect(sent_message.text_part).to be_an_instance_of ::Mail::Part
expect(sent_message.text_part.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Completed Successfully (with Warnings)!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
See the attached backup log for details.
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
context "when send_log_on does not include :warning" do
before { notifier.send_log_on = [:success, :failure] }
it "sends a Warning email with no log attached" do
notifier.send(:notify!, :warning)
sent_message = ::Mail::TestMailer.deliveries.first
expect(sent_message.subject).to eq message % "Warning"
expect(sent_message.multipart?).to eq(false)
expect(sent_message.has_attachments?).to eq(false)
expect(sent_message.body).to be_an_instance_of ::Mail::Body
expect(sent_message.body.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Completed Successfully (with Warnings)!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
end # context 'when status is :warning'
context "when status is :failure" do
context "when send_log_on includes :failure" do
it "sends a Failure email with an attached log" do
notifier.send(:notify!, :failure)
sent_message = ::Mail::TestMailer.deliveries.first
filename = "#{model.time}.#{model.trigger}.log"
expect(sent_message.subject).to eq message % "Failure"
expect(sent_message.body.multipart?).to eq(true)
expect(sent_message.attachments[filename].read)
.to eq "line 1\nline 2\nline 3"
expect(sent_message.text_part).to be_an_instance_of ::Mail::Part
expect(sent_message.text_part.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Failed!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
See the attached backup log for details.
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
context "when send_log_on does not include :failure" do
before { notifier.send_log_on = [:success, :warning] }
it "sends a Warning email with no log attached" do
notifier.send(:notify!, :failure)
sent_message = ::Mail::TestMailer.deliveries.first
expect(sent_message.subject).to eq message % "Failure"
expect(sent_message.multipart?).to eq(false)
expect(sent_message.has_attachments?).to eq(false)
expect(sent_message.body).to be_an_instance_of ::Mail::Body
expect(sent_message.body.decoded).to eq <<-EOS.gsub(/^ +/, "")
Backup Failed!
Job: test label (test_trigger)
Started: #{model.started_at}
Finished: #{model.finished_at}
Duration: 00:00:05
#{"=" * 75}
Backup v#{VERSION}
Ruby: #{RUBY_DESCRIPTION}
Project Home: https://github.com/backup/backup
Documentation: http://backup.github.io/backup
Issue Tracker: https://github.com/backup/backup/issues
EOS
end
end
end # context 'when status is :failure'
end # describe '#notify!'
describe "#new_email" do
context "when no delivery_method is set" do
before { notifier.delivery_method = nil }
it "defaults to :smtp" do
email = notifier.send(:new_email)
expect(email).to be_an_instance_of ::Mail::Message
expect(email.delivery_method).to be_an_instance_of ::Mail::SMTP
end
end
context "when delivery_method is :smtp" do
let(:notifier) do
Notifier::Mail.new(model) do |mail|
mail.delivery_method = :smtp
mail.to = "my.receiver.email@gmail.com"
mail.from = "my.sender.email@gmail.com"
mail.cc = "my.cc.email@gmail.com"
mail.bcc = "my.bcc.email@gmail.com"
mail.reply_to = "my.reply_to.email@gmail.com"
mail.address = "smtp.gmail.com"
mail.port = 587
mail.domain = "your.host.name"
mail.user_name = "user"
mail.password = "secret"
mail.authentication = "plain"
mail.encryption = :starttls
mail.openssl_verify_mode = :none
end
end
it "should set the proper options" do
email = notifier.send(:new_email)
expect(email.delivery_method).to be_an_instance_of ::Mail::SMTP
expect(email.to).to eq ["my.receiver.email@gmail.com"]
expect(email.from).to eq ["my.sender.email@gmail.com"]
expect(email.cc).to eq ["my.cc.email@gmail.com"]
expect(email.bcc).to eq ["my.bcc.email@gmail.com"]
expect(email.reply_to).to eq ["my.reply_to.email@gmail.com"]
settings = email.delivery_method.settings
expect(settings[:address]).to eq "smtp.gmail.com"
expect(settings[:port]).to eq 587
expect(settings[:domain]).to eq "your.host.name"
expect(settings[:user_name]).to eq "user"
expect(settings[:password]).to eq "secret"
expect(settings[:authentication]).to eq "plain"
expect(settings[:enable_starttls_auto]).to be(true)
expect(settings[:openssl_verify_mode]).to eq :none
expect(settings[:ssl]).to be(false)
expect(settings[:tls]).to be(false)
end
it "should properly set other encryption settings" do
notifier.encryption = :ssl
email = notifier.send(:new_email)
settings = email.delivery_method.settings
expect(settings[:enable_starttls_auto]).to be(false)
expect(settings[:ssl]).to be(true)
expect(settings[:tls]).to be(false)
notifier.encryption = :tls
email = notifier.send(:new_email)
settings = email.delivery_method.settings
expect(settings[:enable_starttls_auto]).to be(false)
expect(settings[:ssl]).to be(false)
expect(settings[:tls]).to be(true)
end
it "should not override mail smtp domain setting" do
Mail.defaults do
delivery_method :smtp, domain: "localhost.localdomain"
end
notifier.domain = nil
email = notifier.send(:new_email)
settings = email.delivery_method.settings
expect(settings[:domain]).to eq "localhost.localdomain"
end
end
context "when delivery_method is :sendmail" do
let(:notifier) do
Notifier::Mail.new(model) do |mail|
mail.delivery_method = :sendmail
mail.to = "my.receiver.email@gmail.com"
mail.from = "my.sender.email@gmail.com"
mail.cc = "my.cc.email@gmail.com"
mail.bcc = "my.bcc.email@gmail.com"
mail.reply_to = "my.reply_to.email@gmail.com"
mail.sendmail_args = "-i -t -X/tmp/traffic.log"
end
end
it "should set the proper options" do
email = notifier.send(:new_email)
expect(email.delivery_method).to be_an_instance_of ::Mail::Sendmail
expect(email.to).to eq ["my.receiver.email@gmail.com"]
expect(email.from).to eq ["my.sender.email@gmail.com"]
expect(email.cc).to eq ["my.cc.email@gmail.com"]
expect(email.bcc).to eq ["my.bcc.email@gmail.com"]
expect(email.reply_to).to eq ["my.reply_to.email@gmail.com"]
settings = email.delivery_method.settings
expect(settings[:location]).to eq "/path/to/sendmail"
expect(settings[:arguments]).to eq "-i -t -X/tmp/traffic.log"
end
end
context "when delivery_method is :exim" do
let(:notifier) do
Notifier::Mail.new(model) do |mail|
mail.delivery_method = :exim
mail.to = "my.receiver.email@gmail.com"
mail.from = "my.sender.email@gmail.com"
mail.cc = "my.cc.email@gmail.com"
mail.bcc = "my.bcc.email@gmail.com"
mail.reply_to = "my.reply_to.email@gmail.com"
mail.exim_args = "-i -t -X/tmp/traffic.log"
end
end
it "should set the proper options" do
email = notifier.send(:new_email)
expect(email.delivery_method).to be_an_instance_of ::Mail::Exim
expect(email.to).to eq ["my.receiver.email@gmail.com"]
expect(email.from).to eq ["my.sender.email@gmail.com"]
expect(email.cc).to eq ["my.cc.email@gmail.com"]
expect(email.bcc).to eq ["my.bcc.email@gmail.com"]
expect(email.reply_to).to eq ["my.reply_to.email@gmail.com"]
settings = email.delivery_method.settings
expect(settings[:location]).to eq "/path/to/exim"
expect(settings[:arguments]).to eq "-i -t -X/tmp/traffic.log"
end
end
context "when delivery_method is :file" do
let(:notifier) do
Notifier::Mail.new(model) do |mail|
mail.delivery_method = :file
mail.to = "my.receiver.email@gmail.com"
mail.from = "my.sender.email@gmail.com"
mail.cc = "my.cc.email@gmail.com"
mail.bcc = "my.bcc.email@gmail.com"
mail.reply_to = "my.reply_to.email@gmail.com"
mail.mail_folder = "/path/to/backup/mails"
end
end
it "should set the proper options" do
email = notifier.send(:new_email)
expect(email.delivery_method).to be_an_instance_of ::Mail::FileDelivery
expect(email.to).to eq ["my.receiver.email@gmail.com"]
expect(email.from).to eq ["my.sender.email@gmail.com"]
expect(email.cc).to eq ["my.cc.email@gmail.com"]
expect(email.bcc).to eq ["my.bcc.email@gmail.com"]
expect(email.reply_to).to eq ["my.reply_to.email@gmail.com"]
settings = email.delivery_method.settings
expect(settings[:location]).to eq "/path/to/backup/mails"
end
end
end # describe '#new_email'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/flowdock_spec.rb | spec/notifier/flowdock_spec.rb | require "spec_helper"
module Backup
describe Notifier::FlowDock do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::FlowDock.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.token).to be_nil
expect(notifier.from_name).to be_nil
expect(notifier.from_email).to be_nil
expect(notifier.subject).to eql "Backup Notification"
expect(notifier.source).to eql "Backup test label"
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::FlowDock.new(model) do |flowdock|
flowdock.token = "my_token"
flowdock.from_name = "my_name"
flowdock.from_email = "email@example.com"
flowdock.subject = "My Daily Backup"
flowdock.on_success = false
flowdock.on_warning = false
flowdock.on_failure = false
flowdock.max_retries = 5
flowdock.retry_waitsec = 10
end
expect(notifier.token).to eq "my_token"
expect(notifier.from_name).to eq "my_name"
expect(notifier.from_email).to eq "email@example.com"
expect(notifier.subject).to eq "My Daily Backup"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::FlowDock.new(model) do |flowdock|
flowdock.token = "my_token"
flowdock.from_name = "my_name"
flowdock.from_email = "email@example.com"
flowdock.subject = "My Daily Backup"
flowdock.tags = ["prod"]
flowdock.link = "www.example.com"
end
end
let(:client) { double }
let(:push_to_team_inbox) { double }
let(:message) { "[Backup::%s] test label (test_trigger)" }
context "when status is :success" do
it "sends a success message" do
expect(Flowdock::Flow).to receive(:new).ordered.with(
api_token: "my_token", source: "Backup test label",
from: { name: "my_name", address: "email@example.com" }
).and_return(client)
expect(client).to receive(:push_to_team_inbox).ordered.with(
subject: "My Daily Backup",
content: message % "Success",
tags: ["prod", "#BackupSuccess"],
link: "www.example.com"
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Flowdock::Flow).to receive(:new).ordered.with(
api_token: "my_token", source: "Backup test label",
from: { name: "my_name", address: "email@example.com" }
).and_return(client)
expect(client).to receive(:push_to_team_inbox).ordered.with(
subject: "My Daily Backup",
content: message % "Warning",
tags: ["prod", "#BackupWarning"],
link: "www.example.com"
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Flowdock::Flow).to receive(:new).ordered.with(
api_token: "my_token", source: "Backup test label",
from: { name: "my_name", address: "email@example.com" }
).and_return(client)
expect(client).to receive(:push_to_team_inbox).ordered.with(
subject: "My Daily Backup",
content: message % "Failure",
tags: ["prod", "#BackupFailure"],
link: "www.example.com"
)
notifier.send(:notify!, :failure)
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/prowl_spec.rb | spec/notifier/prowl_spec.rb | require "spec_helper"
module Backup
describe Notifier::Prowl do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Prowl.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.application).to be_nil
expect(notifier.api_key).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Prowl.new(model) do |prowl|
prowl.application = "my_app"
prowl.api_key = "my_api_key"
prowl.on_success = false
prowl.on_warning = false
prowl.on_failure = false
prowl.max_retries = 5
prowl.retry_waitsec = 10
end
expect(notifier.application).to eq "my_app"
expect(notifier.api_key).to eq "my_api_key"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::Prowl.new(model) do |prowl|
prowl.application = "my_app"
prowl.api_key = "my_api_key"
end
end
let(:form_data) do
"application=my_app&apikey=my_api_key&"\
"event=Backup%3A%3ASTATUS&"\
"description=test+label+%28test_trigger%29"
end
context "when status is :success" do
it "sends a success message" do
expect(Excon).to receive(:post).with(
"https://api.prowlapp.com/publicapi/add",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Success"),
expects: 200
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Excon).to receive(:post).with(
"https://api.prowlapp.com/publicapi/add",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Warning"),
expects: 200
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Excon).to receive(:post).with(
"https://api.prowlapp.com/publicapi/add",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Failure"),
expects: 200
)
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/command_notifier_spec.rb | spec/notifier/command_notifier_spec.rb | require "spec_helper"
module Backup
describe Notifier::Command do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) do
Notifier::Command.new(model) do |cmd|
cmd.command = "notify-send"
cmd.args = [->(model, _) { model.label.upcase }, "%V | %t"]
end
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
notifier = Notifier::Command.new(model)
expect(notifier.command).to be_nil
expect(notifier.args).to eq(["%L %v"])
end
it "configures the notifier" do
notifier = Notifier::Command.new(model) do |cmd|
cmd.command = "my_command"
cmd.args = "my_args"
end
expect(notifier.command).to eq "my_command"
expect(notifier.args).to eq "my_args"
end
end # describe '#initialize'
describe "#notify!" do
context "when status is :success" do
it "sends a success message" do
expect(IO).to receive(:popen).with(
[
"notify-send",
"TEST LABEL",
"Succeeded | test_trigger"
]
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(IO).to receive(:popen).with(
[
"notify-send",
"TEST LABEL",
"Succeeded with warnings | test_trigger"
]
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(IO).to receive(:popen).with(
[
"notify-send",
"TEST LABEL",
"Failed | test_trigger"
]
)
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/slack_spec.rb | spec/notifier/slack_spec.rb | require "spec_helper"
module Backup
describe Notifier::Slack do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Slack.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.webhook_url).to be_nil
expect(notifier.channel).to be_nil
expect(notifier.username).to be_nil
expect(notifier.icon_emoji).to eq(":floppy_disk:")
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Slack.new(model) do |slack|
slack.webhook_url = "my_webhook_url"
slack.channel = "my_channel"
slack.username = "my_username"
slack.icon_emoji = ":vhs:"
slack.on_success = false
slack.on_warning = false
slack.on_failure = false
slack.max_retries = 5
slack.retry_waitsec = 10
end
expect(notifier.webhook_url).to eq "my_webhook_url"
expect(notifier.channel).to eq "my_channel"
expect(notifier.username).to eq "my_username"
expect(notifier.icon_emoji).to eq ":vhs:"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
def expected_excon_params(given_url, options, expected_payload, send_log = false)
body = Hash[URI.decode_www_form(options[:body])]
payload = JSON.parse(body["payload"])
attachments = payload["attachments"]
fields = attachments.first["fields"]
titles = fields.map { |h| h["title"] }
result = given_url == url
result &&= options[:headers] == { "Content-Type" => "application/x-www-form-urlencoded" }
result &&= options[:expects] == 200
result &&= attachments.size == 1
result &&= titles == send_log ? expected_titles_with_log : expected_titles
expected_payload.each do |k, v|
result &&= payload[k.to_s] == v
end
result
end
let(:expected_titles) do
["Job", "Started", "Finished", "Duration", "Version"]
end
let(:expected_titles_with_log) do
expected_titles + ["Detailed Backup Log"]
end
let(:notifier) do
Notifier::Slack.new(model) do |slack|
slack.webhook_url = "my_webhook_url"
end
end
let(:url) do
"my_webhook_url"
end
context "when status is :success" do
it "sends a success message" do
expect(Excon).to receive(:post) do |given_url, options|
expected_excon_params(given_url, options, text: "[Backup::Success] test label (test_trigger)")
end
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Excon).to receive(:post) do |given_url, options|
expected_excon_params(given_url, options, { text: "[Backup::Warning] test label (test_trigger)" }, true)
end
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Excon).to receive(:post) do |given_url, options|
expected_excon_params(given_url, options, { text: "[Backup::Failure] test label (test_trigger)" }, true)
end
notifier.send(:notify!, :failure)
end
end
context "when optional parameters are provided" do
let(:notifier) do
Notifier::Slack.new(model) do |slack|
slack.webhook_url = "my_webhook_url"
slack.channel = "my_channel"
slack.username = "my_username"
slack.icon_emoji = ":vhs:"
end
end
it "sends message with optional parameters" do
expect(Excon).to receive(:post) do |given_url, options|
expected_excon_params(given_url, options, text: "[Backup::Success] test label (test_trigger)",
channel: "my_channel",
username: "my_username",
icon_emoji: ":vhs:")
end
notifier.send(:notify!, :success)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/nagios_spec.rb | spec/notifier/nagios_spec.rb | require "spec_helper"
module Backup
describe Notifier::Nagios do
let(:model) { Model.new(:test_trigger, "test model") }
let(:notifier) { Notifier::Nagios.new(model) }
before do
allow(Utilities).to receive(:utility).with(:send_nsca).and_return("send_nsca")
allow(Config).to receive(:hostname).and_return("my.hostname")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.nagios_host).to eq "my.hostname"
expect(notifier.nagios_port).to be 5667
expect(notifier.send_nsca_cfg).to eq "/etc/nagios/send_nsca.cfg"
expect(notifier.service_name).to eq "Backup test_trigger"
expect(notifier.service_host).to eq "my.hostname"
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Nagios.new(model) do |nagios|
nagios.nagios_host = "my_nagios_host"
nagios.nagios_port = 1234
nagios.send_nsca_cfg = "my_send_nsca_cfg"
nagios.service_name = "my_service_name"
nagios.service_host = "my_service_host"
nagios.on_success = false
nagios.on_warning = false
nagios.on_failure = false
nagios.max_retries = 5
nagios.retry_waitsec = 10
end
expect(notifier.nagios_host).to eq "my_nagios_host"
expect(notifier.nagios_port).to be 1234
expect(notifier.send_nsca_cfg).to eq "my_send_nsca_cfg"
expect(notifier.service_name).to eq "my_service_name"
expect(notifier.service_host).to eq "my_service_host"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:nagios_cmd) { "send_nsca -H 'my.hostname' -p '5667' -c '/etc/nagios/send_nsca.cfg'" }
before do
notifier.service_host = "my.service.host"
allow(model).to receive(:duration).and_return("12:34:56")
end
context "when status is :success" do
let(:nagios_msg) do
"my.service.host\tBackup test_trigger\t0\t"\
"[Backup::Success] test model (test_trigger)"
end
before { allow(model).to receive(:exit_status).and_return(0) }
it "sends a Success message" do
expect(Utilities).to receive(:run).with("echo '#{nagios_msg}' | #{nagios_cmd}")
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
let(:nagios_msg) do
"my.service.host\tBackup test_trigger\t1\t"\
"[Backup::Warning] test model (test_trigger)"
end
before { allow(model).to receive(:exit_status).and_return(1) }
it "sends a Success message" do
expect(Utilities).to receive(:run).with("echo '#{nagios_msg}' | #{nagios_cmd}")
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
let(:nagios_msg) do
"my.service.host\tBackup test_trigger\t2\t"\
"[Backup::Failure] test model (test_trigger)"
end
before { allow(model).to receive(:exit_status).and_return(2) }
it "sends a Success message" do
expect(Utilities).to receive(:run).with("echo '#{nagios_msg}' | #{nagios_cmd}")
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/pushover_spec.rb | spec/notifier/pushover_spec.rb | require "spec_helper"
module Backup
describe Notifier::Pushover do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Pushover.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.user).to be_nil
expect(notifier.token).to be_nil
expect(notifier.device).to be_nil
expect(notifier.title).to be_nil
expect(notifier.priority).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Pushover.new(model) do |pushover|
pushover.user = "my_user"
pushover.token = "my_token"
pushover.device = "my_device"
pushover.title = "my_title"
pushover.priority = "my_priority"
pushover.on_success = false
pushover.on_warning = false
pushover.on_failure = false
pushover.max_retries = 5
pushover.retry_waitsec = 10
end
expect(notifier.user).to eq "my_user"
expect(notifier.token).to eq "my_token"
expect(notifier.device).to eq "my_device"
expect(notifier.title).to eq "my_title"
expect(notifier.priority).to eq "my_priority"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::Pushover.new(model) do |pushover|
pushover.user = "my_user"
pushover.token = "my_token"
end
end
let(:form_data) do
"user=my_user&token=my_token&" \
"message=%5BBackup%3A%3A" + "STATUS" + "%5D+test+label+%28test_trigger%29"
end
context "when status is :success" do
it "sends a success message" do
expect(Excon).to receive(:post).with(
"https://api.pushover.net/1/messages.json",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Success"),
expects: 200
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Excon).to receive(:post).with(
"https://api.pushover.net/1/messages.json",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Warning"),
expects: 200
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Excon).to receive(:post).with(
"https://api.pushover.net/1/messages.json",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data.sub("STATUS", "Failure"),
expects: 200
)
notifier.send(:notify!, :failure)
end
end
context "when optional parameters are provided" do
let(:notifier) do
Notifier::Pushover.new(model) do |pushover|
pushover.user = "my_user"
pushover.token = "my_token"
pushover.device = "my_device"
pushover.title = "my_title"
pushover.priority = "my_priority"
end
end
let(:form_data) do
"user=my_user&token=my_token&" \
"message=%5BBackup%3A%3ASuccess%5D+test+label+%28test_trigger%29&" \
"device=my_device&title=my_title&priority=my_priority"
end
it "sends message with optional parameters" do
expect(Excon).to receive(:post).with(
"https://api.pushover.net/1/messages.json",
headers: { "Content-Type" => "application/x-www-form-urlencoded" },
body: form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/http_post_spec.rb | spec/notifier/http_post_spec.rb | require "spec_helper"
module Backup
describe Notifier::HttpPost do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) do
Notifier::HttpPost.new(model) do |post|
post.uri = "https://www.example.com/path"
end
end
let(:default_form_data) do
"message=%5BBackup%3A%3ASuccess%5D+test+label+%28test_trigger%29" \
"&status=success"
end
let(:default_headers) do
{ "User-Agent" => "Backup/#{VERSION}",
"Content-Type" => "application/x-www-form-urlencoded" }
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
notifier = Notifier::HttpPost.new(model)
expect(notifier.uri).to be_nil
expect(notifier.headers).to eq({})
expect(notifier.params).to eq({})
expect(notifier.success_codes).to be 200
expect(notifier.ssl_verify_peer).to be_nil
expect(notifier.ssl_ca_file).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::HttpPost.new(model) do |post|
post.uri = "my_uri"
post.headers = "my_headers"
post.params = "my_params"
post.success_codes = "my_success_codes"
post.ssl_verify_peer = "my_ssl_verify_peer"
post.ssl_ca_file = "my_ssl_ca_file"
post.on_success = false
post.on_warning = false
post.on_failure = false
post.max_retries = 5
post.retry_waitsec = 10
end
expect(notifier.uri).to eq "my_uri"
expect(notifier.headers).to eq "my_headers"
expect(notifier.params).to eq "my_params"
expect(notifier.success_codes).to eq "my_success_codes"
expect(notifier.ssl_verify_peer).to eq "my_ssl_verify_peer"
expect(notifier.ssl_ca_file).to eq "my_ssl_ca_file"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#headers" do
it "defines additional headers to be sent" do
notifier.headers = { "Authorization" => "my_auth" }
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: { "User-Agent" => "Backup/#{VERSION}",
"Content-Type" => "application/x-www-form-urlencoded",
"Authorization" => "my_auth" },
body: default_form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
it "may overrided the User-Agent header" do
notifier.headers = { "Authorization" => "my_auth", "User-Agent" => "my_app" }
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: { "User-Agent" => "my_app",
"Content-Type" => "application/x-www-form-urlencoded",
"Authorization" => "my_auth" },
body: default_form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
it "may omit the User-Agent header" do
notifier.headers = { "Authorization" => "my_auth", "User-Agent" => nil }
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: { "Content-Type" => "application/x-www-form-urlencoded",
"Authorization" => "my_auth" },
body: default_form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
end # describe '#headers'
describe "#params" do
it "defines additional form parameters to be sent" do
notifier.params = { "my_param" => "my_value" }
form_data = "message=%5BBackup%3A%3ASuccess%5D+test+label+%28test_trigger%29" \
"&my_param=my_value&status=success"
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
it "may override the `message` parameter" do
notifier.params = { "my_param" => "my_value", "message" => "my message" }
form_data = "message=my+message&my_param=my_value&status=success"
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
it "may omit the `message` parameter" do
notifier.params = { "my_param" => "my_value", "message" => nil }
form_data = "my_param=my_value&status=success"
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data,
expects: 200
)
notifier.send(:notify!, :success)
end
end # describe '#params'
describe "#success_codes" do
it "specifies expected http success codes" do
notifier.success_codes = [200, 201]
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: default_form_data,
expects: [200, 201]
)
notifier.send(:notify!, :success)
end
end # describe '#success_codes'
describe "#ssl_verify_peer" do
it "may force enable verification" do
notifier.ssl_verify_peer = true
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: default_form_data,
expects: 200,
ssl_verify_peer: true
)
notifier.send(:notify!, :success)
end
it "may disable verification" do
notifier.ssl_verify_peer = false
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: default_form_data,
expects: 200,
ssl_verify_peer: false
)
notifier.send(:notify!, :success)
end
end # describe '#ssl_verify_peer'
describe "#ssl_ca_file" do
it "specifies path to a custom cacert.pem file" do
notifier.ssl_ca_file = "/my/cacert.pem"
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: default_form_data,
expects: 200,
ssl_ca_file: "/my/cacert.pem"
)
notifier.send(:notify!, :success)
end
end # describe '#ssl_ca_file'
describe "#notify!" do
let(:form_data) do
"message=%5BBackup%3A%3A" + "TAG" \
"%5D+test+label+%28test_trigger%29&status=" + "STATUS"
end
context "when status is :success" do
it "sends a success message" do
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data.sub("TAG", "Success").sub("STATUS", "success"),
expects: 200
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data.sub("TAG", "Warning").sub("STATUS", "warning"),
expects: 200
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Excon).to receive(:post).with(
"https://www.example.com/path",
headers: default_headers,
body: form_data.sub("TAG", "Failure").sub("STATUS", "failure"),
expects: 200
)
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/pagerduty_spec.rb | spec/notifier/pagerduty_spec.rb | require "spec_helper"
module Backup
describe Notifier::PagerDuty do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::PagerDuty.new(model) }
describe "#initialize" do
it "has sensible defaults" do
expect(notifier.service_key).to be_nil
expect(notifier.resolve_on_warning).to eq(false)
end
it "yields to allow modifying defaults" do
notifier = Notifier::PagerDuty.new(model) do |pd|
pd.service_key = "foobar"
end
expect(notifier.service_key).to eq("foobar")
end
end
describe "notify!" do
let(:pagerduty) { double }
let(:incident) { double }
let(:incident_key) { "backup/test_trigger" }
let(:incident_details) do
{
incident_key: incident_key,
details: {
trigger: "test_trigger",
label: "test label",
started_at: nil,
finished_at: nil,
duration: nil,
exception: nil
}
}
end
before do
allow(notifier).to receive(:pagerduty).and_return(pagerduty)
end
it "resolves an incident when status is :success" do
incident_details[:details][:status] = :success
expect(pagerduty).to receive(:get_incident).with(incident_key).and_return(incident)
expect(incident).to receive(:resolve).with("Backup - test label", incident_details)
notifier.send(:notify!, :success)
end
it "triggers an incident when status is :warning and resolve_on_warning is false" do
incident_details[:details][:status] = :warning
expect(pagerduty).to receive(:trigger).with("Backup - test label", incident_details)
notifier.send(:notify!, :warning)
end
it "resolves an incident when status is :warning and resolve_on_warning is true" do
notifier.resolve_on_warning = true
incident_details[:details][:status] = :warning
expect(pagerduty).to receive(:get_incident).with(incident_key).and_return(incident)
expect(incident).to receive(:resolve).with("Backup - test label", incident_details)
notifier.send(:notify!, :warning)
end
it "triggers an incident when status is :failure" do
incident_details[:details][:status] = :failure
expect(pagerduty).to receive(:trigger).with("Backup - test label", incident_details)
notifier.send(:notify!, :failure)
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/datadog_spec.rb | spec/notifier/datadog_spec.rb | require "spec_helper"
module Backup
describe Notifier::DataDog do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::DataDog.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.api_key).to be_nil
expect(notifier.title).to eq "Backup test label"
expect(notifier.date_happened).to be_nil
expect(notifier.priority).to be_nil
expect(notifier.host).to be_nil
expect(notifier.tags).to be_nil
expect(notifier.alert_type).to be_nil
expect(notifier.aggregation_key).to be_nil
expect(notifier.source_type_name).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::DataDog.new(model) do |datadog|
datadog.api_key = "my_key"
datadog.title = "Backup!"
datadog.date_happened = 12_345
datadog.priority = "low"
datadog.host = "local"
datadog.tags = ["tag1", "tag2"]
datadog.alert_type = "error"
datadog.aggregation_key = "key"
datadog.source_type_name = "my apps"
datadog.on_success = false
datadog.on_warning = false
datadog.on_failure = false
datadog.max_retries = 5
datadog.retry_waitsec = 10
end
expect(notifier.api_key).to eq "my_key"
expect(notifier.title).to eq "Backup!"
expect(notifier.date_happened).to eq 12_345
expect(notifier.priority).to eq "low"
expect(notifier.host).to eq "local"
expect(notifier.tags.first).to eq "tag1"
expect(notifier.alert_type).to eq "error"
expect(notifier.aggregation_key).to eq "key"
expect(notifier.source_type_name).to eq "my apps"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::DataDog.new(model) do |datadog|
datadog.api_key = "my_token"
end
end
let(:client) { double }
let(:event) { double }
context "when status is :success" do
it "sends a success message" do
expect(Dogapi::Client).to receive(:new).ordered
.with("my_token")
.and_return(client)
expect(Dogapi::Event).to receive(:new).ordered.with(
"[Backup::Success] test label (test_trigger)",
msg_title: "Backup test label",
alert_type: "success"
).and_return(event)
expect(client).to receive(:emit_event).ordered.with(event)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Dogapi::Client).to receive(:new).ordered
.with("my_token")
.and_return(client)
expect(Dogapi::Event).to receive(:new).ordered.with(
"[Backup::Warning] test label (test_trigger)",
msg_title: "Backup test label",
alert_type: "warning"
).and_return(event)
expect(client).to receive(:emit_event).ordered.with(event)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends an error message" do
expect(Dogapi::Client).to receive(:new).ordered
.with("my_token")
.and_return(client)
expect(Dogapi::Event).to receive(:new).ordered.with(
"[Backup::Failure] test label (test_trigger)",
msg_title: "Backup test label",
alert_type: "error"
).and_return(event)
expect(client).to receive(:emit_event).ordered.with(event)
notifier.send(:notify!, :failure)
end
end
end
end # describe '#notify!'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/campfire_spec.rb | spec/notifier/campfire_spec.rb | require "spec_helper"
module Backup
describe Notifier::Campfire do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Campfire.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.api_token).to be_nil
expect(notifier.subdomain).to be_nil
expect(notifier.room_id).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Campfire.new(model) do |campfire|
campfire.api_token = "token"
campfire.subdomain = "subdomain"
campfire.room_id = "room_id"
campfire.on_success = false
campfire.on_warning = false
campfire.on_failure = false
campfire.max_retries = 5
campfire.retry_waitsec = 10
end
expect(notifier.api_token).to eq "token"
expect(notifier.subdomain).to eq "subdomain"
expect(notifier.room_id).to eq "room_id"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::Campfire.new(model) do |campfire|
campfire.api_token = "my_token"
campfire.subdomain = "my_subdomain"
campfire.room_id = "my_room_id"
end
end
let(:json_body) do
JSON.dump(
message: {
body: "[Backup::STATUS] test label (test_trigger)",
type: "Textmessage"
}
)
end
context "when status is :success" do
it "sends a success message" do
expect(Excon).to receive(:post).with(
"https://my_subdomain.campfirenow.com/room/my_room_id/speak.json",
headers: { "Content-Type" => "application/json" },
body: json_body.sub("STATUS", "Success"),
user: "my_token",
password: "x",
expects: 201
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(Excon).to receive(:post).with(
"https://my_subdomain.campfirenow.com/room/my_room_id/speak.json",
headers: { "Content-Type" => "application/json" },
body: json_body.sub("STATUS", "Warning"),
user: "my_token",
password: "x",
expects: 201
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(Excon).to receive(:post).with(
"https://my_subdomain.campfirenow.com/room/my_room_id/speak.json",
headers: { "Content-Type" => "application/json" },
body: json_body.sub("STATUS", "Failure"),
user: "my_token",
password: "x",
expects: 201
)
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/zabbix_spec.rb | spec/notifier/zabbix_spec.rb | require "spec_helper"
module Backup
describe Notifier::Zabbix do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Zabbix.new(model) }
before do
allow(Utilities).to receive(:utility).with(:zabbix_sender).and_return("zabbix_sender")
allow(Config).to receive(:hostname).and_return("zabbix.hostname")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.zabbix_host).to eq "zabbix.hostname"
expect(notifier.zabbix_port).to be 10_051
expect(notifier.service_name).to eq "Backup test_trigger"
expect(notifier.service_host).to eq "zabbix.hostname"
expect(notifier.item_key).to eq "backup_status"
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Zabbix.new(model) do |zabbix|
zabbix.zabbix_host = "my_zabbix_server"
zabbix.zabbix_port = 1234
zabbix.service_name = "my_service_name"
zabbix.service_host = "my_service_host"
zabbix.item_key = "backup_status"
zabbix.on_success = false
zabbix.on_warning = false
zabbix.on_failure = false
zabbix.max_retries = 5
zabbix.retry_waitsec = 10
end
expect(notifier.zabbix_host).to eq "my_zabbix_server"
expect(notifier.zabbix_port).to be 1234
expect(notifier.service_name).to eq "my_service_name"
expect(notifier.service_host).to eq "my_service_host"
expect(notifier.item_key).to eq "backup_status"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
before do
notifier.service_host = "my.service.host"
allow(model).to receive(:duration).and_return("12:34:56")
allow(notifier).to receive(:zabbix_port).and_return(10_051)
end
context "when status is :success" do
let(:zabbix_msg) do
"my.service.host\tBackup test_trigger\t0\t"\
"[Backup::Success] test label (test_trigger)"
end
let(:zabbix_cmd) do
"zabbix_sender -z 'zabbix.hostname'" \
" -p '#{notifier.zabbix_port}'" \
" -s #{notifier.service_host}" \
" -k #{notifier.item_key}" \
" -o '#{zabbix_msg}'"
end
before { allow(model).to receive(:exit_status).and_return(0) }
it "sends a Success message" do
expect(Utilities).to receive(:run).with("echo '#{zabbix_msg}' | #{zabbix_cmd}")
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
let(:zabbix_msg) do
"my.service.host\tBackup test_trigger\t1\t"\
"[Backup::Warning] test label (test_trigger)"
end
let(:zabbix_cmd) do
"zabbix_sender -z 'zabbix.hostname'" \
" -p '#{notifier.zabbix_port}'" \
" -s #{notifier.service_host}" \
" -k #{notifier.item_key}" \
" -o '#{zabbix_msg}'"
end
before { allow(model).to receive(:exit_status).and_return(1) }
it "sends a Warning message" do
expect(Utilities).to receive(:run).with("echo '#{zabbix_msg}' | #{zabbix_cmd}")
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
let(:zabbix_msg) do
"my.service.host\tBackup test_trigger\t2\t"\
"[Backup::Failure] test label (test_trigger)"
end
let(:zabbix_cmd) do
"zabbix_sender -z 'zabbix.hostname'" \
" -p '#{notifier.zabbix_port}'" \
" -s #{notifier.service_host}" \
" -k #{notifier.item_key}" \
" -o '#{zabbix_msg}'"
end
before { allow(model).to receive(:exit_status).and_return(2) }
it "sends a Failure message" do
expect(Utilities).to receive(:run).with("echo '#{zabbix_msg}' | #{zabbix_cmd}")
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/hipchat_spec.rb | spec/notifier/hipchat_spec.rb | require "spec_helper"
module Backup
describe Notifier::Hipchat do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Hipchat.new(model) }
let(:s) { sequence "" }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.token).to be_nil
expect(notifier.api_version).to eq "v1"
expect(notifier.from).to be_nil
expect(notifier.server_url).to be_nil
expect(notifier.rooms_notified).to eq []
expect(notifier.notify_users).to be(false)
expect(notifier.success_color).to eq "yellow"
expect(notifier.warning_color).to eq "yellow"
expect(notifier.failure_color).to eq "yellow"
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Hipchat.new(model) do |hipchat|
hipchat.token = "my_token"
hipchat.from = "my_from"
hipchat.server_url = "https://mycustom.server.com"
hipchat.rooms_notified = ["room_a", "room_b"]
hipchat.notify_users = true
hipchat.success_color = :success_color
hipchat.warning_color = :warning_color
hipchat.failure_color = :failure_color
hipchat.on_success = false
hipchat.on_warning = false
hipchat.on_failure = false
hipchat.max_retries = 5
hipchat.retry_waitsec = 10
end
expect(notifier.token).to eq "my_token"
expect(notifier.from).to eq "my_from"
expect(notifier.server_url).to eq "https://mycustom.server.com"
expect(notifier.rooms_notified).to eq ["room_a", "room_b"]
expect(notifier.notify_users).to be(true)
expect(notifier.success_color).to eq :success_color
expect(notifier.warning_color).to eq :warning_color
expect(notifier.failure_color).to eq :failure_color
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:notifier) do
Notifier::Hipchat.new(model) do |hipchat|
hipchat.token = "my_token"
hipchat.from = "my_from"
hipchat.server_url = "https://mycustom.server.com"
hipchat.rooms_notified = ["room_a", "room_b"]
hipchat.notify_users = true
hipchat.success_color = :success_color
hipchat.warning_color = :warning_color
hipchat.failure_color = :failure_color
end
end
let(:client_options) do
{ api_version: "v1", server_url: "https://mycustom.server.com" }
end
let(:client) { double }
let(:room) { double }
let(:message) { "[Backup::%s] test label (test_trigger)" }
context "when status is :success" do
it "sends a success message" do
expect(HipChat::Client).to receive(:new).ordered
.with("my_token", client_options)
.and_return(client)
expect(client).to receive(:[]).ordered.with("room_a").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from", message % "Success",
color: :success_color,
notify: true
)
expect(client).to receive(:[]).ordered.with("room_b").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from",
message % "Success",
color: :success_color, notify: true
)
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(HipChat::Client).to receive(:new).ordered
.with("my_token", client_options)
.and_return(client)
expect(client).to receive(:[]).ordered.with("room_a").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from",
message % "Warning",
color: :warning_color,
notify: true
)
expect(client).to receive(:[]).ordered.with("room_b").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from",
message % "Warning",
color: :warning_color,
notify: true
)
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(HipChat::Client).to receive(:new).ordered
.with("my_token", client_options)
.and_return(client)
expect(client).to receive(:[]).ordered.with("room_a").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from",
message % "Failure",
color: :failure_color,
notify: true
)
expect(client).to receive(:[]).ordered.with("room_b").and_return(room)
expect(room).to receive(:send).ordered.with(
"my_from",
message % "Failure",
color: :failure_color,
notify: true
)
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
describe "#rooms_to_notify" do
it "returns an array of rooms from a string with a single room name" do
notifier.rooms_notified = "my_room"
expect(notifier.send(:rooms_to_notify)).to eq ["my_room"]
end
it "returns an array of rooms from a comma-delimited string" do
notifier.rooms_notified = "room_a, room_b"
expect(notifier.send(:rooms_to_notify)).to eq ["room_a", "room_b"]
end
end # describe '#rooms_to_notify'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/twitter_spec.rb | spec/notifier/twitter_spec.rb | require "spec_helper"
module Backup
describe Notifier::Twitter do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Twitter.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.consumer_key).to be_nil
expect(notifier.consumer_secret).to be_nil
expect(notifier.oauth_token).to be_nil
expect(notifier.oauth_token_secret).to be_nil
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Twitter.new(model) do |twitter|
twitter.consumer_key = "my_consumer_key"
twitter.consumer_secret = "my_consumer_secret"
twitter.oauth_token = "my_oauth_token"
twitter.oauth_token_secret = "my_oauth_token_secret"
twitter.on_success = false
twitter.on_warning = false
twitter.on_failure = false
twitter.max_retries = 5
twitter.retry_waitsec = 10
end
expect(notifier.consumer_key).to eq "my_consumer_key"
expect(notifier.consumer_secret).to eq "my_consumer_secret"
expect(notifier.oauth_token).to eq "my_oauth_token"
expect(notifier.oauth_token_secret).to eq "my_oauth_token_secret"
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end # describe '#initialize'
describe "#notify!" do
let(:message) { "[Backup::%s] test label (test_trigger)" }
context "when status is :success" do
it "sends a success message" do
expect(notifier).to receive(:send_message).with(message % "Success")
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(notifier).to receive(:send_message).with(message % "Warning")
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(notifier).to receive(:send_message).with(message % "Failure")
notifier.send(:notify!, :failure)
end
end
end # describe '#notify!'
describe "#send_message" do
let(:notifier) do
Notifier::Twitter.new(model) do |twitter|
twitter.consumer_key = "my_consumer_key"
twitter.consumer_secret = "my_consumer_secret"
twitter.oauth_token = "my_oauth_token"
twitter.oauth_token_secret = "my_oauth_token_secret"
end
end
it "sends a message" do
client = double
config = double
expect(::Twitter::REST::Client).to receive(:new).and_yield(config).and_return(client)
expect(config).to receive(:consumer_key=).with("my_consumer_key")
expect(config).to receive(:consumer_secret=).with("my_consumer_secret")
expect(config).to receive(:access_token=).with("my_oauth_token")
expect(config).to receive(:access_token_secret=).with("my_oauth_token_secret")
expect(client).to receive(:update).with("a message")
notifier.send(:send_message, "a message")
end
end # describe '#send_message'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/notifier/ses_spec.rb | spec/notifier/ses_spec.rb | require "spec_helper"
module Backup
describe Notifier::Ses do
let(:model) { Model.new(:test_trigger, "test label") }
let(:notifier) { Notifier::Ses.new(model) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Notifier::Base"
describe "#initialize" do
it "provides default values" do
expect(notifier.to).to be_nil
expect(notifier.from).to be_nil
expect(notifier.cc).to be_nil
expect(notifier.bcc).to be_nil
expect(notifier.reply_to).to be_nil
expect(notifier.send_log_on).to eq [:warning, :failure]
expect(notifier.on_success).to be(true)
expect(notifier.on_warning).to be(true)
expect(notifier.on_failure).to be(true)
expect(notifier.max_retries).to be(10)
expect(notifier.retry_waitsec).to be(30)
end
it "configures the notifier" do
notifier = Notifier::Ses.new(model) do |ses|
ses.to = "my.receiver.email@gmail.com"
ses.from = "my.sender.email@gmail.com"
ses.cc = "my.cc.email@gmail.com"
ses.bcc = "my.bcc.email@gmail.com"
ses.reply_to = "my.reply_to.email@gmail.com"
ses.send_log_on = [:success, :warning, :failure]
ses.on_success = false
ses.on_warning = false
ses.on_failure = false
ses.max_retries = 5
ses.retry_waitsec = 10
end
expect(notifier.to).to eq "my.receiver.email@gmail.com"
expect(notifier.from).to eq "my.sender.email@gmail.com"
expect(notifier.cc).to eq "my.cc.email@gmail.com"
expect(notifier.bcc).to eq "my.bcc.email@gmail.com"
expect(notifier.reply_to).to eq "my.reply_to.email@gmail.com"
expect(notifier.send_log_on).to eq [:success, :warning, :failure]
expect(notifier.on_success).to be(false)
expect(notifier.on_warning).to be(false)
expect(notifier.on_failure).to be(false)
expect(notifier.max_retries).to be(5)
expect(notifier.retry_waitsec).to be(10)
end
end
describe "#notify!" do
let!(:fake_ses) { Aws::SES::Client.new(stub_responses: true) }
shared_examples "messages" do
context "when status is :success" do
it "sends a success message" do
expect(fake_ses).to receive(:send_raw_email).once do |send_opts|
mail = ::Mail.new(send_opts[:raw_message][:data])
expect(mail.subject).to eq("[Backup::Success] test label (test_trigger)")
expect(mail.body.raw_source).to match_regex("Backup Completed Successfully!")
expect(mail.to).to eq ["my.receiver.email@gmail.com"]
expect(mail.from).to eq ["my.sender.email@gmail.com"]
expect(mail.cc).to eq ["my.cc.email@gmail.com"]
expect(mail.bcc).to eq ["my.bcc.email@gmail.com"]
expect(mail.reply_to).to eq ["my.reply_to.email@gmail.com"]
expect(mail.destinations).to eq send_opts[:destinations]
end
notifier.send(:notify!, :success)
end
end
context "when status is :warning" do
it "sends a warning message" do
expect(fake_ses).to receive(:send_raw_email).once do |send_opts|
mail = ::Mail.new(send_opts[:raw_message][:data])
expect(mail.subject).to eq("[Backup::Warning] test label (test_trigger)")
expect(mail.parts[0].body.raw_source).to match_regex("with Warnings")
expect(mail.attachments[0].filename).to match_regex("log")
expect(mail.destinations).to eq send_opts[:destinations]
end
notifier.send(:notify!, :warning)
end
end
context "when status is :failure" do
it "sends a failure message" do
expect(fake_ses).to receive(:send_raw_email).once do |send_opts|
mail = ::Mail.new(send_opts[:raw_message][:data])
expect(mail.subject).to eq("[Backup::Failure] test label (test_trigger)")
expect(mail.parts[0].body.raw_source).to match_regex("Backup Failed!")
expect(mail.attachments[0].filename).to match_regex("log")
expect(mail.destinations).to eq send_opts[:destinations]
end
notifier.send(:notify!, :failure)
end
end
end
context "uses access key id" do
before do
credentials = double
expect(Aws::Credentials).to receive(:new)
.with("my_access_key_id", "my_secret_access_key")
.and_return(credentials)
allow(Aws::SES::Client).to receive(:new).with(
region: "eu-west-1",
credentials: credentials
).and_return(fake_ses)
end
it_behaves_like "messages" do
let(:notifier) do
Notifier::Ses.new(model) do |ses|
ses.access_key_id = "my_access_key_id"
ses.secret_access_key = "my_secret_access_key"
ses.to = "my.receiver.email@gmail.com"
ses.from = "my.sender.email@gmail.com"
ses.cc = "my.cc.email@gmail.com"
ses.bcc = "my.bcc.email@gmail.com"
ses.reply_to = "my.reply_to.email@gmail.com"
end
end
end
end
context "uses iam instance profile" do
before do
iam_profile = double
expect(Aws::InstanceProfileCredentials).to receive(:new).and_return(iam_profile)
allow(Aws::SES::Client).to receive(:new).with(
region: "eu-west-1",
credentials: iam_profile
).and_return(fake_ses)
end
it_behaves_like "messages" do
let(:notifier) do
Notifier::Ses.new(model) do |ses|
ses.use_iam_profile = true
ses.to = "my.receiver.email@gmail.com"
ses.from = "my.sender.email@gmail.com"
ses.cc = "my.cc.email@gmail.com"
ses.bcc = "my.bcc.email@gmail.com"
ses.reply_to = "my.reply_to.email@gmail.com"
end
end
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/logger/fog_adapter_spec.rb | spec/logger/fog_adapter_spec.rb | require "spec_helper"
module Backup
describe Logger::FogAdapter do
it "replaces STDOUT fog warning channel" do
expect(Fog::Logger[:warning]).to be Logger::FogAdapter
end
describe "#tty?" do
it "returns false" do
expect(Logger::FogAdapter.tty?).to be(false)
end
end
describe "#write" do
it "logs fog warnings as info messages" do
expect(Logger).to receive(:info).with("[fog][WARNING] some message")
Fog::Logger.warning "some message"
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/logger/console_spec.rb | spec/logger/console_spec.rb | require "spec_helper"
module Backup
describe Logger::Console do
let(:timestamp) { Time.now.utc.strftime("%Y/%m/%d %H:%M:%S") }
before do
expect_any_instance_of(Logger::Logfile).to receive(:log).never
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.configure do
logfile.enabled = false
syslog.enabled = false
console.quiet = false
end
end
describe "console logger configuration" do
it "may be disabled via Logger.configure" do
Logger.configure do
console.quiet = true
end
Logger.start!
expect_any_instance_of(Logger::Console).to receive(:log).never
Logger.info "message"
end
it "may be forced enabled via the command line" do
Logger.configure do
# --no-quiet should set this to nil
console.quiet = nil
end
Logger.configure do
# attempt to disable once set to nil will be ignored
console.quiet = true
end
Logger.start!
expect_any_instance_of(Logger::Console).to receive(:log)
Logger.info "message"
end
end
describe "console logger usage" do
before { Logger.start! }
context "when IO is attached to a terminal" do
before do
allow($stdout).to receive(:tty?).and_return(true)
allow($stderr).to receive(:tty?).and_return(true)
end
it "sends colorized, formatted :info message to $stdout" do
expect($stderr).to receive(:puts).never
Timecop.freeze do
expect($stdout).to receive(:puts).with([
"\e[32m[#{timestamp}][info] message line one\e[0m",
"\e[32m[#{timestamp}][info] message line two\e[0m"
])
Logger.info "message line one\nmessage line two"
end
end
it "sends colorized, formatted :warn message to $stderr" do
expect($stdout).to receive(:puts).never
Timecop.freeze do
expect($stderr).to receive(:puts).with([
"\e[33m[#{timestamp}][warn] message line one\e[0m",
"\e[33m[#{timestamp}][warn] message line two\e[0m"
])
Logger.warn "message line one\nmessage line two"
end
end
it "sends colorized, formatted :error message to $stderr" do
expect($stdout).to receive(:puts).never
Timecop.freeze do
expect($stderr).to receive(:puts).with([
"\e[31m[#{timestamp}][error] message line one\e[0m",
"\e[31m[#{timestamp}][error] message line two\e[0m"
])
Logger.error "message line one\nmessage line two"
end
end
end # context 'when IO is attached to a terminal'
context "when IO is not attached to a terminal" do
before do
allow($stdout).to receive(:tty?).and_return(false)
allow($stderr).to receive(:tty?).and_return(false)
end
it "sends non-colorized, formatted :info message to $stdout" do
expect($stderr).to receive(:puts).never
Timecop.freeze do
expect($stdout).to receive(:puts).with([
"[#{timestamp}][info] message line one",
"[#{timestamp}][info] message line two"
])
Logger.info "message line one\nmessage line two"
end
end
it "sends non-colorized, formatted :warn message to $stderr" do
expect($stdout).to receive(:puts).never
Timecop.freeze do
expect($stderr).to receive(:puts).with([
"[#{timestamp}][warn] message line one",
"[#{timestamp}][warn] message line two"
])
Logger.warn "message line one\nmessage line two"
end
end
it "sends non-colorized, formatted :error message to $stderr" do
expect($stdout).to receive(:puts).never
Timecop.freeze do
expect($stderr).to receive(:puts).with([
"[#{timestamp}][error] message line one",
"[#{timestamp}][error] message line two"
])
Logger.error "message line one\nmessage line two"
end
end
end # context 'when IO is not attached to a terminal'
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/logger/logfile_spec.rb | spec/logger/logfile_spec.rb | require "spec_helper"
module Backup
describe Logger::Logfile do
before do
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
@log_path_absolute = File.join(@tmpdir, "log_path")
@logfile_absolute = File.join(@log_path_absolute, "backup.log")
@root_path = File.join(@tmpdir, "root_dir")
@log_path_rel = File.join(@root_path, "log_path_rel")
@log_path_default = File.join(@root_path, "log")
@logfile_default = File.join(@log_path_default, "backup.log")
allow(Backup::Config).to receive(:root_path).and_return(@root_path)
expect_any_instance_of(Logger::Console).to receive(:log).never
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.configure do
console.quiet = true
logfile.enabled = true
syslog.enabled = false
end
end
after do
FileUtils.rm_r(@tmpdir, force: true, secure: true)
end
describe "logfile logger configuration" do
it "may be disabled via Logger.configure" do
Logger.configure do
logfile.enabled = false
end
Logger.start!
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.info "message"
expect(File.exist?(@log_path_default)).to eq(false)
end
it "may be forced disabled via the command line" do
Logger.configure do
# --no-logfile should set this to nil
logfile.enabled = nil
end
Logger.configure do
# attempt to enable once set to nil will be ignored
logfile.enabled = true
end
Logger.start!
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.info "message"
expect(File.exist?(@log_path_default)).to eq(false)
end
it "ignores log_path setting if it is already set" do
Logger.configure do
# path set using --log-path on the command line
logfile.log_path = "log_path_rel"
end
Logger.configure do
# attempt to set in config.rb will be ignored
logfile.log_path = "log"
end
Logger.start!
expect(File.exist?(@log_path_default)).to eq(false)
expect(File.exist?(@log_path_absolute)).to eq(false)
expect(File.exist?(@log_path_rel)).to eq(true)
end
end
describe "#initialize" do
describe "log_path creation" do
context "when log_path is not set" do
before do
Logger.start!
end
it "should create the default log_path" do
expect(File.exist?(@log_path_rel)).to eq(false)
expect(File.exist?(@log_path_absolute)).to eq(false)
expect(File.exist?(@log_path_default)).to eq(true)
end
end
context "when log_path is set using an absolute path" do
before do
path = @log_path_absolute
Logger.configure do
logfile.log_path = path
end
Logger.start!
end
it "should create the absolute log_path" do
expect(File.exist?(@log_path_default)).to eq(false)
expect(File.exist?(@log_path_rel)).to eq(false)
expect(File.exist?(@log_path_absolute)).to eq(true)
end
end
context "when log_path is set as a relative path" do
before do
Logger.configure do
logfile.log_path = "log_path_rel"
end
Logger.start!
end
it "should create the log_path relative to Backup::Config.root_path" do
expect(File.exist?(@log_path_default)).to eq(false)
expect(File.exist?(@log_path_absolute)).to eq(false)
expect(File.exist?(@log_path_rel)).to eq(true)
end
end
end # describe 'log_path creation'
describe "logfile truncation" do
before do
Logger.configure do
logfile.max_bytes = 1000
end
end
context "when log file is larger than max_bytes" do
before do
FileUtils.mkdir_p(@log_path_default)
end
it "should truncate the file, removing older lines" do
lineno = 0
File.open(@logfile_default, "w") do |file|
bytes = 0
until bytes > 1200
bytes += file.write((lineno += 1).to_s.ljust(120, "x") + "\n")
end
end
expect(File.stat(@logfile_default).size).to be >= 1200
Logger.start!
expect(File.stat(@logfile_default).size).to be <= 1000
expect(File.readlines(@logfile_default).last).to match(/#{ lineno }x/)
expect(File.exist?(@logfile_default + "~")).to eq(false)
end
end
context "when log file is not larger than max_bytes" do
it "does not truncates the file" do
expect(File).to receive(:mv).never
Logger.start!
Logger.info "a message"
expect(File.stat(@logfile_default).size).to be > 0
expect(File.stat(@logfile_default).size).to be < 500
expect(File.exist?(@log_path_default)).to eq(true)
expect(File.exist?(@logfile_default)).to eq(true)
end
end
context "when log file does not exist" do
it "does not truncates the file" do
expect(File).to receive(:mv).never
Logger.start!
expect(File.exist?(@log_path_default)).to eq(true)
expect(File.exist?(@logfile_default)).to eq(false)
end
end
end # describe 'logfile truncation'
end # describe '#initialize'
describe "#log" do
let(:timestamp) { Time.now.utc.strftime("%Y/%m/%d %H:%M:%S") }
before do
Logger.start!
end
it "writes formatted messages to the log file" do
Timecop.freeze do
Logger.info "line one\nline two"
expect(File.readlines(@logfile_default)).to eq([
"[#{timestamp}][info] line one\n",
"[#{timestamp}][info] line two\n"
])
end
end
it "preserves blank lines within the messages" do
Timecop.freeze do
Logger.info "line one\n\nline two"
expect(File.readlines(@logfile_default)).to eq([
"[#{timestamp}][info] line one\n",
"[#{timestamp}][info] \n",
"[#{timestamp}][info] line two\n"
])
end
end
end # describe '#log'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/logger/syslog_spec.rb | spec/logger/syslog_spec.rb | require "spec_helper"
module Backup
describe Logger::Syslog do
before do
expect_any_instance_of(Logger::Console).to receive(:log).never
expect_any_instance_of(Logger::Logfile).to receive(:log).never
Logger.configure do
console.quiet = true
logfile.enabled = false
syslog.enabled = true
end
end
describe "syslog logger configuration" do
it "may be disabled via Logger.configure" do
Logger.configure do
syslog.enabled = false
end
Logger.start!
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.info "message"
end
it "may be forced disabled via the command line" do
Logger.configure do
# --no-syslog should set this to nil
syslog.enabled = nil
end
Logger.configure do
# attempt to enable once set to nil will be ignored
syslog.enabled = true
end
Logger.start!
expect_any_instance_of(Logger::Syslog).to receive(:log).never
Logger.info "message"
end
end
describe "console logger usage" do
let(:syslog_logger) { double }
let(:s) { sequence "" }
before do
Logger.configure do
syslog.ident = "test ident"
syslog.facility = ::Syslog::LOG_LOCAL4
end
expect(::Syslog).to receive(:open).with(
"test ident", ::Syslog::LOG_PID, ::Syslog::LOG_LOCAL4
).and_yield(syslog_logger)
Logger.start!
end
context "when sending an :info message" do
it "sends info messages to syslog" do
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_INFO, "%s", "message line one"
)
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_INFO, "%s", "message line two"
)
Logger.info "message line one\nmessage line two"
end
end
context "when sending an :warn message" do
it "sends warn messages to syslog" do
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_WARNING, "%s", "message line one"
)
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_WARNING, "%s", "message line two"
)
Logger.warn "message line one\nmessage line two"
end
end
context "when sending an :error message" do
it "sends error messages to syslog" do
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_ERR, "%s", "message line one"
)
expect(syslog_logger).to receive(:log).ordered.with(
::Syslog::LOG_ERR, "%s", "message line two"
)
Logger.error "message line one\nmessage line two"
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/cloud_io/s3_spec.rb | spec/cloud_io/s3_spec.rb | require "spec_helper"
require "backup/cloud_io/s3"
module Backup
describe CloudIO::S3 do
let(:connection) { double }
describe "#upload" do
context "with multipart support" do
let(:cloud_io) { CloudIO::S3.new(bucket: "my_bucket", chunk_size: 5) }
let(:parts) { double }
context "when src file is larger than chunk_size" do
before do
expect(File).to receive(:size).with("/src/file").and_return(10 * 1024**2)
end
it "uploads using multipart" do
expect(cloud_io).to receive(:initiate_multipart).with("dest/file").and_return(1234)
expect(cloud_io).to receive(:upload_parts).with(
"/src/file", "dest/file", 1234, 5 * 1024**2, 10 * 1024**2
).and_return(parts)
expect(cloud_io).to receive(:complete_multipart).with("dest/file", 1234, parts)
expect(cloud_io).to receive(:put_object).never
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is not larger than chunk_size" do
before do
expect(File).to receive(:size).with("/src/file").and_return(5 * 1024**2)
end
it "uploads without multipart" do
expect(cloud_io).to receive(:put_object).with("/src/file", "dest/file")
expect(cloud_io).to receive(:initiate_multipart).never
cloud_io.upload("/src/file", "dest/file")
end
end
context "when chunk_size is too small for the src file" do
before do
expect(File).to receive(:size).with("/src/file").and_return((50_000 * 1024**2) + 1)
end
it "warns and adjusts the chunk_size" do
expect(cloud_io).to receive(:initiate_multipart).with("dest/file").and_return(1234)
expect(cloud_io).to receive(:upload_parts).with(
"/src/file", "dest/file", 1234, 6 * 1024**2, (50_000 * 1024**2) + 1
).and_return(parts)
expect(cloud_io).to receive(:complete_multipart).with("dest/file", 1234, parts)
expect(cloud_io).to receive(:put_object).never
expect(Logger).to receive(:warn) do |err|
expect(err.message).to include(
"#chunk_size of 5 MiB has been adjusted\n to 6 MiB"
)
end
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is too large" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_MULTIPART_SIZE + 1)
end
it "raises an error" do
expect(cloud_io).to receive(:initiate_multipart).never
expect(cloud_io).to receive(:put_object).never
expect do
cloud_io.upload("/src/file", "dest/file")
end.to raise_error(CloudIO::FileSizeError)
end
end
end # context 'with multipart support'
context "without multipart support" do
let(:cloud_io) { CloudIO::S3.new(bucket: "my_bucket", chunk_size: 0) }
before do
expect(cloud_io).to receive(:initiate_multipart).never
end
context "when src file size is ok" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_FILE_SIZE)
end
it "uploads using put_object" do
expect(cloud_io).to receive(:put_object).with("/src/file", "dest/file")
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is too large" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_FILE_SIZE + 1)
end
it "raises an error" do
expect(cloud_io).to receive(:put_object).never
expect do
cloud_io.upload("/src/file", "dest/file")
end.to raise_error(CloudIO::FileSizeError)
end
end
end # context 'without multipart support'
end # describe '#upload'
describe "#objects" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "ensures prefix ends with /" do
expect(connection).to receive(:get_bucket)
.with("my_bucket", "prefix" => "foo/bar/")
.and_return(double("response", body: { "Contents" => [] }))
expect(cloud_io.objects("foo/bar")).to eq []
end
it "returns an empty array when no objects are found" do
expect(connection).to receive(:get_bucket)
.with("my_bucket", "prefix" => "foo/bar/")
.and_return(double("response", body: { "Contents" => [] }))
expect(cloud_io.objects("foo/bar/")).to eq []
end
context "when returned objects are not truncated" do
let(:resp_body) do
{ "IsTruncated" => false,
"Contents" => Array.new(10) do |n|
{ "Key" => "key_#{n}",
"ETag" => "etag_#{n}",
"StorageClass" => "STANDARD" }
end }
end
it "returns all objects" do
expect(cloud_io).to receive(:with_retries)
.with("GET 'my_bucket/foo/bar/*'").and_yield
expect(connection).to receive(:get_bucket)
.with("my_bucket", "prefix" => "foo/bar/")
.and_return(double("response", body: resp_body))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10
objects.each_with_index do |object, n|
expect(object.key).to eq("key_#{n}")
expect(object.etag).to eq("etag_#{n}")
expect(object.storage_class).to eq("STANDARD")
end
end
end
context "when returned objects are truncated" do
let(:resp_body_a) do
{ "IsTruncated" => true,
"Contents" => (0..6).map do |n|
{ "Key" => "key_#{n}",
"ETag" => "etag_#{n}",
"StorageClass" => "STANDARD" }
end }
end
let(:resp_body_b) do
{ "IsTruncated" => false,
"Contents" => (7..9).map do |n|
{ "Key" => "key_#{n}",
"ETag" => "etag_#{n}",
"StorageClass" => "STANDARD" }
end }
end
it "returns all objects" do
expect(cloud_io).to receive(:with_retries).twice
.with("GET 'my_bucket/foo/bar/*'").and_yield
expect(connection).to receive(:get_bucket)
.with("my_bucket", "prefix" => "foo/bar/")
.and_return(double("response", body: resp_body_a))
expect(connection).to receive(:get_bucket)
.with("my_bucket", "prefix" => "foo/bar/", "marker" => "key_6")
.and_return(double("response", body: resp_body_b))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10
objects.each_with_index do |object, n|
expect(object.key).to eq("key_#{n}")
expect(object.etag).to eq("etag_#{n}")
expect(object.storage_class).to eq("STANDARD")
end
end
it "retries on errors" do
expect(connection).to receive(:get_bucket).once
.with("my_bucket", "prefix" => "foo/bar/")
.and_raise("error")
expect(connection).to receive(:get_bucket).once
.with("my_bucket", "prefix" => "foo/bar/")
.and_return(double("response", body: resp_body_a))
expect(connection).to receive(:get_bucket).once
.with("my_bucket", "prefix" => "foo/bar/", "marker" => "key_6")
.and_raise("error")
expect(connection).to receive(:get_bucket).once
.with("my_bucket", "prefix" => "foo/bar/", "marker" => "key_6")
.and_return(double("response", body: resp_body_b))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10
objects.each_with_index do |object, n|
expect(object.key).to eq("key_#{n}")
expect(object.etag).to eq("etag_#{n}")
expect(object.storage_class).to eq("STANDARD")
end
end
end
end # describe '#objects'
describe "#head_object" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "returns head_object response with retries" do
object = double("response", key: "obj_key")
expect(connection).to receive(:head_object).once
.with("my_bucket", "obj_key")
.and_raise("error")
expect(connection).to receive(:head_object).once
.with("my_bucket", "obj_key")
.and_return(:response)
expect(cloud_io.head_object(object)).to eq :response
end
end # describe '#head_object'
describe "#delete" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
let(:resp_ok) { double("response", body: { "DeleteResult" => [] }) }
let(:resp_bad) do
double(
"response",
body: {
"DeleteResult" => [
{ "Error" => {
"Key" => "obj_key",
"Code" => "InternalError",
"Message" => "We encountered an internal error. Please try again."
} }
]
}
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "accepts a single Object" do
object = described_class::Object.new(:foo, "Key" => "obj_key")
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", ["obj_key"], quiet: true
).and_return(resp_ok)
cloud_io.delete(object)
end
it "accepts multiple Objects" do
object_a = described_class::Object.new(:foo, "Key" => "obj_key_a")
object_b = described_class::Object.new(:foo, "Key" => "obj_key_b")
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", ["obj_key_a", "obj_key_b"], quiet: true
).and_return(resp_ok)
objects = [object_a, object_b]
expect { cloud_io.delete(objects) }.not_to change { objects }
end
it "accepts a single key" do
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", ["obj_key"], quiet: true
).and_return(resp_ok)
cloud_io.delete("obj_key")
end
it "accepts multiple keys" do
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", ["obj_key_a", "obj_key_b"], quiet: true
).and_return(resp_ok)
objects = ["obj_key_a", "obj_key_b"]
expect { cloud_io.delete(objects) }.not_to change { objects }
end
it "does nothing if empty array passed" do
expect(connection).to receive(:delete_multiple_objects).never
cloud_io.delete([])
end
context "with more than 1000 objects" do
let(:keys_1k) { Array.new(1000) { "key" } }
let(:keys_10) { Array.new(10) { "key" } }
let(:keys_all) { keys_1k + keys_10 }
before do
expect(cloud_io).to receive(:with_retries).twice.with("DELETE Multiple Objects").and_yield
end
it "deletes 1000 objects per request" do
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", keys_1k, quiet: true
).and_return(resp_ok)
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", keys_10, quiet: true
).and_return(resp_ok)
expect { cloud_io.delete(keys_all) }.not_to change { keys_all }
end
it "prevents mutation of options to delete_multiple_objects" do
expect(connection).to receive(:delete_multiple_objects) do |bucket, keys, opts|
bucket == "my_bucket" && keys == keys_1k && opts.delete(:quiet)
end.and_return(resp_ok)
expect(connection).to receive(:delete_multiple_objects).with(
"my_bucket", keys_10, quiet: true
).and_return(resp_ok)
expect { cloud_io.delete(keys_all) }.not_to change { keys_all }
end
end
it "retries on raised errors" do
expect(connection).to receive(:delete_multiple_objects).once
.with("my_bucket", ["obj_key"], quiet: true)
.and_raise("error")
expect(connection).to receive(:delete_multiple_objects).once
.with("my_bucket", ["obj_key"], quiet: true)
.and_return(resp_ok)
cloud_io.delete("obj_key")
end
it "retries on returned errors" do
expect(connection).to receive(:delete_multiple_objects).twice
.with("my_bucket", ["obj_key"], quiet: true)
.and_return(resp_bad, resp_ok)
cloud_io.delete("obj_key")
end
it "fails after retries exceeded" do
expect(connection).to receive(:delete_multiple_objects).once
.with("my_bucket", ["obj_key"], quiet: true)
.and_raise("error message")
expect(connection).to receive(:delete_multiple_objects).once
.with("my_bucket", ["obj_key"], quiet: true)
.and_return(resp_bad)
expect do
cloud_io.delete("obj_key")
end.to raise_error CloudIO::Error, "CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: DELETE Multiple Objects\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"CloudIO::S3::Error: The server returned the following:\n" \
" Failed to delete: obj_key\n" \
" Reason: InternalError: We encountered an internal error. " \
"Please try again."
expect(Logger.messages.map(&:lines).join("\n")).to eq(
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: DELETE Multiple Objects\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error message"
)
end
end # describe '#delete'
describe "#connection" do
specify "using AWS access keys" do
expect(Fog::Storage).to receive(:new).once.with(
provider: "AWS",
aws_access_key_id: "my_access_key_id",
aws_secret_access_key: "my_secret_access_key",
region: "my_region"
).and_return(connection)
expect(connection).to receive(:sync_clock).once
cloud_io = CloudIO::S3.new(
access_key_id: "my_access_key_id",
secret_access_key: "my_secret_access_key",
region: "my_region"
)
expect(cloud_io.send(:connection)).to be connection
expect(cloud_io.send(:connection)).to be connection
end
specify "using AWS IAM profile" do
expect(Fog::Storage).to receive(:new).once.with(
provider: "AWS",
use_iam_profile: true,
region: "my_region"
).and_return(connection)
expect(connection).to receive(:sync_clock).once
cloud_io = CloudIO::S3.new(
use_iam_profile: true,
region: "my_region"
)
expect(cloud_io.send(:connection)).to be connection
expect(cloud_io.send(:connection)).to be connection
end
it "passes along fog_options" do
expect(Fog::Storage).to receive(:new).with(provider: "AWS",
region: nil,
aws_access_key_id: "my_key",
aws_secret_access_key: "my_secret",
connection_options: { opt_key: "opt_value" },
my_key: "my_value").and_return(double("response", sync_clock: nil))
CloudIO::S3.new(
access_key_id: "my_key",
secret_access_key: "my_secret",
fog_options: {
connection_options: { opt_key: "opt_value" },
my_key: "my_value"
}
).send(:connection)
end
end # describe '#connection'
describe "#put_object" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
let(:file) { double }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
md5_file = double
expect(Digest::MD5).to receive(:file).with("/src/file").and_return(md5_file)
expect(md5_file).to receive(:digest).and_return(:md5_digest)
expect(Base64).to receive(:encode64).with(:md5_digest).and_return("encoded_digest\n")
end
it "calls put_object with Content-MD5 header" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object)
.with("my_bucket", "dest/file", file, "Content-MD5" => "encoded_digest")
cloud_io.send(:put_object, "/src/file", "dest/file")
end
it "fails after retries" do
expect(File).to receive(:open).twice.with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object).once
.with("my_bucket", "dest/file", file, "Content-MD5" => "encoded_digest")
.and_raise("error1")
expect(connection).to receive(:put_object).once
.with("my_bucket", "dest/file", file, "Content-MD5" => "encoded_digest")
.and_raise("error2")
expect do
cloud_io.send(:put_object, "/src/file", "dest/file")
end.to raise_error CloudIO::Error, "CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: PUT 'my_bucket/dest/file'\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error2"
expect(Logger.messages.map(&:lines).join("\n")).to eq(
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: PUT 'my_bucket/dest/file'\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error1"
)
end
context "with #encryption and #storage_class set" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
encryption: :aes256,
storage_class: :reduced_redundancy,
max_retries: 1,
retry_waitsec: 0
)
end
it "sets headers for encryption and storage_class" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object).with(
"my_bucket", "dest/file", file,
"Content-MD5" => "encoded_digest",
"x-amz-server-side-encryption" => "AES256",
"x-amz-storage-class" => "REDUCED_REDUNDANCY"
)
cloud_io.send(:put_object, "/src/file", "dest/file")
end
end
end # describe '#put_object'
describe "#initiate_multipart" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
let(:response) { double("response", body: { "UploadId" => 1234 }) }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
expect(Logger).to receive(:info).with(" Initiate Multipart 'my_bucket/dest/file'")
end
it "initiates multipart upload with retries" do
expect(cloud_io).to receive(:with_retries)
.with("POST 'my_bucket/dest/file' (Initiate)").and_yield
expect(connection).to receive(:initiate_multipart_upload)
.with("my_bucket", "dest/file", {}).and_return(response)
expect(cloud_io.send(:initiate_multipart, "dest/file")).to be 1234
end
context "with #encryption and #storage_class set" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
encryption: :aes256,
storage_class: :reduced_redundancy,
max_retries: 1,
retry_waitsec: 0
)
end
it "sets headers for encryption and storage_class" do
expect(connection).to receive(:initiate_multipart_upload).with(
"my_bucket", "dest/file",
"x-amz-server-side-encryption" => "AES256",
"x-amz-storage-class" => "REDUCED_REDUNDANCY"
).and_return(response)
expect(cloud_io.send(:initiate_multipart, "dest/file")).to be 1234
end
end
end # describe '#initiate_multipart'
describe "#upload_parts" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
let(:chunk_bytes) { 1024**2 * 5 }
let(:file_size) { chunk_bytes + 250 }
let(:chunk_a) { "a" * chunk_bytes }
let(:encoded_digest_a) { "ebKBBg0ze5srhMzzkK3PdA==" }
let(:chunk_a_resp) { double("response", headers: { "ETag" => "chunk_a_etag" }) }
let(:chunk_b) { "b" * 250 }
let(:encoded_digest_b) { "OCttLDka1ocamHgkHvZMyQ==" }
let(:chunk_b_resp) { double("response", headers: { "ETag" => "chunk_b_etag" }) }
let(:file) { StringIO.new(chunk_a + chunk_b) }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "uploads chunks with Content-MD5" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
allow(StringIO).to receive(:new).with(chunk_a).and_return(:stringio_a)
allow(StringIO).to receive(:new).with(chunk_b).and_return(:stringio_b)
expect(cloud_io).to receive(:with_retries).with(
"PUT 'my_bucket/dest/file' Part #1"
).and_yield
expect(connection).to receive(:upload_part).with(
"my_bucket", "dest/file", 1234, 1, :stringio_a,
"Content-MD5" => encoded_digest_a
).and_return(chunk_a_resp)
expect(cloud_io).to receive(:with_retries).with(
"PUT 'my_bucket/dest/file' Part #2"
).and_yield
expect(connection).to receive(:upload_part).with(
"my_bucket", "dest/file", 1234, 2, :stringio_b,
"Content-MD5" => encoded_digest_b
).and_return(chunk_b_resp)
expect(
cloud_io.send(:upload_parts,
"/src/file", "dest/file", 1234, chunk_bytes, file_size)
).to eq ["chunk_a_etag", "chunk_b_etag"]
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Uploading 2 Parts...\n" \
" ...90% Complete..."
)
end
it "logs progress" do
chunk_bytes = 1024**2 * 1
file_size = chunk_bytes * 100
file = StringIO.new("x" * file_size)
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
allow(Digest::MD5).to receive(:digest)
allow(Base64).to receive(:encode64).and_return("")
allow(connection).to receive(:upload_part).and_return(double("response", headers: {}))
cloud_io.send(:upload_parts,
"/src/file", "dest/file", 1234, chunk_bytes, file_size)
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Uploading 100 Parts...\n" \
" ...10% Complete...\n" \
" ...20% Complete...\n" \
" ...30% Complete...\n" \
" ...40% Complete...\n" \
" ...50% Complete...\n" \
" ...60% Complete...\n" \
" ...70% Complete...\n" \
" ...80% Complete...\n" \
" ...90% Complete..."
)
end
end # describe '#upload_parts'
describe "#complete_multipart" do
let(:cloud_io) do
CloudIO::S3.new(
bucket: "my_bucket",
max_retries: 1,
retry_waitsec: 0
)
end
let(:resp_ok) do
double(
"response",
body: {
"Location" => "http://my_bucket.s3.amazonaws.com/dest/file",
"Bucket" => "my_bucket",
"Key" => "dest/file",
"ETag" => '"some-etag"'
}
)
end
let(:resp_bad) do
double(
"response",
body: {
"Code" => "InternalError",
"Message" => "We encountered an internal error. Please try again."
}
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "retries on raised errors" do
expect(connection).to receive(:complete_multipart_upload).once
.with("my_bucket", "dest/file", 1234, [:parts])
.and_raise("error")
expect(connection).to receive(:complete_multipart_upload).once
.with("my_bucket", "dest/file", 1234, [:parts])
.and_return(resp_ok)
cloud_io.send(:complete_multipart, "dest/file", 1234, [:parts])
end
it "retries on returned errors" do
expect(connection).to receive(:complete_multipart_upload).twice
.with("my_bucket", "dest/file", 1234, [:parts])
.and_return(resp_bad, resp_ok)
cloud_io.send(:complete_multipart, "dest/file", 1234, [:parts])
end
it "fails after retries exceeded" do
expect(connection).to receive(:complete_multipart_upload).once
.with("my_bucket", "dest/file", 1234, [:parts])
.and_raise("error message")
expect(connection).to receive(:complete_multipart_upload).once
.with("my_bucket", "dest/file", 1234, [:parts])
.and_return(resp_bad)
expect do
cloud_io.send(:complete_multipart, "dest/file", 1234, [:parts])
end.to raise_error CloudIO::Error, "CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: POST 'my_bucket/dest/file' (Complete)\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"CloudIO::S3::Error: The server returned the following error:\n" \
" InternalError: We encountered an internal error. Please try again."
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Complete Multipart 'my_bucket/dest/file'\n" \
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: POST 'my_bucket/dest/file' (Complete)\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error message"
)
end
end # describe '#complete_multipart'
describe "#headers" do
let(:cloud_io) { CloudIO::S3.new }
it "returns empty headers by default" do
allow(cloud_io).to receive(:encryption).and_return(nil)
allow(cloud_io).to receive(:storage_class).and_return(nil)
expect(cloud_io.send(:headers)).to eq({})
end
it "returns headers for server-side encryption" do
allow(cloud_io).to receive(:storage_class).and_return(nil)
["aes256", :aes256].each do |arg|
allow(cloud_io).to receive(:encryption).and_return(arg)
expect(cloud_io.send(:headers)).to eq(
"x-amz-server-side-encryption" => "AES256"
)
end
end
it "returns headers for reduced redundancy storage" do
allow(cloud_io).to receive(:encryption).and_return(nil)
["reduced_redundancy", :reduced_redundancy].each do |arg|
allow(cloud_io).to receive(:storage_class).and_return(arg)
expect(cloud_io.send(:headers)).to eq(
"x-amz-storage-class" => "REDUCED_REDUNDANCY"
)
end
end
it "returns headers for both" do
allow(cloud_io).to receive(:encryption).and_return(:aes256)
allow(cloud_io).to receive(:storage_class).and_return(:reduced_redundancy)
expect(cloud_io.send(:headers)).to eq(
"x-amz-server-side-encryption" => "AES256",
"x-amz-storage-class" => "REDUCED_REDUNDANCY"
)
end
it "returns empty headers for empty values" do
allow(cloud_io).to receive(:encryption).and_return("")
allow(cloud_io).to receive(:storage_class).and_return("")
expect(cloud_io.send(:headers)).to eq({})
end
end # describe '#headers
describe "Object" do
let(:cloud_io) { CloudIO::S3.new }
let(:obj_data) do
{ "Key" => "obj_key", "ETag" => "obj_etag", "StorageClass" => "STANDARD" }
end
let(:object) { CloudIO::S3::Object.new(cloud_io, obj_data) }
describe "#initialize" do
it "creates Object from data" do
expect(object.key).to eq "obj_key"
expect(object.etag).to eq "obj_etag"
expect(object.storage_class).to eq "STANDARD"
end
end
describe "#encryption" do
it "returns the algorithm used for server-side encryption" do
expect(cloud_io).to receive(:head_object).once.with(object).and_return(
double("response", headers: { "x-amz-server-side-encryption" => "AES256" })
)
expect(object.encryption).to eq "AES256"
expect(object.encryption).to eq "AES256"
end
it "returns nil if SSE was not used" do
expect(cloud_io).to receive(:head_object).once.with(object)
.and_return(double("response", headers: {}))
expect(object.encryption).to be_nil
expect(object.encryption).to be_nil
end
end # describe '#encryption'
end # describe 'Object'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
require "spec_helper"
require "backup/cloud_io/cloud_files"
module Backup # rubocop:disable Metrics/ModuleLength
describe CloudIO::CloudFiles do
let(:connection) { double }
describe "#upload" do
before do
expect_any_instance_of(described_class).to receive(:create_containers)
end
context "with SLO support" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
segments_container: "my_segments_container",
segment_size: 5
)
end
let(:segments) { double }
context "when src file is larger than segment_size" do
before do
expect(File).to receive(:size).with("/src/file").and_return(10 * 1024**2)
end
it "uploads as a SLO" do
expect(cloud_io).to receive(:upload_segments).with(
"/src/file", "dest/file", 5 * 1024**2, 10 * 1024**2
).and_return(segments)
expect(cloud_io).to receive(:upload_manifest).with("dest/file", segments)
expect(cloud_io).to receive(:put_object).never
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is not larger than segment_size" do
before do
expect(File).to receive(:size).with("/src/file").and_return(5 * 1024**2)
end
it "uploads as a non-SLO" do
expect(cloud_io).to receive(:put_object).with("/src/file", "dest/file")
expect(cloud_io).to receive(:upload_segments).never
expect(cloud_io).to receive(:upload_manifest).never
cloud_io.upload("/src/file", "dest/file")
end
end
context "when segment_size is too small for the src file" do
before do
expect(File).to receive(:size).with("/src/file").and_return((5000 * 1024**2) + 1)
end
it "warns and adjusts the segment_size" do
expect(cloud_io).to receive(:upload_segments).with(
"/src/file", "dest/file", 6 * 1024**2, (5000 * 1024**2) + 1
).and_return(segments)
expect(cloud_io).to receive(:upload_manifest).with("dest/file", segments)
expect(cloud_io).to receive(:put_object).never
expect(Logger).to receive(:warn) do |err|
expect(err.message).to include(
"#segment_size of 5 MiB has been adjusted\n to 6 MiB"
)
end
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is too large" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_SLO_SIZE + 1)
end
it "raises an error" do
expect(cloud_io).to receive(:upload_segments).never
expect(cloud_io).to receive(:upload_manifest).never
expect(cloud_io).to receive(:put_object).never
expect do
cloud_io.upload("/src/file", "dest/file")
end.to raise_error(CloudIO::FileSizeError)
end
end
end # context 'with SLO support'
context "without SLO support" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
segment_size: 0
)
end
before do
expect(cloud_io).to receive(:upload_segments).never
end
context "when src file size is ok" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_FILE_SIZE)
end
it "uploads as non-SLO" do
expect(cloud_io).to receive(:put_object).with("/src/file", "dest/file")
cloud_io.upload("/src/file", "dest/file")
end
end
context "when src file is too large" do
before do
expect(File).to receive(:size).with("/src/file")
.and_return(described_class::MAX_FILE_SIZE + 1)
end
it "raises an error" do
expect(cloud_io).to receive(:put_object).never
expect do
cloud_io.upload("/src/file", "dest/file")
end.to raise_error(CloudIO::FileSizeError)
end
end
end # context 'without SLO support'
end # describe '#upload'
describe "#objects" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
expect(cloud_io).to receive(:create_containers)
end
it "ensures prefix ends with /" do
expect(connection).to receive(:get_container)
.with("my_container", prefix: "foo/bar/")
.and_return(double("response", body: []))
expect(cloud_io.objects("foo/bar")).to eq []
end
it "returns an empty array when no objects are found" do
expect(connection).to receive(:get_container)
.with("my_container", prefix: "foo/bar/")
.and_return(double("response", body: []))
expect(cloud_io.objects("foo/bar/")).to eq []
end
context "when less than 10,000 objects are available" do
let(:resp_body) do
Array.new(10) { |n| { "name" => "name_#{n}", "hash" => "hash_#{n}" } }
end
it "returns all objects" do
expect(cloud_io).to receive(:with_retries)
.with("GET 'my_container/foo/bar/*'").and_yield
expect(connection).to receive(:get_container)
.with("my_container", prefix: "foo/bar/")
.and_return(double("response", body: resp_body))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10
objects.each_with_index do |object, n|
expect(object.name).to eq("name_#{n}")
expect(object.hash).to eq("hash_#{n}")
end
end
end
context "when more than 10,000 objects are available" do
let(:resp_body_a) do
Array.new(10_000) { |n| { "name" => "name_#{n}", "hash" => "hash_#{n}" } }
end
let(:resp_body_b) do
Array.new(10) do |n|
n += 10_000
{ "name" => "name_#{n}", "hash" => "hash_#{n}" }
end
end
it "returns all objects" do
expect(cloud_io).to receive(:with_retries).twice
.with("GET 'my_container/foo/bar/*'").and_yield
expect(connection).to receive(:get_container)
.with("my_container", prefix: "foo/bar/")
.and_return(double("response", body: resp_body_a))
expect(connection).to receive(:get_container)
.with("my_container", prefix: "foo/bar/", marker: "name_9999")
.and_return(double("response", body: resp_body_b))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10_010
end
it "retries on errors" do
expect(connection).to receive(:get_container).once
.with("my_container", prefix: "foo/bar/")
.and_raise("error")
expect(connection).to receive(:get_container).once
.with("my_container", prefix: "foo/bar/")
.and_return(double("response", body: resp_body_a))
expect(connection).to receive(:get_container).once
.with("my_container", prefix: "foo/bar/", marker: "name_9999")
.and_raise("error")
expect(connection).to receive(:get_container).once
.with("my_container", prefix: "foo/bar/", marker: "name_9999")
.and_return(double("response", body: resp_body_b))
objects = cloud_io.objects("foo/bar/")
expect(objects.count).to be 10_010
end
end
end # describe '#objects'
describe "#head_object" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "returns head_object response with retries" do
object = double("response", name: "obj_name")
expect(connection).to receive(:head_object).once
.with("my_container", "obj_name")
.and_raise("error")
expect(connection).to receive(:head_object).once
.with("my_container", "obj_name")
.and_return(:response)
expect(cloud_io.head_object(object)).to eq :response
end
end # describe '#head_object'
describe "#delete" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
let(:resp_ok) { double("response", body: { "Response Status" => "200 OK" }) }
let(:resp_bad) { double("response", body: { "Response Status" => "400 Bad Request" }) }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "accepts a single Object" do
object = described_class::Object.new(
:foo, "name" => "obj_name", "hash" => "obj_hash"
)
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", ["obj_name"]).and_return(resp_ok)
cloud_io.delete(object)
end
it "accepts a multiple Objects" do
object_a = described_class::Object.new(
:foo, "name" => "obj_a_name", "hash" => "obj_a_hash"
)
object_b = described_class::Object.new(
:foo, "name" => "obj_b_name", "hash" => "obj_b_hash"
)
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", ["obj_a_name", "obj_b_name"]).and_return(resp_ok)
objects = [object_a, object_b]
expect { cloud_io.delete(objects) }.not_to change { objects.map(&:inspect) }
end
it "accepts a single name" do
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", ["obj_name"]).and_return(resp_ok)
cloud_io.delete("obj_name")
end
it "accepts multiple names" do
expect(cloud_io).to receive(:with_retries).with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", ["obj_a_name", "obj_b_name"]).and_return(resp_ok)
names = ["obj_a_name", "obj_b_name"]
expect { cloud_io.delete(names) }.not_to change { names }
end
it "does nothing if empty array passed" do
expect(connection).to receive(:delete_multiple_objects).never
cloud_io.delete([])
end
it "deletes 10,000 objects per request" do
max_names = ["name"] * 10_000
names_remaining = ["name"] * 10
names_all = max_names + names_remaining
expect(cloud_io).to receive(:with_retries).twice.with("DELETE Multiple Objects").and_yield
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", max_names).and_return(resp_ok)
expect(connection).to receive(:delete_multiple_objects)
.with("my_container", names_remaining).and_return(resp_ok)
expect { cloud_io.delete(names_all) }.not_to change { names_all }
end
it "retries on raised errors" do
expect(connection).to receive(:delete_multiple_objects).once
.with("my_container", ["obj_name"])
.and_raise("error")
expect(connection).to receive(:delete_multiple_objects).once
.with("my_container", ["obj_name"])
.and_return(resp_ok)
cloud_io.delete("obj_name")
end
it "retries on returned errors" do
expect(connection).to receive(:delete_multiple_objects).twice
.with("my_container", ["obj_name"])
.and_return(resp_bad, resp_ok)
cloud_io.delete("obj_name")
end
it "fails after retries exceeded" do
expect(connection).to receive(:delete_multiple_objects).once
.with("my_container", ["obj_name"])
.and_raise("error message")
expect(connection).to receive(:delete_multiple_objects).once
.with("my_container", ["obj_name"])
.and_return(resp_bad)
expect do
cloud_io.delete("obj_name")
end.to raise_error(CloudIO::Error) { |err|
expect(err.message).to eq(
"CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: DELETE Multiple Objects\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"CloudIO::CloudFiles::Error: 400 Bad Request\n" \
" The server returned the following:\n" \
" {\"Response Status\"=>\"400 Bad Request\"}"
)
}
expect(Logger.messages.map(&:lines).join("\n")).to eq(
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: DELETE Multiple Objects\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error message"
)
end
end # describe '#delete'
describe "#delete_slo" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
let(:object_a) do
described_class::Object.new(
:foo, "name" => "obj_a_name", "hash" => "obj_a_hash"
)
end
let(:object_b) do
described_class::Object.new(
:foo, "name" => "obj_b_name", "hash" => "obj_b_hash"
)
end
let(:resp_ok) { double("response", body: { "Response Status" => "200 OK" }) }
let(:resp_bad) { double("response", body: { "Response Status" => "400 Bad Request" }) }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "deletes a single SLO" do
expect(connection).to receive(:delete_static_large_object)
.with("my_container", "obj_a_name").and_return(resp_ok)
cloud_io.delete_slo(object_a)
end
it "deletes a multiple SLOs" do
expect(connection).to receive(:delete_static_large_object)
.with("my_container", "obj_a_name").and_return(resp_ok)
expect(connection).to receive(:delete_static_large_object)
.with("my_container", "obj_b_name").and_return(resp_ok)
cloud_io.delete_slo([object_a, object_b])
end
it "retries on raised and returned errors" do
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_a_name")
.and_raise("error")
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_a_name")
.and_return(resp_ok)
expect(connection).to receive(:delete_static_large_object).twice
.with("my_container", "obj_b_name")
.and_return(resp_bad, resp_ok)
cloud_io.delete_slo([object_a, object_b])
end
it "fails after retries exceeded" do
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_a_name")
.and_raise("error message")
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_a_name")
.and_return(resp_ok)
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_b_name")
.and_return(resp_bad)
expect(connection).to receive(:delete_static_large_object).once
.with("my_container", "obj_b_name")
.and_raise("failure")
expect do
cloud_io.delete_slo([object_a, object_b])
end.to raise_error(CloudIO::Error) { |err|
expect(err.message).to eq(
"CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: DELETE SLO Manifest 'my_container/obj_b_name'\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: failure"
)
}
expect(Logger.messages.map(&:lines).join("\n")).to eq(
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: DELETE SLO Manifest 'my_container/obj_a_name'\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error message\n" \
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: DELETE SLO Manifest 'my_container/obj_b_name'\n" \
"--- Wrapped Exception ---\n" \
"CloudIO::CloudFiles::Error: 400 Bad Request\n" \
" The server returned the following:\n" \
" {\"Response Status\"=>\"400 Bad Request\"}"
)
end
end # describe '#delete_slo'
describe "#connection" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
username: "my_username",
api_key: "my_api_key",
auth_url: "my_auth_url",
region: "my_region",
servicenet: false
)
end
it "caches a connection" do
expect(Fog::Storage).to receive(:new).once.with(
provider: "Rackspace",
rackspace_username: "my_username",
rackspace_api_key: "my_api_key",
rackspace_auth_url: "my_auth_url",
rackspace_region: "my_region",
rackspace_servicenet: false
).and_return(connection)
expect(cloud_io.send(:connection)).to be connection
expect(cloud_io.send(:connection)).to be connection
end
it "passes along fog_options" do
expect(Fog::Storage).to receive(:new).with(provider: "Rackspace",
rackspace_username: "my_user",
rackspace_api_key: "my_key",
rackspace_auth_url: nil,
rackspace_region: nil,
rackspace_servicenet: nil,
connection_options: { opt_key: "opt_value" },
my_key: "my_value")
CloudIO::CloudFiles.new(
username: "my_user",
api_key: "my_key",
fog_options: {
connection_options: { opt_key: "opt_value" },
my_key: "my_value"
}
).send(:connection)
end
end # describe '#connection'
describe "#create_containers" do
context "with SLO support" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
segments_container: "my_segments_container",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "creates containers once with retries" do
expect(connection).to receive(:put_container).twice
.with("my_container")
expect(connection).to receive(:put_container).once
.with("my_segments_container")
.and_raise("error")
expect(connection).to receive(:put_container).once
.with("my_segments_container")
.and_return(nil)
cloud_io.send(:create_containers)
cloud_io.send(:create_containers)
end
end
context "without SLO support" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "creates containers once with retries" do
expect(connection).to receive(:put_container).once
.with("my_container")
.and_raise("error")
expect(connection).to receive(:put_container).once
.with("my_container")
.and_return(nil)
cloud_io.send(:create_containers)
cloud_io.send(:create_containers)
end
end
end # describe '#create_containers'
describe "#put_object" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
let(:file) { double }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
md5_file = double
expect(Digest::MD5).to receive(:file).with("/src/file").and_return(md5_file)
expect(md5_file).to receive(:hexdigest).and_return("abc123")
end
it "calls put_object with ETag" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object)
.with("my_container", "dest/file", file, "ETag" => "abc123")
cloud_io.send(:put_object, "/src/file", "dest/file")
end
it "fails after retries" do
expect(File).to receive(:open).twice.with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object).once
.with("my_container", "dest/file", file, "ETag" => "abc123")
.and_raise("error1")
expect(connection).to receive(:put_object).once
.with("my_container", "dest/file", file, "ETag" => "abc123")
.and_raise("error2")
expect do
cloud_io.send(:put_object, "/src/file", "dest/file")
end.to raise_error(CloudIO::Error) { |err|
expect(err.message).to eq(
"CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: PUT 'my_container/dest/file'\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error2"
)
}
expect(Logger.messages.map(&:lines).join("\n")).to eq(
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: PUT 'my_container/dest/file'\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error1"
)
end
context "with #days_to_keep set" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
days_to_keep: 1,
max_retries: 1,
retry_waitsec: 0
)
end
let(:delete_at) { cloud_io.send(:delete_at) }
it "call put_object with X-Delete-At" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object).with(
"my_container", "dest/file", file,
"ETag" => "abc123", "X-Delete-At" => delete_at
)
cloud_io.send(:put_object, "/src/file", "dest/file")
end
end
end # describe '#put_object'
describe "#upload_segments" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
segments_container: "my_segments_container",
max_retries: 1,
retry_waitsec: 0
)
end
let(:segment_bytes) { 1024**2 * 2 }
let(:file_size) { segment_bytes + 250 }
let(:digest_a) { "de89461b64701958984c95d1bfb0065a" }
let(:digest_b) { "382b6d2c391ad6871a9878241ef64cc9" }
let(:file) { StringIO.new(("a" * segment_bytes) + ("b" * 250)) }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "uploads segments with ETags" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(cloud_io).to receive(:with_retries)
.with("PUT 'my_segments_container/dest/file/0001'").and_yield
expect(connection).to receive(:put_object).with(
"my_segments_container", "dest/file/0001", nil,
"ETag" => digest_a
).and_yield.and_yield.and_yield # twice to read 2 MiB, third should not read
expect(cloud_io).to receive(:with_retries)
.with("PUT 'my_segments_container/dest/file/0002'").and_yield
expect(connection).to receive(:put_object).with(
"my_segments_container", "dest/file/0002", nil,
"ETag" => digest_b
).and_yield.and_yield # once to read 250 B, second should not read
expected = [
{ path: "my_segments_container/dest/file/0001",
etag: digest_a,
size_bytes: segment_bytes },
{ path: "my_segments_container/dest/file/0002",
etag: digest_b,
size_bytes: 250 }
]
expect(
cloud_io.send(:upload_segments,
"/src/file", "dest/file", segment_bytes, file_size)
).to eq expected
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Uploading 2 SLO Segments...\n" \
" ...90% Complete..."
)
end
it "logs progress" do
segment_bytes = 1024**2 * 1
file_size = segment_bytes * 100
file = StringIO.new("x" * file_size)
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
allow(cloud_io).to receive(:segment_md5)
allow(connection).to receive(:put_object).and_yield
cloud_io.send(:upload_segments,
"/src/file", "dest/file", segment_bytes, file_size)
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Uploading 100 SLO Segments...\n" \
" ...10% Complete...\n" \
" ...20% Complete...\n" \
" ...30% Complete...\n" \
" ...40% Complete...\n" \
" ...50% Complete...\n" \
" ...60% Complete...\n" \
" ...70% Complete...\n" \
" ...80% Complete...\n" \
" ...90% Complete..."
)
end
context "when #days_to_keep is set" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
segments_container: "my_segments_container",
days_to_keep: 1,
max_retries: 1,
retry_waitsec: 0
)
end
let(:delete_at) { cloud_io.send(:delete_at) }
it "uploads segments with X-Delete-At" do
expect(File).to receive(:open).with("/src/file", "r").and_yield(file)
expect(connection).to receive(:put_object).with(
"my_segments_container", "dest/file/0001", nil,
"ETag" => digest_a, "X-Delete-At" => delete_at
).and_yield.and_yield # twice to read 2 MiB
expect(connection).to receive(:put_object).with(
"my_segments_container", "dest/file/0002", nil,
"ETag" => digest_b, "X-Delete-At" => delete_at
).and_yield # once to read 250 B
expected = [
{ path: "my_segments_container/dest/file/0001",
etag: digest_a,
size_bytes: segment_bytes },
{ path: "my_segments_container/dest/file/0002",
etag: digest_b,
size_bytes: 250 }
]
expect(
cloud_io.send(:upload_segments,
"/src/file", "dest/file", segment_bytes, file_size)
).to eq expected
end
end
end # describe '#upload_segments'
describe "#upload_manifest" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
let(:segments) { double }
before do
allow(cloud_io).to receive(:connection).and_return(connection)
end
it "uploads manifest with retries" do
expect(connection).to receive(:put_static_obj_manifest).once
.with("my_container", "dest/file", segments, {})
.and_raise("error")
expect(connection).to receive(:put_static_obj_manifest).once
.with("my_container", "dest/file", segments, {})
.and_return(nil)
cloud_io.send(:upload_manifest, "dest/file", segments)
end
it "fails when retries exceeded" do
expect(connection).to receive(:put_static_obj_manifest).once
.with("my_container", "dest/file", segments, {})
.and_raise("error1")
expect(connection).to receive(:put_static_obj_manifest).once
.with("my_container", "dest/file", segments, {})
.and_raise("error2")
expect do
cloud_io.send(:upload_manifest, "dest/file", segments)
end.to raise_error(CloudIO::Error) { |err|
expect(err.message).to eq(
"CloudIO::Error: Max Retries (1) Exceeded!\n" \
" Operation: PUT SLO Manifest 'my_container/dest/file'\n" \
" Be sure to check the log messages for each retry attempt.\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error2"
)
}
expect(Logger.messages.map(&:lines).join("\n")).to eq(
" Storing SLO Manifest 'my_container/dest/file'\n" \
"CloudIO::Error: Retry #1 of 1\n" \
" Operation: PUT SLO Manifest 'my_container/dest/file'\n" \
"--- Wrapped Exception ---\n" \
"RuntimeError: error1"
)
end
context "with #days_to_keep set" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
days_to_keep: 1,
max_retries: 1,
retry_waitsec: 0
)
end
let(:delete_at) { cloud_io.send(:delete_at) }
it "uploads manifest with X-Delete-At" do
expect(connection).to receive(:put_static_obj_manifest)
.with("my_container", "dest/file", segments, "X-Delete-At" => delete_at)
cloud_io.send(:upload_manifest, "dest/file", segments)
end
end
end # describe '#upload_manifest'
describe "#headers" do
let(:cloud_io) do
CloudIO::CloudFiles.new(
container: "my_container",
max_retries: 1,
retry_waitsec: 0
)
end
it "returns empty headers" do
expect(cloud_io.send(:headers)).to eq({})
end
context "with #days_to_keep set" do
let(:cloud_io) { CloudIO::CloudFiles.new(days_to_keep: 30) }
it "returns X-Delete-At header" do
Timecop.freeze do
expected = (Time.now.utc + 30 * 60**2 * 24).to_i
headers = cloud_io.send(:headers)
expect(headers["X-Delete-At"]).to eq expected
end
end
it "returns the same headers for subsequent calls" do
headers = cloud_io.send(:headers)
expect(cloud_io.send(:headers)).to eq headers
end
end
end # describe '#headers'
describe "Object" do
let(:cloud_io) { CloudIO::CloudFiles.new }
let(:obj_data) { { "name" => "obj_name", "hash" => "obj_hash" } }
let(:object) { CloudIO::CloudFiles::Object.new(cloud_io, obj_data) }
describe "#initialize" do
it "creates Object from data" do
expect(object.name).to eq "obj_name"
expect(object.hash).to eq "obj_hash"
end
end
describe "#slo?" do
it "returns true when object is an SLO" do
expect(cloud_io).to receive(:head_object).once
.with(object)
.and_return(double("response", headers: { "X-Static-Large-Object" => "True" }))
expect(object.slo?).to be(true)
expect(object.slo?).to be(true)
end
it "returns false when object is not an SLO" do
expect(cloud_io).to receive(:head_object).with(object).and_return(double("response", headers: {}))
expect(object.slo?).to be(false)
end
end
describe "#marked_for_deletion?" do
it "returns true when object has X-Delete-At set" do
expect(cloud_io).to receive(:head_object).once
.with(object)
.and_return(double("response", headers: { "X-Delete-At" => "12345" }))
expect(object.marked_for_deletion?).to be(true)
expect(object.marked_for_deletion?).to be(true)
end
it "returns false when object does not have X-Delete-At set" do
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | true |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/compressor/bzip2_spec.rb | spec/compressor/bzip2_spec.rb | require "spec_helper"
describe Backup::Compressor::Bzip2 do
before do
allow_any_instance_of(Backup::Compressor::Bzip2).to receive(:utility).and_return("bzip2")
end
it "should be a subclass of Compressor::Base" do
expect(Backup::Compressor::Bzip2
.superclass).to eq(Backup::Compressor::Base)
end
describe "#initialize" do
let(:compressor) { Backup::Compressor::Bzip2.new }
after { Backup::Compressor::Bzip2.clear_defaults! }
it "should load pre-configured defaults" do
expect_any_instance_of(Backup::Compressor::Bzip2).to receive(:load_defaults!)
compressor
end
context "when no pre-configured defaults have been set" do
it "should use default values" do
expect(compressor.level).to eq(false)
expect(compressor.instance_variable_get(:@cmd)).to eq("bzip2")
expect(compressor.instance_variable_get(:@ext)).to eq(".bz2")
end
it "should use the values given" do
compressor = Backup::Compressor::Bzip2.new do |c|
c.level = 5
end
expect(compressor.level).to eq(5)
expect(compressor.instance_variable_get(:@cmd)).to eq("bzip2 -5")
expect(compressor.instance_variable_get(:@ext)).to eq(".bz2")
end
end # context 'when no pre-configured defaults have been set'
context "when pre-configured defaults have been set" do
before do
Backup::Compressor::Bzip2.defaults do |c|
c.level = 7
end
end
it "should use pre-configured defaults" do
expect(compressor.level).to eq(7)
expect(compressor.instance_variable_get(:@cmd)).to eq("bzip2 -7")
expect(compressor.instance_variable_get(:@ext)).to eq(".bz2")
end
it "should override pre-configured defaults" do
compressor = Backup::Compressor::Bzip2.new do |c|
c.level = 6
end
expect(compressor.level).to eq(6)
expect(compressor.instance_variable_get(:@cmd)).to eq("bzip2 -6")
expect(compressor.instance_variable_get(:@ext)).to eq(".bz2")
end
end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/compressor/gzip_spec.rb | spec/compressor/gzip_spec.rb | require "spec_helper"
describe Backup::Compressor::Gzip do
before do
allow(Backup::Compressor::Gzip).to receive(:utility).and_return("gzip")
Backup::Compressor::Gzip.instance_variable_set(:@has_rsyncable, true)
allow_any_instance_of(Backup::Compressor::Gzip).to receive(:utility).and_return("gzip")
end
it "should be a subclass of Compressor::Base" do
expect(Backup::Compressor::Gzip
.superclass).to eq(Backup::Compressor::Base)
end
it "should be extended by Utilities::Helpers" do
expect(Backup::Compressor::Gzip.instance_eval("class << self; self; end"))
.to include(Backup::Utilities::Helpers)
end
describe ".has_rsyncable?" do
before do
Backup::Compressor::Gzip.instance_variable_set(:@has_rsyncable, nil)
end
context "when --rsyncable is available" do
before do
expect(Backup::Compressor::Gzip).to receive(:`).once
.with("gzip --rsyncable --version >/dev/null 2>&1; echo $?")
.and_return("0\n")
end
it "returns true and caches the result" do
expect(Backup::Compressor::Gzip.has_rsyncable?).to be(true)
expect(Backup::Compressor::Gzip.has_rsyncable?).to be(true)
end
end
context "when --rsyncable is not available" do
before do
expect(Backup::Compressor::Gzip).to receive(:`).once
.with("gzip --rsyncable --version >/dev/null 2>&1; echo $?")
.and_return("1\n")
end
it "returns false and caches the result" do
expect(Backup::Compressor::Gzip.has_rsyncable?).to be(false)
expect(Backup::Compressor::Gzip.has_rsyncable?).to be(false)
end
end
end
describe "#initialize" do
let(:compressor) { Backup::Compressor::Gzip.new }
after { Backup::Compressor::Gzip.clear_defaults! }
context "when no pre-configured defaults have been set" do
it "should use default values" do
expect(compressor.level).to be(false)
expect(compressor.rsyncable).to be(false)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("gzip")
expect(ext).to eq(".gz")
end
end
it "should use the values given" do
compressor = Backup::Compressor::Gzip.new do |c|
c.level = 5
c.rsyncable = true
end
expect(compressor.level).to eq(5)
expect(compressor.rsyncable).to be(true)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("gzip -5 --rsyncable")
expect(ext).to eq(".gz")
end
end
end # context 'when no pre-configured defaults have been set'
context "when pre-configured defaults have been set" do
before do
Backup::Compressor::Gzip.defaults do |c|
c.level = 7
c.rsyncable = true
end
end
it "should use pre-configured defaults" do
expect(compressor.level).to eq(7)
expect(compressor.rsyncable).to be(true)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("gzip -7 --rsyncable")
expect(ext).to eq(".gz")
end
end
it "should override pre-configured defaults" do
compressor = Backup::Compressor::Gzip.new do |c|
c.level = 6
c.rsyncable = false
end
expect(compressor.level).to eq(6)
expect(compressor.rsyncable).to be(false)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("gzip -6")
expect(ext).to eq(".gz")
end
end
end # context 'when pre-configured defaults have been set'
it "should ignore rsyncable option and warn user if not supported" do
Backup::Compressor::Gzip.instance_variable_set(:@has_rsyncable, false)
expect(Backup::Logger).to receive(:warn) do |err|
expect(err).to be_a(Backup::Compressor::Gzip::Error)
expect(err.message).to match(/'rsyncable' option ignored/)
end
compressor = Backup::Compressor::Gzip.new do |c|
c.level = 5
c.rsyncable = true
end
expect(compressor.level).to eq(5)
expect(compressor.rsyncable).to be(true)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("gzip -5")
expect(ext).to eq(".gz")
end
end
end # describe '#initialize'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/compressor/base_spec.rb | spec/compressor/base_spec.rb | require "spec_helper"
describe Backup::Compressor::Base do
let(:compressor) { Backup::Compressor::Base.new }
it "should include Utilities::Helpers" do
expect(Backup::Compressor::Base
.include?(Backup::Utilities::Helpers)).to eq(true)
end
it "should include Config::Helpers" do
expect(Backup::Compressor::Base
.include?(Backup::Config::Helpers)).to eq(true)
end
describe "#compress_with" do
it "should yield the compressor command and extension" do
compressor.instance_variable_set(:@cmd, "compressor command")
compressor.instance_variable_set(:@ext, "compressor extension")
expect(compressor).to receive(:log!)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("compressor command")
expect(ext).to eq("compressor extension")
end
end
end
describe "#compressor_name" do
it "should return class name with Backup namespace removed" do
expect(compressor.send(:compressor_name)).to eq("Compressor::Base")
end
end
describe "#log!" do
it "should log a message" do
compressor.instance_variable_set(:@cmd, "compressor command")
compressor.instance_variable_set(:@ext, "compressor extension")
expect(compressor).to receive(:compressor_name).and_return("Compressor Name")
expect(Backup::Logger).to receive(:info).with(
"Using Compressor Name for compression.\n" \
" Command: 'compressor command'\n" \
" Ext: 'compressor extension'"
)
compressor.send(:log!)
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/compressor/custom_spec.rb | spec/compressor/custom_spec.rb | require "spec_helper"
describe Backup::Compressor::Custom do
let(:compressor) { Backup::Compressor::Custom.new }
before(:context) do
# Utilities::Helpers#utility will raise an error
# if the command is invalid or not set
Backup::Compressor::Custom.send(
:define_method, :utility,
->(arg) { arg.to_s.empty? ? "error" : "/path/to/#{arg}" }
)
end
it "should be a subclass of Compressor::Base" do
expect(Backup::Compressor::Custom
.superclass).to eq(Backup::Compressor::Base)
end
describe "#initialize" do
let(:compressor) { Backup::Compressor::Custom.new }
after { Backup::Compressor::Custom.clear_defaults! }
it "should load pre-configured defaults" do
expect_any_instance_of(Backup::Compressor::Custom).to receive(:load_defaults!)
compressor
end
it "should call Utilities::Helpers#utility to validate command" do
expect_any_instance_of(Backup::Compressor::Custom).to receive(:utility)
compressor
end
it "should clean the command and extension for use with compress_with" do
compressor = Backup::Compressor::Custom.new do |c|
c.command = " my_command --option foo "
c.extension = " my_extension "
end
expect(compressor.command).to eq(" my_command --option foo ")
expect(compressor.extension).to eq(" my_extension ")
expect(compressor).to receive(:log!)
compressor.compress_with do |cmd, ext|
expect(cmd).to eq("/path/to/my_command --option foo")
expect(ext).to eq("my_extension")
end
end
context "when no pre-configured defaults have been set" do
it "should use default values" do
expect(compressor.command).to be_nil
expect(compressor.extension).to be_nil
expect(compressor.instance_variable_get(:@cmd)).to eq("error")
expect(compressor.instance_variable_get(:@ext)).to eq("")
end
it "should use the values given" do
compressor = Backup::Compressor::Custom.new do |c|
c.command = "my_command"
c.extension = "my_extension"
end
expect(compressor.command).to eq("my_command")
expect(compressor.extension).to eq("my_extension")
expect(compressor.instance_variable_get(:@cmd)).to eq("/path/to/my_command")
expect(compressor.instance_variable_get(:@ext)).to eq("my_extension")
end
end # context 'when no pre-configured defaults have been set'
context "when pre-configured defaults have been set" do
before do
Backup::Compressor::Custom.defaults do |c|
c.command = "default_command"
c.extension = "default_extension"
end
end
it "should use pre-configured defaults" do
expect(compressor.command).to eq("default_command")
expect(compressor.extension).to eq("default_extension")
expect(compressor.instance_variable_get(:@cmd)).to eq("/path/to/default_command")
expect(compressor.instance_variable_get(:@ext)).to eq("default_extension")
end
it "should override pre-configured defaults" do
compressor = Backup::Compressor::Custom.new do |c|
c.command = "new_command"
c.extension = "new_extension"
end
expect(compressor.command).to eq("new_command")
expect(compressor.extension).to eq("new_extension")
expect(compressor.instance_variable_get(:@cmd)).to eq("/path/to/new_command")
expect(compressor.instance_variable_get(:@ext)).to eq("new_extension")
end
end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/config/dsl_spec.rb | spec/config/dsl_spec.rb | require "spec_helper"
module Backup
describe Config::DSL do
describe ".add_dsl_constants" do
it "adds constants when the module is loaded" do
described_class.constants.each do |const|
described_class.send(:remove_const, const)
end
expect(described_class.constants).to be_empty
load File.expand_path("../../../lib/backup/config/dsl.rb", __FILE__)
expect(described_class.const_defined?("MySQL")).to eq(true)
expect(described_class.const_defined?("RSync")).to eq(true)
expect(described_class::RSync.const_defined?("Local")).to eq(true)
end
end
describe ".create_modules" do
module TestScope; end
context "when given an array of constant names" do
it "creates modules for the given scope" do
described_class.send(:create_modules, TestScope, ["Foo", "Bar"])
expect(TestScope.const_defined?("Foo")).to eq(true)
expect(TestScope.const_defined?("Bar")).to eq(true)
expect(TestScope::Foo.class).to eq(Module)
expect(TestScope::Bar.class).to eq(Module)
end
end
context "when the given array contains Hash values" do
it "creates deeply nested modules" do
described_class.send(
:create_modules,
TestScope,
["FooBar", {
LevelA: ["NameA", {
LevelB: ["NameB"]
}]
}]
)
expect(TestScope.const_defined?("FooBar")).to eq(true)
expect(TestScope.const_defined?("LevelA")).to eq(true)
expect(TestScope::LevelA.const_defined?("NameA")).to eq(true)
expect(TestScope::LevelA.const_defined?("LevelB")).to eq(true)
expect(TestScope::LevelA::LevelB.const_defined?("NameB")).to eq(true)
end
end
end
describe "#_config_options" do
it "returns paths set in config.rb" do
[:root_path, :data_path, :tmp_path].each { |name| subject.send(name, name) }
expect(subject._config_options).to eq(
root_path: :root_path,
data_path: :data_path,
tmp_path: :tmp_path
)
end
end
describe "#preconfigure" do
after do
if described_class.const_defined?("MyBackup")
described_class.send(:remove_const, "MyBackup")
end
end
specify "name must be a String" do
expect do
subject.preconfigure(:Abc)
end.to raise_error(described_class::Error)
end
specify "name must begin with a capital letter" do
expect do
subject.preconfigure("myBackup")
end.to raise_error(described_class::Error)
end
specify "Backup::Model may not be preconfigured" do
expect do
subject.preconfigure("Model")
end.to raise_error(described_class::Error)
end
specify "preconfigured models can only be preconfigured once" do
block = proc {}
subject.preconfigure("MyBackup", &block)
klass = described_class.const_get("MyBackup")
expect(klass.superclass).to eq(Backup::Model)
expect do
subject.preconfigure("MyBackup", &block)
end.to raise_error(described_class::Error)
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/config/helpers_spec.rb | spec/config/helpers_spec.rb | require "spec_helper"
module Backup
describe "Config::Helpers" do
before do
class Foo
include Backup::Config::Helpers
attr_accessor :accessor, :accessor_two
attr_reader :reader
attr_deprecate :removed,
version: "1.1"
attr_deprecate :removed_with_message,
version: "1.2",
message: "This has no replacement."
attr_deprecate :removed_with_action,
version: "1.3",
action: (lambda do |klass, val|
klass.accessor = val ? "1" : "0"
klass.accessor_two = "updated"
end)
attr_deprecate :removed_with_action_and_message,
version: "1.4",
message: "Updating accessors.",
action: (lambda do |klass, val|
klass.accessor = val ? "1" : "0"
klass.accessor_two = "updated"
end)
end
end
after { Backup.send(:remove_const, "Foo") }
describe ".defaults" do
let(:defaults) { double }
before do
expect(Config::Defaults).to receive(:new).once.and_return(defaults)
end
it "should return the Config::Defaults for the class" do
expect(Foo.defaults).to eq(defaults)
end
it "should yield the Config::Defaults for the class" do
Foo.defaults do |config|
expect(config).to eq(defaults)
end
end
it "should cache the Config::Defaults for the class" do
expect(Foo.instance_variable_get(:@defaults)).to be_nil
expect(Foo.defaults).to be(defaults)
expect(Foo.instance_variable_get(:@defaults)).to be(defaults)
expect(Foo.defaults).to be(defaults)
end
end
describe ".clear_defaults!" do
it "should clear all defaults set" do
Foo.defaults do |config|
config.accessor = "foo"
end
expect(Foo.defaults.accessor).to eq("foo")
Foo.clear_defaults!
expect(Foo.defaults.accessor).to be_nil
end
end
describe ".deprecations" do
it "should return @deprecations" do
expect(Foo.deprecations).to be_a(Hash)
expect(Foo.deprecations.keys.count).to eq(4)
end
it "should set @deprecations to an empty hash if not set" do
Foo.send(:remove_instance_variable, :@deprecations)
expect(Foo.deprecations).to eq({})
end
end
describe ".attr_deprecate" do
before { Foo.send(:remove_instance_variable, :@deprecations) }
it "should add deprected attributes" do
Foo.send :attr_deprecate, :attr1
Foo.send :attr_deprecate, :attr2, version: "2"
Foo.send :attr_deprecate, :attr3, version: "3", message: "attr3 message"
Foo.send :attr_deprecate, :attr4, version: "4", message: "attr4 message",
action: "attr4 action"
expect(Foo.deprecations).to eq(
attr1: {
version: nil,
message: nil,
action: nil
},
attr2: {
version: "2",
message: nil,
action: nil
},
attr3: {
version: "3",
message: "attr3 message",
action: nil
},
attr4: {
version: "4",
message: "attr4 message",
action: "attr4 action"
}
)
end
end
describe ".log_deprecation_warning" do
context "when no message given" do
it "should log a warning that the attribute has been removed" do
expect(Logger).to receive(:warn) do |err|
expect(err.message).to eq "Config::Error: [DEPRECATION WARNING]\n" \
" Backup::Foo#removed has been deprecated as of backup v.1.1"
end
deprecation = Foo.deprecations[:removed]
Foo.log_deprecation_warning(:removed, deprecation)
end
end
context "when a message is given" do
it "should log warning with the message" do
expect(Logger).to receive(:warn) do |err|
expect(err.message).to eq "Config::Error: [DEPRECATION WARNING]\n" \
" Backup::Foo#removed_with_message has been deprecated " \
"as of backup v.1.2\n" \
" This has no replacement."
end
deprecation = Foo.deprecations[:removed_with_message]
Foo.log_deprecation_warning(:removed_with_message, deprecation)
end
end
end # describe '.log_deprecation_warning'
describe "#load_defaults!" do
let(:klass) { Foo.new }
it "should load default values set for the class" do
Foo.defaults do |config|
config.accessor = "foo"
end
klass.send(:load_defaults!)
expect(klass.accessor).to eq("foo")
end
it "should protect default values" do
default_value = "foo"
Foo.defaults do |config|
config.accessor = default_value
config.accessor_two = 5
end
klass.send(:load_defaults!)
expect(klass.accessor).to eq("foo")
expect(klass.accessor).to_not be(default_value)
expect(klass.accessor_two).to eq(5)
end
it "should raise an error if defaults are set for attribute readers" do
Foo.defaults do |config|
config.reader = "foo"
end
expect do
klass.send(:load_defaults!)
end.to raise_error(NoMethodError, /Backup::Foo/)
end
it "should raise an error if defaults were set for invalid accessors" do
Foo.defaults do |config|
config.foobar = "foo"
end
expect do
klass.send(:load_defaults!)
end.to raise_error(NoMethodError, /Backup::Foo/)
end
end
describe "#method_missing" do
context "when the method is a deprecated method" do
before do
expect(Logger).to receive(:warn).with(instance_of(Config::Error))
end
context "when an :action is specified" do
it "should call the :action" do
value = [true, false].sample
expected_value = value ? "1" : "0"
klass = Foo.new
klass.removed_with_action = value
expect(klass.accessor).to eq(expected_value)
# lambda additionally sets :accessor_two
expect(klass.accessor_two).to eq("updated")
end
end
context "when no :action is specified" do
it "should only log the warning" do
expect_any_instance_of(Foo).to receive(:accessor=).never
klass = Foo.new
klass.removed = "foo"
expect(klass.accessor).to be_nil
end
end
end
context "when the method is not a deprecated method" do
it "should raise a NoMethodError" do
expect(Logger).to receive(:warn).never
klass = Foo.new
expect do
klass.foobar = "attr_value"
end.to raise_error(NoMethodError)
end
end
context "when the method is not a set operation" do
it "should raise a NoMethodError" do
expect(Logger).to receive(:warn).never
klass = Foo.new
expect do
klass.removed
end.to raise_error(NoMethodError)
end
end
end # describe '#method_missing'
end
describe "Config::Defaults" do
let(:defaults) { Config::Defaults.new }
before do
defaults.foo = "one"
defaults.bar = "two"
end
it "should return nil for unset attributes" do
expect(defaults.foobar).to be_nil
end
describe "#_attribues" do
it "should return an array of attribute names" do
expect(defaults._attributes).to be_an Array
expect(defaults._attributes.count).to eq(2)
expect(defaults._attributes).to include(:foo, :bar)
end
end
describe "#reset!" do
it "should clear all attributes set" do
defaults.reset!
expect(defaults._attributes).to be_an Array
expect(defaults._attributes).to be_empty
expect(defaults.foo).to be_nil
expect(defaults.bar).to be_nil
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/mysql_spec.rb | spec/database/mysql_spec.rb | require "spec_helper"
module Backup
describe Database::MySQL do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) { Database::MySQL.new(model) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Database::MySQL).to receive(:utility)
.with(:mysqldump).and_return("mysqldump")
allow_any_instance_of(Database::MySQL).to receive(:utility)
.with(:cat).and_return("cat")
allow_any_instance_of(Database::MySQL).to receive(:utility)
.with(:innobackupex).and_return("innobackupex")
allow_any_instance_of(Database::MySQL).to receive(:utility)
.with(:tar).and_return("tar")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.database_id).to be_nil
expect(db.name).to be :all
expect(db.username).to be_nil
expect(db.password).to be_nil
expect(db.host).to be_nil
expect(db.port).to be_nil
expect(db.socket).to be_nil
expect(db.skip_tables).to be_nil
expect(db.only_tables).to be_nil
expect(db.additional_options).to be_nil
expect(db.prepare_options).to be_nil
expect(db.sudo_user).to be_nil
expect(db.backup_engine).to eq :mysqldump
expect(db.prepare_backup).to eq(true)
end
it "configures the database" do
db = Database::MySQL.new(model, :my_id) do |mysql|
mysql.name = "my_name"
mysql.username = "my_username"
mysql.password = "my_password"
mysql.host = "my_host"
mysql.port = "my_port"
mysql.socket = "my_socket"
mysql.skip_tables = "my_skip_tables"
mysql.only_tables = "my_only_tables"
mysql.additional_options = "my_additional_options"
mysql.prepare_options = "my_prepare_options"
mysql.sudo_user = "my_sudo_user"
mysql.backup_engine = "my_backup_engine"
mysql.prepare_backup = false
end
expect(db.database_id).to eq "my_id"
expect(db.name).to eq "my_name"
expect(db.username).to eq "my_username"
expect(db.password).to eq "my_password"
expect(db.host).to eq "my_host"
expect(db.port).to eq "my_port"
expect(db.socket).to eq "my_socket"
expect(db.skip_tables).to eq "my_skip_tables"
expect(db.only_tables).to eq "my_only_tables"
expect(db.additional_options).to eq "my_additional_options"
expect(db.prepare_options).to eq "my_prepare_options"
expect(db.sudo_user).to eq "my_sudo_user"
expect(db.backup_engine).to eq "my_backup_engine"
expect(db.verbose).to be_falsy
expect(db.prepare_backup).to eq(false)
end
end # describe '#initialize'
describe "#perform!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:mysqldump).and_return("mysqldump_command")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("mysqldump_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MySQL.sql'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("mysqldump_command")
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MySQL.sql.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error" do
expect do
db.perform!
end.to raise_error(Database::MySQL::Error) { |err|
expect(err.message).to eq(
"Database::MySQL::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#perform!'
context "using alternative engine (innobackupex)" do
before do
db.backup_engine = :innobackupex
end
describe "#perform!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:innobackupex).and_return("innobackupex_command")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("innobackupex_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MySQL.tar'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("innobackupex_command")
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MySQL.tar.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error" do
expect do
db.perform!
end.to raise_error(Database::MySQL::Error) { |err|
expect(err.message).to eq(
"Database::MySQL::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#perform!'
end # context 'using alternative engine (innobackupex)'
describe "#mysqldump" do
let(:option_methods) do
%w[
user_options credential_options connectivity_options
name_option tables_to_dump tables_to_skip
]
end
it "returns full mysqldump command built from all options" do
option_methods.each { |name| allow(db).to receive(name).and_return(name) }
expect(db.send(:mysqldump)).to eq(
"mysqldump #{option_methods.join(" ")}"
)
end
it "handles nil values from option methods" do
option_methods.each { |name| allow(db).to receive(name).and_return(nil) }
expect(db.send(:mysqldump)).to eq(
"mysqldump #{" " * (option_methods.count - 1)}"
)
end
end # describe '#mysqldump'
describe "backup engine option methods" do
describe "#credential_options" do
it "returns the credentials arguments" do
expect(db.send(:credential_options)).to eq ""
db.username = "my_user"
expect(db.send(:credential_options)).to eq(
"--user=my_user"
)
db.password = "my_password"
expect(db.send(:credential_options)).to eq(
"--user=my_user --password=my_password"
)
db.username = nil
expect(db.send(:credential_options)).to eq(
"--password=my_password"
)
end
it "handles special characters" do
db.username = "my_user'\""
db.password = "my_password'\""
expect(db.send(:credential_options)).to eq(
"--user=my_user\\'\\\" --password=my_password\\'\\\""
)
end
end # describe '#credential_options'
describe "#connectivity_options" do
it "returns only the socket argument if #socket specified" do
db.host = "my_host"
db.port = "my_port"
db.socket = "my_socket"
expect(db.send(:connectivity_options)).to eq(
"--socket='my_socket'"
)
end
it "returns host and port arguments if specified" do
expect(db.send(:connectivity_options)).to eq ""
db.host = "my_host"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host'"
)
db.port = "my_port"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host' --port='my_port'"
)
db.host = nil
expect(db.send(:connectivity_options)).to eq(
"--port='my_port'"
)
end
end # describe '#connectivity_options'
describe "#user_options" do
it "returns arguments for any #additional_options specified" do
expect(db.send(:user_options)).to eq ""
db.additional_options = ["--opt1", "--opt2"]
expect(db.send(:user_options)).to eq "--opt1 --opt2"
db.additional_options = "--opta --optb"
expect(db.send(:user_options)).to eq "--opta --optb"
end
end # describe '#user_options'
describe "#user_prepare_options" do
it "returns arguments for any #prepare_options specified" do
expect(db.send(:user_prepare_options)).to eq ""
db.prepare_options = ["--opt1", "--opt2"]
expect(db.send(:user_prepare_options)).to eq "--opt1 --opt2"
db.prepare_options = "--opta --optb"
expect(db.send(:user_prepare_options)).to eq "--opta --optb"
end
end # describe '#user_prepare_options'
describe "#name_option" do
it "returns argument to dump all databases if name is :all" do
expect(db.send(:name_option)).to eq "--all-databases"
end
it "returns the database name if name is not :all" do
db.name = "my_db"
expect(db.send(:name_option)).to eq "my_db"
end
end # describe '#name_option'
describe "#tables_to_dump" do
it "returns nil if dumping all databases" do
db.only_tables = "will be ignored"
expect(db.send(:tables_to_dump)).to be_nil
end
it "returns arguments for only_tables" do
db.name = "not_all"
db.only_tables = ["one", "two", "three"]
expect(db.send(:tables_to_dump)).to eq "one two three"
db.only_tables = "four five six"
expect(db.send(:tables_to_dump)).to eq "four five six"
end
end # describe '#tables_to_dump'
describe "#tables_to_skip" do
specify "when no #skip_tables are specified" do
expect(db.send(:tables_to_skip)).to eq ""
end
context "when dumping all databases" do
it "returns arguments for all tables given, as given" do
db.skip_tables = ["my_db.my_table", "foo"]
# Note that mysqldump will exit(1) if these don't include the db name.
expect(db.send(:tables_to_skip)).to eq(
"--ignore-table='my_db.my_table' --ignore-table='foo'"
)
end
end
context "when a database name is specified" do
it "will add the database name prefix if missing" do
db.name = "my_db"
db.skip_tables = ["my_table", "foo.bar"]
expect(db.send(:tables_to_skip)).to eq(
"--ignore-table='my_db.my_table' --ignore-table='foo.bar'"
)
end
end
end # describe '#tables_to_skip'
describe "sudo_option" do
it "does not change the command block by default" do
expect(db.send(:sudo_option, "foo")).to eq "foo"
end
context "with sudo_user" do
before do
db.sudo_user = "some_user"
end
it "wraps the block around the proper sudo command" do
expect(db.send(:sudo_option, "foo")).to eq(
"sudo -s -u some_user -- <<END_OF_SUDO\n" \
"foo\n" \
"END_OF_SUDO\n"
)
end
end # context 'with sudo_user' do
end # describe 'sudo_option'
end # describe 'backup engine option methods'
describe "#innobackupex" do
before do
allow(db).to receive(:dump_path).and_return("/tmp")
end
it "builds command to create backup, prepare for restore and tar to stdout" do
expect(db.send(:innobackupex).split.join(" ")).to eq(
"innobackupex --no-timestamp /tmp/MySQL.bkpdir 2> /dev/null && " \
"innobackupex --apply-log /tmp/MySQL.bkpdir 2> /dev/null && " \
"tar --remove-files -cf - -C /tmp MySQL.bkpdir"
)
end
context "with verbose option enabled" do
before do
db.verbose = true
end
it "does not suppress innobackupex STDOUT" do
expect(db.send(:innobackupex).split.join(" ")).to eq(
"innobackupex --no-timestamp /tmp/MySQL.bkpdir && " \
"innobackupex --apply-log /tmp/MySQL.bkpdir && " \
"tar --remove-files -cf - -C /tmp MySQL.bkpdir"
)
end
end
context "with prepare_backup option disabled" do
before do
db.prepare_backup = false
end
it "does not contain apply-log command" do
expect(db.send(:innobackupex).split.join(" ")).to eq(
"innobackupex --no-timestamp /tmp/MySQL.bkpdir 2> /dev/null && " \
"tar --remove-files -cf - -C /tmp MySQL.bkpdir"
)
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/riak_spec.rb | spec/database/riak_spec.rb | require "spec_helper"
module Backup
describe Database::Riak do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) { Database::Riak.new(model) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Database::Riak).to receive(:utility)
.with("riak-admin").and_return("riak-admin")
allow_any_instance_of(Database::Riak).to receive(:utility)
.with(:sudo).and_return("sudo")
allow_any_instance_of(Database::Riak).to receive(:utility)
.with(:chown).and_return("chown")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.database_id).to be_nil
expect(db.node).to eq "riak@127.0.0.1"
expect(db.cookie).to eq "riak"
expect(db.user).to eq "riak"
end
it "configures the database" do
db = Database::Riak.new(model, :my_id) do |riak|
riak.node = "my_node"
riak.cookie = "my_cookie"
riak.user = "my_user"
end
expect(db.database_id).to eq "my_id"
expect(db.node).to eq "my_node"
expect(db.cookie).to eq "my_cookie"
expect(db.user).to eq "my_user"
end
end # describe '#initialize'
describe "#perform!" do
before do
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
allow(Config).to receive(:user).and_return("backup_user")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "with a compressor configured" do
let(:compressor) { double }
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "dumps the database with compression" do
expect(db).to receive(:run).ordered.with(
"sudo -n chown riak '/tmp/trigger/databases'"
)
expect(db).to receive(:run).ordered.with(
"sudo -n -u riak riak-admin backup riak@127.0.0.1 riak " \
"'/tmp/trigger/databases/Riak' node"
)
expect(db).to receive(:run).ordered.with(
"sudo -n chown -R backup_user '/tmp/trigger/databases'"
)
expect(db).to receive(:run).ordered.with(
"cmp_cmd -c '/tmp/trigger/databases/Riak-riak@127.0.0.1' " \
"> '/tmp/trigger/databases/Riak-riak@127.0.0.1.cmp_ext'"
)
expect(FileUtils).to receive(:rm_f).ordered.with(
"/tmp/trigger/databases/Riak-riak@127.0.0.1"
)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'with a compressor configured'
context "without a compressor configured" do
it "dumps the database without compression" do
expect(db).to receive(:run).ordered.with(
"sudo -n chown riak '/tmp/trigger/databases'"
)
expect(db).to receive(:run).ordered.with(
"sudo -n -u riak riak-admin backup riak@127.0.0.1 riak " \
"'/tmp/trigger/databases/Riak' node"
)
expect(db).to receive(:run).ordered.with(
"sudo -n chown -R backup_user '/tmp/trigger/databases'"
)
expect(FileUtils).to receive(:rm_f).never
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor configured'
it "ensures dump_path ownership is reclaimed" do
expect(db).to receive(:run).ordered.with(
"sudo -n chown riak '/tmp/trigger/databases'"
)
expect(db).to receive(:run).ordered.with(
"sudo -n -u riak riak-admin backup riak@127.0.0.1 riak " \
"'/tmp/trigger/databases/Riak' node"
).and_raise("an error")
expect(db).to receive(:run).ordered.with(
"sudo -n chown -R backup_user '/tmp/trigger/databases'"
)
expect do
db.perform!
end.to raise_error("an error")
end
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/openldap_spec.rb | spec/database/openldap_spec.rb | require "spec_helper"
module Backup
describe Database::OpenLDAP do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) { Database::OpenLDAP.new(model) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Database::OpenLDAP).to receive(:utility)
.with(:slapcat).and_return("/real/slapcat")
allow_any_instance_of(Database::OpenLDAP).to receive(:utility)
.with(:cat).and_return("cat")
allow_any_instance_of(Database::OpenLDAP).to receive(:utility)
.with(:sudo).and_return("sudo")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.name).to eq("ldap_backup")
expect(db.slapcat_args).to be_empty
expect(db.use_sudo).to eq(false)
expect(db.slapcat_utility).to eq "/real/slapcat"
expect(db.slapcat_conf).to eq "/etc/ldap/slapd.d"
end
it "configures the database" do
db = Database::OpenLDAP.new(model) do |ldap|
ldap.name = "my_name"
ldap.slapcat_args = ["--query", "--foo"]
end
expect(db.name).to eq "my_name"
expect(db.slapcat_args).to eq ["--query", "--foo"]
end
end # describe '#initialize'
describe "#perform!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:slapcat).and_return("slapcat_command")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("slapcat_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/OpenLDAP.ldif'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("slapcat_command")
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/OpenLDAP.ldif.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error" do
expect do
db.perform!
end.to raise_error(Database::OpenLDAP::Error) { |err|
expect(err.message).to eq(
"Database::OpenLDAP::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#perform!'
describe "#slapcat" do
let(:slapcat_args) do
[
"-H",
"ldap:///subtree-dn",
"-a",
%("(!(entryDN:dnSubtreeMatch:=ou=People,dc=example,dc=com))")
]
end
before do
allow(db).to receive(:slapcat_utility).and_return("real_slapcat")
end
it "returns full slapcat command built from confdir" do
expect(db.send(:slapcat)).to eq(
"real_slapcat -F /etc/ldap/slapd.d "
)
end
it "returns full slapcat command built from additional options and conf file" do
allow(db).to receive(:slapcat_args).and_return(slapcat_args)
expect(db.send(:slapcat)).to eq "real_slapcat -F /etc/ldap/slapd.d -H ldap:///subtree-dn "\
"-a \"(!(entryDN:dnSubtreeMatch:=ou=People,dc=example,dc=com))\""
end
it "supports sudo" do
allow(db).to receive(:use_sudo).and_return("true")
expect(db.send(:slapcat)).to eq(
"sudo real_slapcat -F /etc/ldap/slapd.d "
)
end
it "returns full slapcat command built from additional options and conf file and sudo" do
allow(db).to receive(:slapcat_args).and_return(slapcat_args)
allow(db).to receive(:use_sudo).and_return("true")
expect(db.send(:slapcat)).to eq "sudo real_slapcat -F /etc/ldap/slapd.d -H "\
"ldap:///subtree-dn -a \"(!(entryDN:dnSubtreeMatch:=ou=People,dc=example,dc=com))\""
end
context "slapcat_conf_option" do
it "supports both slapcat confdir" do
db.instance_variable_set(:@slapcat_conf, "/etc/ldap/slapd.d")
expect(db.send(:slapcat)).to eq(
"real_slapcat -F /etc/ldap/slapd.d "
)
end
it "supports both slapcat conffile" do
db.instance_variable_set(:@slapcat_conf, "/etc/ldap/ldap.conf")
expect(db.send(:slapcat)).to eq(
"real_slapcat -f /etc/ldap/ldap.conf "
)
end
end
end # describe '#slapcat'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/redis_spec.rb | spec/database/redis_spec.rb | require "spec_helper"
module Backup
describe Database::Redis do
let(:model) { Model.new(:test_trigger, "test label") }
let(:required_config) do
proc do |redis|
redis.rdb_path = "rdb_path_required_for_copy_mode"
end
end
let(:db) { Database::Redis.new(model, &required_config) }
let(:s) { sequence "" }
before do
allow_any_instance_of(Database::Redis).to receive(:utility)
.with("redis-cli").and_return("redis-cli")
allow_any_instance_of(Database::Redis).to receive(:utility)
.with(:cat).and_return("cat")
end
it_behaves_like "a class that includes Config::Helpers" do
let(:default_overrides) { { "mode" => :sync } }
let(:new_overrides) { { "mode" => :copy } }
end
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.database_id).to be_nil
expect(db.mode).to eq :copy
expect(db.rdb_path).to eq "rdb_path_required_for_copy_mode"
expect(db.invoke_save).to be_nil
expect(db.host).to be_nil
expect(db.port).to be_nil
expect(db.socket).to be_nil
expect(db.password).to be_nil
expect(db.additional_options).to be_nil
end
it "configures the database" do
db = Database::Redis.new(model, :my_id) do |redis|
redis.mode = :copy
redis.rdb_path = "my_path"
redis.invoke_save = true
redis.host = "my_host"
redis.port = "my_port"
redis.socket = "my_socket"
redis.password = "my_password"
redis.additional_options = "my_additional_options"
end
expect(db.database_id).to eq "my_id"
expect(db.mode).to eq :copy
expect(db.rdb_path).to eq "my_path"
expect(db.invoke_save).to be true
expect(db.host).to eq "my_host"
expect(db.port).to eq "my_port"
expect(db.socket).to eq "my_socket"
expect(db.password).to eq "my_password"
expect(db.additional_options).to eq "my_additional_options"
end
it "raises an error if mode is invalid" do
expect do
Database::Redis.new(model) do |redis|
redis.mode = "sync" # symbol required
end
end.to raise_error(Database::Redis::Error) { |err|
expect(err.message).to match(/not a valid mode/)
}
end
it "raises an error if rdb_path is not set for :copy mode" do
expect do
Database::Redis.new(model) do |redis|
redis.rdb_path = nil
end
end.to raise_error(Database::Redis::Error) { |err|
expect(err.message).to match(/`rdb_path` must be set/)
}
end
end # describe '#initialize'
describe "#perform!" do
before do
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "when mode is :sync" do
before do
db.mode = :sync
end
it "uses sync!" do
expect(Logger).to receive(:configure).ordered
expect(db).to receive(:sync!).ordered
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end
context "when mode is :copy" do
before do
db.mode = :copy
end
context "when :invoke_save is false" do
it "calls copy! without save!" do
expect(Logger).to receive(:configure).never
expect(db).to receive(:save!).never
expect(db).to receive(:copy!).ordered
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end
context "when :invoke_save is true" do
before do
db.invoke_save = true
end
it "calls save! before copy!" do
expect(Logger).to receive(:configure).never
expect(db).to receive(:save!).ordered
expect(db).to receive(:copy!).ordered
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end
end
end # describe '#perform!'
describe "#sync!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:redis_cli_cmd).and_return("redis_cli_cmd")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("redis_cli_cmd --rdb -")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/Redis.rdb'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
db.send(:sync!)
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("redis_cli_cmd --rdb -")
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/Redis.rdb.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
db.send(:sync!)
end
end # context 'without a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error" do
expect do
db.send(:sync!)
end.to raise_error(Database::Redis::Error) { |err|
expect(err.message).to eq(
"Database::Redis::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#sync!'
describe "#save!" do
before do
allow(db).to receive(:redis_cli_cmd).and_return("redis_cli_cmd")
end
# the redis docs say this returns "+OK\n", although it appears
# to only return "OK\n". Utilities#run strips the STDOUT returned,
# so a successful response should =~ /OK$/
specify "when response is OK" do
expect(db).to receive(:run).with("redis_cli_cmd SAVE").and_return("+OK")
db.send(:save!)
end
specify "when response is not OK" do
expect(db).to receive(:run).with("redis_cli_cmd SAVE").and_return("No OK Returned")
expect do
db.send(:save!)
end.to raise_error(Database::Redis::Error) { |err|
expect(err.message).to match(/Failed to invoke the `SAVE` command/)
expect(err.message).to match(/Response was: No OK Returned/)
}
end
specify "retries if save already in progress" do
expect(db).to receive(:run).with("redis_cli_cmd SAVE").exactly(5).times
.and_return("Background save already in progress")
expect(db).to receive(:sleep).with(5).exactly(4).times
expect do
db.send(:save!)
end.to raise_error(Database::Redis::Error) { |err|
expect(err.message).to match(/Failed to invoke the `SAVE` command/)
expect(err.message).to match(
/Response was: Background save already in progress/
)
}
end
end # describe '#save!'
describe "#copy!" do
before do
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
db.rdb_path = "/var/lib/redis/dump.rdb"
end
context "when the redis dump file exists" do
before do
expect(File).to receive(:exist?).ordered.with(
"/var/lib/redis/dump.rdb"
).and_return(true)
end
context "when a compressor is configured" do
let(:compressor) { double }
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "should copy the redis dump file with compression" do
expect(db).to receive(:run).ordered.with(
"cmp_cmd -c '/var/lib/redis/dump.rdb' > " \
"'/tmp/trigger/databases/Redis.rdb.cmp_ext'"
)
expect(FileUtils).to receive(:cp).never
db.send(:copy!)
end
end # context 'when a compressor is configured'
context "when no compressor is configured" do
it "should copy the redis dump file without compression" do
expect(FileUtils).to receive(:cp).ordered.with(
"/var/lib/redis/dump.rdb", "/tmp/trigger/databases/Redis.rdb"
)
expect(db).to receive(:run).never
db.send(:copy!)
end
end # context 'when no compressor is configured'
end # context 'when the redis dump file exists'
context "when the redis dump file does not exist" do
it "raises an error" do
expect(File).to receive(:exist?).ordered.with(
"/var/lib/redis/dump.rdb"
).and_return(false)
expect do
db.send(:copy!)
end.to raise_error(Database::Redis::Error)
end
end # context 'when the redis dump file does not exist'
end # describe '#copy!'
describe "#redis_cli_cmd" do
let(:option_methods) do
%w[
password_option connectivity_options user_options
]
end
it "returns full redis-cli command built from all options" do
option_methods.each { |name| allow(db).to receive(name).and_return(name) }
expect(db.send(:redis_cli_cmd)).to eq(
"redis-cli #{option_methods.join(" ")}"
)
end
it "handles nil values from option methods" do
option_methods.each { |name| allow(db).to receive(name).and_return(nil) }
expect(db.send(:redis_cli_cmd)).to eq(
"redis-cli #{(" " * (option_methods.count - 1))}"
)
end
end # describe '#redis_cli_cmd'
describe "redis_cli_cmd option methods" do
describe "#password_option" do
it "returns argument if specified" do
expect(db.send(:password_option)).to be_nil
db.password = "my_password"
expect(db.send(:password_option)).to eq "-a 'my_password'"
end
end # describe '#password_option'
describe "#connectivity_options" do
it "returns only the socket argument if #socket specified" do
db.host = "my_host"
db.port = "my_port"
db.socket = "my_socket"
expect(db.send(:connectivity_options)).to eq(
"-s 'my_socket'"
)
end
it "returns host and port arguments if specified" do
expect(db.send(:connectivity_options)).to eq ""
db.host = "my_host"
expect(db.send(:connectivity_options)).to eq(
"-h 'my_host'"
)
db.port = "my_port"
expect(db.send(:connectivity_options)).to eq(
"-h 'my_host' -p 'my_port'"
)
db.host = nil
expect(db.send(:connectivity_options)).to eq(
"-p 'my_port'"
)
end
end # describe '#connectivity_options'
describe "#user_options" do
it "returns arguments for any #additional_options specified" do
expect(db.send(:user_options)).to eq ""
db.additional_options = ["--opt1", "--opt2"]
expect(db.send(:user_options)).to eq "--opt1 --opt2"
db.additional_options = "--opta --optb"
expect(db.send(:user_options)).to eq "--opta --optb"
end
end # describe '#user_options'
end # describe 'redis_cli_cmd option methods'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/mongodb_spec.rb | spec/database/mongodb_spec.rb | require "spec_helper"
module Backup
describe Database::MongoDB do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) { Database::MongoDB.new(model) }
before do
allow_any_instance_of(Database::MongoDB).to receive(:utility)
.with(:mongodump).and_return("mongodump")
allow_any_instance_of(Database::MongoDB).to receive(:utility)
.with(:mongo).and_return("mongo")
allow_any_instance_of(Database::MongoDB).to receive(:utility)
.with(:cat).and_return("cat")
allow_any_instance_of(Database::MongoDB).to receive(:utility)
.with(:tar).and_return("tar")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.database_id).to be_nil
expect(db.name).to be_nil
expect(db.username).to be_nil
expect(db.password).to be_nil
expect(db.authdb).to be_nil
expect(db.host).to be_nil
expect(db.port).to be_nil
expect(db.ipv6).to be_nil
expect(db.only_collections).to be_nil
expect(db.additional_options).to be_nil
expect(db.lock).to be_nil
expect(db.oplog).to be_nil
end
it "configures the database" do
db = Database::MongoDB.new(model, :my_id) do |mongodb|
mongodb.name = "my_name"
mongodb.username = "my_username"
mongodb.password = "my_password"
mongodb.authdb = "my_authdb"
mongodb.host = "my_host"
mongodb.port = "my_port"
mongodb.ipv6 = "my_ipv6"
mongodb.only_collections = "my_only_collections"
mongodb.additional_options = "my_additional_options"
mongodb.lock = "my_lock"
mongodb.oplog = "my_oplog"
end
expect(db.database_id).to eq "my_id"
expect(db.name).to eq "my_name"
expect(db.username).to eq "my_username"
expect(db.password).to eq "my_password"
expect(db.authdb).to eq "my_authdb"
expect(db.host).to eq "my_host"
expect(db.port).to eq "my_port"
expect(db.ipv6).to eq "my_ipv6"
expect(db.only_collections).to eq "my_only_collections"
expect(db.additional_options).to eq "my_additional_options"
expect(db.lock).to eq "my_lock"
expect(db.oplog).to eq "my_oplog"
end
end # describe '#initialize'
describe "#perform!" do
before do
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "with #lock set to false" do
it "does not lock the database" do
expect(db).to receive(:lock_database).never
expect(db).to receive(:unlock_database).never
expect(db).to receive(:dump!).ordered
expect(db).to receive(:package!).ordered
db.perform!
end
end
context "with #lock set to true" do
before { db.lock = true }
it "locks the database" do
expect(db).to receive(:lock_database).ordered
expect(db).to receive(:dump!).ordered
expect(db).to receive(:package!).ordered
expect(db).to receive(:unlock_database).ordered
db.perform!
end
it "ensures the database is unlocked" do
expect(db).to receive(:lock_database).ordered
expect(db).to receive(:dump!).ordered
expect(db).to receive(:package!).ordered.and_raise("an error")
expect(db).to receive(:unlock_database).ordered
expect do
db.perform!
end.to raise_error "an error"
end
end
end # describe '#perform!'
describe "#dump!" do
before do
allow(db).to receive(:mongodump).and_return("mongodump_command")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
expect(FileUtils).to receive(:mkdir_p).ordered
.with("/tmp/trigger/databases/MongoDB")
end
context "when #only_collections are not specified" do
it "runs mongodump once" do
expect(db).to receive(:run).ordered.with("mongodump_command")
db.send(:dump!)
end
end
context "when #only_collections are specified" do
it "runs mongodump for each collection" do
db.only_collections = ["collection_a", "collection_b"]
expect(db).to receive(:run).ordered.with(
"mongodump_command --collection='collection_a'"
)
expect(db).to receive(:run).ordered.with(
"mongodump_command --collection='collection_b'"
)
db.send(:dump!)
end
it "allows only_collections to be a single string" do
db.only_collections = "collection_a"
expect(db).to receive(:run).ordered.with(
"mongodump_command --collection='collection_a'"
)
db.send(:dump!)
end
end
end # describe '#dump!'
describe "#package!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with(
"tar -cf - -C '/tmp/trigger/databases' 'MongoDB'"
)
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MongoDB.tar'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(FileUtils).to receive(:rm_rf).ordered.with(
"/tmp/trigger/databases/MongoDB"
)
expect(db).to receive(:log!).ordered.with(:finished)
db.send(:package!)
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with(
"tar -cf - -C '/tmp/trigger/databases' 'MongoDB'"
)
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/MongoDB.tar.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(FileUtils).to receive(:rm_rf).ordered.with(
"/tmp/trigger/databases/MongoDB"
)
expect(db).to receive(:log!).ordered.with(:finished)
db.send(:package!)
end
end # context 'with a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error and does not remove the packaging path" do
expect(FileUtils).to receive(:rm_rf).never
expect(db).to receive(:log!).never
expect do
db.send(:package!)
end.to raise_error(Database::MongoDB::Error) { |err|
expect(err.message).to eq(
"Database::MongoDB::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#package!'
describe "#mongodump" do
let(:option_methods) do
%w[
name_option credential_options connectivity_options
ipv6_option oplog_option user_options dump_packaging_path
]
end
it "returns full mongodump command built from all options" do
option_methods.each { |name| allow(db).to receive(name).and_return(name) }
expect(db.send(:mongodump)).to eq(
"mongodump name_option credential_options connectivity_options " \
"ipv6_option oplog_option user_options --out='dump_packaging_path'"
)
end
it "handles nil values from option methods" do
option_methods.each { |name| allow(db).to receive(name).and_return(nil) }
expect(db.send(:mongodump)).to eq "mongodump --out=''"
end
end # describe '#mongodump'
describe "mongo and monogodump option methods" do
describe "#name_option" do
it "returns database argument if #name is specified" do
expect(db.send(:name_option)).to be_nil
db.name = "my_database"
expect(db.send(:name_option)).to eq "--db='my_database'"
end
end # describe '#name_option'
describe "#credential_options" do
it "returns credentials arguments based on #username and #password and #authdb" do
expect(db.send(:credential_options)).to eq ""
db.username = "my_user"
expect(db.send(:credential_options)).to eq(
"--username='my_user'"
)
db.password = "my_password"
expect(db.send(:credential_options)).to eq(
"--username='my_user' --password='my_password'"
)
db.authdb = "my_authdb"
expect(db.send(:credential_options)).to eq(
"--username='my_user' --password='my_password' --authenticationDatabase='my_authdb'"
)
db.username = nil
expect(db.send(:credential_options)).to eq(
"--password='my_password' --authenticationDatabase='my_authdb'"
)
db.authdb = nil
expect(db.send(:credential_options)).to eq(
"--password='my_password'"
)
end
end # describe '#credential_options'
describe "#connectivity_options" do
it "returns connectivity arguments based on #host and #port" do
expect(db.send(:connectivity_options)).to eq ""
db.host = "my_host"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host'"
)
db.port = "my_port"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host' --port='my_port'"
)
db.host = nil
expect(db.send(:connectivity_options)).to eq(
"--port='my_port'"
)
end
end # describe '#connectivity_options'
describe "#ipv6_option" do
it "returns the ipv6 argument if #ipv6 is true" do
expect(db.send(:ipv6_option)).to be_nil
db.ipv6 = true
expect(db.send(:ipv6_option)).to eq "--ipv6"
end
end # describe '#ipv6_option'
describe "#oplog_option" do
it "returns the oplog argument if #oplog is true" do
expect(db.send(:oplog_option)).to be_nil
db.oplog = true
expect(db.send(:oplog_option)).to eq "--oplog"
end
end # describe '#oplog_option'
describe "#user_options" do
it "returns arguments for any #additional_options specified" do
expect(db.send(:user_options)).to eq ""
db.additional_options = ["--opt1", "--opt2"]
expect(db.send(:user_options)).to eq "--opt1 --opt2"
db.additional_options = "--opta --optb"
expect(db.send(:user_options)).to eq "--opta --optb"
end
end # describe '#user_options'
end # describe 'mongo and monogodump option methods'
describe "#lock_database" do
it "runs command to disable profiling and lock the database" do
db = Database::MongoDB.new(model)
allow(db).to receive(:mongo_shell).and_return("mongo_shell")
expect(db).to receive(:run).with(
"echo 'use admin\n" \
"db.setProfilingLevel(0)\n" \
"db.fsyncLock()' | mongo_shell\n"
)
db.send(:lock_database)
end
end # describe '#lock_database'
describe "#unlock_database" do
it "runs command to unlock the database" do
db = Database::MongoDB.new(model)
allow(db).to receive(:mongo_shell).and_return("mongo_shell")
expect(db).to receive(:run).with(
"echo 'use admin\n" \
"db.fsyncUnlock()' | mongo_shell\n"
)
db.send(:unlock_database)
end
end # describe '#unlock_database'
describe "#mongo_shell" do
specify "with all options" do
db.host = "my_host"
db.port = "my_port"
db.username = "my_user"
db.password = "my_pwd"
db.authdb = "my_authdb"
db.ipv6 = true
db.name = "my_db"
expect(db.send(:mongo_shell)).to eq(
"mongo --host='my_host' --port='my_port' --username='my_user' " \
"--password='my_pwd' --authenticationDatabase='my_authdb' --ipv6 'my_db'"
)
end
specify "with no options" do
expect(db.send(:mongo_shell)).to eq "mongo"
end
end # describe '#mongo_shell'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/sqlite_spec.rb | spec/database/sqlite_spec.rb | require "spec_helper"
module Backup
describe Database::SQLite do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) do
Database::SQLite.new(model) do |db|
db.path = "/tmp/db1.sqlite3"
db.sqlitedump_utility = "/path/to/sqlitedump"
end
end
before do
allow_any_instance_of(Database::SQLite).to receive(:utility)
.with(:sqlitedump).and_return("sqlitedump")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "should load pre-configured defaults through Base" do
expect_any_instance_of(Database::SQLite).to receive(:load_defaults!)
db
end
it "should pass the model reference to Base" do
expect(db.instance_variable_get(:@model)).to eq(model)
end
context "when no pre-configured defaults have been set" do
context "when options are specified" do
it "should use the given values" do
expect(db.sqlitedump_utility).to eq("/path/to/sqlitedump")
end
end
end # context 'when no pre-configured defaults have been set'
context "when pre-configured defaults have been set" do
before do
Database::SQLite.defaults do |db|
db.sqlitedump_utility = "/default/path/to/sqlitedump"
end
end
after { Database::SQLite.clear_defaults! }
context "when options are specified" do
it "should override the pre-configured defaults" do
expect(db.sqlitedump_utility).to eq("/path/to/sqlitedump")
end
end
context "when options are not specified" do
it "should use the pre-configured defaults" do
db = Database::SQLite.new(model)
expect(db.sqlitedump_utility).to eq("/default/path/to/sqlitedump")
end
end
end # context 'when no pre-configured defaults have been set'
end # describe '#initialize'
describe "#perform!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
# superclass actions
db.instance_variable_set(:@dump_path, "/dump/path")
allow(db).to receive(:dump_filename).and_return("dump_filename")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "when no compressor is configured" do
it "should run sqlitedump without compression" do
expect(Pipeline).to receive(:new).and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("echo '.dump' | /path/to/sqlitedump /tmp/db1.sqlite3")
expect(model).to receive(:compressor).and_return(nil)
expect(pipeline).to receive(:<<).ordered.with("cat > '/dump/path/dump_filename.sql'")
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end
context "when a compressor is configured" do
it "should run sqlitedump with compression" do
expect(Pipeline).to receive(:new).and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("echo '.dump' | /path/to/sqlitedump /tmp/db1.sqlite3")
expect(model).to receive(:compressor).twice.and_return(compressor)
expect(compressor).to receive(:compress_with).and_yield("gzip", ".gz")
expect(pipeline).to receive(:<<).ordered.with("gzip")
expect(pipeline).to receive(:<<).ordered.with("cat > '/dump/path/dump_filename.sql.gz'")
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end
context "when pipeline command fails" do
before do
expect(Pipeline).to receive(:new).and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("echo '.dump' | /path/to/sqlitedump /tmp/db1.sqlite3")
expect(model).to receive(:compressor).and_return(nil)
expect(pipeline).to receive(:<<).ordered.with("cat > '/dump/path/dump_filename.sql'")
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(false)
expect(pipeline).to receive(:error_messages).and_return("pipeline_errors")
end
it "should raise an error" do
expect do
db.perform!
end.to raise_error(
Database::SQLite::Error,
"Database::SQLite::Error: Database::SQLite Dump Failed!\n" \
" pipeline_errors"
)
end
end # context 'when pipeline command fails'
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/database/postgresql_spec.rb | spec/database/postgresql_spec.rb | require "spec_helper"
module Backup
describe Database::PostgreSQL do
let(:model) { Model.new(:test_trigger, "test label") }
let(:db) { Database::PostgreSQL.new(model) }
let(:s) { sequence "" }
before do
allow(Utilities).to receive(:utility).with(:pg_dump).and_return("pg_dump")
allow(Utilities).to receive(:utility).with(:pg_dumpall).and_return("pg_dumpall")
allow(Utilities).to receive(:utility).with(:cat).and_return("cat")
allow(Utilities).to receive(:utility).with(:sudo).and_return("sudo")
end
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Database::Base"
describe "#initialize" do
it "provides default values" do
expect(db.database_id).to be_nil
expect(db.name).to eq :all
expect(db.username).to be_nil
expect(db.password).to be_nil
expect(db.sudo_user).to be_nil
expect(db.host).to be_nil
expect(db.port).to be_nil
expect(db.socket).to be_nil
expect(db.skip_tables).to be_nil
expect(db.only_tables).to be_nil
expect(db.additional_options).to be_nil
end
it "configures the database" do
db = Database::PostgreSQL.new(model, :my_id) do |pgsql|
pgsql.name = "my_name"
pgsql.username = "my_username"
pgsql.password = "my_password"
pgsql.sudo_user = "my_sudo_user"
pgsql.host = "my_host"
pgsql.port = "my_port"
pgsql.socket = "my_socket"
pgsql.skip_tables = "my_skip_tables"
pgsql.only_tables = "my_only_tables"
pgsql.additional_options = "my_additional_options"
end
expect(db.database_id).to eq "my_id"
expect(db.name).to eq "my_name"
expect(db.username).to eq "my_username"
expect(db.password).to eq "my_password"
expect(db.sudo_user).to eq "my_sudo_user"
expect(db.host).to eq "my_host"
expect(db.port).to eq "my_port"
expect(db.socket).to eq "my_socket"
expect(db.skip_tables).to eq "my_skip_tables"
expect(db.only_tables).to eq "my_only_tables"
expect(db.additional_options).to eq "my_additional_options"
end
end # describe '#initialize'
describe "#perform!" do
let(:pipeline) { double }
let(:compressor) { double }
before do
allow(db).to receive(:pgdump).and_return("pgdump_command")
allow(db).to receive(:pgdumpall).and_return("pgdumpall_command")
allow(db).to receive(:dump_path).and_return("/tmp/trigger/databases")
expect(db).to receive(:log!).ordered.with(:started)
expect(db).to receive(:prepare!).ordered
end
context "without a compressor" do
it "packages the dump without compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("pgdumpall_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/PostgreSQL.sql'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "with a compressor" do
before do
allow(model).to receive(:compressor).and_return(compressor)
allow(compressor).to receive(:compress_with).and_yield("cmp_cmd", ".cmp_ext")
end
it "packages the dump with compression" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("pgdumpall_command")
expect(pipeline).to receive(:<<).ordered.with("cmp_cmd")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/PostgreSQL.sql.cmp_ext'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "when #name is set" do
before do
db.name = "my_db"
end
it "uses the pg_dump command" do
expect(Pipeline).to receive(:new).ordered.and_return(pipeline)
expect(pipeline).to receive(:<<).ordered.with("pgdump_command")
expect(pipeline).to receive(:<<).ordered.with(
"cat > '/tmp/trigger/databases/PostgreSQL.sql'"
)
expect(pipeline).to receive(:run).ordered
expect(pipeline).to receive(:success?).ordered.and_return(true)
expect(db).to receive(:log!).ordered.with(:finished)
db.perform!
end
end # context 'without a compressor'
context "when the pipeline fails" do
before do
allow_any_instance_of(Pipeline).to receive(:success?).and_return(false)
allow_any_instance_of(Pipeline).to receive(:error_messages).and_return("error messages")
end
it "raises an error" do
expect do
db.perform!
end.to raise_error(Database::PostgreSQL::Error) { |err|
expect(err.message).to eq(
"Database::PostgreSQL::Error: Dump Failed!\n error messages"
)
}
end
end # context 'when the pipeline fails'
end # describe '#perform!'
describe "#pgdump" do
let(:option_methods) do
%w[
username_option connectivity_options
user_options tables_to_dump tables_to_skip name
]
end
# password_option and sudo_option leave no leading space if it's not used
it "returns full pg_dump command built from all options" do
option_methods.each { |name| allow(db).to receive(name).and_return(name) }
allow(db).to receive(:password_option).and_return("password_option")
allow(db).to receive(:sudo_option).and_return("sudo_option")
expect(db.send(:pgdump)).to eq(
"password_optionsudo_optionpg_dump #{option_methods.join(" ")}"
)
end
it "handles nil values from option methods" do
option_methods.each { |name| allow(db).to receive(name).and_return(nil) }
allow(db).to receive(:password_option).and_return(nil)
allow(db).to receive(:sudo_option).and_return(nil)
expect(db.send(:pgdump)).to eq(
"pg_dump #{" " * (option_methods.count - 1)}"
)
end
end # describe '#pgdump'
describe "#pgdumpall" do
let(:option_methods) do
%w[
username_option connectivity_options user_options
]
end
# password_option and sudo_option leave no leading space if it's not used
it "returns full pg_dump command built from all options" do
option_methods.each { |name| allow(db).to receive(name).and_return(name) }
allow(db).to receive(:password_option).and_return("password_option")
allow(db).to receive(:sudo_option).and_return("sudo_option")
expect(db.send(:pgdumpall)).to eq(
"password_optionsudo_optionpg_dumpall #{option_methods.join(" ")}"
)
end
it "handles nil values from option methods" do
option_methods.each { |name| allow(db).to receive(name).and_return(nil) }
allow(db).to receive(:password_option).and_return(nil)
allow(db).to receive(:sudo_option).and_return(nil)
expect(db.send(:pgdumpall)).to eq(
"pg_dumpall #{" " * (option_methods.count - 1)}"
)
end
end # describe '#pgdumpall'
describe "pgdump option methods" do
describe "#password_option" do
it "returns syntax to set environment variable" do
expect(db.send(:password_option)).to be_nil
db.password = "my_password"
expect(db.send(:password_option)).to eq "PGPASSWORD=my_password "
end
it "handles special characters" do
db.password = "my_password'\""
expect(db.send(:password_option)).to eq(
"PGPASSWORD=my_password\\'\\\" "
)
end
end # describe '#password_option'
describe "#sudo_option" do
it "returns argument if specified" do
expect(db.send(:sudo_option)).to be_nil
db.sudo_user = "my_sudo_user"
expect(db.send(:sudo_option)).to eq "sudo -n -H -u my_sudo_user "
end
end # describe '#sudo_option'
describe "#username_option" do
it "returns argument if specified" do
expect(db.send(:username_option)).to be_nil
db.username = "my_username"
expect(db.send(:username_option)).to eq "--username=my_username"
end
it "handles special characters" do
db.username = "my_user'\""
expect(db.send(:username_option)).to eq(
"--username=my_user\\'\\\""
)
end
end # describe '#username_option'
describe "#connectivity_options" do
it "returns only the socket argument if #socket specified" do
db.host = "my_host"
db.port = "my_port"
db.socket = "my_socket"
# pgdump uses --host to specify a socket
expect(db.send(:connectivity_options)).to eq(
"--host='my_socket'"
)
end
it "returns host and port arguments if specified" do
expect(db.send(:connectivity_options)).to eq ""
db.host = "my_host"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host'"
)
db.port = "my_port"
expect(db.send(:connectivity_options)).to eq(
"--host='my_host' --port='my_port'"
)
db.host = nil
expect(db.send(:connectivity_options)).to eq(
"--port='my_port'"
)
end
end # describe '#connectivity_options'
describe "#user_options" do
it "returns arguments for any #additional_options specified" do
expect(db.send(:user_options)).to eq ""
db.additional_options = ["--opt1", "--opt2"]
expect(db.send(:user_options)).to eq "--opt1 --opt2"
db.additional_options = "--opta --optb"
expect(db.send(:user_options)).to eq "--opta --optb"
end
end # describe '#user_options'
describe "#tables_to_dump" do
it "returns arguments for only_tables" do
expect(db.send(:tables_to_dump)).to eq ""
db.only_tables = ["one", "two"]
expect(db.send(:tables_to_dump)).to eq(
"--table='one' --table='two'"
)
db.only_tables = "three four"
expect(db.send(:tables_to_dump)).to eq(
"--table='three four'"
)
end
end # describe '#tables_to_dump'
describe "#tables_to_skip" do
it "returns arguments for skip_tables" do
expect(db.send(:tables_to_skip)).to eq ""
db.skip_tables = ["one", "two"]
expect(db.send(:tables_to_skip)).to eq(
"--exclude-table='one' --exclude-table='two'"
)
db.skip_tables = "three four"
expect(db.send(:tables_to_skip)).to eq(
"--exclude-table='three four'"
)
end
end # describe '#tables_to_dump'
end # describe 'pgdump option methods'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/rsync/pull_spec.rb | spec/syncer/rsync/pull_spec.rb | require "spec_helper"
module Backup
describe Syncer::RSync::Pull do
before do
allow_any_instance_of(Syncer::RSync::Pull).to \
receive(:utility).with(:rsync).and_return("rsync")
allow_any_instance_of(Syncer::RSync::Pull).to \
receive(:utility).with(:ssh).and_return("ssh")
end
describe "#perform!" do
describe "pulling from the remote host" do
specify "using :ssh mode" do
syncer = Syncer::RSync::Pull.new do |s|
s.mode = :ssh
s.host = "my_host"
s.path = "~/some/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
dirs.add "~/home/dir/"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/some/path/"))
expect(syncer).to receive(:run).with(
"rsync --archive -e \"ssh -p 22\" " \
"my_host:'/this/dir' :'that/dir' :'home/dir' " \
"'#{File.expand_path("~/some/path/")}'"
)
syncer.perform!
end
specify "using :ssh_daemon mode" do
syncer = Syncer::RSync::Pull.new do |s|
s.mode = :ssh_daemon
s.host = "my_host"
s.path = "~/some/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
dirs.add "~/home/dir/"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/some/path/"))
expect(syncer).to receive(:run).with(
"rsync --archive -e \"ssh -p 22\" " \
"my_host::'/this/dir' ::'that/dir' ::'home/dir' " \
"'#{File.expand_path("~/some/path/")}'"
)
syncer.perform!
end
specify "using :rsync_daemon mode" do
syncer = Syncer::RSync::Pull.new do |s|
s.mode = :rsync_daemon
s.host = "my_host"
s.path = "~/some/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
dirs.add "~/home/dir/"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/some/path/"))
expect(syncer).to receive(:run).with(
"rsync --archive --port 873 " \
"my_host::'/this/dir' ::'that/dir' ::'home/dir' " \
"'#{File.expand_path("~/some/path/")}'"
)
syncer.perform!
end
end # describe 'pulling from the remote host'
describe "password handling" do
let(:s) { sequence "" }
let(:syncer) { Syncer::RSync::Pull.new }
it "writes and removes the temporary password file" do
expect(syncer).to receive(:write_password_file!).ordered
expect(syncer).to receive(:run).ordered
expect(syncer).to receive(:remove_password_file!).ordered
syncer.perform!
end
it "ensures temporary password file removal" do
expect(syncer).to receive(:write_password_file!).ordered
expect(syncer).to receive(:run).ordered.and_raise(VerySpecificError)
expect(syncer).to receive(:remove_password_file!).ordered
expect do
syncer.perform!
end.to raise_error(VerySpecificError)
end
end # describe 'password handling'
describe "logging messages" do
it "logs started/finished messages" do
syncer = Syncer::RSync::Pull.new
expect(Logger).to receive(:info).with("Syncer::RSync::Pull Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Pull Finished!")
syncer.perform!
end
it "logs messages using optional syncer_id" do
syncer = Syncer::RSync::Pull.new("My Syncer")
expect(Logger).to receive(:info).with("Syncer::RSync::Pull (My Syncer) Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Pull (My Syncer) Finished!")
syncer.perform!
end
end
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/rsync/push_spec.rb | spec/syncer/rsync/push_spec.rb | require "spec_helper"
module Backup
describe Syncer::RSync::Push do
before do
allow_any_instance_of(Syncer::RSync::Push).to \
receive(:utility).with(:rsync).and_return("rsync")
allow_any_instance_of(Syncer::RSync::Push).to \
receive(:utility).with(:ssh).and_return("ssh")
end
describe "#initialize" do
after { Syncer::RSync::Push.clear_defaults! }
it "should use the values given" do
syncer = Syncer::RSync::Push.new("my syncer") do |rsync|
rsync.mode = :valid_mode
rsync.host = "123.45.678.90"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.rsync_user = "rsync_username"
rsync.rsync_password = "rsync_password"
rsync.rsync_password_file = "/my/rsync_password"
rsync.mirror = true
rsync.compress = true
rsync.path = "~/my_backups/"
rsync.additional_ssh_options = "ssh options"
rsync.additional_rsync_options = "rsync options"
rsync.directories do |directory|
directory.add "/some/directory/"
directory.add "~/home/directory"
directory.exclude "*~"
directory.exclude "tmp/"
end
end
expect(syncer.syncer_id).to eq "my syncer"
expect(syncer.mode).to eq :valid_mode
expect(syncer.host).to eq "123.45.678.90"
expect(syncer.port).to be 123
expect(syncer.ssh_user).to eq "ssh_username"
expect(syncer.rsync_user).to eq "rsync_username"
expect(syncer.rsync_password).to eq "rsync_password"
expect(syncer.rsync_password_file).to eq "/my/rsync_password"
expect(syncer.mirror).to be true
expect(syncer.compress).to be true
expect(syncer.path).to eq "~/my_backups/"
expect(syncer.additional_ssh_options).to eq "ssh options"
expect(syncer.additional_rsync_options).to eq "rsync options"
expect(syncer.directories).to eq ["/some/directory/", "~/home/directory"]
expect(syncer.excludes).to eq ["*~", "tmp/"]
end
it "should use default values if none are given" do
syncer = Syncer::RSync::Push.new
expect(syncer.syncer_id).to be_nil
expect(syncer.mode).to eq :ssh
expect(syncer.host).to be_nil
expect(syncer.port).to be 22
expect(syncer.ssh_user).to be_nil
expect(syncer.rsync_user).to be_nil
expect(syncer.rsync_password).to be_nil
expect(syncer.rsync_password_file).to be_nil
expect(syncer.mirror).to be(false)
expect(syncer.compress).to be(false)
expect(syncer.path).to eq "~/backups"
expect(syncer.additional_ssh_options).to be_nil
expect(syncer.additional_rsync_options).to be_nil
expect(syncer.directories).to eq []
expect(syncer.excludes).to eq []
end
it "should use default port 22 for :ssh_daemon mode" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh_daemon
end
expect(syncer.mode).to eq :ssh_daemon
expect(syncer.port).to be 22
end
it "should use default port 873 for :rsync_daemon mode" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :rsync_daemon
end
expect(syncer.mode).to eq :rsync_daemon
expect(syncer.port).to be 873
end
context "when pre-configured defaults have been set" do
before do
Backup::Syncer::RSync::Push.defaults do |rsync|
rsync.mode = :default_mode
rsync.host = "default_host"
rsync.port = 456
rsync.ssh_user = "default_ssh_username"
rsync.rsync_user = "default_rsync_username"
rsync.rsync_password = "default_rsync_password"
rsync.rsync_password_file = "/my/default_rsync_password"
rsync.mirror = true
rsync.compress = true
rsync.path = "~/default_my_backups"
rsync.additional_ssh_options = "default ssh options"
rsync.additional_rsync_options = "default rsync options"
end
end
it "should use pre-configured defaults" do
syncer = Syncer::RSync::Push.new
expect(syncer.mode).to eq :default_mode
expect(syncer.host).to eq "default_host"
expect(syncer.port).to be 456
expect(syncer.ssh_user).to eq "default_ssh_username"
expect(syncer.rsync_user).to eq "default_rsync_username"
expect(syncer.rsync_password).to eq "default_rsync_password"
expect(syncer.rsync_password_file).to eq "/my/default_rsync_password"
expect(syncer.mirror).to be true
expect(syncer.compress).to be true
expect(syncer.path).to eq "~/default_my_backups"
expect(syncer.additional_ssh_options).to eq "default ssh options"
expect(syncer.additional_rsync_options).to eq "default rsync options"
expect(syncer.directories).to eq []
end
it "should override pre-configured defaults" do
syncer = Syncer::RSync::Push.new do |rsync|
rsync.mode = :valid_mode
rsync.host = "123.45.678.90"
rsync.port = 123
rsync.ssh_user = "ssh_username"
rsync.rsync_user = "rsync_username"
rsync.rsync_password = "rsync_password"
rsync.rsync_password_file = "/my/rsync_password"
rsync.mirror = true
rsync.compress = true
rsync.path = "~/my_backups"
rsync.additional_ssh_options = "ssh options"
rsync.additional_rsync_options = "rsync options"
rsync.directories do |directory|
directory.add "/some/directory"
directory.add "~/home/directory"
end
end
expect(syncer.mode).to eq :valid_mode
expect(syncer.host).to eq "123.45.678.90"
expect(syncer.port).to be 123
expect(syncer.ssh_user).to eq "ssh_username"
expect(syncer.rsync_user).to eq "rsync_username"
expect(syncer.rsync_password).to eq "rsync_password"
expect(syncer.rsync_password_file).to eq "/my/rsync_password"
expect(syncer.mirror).to be true
expect(syncer.compress).to be true
expect(syncer.path).to eq "~/my_backups"
expect(syncer.additional_ssh_options).to eq "ssh options"
expect(syncer.additional_rsync_options).to eq "rsync options"
expect(syncer.directories).to eq ["/some/directory", "~/home/directory"]
end
end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
describe "#perform!" do
# Using :ssh mode, as these are not mode dependant.
describe "mirror and compress options" do
specify "with both" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.ssh_user = "ssh_username"
s.mirror = true
s.compress = true
s.path = "~/path/in/remote/home/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --delete --compress " \
"-e \"ssh -p 22 -l ssh_username\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path/in/remote/home'"
)
syncer.perform!
end
specify "without mirror" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.ssh_user = "ssh_username"
s.compress = true
s.path = "relative/path/in/remote/home"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --compress " \
"-e \"ssh -p 22 -l ssh_username\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'relative/path/in/remote/home'"
)
syncer.perform!
end
specify "without compress" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.mirror = true
s.path = "/absolute/path/on/remote/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --delete " \
"-e \"ssh -p 22\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'/absolute/path/on/remote'"
)
syncer.perform!
end
specify "without both" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.path = "/absolute/path/on/remote"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive " \
"-e \"ssh -p 22\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'/absolute/path/on/remote'"
)
syncer.perform!
end
end # describe 'mirror and compress options'
describe "additional_rsync_options" do
specify "given as an Array (with mirror option)" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.mirror = true
s.additional_rsync_options = ["--opt-a", "--opt-b"]
s.path = "path/on/remote/"
s.directories do |dirs|
dirs.add "/this/dir"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b " \
"-e \"ssh -p 22\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path/on/remote'"
)
syncer.perform!
end
specify "given as a String (without mirror option)" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "path/on/remote/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --opt-a --opt-b " \
"-e \"ssh -p 22\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path/on/remote'"
)
syncer.perform!
end
specify "with excludes" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "path/on/remote/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
dirs.exclude "*~"
dirs.exclude "tmp/"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --exclude='*~' --exclude='tmp/' --opt-a --opt-b " \
"-e \"ssh -p 22\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path/on/remote'"
)
syncer.perform!
end
end # describe 'additional_rsync_options'
describe "rsync password options" do
let(:s) { sequence "" }
let(:password_file) { double }
context "when an rsync_password is given" do
let(:syncer) do
Syncer::RSync::Push.new do |syncer|
syncer.mode = :rsync_daemon
syncer.host = "my_host"
syncer.rsync_user = "rsync_username"
syncer.rsync_password = "my_password"
syncer.mirror = true
syncer.compress = true
syncer.path = "my_module"
syncer.directories do |dirs|
dirs.add "/this/dir"
dirs.add "that/dir"
end
end
end
before do
allow(password_file).to receive(:path).and_return("path/to/password_file")
expect(Tempfile).to receive(:new).ordered
.with("backup-rsync-password").and_return(password_file)
expect(password_file).to receive(:write).ordered.with("my_password")
expect(password_file).to receive(:close).ordered
end
it "creates and uses a temp file for the password" do
expect(syncer).to receive(:run).ordered.with(
"rsync --archive --delete --compress " \
"--password-file='#{File.expand_path("path/to/password_file")}' " \
"--port 873 " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"rsync_username@my_host::'my_module'"
)
expect(password_file).to receive(:delete).ordered
syncer.perform!
end
it "ensures tempfile removal" do
expect(syncer).to receive(:run).ordered.and_raise("error message")
expect(password_file).to receive(:delete).ordered
expect do
syncer.perform!
end.to raise_error(RuntimeError, "error message")
end
end # context 'when an rsync_password is given'
context "when an rsync_password_file is given" do
let(:syncer) do
Syncer::RSync::Push.new do |syncer|
syncer.mode = :ssh_daemon
syncer.host = "my_host"
syncer.ssh_user = "ssh_username"
syncer.rsync_user = "rsync_username"
syncer.rsync_password_file = "path/to/my_password"
syncer.mirror = true
syncer.compress = true
syncer.path = "my_module"
syncer.directories do |dirs|
dirs.add "/this/dir"
dirs.add "that/dir"
end
end
end
before do
expect(Tempfile).to receive(:new).never
end
it "uses the given path" do
expect(syncer).to receive(:run).ordered.with(
"rsync --archive --delete --compress " \
"--password-file='#{File.expand_path("path/to/my_password")}' " \
"-e \"ssh -p 22 -l ssh_username\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"rsync_username@my_host::'my_module'"
)
syncer.perform!
end
end # context 'when an rsync_password_file is given'
context "when using :ssh mode" do
let(:syncer) do
Syncer::RSync::Push.new do |syncer|
syncer.mode = :ssh
syncer.host = "my_host"
syncer.ssh_user = "ssh_username"
syncer.rsync_user = "rsync_username"
syncer.rsync_password = "my_password"
syncer.rsync_password_file = "path/to/my_password"
syncer.mirror = true
syncer.compress = true
syncer.path = "~/path/in/remote/home"
syncer.directories do |dirs|
dirs.add "/this/dir"
dirs.add "that/dir"
end
end
end
before do
expect(Tempfile).to receive(:new).never
end
it "uses no rsync_user, tempfile or password_option" do
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).ordered.with(
"rsync --archive --delete --compress " \
"-e \"ssh -p 22 -l ssh_username\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path/in/remote/home'"
)
syncer.perform!
end
end # context 'when an rsync_password_file is given'
end # describe 'rsync password options'
describe "transport_options and host_command" do
context "using :rsync_daemon mode" do
it "uses the rsync --port option" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :rsync_daemon
s.host = "my_host"
s.mirror = true
s.compress = true
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "module_name/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b --compress " \
"--port 873 " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host::'module_name/path'"
)
syncer.perform!
end
it "uses the rsync_user" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :rsync_daemon
s.host = "my_host"
s.port = 789
s.rsync_user = "rsync_username"
s.mirror = true
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "module_name/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b " \
"--port 789 " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"rsync_username@my_host::'module_name/path'"
)
syncer.perform!
end
end # context 'in :rsync_daemon mode'
context "using :ssh_daemon mode" do
specify "rsync_user, additional_ssh_options as an Array" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh_daemon
s.host = "my_host"
s.mirror = true
s.compress = true
s.additional_ssh_options = ["--opt1", "--opt2"]
s.rsync_user = "rsync_username"
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "module_name/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b --compress " \
"-e \"ssh -p 22 --opt1 --opt2\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"rsync_username@my_host::'module_name/path'"
)
syncer.perform!
end
specify "ssh_user, port, additional_ssh_options as an String" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh_daemon
s.host = "my_host"
s.port = 789
s.mirror = true
s.compress = true
s.ssh_user = "ssh_username"
s.additional_ssh_options = "-i '/my/identity_file'"
s.additional_rsync_options = "--opt-a --opt-b"
s.path = "module_name/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b --compress " \
"-e \"ssh -p 789 -l ssh_username -i '/my/identity_file'\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host::'module_name/path'"
)
syncer.perform!
end
end # context 'in :ssh_daemon mode'
context "using :ssh mode" do
it "uses no daemon or rsync user" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.mirror = true
s.compress = true
s.ssh_user = "ssh_username"
s.additional_ssh_options = ["--opt1", "--opt2"]
s.rsync_user = "rsync_username"
s.additional_rsync_options = "--opt-a 'something'"
s.path = "~/some/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:create_dest_path!)
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a 'something' --compress " \
"-e \"ssh -p 22 -l ssh_username --opt1 --opt2\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'some/path'"
)
syncer.perform!
end
end # context 'in :ssh mode'
end # describe 'transport_options and host_command'
describe "dest_path creation" do
context "when using :ssh mode" do
it "creates path using ssh with transport args" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.ssh_user = "ssh_username"
s.additional_ssh_options = "-i '/path/to/id_rsa'"
s.path = "~/some/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"ssh -p 22 -l ssh_username -i '/path/to/id_rsa' my_host " +
%("mkdir -p 'some/path'")
)
expect(syncer).to receive(:run).with(
"rsync --archive " \
"-e \"ssh -p 22 -l ssh_username -i '/path/to/id_rsa'\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'some/path'"
)
syncer.perform!
end
it "only creates path if mkdir -p is required" do
syncer = Syncer::RSync::Push.new do |s|
s.mode = :ssh
s.host = "my_host"
s.ssh_user = "ssh_username"
s.additional_ssh_options = "-i '/path/to/id_rsa'"
s.path = "~/path/"
s.directories do |dirs|
dirs.add "/this/dir/"
dirs.add "that/dir"
end
end
expect(syncer).to receive(:run).with(
"rsync --archive " \
"-e \"ssh -p 22 -l ssh_username -i '/path/to/id_rsa'\" " \
"'/this/dir' '#{File.expand_path("that/dir")}' " \
"my_host:'path'"
)
syncer.perform!
end
end
end # describe 'dest_path creation'
describe "logging messages" do
it "logs started/finished messages" do
syncer = Syncer::RSync::Push.new
expect(Logger).to receive(:info).with("Syncer::RSync::Push Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Push Finished!")
syncer.perform!
end
it "logs messages using optional syncer_id" do
syncer = Syncer::RSync::Push.new("My Syncer")
expect(Logger).to receive(:info).with("Syncer::RSync::Push (My Syncer) Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Push (My Syncer) Finished!")
syncer.perform!
end
end
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/rsync/local_spec.rb | spec/syncer/rsync/local_spec.rb | require "spec_helper"
# Unit specs for Syncer::RSync::Local: option handling in #initialize and
# the rsync command line built by #perform!.
module Backup
describe Syncer::RSync::Local do
before do
# Stub utility lookup so the specs never shell out to find rsync.
allow_any_instance_of(Syncer::RSync::Local).to \
receive(:utility).with(:rsync).and_return("rsync")
end
describe "#initialize" do
after { Syncer::RSync::Local.clear_defaults! }
it "should use the values given" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "~/my_backups"
rsync.mirror = true
rsync.archive = false
rsync.additional_rsync_options = ["--opt-a", "--opt-b"]
rsync.directories do |directory|
directory.add "/some/directory"
directory.add "~/home/directory"
directory.exclude "*~"
directory.exclude "tmp/"
end
end
expect(syncer.path).to eq "~/my_backups"
expect(syncer.mirror).to be(true)
expect(syncer.archive).to be(false)
expect(syncer.directories).to eq ["/some/directory", "~/home/directory"]
expect(syncer.excludes).to eq ["*~", "tmp/"]
expect(syncer.additional_rsync_options).to eq ["--opt-a", "--opt-b"]
end
it "should use default values if none are given" do
syncer = Syncer::RSync::Local.new
expect(syncer.path).to eq "~/backups"
expect(syncer.mirror).to be(false)
expect(syncer.archive).to be(true)
expect(syncer.directories).to eq []
expect(syncer.excludes).to eq []
expect(syncer.additional_rsync_options).to be_nil
end
context "when pre-configured defaults have been set" do
before do
Syncer::RSync::Local.defaults do |rsync|
rsync.path = "some_path"
rsync.mirror = "some_mirror"
rsync.archive = "archive"
rsync.additional_rsync_options = "rsync_options"
end
end
it "should use pre-configured defaults" do
syncer = Syncer::RSync::Local.new
expect(syncer.path).to eq "some_path"
expect(syncer.mirror).to eq "some_mirror"
expect(syncer.archive).to eq "archive"
expect(syncer.directories).to eq []
expect(syncer.excludes).to eq []
expect(syncer.additional_rsync_options).to eq "rsync_options"
end
it "should override pre-configured defaults" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "new_path"
rsync.mirror = "new_mirror"
rsync.archive = false
rsync.additional_rsync_options = "new_rsync_options"
end
expect(syncer.path).to eq "new_path"
expect(syncer.mirror).to eq "new_mirror"
expect(syncer.archive).to be(false)
expect(syncer.directories).to eq []
expect(syncer.excludes).to eq []
expect(syncer.additional_rsync_options).to eq "new_rsync_options"
end
end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
# Each example asserts the exact rsync command line: source directories are
# passed with trailing slashes stripped and tilde/relative paths expanded,
# and the destination path is mkdir_p'd before the transfer.
describe "#perform!" do
specify "with mirror option and Array of additional_rsync_options" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "~/my_backups"
rsync.mirror = true
rsync.additional_rsync_options = ["--opt-a", "--opt-b"]
rsync.directories do |directory|
directory.add "/some/directory/"
directory.add "~/home/directory"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/my_backups/"))
# mirror adds --delete after --archive
expect(syncer).to receive(:run).with(
"rsync --archive --delete --opt-a --opt-b " \
"'/some/directory' '#{File.expand_path("~/home/directory")}' " \
"'#{File.expand_path("~/my_backups")}'"
)
syncer.perform!
end
specify "without mirror option and String of additional_rsync_options" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "~/my_backups"
rsync.additional_rsync_options = "--opt-a --opt-b"
rsync.directories do |directory|
directory.add "/some/directory/"
directory.add "~/home/directory"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/my_backups/"))
expect(syncer).to receive(:run).with(
"rsync --archive --opt-a --opt-b " \
"'/some/directory' '#{File.expand_path("~/home/directory")}' " \
"'#{File.expand_path("~/my_backups")}'"
)
syncer.perform!
end
specify "without archive option and String of additional_rsync_options" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "~/my_backups"
rsync.additional_rsync_options = "--opt-a --opt-b"
rsync.archive = false
rsync.directories do |directory|
directory.add "/some/directory/"
directory.add "~/home/directory"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/my_backups/"))
# archive = false drops the --archive flag entirely
expect(syncer).to receive(:run).with(
"rsync --opt-a --opt-b " \
"'/some/directory' '#{File.expand_path("~/home/directory")}' " \
"'#{File.expand_path("~/my_backups")}'"
)
syncer.perform!
end
specify "with mirror, excludes and additional_rsync_options" do
syncer = Syncer::RSync::Local.new do |rsync|
rsync.path = "~/my_backups"
rsync.mirror = true
rsync.additional_rsync_options = ["--opt-a", "--opt-b"]
rsync.directories do |directory|
directory.add "/some/directory/"
directory.add "~/home/directory"
directory.exclude "*~"
directory.exclude "tmp/"
end
end
expect(FileUtils).to receive(:mkdir_p).with(File.expand_path("~/my_backups/"))
# excludes become --exclude='...' flags, placed before the extra options
expect(syncer).to receive(:run).with(
"rsync --archive --delete --exclude='*~' --exclude='tmp/' " \
"--opt-a --opt-b " \
"'/some/directory' '#{File.expand_path("~/home/directory")}' " \
"'#{File.expand_path("~/my_backups")}'"
)
syncer.perform!
end
describe "logging messages" do
it "logs started/finished messages" do
syncer = Syncer::RSync::Local.new
expect(Logger).to receive(:info).with("Syncer::RSync::Local Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Local Finished!")
syncer.perform!
end
it "logs messages using optional syncer_id" do
syncer = Syncer::RSync::Local.new("My Syncer")
expect(Logger).to receive(:info).with("Syncer::RSync::Local (My Syncer) Started...")
expect(Logger).to receive(:info).with("Syncer::RSync::Local (My Syncer) Finished!")
syncer.perform!
end
end
end # describe '#perform!'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/cloud/s3_spec.rb | spec/syncer/cloud/s3_spec.rb | require "spec_helper"
# Unit specs for Syncer::Cloud::S3: configuration defaults and validation,
# the memoized CloudIO::S3 instance, and remote-file listing.
module Backup
describe Syncer::Cloud::S3 do
# Minimal valid configuration using static AWS access keys.
let(:required_config) do
proc do |s3|
s3.access_key_id = "my_access_key_id"
s3.secret_access_key = "my_secret_access_key"
s3.bucket = "my_bucket"
end
end
# Minimal valid configuration when relying on an IAM instance profile.
let(:required_iam_config) do
proc do |s3|
s3.use_iam_profile = true
s3.bucket = "my_bucket"
end
end
let(:syncer) { Syncer::Cloud::S3.new(&required_config) }
it_behaves_like "a class that includes Config::Helpers" do
let(:default_overrides) do
{ "encryption" => :aes256,
"storage_class" => :reduced_redundancy }
end
let(:new_overrides) do
{ "encryption" => "aes256",
"storage_class" => "standard" }
end
end
it_behaves_like "a subclass of Syncer::Cloud::Base"
describe "#initialize" do
it "provides default values" do
# required
expect(syncer.bucket).to eq "my_bucket"
# required unless using IAM profile
expect(syncer.access_key_id).to eq "my_access_key_id"
expect(syncer.secret_access_key).to eq "my_secret_access_key"
# defaults
expect(syncer.use_iam_profile).to be_nil
expect(syncer.region).to be_nil
expect(syncer.encryption).to be_nil
expect(syncer.storage_class).to eq :standard
expect(syncer.fog_options).to be_nil
# from Syncer::Cloud::Base
expect(syncer.thread_count).to be 0
expect(syncer.max_retries).to be 10
expect(syncer.retry_waitsec).to be 30
expect(syncer.path).to eq "backups"
# from Syncer::Base
expect(syncer.syncer_id).to be_nil
expect(syncer.mirror).to be(false)
expect(syncer.directories).to eq []
end
it "configures the syncer" do
syncer = Syncer::Cloud::S3.new(:my_id) do |s3|
s3.access_key_id = "my_access_key_id"
s3.secret_access_key = "my_secret_access_key"
s3.bucket = "my_bucket"
s3.region = "my_region"
s3.encryption = :aes256
s3.storage_class = :reduced_redundancy
s3.thread_count = 5
s3.max_retries = 15
s3.retry_waitsec = 45
s3.path = "my_backups"
s3.mirror = true
s3.fog_options = { my_key: "my_value" }
s3.directories do
add "/this/path"
add "that/path"
end
end
expect(syncer.access_key_id).to eq "my_access_key_id"
expect(syncer.secret_access_key).to eq "my_secret_access_key"
expect(syncer.use_iam_profile).to be_nil
expect(syncer.bucket).to eq "my_bucket"
expect(syncer.region).to eq "my_region"
expect(syncer.encryption).to eq :aes256
expect(syncer.storage_class).to eq :reduced_redundancy
expect(syncer.thread_count).to be 5
expect(syncer.max_retries).to be 15
expect(syncer.retry_waitsec).to be 45
expect(syncer.path).to eq "my_backups"
expect(syncer.syncer_id).to eq :my_id
expect(syncer.mirror).to be(true)
expect(syncer.fog_options).to eq my_key: "my_value"
expect(syncer.directories).to eq ["/this/path", "that/path"]
end
it "requires bucket" do
pre_config = required_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
s3.bucket = nil
end
end.to raise_error(StandardError, /are all required/)
end
context "when using AWS IAM profile" do
it "does not require access_key_id or secret_access_key" do
pre_config = required_iam_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
end
end.not_to raise_error
end
end
context "when using AWS access keys" do
it "requires access_key_id" do
pre_config = required_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
s3.access_key_id = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires secret_access_key" do
pre_config = required_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
s3.secret_access_key = nil
end
end.to raise_error StandardError, /are all required/
end
end
it "validates encryption" do
pre_config = required_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
s3.encryption = :aes512
end
end.to raise_error StandardError, /must be :aes256 or nil/
end
it "validates storage_class" do
pre_config = required_config
expect do
Syncer::Cloud::S3.new do |s3|
pre_config.call(s3)
s3.storage_class = :glacier
end
end.to raise_error StandardError, /must be :standard or :reduced_redundancy/
end
end # describe '#initialize'
# #cloud_io must build CloudIO::S3 exactly once (memoized) with the
# syncer's settings; repeated calls return the same object.
describe "#cloud_io" do
specify "when using AWS access keys" do
expect(CloudIO::S3).to receive(:new).once.with(
access_key_id: "my_access_key_id",
secret_access_key: "my_secret_access_key",
use_iam_profile: nil,
bucket: "my_bucket",
region: nil,
encryption: nil,
storage_class: :standard,
max_retries: 10,
retry_waitsec: 30,
chunk_size: 0,
fog_options: nil
).and_return(:cloud_io)
syncer = Syncer::Cloud::S3.new(&required_config)
expect(syncer.send(:cloud_io)).to eq :cloud_io
expect(syncer.send(:cloud_io)).to eq :cloud_io
end
specify "when using AWS IAM profile" do
expect(CloudIO::S3).to receive(:new).once.with(
access_key_id: nil,
secret_access_key: nil,
use_iam_profile: true,
bucket: "my_bucket",
region: nil,
encryption: nil,
storage_class: :standard,
max_retries: 10,
retry_waitsec: 30,
chunk_size: 0,
fog_options: nil
).and_return(:cloud_io)
syncer = Syncer::Cloud::S3.new(&required_iam_config)
expect(syncer.send(:cloud_io)).to eq :cloud_io
expect(syncer.send(:cloud_io)).to eq :cloud_io
end
end # describe '#cloud_io'
# #get_remote_files strips the remote dir prefix from each object key and
# maps the relative path to the object's etag.
describe "#get_remote_files" do
let(:cloud_io) { double }
let(:object_a) do
double(
CloudIO::S3::Object,
key: "my/path/dir_to_sync/some_dir/object_a",
etag: "12345"
)
end
let(:object_b) do
double(
CloudIO::S3::Object,
key: "my/path/dir_to_sync/another_dir/object_b",
etag: "67890"
)
end
before { allow(syncer).to receive(:cloud_io).and_return(cloud_io) }
it "returns a hash of relative paths and checksums for remote objects" do
expect(cloud_io).to receive(:objects).with("my/path/dir_to_sync")
.and_return([object_a, object_b])
expect(
syncer.send(:get_remote_files, "my/path/dir_to_sync")
).to eq(
"some_dir/object_a" => "12345", "another_dir/object_b" => "67890"
)
end
it "returns an empty hash if no remote objects are found" do
expect(cloud_io).to receive(:objects).and_return([])
expect(syncer.send(:get_remote_files, "foo")).to eq({})
end
end # describe '#get_remote_files'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/cloud/local_file_spec.rb | spec/syncer/cloud/local_file_spec.rb | require "spec_helper"
# Specs for Syncer::Cloud::LocalFile.find, which scans a directory tree and
# returns LocalFile objects (with MD5 checksums) keyed by relative path.
module Backup
describe Syncer::Cloud::LocalFile do
describe ".find" do
before do
# Work inside a throwaway tmpdir, sandboxed so FileUtils cannot
# escape it; allow the real `utility` lookups (md5sum etc.) through.
@tmpdir = Dir.mktmpdir("backup_spec")
SandboxFileUtils.activate!(@tmpdir)
FileUtils.mkdir_p File.join(@tmpdir, "sync_dir/sub_dir")
allow(Utilities).to receive(:utility).and_call_original
end
after do
FileUtils.rm_r(@tmpdir, force: true, secure: true)
end
it "returns an empty hash if no files are found" do
expect(described_class.find(@tmpdir)).to eq({})
end
context "with test files" do
# Relative paths => expected MD5 of each file's contents (the file's
# contents are its own path string, written in the before hook).
let(:test_files) do
{ "sync_dir/one.file" => "c9f90c31589526ef50cc974a614038d5",
"sync_dir/two.file" => "1d26903171cef8b1d7eb035ca049f492",
"sync_dir/sub_dir/three.file" => "4ccdba38597e718ed00e3344dc78b6a1",
"base_dir.file" => "a6cfa67bfa0e16402b76d4560c0baa3d" }
end
before do
test_files.each_key do |path|
File.open(File.join(@tmpdir, path), "w") { |file| file.write path }
end
end
# This fails on OSX, see https://github.com/backup/backup/issues/482
# for more information.
it "returns a Hash of LocalFile objects, keyed by relative path", skip: RUBY_PLATFORM =~ /darwin/ do
Dir.chdir(@tmpdir) do
# A filename with an invalid UTF-8 byte must be skipped with a
# warning (logged with the byte replaced by U+FFFD).
bad_file = "sync_dir/bad\xFFfile"
sanitized_bad_file = "sync_dir/bad\xEF\xBF\xBDfile"
FileUtils.touch bad_file
expect(Logger).to receive(:warn).with(
"\s\s[skipping] #{File.expand_path(sanitized_bad_file)}\n" \
"\s\sPath Contains Invalid UTF-8 byte sequences"
)
local_files = described_class.find("sync_dir")
expect(local_files.keys.count).to be 3
local_files.each do |relative_path, local_file|
expect(local_file.path).to eq(
File.expand_path("sync_dir/#{relative_path}")
)
expect(local_file.md5).to eq(
test_files["sync_dir/#{relative_path}"]
)
end
end
end
it "ignores excluded files" do
# Excludes accept both glob strings and regexps.
expect(
described_class.find(@tmpdir, ["**/two.*", /sub|base_dir/]).keys
).to eq(["sync_dir/one.file"])
end
it "follows symlinks" do
# A symlink's md5 is that of its target's contents.
FileUtils.ln_s File.join(@tmpdir, "base_dir.file"),
File.join(@tmpdir, "sync_dir/link")
found = described_class.find(@tmpdir)
expect(found.keys).to include("sync_dir/link")
expect(found["sync_dir/link"].md5).to eq(test_files["base_dir.file"])
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/syncer/cloud/cloud_files_spec.rb | spec/syncer/cloud/cloud_files_spec.rb | require "spec_helper"
# Unit specs for Syncer::Cloud::CloudFiles: configuration defaults and
# validation, the memoized CloudIO::CloudFiles instance, and remote listing.
module Backup
describe Syncer::Cloud::CloudFiles do
# Minimal valid configuration; username, api_key and container are required.
let(:required_config) do
proc do |cf|
cf.username = "my_username"
cf.api_key = "my_api_key"
cf.container = "my_container"
end
end
let(:syncer) { Syncer::Cloud::CloudFiles.new(&required_config) }
it_behaves_like "a class that includes Config::Helpers"
it_behaves_like "a subclass of Syncer::Cloud::Base"
describe "#initialize" do
it "provides default values" do
# required
expect(syncer.username).to eq "my_username"
expect(syncer.api_key).to eq "my_api_key"
expect(syncer.container).to eq "my_container"
# defaults
expect(syncer.auth_url).to be_nil
expect(syncer.region).to be_nil
expect(syncer.servicenet).to be(false)
expect(syncer.fog_options).to be_nil
# from Syncer::Cloud::Base
expect(syncer.thread_count).to be 0
expect(syncer.max_retries).to be 10
expect(syncer.retry_waitsec).to be 30
expect(syncer.path).to eq "backups"
# from Syncer::Base
expect(syncer.syncer_id).to be_nil
expect(syncer.mirror).to be(false)
expect(syncer.directories).to eq []
end
it "configures the syncer" do
syncer = Syncer::Cloud::CloudFiles.new(:my_id) do |cf|
cf.username = "my_username"
cf.api_key = "my_api_key"
cf.container = "my_container"
cf.auth_url = "my_auth_url"
cf.region = "my_region"
cf.servicenet = true
cf.thread_count = 5
cf.max_retries = 15
cf.retry_waitsec = 45
cf.fog_options = { my_key: "my_value" }
cf.path = "my_backups"
cf.mirror = true
cf.directories do
add "/this/path"
add "that/path"
end
end
expect(syncer.username).to eq "my_username"
expect(syncer.api_key).to eq "my_api_key"
expect(syncer.container).to eq "my_container"
expect(syncer.auth_url).to eq "my_auth_url"
expect(syncer.region).to eq "my_region"
expect(syncer.servicenet).to be(true)
expect(syncer.thread_count).to be 5
expect(syncer.max_retries).to be 15
expect(syncer.retry_waitsec).to be 45
expect(syncer.fog_options).to eq my_key: "my_value"
expect(syncer.path).to eq "my_backups"
expect(syncer.syncer_id).to eq :my_id
expect(syncer.mirror).to be(true)
expect(syncer.directories).to eq ["/this/path", "that/path"]
end
it "requires username" do
pre_config = required_config
expect do
Syncer::Cloud::CloudFiles.new do |cf|
pre_config.call(cf)
cf.username = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires api_key" do
pre_config = required_config
expect do
Syncer::Cloud::CloudFiles.new do |cf|
pre_config.call(cf)
cf.api_key = nil
end
end.to raise_error StandardError, /are all required/
end
it "requires container" do
pre_config = required_config
expect do
Syncer::Cloud::CloudFiles.new do |cf|
pre_config.call(cf)
cf.container = nil
end
end.to raise_error StandardError, /are all required/
end
end # describe '#initialize'
describe "#cloud_io" do
# CloudIO::CloudFiles must be built exactly once (memoized) with the
# syncer's settings; repeated calls return the same object.
it "caches a new CloudIO instance" do
expect(CloudIO::CloudFiles).to receive(:new).once.with(
username: "my_username",
api_key: "my_api_key",
auth_url: nil,
region: nil,
servicenet: false,
container: "my_container",
max_retries: 10,
retry_waitsec: 30,
segments_container: nil,
segment_size: 0,
fog_options: nil
).and_return(:cloud_io)
expect(syncer.send(:cloud_io)).to eq :cloud_io
expect(syncer.send(:cloud_io)).to eq :cloud_io
end
end # describe '#cloud_io'
# #get_remote_files strips the remote dir prefix from each object name and
# maps the relative path to the object's hash (checksum).
describe "#get_remote_files" do
let(:cloud_io) { double }
let(:object_a) do
double(
CloudIO::CloudFiles::Object,
name: "my/path/dir_to_sync/some_dir/object_a",
hash: "12345"
)
end
let(:object_b) do
double(
CloudIO::CloudFiles::Object,
name: "my/path/dir_to_sync/another_dir/object_b",
hash: "67890"
)
end
before { allow(syncer).to receive(:cloud_io).and_return(cloud_io) }
it "returns a hash of relative paths and checksums for remote objects" do
expect(cloud_io).to receive(:objects).with("my/path/dir_to_sync")
.and_return([object_a, object_b])
expect(
syncer.send(:get_remote_files, "my/path/dir_to_sync")
).to eq(
"some_dir/object_a" => "12345", "another_dir/object_b" => "67890"
)
end
it "returns an empty hash if no remote objects are found" do
expect(cloud_io).to receive(:objects).and_return([])
expect(syncer.send(:get_remote_files, "foo")).to eq({})
end
end # describe '#get_remote_files'
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/encryptor/gpg_spec.rb | spec/encryptor/gpg_spec.rb | require "spec_helper"
describe Backup::Encryptor::GPG do
# Default subject for these specs: a symmetric-mode encryptor with a
# passphrase (individual examples override mode as needed).
let(:encryptor) do
Backup::Encryptor::GPG.new do |e|
e.mode = :symmetric
e.passphrase = "test secret"
end
end
it "should be a subclass of Encryptor::Base" do
expect(Backup::Encryptor::GPG
.superclass).to eq(Backup::Encryptor::Base)
end
it "supports three modes of operation" do
expect(Backup::Encryptor::GPG::MODES).to eq([:asymmetric, :symmetric, :both])
end
# #mode= accepts any of MODES (string or symbol, stored as a symbol) and
# raises GPG::Error for anything else.
describe "#mode=" do
it "should accept valid modes" do
mode = Backup::Encryptor::GPG::MODES.sample
encryptor.mode = mode
expect(encryptor.mode).to eq(mode)
end
it "should convert string input to a symbol" do
mode = Backup::Encryptor::GPG::MODES.sample
encryptor.mode = mode.to_s
expect(encryptor.mode).to eq(mode)
end
it "should raise an error for invalid modes" do
expect do
encryptor.mode = "foo"
end.to raise_error(Backup::Encryptor::GPG::Error)
end
end # describe '#mode='
# #initialize: defaults, pre-configured class defaults, and per-instance
# overrides of those defaults.
describe "#initialize" do
after { Backup::Encryptor::GPG.clear_defaults! }
it "should load pre-configured defaults" do
expect_any_instance_of(Backup::Encryptor::GPG).to receive(:load_defaults!)
encryptor
end
context "when no pre-configured defaults have been set" do
it "should use the values given" do
expect(encryptor.mode).to eq(:symmetric)
expect(encryptor.passphrase).to eq("test secret")
end
it "should use default values if none are given" do
encryptor = Backup::Encryptor::GPG.new
# :asymmetric is the default mode; everything else is unset.
expect(encryptor.mode).to eq(:asymmetric)
expect(encryptor.keys).to be_nil
expect(encryptor.recipients).to be_nil
expect(encryptor.passphrase).to be_nil
expect(encryptor.passphrase_file).to be_nil
expect(encryptor.gpg_config).to be_nil
expect(encryptor.gpg_homedir).to be_nil
end
end # context 'when no pre-configured defaults have been set'
context "when pre-configured defaults have been set" do
before do
Backup::Encryptor::GPG.defaults do |e|
e.mode = :both
e.keys = { "test_key" => "test public key" }
e.recipients = "test_key"
e.passphrase_file = "my/pass/file"
end
end
it "should use pre-configured defaults" do
encryptor = Backup::Encryptor::GPG.new
expect(encryptor.mode).to eq(:both)
expect(encryptor.keys).to eq("test_key" => "test public key")
expect(encryptor.recipients).to eq("test_key")
expect(encryptor.passphrase_file).to eq("my/pass/file")
end
it "should override pre-configured defaults" do
# The subject's block set mode/passphrase, which win over defaults;
# the untouched settings still come from the defaults above.
expect(encryptor.mode).to eq(:symmetric)
expect(encryptor.keys).to eq("test_key" => "test public key")
expect(encryptor.recipients).to eq("test_key")
expect(encryptor.passphrase).to eq("test secret")
expect(encryptor.passphrase_file).to eq("my/pass/file")
end
end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
# #encrypt_with yields the full gpg command plus the '.gpg' extension, and
# must always run #cleanup — even when it raises for missing mode options.
describe "#encrypt_with" do
before do
expect(encryptor).to receive(:log!)
expect(encryptor).to receive(:prepare)
expect(encryptor).to receive(:cleanup) # ensure call
end
context "when encryption can be performed" do
it "should yield the encryption command and extension" do
expect(encryptor).to receive(:mode_options).twice.and_return("mode_options")
expect(encryptor).to receive(:base_options).and_return("base_options")
expect(encryptor).to receive(:utility).with(:gpg).and_return("gpg")
encryptor.encrypt_with do |command, ext|
expect(command).to eq("gpg base_options mode_options")
expect(ext).to eq(".gpg")
end
end
end
context "when encryption can not be performed" do
it "should raise an error when no mode_options are returned" do
expect(encryptor).to receive(:mode_options).and_return([])
expect do
encryptor.encrypt_with
end.to raise_error(Backup::Encryptor::GPG::Error)
end
end
end # describe '#encrypt_with'
# #prepare initializes @tempdirs; #cleanup securely removes any tempdirs
# and resets all memoized instance variables.
describe "#prepare and #cleanup" do
it "should setup required variables" do
encryptor.instance_variable_set(:@tempdirs, nil)
expect(FileUtils).to receive(:rm_rf).never
encryptor.send(:prepare)
expect(encryptor.instance_variable_get(:@tempdirs)).to eq([])
end
it "should remove any tempdirs and clear all variables" do
encryptor.instance_variable_set(:@tempdirs, ["a", "b"])
expect(FileUtils).to receive(:rm_rf).with(["a", "b"], secure: true)
encryptor.instance_variable_set(:@base_options, true)
encryptor.instance_variable_set(:@mode_options, true)
encryptor.instance_variable_set(:@user_recipients, true)
encryptor.instance_variable_set(:@user_keys, true)
encryptor.instance_variable_set(:@system_identifiers, true)
encryptor.send(:cleanup)
expect(encryptor.instance_variable_get(:@tempdirs)).to eq([])
expect(encryptor.instance_variable_get(:@base_options)).to be_nil
expect(encryptor.instance_variable_get(:@mode_options)).to be_nil
expect(encryptor.instance_variable_get(:@user_recipients)).to be_nil
expect(encryptor.instance_variable_get(:@user_keys)).to be_nil
expect(encryptor.instance_variable_get(:@system_identifiers)).to be_nil
end
end # describe '#prepare and #cleanup'
# #base_options always includes --no-tty, appends --homedir/--options when
# configured, and memoizes the result in @base_options (each helper is
# expected .once even though base_options is called twice per example).
describe "#base_options" do
context "while caching the return value in @base_options" do
before { encryptor.instance_variable_set(:@base_options, nil) }
context "when #gpg_homedir is given" do
it "should return the proper options" do
expect(encryptor).to receive(:setup_gpg_homedir).once.and_return("/a/dir")
expect(encryptor).to receive(:setup_gpg_config).once.and_return(false)
ret = "--no-tty --homedir '/a/dir'"
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.instance_variable_get(:@base_options)).to eq(ret)
end
end
context "when #gpg_config is given" do
it "should return the proper options" do
expect(encryptor).to receive(:setup_gpg_homedir).once.and_return(false)
expect(encryptor).to receive(:setup_gpg_config).once.and_return("/a/file")
ret = "--no-tty --options '/a/file'"
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.instance_variable_get(:@base_options)).to eq(ret)
end
end
context "when #gpg_homedir and #gpg_config is given" do
it "should return the proper options" do
expect(encryptor).to receive(:setup_gpg_homedir).once.and_return("/a/dir")
expect(encryptor).to receive(:setup_gpg_config).once.and_return("/a/file")
ret = "--no-tty --homedir '/a/dir' --options '/a/file'"
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.instance_variable_get(:@base_options)).to eq(ret)
end
end
context "when neither #gpg_homedir and #gpg_config is given" do
it "should return the proper options" do
expect(encryptor).to receive(:setup_gpg_homedir).once.and_return(false)
expect(encryptor).to receive(:setup_gpg_config).once.and_return(false)
ret = "--no-tty"
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.send(:base_options)).to eq(ret)
expect(encryptor.instance_variable_get(:@base_options)).to eq(ret)
end
end
end
end # describe '#base_options'
# #setup_gpg_homedir: returns false when unset; otherwise creates the dir
# with 0700 perms owned by Config.user, initializes the keyring files via
# gpg if missing, and wraps any failure in GPG::Error.
describe "#setup_gpg_homedir" do
context "when #gpg_homedir is not set" do
it "should return false" do
encryptor.gpg_homedir = nil
expect(encryptor.send(:setup_gpg_homedir)).to eq(false)
end
end
context "when #gpg_homedir is set" do
let(:path) { "some/path" }
let(:expanded_path) { File.expand_path(path) }
before do
encryptor.gpg_homedir = path
allow(Backup::Config).to receive(:user).and_return("a_user")
end
context "and no errors occur" do
before do
expect(FileUtils).to receive(:mkdir_p).with(expanded_path)
expect(FileUtils).to receive(:chown).with("a_user", nil, expanded_path)
expect(FileUtils).to receive(:chmod).with(0o700, expanded_path)
end
context "and the gpg_homedir files exist" do
before do
%w[pubring.gpg secring.gpg trustdb.gpg].each do |file|
expect(File).to receive(:exist?).with(
File.join(expanded_path, file)
).and_return(true)
end
end
it "should ensure permissions and return the path" do
expect(encryptor).to receive(:utility).never
expect(encryptor.send(:setup_gpg_homedir)).to eq(expanded_path)
end
end
context "and the gpg_homedir files do not exist" do
before do
allow(File).to receive(:exist?).and_return(false)
end
it "should call gpg to initialize the files" do
# A no-op key listing (-K) makes gpg create the keyring files.
expect(encryptor).to receive(:utility).with(:gpg).and_return("gpg")
expect(encryptor).to receive(:run).with(
"gpg --homedir '#{expanded_path}' -K 2>&1 >/dev/null"
)
expect(encryptor.send(:setup_gpg_homedir)).to eq(expanded_path)
end
end
end
context "and errors occur" do
it "should wrap and raise the error" do
expect(File).to receive(:expand_path).and_raise("error message")
expect do
encryptor.send(:setup_gpg_homedir)
end.to raise_error(proc do |err|
expect(err).to be_an_instance_of Backup::Encryptor::GPG::Error
expect(err.message).to match("Failed to create or set permissions")
expect(err.message).to match("RuntimeError: error message")
end)
end
end
end
end # describe '#setup_gpg_homedir'
# #setup_gpg_config: returns false when unset; otherwise writes #gpg_config
# (leading whitespace stripped) to a tempfile inside a tempdir registered in
# @tempdirs, validates it with #check_gpg_config, and returns the file path.
# On error it runs #cleanup and raises GPG::Error.
describe "#setup_gpg_config" do
context "when #gpg_config is not set" do
it "should return false" do
encryptor.gpg_config = nil
expect(encryptor.send(:setup_gpg_config)).to eq(false)
end
end
context "when #gpg_config is set" do
before do
encryptor.gpg_config = <<-EOF
# a comment
text which will be
\tthe content of a gpg.conf file
EOF
allow(Backup::Config).to receive(:tmp_path).and_return("/Backup/tmp")
encryptor.instance_variable_set(:@tempdirs, [])
end
context "when no errors occur" do
let(:tempdir) { double }
let(:tempfile) { double }
let(:tempfile_path) { double }
let(:path) { double }
before do
expect(encryptor).to receive(:cleanup).never
allow(tempfile).to receive(:path).and_return(tempfile_path)
end
it "should create and return the file path" do
# create temporary directory and convert to a Pathname object
expect(Dir).to receive(:mktmpdir).with(
"backup-gpg_config", "/Backup/tmp"
).and_return(tempdir)
# create temporary file within the temporary directory
expect(Tempfile).to receive(:open).with(
"backup-gpg_config", tempdir
).and_return(tempfile)
# write the gpg_config, stripping leading tabs/spaces
expect(tempfile).to receive(:write).with(
"# a comment\n" \
"text which will be\n" \
"\n" \
"the content of a gpg.conf file\n"
)
# close the file
expect(tempfile).to receive(:close)
# check the config file
expect(encryptor).to receive(:check_gpg_config).with(tempfile_path)
# method returns the tempfile's path
expect(encryptor.send(:setup_gpg_config)).to eq(tempfile_path)
# tempdir added to @tempdirs
expect(encryptor.instance_variable_get(:@tempdirs)[0]).to eq(tempdir)
end
end
context "when errors occur" do
before do
expect(encryptor).to receive(:cleanup) # run before the error is raised
end
it "should wrap and raise the error" do
expect(Dir).to receive(:mktmpdir).and_raise("an error")
expect do
encryptor.send(:setup_gpg_config)
end.to raise_error(proc do |err|
expect(err).to be_an_instance_of(Backup::Encryptor::GPG::Error)
expect(err.message).to match("Error creating temporary file for #gpg_config")
expect(err.message).to match("RuntimeError: an error")
end)
end
end
end
end # describe '#setup_gpg_config'
# #check_gpg_config runs `gpg --options <file> --gpgconf-test`; empty output
# means the config is valid, any output is raised as an error message.
describe "#check_gpg_config" do
let(:cmd_ret) { double }
let(:file_path) { "/path/to/tempfile" }
before do
expect(encryptor).to receive(:utility).with(:gpg).and_return("gpg")
expect(encryptor).to receive(:run).with(
"gpg --options '#{file_path}' --gpgconf-test 2>&1"
).and_return(cmd_ret)
end
context "when no errors are reported" do
before { expect(cmd_ret).to receive(:chomp).and_return("") }
it "should do nothing" do
expect(encryptor.send(:check_gpg_config, file_path)).to be_nil
end
end
context "when errors are reported" do
let(:error_message) { "gpg: /path/to/tempfile:1: invalid option" }
before { expect(cmd_ret).to receive(:chomp).and_return(error_message) }
it "should raise the error message reported" do
expect do
encryptor.send(:check_gpg_config, file_path)
end.to raise_error(RuntimeError, error_message)
end
end
end # describe '#check_gpg_config'
# #mode_options delegates to #symmetric_options / #asymmetric_options based
# on #mode (:both joins them with a space) and memoizes in @mode_options.
# The s_opts/a_opts values are stub return values only — their contents are
# never parsed — but s_opts now spells the real GPG flag `--passphrase-file`
# (hyphenated), matching the '#symmetric_options' specs, instead of the
# former typo `--passphrase_file`.
describe "#mode_options" do
let(:s_opts) { "-c --passphrase-file '/some/file'" }
let(:a_opts) { "-e --trust-model always -r 'identifier'" }
context "while caching the return value in @mode_options" do
before { encryptor.instance_variable_set(:@mode_options, nil) }
context "when #mode is :symmetric" do
it "should return symmetric encryption options" do
# .once: memoization means the helper runs a single time even
# though mode_options is called twice below.
expect(encryptor).to receive(:symmetric_options).once.and_return(s_opts)
expect(encryptor).to receive(:asymmetric_options).never
encryptor.mode = :symmetric
expect(encryptor.send(:mode_options)).to eq(s_opts)
expect(encryptor.send(:mode_options)).to eq(s_opts)
expect(encryptor.instance_variable_get(:@mode_options)).to eq(s_opts)
end
end
context "when #mode is :asymmetric" do
it "should return asymmetric encryption options" do
expect(encryptor).to receive(:symmetric_options).never
expect(encryptor).to receive(:asymmetric_options).once.and_return(a_opts)
encryptor.mode = :asymmetric
expect(encryptor.send(:mode_options)).to eq(a_opts)
expect(encryptor.send(:mode_options)).to eq(a_opts)
expect(encryptor.instance_variable_get(:@mode_options)).to eq(a_opts)
end
end
context "when #mode is :both" do
it "should return both symmetric and asymmetric encryption options" do
expect(encryptor).to receive(:symmetric_options).once.and_return(s_opts)
expect(encryptor).to receive(:asymmetric_options).once.and_return(a_opts)
encryptor.mode = :both
# symmetric options come first, joined by a single space
opts = "#{s_opts} #{a_opts}"
expect(encryptor.send(:mode_options)).to eq(opts)
expect(encryptor.send(:mode_options)).to eq(opts)
expect(encryptor.instance_variable_get(:@mode_options)).to eq(opts)
end
end
end
end # describe '#mode_options'
# #symmetric_options builds "-c --passphrase-file '<path>'" when a usable
# passphrase file exists (either generated from #passphrase or given via
# #passphrase_file); otherwise it logs a warning and returns nil.
# Fixes the example description typo "is no valid" -> "is not valid".
describe "#symmetric_options" do
let(:path) { "/path/to/passphrase/file" }
let(:s_opts) { "-c --passphrase-file '#{path}'" }
context "when setup_passphrase_file returns a path" do
it "should return the options" do
expect(encryptor).to receive(:setup_passphrase_file).and_return(path)
expect(File).to receive(:exist?).with(path).and_return(true)
expect(encryptor.send(:symmetric_options)).to eq(s_opts)
end
end
context "when setup_passphrase_file returns false" do
before do
expect(encryptor).to receive(:setup_passphrase_file).and_return(false)
end
context "and no :passphrase_file is set" do
it "should return nil and log a warning" do
expect(encryptor).to receive(:passphrase_file).and_return(nil)
expect(Backup::Logger).to receive(:warn)
expect(encryptor.send(:symmetric_options)).to be_nil
end
end
context "and a :passphrase_file is set" do
before do
expect(encryptor).to receive(:passphrase_file).twice.and_return(path)
expect(File).to receive(:expand_path).with(path).and_return(path)
end
context "when :passphrase_file exists" do
it "should return the options" do
expect(File).to receive(:exist?).with(path).and_return(true)
expect(encryptor.send(:symmetric_options)).to eq(s_opts)
end
end
context "when :passphrase_file is not valid" do
it "should return nil and log a warning" do
expect(File).to receive(:exist?).with(path).and_return(false)
expect(Backup::Logger).to receive(:warn)
expect(encryptor.send(:symmetric_options)).to be_nil
end
end
end
end
end # describe '#symmetric_options'
describe "#setup_passphrase_file" do
  context "when :passphrase is not set" do
    it "should return false" do
      expect(encryptor).to receive(:passphrase).and_return(nil)
      expect(encryptor.send(:setup_passphrase_file)).to eq(false)
    end
  end

  context "when :passphrase is set" do
    let(:tempdir) { double }
    let(:tempfile) { double }
    let(:tempfile_path) { double }

    before do
      # @tempdirs collects created dirs so they can be cleaned up later
      encryptor.instance_variable_set(:@tempdirs, [])
      allow(Backup::Config).to receive(:tmp_path).and_return("/Backup/tmp")
      allow(encryptor).to receive(:passphrase).and_return("a secret")
      allow(tempfile).to receive(:path).and_return(tempfile_path)
    end

    context "and no errors occur" do
      it "should return the path for the temp file" do
        # creates temporary directory in Config.tmp_path
        expect(Dir).to receive(:mktmpdir)
          .with("backup-gpg_passphrase", "/Backup/tmp")
          .and_return(tempdir)
        # create the temporary file in that temporary directory
        expect(Tempfile).to receive(:open)
          .with("backup-gpg_passphrase", tempdir)
          .and_return(tempfile)
        expect(tempfile).to receive(:write).with("a secret")
        expect(tempfile).to receive(:close)
        expect(encryptor.send(:setup_passphrase_file)).to eq(tempfile_path)
        # adds the temporary directory to @tempdirs
        expect(encryptor.instance_variable_get(:@tempdirs)[0]).to eq(tempdir)
      end
    end

    context "and an error occurs" do
      it "should return false and log a warning" do
        expect(Dir).to receive(:mktmpdir).and_raise("an error")
        # the warning is a GPG::Error wrapping the original RuntimeError
        expect(Backup::Logger).to receive(:warn) do |err|
          expect(err).to be_an_instance_of(Backup::Encryptor::GPG::Error)
          expect(err.message).to match("Error creating temporary passphrase file")
          expect(err.message).to match("RuntimeError: an error")
        end
        expect(encryptor.send(:setup_passphrase_file)).to eq(false)
      end
    end
  end
end # describe '#setup_passphrase_file'
describe "#asymmetric_options" do
  context "when recipients are found" do
    it "should return the options" do
      allow(encryptor).to receive(:user_recipients).and_return(["keyid1", "keyid2"])
      # each recipient becomes its own `-r '<id>'` argument
      expect(encryptor.send(:asymmetric_options)).to eq(
        "-e --trust-model always -r 'keyid1' -r 'keyid2'"
      )
    end
  end

  context "when no recipients are found" do
    # Fixed example description: added missing "and"
    it "should return nil and log a warning" do
      expect(encryptor).to receive(:user_recipients).and_return([])
      expect(Backup::Logger).to receive(:warn)
      expect(encryptor.send(:asymmetric_options)).to be_nil
    end
  end
end # describe '#asymmetric_options'
describe "#user_recipients" do
  context "when an Array of :recipients are given" do
    it "should return the recipient list and cache the result" do
      expect(encryptor).to receive(:recipients).and_return(
        ["key_id1", "key_id2", "key_id3", "key_id4"]
      )
      # every recipient is normalized via #clean_identifier before lookup
      expect(encryptor).to receive(:clean_identifier).with("key_id1").and_return("key_id1")
      expect(encryptor).to receive(:clean_identifier).with("key_id2").and_return("key_id2")
      expect(encryptor).to receive(:clean_identifier).with("key_id3").and_return("key_id3")
      expect(encryptor).to receive(:clean_identifier).with("key_id4").and_return("key_id4")

      # key_id1 and key_id3 will be found in the system
      allow(encryptor).to receive(:system_identifiers).and_return(["key_id1", "key_id3"])
      # key_id2 will be imported (key_id returned)
      allow(encryptor).to receive(:user_keys).and_return("key_id2" => "a public key")
      expect(encryptor).to receive(:import_key)
        .with("key_id2", "a public key")
        .and_return("key_id2")
      # key_id4 will not be found in user_keys, so a warning will be logged.
      # This will return nil into the array, which will be compacted out.
      expect(Backup::Logger).to receive(:warn) do |msg|
        expect(msg).to match(/'key_id4'/)
      end

      encryptor.instance_variable_set(:@user_recipients, nil)
      recipient_list = ["key_id1", "key_id2", "key_id3"]
      expect(encryptor.send(:user_recipients)).to eq(recipient_list)
      # results are cached (expectations would fail if called twice)
      expect(encryptor.send(:user_recipients)).to eq(recipient_list)
      expect(encryptor.instance_variable_get(:@user_recipients)).to eq(recipient_list)
    end
  end

  context "when :recipients is a single recipient, given as a String" do
    it "should return the cleaned identifier in an Array" do
      expect(encryptor).to receive(:recipients).and_return("key_id")
      # the key will be found in system_identifiers
      allow(encryptor).to receive(:system_identifiers).and_return(["key_id"])
      expect(encryptor).to receive(:clean_identifier).with("key_id").and_return("key_id")
      expect(encryptor.send(:user_recipients)).to eq(["key_id"])
    end
  end

  context "when :recipients is not set" do
    it "should return an empty Array" do
      expect(encryptor).to receive(:recipients).and_return(nil)
      expect(encryptor.send(:user_recipients)).to eq([])
    end
  end
end # describe '#user_recipients'
describe "#user_keys" do
  context "when :keys has been set" do
    before do
      expect(encryptor).to receive(:keys).and_return(
        "key1" => :foo, "key2" => :foo, "key3" => :foo
      )
      encryptor.instance_variable_set(:@user_keys, nil)
    end

    it "should return a new Hash of #keys with cleaned identifiers" do
      expect(encryptor).to receive(:clean_identifier).with("key1").and_return("clean_key1")
      expect(encryptor).to receive(:clean_identifier).with("key2").and_return("clean_key2")
      expect(encryptor).to receive(:clean_identifier).with("key3").and_return("clean_key3")
      expect(Backup::Logger).to receive(:warn).never

      cleaned_hash = {
        "clean_key1" => :foo, "clean_key2" => :foo, "clean_key3" => :foo
      }
      expect(encryptor.send(:user_keys)).to eq(cleaned_hash)
      # results are cached (expectations would fail if called twice)
      expect(encryptor.send(:user_keys)).to eq(cleaned_hash)
      expect(encryptor.instance_variable_get(:@user_keys)).to eq(cleaned_hash)
    end

    it "should log a warning if cleaning results in a duplicate identifier" do
      expect(encryptor).to receive(:clean_identifier).with("key1").and_return("clean_key1")
      expect(encryptor).to receive(:clean_identifier).with("key2").and_return("clean_key2")
      # return a duplicate key
      expect(encryptor).to receive(:clean_identifier).with("key3").and_return("clean_key2")
      expect(Backup::Logger).to receive(:warn)

      cleaned_hash = {
        "clean_key1" => :foo, "clean_key2" => :foo
      }
      expect(encryptor.send(:user_keys)).to eq(cleaned_hash)
      # results are cached (expectations would fail if called twice)
      expect(encryptor.send(:user_keys)).to eq(cleaned_hash)
      expect(encryptor.instance_variable_get(:@user_keys)).to eq(cleaned_hash)
    end
  end

  # Fixed context description grammar: "has not be set" -> "has not been set"
  context "when :keys has not been set" do
    before do
      expect(encryptor).to receive(:keys).and_return(nil)
      encryptor.instance_variable_set(:@user_keys, nil)
    end

    it "should return an empty hash" do
      expect(encryptor.send(:user_keys)).to eq({})
    end
  end
end # describe '#user_keys'
describe "#clean_identifier" do
  # Non-email identifiers (key IDs / fingerprints) are normalized to
  # GPG's canonical form: whitespace removed, hex upcased.
  it "should remove all spaces and upcase non-email identifiers" do
    expect(encryptor.send(:clean_identifier, " 9d66 6290 c5f7 ee0f "))
      .to eq("9D666290C5F7EE0F")
  end

  # Even though spaces in an email are technically possible,
  # GPG won't allow anything but /[A-Za-z0-9_\-.]/
  it "should remove all spaces and wrap email addresses in <>" do
    emails = [
      "\t Foo.Bar@example.com ",
      " < Foo-Bar@example.com\t > ",
      "< <Foo_Bar @\texample.com> >"
    ]
    cleaned = [
      "<Foo.Bar@example.com>",
      "<Foo-Bar@example.com>",
      "<Foo_Bar@example.com>"
    ]
    # each raw value should clean to the element at the same index
    expect(emails.map do |email|
      encryptor.send(:clean_identifier, email)
    end).to eq(cleaned)
  end
end # describe '#clean_identifier'
describe "#import_key" do
  # Sample gpg output for a successful `--import`; the long key ID is
  # extracted from the `key 0x...` line.
  let(:gpg_return_ok) do
    <<-EOS.gsub(/^ +/, "")
gpg: keyring `/tmp/.gnupg/secring.gpg' created
gpg: keyring `/tmp/.gnupg/pubring.gpg' created
gpg: /tmp/.gnupg/trustdb.gpg: trustdb created
gpg: key 0x9D666290C5F7EE0F: public key "Backup Test <backup01@foo.com>" imported
gpg: Total number processed: 1
gpg: imported: 1 (RSA: 1)
    EOS
  end

  # Sample gpg output when the given data is not a valid key.
  let(:gpg_return_failed) do
    <<-EOS.gsub(/^ +/, "")
gpg: no valid OpenPGP data found.
gpg: Total number processed: 0
    EOS
  end

  let(:gpg_key) do
    <<-EOS
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)
mI0EUAmiNwEEAKpNP4GVKcjJrTtAh0XKk0NQsId6h/1pzEok2bExkNvD6eSjYRFL
gXY+pNqaEE6cHrg+uQatVQITX8EoVJhQ9Z1mYJB+g62zqOQPe10Spb381O9y4dN/
/ge/yL+/+R2CUrKeNF9nSA24+V4mTSqgo7sTnevDzGj4Srzs76MmkpU=
=TU/B
-----END PGP PUBLIC KEY BLOCK-----
    EOS
  end

  let(:tempfile) { double }

  before do
    allow(Backup::Config).to receive(:tmp_path).and_return("/tmp/path")
    allow(encryptor).to receive(:base_options).and_return("--some 'base options'")
    allow(encryptor).to receive(:utility).and_return("gpg")
    allow(tempfile).to receive(:path).and_return("/tmp/file/path")
  end

  context "when the import is successful" do
    it "should return the long key ID" do
      # the key material is written to a tempfile which gpg reads
      # via `--import`
      expect(Tempfile).to receive(:open).with("backup-gpg_import", "/tmp/path").and_return(tempfile)
      expect(tempfile).to receive(:write).with(<<-EOS)
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)
mI0EUAmiNwEEAKpNP4GVKcjJrTtAh0XKk0NQsId6h/1pzEok2bExkNvD6eSjYRFL
gXY+pNqaEE6cHrg+uQatVQITX8EoVJhQ9Z1mYJB+g62zqOQPe10Spb381O9y4dN/
/ge/yL+/+R2CUrKeNF9nSA24+V4mTSqgo7sTnevDzGj4Srzs76MmkpU=
=TU/B
-----END PGP PUBLIC KEY BLOCK-----
      EOS
      expect(tempfile).to receive(:close)
      expect(encryptor).to receive(:run).with(
        "gpg --some 'base options' --keyid-format 0xlong " \
        "--import '/tmp/file/path' 2>&1"
      ).and_return(gpg_return_ok)
      expect(tempfile).to receive(:delete)
      expect(Backup::Logger).to receive(:warn).never

      expect(encryptor.send(:import_key, "some_identifier", gpg_key))
        .to eq("9D666290C5F7EE0F")
    end
  end

  context "when the import is unsuccessful" do
    it "should return nil and log a warning" do
      expect(Tempfile).to receive(:open).and_raise("an error")
      # failure is reported as a GPG::Error wrapping the original error
      expect(Backup::Logger).to receive(:warn) do |err|
        expect(err).to be_an_instance_of(Backup::Encryptor::GPG::Error)
        expect(err.message).to match("Public key import failed for 'some_identifier'")
        expect(err.message).to match("RuntimeError: an error")
      end
      expect(encryptor.send(:import_key, "some_identifier", "foo")).to be_nil
    end
  end
end # describe '#import_key'
describe "#system_identifiers" do
let(:gpg_output) do
<<-EOS.gsub(/^ +/, "")
tru::1:1343402941:0:3:1:5
pub:-:1024:1:5EFD157FFF9CFEA6:1342808803:::-:::scESC:
fpr:::::::::72E56E48E362BB402B3344045EFD157FFF9CFEA6:
uid:-::::1342808803::3BED8A0A5100FE9028BEB53610247518594B60A8::Backup Test (No Email):
sub:-:1024:1:E6CF1DC860A82E07:1342808803::::::e:
pub:-:1024:1:570CE9221E3DA3E8:1342808841:::-:::scESC:
fpr:::::::::616BBC8409C1AED791F8E6F8570CE9221E3DA3E8:
uid:-::::1342808875::ECFF419EFE4BD3C7CBCCD58FACAD283A9E98FECD::Backup Test <backup04@foo.com>:
uid:-::::1342808841::DDFD072C193BB45587EBA9D19A7DA1BB0E5E8A22::Backup Test <backup03@foo.com>:
sub:-:1024:1:B65C0ADEB804268D:1342808841::::::e:
pub:-:1024:1:54F81C93A7641A16:1342809011:::-:::scESC:
fpr:::::::::71335B9B960CF3A3071535F454F81C93A7641A16:
uid:-::::1342809011::2E5801E9C064C2A165B61EE35D50A5F9B64BF345::Backup Test (other email is <backup06@foo.com>) <backup05@foo.com>:
sub:-:1024:1:5B57BC34628252C7:1342809011::::::e:
pub:-:1024:1:0A5B6CC9581A88CF:1342809049:::-:::scESC:
fpr:::::::::E8C459082544924B8AEA06280A5B6CC9581A88CF:
uid:-::::1342809470::4A404F9ED6780E7E0E02A7F7607828E648789058::Backup Test <backup08@foo.com>:
uid:-::::::9785ADEBBBCE94CE0FF25774F610F2B11C839E9B::Backup Test <backup07@foo.com>:
uid:r::::::4AD074B1857819EFA105DFB6C464600AA451BF18::Backup Test <backup09@foo.com>:
sub:e:1024:1:60A420E39B979B06:1342809049:1342895611:::::e:
sub:-:1024:1:A05786E7AD5B8352:1342809166::::::e:
pub:i:1024:1:4A83569F4E5E8D8A:1342810132:::-:::esca:
fpr:::::::::FFEAD1DB201FB214873E73994A83569F4E5E8D8A:
uid:-::::::3D41A10AF2437C8C5BF6050FA80FE20CE30769BF::Backup Test <backup10@foo.com>:
sub:i:1024:1:662F18DB92C8DFD8:1342810132::::::e:
pub:r:1024:1:15ECEF9ECA136FFF:1342810387:::-:::sc:
fpr:::::::::3D1CBF3FEFCE5ABB728922F615ECEF9ECA136FFF:
uid:r::::1342810387::296434E1662AE0B2FF8E93EC3BF3AFE24514D0E0::Backup Test <backup11@foo.com>:
sub:r:1024:1:097A79EB1F7D4619:1342810387::::::e:
sub:r:1024:1:39093E8E9057625E:1342810404::::::e:
pub:e:1024:1:31920687A8A7941B:1342810629:1342897029::-:::sc:
fpr:::::::::03B399CBC2F4B61019D14BCD31920687A8A7941B:
uid:e::::1342810629::ED8151565B25281CB92DD1E534701E660126CB0C::Backup Test <backup12@foo.com>:
sub:e:1024:1:AEF89BEE95042A0F:1342810629:1342897029:::::e:
pub:-:1024:1:E3DBAEC3FEEA03E2:1342810728:::-:::scSC:
fpr:::::::::444B0870D985CF70BBB7F4DCE3DBAEC3FEEA03E2:
uid:-::::1342810796::4D1B8CC29335BF79232CA71210F75CF80318B06A::Backup Test <backup13@foo.com>:
uid:-::::1342810728::F1422363E8DC1EC3076906505CE66855BB44CAB7::Backup Test <backup14@foo.com>:
sub:e:1024:1:C95DED316504D17C:1342810728:1342897218:::::e:
pub:u:1024:1:027B83DB8A82B9CB:1343402840:::u:::scESC:
fpr:::::::::A20D90150CE4E5F851AD3A9D027B83DB8A82B9CB:
uid:u::::1343402840::307F1E025E8BEB7DABCADC353291184AD493A28E::Backup Test <backup01@foo.com>:
sub:u:1024:1:EF31D36414FD8B2B:1343402840::::::e:
pub:u:1024:1:4CEA6442A4A57A76:1343402867:::u:::scESC:
fpr:::::::::5742EAFB4CF38014B474671E4CEA6442A4A57A76:
uid:u::::1343402932::C220D9FF5C9652AA31D3CE0487D88EFF291FA1ED::Backup Test:
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | true |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/encryptor/open_ssl_spec.rb | spec/encryptor/open_ssl_spec.rb | require "spec_helper"
describe Backup::Encryptor::OpenSSL do
let(:encryptor) do
Backup::Encryptor::OpenSSL.new do |e|
e.password = "mypassword"
e.password_file = "/my/password/file"
e.base64 = true
end
end
it "should be a subclass of Encryptor::Base" do
expect(Backup::Encryptor::OpenSSL
.superclass).to eq(Backup::Encryptor::Base)
end
describe "#initialize" do
  after { Backup::Encryptor::OpenSSL.clear_defaults! }

  it "should load pre-configured defaults" do
    expect_any_instance_of(Backup::Encryptor::OpenSSL).to receive(:load_defaults!)
    encryptor
  end

  context "when no pre-configured defaults have been set" do
    it "should use the values given" do
      expect(encryptor.password).to eq("mypassword")
      expect(encryptor.password_file).to eq("/my/password/file")
      expect(encryptor.base64).to eq(true)
    end

    it "should use default values if none are given" do
      encryptor = Backup::Encryptor::OpenSSL.new
      expect(encryptor.password).to be_nil
      expect(encryptor.password_file).to be_nil
      expect(encryptor.base64).to eq(false)
    end
  end # context 'when no pre-configured defaults have been set'

  context "when pre-configured defaults have been set" do
    before do
      Backup::Encryptor::OpenSSL.defaults do |e|
        e.password = "default_password"
        e.password_file = "/default/password/file"
        e.base64 = "default_base64"
      end
    end

    it "should use pre-configured defaults" do
      encryptor = Backup::Encryptor::OpenSSL.new
      # BUG FIX: these three lines were plain assignments, so the example
      # previously asserted nothing. Verify the defaults were applied.
      expect(encryptor.password).to eq("default_password")
      expect(encryptor.password_file).to eq("/default/password/file")
      expect(encryptor.base64).to eq("default_base64")
    end

    it "should override pre-configured defaults" do
      expect(encryptor.password).to eq("mypassword")
      expect(encryptor.password_file).to eq("/my/password/file")
      expect(encryptor.base64).to eq(true)
    end
  end # context 'when pre-configured defaults have been set'
end # describe '#initialize'
describe "#encrypt_with" do
  it "should yield the encryption command and extension" do
    expect(encryptor).to receive(:log!)
    expect(encryptor).to receive(:utility).with(:openssl).and_return("openssl_cmd")
    expect(encryptor).to receive(:options).and_return("cmd_options")

    # yields "<openssl binary> <options>" plus the ".enc" suffix that the
    # Packager appends to the package filename
    encryptor.encrypt_with do |command, ext|
      expect(command).to eq("openssl_cmd cmd_options")
      expect(ext).to eq(".enc")
    end
  end
end
describe "#options" do
  let(:encryptor) { Backup::Encryptor::OpenSSL.new }

  context "with no options given" do
    it "should always include cipher command" do
      expect(encryptor.send(:options)).to match(/^aes-256-cbc\s.*$/)
    end

    it "should add #password option whenever #password_file not given" do
      # with no password set, `-k ''` passes an empty password string
      expect(encryptor.send(:options)).to eq(
        "aes-256-cbc -pbkdf2 -iter 310000 -k ''"
      )
    end
  end

  context "when #password_file is given" do
    before { encryptor.password_file = "password_file" }

    it "should add #password_file option" do
      expect(encryptor.send(:options)).to eq(
        "aes-256-cbc -pbkdf2 -iter 310000 -pass file:password_file"
      )
    end

    # :password_file takes precedence over :password
    it "should add #password_file option even when #password given" do
      encryptor.password = "password"
      expect(encryptor.send(:options)).to eq(
        "aes-256-cbc -pbkdf2 -iter 310000 -pass file:password_file"
      )
    end
  end

  context "when #password is given (without #password_file given)" do
    before { encryptor.password = %q(pa\ss'w"ord) }

    it "should include the given password in the #password option" do
      # the password is shell-escaped before being placed after -k
      expect(encryptor.send(:options)).to eq(
        %q(aes-256-cbc -pbkdf2 -iter 310000 -k pa\\\ss\'w\"ord)
      )
    end
  end

  context "when #base64 is true" do
    before { encryptor.base64 = true }

    it "should add the option" do
      expect(encryptor.send(:options)).to eq(
        "aes-256-cbc -pbkdf2 -iter 310000 -base64 -k ''"
      )
    end
  end
end # describe '#options'
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/spec/encryptor/base_spec.rb | spec/encryptor/base_spec.rb | require "spec_helper"
describe Backup::Encryptor::Base do
  let(:base) { Backup::Encryptor::Base.new }

  # Base mixes in the shared helper modules used by all encryptors.
  it "should include Utilities::Helpers" do
    expect(Backup::Encryptor::Base
      .include?(Backup::Utilities::Helpers)).to eq(true)
  end

  it "should include Config::Helpers" do
    expect(Backup::Encryptor::Base
      .include?(Backup::Config::Helpers)).to eq(true)
  end

  describe "#initialize" do
    it "should load defaults" do
      # load_defaults! (from Config::Helpers) applies pre-configured defaults
      expect_any_instance_of(Backup::Encryptor::Base).to receive(:load_defaults!)
      base
    end
  end

  describe "#encryptor_name" do
    it "should return class name with Backup namespace removed" do
      expect(base.send(:encryptor_name)).to eq("Encryptor::Base")
    end
  end

  describe "#log!" do
    it "should log a message" do
      expect(base).to receive(:encryptor_name).and_return("Encryptor Name")
      expect(Backup::Logger).to receive(:info).with(
        "Using Encryptor Name to encrypt the archive."
      )
      base.send(:log!)
    end
  end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup.rb | lib/backup.rb | # Load Ruby Core Libraries
require "time"
require "fileutils"
require "tempfile"
require "syslog"
require "yaml"
require "etc"
require "forwardable"
require "thread"
require "open4"
require "thor"
require "shellwords"
require "excon"
# Include response.inspect in error messages.
Excon.defaults[:debug_response] = true
# Excon should not retry failed requests. We handle that.
Excon.defaults[:middlewares].delete(Excon::Middleware::Idempotent)
##
# The Backup Ruby Gem
module Backup
  ##
  # Backup's internal paths
  LIBRARY_PATH = File.join(File.dirname(__FILE__), "backup")
  STORAGE_PATH = File.join(LIBRARY_PATH, "storage")
  SYNCER_PATH = File.join(LIBRARY_PATH, "syncer")
  DATABASE_PATH = File.join(LIBRARY_PATH, "database")
  COMPRESSOR_PATH = File.join(LIBRARY_PATH, "compressor")
  ENCRYPTOR_PATH = File.join(LIBRARY_PATH, "encryptor")
  NOTIFIER_PATH = File.join(LIBRARY_PATH, "notifier")
  TEMPLATE_PATH = File.expand_path("../../templates", __FILE__)

  ##
  # Storage classes, registered lazily via Module#autoload so each
  # constant's file is only required on first reference.
  module Storage
    {
      Base: "base",
      Cycler: "cycler",
      S3: "s3",
      CloudFiles: "cloud_files",
      Ninefold: "ninefold",
      Dropbox: "dropbox",
      FTP: "ftp",
      SFTP: "sftp",
      SCP: "scp",
      RSync: "rsync",
      Local: "local",
      Qiniu: "qiniu"
    }.each { |const, file| autoload const, File.join(STORAGE_PATH, file) }
  end

  ##
  # Syncer classes, registered lazily.
  module Syncer
    autoload :Base, File.join(SYNCER_PATH, "base")

    module Cloud
      {
        Base: "base",
        LocalFile: "local_file",
        CloudFiles: "cloud_files",
        S3: "s3"
      }.each { |const, file| autoload const, File.join(SYNCER_PATH, "cloud", file) }
    end

    module RSync
      {
        Base: "base",
        Local: "local",
        Push: "push",
        Pull: "pull"
      }.each { |const, file| autoload const, File.join(SYNCER_PATH, "rsync", file) }
    end
  end

  ##
  # Database classes, registered lazily.
  module Database
    {
      Base: "base",
      MySQL: "mysql",
      PostgreSQL: "postgresql",
      MongoDB: "mongodb",
      Redis: "redis",
      Riak: "riak",
      OpenLDAP: "openldap",
      SQLite: "sqlite"
    }.each { |const, file| autoload const, File.join(DATABASE_PATH, file) }
  end

  ##
  # Compressor classes, registered lazily.
  module Compressor
    {
      Base: "base",
      Gzip: "gzip",
      Bzip2: "bzip2",
      Custom: "custom"
    }.each { |const, file| autoload const, File.join(COMPRESSOR_PATH, file) }
  end

  ##
  # Encryptor classes, registered lazily.
  module Encryptor
    {
      Base: "base",
      OpenSSL: "open_ssl",
      GPG: "gpg"
    }.each { |const, file| autoload const, File.join(ENCRYPTOR_PATH, file) }
  end

  ##
  # Notifier classes, registered lazily.
  module Notifier
    {
      Base: "base",
      Mail: "mail",
      Twitter: "twitter",
      Campfire: "campfire",
      Prowl: "prowl",
      Hipchat: "hipchat",
      PagerDuty: "pagerduty",
      Pushover: "pushover",
      Slack: "slack",
      HttpPost: "http_post",
      Nagios: "nagios",
      FlowDock: "flowdock",
      Zabbix: "zabbix",
      DataDog: "datadog",
      Ses: "ses",
      Command: "command"
    }.each { |const, file| autoload const, File.join(NOTIFIER_PATH, file) }
  end

  ##
  # Eagerly require Backup's core components, in dependency order.
  %w[
    errors
    logger
    utilities
    archive
    binder
    cleaner
    model
    config
    cli
    package
    packager
    pipeline
    splitter
    template
    version
  ].each { |lib| require File.join(LIBRARY_PATH, lib) }
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/packager.rb | lib/backup/packager.rb | module Backup
module Packager
  class Error < Backup::Error; end

  class << self
    include Utilities::Helpers

    ##
    # Build the final package for the backup model.
    #
    # Assembles a Pipeline: `tar` the model's temporary backup folder,
    # optionally piped through the model's Encryptor and/or Splitter,
    # and runs it. Raises Error (including the pipeline's collected
    # error messages) if any command in the pipeline fails.
    def package!(model)
      @package = model.package
      @encryptor = model.encryptor
      @splitter = model.splitter
      @pipeline = Pipeline.new

      Logger.info "Packaging the backup files..."
      procedure.call

      if @pipeline.success?
        Logger.info "Packaging Complete!"
      else
        raise Error, "Failed to Create Backup Package\n" +
          @pipeline.error_messages
      end
    end

    private

    ##
    # Builds a chain of nested Procs which adds each command to a Pipeline
    # needed to package the final command to package the backup.
    # This is done so that the Encryptor and Splitter have the ability
    # to perform actions before and after the final command is executed.
    # No Encryptors currently utilize this, however the Splitter does.
    def procedure
      stack = []

      ##
      # Initial `tar` command to package the temporary backup folder.
      # The command's output will then be either piped to the Encryptor
      # or the Splitter (if no Encryptor), or through `cat` into the final
      # output file if neither are configured.
      @pipeline.add(
        "#{utility(:tar)} -cf - " \
        "-C '#{Config.tmp_path}' '#{@package.trigger}'",
        tar_success_codes
      )

      ##
      # If an Encryptor was configured, it will be called first
      # to add the encryption utility command to be piped through,
      # and amend the final package extension.
      # It's output will then be either piped into a Splitter,
      # or through `cat` into the final output file.
      if @encryptor
        stack << lambda do
          @encryptor.encrypt_with do |command, ext|
            @pipeline << command
            @package.extension << ext
            stack.shift.call
          end
        end
      end

      ##
      # If a Splitter was configured, the `split` utility command will be
      # added to the Pipeline to split the final output into multiple files.
      # Once the Proc executing the Pipeline has completed and returns back
      # to the Splitter, it will check the final output files to determine
      # if the backup was indeed split.
      # If so, it will set the package's chunk_suffixes. If not, it will
      # remove the '-aa' suffix from the only file created by `split`.
      #
      # If no Splitter was configured, the final file output will be
      # piped through `cat` into the final output file.
      stack <<
        if @splitter
          lambda do
            @splitter.split_with do |command|
              @pipeline << command
              stack.shift.call
            end
          end
        else
          lambda do
            outfile = File.join(Config.tmp_path, @package.basename)
            @pipeline << "#{utility(:cat)} > #{outfile}"
            stack.shift.call
          end
        end

      ##
      # Last Proc to be called runs the Pipeline the procedure built.
      # Once complete, the call stack will unwind back through the
      # preceeding Procs in the stack (if any)
      stack << -> { @pipeline.run }

      # return the first Proc; calling it drives the rest of the chain
      stack.shift
    end

    # GNU tar may exit 1 for non-fatal warnings (treated as success);
    # other tar implementations only succeed with 0.
    def tar_success_codes
      gnu_tar? ? [0, 1] : [0]
    end
  end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/pipeline.rb | lib/backup/pipeline.rb | module Backup
class Pipeline
  class Error < Backup::Error; end

  include Utilities::Helpers

  # stderr: accumulated STDERR output from the last #run.
  # errors: SystemCallError objects for each command whose exit status
  #         was not in its list of success codes.
  attr_reader :stderr, :errors

  def initialize
    @commands = []
    @success_codes = []
    @errors = []
    @stderr = ""
  end

  ##
  # Adds a command to be executed in the pipeline.
  # Each command will be run in the order in which it was added,
  # with it's output being piped to the next command.
  #
  # +success_codes+ must be an Array of Integer exit codes that will
  # be considered successful for the +command+.
  def add(command, success_codes)
    @commands << command
    @success_codes << success_codes
  end

  ##
  # Commands added using this method will only be considered successful
  # if their exit status is 0.
  #
  # Use #add if successful exit status codes need to be specified.
  def <<(command)
    add(command, [0])
  end

  ##
  # Runs the command line from `#pipeline` and collects STDOUT/STDERR.
  # STDOUT is then parsed to determine the exit status of each command.
  # For each command with a non-zero exit status, a SystemCallError is
  # created and added to @errors. All STDERR output is set in @stderr.
  #
  # Note that there is no accumulated STDOUT from the commands themselves.
  # Also, the last command should not attempt to write to STDOUT.
  # Any output on STDOUT from the final command will be sent to STDERR.
  # This in itself will not cause #run to fail, but will log warnings
  # when all commands exit with non-zero status.
  #
  # Use `#success?` to determine if all commands in the pipeline succeeded.
  # If `#success?` returns `false`, use `#error_messages` to get an error report.
  def run
    Open4.popen4(pipeline) do |_pid, _stdin, stdout, stderr|
      # each entry is "index|exitstatus"; the sort is cosmetic only,
      # since every entry carries its own command index
      pipestatus = stdout.read.delete("\n").split(":").sort
      pipestatus.each do |status|
        index, exitstatus = status.split("|").map(&:to_i)
        next if @success_codes[index].include?(exitstatus)

        command = command_name(@commands[index])
        @errors << SystemCallError.new(
          "'#{command}' returned exit code: #{exitstatus}", exitstatus
        )
      end
      @stderr = stderr.read.strip
    end
    Logger.warn(stderr_messages) if success? && stderr_messages
  # NOTE(review): rescuing Exception (not StandardError) also wraps
  # SignalException/SystemExit as Pipeline::Error — presumably intentional
  # so any failure surfaces as a packaging error; confirm before narrowing.
  rescue Exception => err
    raise Error.wrap(err, "Pipeline failed to execute")
  end

  def success?
    @errors.empty?
  end

  ##
  # Returns a multi-line String, reporting all STDERR messages received
  # from the commands in the pipeline (if any), along with the SystemCallError
  # (Errno) message for each command which had a non-zero exit status.
  def error_messages
    @error_messages ||= (stderr_messages || "") +
      "The following system errors were returned:\n" +
      @errors.map { |err| "#{err.class}: #{err.message}" }.join("\n")
  end

  private

  ##
  # Each command is added as part of the pipeline, grouped with an `echo`
  # command to pass along the command's index in @commands and it's exit status.
  # The command's STDERR is redirected to FD#4, and the `echo` command to
  # report the "index|exit status" is redirected to FD#3.
  # Each command's STDOUT will be connected to the STDIN of the next subshell.
  # The entire pipeline is run within a container group, which redirects
  # FD#3 to STDOUT and FD#4 to STDERR so these can be collected.
  # FD#1 is redirected to STDERR so that any output from the final command
  # on STDOUT will generate warnings, since the final command should not
  # attempt to write to STDOUT, as this would interfere with collecting
  # the exit statuses.
  #
  # There is no guarantee as to the order of this output, which is why the
  # command's index in @commands is passed along with it's exit status.
  # And, if multiple commands output messages on STDERR, those messages
  # may be interleaved. Interleaving of the "index|exit status" outputs
  # should not be an issue, given the small byte size of the data being written.
  def pipeline
    parts = []
    @commands.each_with_index do |command, index|
      parts << %({ #{command} 2>&4 ; echo "#{index}|$?:" >&3 ; })
    end
    %({ #{parts.join(" | ")} } 3>&1 1>&2 4>&2)
  end

  # Lazily-built report of the raw STDERR output; false when there was
  # none, so it can be used directly in boolean context.
  def stderr_messages
    @stderr_messages ||= @stderr.empty? ? false : <<-EOS.gsub(/^ +/, " ")
Pipeline STDERR Messages:
(Note: may be interleaved if multiple commands returned error messages)
#{@stderr}
    EOS
  end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/version.rb | lib/backup/version.rb | module Backup
VERSION = "5.0.0.beta.3"
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/logger.rb | lib/backup/logger.rb | require "backup/logger/console"
require "backup/logger/logfile"
require "backup/logger/syslog"
require "backup/logger/fog_adapter"
module Backup
class Logger
class Config
  # Pairs a logger class with its Options instance; #enabled? delegates
  # to the options so only active loggers are instantiated.
  class Logger < Struct.new(:class, :options)
    def enabled?
      options.enabled?
    end
  end

  # The object evaluated by `Backup::Logger.configure` blocks: exposes
  # the `console`, `logfile` and `syslog` option structs plus the
  # `ignore_warning` helper, which registers a String/Regexp matcher.
  class DSL < Struct.new(:ignores, :console, :logfile, :syslog)
    def ignore_warning(str_or_regexp)
      ignores << str_or_regexp
    end
  end

  attr_reader :ignores, :loggers, :dsl

  def initialize
    @ignores = []
    @loggers = [
      Logger.new(Console, Console::Options.new),
      Logger.new(Logfile, Logfile::Options.new),
      Logger.new(Syslog, Syslog::Options.new)
    ]
    # splat the three option structs into the DSL in the same order
    # as its struct members (console, logfile, syslog)
    @dsl = DSL.new(ignores, *loggers.map(&:options))
  end
end
##
# All messages sent to the Logger are stored in Logger.messages
# and sent to all enabled logger's #log method as Message objects.
class Message < Struct.new(:time, :level, :lines)
##
# Returns an Array of the message lines in the following format:
#
# [YYYY/MM/DD HH:MM:SS][level] message line text
def formatted_lines
timestamp = time.strftime("%Y/%m/%d %H:%M:%S")
lines.map { |line| "[#{timestamp}][#{level}] #{line}" }
end
def matches?(ignores)
text = lines.join("\n")
ignores.any? do |obj|
obj.is_a?(Regexp) ? text.match(obj) : text.include?(obj)
end
end
end
class << self
extend Forwardable
def_delegators :logger,
:start!, :abort!, :info, :warn, :error,
:messages, :has_warnings?, :has_errors?
##
# Allows the Logger to be configured.
#
# # shown with their default values
# Backup::Logger.configure do
# # Console options:
# console.quiet = false
#
# # Logfile options:
# logfile.enabled = true
# logfile.log_path = 'log'
# logfile.max_bytes = 500_000
#
# # Syslog options:
# syslog.enabled = false
# syslog.ident = 'backup'
# syslog.options = Syslog::LOG_PID
# syslog.facility = Syslog::LOG_LOCAL0
# syslog.info = Syslog::LOG_INFO
# syslog.warn = Syslog::LOG_WARNING
# syslog.error = Syslog::LOG_ERR
#
# # Ignore Warnings:
# # Converts :warn level messages to level :info
# ignore_warning 'that contains this string'
# ignore_warning /that matches this regexp/
# end
#
# See each Logger's Option class for details.
# @see Console::Options
# @see Logfile::Options
# @see Syslog::Options
def configure(&block)
config.dsl.instance_eval(&block)
end
##
# Called after each backup model/trigger has been performed.
def clear!
@logger = nil
logger.start!
end
private
def config
@config ||= Config.new
end
def logger
@logger ||= new(config)
end
def reset!
@config = @logger = nil
end
end
MUTEX = Mutex.new
##
# Returns an Array of Message objects for all logged messages received.
# These are used to attach log files to Mail notifications.
attr_reader :messages
def initialize(config)
@config = config
@messages = []
@loggers = []
@has_warnings = @has_errors = false
end
##
# Sends a message to the Logger using the specified log level.
# +obj+ may be any Object that responds to #to_s (i.e. an Exception)
[:info, :warn, :error].each do |level|
define_method level do |obj|
MUTEX.synchronize { log(obj, level) }
end
end
##
# Returns true if any +:warn+ level messages have been received.
def has_warnings?
@has_warnings
end
##
# Returns true if any +:error+ level messages have been received.
def has_errors?
@has_errors
end
##
# The Logger is available as soon as Backup is loaded, and stores all
# messages it receives. Since the Logger may be configured via the
# command line and/or the user's +config.rb+, no messages are sent
# until configuration can be completed. (see CLI#perform)
#
# Once configuration is completed, this method is called to activate
# all enabled loggers and send them any messages that have been received
# up to this point. From this point onward, these loggers will be sent
# all messages as soon as they're received.
def start!
@config.loggers.each do |logger|
@loggers << logger.class.new(logger.options) if logger.enabled?
end
messages.each do |message|
@loggers.each { |logger| logger.log(message) }
end
end
##
# If errors are encountered by Backup::CLI while preparing to perform
# the backup jobs, this method is called to dump all messages to the
# console before Backup exits.
def abort!
console = Console.new
console.log(messages.shift) until messages.empty?
end
private
def log(obj, level)
message = Message.new(Time.now.utc, level, obj.to_s.split("\n"))
if message.level == :warn && message.matches?(@config.ignores)
message.level = :info
end
@has_warnings ||= message.level == :warn
@has_errors ||= message.level == :error
messages << message
@loggers.each { |logger| logger.log(message) }
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/errors.rb | lib/backup/errors.rb | module Backup
# Provides cascading errors with formatted messages.
# See the specs for details.
module NestedExceptions
def self.included(klass)
klass.extend(Module.new do
def wrap(wrapped_exception, msg = nil)
new(msg, wrapped_exception)
end
end)
end
def initialize(obj = nil, wrapped_exception = nil)
@wrapped_exception = wrapped_exception
msg = (obj.respond_to?(:to_str) ? obj.to_str : obj.to_s)
.gsub(/^ */, " ").strip
msg = clean_name(self.class.name) + (msg.empty? ? "" : ": #{msg}")
if wrapped_exception
msg << "\n--- Wrapped Exception ---\n"
class_name = clean_name(wrapped_exception.class.name)
msg << class_name + ": " unless
wrapped_exception.message.start_with? class_name
msg << wrapped_exception.message
end
super(msg)
set_backtrace(wrapped_exception.backtrace) if wrapped_exception
end
def exception(obj = nil)
return self if obj.nil? || equal?(obj)
ex = self.class.new(obj, @wrapped_exception)
ex.set_backtrace(backtrace) unless ex.backtrace
ex
end
private
def clean_name(name)
name.sub(/^Backup::/, "")
end
end
class Error < StandardError
include NestedExceptions
end
class FatalError < Exception
include NestedExceptions
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/package.rb | lib/backup/package.rb | module Backup
class Package
##
# The time when the backup initiated (in format: 2011.02.20.03.29.59)
attr_accessor :time
##
# The trigger which initiated the backup process
attr_reader :trigger
##
# Extension for the final archive file(s)
attr_accessor :extension
##
# Set by the Splitter if the final archive was "chunked"
attr_accessor :chunk_suffixes
##
# If true, the Cycler will not attempt to remove the package when Cycling.
attr_accessor :no_cycle
##
# The version of Backup used to create the package
attr_reader :version
def initialize(model)
@trigger = model.trigger
@extension = "tar"
@chunk_suffixes = []
@no_cycle = false
@version = VERSION
end
def filenames
if chunk_suffixes.empty?
[basename]
else
chunk_suffixes.map { |suffix| "#{basename}-#{suffix}" }
end
end
def basename
"#{trigger}.#{extension}"
end
def time_as_object
Time.strptime(time, "%Y.%m.%d.%H.%M.%S")
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/utilities.rb | lib/backup/utilities.rb | module Backup
module Utilities
class Error < Backup::Error; end
UTILITIES_NAMES = %w[
tar cat split sudo chown hostname
gzip bzip2
mongo mongodump mysqldump innobackupex
pg_dump pg_dumpall redis-cli riak-admin
gpg openssl
rsync ssh
sendmail exim
send_nsca
zabbix_sender
].freeze
# @api private
class DSL
def initialize(utils)
@utilities = utils
end
# Helper methods to allow users to set the path for all utilities in the
# .configure block.
#
# Utility names with dashes (`redis-cli`) will be set using method calls
# with an underscore (`redis_cli`).
UTILITIES_NAMES.each do |util_name|
define_method util_name.tr("-", "_") do |raw_path|
path = File.expand_path(raw_path)
unless File.executable?(path)
raise Utilities::Error, <<-EOS
The path given for '#{util_name}' was not found or not executable.
Path was: #{path}
EOS
end
@utilities.utilities[util_name] = path
end
end
# Allow users to set the +tar+ distribution if needed. (:gnu or :bsd)
def tar_dist(val)
Utilities.tar_dist(val)
end
end
class << self
##
# Configure the path to system utilities used by Backup.
#
# Backup will attempt to locate any required system utilities using a
# +which+ command call. If a utility can not be found, or you need to
# specify an alternate path for a utility, you may do so in your
# +config.rb+ file using this method.
#
# Backup supports both GNU and BSD utilities.
# While Backup uses these utilities in a manner compatible with either
# version, the +tar+ utility requires some special handling with respect
# to +Archive+s. Backup will attempt to detect if the +tar+ command
# found (or set here) is GNU or BSD. If for some reason this fails,
# this may be set using the +tar_dist+ command shown below.
#
# Backup::Utilities.configure do
# # General Utilites
# tar '/path/to/tar'
# tar_dist :gnu # or :bsd
# cat '/path/to/cat'
# split '/path/to/split'
# sudo '/path/to/sudo'
# chown '/path/to/chown'
# hostname '/path/to/hostname'
#
# # Compressors
# gzip '/path/to/gzip'
# bzip2 '/path/to/bzip2'
#
# # Database Utilities
# mongo '/path/to/mongo'
# mongodump '/path/to/mongodump'
# mysqldump '/path/to/mysqldump'
# pg_dump '/path/to/pg_dump'
# pg_dumpall '/path/to/pg_dumpall'
# redis_cli '/path/to/redis-cli'
# riak_admin '/path/to/riak-admin'
#
# # Encryptors
# gpg '/path/to/gpg'
# openssl '/path/to/openssl'
#
# # Syncer and Storage
# rsync '/path/to/rsync'
# ssh '/path/to/ssh'
#
# # Notifiers
# sendmail '/path/to/sendmail'
# exim '/path/to/exim'
# send_nsca '/path/to/send_nsca'
# zabbix_sender '/path/to/zabbix_sender'
# end
#
# These paths may be set using absolute paths, or relative to the
# working directory when Backup is run.
def configure(&block)
DSL.new(self).instance_eval(&block)
end
def tar_dist(val)
# the acceptance tests need to be able to reset this to nil
@gnu_tar = val.nil? ? nil : val == :gnu
end
def gnu_tar?
return @gnu_tar unless @gnu_tar.nil?
@gnu_tar = !!run("#{utility(:tar)} --version").match(/GNU/)
end
def utilities
@utilities ||= {}
end
private
##
# Returns the full path to the specified utility.
# Raises an error if utility can not be found in the system's $PATH
def utility(name)
name = name.to_s.strip
raise Error, "Utility Name Empty" if name.empty?
utilities[name] ||= `which '#{name}' 2>/dev/null`.chomp
raise Error, <<-EOS if utilities[name].empty?
Could not locate '#{name}'.
Make sure the specified utility is installed
and available in your system's $PATH, or specify it's location
in your 'config.rb' file using Backup::Utilities.configure
EOS
utilities[name].dup
end
##
# Returns the name of the command name from the given command line.
# This is only used to simplify log messages.
def command_name(command)
parts = []
command = command.split(" ")
command.shift while command[0].to_s.include?("=")
parts << command.shift.split("/")[-1]
if parts[0] == "sudo"
until command.empty?
part = command.shift
if part.include?("/")
parts << part.split("/")[-1]
break
else
parts << part
end
end
end
parts.join(" ")
end
##
# Runs a system command
#
# All messages generated by the command will be logged.
# Messages on STDERR will be logged as warnings.
#
# If the command fails to execute, or returns a non-zero exit status
# an Error will be raised.
#
# Returns STDOUT
def run(command)
name = command_name(command)
Logger.info "Running system utility '#{name}'..."
begin
out = ""
err = ""
ps = Open4.popen4(command) do |_pid, stdin, stdout, stderr|
stdin.close
out = stdout.read.strip
err = stderr.read.strip
end
rescue Exception => e
raise Error.wrap(e, "Failed to execute '#{name}'")
end
unless ps.success?
raise Error, <<-EOS
'#{name}' failed with exit status: #{ps.exitstatus}
STDOUT Messages: #{out.empty? ? "None" : "\n#{out}"}
STDERR Messages: #{err.empty? ? "None" : "\n#{err}"}
EOS
end
unless out.empty?
Logger.info(out.lines.map { |line| "#{name}:STDOUT: #{line}" }.join)
end
unless err.empty?
Logger.warn(err.lines.map { |line| "#{name}:STDERR: #{line}" }.join)
end
out
end
def reset!
utilities.clear
@gnu_tar = nil
end
end
# Allows these utility methods to be included in other classes,
# while allowing them to be stubbed in spec_helper for all specs.
module Helpers
[:utility, :command_name, :run].each do |name|
define_method name do |arg|
Utilities.send(name, arg)
end
private name
end
private
def gnu_tar?
Utilities.gnu_tar?
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/splitter.rb | lib/backup/splitter.rb | module Backup
class Splitter
include Utilities::Helpers
attr_reader :package, :chunk_size, :suffix_length
def initialize(model, chunk_size, suffix_length)
@package = model.package
@chunk_size = chunk_size
@suffix_length = suffix_length
end
##
# This is called as part of the procedure used to build the final
# backup package file(s). It yields it's portion of the command line
# for this procedure, which will split the data being piped into it
# into multiple files, based on the @chunk_size, using a suffix length as
# specified by @suffix_length.
# Once the packaging procedure is complete, it will return and
# @package.chunk_suffixes will be set based on the resulting files.
def split_with
Logger.info "Splitter configured with a chunk size of #{chunk_size}MB " \
"and suffix length of #{suffix_length}."
yield split_command
after_packaging
end
private
##
# The `split` command reads from $stdin and will store it's output in
# multiple files, based on @chunk_size and @suffix_length, using the full
# path to the final @package.basename, plus a '-' separator as the `prefix`.
def split_command
"#{utility(:split)} -a #{suffix_length} -b #{chunk_size}m - " \
"'#{File.join(Config.tmp_path, package.basename + "-")}'"
end
##
# Finds the resulting files from the packaging procedure
# and stores an Array of suffixes used in @package.chunk_suffixes.
# If the @chunk_size was never reached and only one file
# was written, that file will be suffixed with '-aa' (or -a; -aaa; etc
# depending upon suffix_length). In which case, it will simply
# remove the suffix from the filename.
def after_packaging
suffixes = chunk_suffixes
first_suffix = "a" * suffix_length
if suffixes == [first_suffix]
FileUtils.mv(
File.join(Config.tmp_path, "#{package.basename}-#{first_suffix}"),
File.join(Config.tmp_path, package.basename)
)
else
package.chunk_suffixes = suffixes
end
end
##
# Returns an array of suffixes for each chunk, in alphabetical order.
# For example: [aa, ab, ac, ad, ae] or [aaa, aab, aac aad]
def chunk_suffixes
chunks.map { |chunk| File.extname(chunk).split("-").last }.sort
end
##
# Returns an array of full paths to the backup chunks.
# Chunks are sorted in alphabetical order.
def chunks
Dir[File.join(Config.tmp_path, package.basename + "-*")].sort
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/cleaner.rb | lib/backup/cleaner.rb | module Backup
module Cleaner
class Error < Backup::Error; end
class << self
##
# Logs warnings if any temporary files still exist
# from the last time this model/trigger was run,
# then removes the files.
def prepare(model)
messages = []
packaging_folder = File.join(Config.tmp_path, model.trigger)
if File.exist?(packaging_folder)
messages << <<-EOS
The temporary packaging folder still exists!
'#{packaging_folder}'
It will now be removed.
EOS
FileUtils.rm_rf(packaging_folder)
end
package_files = package_files_for(model.trigger)
unless package_files.empty?
# the chances of the packaging folder AND
# the package files existing are practically nil
messages << ("-" * 74) unless messages.empty?
messages << <<-EOS
The temporary backup folder '#{Config.tmp_path}'
appears to contain the package files from the previous backup!
#{package_files.join("\n")}
These files will now be removed.
EOS
package_files.each { |file| FileUtils.rm_f(file) }
end
unless messages.empty?
Logger.warn Error.new(<<-EOS)
Cleanup Warning
#{messages.join("\n")}
Please check the log for messages and/or your notifications
concerning this backup: '#{model.label} (#{model.trigger})'
The temporary files which had to be removed should not have existed.
EOS
end
end
##
# Remove the temporary folder used during packaging
def remove_packaging(model)
Logger.info "Cleaning up the temporary files..."
FileUtils.rm_rf(File.join(Config.tmp_path, model.trigger))
end
##
# Remove the final package files from tmp_path
# Note: 'force' is used, since a Local Storage may *move* these files.
def remove_package(package)
Logger.info "Cleaning up the package files..."
package.filenames.each do |file|
FileUtils.rm_f(File.join(Config.tmp_path, file))
end
end
##
# Logs warnings if any temporary files still exist
# when errors occur during the backup
def warnings(model)
messages = []
packaging_folder = File.join(Config.tmp_path, model.trigger)
if File.exist?(packaging_folder)
messages << <<-EOS
The temporary packaging folder still exists!
'#{packaging_folder}'
This folder may contain completed Archives and/or Database backups.
EOS
end
package_files = package_files_for(model.trigger)
unless package_files.empty?
# the chances of the packaging folder AND
# the package files existing are practically nil
messages << ("-" * 74) unless messages.empty?
messages << <<-EOS
The temporary backup folder '#{Config.tmp_path}'
appears to contain the backup files which were to be stored:
#{package_files.join("\n")}
EOS
end
unless messages.empty?
Logger.warn Error.new(<<-EOS)
Cleanup Warning
#{messages.join("\n")}
Make sure you check these files before the next scheduled backup for
'#{model.label} (#{model.trigger})'
These files will be removed at that time!
EOS
end
end
private
def package_files_for(trigger)
Dir[File.join(Config.tmp_path, "#{trigger}.tar{,[.-]*}")]
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/template.rb | lib/backup/template.rb | require "erb"
module Backup
class Template
# Holds a binding object. Nil if not provided.
attr_accessor :binding
##
# Creates a new instance of the Backup::Template class
# and optionally takes an argument that can be either a binding object, a Hash or nil
def initialize(object = nil)
@binding =
if object.is_a?(Binding)
object
elsif object.is_a?(Hash)
Backup::Binder.new(object).get_binding
end
end
##
# Renders the provided file (in the context of the binding if any) to the console
def render(file)
puts result(file)
end
##
# Returns a String object containing the contents of the file (in the context of the binding if any)
def result(file)
if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("3.0.0")
ERB.new(file_contents(file), trim_mode: "<>")
else
ERB.new(file_contents(file), nil, "<>")
end.result(binding)
end
private
##
# Reads and returns the contents of the provided file path,
# relative from the Backup::TEMPLATE_PATH
def file_contents(file)
File.read(File.join(Backup::TEMPLATE_PATH, file))
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/cli.rb | lib/backup/cli.rb | ##
# Build the Backup Command Line Interface using Thor
module Backup
class CLI < Thor
class Error < Backup::Error; end
class FatalError < Backup::FatalError; end
##
# [Perform]
#
# The only required option is the --trigger [-t].
# If --config-file, --data-path, --tmp-path or --log-path
# aren't specified they will fallback to defaults.
# If --root-path is given, it will be used as the base path for our defaults,
# as well as the base path for any option specified as a relative path.
# Any option given as an absolute path will be used "as-is".
#
# This command will exit with one of the following status codes:
#
# 0: All triggers were successful and no warnings were issued.
# 1: All triggers were successful, but some had warnings.
# 2: All triggers were processed, but some failed.
# 3: A fatal error caused Backup to exit.
# Some triggers may not have been processed.
#
# If the --check option is given, `backup check` will be run
# and no triggers will be performed.
desc "perform", "Performs the backup for the specified trigger(s)."
long_desc <<-EOS.gsub(/^ +/, "")
Performs the backup for the specified trigger(s).
You may perform multiple backups by providing multiple triggers,
separated by commas. Each will run in the order specified.
$ backup perform --triggers backup1,backup2,backup3,backup4
--root-path may be an absolute path or relative to the current directory.
To use the current directory, use: `--root-path .`
Relative paths given for --config-file, --data-path, --tmp-path,
and --log-path will be relative to --root-path.
Console log output may be forced using --no-quiet.
Logging to file or syslog may be disabled using --no-logfile or --no-syslog
respectively. This will override logging options set in `config.rb`.
EOS
method_option :trigger,
aliases: ["-t", "--triggers"],
required: true,
type: :string,
desc: "Triggers to perform. e.g. 'trigger_a,trigger_b'"
method_option :config_file,
aliases: "-c",
type: :string,
default: "",
desc: "Path to your config.rb file."
method_option :root_path,
aliases: "-r",
type: :string,
default: "",
desc: "Root path to base all relative path on."
method_option :data_path,
aliases: "-d",
type: :string,
default: "",
desc: "Path to store storage cycling data."
method_option :log_path,
aliases: "-l",
type: :string,
default: "",
desc: "Path to store Backup's log file."
method_option :tmp_path,
type: :string,
default: "",
desc: "Path to store temporary data during the backup."
# Note that :quiet, :syslog and :logfile are specified as :string types,
# so the --no-<option> usage will set the value to nil instead of false.
method_option :quiet,
aliases: "-q",
type: :boolean,
default: false,
banner: "",
desc: "Disable console log output."
method_option :syslog,
type: :boolean,
default: false,
banner: "",
desc: "Enable logging to syslog."
method_option :logfile,
type: :boolean,
default: true,
banner: "",
desc: "Enable Backup's log file."
method_option :check,
type: :boolean,
default: false,
desc: "Check configuration for errors or warnings."
def perform
check if options[:check] # this will exit()
models = nil
begin
# Set logger options
opts = options
Logger.configure do
console.quiet = opts[:quiet]
logfile.enabled = opts[:logfile]
logfile.log_path = opts[:log_path]
syslog.enabled = opts[:syslog]
end
# Load the user's +config.rb+ file and all their Models
Config.load(options)
# Identify all Models to be run for the given +triggers+.
triggers = options[:trigger].split(",").map(&:strip)
models = triggers.uniq.flat_map do |trigger|
Model.find_by_trigger(trigger)
end
if models.empty?
raise Error, "No Models found for trigger(s) " \
"'#{triggers.join(",")}'."
end
# Finalize Logger and begin real-time logging.
Logger.start!
rescue Exception => err
Logger.error Error.wrap(err)
unless Helpers.is_backup_error? err
Logger.error err.backtrace.join("\n")
end
# Logger configuration will be ignored
# and messages will be output to the console only.
Logger.abort!
exit 3
end
until models.empty?
model = models.shift
model.perform!
case model.exit_status
when 1
warnings = true
when 2
errors = true
unless models.empty?
Logger.info Error.new(<<-EOS)
Backup will now continue...
The following triggers will now be processed:
(#{models.map(&:trigger).join(", ")})
EOS
end
when 3
fatal = true
unless models.empty?
Logger.error FatalError.new(<<-EOS)
Backup will now exit.
The following triggers will not be processed:
(#{models.map(&:trigger).join(", ")})
EOS
end
end
model.notifiers.each(&:perform!)
exit(3) if fatal
Logger.clear!
end
exit(errors ? 2 : 1) if errors || warnings
end
##
# [Check]
#
# Loads the user's `config.rb` (and all Model files) and reports any Errors
# or Warnings. This is primarily for checking for syntax errors, missing
# dependencies and deprecation warnings.
#
# This may also be invoked using the `--check` option to `backup perform`.
#
# This command only requires `Config.config_file` to be correct.
# All other Config paths are irrelevant.
#
# All output will be sent to the console only.
# Logger options will be ignored.
#
# If successful, this method with exit(0).
# If there are Errors or Warnings, it will exit(1).
desc "check", "Check for configuration errors or warnings"
long_desc <<-EOS.gsub(/^ +/, "")
Loads your 'config.rb' file and all models and reports any
errors or warnings with your configuration, including missing
dependencies and the use of any deprecated settings.
EOS
method_option :config_file,
aliases: "-c",
type: :string,
default: "",
desc: "Path to your config.rb file."
def check
begin
Config.load(options)
rescue Exception => err
Logger.error Error.wrap(err)
unless Helpers.is_backup_error? err
Logger.error err.backtrace.join("\n")
end
end
if Logger.has_warnings? || Logger.has_errors?
Logger.error "Configuration Check Failed."
exit_code = 1
else
Logger.info "Configuration Check Succeeded."
exit_code = 0
end
Logger.abort!
exit(exit_code)
end
##
# [Generate:Model]
# Generates a model configuration file based on the arguments passed in.
# For example:
# $ backup generate:model --trigger my_backup --databases='mongodb'
# will generate a pre-populated model with a base MongoDB setup
desc "generate:model", "Generates a Backup model file."
long_desc <<-EOS.gsub(/^ +/, "")
Generates a Backup model file.
If your configuration file is not in the default location at
#{Config.config_file}
you must specify it's location using '--config-file'.
If no configuration file exists at this location, one will be created.
The model file will be created as '<config_path>/models/<trigger>.rb'
Your model file will be created in a 'models/' sub-directory
where your config file is located. The default location would be:
#{Config.root_path}/models/<trigger>.rb
EOS
method_option :trigger,
aliases: "-t",
required: true,
type: :string,
desc: "Trigger name for the Backup model"
method_option :config_file,
type: :string,
desc: "Path to your Backup configuration file"
# options with their available values
%w[databases storages syncers encryptor compressor notifiers].each do |name|
path = File.join(Backup::TEMPLATE_PATH, "cli", name)
opts = Dir[path + "/*"].sort.map { |p| File.basename(p) }.join(", ")
method_option name, type: :string, desc: "(#{opts})"
end
method_option :archives,
type: :boolean,
desc: "Model will include tar archives."
method_option :splitter,
type: :boolean,
default: false,
desc: "Add Splitter to the model"
define_method "generate:model" do
opts = options.merge(trigger: options[:trigger].gsub(/\W/, "_"))
config_file = opts[:config_file] ?
File.expand_path(opts.delete(:config_file)) : Config.config_file
models_path = File.join(File.dirname(config_file), "models")
model_file = File.join(models_path, "#{opts[:trigger]}.rb")
unless File.exist?(config_file)
invoke "generate:config", [], config_file: config_file
end
FileUtils.mkdir_p(models_path)
if Helpers.overwrite?(model_file)
File.open(model_file, "w") do |file|
file.write(Backup::Template.new(options: opts).result("cli/model"))
end
puts "Generated model file: '#{model_file}'."
end
end
##
# [Generate:Config]
# Generates the main configuration file
desc "generate:config", "Generates the main Backup configuration file"
long_desc <<-EOS.gsub(/^ +/, "")
Path to the Backup configuration file to generate.
Defaults to:
#{Config.config_file}
EOS
method_option :config_file,
type: :string,
desc: "Path to the Backup configuration file to generate."
define_method "generate:config" do
config_file = options[:config_file] ?
File.expand_path(options[:config_file]) : Config.config_file
FileUtils.mkdir_p(File.dirname(config_file))
if Helpers.overwrite?(config_file)
File.open(config_file, "w") do |file|
file.write(Backup::Template.new.result("cli/config"))
end
puts "Generated configuration file: '#{config_file}'."
end
end
##
# [Version]
# Returns the current version of the Backup gem
map "-v" => :version
desc "version", "Display installed Backup version"
def version
puts "Backup #{Backup::VERSION}"
end
# This is to avoid Thor's warnings when stubbing methods on the Thor class.
module Helpers
class << self
def overwrite?(path)
return true unless File.exist?(path)
$stderr.print "A file already exists at '#{path}'.\n" \
"Do you want to overwrite? [y/n] "
/^[Yy]/ =~ $stdin.gets
end
def exec!(cmd)
puts "Launching: #{cmd}"
exec(cmd)
end
def is_backup_error?(error)
error.class.ancestors.include? Backup::Error
end
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/config.rb | lib/backup/config.rb | require "backup/config/dsl"
require "backup/config/helpers"
module Backup
module Config
class Error < Backup::Error; end
DEFAULTS = {
config_file: "config.rb",
data_path: ".data",
tmp_path: ".tmp"
}
class << self
include Utilities::Helpers
attr_reader :user, :root_path, :config_file, :data_path, :tmp_path
# Loads the user's +config.rb+ and all model files.
def load(options = {})
update(options) # from the command line
unless File.exist?(config_file)
raise Error, "Could not find configuration file: '#{config_file}'."
end
config = File.read(config_file)
version = Backup::VERSION.split(".").first
unless config =~ /^# Backup v#{ version }\.x Configuration$/
raise Error, <<-EOS
Invalid Configuration File
The configuration file at '#{config_file}'
does not appear to be a Backup v#{version}.x configuration file.
If you have upgraded to v#{version}.x from a previous version,
you need to upgrade your configuration file.
Please see the instructions for upgrading in the Backup documentation.
EOS
end
dsl = DSL.new
dsl.instance_eval(config, config_file)
update(dsl._config_options) # from config.rb
update(options) # command line takes precedence
Dir[File.join(File.dirname(config_file), "models", "*.rb")].each do |model|
dsl.instance_eval(File.read(model), model)
end
end
def hostname
@hostname ||= run(utility(:hostname))
end
private
# If :root_path is set in the options, all paths will be updated.
# Otherwise, only the paths given will be updated.
def update(options = {})
root_path = options[:root_path].to_s.strip
new_root = root_path.empty? ? false : set_root_path(root_path)
DEFAULTS.each do |name, ending|
set_path_variable(name, options[name], ending, new_root)
end
end
# Sets the @root_path to the given +path+ and returns it.
# Raises an error if the given +path+ does not exist.
def set_root_path(path)
# allows #reset! to set the default @root_path,
# then use #update to set all other paths,
# without requiring that @root_path exist.
return @root_path if path == @root_path
path = File.expand_path(path)
unless File.directory?(path)
raise Error, <<-EOS
Root Path Not Found
When specifying a --root-path, the path must exist.
Path was: #{path}
EOS
end
@root_path = path
end
def set_path_variable(name, path, ending, root_path)
# strip any trailing '/' in case the user supplied this as part of
# an absolute path, so we can match it against File.expand_path()
path = path.to_s.sub(/\/\s*$/, "").lstrip
new_path = false
# If no path is given, the variable will not be set/updated
# unless a root_path was given. In which case the value will
# be updated with our default ending.
if path.empty?
new_path = File.join(root_path, ending) if root_path
else
# When a path is given, the variable will be set/updated.
# If the path is relative, it will be joined with root_path (if given),
# or expanded relative to PWD.
new_path = File.expand_path(path)
unless path == new_path
new_path = File.join(root_path, path) if root_path
end
end
instance_variable_set(:"@#{name}", new_path) if new_path
end
def reset!
@user = ENV["USER"] || Etc.getpwuid.name
@root_path = File.join(File.expand_path(ENV["HOME"] || ""), "Backup")
update(root_path: @root_path)
end
end
reset! # set defaults on load
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/model.rb | lib/backup/model.rb | module Backup
class Model
class Error < Backup::Error; end
class FatalError < Backup::FatalError; end
class << self
  ##
  # Registry of every Model instantiated during this run.
  # Lazily initialized so it may be referenced before any model exists.
  def all
    @all ||= []
  end

  ##
  # Return an Array of Models whose trigger matches +trigger+.
  # A '*' in +trigger+ acts as a wildcard (e.g. 'db_*').
  def find_by_trigger(trigger)
    wanted = trigger.to_s
    if wanted.include?("*")
      pattern = /^#{ wanted.gsub('*', '(.*)') }$/
      all.select { |model| model.trigger =~ pattern }
    else
      all.select { |model| model.trigger == wanted }
    end
  end

  # Stores a block used to preconfigure models. The first block
  # given wins; later calls simply return the stored block.
  def preconfigure(&block)
    @preconfigure = block if @preconfigure.nil?
    @preconfigure
  end

  private

  # used for testing
  def reset!
    @all = @preconfigure = nil
  end
end
##
# The trigger (stored as a String) is used as an identifier
# for initializing the backup process
attr_reader :trigger
##
# The label (stored as a String) is used for a more friendly user output
attr_reader :label
##
# Array of configured Database objects.
attr_reader :databases
##
# Array of configured Archive objects.
attr_reader :archives
##
# Array of configured Notifier objects.
attr_reader :notifiers
##
# Array of configured Storage objects.
attr_reader :storages
##
# Array of configured Syncer objects.
attr_reader :syncers
##
# The configured Compressor, if any.
attr_reader :compressor
##
# The configured Encryptor, if any.
attr_reader :encryptor
##
# The configured Splitter, if any.
attr_reader :splitter
##
# The final backup Package this model will create.
attr_reader :package
##
# The time when the backup initiated (in format: 2011.02.20.03.29.59)
attr_reader :time
##
# The time when the backup initiated (as a Time object)
attr_reader :started_at
##
# The time when the backup finished (as a Time object)
attr_reader :finished_at
##
# Result of this model's backup process.
#
# 0 = Job was successful
# 1 = Job was successful, but issued warnings
# 2 = Job failed, additional triggers may be performed
# 3 = Job failed, additional triggers will not be performed
attr_reader :exit_status
##
# Exception raised by either a +before+ hook or one of the model's
# procedures that caused the model to fail. An exception raised by an
# +after+ hook would not be stored here. Therefore, it is possible for
# this to be +nil+ even if #exit_status is 2 or 3.
attr_reader :exception
# Initializes the model and registers it in Model.all.
#
# +trigger+ - unique identifier used to perform this model.
# +label+   - human-friendly description used in output.
# The optional +block+ is instance_eval'd after any preconfigure
# block, so per-model settings run last.
def initialize(trigger, label, &block)
  @trigger = trigger.to_s
  @label = label.to_s
  @package = Package.new(self)

  @databases = []
  @archives = []
  @storages = []
  @notifiers = []
  @syncers = []

  instance_eval(&self.class.preconfigure) if self.class.preconfigure
  instance_eval(&block) if block_given?

  # trigger all defined databases to generate their #dump_filename
  # so warnings may be logged if `backup perform --check` is used
  databases.each { |db| db.send(:dump_filename) }

  Model.all << self
end
##
# Adds an Archive. Multiple Archives may be added to the model.
def archive(name, &block)
  @archives << Archive.new(self, name, &block)
end

##
# Adds a Database. Multiple Databases may be added to the model.
# +name+ is resolved to a class under Backup::Database.
def database(name, database_id = nil, &block)
  @databases << get_class_from_scope(Database, name)
    .new(self, database_id, &block)
end

##
# Adds a Storage. Multiple Storages may be added to the model.
def store_with(name, storage_id = nil, &block)
  @storages << get_class_from_scope(Storage, name)
    .new(self, storage_id, &block)
end

##
# Adds a Syncer. Multiple Syncers may be added to the model.
def sync_with(name, syncer_id = nil, &block)
  @syncers << get_class_from_scope(Syncer, name).new(syncer_id, &block)
end

##
# Adds a Notifier. Multiple Notifiers may be added to the model.
def notify_by(name, &block)
  @notifiers << get_class_from_scope(Notifier, name).new(self, &block)
end

##
# Adds an Encryptor. Only one Encryptor may be added to the model.
# This will be used to encrypt the final backup package.
def encrypt_with(name, &block)
  @encryptor = get_class_from_scope(Encryptor, name).new(&block)
end

##
# Adds a Compressor. Only one Compressor may be added to the model.
# This will be used to compress each individual Archive and Database
# stored within the final backup package.
def compress_with(name, &block)
  @compressor = get_class_from_scope(Compressor, name).new(&block)
end

##
# Adds a Splitter to split the final backup package into multiple files.
#
# +chunk_size+ is specified in MiB and must be given as an Integer.
# +suffix_length+ controls the number of characters used in the suffix
# (and the maximum number of chunks possible).
# ie. 1 (-a, -b), 2 (-aa, -ab), 3 (-aaa, -aab)
def split_into_chunks_of(chunk_size, suffix_length = 3)
  if chunk_size.is_a?(Integer) && suffix_length.is_a?(Integer)
    @splitter = Splitter.new(self, chunk_size, suffix_length)
  else
    raise Error, <<-EOS
      Invalid arguments for #split_into_chunks_of()
      +chunk_size+ (and optional +suffix_length+) must be Integers.
    EOS
  end
end
##
# Defines a block of code to run before the model's procedures.
# Acts as writer when given a block, and as reader otherwise.
#
# Warnings logged within the before hook will elevate the model's
# exit_status to 1 and cause warning notifications to be sent.
#
# Raising an exception will abort the model and cause failure notifications
# to be sent. If the exception is a StandardError, exit_status will be 2.
# If the exception is not a StandardError, exit_status will be 3.
#
# If any exception is raised, any defined +after+ hook will be skipped.
def before(&block)
  @before = block unless block.nil?
  @before
end
##
# Defines a block of code to run after the model's procedures.
# Acts as writer when given a block, and as reader otherwise.
#
# This code is ensured to run, even if the model failed, **unless** a
# +before+ hook raised an exception and aborted the model.
#
# The code block will be passed the model's current exit_status:
#
# `0`: Success, no warnings.
# `1`: Success, but warnings were logged.
# `2`: Failure, but additional models/triggers will still be processed.
# `3`: Failure, no additional models/triggers will be processed.
#
# The model's exit_status may be elevated based on the after hook's
# actions, but will never be decreased.
#
# Warnings logged within the after hook may elevate the model's
# exit_status to 1 and cause warning notifications to be sent.
#
# Raising an exception may elevate the model's exit_status and cause
# failure notifications to be sent. If the exception is a StandardError,
# the exit_status will be elevated to 2. If the exception is not a
# StandardError, the exit_status will be elevated to 3.
def after(&block)
  @after = block unless block.nil?
  @after
end
##
# Performs the backup process
#
# Once complete, #exit_status will indicate the result of this process.
#
# If any errors occur during the backup process, all temporary files will
# be left in place. If the error occurs before Packaging, then the
# temporary folder (tmp_path/trigger) will remain and may contain all or
# some of the configured Archives and/or Database dumps. If the error
# occurs after Packaging, but before the Storages complete, then the final
# packaged files (located in the root of tmp_path) will remain.
#
# *** Important ***
# If an error occurs and any of the above mentioned temporary files remain,
# those files *** will be removed *** before the next scheduled backup for
# the same trigger.
def perform!
  @started_at = Time.now.utc
  @time = package.time = started_at.strftime("%Y.%m.%d.%H.%M.%S")
  log!(:started)
  before_hook

  # Run prepare/dump/package/store/clean procedures, then any Syncers.
  procedures.each do |procedure|
    procedure.is_a?(Proc) ? procedure.call : procedure.each(&:perform!)
  end

  syncers.each(&:perform!)
rescue Interrupt
  # Let the interrupt propagate without recording a result.
  @interrupted = true
  raise
rescue Exception => err
  # Intentionally broad: even non-StandardError failures must be
  # captured so exit_status, logging and the after hook reflect them.
  @exception = err
ensure
  unless @interrupted
    set_exit_status
    @finished_at = Time.now.utc
    log!(:finished)
    after_hook
  end
end
##
# The duration of the backup process (in format: HH:MM:SS)
# Returns nil until the model has finished.
def duration
  return unless finished_at

  elapsed_time(started_at, finished_at)
end

private

##
# Returns an array of procedures that will be performed if any
# Archives or Databases are configured for the model.
# Procs are #call'ed by #perform!; the array members (databases,
# archives) each receive #perform!.
def procedures
  return [] unless databases.any? || archives.any?

  [-> { prepare! }, databases, archives,
   -> { package! }, -> { store! }, -> { clean! }]
end

##
# Clean any temporary files and/or package files left over
# from the last time this model/trigger was performed.
# Logs warnings if files exist and are cleaned.
def prepare!
  Cleaner.prepare(self)
end

##
# After all the databases and archives have been dumped and stored,
# these files will be bundled in to a .tar archive (uncompressed),
# which may be optionally Encrypted and/or Split into multiple "chunks".
# All information about this final archive is stored in the @package.
# Once complete, the temporary folder used during packaging is removed.
def package!
  Packager.package!(self)
  Cleaner.remove_packaging(self)
end
##
# Attempts to use all configured Storages, even if some of them result
# in exceptions, so every Storage gets a chance to receive the package.
# Returns true when all succeeded; otherwise logs every exception after
# the first and re-raises the first one (recorded by #perform!).
def store!
  storage_results = storages.map do |storage|
    begin
      storage.perform!
    rescue => ex
      ex
    end
  end

  first_exception, *other_exceptions = storage_results.select { |result| result.is_a? Exception }

  if first_exception
    other_exceptions.each do |exception|
      Logger.error exception.to_s
      # Join with a real newline; the previous single-quoted '\n'
      # emitted literal backslash-n characters between frames.
      Logger.error exception.backtrace.join("\n")
    end
    raise first_exception
  else
    true
  end
end
##
# Removes the final package file(s) once all configured Storages have run.
def clean!
  Cleaner.remove_package(package)
end
##
# Returns the class/model specified by +name+ inside of +scope+.
# +scope+ should be a Class/Module.
# +name+ may be Class/Module or String representation
# of any namespace which exists under +scope+.
#
# The 'Backup::Config::DSL' namespace is stripped from +name+,
# since this is the namespace where we define module namespaces
# for use with Model's DSL methods.
#
# Examples:
#   get_class_from_scope(Backup::Database, 'MySQL')
#     returns the class Backup::Database::MySQL
#
#   get_class_from_scope(Backup::Syncer, Backup::Config::RSync::Local)
#     returns the class Backup::Syncer::RSync::Local
#
def get_class_from_scope(scope, name)
  const_path = name.to_s.sub(/^Backup::Config::DSL::/, "")
  # Walk each namespace segment down from +scope+.
  const_path.split("::").reduce(scope) do |namespace, const_name|
    namespace.const_get(const_name)
  end
end
##
# Sets or updates the model's #exit_status.
# 0/1 distinguish clean success from success-with-warnings;
# 2/3 distinguish StandardError failures from fatal (non-Standard) ones.
def set_exit_status
  @exit_status =
    if exception.nil?
      Logger.has_warnings? ? 1 : 0
    elsif exception.is_a?(StandardError)
      2
    else
      3
    end
end
##
# Runs the +before+ hook.
# Any exception raised will be wrapped and re-raised, where it will be
# handled by #perform the same as an exception raised while performing
# the model's #procedures. Only difference is that an exception raised
# here will prevent any +after+ hook from being run.
def before_hook
  return unless before

  Logger.info "Before Hook Starting..."
  before.call
  Logger.info "Before Hook Finished."
rescue Exception => err
  @before_hook_failed = true
  # Non-StandardError failures are wrapped as FatalError (exit 3).
  ex = err.is_a?(StandardError) ? Error : FatalError
  raise ex.wrap(err, "Before Hook Failed!")
end

##
# Runs the +after+ hook.
# Any exception raised here will be logged only and the model's
# #exit_status will be elevated if necessary.
def after_hook
  return unless after && !@before_hook_failed

  Logger.info "After Hook Starting..."
  after.call(exit_status)
  Logger.info "After Hook Finished."

  set_exit_status # in case hook logged warnings
rescue Exception => err
  fatal = !err.is_a?(StandardError)
  ex = fatal ? FatalError : Error
  Logger.error ex.wrap(err, "After Hook Failed!")
  # upgrade exit_status if needed
  (@exit_status = fatal ? 3 : 2) unless exit_status == 3
end

##
# Logs messages when the model starts and finishes.
#
# #exception will be set here if #exit_status is > 1,
# since log(:finished) is called before the +after+ hook.
def log!(action)
  case action
  when :started
    Logger.info "Performing Backup for '#{label} (#{trigger})'!\n" \
      "[ backup #{VERSION} : #{RUBY_DESCRIPTION} ]"

  when :finished
    if exit_status > 1
      # Failure: log the wrapped exception plus its backtrace, and
      # warn about any leftover temporary files.
      ex = exit_status == 2 ? Error : FatalError
      err = ex.wrap(exception, "Backup for #{label} (#{trigger}) Failed!")
      Logger.error err
      Logger.error "\nBacktrace:\n\s\s" + err.backtrace.join("\n\s\s") + "\n\n"

      Cleaner.warnings(self)
    else
      msg = "Backup for '#{label} (#{trigger})' "
      if exit_status == 1
        msg << "Completed Successfully (with Warnings) in #{duration}"
        Logger.warn msg
      else
        msg << "Completed Successfully in #{duration}"
        Logger.info msg
      end
    end
  end
end
##
# Returns a string representing the elapsed time in HH:MM:SS.
# Accepts Time objects or anything responding to #to_i (epoch seconds);
# hours are not wrapped, so durations over a day read e.g. "25:00:00".
def elapsed_time(start_time, finish_time)
  total_seconds = finish_time.to_i - start_time.to_i
  hours, seconds_left = total_seconds.divmod(3600)
  minutes, seconds = seconds_left.divmod(60)
  format("%02d:%02d:%02d", hours, minutes, seconds)
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/binder.rb | lib/backup/binder.rb | module Backup
class Binder
  ##
  # Creates a new Backup::Binder instance. Each key/value pair of the
  # given Hash becomes an instance variable (@key = value) on this
  # object, so the binding below can be handed to an ERB template.
  def initialize(key_and_values)
    key_and_values.each do |name, value|
      instance_variable_set(:"@#{name}", value)
    end
  end

  ##
  # Returns the binding. Kernel#binding is private, so it is exposed
  # through this public wrapper method.
  def get_binding
    binding
  end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/archive.rb | lib/backup/archive.rb | module Backup
class Archive
class Error < Backup::Error; end
include Utilities::Helpers
attr_reader :name, :options
##
# Adds a new Archive to a Backup Model.
#
# Backup::Model.new(:my_backup, 'My Backup') do
# archive :my_archive do |archive|
# archive.add 'path/to/archive'
# archive.add '/another/path/to/archive'
# archive.exclude 'path/to/exclude'
# archive.exclude '/another/path/to/exclude'
# end
# end
#
# All paths added using `add` or `exclude` will be expanded to their
# full paths from the root of the filesystem. Files will be added to
# the tar archive using these full paths, and their leading `/` will
# be preserved (using tar's `-P` option).
#
# /path/to/pwd/path/to/archive/...
# /another/path/to/archive/...
#
# When a `root` path is given, paths to add/exclude are taken as
# relative to the `root` path, unless given as absolute paths.
#
# Backup::Model.new(:my_backup, 'My Backup') do
# archive :my_archive do |archive|
# archive.root '~/my_data'
# archive.add 'path/to/archive'
# archive.add '/another/path/to/archive'
# archive.exclude 'path/to/exclude'
# archive.exclude '/another/path/to/exclude'
# end
# end
#
# This directs `tar` to change directories to the `root` path to create
# the archive. Unless paths were given as absolute, the paths within the
# archive will be relative to the `root` path.
#
# path/to/archive/...
# /another/path/to/archive/...
#
# For absolute paths added to this archive, the leading `/` will be
# preserved. Take note that when archives are extracted, leading `/` are
# stripped by default, so care must be taken when extracting archives with
# mixed relative/absolute paths.
# See the class-level documentation above for add/exclude/root semantics.
def initialize(model, name, &block)
  @model = model
  @name = name.to_s
  # Defaults; overridden through the DSL block below.
  @options = {
    sudo: false,
    root: false,
    paths: [],
    excludes: [],
    tar_options: ""
  }
  DSL.new(@options).instance_eval(&block)
end

# Builds the tar archive (optionally compressed via the model's
# compressor) under Config.tmp_path/<trigger>/archives/<name>.<ext>.
# Raises Error if the pipeline reports failure.
def perform!
  Logger.info "Creating Archive '#{name}'..."
  path = File.join(Config.tmp_path, @model.trigger, "archives")
  FileUtils.mkdir_p(path)

  pipeline = Pipeline.new
  # Paths to archive are handed to tar via a --files-from tempfile.
  with_files_from(paths_to_package) do |files_from|
    pipeline.add(
      "#{tar_command} #{tar_options} -cPf -#{tar_root} " \
      "#{paths_to_exclude} #{files_from}",
      tar_success_codes
    )

    extension = "tar"
    # Compress the stream in-pipeline when a compressor is configured.
    if @model.compressor
      @model.compressor.compress_with do |command, ext|
        pipeline << command
        extension << ext
      end
    end
    pipeline << "#{utility(:cat)} > " \
      "'#{File.join(path, "#{name}.#{extension}")}'"
    pipeline.run
  end

  if pipeline.success?
    Logger.info "Archive '#{name}' Complete!"
  else
    raise Error, "Failed to Create Archive '#{name}'\n" +
      pipeline.error_messages
  end
end
private

# The tar executable, prefixed with `sudo -n` when use_sudo was set
# (-n fails rather than prompting if sudo would need a password).
def tar_command
  tar = utility(:tar)
  options[:sudo] ? "#{utility(:sudo)} -n #{tar}" : tar
end

# tar -C argument when a root directory was configured; empty otherwise.
def tar_root
  options[:root] ? " -C '#{File.expand_path(options[:root])}'" : ""
end

# Paths to include; relative paths stay relative when a root is set.
def paths_to_package
  options[:paths].map { |path| prepare_path(path) }
end
# Writes the given paths (one per line) to a temporary file and yields
# a tar `-T` (--files-from) argument pointing at it. The temporary
# file is removed when the block returns.
def with_files_from(paths)
  file_list = Tempfile.new("backup-archive-paths")
  file_list.write(paths.map { |entry| "#{entry}\n" }.join)
  file_list.close
  yield "-T '#{file_list.path}'"
ensure
  file_list.delete
end
# --exclude argument for each configured exclusion, space-joined.
def paths_to_exclude
  options[:excludes].map do |path|
    "--exclude='#{prepare_path(path)}'"
  end.join(" ")
end

# With a root, paths are passed through as-is (tar resolves them
# relative to the root via -C); otherwise expand from PWD.
def prepare_path(path)
  options[:root] ? path : File.expand_path(path)
end

# User tar options, plus --ignore-failed-read for GNU tar so
# unreadable files warn instead of aborting the archive.
def tar_options
  args = options[:tar_options]
  gnu_tar? ? "--ignore-failed-read #{args}".strip : args
end

# GNU tar exits 1 for "some files changed/unreadable"; accept it.
def tar_success_codes
  gnu_tar? ? [0, 1] : [0]
end
# Thin evaluation context for the `archive` block; each DSL method
# records the user's choice into the options hash owned by Archive.
class DSL
  def initialize(options)
    @settings = options
  end

  # Run tar (and the archive reads) via `sudo -n`.
  def use_sudo(val = true)
    @settings[:sudo] = val
  end

  # Directory tar changes into before archiving; added/excluded
  # relative paths are then taken relative to it.
  def root(path)
    @settings[:root] = path
  end

  # Adds a path to include in the archive.
  def add(path)
    @settings[:paths] << path
  end

  # Adds a path to exclude (tar --exclude).
  def exclude(path)
    @settings[:excludes] << path
  end

  # Extra options appended verbatim to the tar command line.
  def tar_options(opts)
    @settings[:tar_options] = opts
  end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/local.rb | lib/backup/storage/local.rb | module Backup
module Storage
class Local < Base
  include Storage::Cycler
  class Error < Backup::Error; end

  # Destination defaults to '~/backups' (expanded lazily in #remote_path).
  def initialize(model, storage_id = nil)
    super
    @path ||= "~/backups"
  end

  private

  # Copies (or, when safe, moves) each package file into remote_path.
  def transfer!
    FileUtils.mkdir_p(remote_path)

    # Move when this is the model's last storage; otherwise copy, so
    # later storages still find the package files in Config.tmp_path.
    transfer_method = package_movable? ? :mv : :cp
    package.filenames.each do |filename|
      src = File.join(Config.tmp_path, filename)
      dest = File.join(remote_path, filename)
      Logger.info "Storing '#{dest}'..."

      FileUtils.send(transfer_method, src, dest)
    end
  end

  # Called by the Cycler.
  # Any error raised will be logged as a warning.
  def remove!(package)
    Logger.info "Removing backup package dated #{package.time}..."

    FileUtils.rm_r(remote_path_for(package))
  end

  # expanded since this is a local path
  def remote_path(pkg = package)
    File.expand_path(super)
  end
  alias :remote_path_for :remote_path

  ##
  # If this Local Storage is not the last Storage for the Model,
  # force the transfer to use a *copy* operation and issue a warning.
  def package_movable?
    if self == model.storages.last
      true
    else
      Logger.warn Error.new(<<-EOS)
        Local File Copy Warning!
        The final backup file(s) for '#{model.label}' (#{model.trigger})
        will be *copied* to '#{remote_path}'
        To avoid this, when using more than one Storage, the 'Local' Storage
        should be added *last* so the files may be *moved* to their destination.
      EOS
      false
    end
  end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/rsync.rb | lib/backup/storage/rsync.rb | module Backup
module Storage
class RSync < Base
include Utilities::Helpers
##
# Mode of operation
#
# [:ssh (default)]
# Connects to the remote via SSH.
# Does not use an rsync daemon on the remote.
#
# [:ssh_daemon]
# Connects to the remote via SSH.
# Spawns a single-use daemon on the remote, which allows certain
# daemon features (like modules) to be used.
#
# [:rsync_daemon]
# Connects directly to an rsync daemon via TCP.
# Data transferred is not encrypted.
#
attr_accessor :mode
##
# Server Address
#
# If not specified, the storage operation will be local.
attr_accessor :host
##
# SSH or RSync port
#
# For `:ssh` or `:ssh_daemon` mode, this specifies the SSH port to use
# and defaults to 22.
#
# For `:rsync_daemon` mode, this specifies the TCP port to use
# and defaults to 873.
attr_accessor :port
##
# SSH User
#
# If the user running the backup is not the same user that needs to
# authenticate with the remote server, specify the user here.
#
# The user must have SSH keys setup for passphrase-less access to the
# remote. If the SSH User does not have passphrase-less keys, or no
# default keys in their `~/.ssh` directory, you will need to use the
# `-i` option in `:additional_ssh_options` to specify the
# passphrase-less key to use.
#
# Used only for `:ssh` and `:ssh_daemon` modes.
attr_accessor :ssh_user
##
# Additional SSH Options
#
# Used to supply a String or Array of options to be passed to the SSH
# command in `:ssh` and `:ssh_daemon` modes.
#
# For example, if you need to supply a specific SSH key for the `ssh_user`,
# you would set this to: "-i '/path/to/id_rsa'". Which would produce:
#
# rsync -e "ssh -p 22 -i '/path/to/id_rsa'"
#
# Arguments may be single-quoted, but should not contain any double-quotes.
#
# Used only for `:ssh` and `:ssh_daemon` modes.
attr_accessor :additional_ssh_options
##
# RSync User
#
# If the user running the backup is not the same user that needs to
# authenticate with the rsync daemon, specify the user here.
#
# Used only for `:ssh_daemon` and `:rsync_daemon` modes.
attr_accessor :rsync_user
##
# RSync Password
#
# If specified, Backup will write the password to a temporary file and
# use it with rsync's `--password-file` option for daemon authentication.
#
# Note that setting this will override `rsync_password_file`.
#
# Used only for `:ssh_daemon` and `:rsync_daemon` modes.
attr_accessor :rsync_password
##
# RSync Password File
#
# If specified, this path will be passed to rsync's `--password-file`
# option for daemon authentication.
#
# Used only for `:ssh_daemon` and `:rsync_daemon` modes.
attr_accessor :rsync_password_file
##
# Additional String or Array of options for the rsync cli
attr_accessor :additional_rsync_options
##
# Flag for compressing (only compresses for the transfer)
attr_accessor :compress
##
# Path to store the synced backup package file(s) to.
#
# If no +host+ is specified, then +path+ will be local, and the only
# other used option would be +additional_rsync_options+.
# +path+ will be expanded, so '~/my_path' will expand to '$HOME/my_path'.
#
# If a +host+ is specified, this will be a path on the host.
# If +mode+ is `:ssh` (default), then any relative path, or path starting
# with '~/' will be relative to the directory the ssh_user is logged
# into. For `:ssh_daemon` or `:rsync_daemon` modes, this would reference
# an rsync module/path.
#
# In :ssh_daemon and :rsync_daemon modes, +path+ (or path defined by
# your rsync module) must already exist.
#
# In :ssh mode or local operation (no +host+ specified), +path+ will
# be created if needed - either locally, or on the remote for :ssh mode.
attr_accessor :path
# Defaults: :ssh mode on port 22 (873 for :rsync_daemon),
# no compression, remote path '~/backups'.
def initialize(model, storage_id = nil)
  super

  @mode ||= :ssh
  @port ||= mode == :rsync_daemon ? 873 : 22
  @compress ||= false
  @path ||= "~/backups"
end

private

# Transfers each package file with rsync. A temporary password file
# is written first (daemon modes only) and always removed afterwards.
def transfer!
  write_password_file
  create_remote_path

  package.filenames.each do |filename|
    src = "'#{File.join(Config.tmp_path, filename)}'"
    dest = "#{host_options}'#{File.join(remote_path, filename)}'"
    Logger.info "Syncing to #{dest}..."
    run("#{rsync_command} #{src} #{dest}")
  end
ensure
  remove_password_file
end

##
# Other storages add an additional timestamp directory to this path.
# This is not desired here, since we need to transfer the package files
# to the same location each time.
def remote_path
  @remote_path ||= begin
    if host
      # Strip leading '~/' and trailing '/'; the path is taken
      # relative to the remote login dir (or rsync module).
      path.sub(/^~\//, "").sub(/\/$/, "")
    else
      File.expand_path(path)
    end
  end
end

##
# Runs a 'mkdir -p' command on the host (or locally) to ensure the
# dest_path exists. This is used because we're transferring a single
# file, and rsync won't attempt to create the intermediate directories.
#
# This is only applicable locally and in :ssh mode.
# In :ssh_daemon and :rsync_daemon modes the `path` would include a
# module name that must define a path on the remote that already exists.
def create_remote_path
  if host
    return unless mode == :ssh
    run "#{utility(:ssh)} #{ssh_transport_args} #{host} " +
      %("mkdir -p '#{remote_path}'")
  else
    FileUtils.mkdir_p(remote_path)
  end
end

# Destination prefix: '' (local), 'host:' (:ssh), or
# '[rsync_user@]host::' (daemon modes).
def host_options
  @host_options ||= begin
    if !host
      ""
    elsif mode == :ssh
      "#{host}:"
    else
      user = "#{rsync_user}@" if rsync_user
      "#{user}#{host}::"
    end
  end
end

# Memoized base rsync command line; remote-only options are appended
# when a host is configured.
def rsync_command
  @rsync_command ||= begin
    cmd = utility(:rsync) << " --archive" <<
      " #{Array(additional_rsync_options).join(" ")}".rstrip
    cmd << compress_option << password_option << transport_options if host
    cmd
  end
end

def compress_option
  compress ? " --compress" : ""
end

# --password-file argument for daemon modes. A password given via
# #rsync_password (written to a Tempfile) takes precedence over
# #rsync_password_file.
def password_option
  return "" if mode == :ssh

  path = @password_file ? @password_file.path : rsync_password_file
  path ? " --password-file='#{File.expand_path(path)}'" : ""
end

# TCP port (rsync daemon) or the ssh remote-shell option.
def transport_options
  if mode == :rsync_daemon
    " --port #{port}"
  else
    %( -e "#{utility(:ssh)} #{ssh_transport_args}")
  end
end

def ssh_transport_args
  args = "-p #{port} "
  args << "-l #{ssh_user} " if ssh_user
  args << Array(additional_ssh_options).join(" ")
  args.rstrip
end

# Writes #rsync_password to a closed Tempfile for --password-file.
def write_password_file
  return unless host && rsync_password && mode != :ssh

  @password_file = Tempfile.new("backup-rsync-password")
  @password_file.write(rsync_password)
  @password_file.close
end

def remove_password_file
  @password_file.delete if @password_file
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/dropbox.rb | lib/backup/storage/dropbox.rb | require "dropbox_sdk"
module Backup
module Storage
class Dropbox < Base
include Storage::Cycler
class Error < Backup::Error; end
##
# Dropbox API credentials
attr_accessor :api_key, :api_secret
##
# Path to store cached authorized session.
#
# Relative paths will be expanded using Config.root_path,
# which by default is ~/Backup unless --root-path was used
# on the command line or set in config.rb.
#
# By default, +cache_path+ is '.cache', which would be
# '~/Backup/.cache/' if using the default root_path.
attr_accessor :cache_path
##
# Dropbox Access Type
# Valid values are:
# :app_folder (default)
# :dropbox (full access)
attr_accessor :access_type
##
# Chunk size, specified in MiB, for the ChunkedUploader.
attr_accessor :chunk_size
##
# Number of times to retry failed operations.
#
# Default: 10
attr_accessor :max_retries
##
# Time in seconds to pause before each retry.
#
# Default: 30
attr_accessor :retry_waitsec
##
# Creates a new instance of the storage object
# Defaults: path 'backups', cache_path '.cache', :app_folder access,
# 4 MiB upload chunks, 10 retries spaced 30 seconds apart.
def initialize(model, storage_id = nil)
  super

  @path ||= "backups"
  @cache_path ||= ".cache"
  @access_type ||= :app_folder
  @chunk_size ||= 4 # MiB
  @max_retries ||= 10
  @retry_waitsec ||= 30

  # Dropbox paths are always relative to the access-type root.
  path.sub!(/^\//, "")
end

private

##
# The initial connection to Dropbox will provide the user with an
# authorization url. The user must open this URL and confirm that the
# authorization successfully took place. If this is the case, then the
# user hits 'enter' and the session will be properly established.
# Immediately after establishing the session, the session will be
# serialized and written to a cache file in +cache_path+.
# The cached file will be used from that point on to re-establish a
# connection with Dropbox at a later time. This allows the user to avoid
# having to go to a new Dropbox URL to authorize over and over again.
def connection
  return @connection if @connection

  unless session = cached_session
    Logger.info "Creating a new session!"
    session = create_write_and_return_new_session!
  end

  # will raise an error if session not authorized
  @connection = DropboxClient.new(session, access_type)
rescue => err
  raise Error.wrap(err, "Authorization Failed")
end

##
# Attempt to load a cached session.
# Returns false when no usable cache exists (triggering a new session).
def cached_session
  session = false
  if File.exist?(cached_file)
    begin
      session = DropboxSession.deserialize(File.read(cached_file))
      Logger.info "Session data loaded from cache!"
    rescue => err
      # Fall through with `false`; a new session will be created.
      Logger.warn Error.wrap(err, <<-EOS)
        Could not read session data from cache.
        Cache data might be corrupt.
      EOS
    end
  end
  session
end

##
# Transfer each of the package files to Dropbox in chunks of +chunk_size+.
# Each chunk will be retried +max_retries+ times, pausing +retry_waitsec+
# between retries, if errors occur.
def transfer!
  package.filenames.each do |filename|
    src = File.join(Config.tmp_path, filename)
    dest = File.join(remote_path, filename)
    Logger.info "Storing '#{dest}'..."

    uploader = nil
    File.open(src, "r") do |file|
      uploader = connection.get_chunked_uploader(file, file.stat.size)
      while uploader.offset < uploader.total_size
        with_retries do
          uploader.upload(1024**2 * chunk_size)
        end
      end
    end

    # Commit the uploaded chunks to their final path.
    with_retries do
      uploader.finish(dest)
    end
  end
rescue => err
  raise Error.wrap(err, "Upload Failed!")
end
# Yields the given block, retrying StandardError failures up to
# +max_retries+ times with a +retry_waitsec+ pause between attempts.
# Once the limit is exceeded, the last error is re-raised.
def with_retries
  attempts = 0
  begin
    yield
  rescue StandardError => err
    attempts += 1
    raise if attempts > max_retries

    Logger.info Error.wrap(err, "Retry ##{attempts} of #{max_retries}.")
    sleep(retry_waitsec)
    retry
  end
end
# Called by the Cycler.
# Any error raised will be logged as a warning.
def remove!(package)
  Logger.info "Removing backup package dated #{package.time}..."

  connection.file_delete(remote_path_for(package))
end

# Location of the cached session file: <cache_path>/<api_key><api_secret>,
# with a relative cache_path resolved against Config.root_path.
def cached_file
  path = cache_path.start_with?("/") ?
    cache_path : File.join(Config.root_path, cache_path)
  File.join(path, api_key + api_secret)
end

##
# Serializes and writes the Dropbox session to a cache file
def write_cache!(session)
  FileUtils.mkdir_p File.dirname(cached_file)
  File.open(cached_file, "w") do |cache_file|
    cache_file.write(session.serialize)
  end
end

##
# Create a new session, write a serialized version of it to the
# .cache directory, and return the session object.
# Interactive: prints an authorization URL and waits (up to 180s)
# for the user to confirm before requesting the access token.
def create_write_and_return_new_session!
  require "timeout"

  session = DropboxSession.new(api_key, api_secret)

  # grab the request token for session
  session.get_request_token

  template = Backup::Template.new(
    session: session, cached_file: cached_file
  )
  template.render("storage/dropbox/authorization_url.erb")

  # wait for user to hit 'return' to continue
  Timeout.timeout(180) { STDIN.gets }

  # this will raise an error if the user did not
  # visit the authorization_url and grant access
  #
  # get the access token from the server
  # this will be stored with the session in the cache file
  session.get_access_token

  template.render("storage/dropbox/authorized.erb")
  write_cache!(session)
  template.render("storage/dropbox/cache_file_written.erb")

  session
rescue => err
  raise Error.wrap(err, "Could not create or authenticate a new session")
end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/sftp.rb | lib/backup/storage/sftp.rb | require "net/sftp"
module Backup
module Storage
class SFTP < Base
  include Storage::Cycler

  ##
  # Server credentials
  attr_accessor :username, :password, :ssh_options

  ##
  # Server IP Address and SFTP port
  attr_accessor :ip, :port

  # Defaults: port 22, remote path 'backups'.
  def initialize(model, storage_id = nil)
    super

    @ssh_options ||= {}
    @port ||= 22
    @path ||= "backups"
    # SFTP paths are relative to the login directory; strip any '~/'.
    path.sub!(/^~\//, "")
  end

  private

  # Opens an SFTP session and yields it; the session is closed
  # when the block returns.
  def connection
    Net::SFTP.start(
      ip, username, { password: password, port: port }.merge(ssh_options)
    ) { |sftp| yield sftp }
  end

  # Uploads each package file from Config.tmp_path into remote_path.
  def transfer!
    connection do |sftp|
      create_remote_path(sftp)

      package.filenames.each do |filename|
        src = File.join(Config.tmp_path, filename)
        dest = File.join(remote_path, filename)
        Logger.info "Storing '#{ip}:#{dest}'..."
        sftp.upload!(src, dest)
      end
    end
  end

  # Called by the Cycler.
  # Any error raised will be logged as a warning.
  def remove!(package)
    Logger.info "Removing backup package dated #{package.time}..."

    remote_path = remote_path_for(package)
    connection do |sftp|
      package.filenames.each do |filename|
        sftp.remove!(File.join(remote_path, filename))
      end

      sftp.rmdir!(remote_path)
    end
  end

  ##
  # Creates (if they don't exist yet) all the directories on the remote
  # server in order to upload the backup file. Net::SFTP does not support
  # paths to directories that don't yet exist when creating new
  # directories. Instead, we split the parts up in to an array (for each
  # '/') and loop through that to create the directories one by one.
  # Net::SFTP raises an exception when the directory it's trying to create
  # already exists, so we have to rescue it.
  def create_remote_path(sftp)
    path_parts = []
    remote_path.split("/").each do |path_part|
      path_parts << path_part
      begin
        sftp.mkdir!(path_parts.join("/"))
      rescue Net::SFTP::StatusException; end
    end
  end
end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/base.rb | lib/backup/storage/base.rb | module Backup
module Storage
  # Common behavior for all storage backends: configuration defaults,
  # the perform!/cycle! lifecycle, and remote path construction.
  class Base
    include Config::Helpers

    ##
    # Base path on the remote where backup package files will be stored.
    attr_accessor :path

    ##
    # Retention policy. An Integer caps how many packages are kept in
    # the remote location (the oldest is removed to make room for the
    # newest); a Time removes packages _older_ than the given date.
    #
    # @!attribute [rw] keep
    #   @param [Integer|Time]
    #   @return [Integer|Time]
    attr_accessor :keep

    attr_reader :model, :package, :storage_id

    ##
    # +storage_id+ is a user-defined string that uniquely identifies
    # this storage when several of the same type are attached to one
    # backup model (required in that case). It is appended to the YAML
    # state file used for cycling backups.
    def initialize(model, storage_id = nil, &block)
      @model = model
      @package = model.package
      @storage_id = storage_id.to_s.gsub(/\W/, "_") if storage_id

      load_defaults!
      instance_eval(&block) if block_given?
    end

    # Transfers the package, then cycles old backups when a retention
    # policy is set and the concrete storage supports cycling.
    def perform!
      Logger.info "#{storage_name} Started..."
      transfer!
      cycle! if respond_to?(:cycle!, true) && (keep.to_i > 0 || keep.is_a?(Time))
      Logger.info "#{storage_name} Finished!"
    end

    private

    ##
    # Remote directory for the given package (defaults to the current one).
    def remote_path(pkg = package)
      if path.empty?
        File.join(pkg.trigger, pkg.time)
      else
        File.join(path, pkg.trigger, pkg.time)
      end
    end
    alias :remote_path_for :remote_path

    # "Storage::Type (storage_id)" label used in log messages.
    def storage_name
      @storage_name ||= begin
        label = self.class.to_s.sub("Backup::", "")
        storage_id ? "#{label} (#{storage_id})" : label
      end
    end
  end
end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/ftp.rb | lib/backup/storage/ftp.rb | require "net/ftp"
module Backup
  module Storage
    # Stores backup packages on a remote host over plain FTP.
    class FTP < Base
      include Storage::Cycler

      ##
      # Credentials used to log in to the remote server.
      attr_accessor :username, :password

      ##
      # Remote host address and FTP control port.
      attr_accessor :ip, :port

      ##
      # Use passive mode?
      attr_accessor :passive_mode

      ##
      # Connection timeout in seconds; when set, it is applied to both
      # Net::FTP's open_timeout and read_timeout.
      #
      # @!attribute [rw] timeout
      #   @param [Integer|Float]
      #   @return [Integer|Float]
      attr_accessor :timeout

      # Fills in defaults for unset options and strips a leading '~/'
      # from the remote path.
      def initialize(model, storage_id = nil)
        super
        @port ||= 21
        @path ||= "backups"
        @passive_mode ||= false
        @timeout ||= nil
        path.sub!(/^~\//, "")
      end

      private

      ##
      # Yields an authenticated Net::FTP session.
      #
      # Net::FTP hard-codes its port in the FTP_PORT constant, so to
      # honor a user-supplied port the constant is removed and re-set
      # before the connection is opened.
      def connection
        if Net::FTP.const_defined?(:FTP_PORT)
          Net::FTP.send(:remove_const, :FTP_PORT)
        end
        Net::FTP.send(:const_set, :FTP_PORT, port)

        # The default passive setting changed between Ruby 2.2 and 2.3;
        # force it off here and opt back in below when configured.
        if Net::FTP.respond_to?(:default_passive=)
          Net::FTP.default_passive = false
        end

        Net::FTP.open(ip, username, password) do |session|
          if timeout
            session.open_timeout = timeout
            session.read_timeout = timeout
          end
          session.passive = true if passive_mode
          yield session
        end
      end

      # Uploads every file in the current backup package beneath
      # #remote_path, creating the directory tree first.
      def transfer!
        connection do |session|
          create_remote_path(session)
          package.filenames.each do |name|
            local = File.join(Config.tmp_path, name)
            remote = File.join(remote_path, name)
            Logger.info "Storing '#{ip}:#{remote}'..."
            session.put(local, remote)
          end
        end
      end

      # Called by the Cycler.
      # Any error raised will be logged as a warning.
      def remove!(package)
        Logger.info "Removing backup package dated #{package.time}..."
        remote_path = remote_path_for(package)
        connection do |session|
          package.filenames.each do |name|
            session.delete(File.join(remote_path, name))
          end
          session.rmdir(remote_path)
        end
      end

      ##
      # Net::FTP cannot create nested directories in a single call, so
      # each path segment is created in turn. Net::FTPPermError signals
      # the directory already exists and is deliberately ignored.
      def create_remote_path(session)
        remote_path.split("/").inject([]) do |segments, segment|
          segments << segment
          begin
            session.mkdir(segments.join("/"))
          rescue Net::FTPPermError
          end
          segments
        end
      end
    end
  end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
backup/backup | https://github.com/backup/backup/blob/86c9b07c2a2974376888b1506001f77792d6359a/lib/backup/storage/s3.rb | lib/backup/storage/s3.rb | require "backup/cloud_io/s3"
module Backup
  module Storage
    # Stores backup packages in an Amazon S3 bucket via CloudIO::S3.
    class S3 < Base
      include Storage::Cycler

      class Error < Backup::Error; end

      ##
      # Amazon Simple Storage Service (S3) Credentials
      attr_accessor :access_key_id, :secret_access_key, :use_iam_profile

      ##
      # Amazon S3 bucket name
      attr_accessor :bucket

      ##
      # Region of the specified S3 bucket
      attr_accessor :region

      ##
      # Multipart chunk size, specified in MiB.
      #
      # Each package file larger than +chunk_size+
      # will be uploaded using S3 Multipart Upload.
      #
      # Minimum: 5 (but may be disabled with 0)
      # Maximum: 5120
      # Default: 5
      attr_accessor :chunk_size

      ##
      # Number of times to retry failed operations.
      #
      # Default: 10
      attr_accessor :max_retries

      ##
      # Time in seconds to pause before each retry.
      #
      # Default: 30
      attr_accessor :retry_waitsec

      ##
      # Encryption algorithm to use for Amazon Server-Side Encryption
      #
      # Supported values:
      #
      # - :aes256
      #
      # Default: nil
      attr_accessor :encryption

      ##
      # Storage class to use for the S3 objects uploaded
      #
      # Supported values:
      #
      # - :standard (default)
      # - :standard_ia
      # - :reduced_redundancy
      #
      # Default: :standard
      attr_accessor :storage_class

      ##
      # Additional options to pass along to fog.
      # e.g. Fog::Storage.new({ :provider => 'AWS' }.merge(fog_options))
      attr_accessor :fog_options

      # Applies defaults for unset options, strips a leading '/' from
      # the path, then validates the configuration (raises Error if bad).
      def initialize(model, storage_id = nil)
        super
        @chunk_size ||= 5 # MiB
        @max_retries ||= 10
        @retry_waitsec ||= 30
        @path ||= "backups"
        @storage_class ||= :standard
        @path = @path.sub(/^\//, "")
        check_configuration
      end

      private

      # Memoized CloudIO::S3 client built from this storage's settings.
      def cloud_io
        @cloud_io ||= CloudIO::S3.new(
          access_key_id: access_key_id,
          secret_access_key: secret_access_key,
          use_iam_profile: use_iam_profile,
          region: region,
          bucket: bucket,
          encryption: encryption,
          storage_class: storage_class,
          max_retries: max_retries,
          retry_waitsec: retry_waitsec,
          chunk_size: chunk_size,
          fog_options: fog_options
        )
      end

      # Uploads each file in the current package from Config.tmp_path
      # to its remote_path location inside the bucket.
      def transfer!
        package.filenames.each do |filename|
          src = File.join(Config.tmp_path, filename)
          dest = File.join(remote_path, filename)
          Logger.info "Storing '#{bucket}/#{dest}'..."
          cloud_io.upload(src, dest)
        end
      end

      # Called by the Cycler.
      # Any error raised will be logged as a warning.
      def remove!(package)
        Logger.info "Removing backup package dated #{package.time}..."
        remote_path = remote_path_for(package)
        objects = cloud_io.objects(remote_path)
        raise Error, "Package at '#{remote_path}' not found" if objects.empty?
        cloud_io.delete(objects)
      end

      # Validates required credentials and option values; raises Error
      # with a descriptive message on the first violation found.
      # NOTE(review): heredoc bodies are part of the raised messages —
      # their exact leading whitespace must not be changed.
      def check_configuration
        # Credentials may be omitted only when an IAM profile is used.
        required =
          if use_iam_profile
            %w[bucket]
          else
            %w[access_key_id secret_access_key bucket]
          end
        raise Error, <<-EOS if required.map { |name| send(name) }.any?(&:nil?)
Configuration Error
#{required.map { |name| "##{name}" }.join(", ")} are all required
EOS

        raise Error, <<-EOS if chunk_size > 0 && !chunk_size.between?(5, 5120)
Configuration Error
#chunk_size must be between 5 and 5120 (or 0 to disable multipart)
EOS

        raise Error, <<-EOS if encryption && encryption.to_s.upcase != "AES256"
Configuration Error
#encryption must be :aes256 or nil
EOS

        classes = ["STANDARD", "STANDARD_IA", "REDUCED_REDUNDANCY"]
        raise Error, <<-EOS unless classes.include?(storage_class.to_s.upcase)
Configuration Error
#storage_class must be :standard or :standard_ia or :reduced_redundancy
EOS
      end
    end
  end
end
| ruby | MIT | 86c9b07c2a2974376888b1506001f77792d6359a | 2026-01-04T15:45:04.712671Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.