repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/db/schema.rb | examples/ranking/db/schema.rb | # This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# This file is the source Rails uses to define your schema when running `bin/rails
# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
# be faster and is potentially less error prone than running all of your
# migrations from scratch. Old migrations may fail to apply correctly if those
# migrations use external dependencies or application code.
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema[7.1].define(version: 0) do
create_schema "_timescaledb_cache"
create_schema "_timescaledb_debug"
# These are extensions that must be enabled in order to support this database
enable_extension "pg_stat_statements"
enable_extension "pg_trgm"
enable_extension "plpgsql"
enable_extension "timescaledb"
enable_extension "timescaledb_toolkit"
create_table "network_device_data", id: false, force: :cascade do |t|
t.timestamptz "time", null: false
t.integer "device", null: false
t.integer "id", null: false
t.bigint "counter32bit"
t.bigint "counter64bit"
t.index ["time"], name: "network_device_data_time_idx", order: :desc
end
create_table "pages", id: false, force: :cascade do |t|
t.timestamptz "time", null: false
t.text "url", null: false
t.float "time_to_fetch"
t.text "title", null: false
t.text "headers", default: [], array: true
t.jsonb "links"
t.text "body", default: [], array: true
t.text "codeblocks", default: [], array: true
t.integer "html_size"
t.tsvector "search_vector"
t.index ["search_vector"], name: "pages_search_vector_idx", using: :gin
t.index ["time"], name: "pages_time_idx", order: :desc
end
create_table "sample", id: false, force: :cascade do |t|
t.timestamptz "time", null: false
t.text "device_id", null: false
t.float "value", null: false
t.index ["time"], name: "sample_time_idx", order: :desc
end
create_table "ticks", id: false, force: :cascade do |t|
t.timestamptz "time", null: false
t.text "symbol", null: false
t.decimal "price", null: false
t.decimal "volume", null: false
t.index ["time"], name: "ticks_time_idx", order: :desc
end
create_view "network_data_final", sql_definition: <<-SQL
SELECT id,
bucket,
interpolated_rate(counter32bit_agg, bucket, 'PT1M'::interval, lag(counter32bit_agg) OVER (PARTITION BY id ORDER BY bucket), lead(counter32bit_agg) OVER (PARTITION BY id ORDER BY bucket)) AS counter32bitrate,
interpolated_rate(counter64bit_agg, bucket, 'PT1M'::interval, lag(counter64bit_agg) OVER (PARTITION BY id ORDER BY bucket), lead(counter64bit_agg) OVER (PARTITION BY id ORDER BY bucket)) AS counter64bitrate
FROM network_data_agg_1min
ORDER BY id, bucket;
SQL
create_view "network_data_final_with_resets", sql_definition: <<-SQL
WITH counter_data AS (
SELECT network_device_data."time",
network_device_data.device,
network_device_data.id,
network_device_data.counter64bit,
lag(network_device_data.counter64bit) OVER (PARTITION BY network_device_data.device, network_device_data.id ORDER BY network_device_data."time") AS prev_counter64bit
FROM network_device_data
), resets_detected AS (
SELECT counter_data."time",
counter_data.device,
counter_data.id,
counter_data.counter64bit,
counter_data.prev_counter64bit,
CASE
WHEN (counter_data.counter64bit < counter_data.prev_counter64bit) THEN 1
ELSE 0
END AS reset_detected
FROM counter_data
), rate_calculation AS (
SELECT resets_detected."time",
resets_detected.device,
resets_detected.id,
resets_detected.counter64bit,
resets_detected.prev_counter64bit,
resets_detected.reset_detected,
CASE
WHEN (resets_detected.reset_detected = 1) THEN (((resets_detected.counter64bit)::numeric + ('18446744073709551615'::numeric - (COALESCE(resets_detected.prev_counter64bit, (0)::bigint))::numeric)) / EXTRACT(epoch FROM (resets_detected."time" - lag(resets_detected."time") OVER (PARTITION BY resets_detected.device, resets_detected.id ORDER BY resets_detected."time"))))
ELSE (((resets_detected.counter64bit - COALESCE(resets_detected.prev_counter64bit, resets_detected.counter64bit)))::numeric / EXTRACT(epoch FROM (resets_detected."time" - lag(resets_detected."time") OVER (PARTITION BY resets_detected.device, resets_detected.id ORDER BY resets_detected."time"))))
END AS rate
FROM resets_detected
)
SELECT "time",
device,
id,
rate
FROM rate_calculation
ORDER BY "time", device, id;
SQL
create_view "ohlcv_1m", sql_definition: <<-SQL
SELECT bucket,
symbol,
open(candlestick) AS open,
high(candlestick) AS high,
low(candlestick) AS low,
close(candlestick) AS close,
volume(candlestick) AS volume,
vwap(candlestick) AS vwap
FROM _ohlcv_1m;
SQL
create_view "ohlcv_1h", sql_definition: <<-SQL
SELECT bucket,
symbol,
open(candlestick) AS open,
high(candlestick) AS high,
low(candlestick) AS low,
close(candlestick) AS close,
volume(candlestick) AS volume,
vwap(candlestick) AS vwap
FROM _ohlcv_1h;
SQL
create_view "ohlcv_1d", sql_definition: <<-SQL
SELECT bucket,
symbol,
open(candlestick) AS open,
high(candlestick) AS high,
low(candlestick) AS low,
close(candlestick) AS close,
volume(candlestick) AS volume,
vwap(candlestick) AS vwap
FROM _ohlcv_1d;
SQL
create_hypertable "network_device_data", time_column: "time", chunk_time_interval: "7 days"
create_hypertable "pages", time_column: "time", chunk_time_interval: "1 day"
create_hypertable "sample", time_column: "time", chunk_time_interval: "7 days"
create_hypertable "ticks", time_column: "time", chunk_time_interval: "1 day", compress_segmentby: "symbol", compress_orderby: "time ASC", compress_after: "7 days"
create_continuous_aggregate("network_data_agg_1min", <<-SQL, , materialized_only: true, finalized: true)
SELECT time_bucket('PT1M'::interval, "time") AS bucket,
device,
id,
counter_agg("time", (counter32bit)::double precision) AS counter32bit_agg,
counter_agg("time", (counter64bit)::double precision) AS counter64bit_agg
FROM network_device_data
GROUP BY (time_bucket('PT1M'::interval, "time")), device, id
SQL
create_continuous_aggregate("_ohlcv_1m", <<-SQL, , materialized_only: false, finalized: true)
SELECT time_bucket('PT1M'::interval, "time") AS bucket,
symbol,
candlestick_agg("time", (price)::double precision, (volume)::double precision) AS candlestick
FROM ticks
GROUP BY (time_bucket('PT1M'::interval, "time")), symbol
SQL
create_continuous_aggregate("_ohlcv_1h", <<-SQL, , materialized_only: true, finalized: true)
SELECT time_bucket('PT1H'::interval, bucket) AS bucket,
symbol,
rollup(candlestick) AS candlestick
FROM _ohlcv_1m
GROUP BY (time_bucket('PT1H'::interval, bucket)), symbol
SQL
create_continuous_aggregate("_ohlcv_1d", <<-SQL, , materialized_only: true, finalized: true)
SELECT time_bucket('P1D'::interval, bucket) AS bucket,
symbol,
rollup(candlestick) AS candlestick
FROM _ohlcv_1h
GROUP BY (time_bucket('P1D'::interval, bucket)), symbol
SQL
create_continuous_aggregate("stats_agg_1m_sample", <<-SQL, refresh_policies: { start_offset: "INTERVAL '00:05:00'", end_offset: "INTERVAL '00:01:00'", schedule_interval: "INTERVAL '60'"}, materialized_only: false, finalized: true)
SELECT time_bucket('PT1M'::interval, "time") AS bucket,
device_id,
stats_agg(value) AS stats_agg
FROM sample
GROUP BY (time_bucket('PT1M'::interval, "time")), device_id
SQL
create_continuous_aggregate("stats_agg_1h_sample", <<-SQL, refresh_policies: { start_offset: "INTERVAL '03:00:00'", end_offset: "INTERVAL '01:00:00'", schedule_interval: "INTERVAL '5'"}, materialized_only: false, finalized: true)
SELECT time_bucket('PT1H'::interval, bucket) AS bucket,
device_id,
rollup(stats_agg) AS stats_agg
FROM stats_agg_1m_sample
GROUP BY (time_bucket('PT1H'::interval, bucket)), device_id
SQL
create_continuous_aggregate("stats_agg_1d_sample", <<-SQL, refresh_policies: { start_offset: "INTERVAL '3 days'", end_offset: "INTERVAL '01:00:00'", schedule_interval: "INTERVAL '5'"}, materialized_only: false, finalized: true)
SELECT time_bucket('P1D'::interval, bucket) AS bucket,
device_id,
rollup(stats_agg) AS stats_agg
FROM stats_agg_1h_sample
GROUP BY (time_bucket('P1D'::interval, bucket)), device_id
SQL
create_continuous_aggregate("stats_agg_monthly_sample", <<-SQL, refresh_policies: { start_offset: "INTERVAL '3 mons'", end_offset: "INTERVAL '01:00:00'", schedule_interval: "INTERVAL '5'"}, materialized_only: false, finalized: true)
SELECT time_bucket('P1M'::interval, bucket) AS bucket,
device_id,
rollup(stats_agg) AS stats_agg
FROM stats_agg_1d_sample
GROUP BY (time_bucket('P1M'::interval, bucket)), device_id
SQL
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/db/migrate/20220209120910_create_plays.rb | examples/ranking/db/migrate/20220209120910_create_plays.rb | class CreatePlays < ActiveRecord::Migration[7.0]
def change
enable_extension("timescaledb") unless extensions.include? "timescaledb"
hypertable_options = {
time_column: 'created_at',
chunk_time_interval: '1 day',
compress_segmentby: 'game_id',
compress_orderby: 'created_at',
compress_after: '7 days'
}
create_table :plays, hypertable: hypertable_options, id: false do |t|
t.references :game, null: false, foreign_key: false
t.integer :score
t.decimal :total_time
t.timestamps
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/db/migrate/20220209120747_create_games.rb | examples/ranking/db/migrate/20220209120747_create_games.rb | class CreateGames < ActiveRecord::Migration[7.0]
def change
create_table :games do |t|
t.string :name
t.string :description
t.timestamps
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/db/migrate/20220209143347_create_score_per_hours.rb | examples/ranking/db/migrate/20220209143347_create_score_per_hours.rb | class CreateScorePerHours < ActiveRecord::Migration[7.0]
def change
create_scenic_continuous_aggregate :score_per_hours
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/application.rb | examples/ranking/config/application.rb | require_relative "boot"
require "rails"
# Pick the frameworks you want:
require "active_model/railtie"
require "active_job/railtie"
require "active_record/railtie"
require "active_storage/engine"
require "action_controller/railtie"
# require "action_mailer/railtie"
require "action_mailbox/engine"
require "action_text/engine"
require "action_view/railtie"
# require "action_cable/engine"
# require "rails/test_unit/railtie"
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
module Ranking
class Application < Rails::Application
# Initialize configuration defaults for originally generated Rails version.
config.load_defaults 7.0
# Configuration for the application, engines, and railties goes here.
#
# These settings can be overridden in specific environments using the files
# in config/environments, which are processed later.
#
# config.time_zone = "Central Time (US & Canada)"
# config.eager_load_paths << Rails.root.join("extras")
# Only loads a smaller set of middleware suitable for API only apps.
# Middleware like session, flash, cookies can be added back manually.
# Skip views, helpers and assets when generating a new resource.
config.api_only = true
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/environment.rb | examples/ranking/config/environment.rb | # Load the Rails application.
require_relative "application"
# Initialize the Rails application.
Rails.application.initialize!
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/puma.rb | examples/ranking/config/puma.rb | # Puma can serve each request in a thread from an internal thread pool.
# The `threads` method setting takes two numbers: a minimum and maximum.
# Any libraries that use thread pools should be configured to match
# the maximum value specified for Puma. Default is set to 5 threads for minimum
# and maximum; this matches the default thread size of Active Record.
#
max_threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 }
min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count }
threads min_threads_count, max_threads_count
# Specifies the `worker_timeout` threshold that Puma will use to wait before
# terminating a worker in development environments.
#
worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development"
# Specifies the `port` that Puma will listen on to receive requests; default is 3000.
#
port ENV.fetch("PORT") { 3000 }
# Specifies the `environment` that Puma will run in.
#
environment ENV.fetch("RAILS_ENV") { "development" }
# Specifies the `pidfile` that Puma will use.
pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" }
# Specifies the number of `workers` to boot in clustered mode.
# Workers are forked web server processes. If using threads and workers together
# the concurrency of the application would be max `threads` * `workers`.
# Workers do not work on JRuby or Windows (both of which do not support
# processes).
#
# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
# Use the `preload_app!` method when specifying a `workers` number.
# This directive tells Puma to first boot the application and load code
# before forking the application. This takes advantage of Copy On Write
# process behavior so workers use less memory.
#
# preload_app!
# Allow puma to be restarted by `bin/rails restart` command.
plugin :tmp_restart
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/routes.rb | examples/ranking/config/routes.rb | Rails.application.routes.draw do
# Define your application routes per the DSL in https://guides.rubyonrails.org/routing.html
# Defines the root path route ("/")
# root "articles#index"
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/boot.rb | examples/ranking/config/boot.rb | ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__)
require "bundler/setup" # Set up gems listed in the Gemfile.
require "bootsnap/setup" # Speed up boot time by caching expensive operations.
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/initializers/filter_parameter_logging.rb | examples/ranking/config/initializers/filter_parameter_logging.rb | # Be sure to restart your server when you modify this file.
# Configure parameters to be filtered from the log file. Use this to limit dissemination of
# sensitive information. See the ActiveSupport::ParameterFilter documentation for supported
# notations and behaviors.
Rails.application.config.filter_parameters += [
:passw, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn
]
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/initializers/timescale.rb | examples/ranking/config/initializers/timescale.rb | require 'timescaledb'
require 'scenic'
ActiveSupport.on_load(:active_record) { extend Timescaledb::ActsAsHypertable }
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/initializers/inflections.rb | examples/ranking/config/initializers/inflections.rb | # Be sure to restart your server when you modify this file.
# Add new inflection rules using the following format. Inflections
# are locale specific, and you may define rules for as many different
# locales as you wish. All of these examples are active by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.plural /^(ox)$/i, "\\1en"
# inflect.singular /^(ox)en/i, "\\1"
# inflect.irregular "person", "people"
# inflect.uncountable %w( fish sheep )
# end
# These inflection rules are supported but not enabled by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.acronym "RESTful"
# end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/initializers/cors.rb | examples/ranking/config/initializers/cors.rb | # Be sure to restart your server when you modify this file.
# Avoid CORS issues when API is called from the frontend app.
# Handle Cross-Origin Resource Sharing (CORS) in order to accept cross-origin AJAX requests.
# Read more: https://github.com/cyu/rack-cors
# Rails.application.config.middleware.insert_before 0, Rack::Cors do
# allow do
# origins "example.com"
#
# resource "*",
# headers: :any,
# methods: [:get, :post, :put, :patch, :delete, :options, :head]
# end
# end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/environments/test.rb | examples/ranking/config/environments/test.rb | require "active_support/core_ext/integer/time"
# The test environment is used exclusively to run your application's
# test suite. You never need to work with it otherwise. Remember that
# your test database is "scratch space" for the test suite and is wiped
# and recreated between test runs. Don't rely on the data there!
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Turn false under Spring and add config.action_view.cache_template_loading = true.
config.cache_classes = true
# Eager loading loads your whole application. When running a single test locally,
# this probably isn't necessary. It's a good idea to do in a continuous integration
# system, or in some way before deploying your code.
config.eager_load = ENV["CI"].present?
# Configure public file server for tests with Cache-Control for performance.
config.public_file_server.enabled = true
config.public_file_server.headers = {
"Cache-Control" => "public, max-age=#{1.hour.to_i}"
}
# Show full error reports and disable caching.
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
config.cache_store = :null_store
# Raise exceptions instead of rendering exception templates.
config.action_dispatch.show_exceptions = false
# Disable request forgery protection in test environment.
config.action_controller.allow_forgery_protection = false
# Store uploaded files on the local file system in a temporary directory.
config.active_storage.service = :test
# Print deprecation notices to the stderr.
config.active_support.deprecation = :stderr
# Raise exceptions for disallowed deprecations.
config.active_support.disallowed_deprecation = :raise
# Tell Active Support which deprecation messages to disallow.
config.active_support.disallowed_deprecation_warnings = []
# Raises error for missing translations.
# config.i18n.raise_on_missing_translations = true
# Annotate rendered view with file names.
# config.action_view.annotate_rendered_view_with_filenames = true
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/environments/development.rb | examples/ranking/config/environments/development.rb | require "active_support/core_ext/integer/time"
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# In the development environment your application's code is reloaded any time
# it changes. This slows down response time but is perfect for development
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
# Do not eager load code on boot.
config.eager_load = false
# Show full error reports.
config.consider_all_requests_local = true
# Enable server timing
config.server_timing = true
# Enable/disable caching. By default caching is disabled.
# Run rails dev:cache to toggle caching.
if Rails.root.join("tmp/caching-dev.txt").exist?
config.cache_store = :memory_store
config.public_file_server.headers = {
"Cache-Control" => "public, max-age=#{2.days.to_i}"
}
else
config.action_controller.perform_caching = false
config.cache_store = :null_store
end
# Store uploaded files on the local file system (see config/storage.yml for options).
config.active_storage.service = :local
# Print deprecation notices to the Rails logger.
config.active_support.deprecation = :log
# Raise exceptions for disallowed deprecations.
config.active_support.disallowed_deprecation = :raise
# Tell Active Support which deprecation messages to disallow.
config.active_support.disallowed_deprecation_warnings = []
# Raise an error on page load if there are pending migrations.
config.active_record.migration_error = :page_load
# Highlight code that triggered database queries in logs.
config.active_record.verbose_query_logs = true
# Raises error for missing translations.
# config.i18n.raise_on_missing_translations = true
# Annotate rendered view with file names.
# config.action_view.annotate_rendered_view_with_filenames = true
# Uncomment if you wish to allow Action Cable access from any origin.
# config.action_cable.disable_request_forgery_protection = true
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/ranking/config/environments/production.rb | examples/ranking/config/environments/production.rb | require "active_support/core_ext/integer/time"
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
# Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"]
# or in config/master.key. This key is used to decrypt credentials (and other encrypted files).
# config.require_master_key = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.public_file_server.enabled = ENV["RAILS_SERVE_STATIC_FILES"].present?
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.asset_host = "http://assets.example.com"
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = "X-Sendfile" # for Apache
# config.action_dispatch.x_sendfile_header = "X-Accel-Redirect" # for NGINX
# Store uploaded files on the local file system (see config/storage.yml for options).
config.active_storage.service = :local
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Include generic and useful information about system operation, but avoid logging too much
# information to avoid inadvertent exposure of personally identifiable information (PII).
config.log_level = :info
# Prepend all log lines with the following tags.
config.log_tags = [ :request_id ]
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Use a real queuing backend for Active Job (and separate queues per environment).
# config.active_job.queue_adapter = :resque
# config.active_job.queue_name_prefix = "ranking_production"
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Don't log any deprecations.
config.active_support.report_deprecations = false
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Use a different logger for distributed setups.
# require "syslog/logger"
# config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new "app-name")
if ENV["RAILS_LOG_TO_STDOUT"].present?
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = config.log_formatter
config.logger = ActiveSupport::TaggedLogging.new(logger)
end
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/chatgpt/openai-cli.rb | examples/chatgpt/openai-cli.rb | require 'bundler/inline'
gemfile(true) do
source 'https://rubygems.org'
gem 'timescaledb', path: '../../' #git: 'https://github.com/jonatas/timescaledb.git'
gem 'rest-client'
gem 'pry'
gem 'markdown'
gem 'rouge'
gem 'redcarpet'
gem 'tty-markdown'
gem 'tty-link'
gem 'readline'
gem 'ruby-openai'
end
require 'json'
require 'time'
OpenAI.configure do |config|
config.access_token = ENV["GPT4_KEY"]
end
PG_URI = ENV['PG_URI'] || ARGV[ARGV.index("--pg-uri")]
def topic
ARGV[1] || ENV['USER']
end
def instructions
ARGV.select{|f| File.exist?(f)} || ["instructions.md"]
end
INSTRUCTIONS = instructions.map(&IO.method(:read)).join("\n")
WELCOME_INFO = <<~MD
# Chat GPT + TimescaleDB
Welcome #{topic} to the ChatGPT command line tool!
## Commands:
* Enter 'quit' to exit.
* Enter 'debug' to enter debug mode.
* Enter any other text to chat with GPT.
## Initial instructions
#{INSTRUCTIONS}
MD
class Conversation < ActiveRecord::Base
self.primary_key = nil
acts_as_hypertable
scope :history, -> {
with_no_logs do
where(:topic => topic)
.select(:ts, <<~SQL).map{|e|e["chat"]}.join("\n")
'User: ' || user_input || '\n' ||
'AI: ' || ai_response || '\n' as chat
SQL
end
}
default_scope { order(ts: :asc) }
end
class SQLExtractor < Redcarpet::Render::Base
attr_reader :sql
def block_code(code, language)
if language == 'sql'
@sql ||= []
@sql << code
code
else
""
end
end
end
def sql_from_markdown(content)
extractor = SQLExtractor.new
md = Redcarpet::Markdown
.new(extractor, fenced_code_blocks: true)
md.render(content)
extractor.sql
end
def client
OpenAI::Client.new
end
def call_gpt4_api(prompt)
full_prompt = INSTRUCTIONS +
"\nHistory: #{Conversation.history}" +
"\nInput: #{prompt}"
response = client.chat(
parameters: {
model: "gpt-4",
max_tokens: 1000,
messages: [{ role: "user", content: full_prompt}],
temperature: 0,
})
response.dig("choices", 0, "message", "content").strip
end
def execute(query)
begin
ActiveRecord::Base.connection.execute(query)
rescue => e
"Query Error: #{e.message}"
end
end
def info(content)
puts TTY::Markdown.parse(content)
end
def chat_mode
info WELCOME_INFO
timeout = 300 # Set the timeout in seconds
loop do
print "\n#{topic}: "
# use readline to get input
input = Readline.readline(topic, true)
next if input =~ /^\s*$/
case input.downcase
when /^(quit|exit)\s+$/
puts "Exiting chat."
break
when 'debug'
require "pry";binding.pry
else
with_no_logs do
chat(input) rescue info($!)
end
end
end
end
def run_queries queries
queries.each_with_index.map do |query,i|
sql = query.gsub(/#\{(.*)\}/){eval($1)}
json = execute(sql).to_json
if json.length > 1000
json = json[0..1000]+"... (truncated)"
end
<<~MARKDOWN
Result from query #{i+1}:
```json
#{json}
```
MARKDOWN
end.join("\n")
end
def chat(prompt)
response = call_gpt4_api(prompt)
with_no_logs do
Conversation.create(topic: topic,
user_input: prompt,
ai_response: response,
ts: Time.now)
end
info("**AI:** #{response}")
queries = sql_from_markdown(response)
if queries&.any?
output = run_queries(queries)
info(output)
chat(output)
end
end
def with_no_logs
old_logger = ActiveRecord::Base.logger
ActiveRecord::Base.logger = nil
ret = yield
ActiveRecord::Base.logger = old_logger
ret
end
def main
ActiveRecord::Base.logger = Logger.new(STDOUT)
ActiveRecord::Base.establish_connection(PG_URI)
# Create the events table if it doesn't exist
unless Conversation.table_exists?
ActiveRecord::Base.connection.instance_exec do
execute "CREATE EXTENSION IF NOT EXISTS timescaledb"
create_table :conversations, id: false, hypertable: {time_column: :ts} do |t|
t.timestamptz :ts, default: "now()", null: false
t.string :topic, null: false
t.text :user_input, null: false
t.text :ai_response, null: false
end
end
end
chat_mode
end
main
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/all_in_one/benchmark_comparison.rb | examples/all_in_one/benchmark_comparison.rb | require 'bundler/inline' #require only what you need
gemfile(true) do
gem 'timescaledb', path: '../..'
gem 'pry'
gem 'faker'
gem 'benchmark-ips', require: "benchmark/ips", git: 'https://github.com/evanphx/benchmark-ips'
end
require 'pp'
require 'benchmark'
# ruby all_in_one.rb postgres://user:pass@host:port/db_name
ActiveRecord::Base.establish_connection( ARGV.last)
# Simple example
class Event < ActiveRecord::Base
self.primary_key = nil
acts_as_hypertable
# If you want to override the automatic assingment of the `created_at ` time series column
def self.timestamp_attributes_for_create_in_model
[]
end
def self.timestamp_attributes_for_update_in_model
[]
end
end
# Plain (non-hypertable) table used as the comparison baseline.
class Event2 < ActiveRecord::Base
  self.table_name = "events_2"
end
# Setup Hypertable as in a migration
ActiveRecord::Base.connection.instance_exec do
ActiveRecord::Base.logger = Logger.new(STDOUT)
drop_table(Event.table_name, if_exists: true)
drop_table(Event2.table_name, if_exists: true)
hypertable_options = {
time_column: 'created_at',
chunk_time_interval: '7 day',
compress_segmentby: 'identifier',
compress_after: '7 days'
}
create_table(:events, id: false, hypertable: hypertable_options) do |t|
t.string :identifier, null: false
t.jsonb :payload
t.timestamps
end
create_table(Event2.table_name) do |t|
t.string :identifier, null: false
t.jsonb :payload
t.timestamps
end
end
# Builds +total+ fake event attribute hashes with monotonically increasing
# timestamps, ready for +insert_all+.
#
# Fixes: the identifier list was rebuilt on every iteration (loop-invariant,
# now hoisted) and +flat_map+ was used where +map+ is meant — the block
# yields hashes, so there is nothing to flatten; behavior is unchanged.
#
# @param total [Integer] number of rows to generate
# @return [Array<Hash>] attribute hashes (created_at/updated_at/identifier/payload)
def generate_fake_data(total: 100_000)
  identifiers = %w[sign_up login click scroll logout view]
  time = Time.now
  Array.new(total) do
    time += rand(60).seconds # keep rows roughly time-ordered
    {
      created_at: time,
      updated_at: time,
      identifier: identifiers.sample,
      payload: {
        "name" => Faker::Name.name,
        "email" => Faker::Internet.email
      }
    }
  end
end
# Inserts +data+ (array of attribute hashes) into +clazz+ in batches of
# +size+ rows, running up to 8 insert threads at a time.
#
# NOTE(review): +data+ is consumed destructively via #shift — callers in
# this script pass a .dup; confirm before reusing elsewhere.
def parallel_inserts clazz: nil, size: 5_000, data: nil
  limit = 8 # max in-flight threads before waiting for the batch to finish
  threads = []
  while (batch = data.shift(size)).any? do
    threads << Thread.new(batch) do |batch|
      begin
        clazz.insert_all(batch, returning: false)
      ensure
        # Return this thread's connection so the pool is not exhausted.
        ActiveRecord::Base.connection.close if ActiveRecord::Base.connection
      end
    end
    # Simple barrier: drain the pool once +limit+ threads are running.
    if threads.size == limit
      threads.each(&:join)
      threads = []
    end
  end
  threads.each(&:join)
end
payloads = nil
ActiveRecord::Base.logger = nil
Benchmark.ips do |x|
x.config(time: 500, warmup: 2)
x.report("gen data") { payloads = generate_fake_data total: 100_000}
x.report("normal ") { parallel_inserts(data: payloads.dup, clazz: Event2, size: 5000) }
x.report("hyper ") { parallel_inserts(data: payloads.dup, clazz: Event, size: 5000) }
x.compare!
end
ActiveRecord::Base.logger = Logger.new(STDOUT)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/all_in_one/query_data.rb | examples/all_in_one/query_data.rb | require 'bundler/inline' #require only what you need
gemfile(true) do
gem 'timescaledb', path: '../..'
gem 'pry'
gem 'faker'
end
require 'pp'
# ruby all_in_one.rb postgres://user:pass@host:port/db_name
ActiveRecord::Base.establish_connection( ARGV.last)
# Simple example
# Hypertable-backed events model for the query examples.
class Event < ActiveRecord::Base
  self.primary_key = nil # hypertables have no single-column primary key
  acts_as_hypertable
  # If you want to override the automatic assignment of the `created_at` time series column
  def self.timestamp_attributes_for_create_in_model
    []
  end
  def self.timestamp_attributes_for_update_in_model
    []
  end
end
# Setup Hypertable as in a migration
ActiveRecord::Base.connection.instance_exec do
ActiveRecord::Base.logger = Logger.new(STDOUT)
drop_table(Event.table_name, if_exists: true)
hypertable_options = {
time_column: 'created_at',
chunk_time_interval: '7 day',
compress_segmentby: 'identifier',
compress_after: '7 days'
}
create_table(:events, id: false, hypertable: hypertable_options) do |t|
t.string :identifier, null: false
t.jsonb :payload
t.timestamps
end
end
# Builds +total+ fake event attribute hashes spread over the last month,
# with monotonically increasing timestamps.
#
# Fixes: the identifier list was rebuilt on every iteration (loop-invariant,
# now hoisted) and +flat_map+ was used where +map+ is meant — the block
# yields hashes, so there is nothing to flatten; behavior is unchanged.
#
# @param total [Integer] number of rows to generate
# @return [Array<Hash>] attribute hashes ready for +insert_all+
def generate_fake_data(total: 100_000)
  identifiers = %w[sign_up login click scroll logout view]
  time = 1.month.ago
  Array.new(total) do
    time += rand(60).seconds # keep rows roughly time-ordered
    {
      created_at: time,
      updated_at: time,
      identifier: identifiers.sample,
      payload: {
        "name" => Faker::Name.name,
        "email" => Faker::Internet.email
      }
    }
  end
end
batch = generate_fake_data total: 10_000
ActiveRecord::Base.logger = nil
Event.insert_all(batch, returning: false)
ActiveRecord::Base.logger = Logger.new(STDOUT)
pp Event.previous_month.count
pp Event.previous_week.count
pp Event.previous_month.group('identifier').count
pp Event.previous_week.group('identifier').count
pp Event
.previous_month
.select("time_bucket('1 day', created_at) as time, identifier, count(*)")
.group("1,2").map(&:attributes)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/all_in_one/all_in_one.rb | examples/all_in_one/all_in_one.rb | require 'bundler/inline' #require only what you need
gemfile(true) do
gem 'timescaledb', path: '../..'
gem 'pry'
gem 'faker'
end
require 'timescaledb'
require 'pp'
require 'pry'
# ruby all_in_one.rb postgres://user:pass@host:port/db_name
ActiveRecord::Base.establish_connection( ARGV.last)
# Simple example
# Events hypertable with hierarchical continuous aggregates: each scope in
# `continuous_aggregates scopes:` becomes a materialized view per timeframe
# (e.g. Event::CountClicksPerMinute, ...PerHour, ...PerDay).
class Event < ActiveRecord::Base
  extend Timescaledb::ActsAsHypertable
  include Timescaledb::ContinuousAggregatesHelper

  # value_column is a SQL expression extracting the numeric value to aggregate.
  acts_as_hypertable time_column: "time",
    segment_by: "identifier",
    value_column: "cast(payload->>'price' as float)"

  scope :count_clicks, -> { select("count(*)").where(identifier: "click") }
  # NOTE(review): filters on "views" while rows are created with "view" —
  # this aggregate will likely always be empty; confirm intended.
  scope :count_views, -> { select("count(*)").where(identifier: "views") }
  scope :purchase, -> { where(identifier: "purchase") }
  # stats_agg comes from the timescaledb_toolkit extension.
  scope :purchase_stats, -> { purchase.select("stats_agg(#{value_column}) as stats_agg") }
  scope :stats, -> { select("average(stats_agg), stddev(stats_agg)") } # just for descendants aggregated classes

  continuous_aggregates scopes: [:count_clicks, :count_views, :purchase_stats],
    timeframes: [:minute, :hour, :day],
    refresh_policy: {
      minute: {
        start_offset: '3 minute',
        end_offset: '1 minute',
        schedule_interval: '1 minute'
      },
      hour: {
        start_offset: '3 hours',
        end_offset: '1 hour',
        schedule_interval: '1 minute'
      },
      day: {
        start_offset: '3 day',
        end_offset: '1 day',
        schedule_interval: '1 minute'
      }
    }
end
# Setup Hypertable as in a migration
ActiveRecord::Base.connection.instance_exec do
ActiveRecord::Base.logger = Logger.new(STDOUT)
Event.drop_continuous_aggregates
drop_table(:events, if_exists: true, cascade: true)
hypertable_options = {
time_column: 'time',
chunk_time_interval: '1 day',
compress_after: '7 days',
compress_orderby: 'time',
compress_segmentby: 'identifier',
}
create_table(:events, id: false, hypertable: hypertable_options) do |t|
t.timestamptz :time, null: false, default: -> { 'now()' }
t.string :identifier, null: false
t.jsonb :payload
end
end
ActiveRecord::Base.connection.instance_exec do
Event.create_continuous_aggregates
end
# Create some data just to see how it works
1.times do
Event.transaction do
Event.create identifier: "sign_up", payload: {"name" => "Eon"}
Event.create identifier: "login", payload: {"email" => "eon@timescale.com"}
Event.create identifier: "click", payload: {"user" => "eon", "path" => "/install/timescaledb"}
Event.create identifier: "scroll", payload: {"user" => "eon", "path" => "/install/timescaledb"}
Event.create identifier: "logout", payload: {"email" => "eon@timescale.com"}
Event.create identifier: "purchase", payload: { price: 100.0}
Event.create identifier: "purchase", payload: { price: 120.0}
Event.create identifier: "purchase", payload: { price: 140.0}
end
end
# Builds +total+ fake event attribute hashes spread over the last month.
# "purchase" events get a numeric price payload; all others get fake
# name/email data.
#
# Fixes: the identifier list was rebuilt on every iteration (loop-invariant,
# now hoisted) and +flat_map+ was used where +map+ is meant — the block
# yields hashes, so there is nothing to flatten; behavior is unchanged.
#
# @param total [Integer] number of rows to generate
# @return [Array<Hash>] attribute hashes ready for +insert_all+
def generate_fake_data(total: 100_000)
  identifiers = %w[sign_up login click scroll logout view purchase]
  time = 1.month.ago
  Array.new(total) do
    time += rand(60).seconds # keep rows roughly time-ordered
    id = identifiers.sample
    payload =
      if id == "purchase"
        { "price" => rand(100..1000) }
      else
        {
          "name" => Faker::Name.name,
          "email" => Faker::Internet.email,
        }
      end
    {
      time: time,
      identifier: id,
      payload: payload
    }
  end
end
# Runs the block with ActiveRecord query logging disabled, then reinstalls
# a STDOUT logger (this script's normal logging setup).
#
# Fix: the original reinstalled the logger only on the happy path, leaving
# a nil logger installed whenever the block raised; `ensure` restores it on
# every exit path. (Method name typo kept: callers use `supress_logs`.)
def supress_logs
  ActiveRecord::Base.logger = nil
  yield
ensure
  ActiveRecord::Base.logger = Logger.new(STDOUT)
end
batch = generate_fake_data total: 10_000
supress_logs do
Event.insert_all(batch, returning: false)
end
# Now let's see what we have in the scopes
Event.last_hour.group(:identifier).count # => {"login"=>2, "click"=>1, "logout"=>1, "sign_up"=>1, "scroll"=>1}
Event.refresh_aggregates
pp Event::CountClicksPerMinute.last_hour.map(&:attributes)
pp Event::CountViewsPerMinute.last_hour.map(&:attributes)
puts "compressing 1 chunk of #{ Event.chunks.count } chunks"
Event.chunks.first.compress!
puts "detailed size"
pp Event.hypertable.detailed_size
puts "compression stats"
pp Event.hypertable.compression_stats
puts "decompressing"
Event.chunks.first.decompress!
Pry.start
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/examples/all_in_one/caggs.rb | examples/all_in_one/caggs.rb | require 'bundler/inline' #require only what you need
gemfile(true) do
gem 'timescaledb', path: '../..'
gem 'pry'
end
require 'pp'
# ruby caggs.rb postgres://user:pass@host:port/db_name
ActiveRecord::Base.establish_connection( ARGV.last)
# Raw price tick: one row per trade event on the +ticks+ hypertable.
class Tick < ActiveRecord::Base
  self.table_name = 'ticks'
  self.primary_key = nil # hypertables have no single-column primary key
  acts_as_hypertable time_column: 'time'
  # Virtual decimal attributes so OHLC query results are typed when read
  # back through this model.
  %w[open high low close].each{|name| attribute name, :decimal}
  # OHLCV candles bucketed by +timeframe+ (e.g. '1m').
  # NOTE(review): +timeframe+ is interpolated directly into SQL — pass only
  # trusted literals, never user input.
  scope :ohlc, -> (timeframe='1m') do
    select("time_bucket('#{timeframe}', time) as time,
            symbol,
            FIRST(price, time) as open,
            MAX(price) as high,
            MIN(price) as low,
            LAST(price, time) as close,
            SUM(volume) as volume").group("1,2")
  end
end
ActiveRecord::Base.connection.instance_exec do
drop_table(:ticks, if_exists: true, force: :cascade)
hypertable_options = {
time_column: 'time',
chunk_time_interval: '1 day',
compress_segmentby: 'symbol',
compress_orderby: 'time',
compress_after: '7 days'
}
create_table :ticks, hypertable: hypertable_options, id: false do |t|
t.timestamp :time
t.string :symbol
t.decimal :price
t.integer :volume
end
end
# Symbols and randomization helpers used to build a fake tick stream.
FAANG = %w[META AMZN AAPL NFLX GOOG]
OPERATION = [:+, :-]
RAND_VOLUME = -> { (rand(10) * rand(10)) * 100 }
RAND_CENT = -> { (rand / 50.0).round(2) }

# Generates roughly +total+ fake ticks: one random-walk price series per
# FAANG symbol, advancing a shared clock 0-9 seconds per round.
#
# @param total [Integer] approximate number of ticks (rounded down to a
#   multiple of FAANG.size)
# @return [Array<Hash>] rows with :time, :symbol, :price and :volume keys
def generate_fake_data(total: 100)
  last_price = {}
  clock = Time.now
  rounds = total / FAANG.size
  ticks = []
  rounds.times do
    clock += rand(10)
    FAANG.each do |symbol|
      prev = last_price[symbol]
      price =
        if prev
          # Nudge the previous price up or down by a random cent amount.
          prev.send(OPERATION.sample, RAND_CENT.()).round(2)
        else
          # First tick for this symbol: seed somewhere in [50, 150).
          50 + rand(100)
        end
      last_price[symbol] = price
      ticks << { time: clock, symbol: symbol, price: price, volume: RAND_VOLUME.() }
    end
  end
  ticks
end
batch = generate_fake_data total: 10_000
ActiveRecord::Base.logger = nil
Tick.insert_all(batch, returning: false)
ActiveRecord::Base.logger = Logger.new(STDOUT)
ActiveRecord::Base.connection.instance_exec do
create_continuous_aggregates('ohlc_1m', Tick.ohlc('1m'), with_data: true)
end
# Read-only model over the +ohlc_1m+ continuous aggregate view.
class Ohlc1m < ActiveRecord::Base
  self.table_name = 'ohlc_1m'
  attribute :time, :time
  attribute :symbol, :string
  # Typed decimal attributes for the aggregated columns.
  %w[open high low close volume].each{|name| attribute name, :decimal}
  # The backing relation is a materialized view; writes are not allowed.
  def readonly?
    true
  end
end
binding.pry
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb.rb | lib/timescaledb.rb | require 'active_record'
require_relative 'timescaledb/application_record'
require_relative 'timescaledb/acts_as_hypertable'
require_relative 'timescaledb/acts_as_hypertable/core'
require_relative 'timescaledb/continuous_aggregates_helper'
require_relative 'timescaledb/connection'
require_relative 'timescaledb/toolkit'
require_relative 'timescaledb/chunk'
require_relative 'timescaledb/compression_settings'
require_relative 'timescaledb/connection_handling'
require_relative 'timescaledb/continuous_aggregates'
require_relative 'timescaledb/dimensions'
require_relative 'timescaledb/hypertable'
require_relative 'timescaledb/job'
require_relative 'timescaledb/job_stats'
require_relative 'timescaledb/schema_dumper'
require_relative 'timescaledb/stats'
require_relative 'timescaledb/stats_report'
require_relative 'timescaledb/migration_helpers'
require_relative 'timescaledb/extension'
require_relative 'timescaledb/version'
# Top-level facade: convenience accessors over the TimescaleDB
# catalog-backed models and the shared connection.
module Timescaledb
  module_function

  # @return [Timescaledb::Connection] the shared connection singleton
  def connection
    Connection.instance
  end

  # @return [Class] interface to TimescaleDB extension metadata
  def extension
    Extension
  end

  # @return [ActiveRecord::Relation] all chunks across all hypertables
  def chunks
    Chunk.all
  end

  # @return [ActiveRecord::Relation] all hypertables
  def hypertables
    Hypertable.all
  end

  # @return [ActiveRecord::Relation] all continuous aggregates
  def continuous_aggregates
    ContinuousAggregates.all
  end

  # @return [ActiveRecord::Relation] all compression settings
  def compression_settings
    CompressionSettings.all
  end

  # @return [ActiveRecord::Relation] all background jobs
  def jobs
    Job.all
  end

  # @return [ActiveRecord::Relation] run statistics for background jobs
  def job_stats
    JobStats.all
  end

  # Summary hash of hypertable/chunk/cagg/job statistics for +scope+.
  def stats(scope=Hypertable.all)
    StatsReport.resume(scope)
  end

  # Default options applied by the acts_as_hypertable macro.
  def default_hypertable_options
    Timescaledb::ActsAsHypertable::DEFAULT_OPTIONS
  end
end
begin
require 'scenic'
require_relative 'timescaledb/scenic/adapter'
require_relative 'timescaledb/scenic/extension'
Scenic.configure do |config|
config.database = Timescaledb::Scenic::Adapter.new
end
rescue LoadError
# This is expected when the scenic gem is not being used
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/chunk.rb | lib/timescaledb/chunk.rb | module Timescaledb
# Wraps the +timescaledb_information.chunks+ catalog view: one row per
# hypertable chunk, plus compress/decompress helpers.
class Chunk < ::Timescaledb::ApplicationRecord
  self.table_name = "timescaledb_information.chunks"
  self.primary_key = "chunk_name"

  belongs_to :hypertable, foreign_key: :hypertable_name

  scope :compressed, -> { where(is_compressed: true) }
  scope :uncompressed, -> { where(is_compressed: false) }
  # Summary counts used by the stats report.
  scope :resume, -> do
    {
      total: count,
      compressed: compressed.count,
      uncompressed: uncompressed.count
    }
  end

  # Compresses this chunk via TimescaleDB's compress_chunk() function.
  def compress!
    execute("SELECT compress_chunk(#{chunk_relation})")
  end

  # Reverses compression for this chunk.
  def decompress!
    execute("SELECT decompress_chunk(#{chunk_relation})")
  end

  # Schema-qualified regclass literal for this chunk.
  # NOTE(review): names come from the TimescaleDB catalog, not user input,
  # so direct interpolation is acceptable here.
  def chunk_relation
    "('#{chunk_schema}.#{chunk_name}')::regclass"
  end

  # Runs raw SQL on this model's connection.
  def execute(sql)
    self.class.connection.execute(sql)
  end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/version.rb | lib/timescaledb/version.rb | module Timescaledb
# Gem version, bumped on each release.
VERSION = '0.3.2'
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats_report.rb | lib/timescaledb/stats_report.rb | require "active_support/core_ext/numeric/conversions"
module Timescaledb
  # Assembles hypertable, chunk, continuous-aggregate and job statistics
  # into one summary hash (exposed as Timescaledb.stats).
  module StatsReport
    module_function

    # @param scope [ActiveRecord::Relation] hypertables to report on
    # @return [Hash] nested summary of counts and sizes
    def resume(scope=Hypertable.all)
      # Restrict the related catalogs to the hypertables in +scope+.
      base_filter = {hypertable_name: scope.pluck(:hypertable_name)}
      {
        hypertables: {
          count: scope.count,
          uncompressed: scope.to_a.count { |h| h.compression_stats.empty? },
          approximate_row_count: approximate_row_count(scope),
          chunks: Chunk.where(base_filter).resume,
          size: compression_resume(scope)
        },
        continuous_aggregates: ContinuousAggregates.where(base_filter).resume,
        jobs_stats: JobStats.where(base_filter).resume
      }
    end

    # Human-readable before/after compression byte totals across +scope+.
    def compression_resume(scope)
      sum = -> (method) { (scope.map(&method).inject(:+) || 0).to_s(:human_size)}
      {
        uncompressed: sum[:before_total_bytes],
        compressed: sum[:after_total_bytes]
      }
    end

    # Maps each hypertable name to its estimated row count.
    def approximate_row_count(scope)
      scope.to_a.map do |hypertable|
        { hypertable.hypertable_name => hypertable.approximate_row_count }
      end.inject(&:merge!)
    end
  end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/hypertable.rb | lib/timescaledb/hypertable.rb | require 'ostruct'
module Timescaledb
  # Wraps the +timescaledb_information.hypertables+ catalog view and exposes
  # size/row-count helpers backed by TimescaleDB SQL functions.
  class Hypertable < ::Timescaledb::ApplicationRecord
    self.table_name = "timescaledb_information.hypertables"
    self.primary_key = "hypertable_name"

    has_many :jobs, foreign_key: "hypertable_name"
    has_many :chunks, foreign_key: "hypertable_name"

    has_many :compression_settings,
      foreign_key: "hypertable_name",
      class_name: "Timescaledb::CompressionSetting"

    has_many :dimensions,
      foreign_key: "hypertable_name",
      class_name: "Timescaledb::Dimension"

    has_many :continuous_aggregates,
      foreign_key: "hypertable_name",
      class_name: "Timescaledb::ContinuousAggregate"

    # The primary (time) partitioning dimension.
    def main_dimension
      dimensions.find_by dimension_number: 1
    end

    # Per-chunk size breakdown from chunks_detailed_size().
    def chunks_detailed_size
      struct_from "SELECT * from chunks_detailed_size('#{self.hypertable_name}')"
    end

    # Fast estimated row count (no full table scan).
    def approximate_row_count
      struct_from("SELECT * FROM approximate_row_count('#{self.hypertable_name}')").first.approximate_row_count
    end

    # Compression stats, memoized; {} when the hypertable has no compressed chunks.
    def compression_stats
      @compression_stats ||=
        struct_from("SELECT * from hypertable_compression_stats('#{self.hypertable_name}')").first || {}
    end

    # Table/index/toast size breakdown.
    def detailed_size
      struct_from("SELECT * FROM hypertable_detailed_size('#{self.hypertable_name}')").first
    end

    # Bytes before compression, falling back to current size when uncompressed.
    def before_total_bytes
      compression_stats["before_compression_total_bytes"] || detailed_size.total_bytes
    end

    # Bytes after compression; 0 when uncompressed.
    def after_total_bytes
      compression_stats["after_compression_total_bytes"] || 0
    end

    private

    # Runs +sql+ and wraps each result row in an OpenStruct.
    def struct_from(sql)
      self.class.connection.execute(sql).map(&OpenStruct.method(:new))
    end
  end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/job_stats.rb | lib/timescaledb/job_stats.rb | module Timescaledb
# Wraps the +timescaledb_information.job_stats+ view: run statistics for
# TimescaleDB background jobs.
class JobStat < ::Timescaledb::ApplicationRecord
  self.table_name = "timescaledb_information.job_stats"

  belongs_to :job

  # attribute :last_run_duration, :interval

  scope :success, -> { where(last_run_status: "Success") }
  scope :scheduled, -> { where(job_status: "Scheduled") }
  # Totals across the relation: successes, runs and failures.
  scope :resume, -> do
    select("sum(total_successes)::int as success,
            sum(total_runs)::int as runs,
            sum(total_failures)::int as failures")
      .to_a.map{|e|e.attributes.transform_keys(&:to_sym) }
  end
end
# Plural alias used by Timescaledb.job_stats.
JobStats = JobStat
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/acts_as_hypertable.rb | lib/timescaledb/acts_as_hypertable.rb | # frozen_string_literal: true
module Timescaledb
  # If you want your model to hook into its underlying hypertable
  # as well as have access to TimescaleDB specific data, methods, and more,
  # specify this macro in your model.
  #
  # @note Your model's table needs to have already been converted to a hypertable
  #   via the TimescaleDB `create_hypertable` function for this to work.
  #
  # @see https://docs.timescale.com/api/latest/hypertable/create_hypertable/ for
  #   how to use the SQL `create_hypertable` function.
  # @see Timescaledb::MigrationHelpers#create_table for how to create a new hypertable
  #   via a Rails migration utilizing the standard `create_table` method.
  #
  # @example Enabling the macro on your model
  #   class Event < ActiveRecord::Base
  #     acts_as_hypertable
  #   end
  #
  # @see Timescaledb::ActsAsHypertable#acts_as_hypertable
  #   for configuration options
  module ActsAsHypertable
    # Applied when the macro is called without a :time_column option.
    DEFAULT_OPTIONS = {
      time_column: :created_at,
    }.freeze

    # @return [Boolean] whether the macro has already been applied to this model.
    def acts_as_hypertable?
      included_modules.include?(Timescaledb::ActsAsHypertable::Core)
    end

    # == Configuration options
    #
    # @param [Hash] options The options to initialize your macro with.
    # @option options [Symbol] :time_column The name of the column in your
    #   model's table containing time values. The name provided should be
    #   the same name as the `time_column_name` you passed to the
    #   TimescaleDB `create_hypertable` function.
    #
    # @example Enabling the macro on your model with options
    #   class Event < ActiveRecord::Base
    #     acts_as_hypertable time_column: :timestamp
    #   end
    #
    # @param [Hash] options The options to initialize your macro with.
    # @option options [Boolean] :skip_association_scopes to avoid `.hypertable`, `.chunks` and other scopes related to metadata.
    # @option options [Boolean] :skip_default_scopes to avoid the generation of default time related scopes like `last_hour`, `last_week`, `yesterday` and so on...
    # @option options [Boolean] :skip_time_vector to avoid the generation of time vector related scopes
    def acts_as_hypertable(options = {})
      return if acts_as_hypertable? # idempotent: calling the macro twice is a no-op

      include Timescaledb::ActsAsHypertable::Core
      include Timescaledb::Toolkit::TimeVector

      class_attribute :hypertable_options, instance_writer: false

      # Merge caller options over the defaults, then normalize (see Core).
      self.hypertable_options = DEFAULT_OPTIONS.dup
      hypertable_options.merge!(options)
      normalize_hypertable_options

      define_association_scopes unless options[:skip_association_scopes]
      define_default_scopes unless options[:skip_default_scopes]
      define_default_vector_scopes unless options[:skip_time_vector]
    end
  end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/migration_helpers.rb | lib/timescaledb/migration_helpers.rb | require 'active_record/connection_adapters/postgresql_adapter'
# Useful methods to run TimescaleDB in you Ruby app.
module Timescaledb
  # Migration helpers can help you to setup hypertables by default.
  module MigrationHelpers
    # `create_table` accepts a `hypertable` argument with options for creating
    # a TimescaleDB hypertable.
    #
    # See https://docs.timescale.com/api/latest/hypertable/create_hypertable/#optional-arguments
    # for additional options supported by the plugin.
    #
    # @example
    #   options = {
    #     time_column: 'created_at',
    #     chunk_time_interval: '1 min',
    #     compress_segmentby: 'identifier',
    #     compress_orderby: 'created_at',
    #     compress_after: '7 days'
    #   }
    #
    #   create_table(:events, id: false, hypertable: options) do |t|
    #     t.string :identifier, null: false
    #     t.jsonb :payload
    #     t.timestamps
    #   end
    def create_table(table_name, id: :primary_key, primary_key: nil, force: nil, **options)
      super
      create_hypertable(table_name, **options[:hypertable]) if options.key?(:hypertable)
    end

    # Override the valid_table_definition_options to include hypertable.
    def valid_table_definition_options # :nodoc:
      super + [:hypertable]
    end

    # Setup hypertable from options
    # @see create_table with the hypertable options.
    #
    # NOTE(review): this temporarily forces ActiveRecord::Base.logger to
    # STDOUT for the duration of the call (restored in ensure) — confirm
    # this debug side effect is intended.
    def create_hypertable(table_name,
                          time_column: 'created_at',
                          chunk_time_interval: '1 week',
                          compress_segmentby: nil,
                          compress_orderby: 'created_at',
                          compress_after: nil,
                          drop_after: nil,
                          partition_column: nil,
                          number_partitions: nil,
                          **hypertable_options)
      original_logger = ActiveRecord::Base.logger
      ActiveRecord::Base.logger = Logger.new(STDOUT)

      # Primary time dimension for the hypertable.
      dimension = "by_range(#{quote(time_column)}, #{parse_interval(chunk_time_interval)})"
      arguments = [ quote(table_name), dimension,
        *hypertable_options.map { |k, v| "#{k} => #{quote(v)}" }
      ]

      execute "SELECT create_hypertable(#{arguments.compact.join(', ')})"

      # Optional secondary space (hash) partitioning dimension.
      if partition_column && number_partitions
        execute "SELECT add_dimension('#{table_name}', by_hash(#{quote(partition_column)}, #{number_partitions}))"
      end

      if compress_segmentby || compress_after
        add_compression_policy(table_name, orderby: compress_orderby, segmentby: compress_segmentby, compress_after: compress_after)
      end

      if drop_after
        add_retention_policy(table_name, drop_after: drop_after)
      end
    ensure
      ActiveRecord::Base.logger = original_logger if original_logger
    end

    # Create a new continuous aggregate
    #
    # @param name [String, Symbol] The name of the continuous aggregate.
    # @param query [String] The SQL query for the aggregate view definition.
    # @param with_data [Boolean] Set to true to create the aggregate WITH DATA
    # @param refresh_policies [Hash] Set to create a refresh policy
    # @option refresh_policies [String] start_offset: INTERVAL or integer
    # @option refresh_policies [String] end_offset: INTERVAL or integer
    # @option refresh_policies [String] schedule_interval: INTERVAL
    # @option materialized_only [Boolean] Override the WITH clause 'timescaledb.materialized_only'
    # @option create_group_indexes [Boolean] Override the WITH clause 'timescaledb.create_group_indexes'
    # @option finalized [Boolean] Override the WITH clause 'timescaledb.finalized'
    #
    # @see https://docs.timescale.com/api/latest/continuous-aggregates/create_materialized_view/
    # @see https://docs.timescale.com/api/latest/continuous-aggregates/add_continuous_aggregate_policy/
    #
    # @example
    #   create_continuous_aggregate(:activity_counts, query: <<-SQL, refresh_policies: { schedule_interval: "INTERVAL '1 hour'" })
    #     SELECT
    #       time_bucket(INTERVAL '1 day', activity.created_at) AS bucket,
    #       count(*)
    #     FROM activity
    #     GROUP BY bucket
    #   SQL
    #
    def create_continuous_aggregate(table_name, query, **options)
      execute <<~SQL
        CREATE MATERIALIZED VIEW #{table_name}
        WITH (
          timescaledb.continuous
          #{build_with_clause_option_string(:materialized_only, options)}
          #{build_with_clause_option_string(:create_group_indexes, options)}
          #{build_with_clause_option_string(:finalized, options)}
        ) AS
        #{query.respond_to?(:to_sql) ? query.to_sql : query}
        WITH #{'NO' unless options[:with_data]} DATA;
      SQL

      create_continuous_aggregate_policy(table_name, **(options[:refresh_policies] || {}))
    end

    # Drop a new continuous aggregate.
    #
    # It basically DROP MATERIALIZED VIEW for a given @name.
    #
    # @param name [String, Symbol] The name of the continuous aggregate view.
    def drop_continuous_aggregates view_name
      execute "DROP MATERIALIZED VIEW #{view_name}"
    end

    alias_method :create_continuous_aggregates, :create_continuous_aggregate

    # Adds a refresh policy for the continuous aggregate. No-op when no
    # policy options are given. Values are passed through as raw SQL, so
    # callers supply e.g. "INTERVAL '1 hour'" literals.
    def create_continuous_aggregate_policy(table_name, **options)
      return if options.empty?

      # TODO: assert valid keys
      execute <<~SQL
        SELECT add_continuous_aggregate_policy('#{table_name}',
          start_offset => #{options[:start_offset]},
          end_offset => #{options[:end_offset]},
          schedule_interval => #{options[:schedule_interval]});
      SQL
    end

    # Removes the refresh policy from a continuous aggregate.
    def remove_continuous_aggregate_policy(table_name)
      execute "SELECT remove_continuous_aggregate_policy('#{table_name}')"
    end

    # Adds a data retention policy: chunks older than +drop_after+ are dropped.
    def create_retention_policy(table_name, drop_after:)
      execute "SELECT add_retention_policy('#{table_name}', drop_after => #{parse_interval(drop_after)})"
    end

    alias_method :add_retention_policy, :create_retention_policy

    # Removes the retention policy from a hypertable.
    def remove_retention_policy(table_name)
      execute "SELECT remove_retention_policy('#{table_name}')"
    end

    # Enable compression policy.
    #
    # @param table_name [String] The name of the table.
    # @param orderby [String] The column to order by.
    # @param segmentby [String] The column to segment by.
    # @param compress_after [String] The interval to compress after.
    # @param compression_chunk_time_interval [String] In case to merge chunks.
    #
    # @see https://docs.timescale.com/api/latest/compression/add_compression_policy/
    def add_compression_policy(table_name, orderby:, segmentby:, compress_after: nil, compression_chunk_time_interval: nil)
      options = []
      options << 'timescaledb.compress'
      options << "timescaledb.compress_orderby = '#{orderby}'" if orderby
      options << "timescaledb.compress_segmentby = '#{segmentby}'" if segmentby
      options << "timescaledb.compression_chunk_time_interval = INTERVAL '#{compression_chunk_time_interval}'" if compression_chunk_time_interval
      execute <<~SQL
        ALTER TABLE #{table_name} SET (
          #{options.join(',')}
        )
      SQL
      execute "SELECT add_compression_policy('#{table_name}', compress_after => INTERVAL '#{compress_after}')" if compress_after
    end

    private

    # Build a string for the WITH clause of the CREATE MATERIALIZED VIEW statement.
    # When the option is omitted, this method returns an empty string, which allows this gem to use the
    # defaults provided by TimescaleDB.
    def build_with_clause_option_string(option_key, options)
      return '' unless options.key?(option_key)

      value = options[option_key] ? 'true' : 'false'
      ",timescaledb.#{option_key}=#{value}"
    end

    # Numeric intervals pass through untouched; strings become INTERVAL literals.
    def parse_interval(interval)
      if interval.is_a?(Numeric)
        interval
      else
        "INTERVAL '#{interval}'"
      end
    end
  end
end
ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.include(Timescaledb::MigrationHelpers)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats.rb | lib/timescaledb/stats.rb | require_relative './stats/continuous_aggregates'
require_relative './stats/hypertables'
require_relative './stats/job_stats'
module Timescaledb
  # Assembles an instance-wide statistics snapshot from the per-area
  # helper classes (Hypertables, ContinuousAggregates, JobStats).
  class Stats
    # @param [Array<OpenStruct>] hypertables The list of hypertables.
    # @param [Timescaledb::Connection] connection The PG connection.
    def initialize(hypertables = [], connection = Timescaledb.connection)
      @hypertables = hypertables
      @connection = connection
    end

    # @return [Hash] stats keyed by :hypertables, :continuous_aggregates and :jobs_stats
    def to_h
      {
        hypertables: Hypertables.new(@hypertables).to_h,
        continuous_aggregates: ContinuousAggregates.new.to_h,
        jobs_stats: JobStats.new.to_h
      }
    end
  end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/connection_handling.rb | lib/timescaledb/connection_handling.rb | module Timescaledb
# Raised when Timescaledb.connection is used before any connection is set up.
class ConnectionNotEstablishedError < StandardError; end

module_function

# @param [String] config with the postgres connection string.
def establish_connection(config)
  # Establish connection for Timescaledb
  Connection.instance.config = config

  # Also establish connection for ActiveRecord if it's defined
  if defined?(ActiveRecord::Base)
    ActiveRecord::Base.establish_connection(config)
  end
end

# @param [PG::Connection] to use it directly from a raw connection
def use_connection(conn)
  Connection.instance.use_connection(conn)

  # Also set ActiveRecord connection if it's defined
  if defined?(ActiveRecord::Base) && ActiveRecord::Base.connected?
    ar_conn = ActiveRecord::Base.connection
    current_conn = ar_conn.raw_connection

    # Only set if it's different to avoid redundant assignment
    if current_conn != conn
      # NOTE(review): reaches into the adapter's internals (@raw_connection);
      # fragile across ActiveRecord versions — confirm against the target AR.
      ar_conn.instance_variable_set(:@raw_connection, conn)
    end
  end
end

# @return [Timescaledb::Connection] the shared connection wrapper
# @raise [ConnectionNotEstablishedError] when no connection was configured
def connection
  raise ConnectionNotEstablishedError.new unless Connection.instance.connected?

  Connection.instance
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/continuous_aggregates.rb | lib/timescaledb/continuous_aggregates.rb | module Timescaledb
# Wraps the +timescaledb_information.continuous_aggregates+ catalog view.
class ContinuousAggregates < ::Timescaledb::ApplicationRecord
  self.table_name = "timescaledb_information.continuous_aggregates"
  self.primary_key = 'materialization_hypertable_name'

  has_many :jobs, foreign_key: "hypertable_name",
    class_name: "Timescaledb::Job"
  has_many :chunks, foreign_key: "hypertable_name",
    class_name: "Timescaledb::Chunk"

  # Summary count used by the stats report.
  scope :resume, -> do
    {
      total: count
    }
  end

  # Returns caggs ordered so parents come before caggs built on top of them
  # (hierarchical caggs), by walking the TimescaleDB catalog recursively.
  # NOTE(review): returns an Array of records, not a Relation — not chainable.
  scope :hierarchical, -> do
    with_recursive = <<~SQL
      WITH RECURSIVE caggs AS (
        SELECT mat_hypertable_id, parent_mat_hypertable_id, user_view_name
        FROM _timescaledb_catalog.continuous_agg
        UNION ALL
        SELECT continuous_agg.mat_hypertable_id, continuous_agg.parent_mat_hypertable_id, continuous_agg.user_view_name
        FROM _timescaledb_catalog.continuous_agg
        JOIN caggs ON caggs.parent_mat_hypertable_id = continuous_agg.mat_hypertable_id
      )
      SELECT * FROM caggs
      ORDER BY mat_hypertable_id
    SQL
    views = unscoped
      .select("distinct user_view_name")
      .from("(#{with_recursive}) as caggs")
      .pluck(:user_view_name)
      .uniq

    views.map do |view|
      find_by(view_name: view)
    end
  end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/compression_settings.rb | lib/timescaledb/compression_settings.rb | module Timescaledb
# Wraps the +timescaledb_information.compression_settings+ catalog view.
class CompressionSetting < ::Timescaledb::ApplicationRecord
  self.table_name = "timescaledb_information.compression_settings"
  belongs_to :hypertable, foreign_key: :hypertable_name
end
# Plural alias used by Timescaledb.compression_settings.
CompressionSettings = CompressionSetting
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/schema_dumper.rb | lib/timescaledb/schema_dumper.rb | require 'active_record/connection_adapters/postgresql_adapter'
require 'active_support/core_ext/string/indent'
module Timescaledb
# Schema dumper overrides default schema dumper to include:
# * hypertables
# * retention policies
# * continuous aggregates
# * compression settings
# It also ignores Timescale related schemas when dumping the schema.
# It also ignores dumping options as extension is not installed or no hypertables are available.
module SchemaDumper
def tables(stream)
super # This will call #table for each table in the database
if exports_timescaledb_metadata?
timescale_hypertables(stream)
timescale_retention_policies(stream)
timescale_continuous_aggregates(stream) # Define these before any Scenic views that might use them
end
end
# Ignore dumps in case DB is not eligible for TimescaleDB metadata.
# @return [Boolean] true if the extension is installed and hypertables are available, otherwise false.
private def exports_timescaledb_metadata?
# Note it's safe to use the raw connection here because we're only reading from the database
# and not modifying it. We're also on the same connection pool as ActiveRecord::Base.
# The dump process also runs standalone, so we don't need to worry about the connection being
# used elsewhere.
Timescaledb.use_connection @connection.raw_connection
Timescaledb.extension.installed? && Timescaledb.hypertables.any?
end
# Ignores Timescale related schemas when dumping the schema
IGNORE_SCHEMAS = %w[
_timescaledb_cache
_timescaledb_config
_timescaledb_catalog
_timescaledb_debug
_timescaledb_functions
_timescaledb_internal
timescaledb_experimental
timescaledb_information
toolkit_experimental
]
def schemas(stream)
schema_names = @connection.schema_names - ["public", *IGNORE_SCHEMAS]
if schema_names.any?
schema_names.sort.each do |name|
stream.puts " create_schema #{name.inspect}"
end
stream.puts
end
end
def timescale_hypertables(stream)
sorted_hypertables.each do |hypertable|
timescale_hypertable(hypertable, stream)
end
end
def timescale_retention_policies(stream)
if sorted_hypertables.any? { |hypertable| hypertable.jobs.exists?(proc_name: "policy_retention") }
stream.puts # Insert a blank line above the retention policies, for readability
end
sorted_hypertables.each do |hypertable|
timescale_retention_policy(hypertable, stream)
end
end
private
def timescale_hypertable(hypertable, stream)
time = hypertable.main_dimension
options = {
time_column: time.column_name,
chunk_time_interval: time.time_interval ? time.time_interval.inspect : time.integer_interval,
**timescale_compression_settings_for(hypertable),
**timescale_space_partition_for(hypertable),
**timescale_index_options_for(hypertable)
}
options = options.map { |k, v| "#{k}: #{v.to_json}" }.join(", ")
stream.puts %Q[ create_hypertable "#{hypertable.hypertable_name}", #{options}]
end
def timescale_retention_policy(hypertable, stream)
hypertable.jobs.where(proc_name: "policy_retention").each do |job|
stream.puts %Q[ create_retention_policy "#{job.hypertable_name}", drop_after: "#{job.config["drop_after"]}"]
end
end
def timescale_compression_settings_for(hypertable)
compression_settings = hypertable.compression_settings.each_with_object({}) do |setting, compression_settings|
# It's possible to configure compression so that it is segmented by multiple
# columns. To make sure we capture that correctly, we'll treat them as an array.
compression_settings[:compress_segmentby] ||= []
compression_settings[:compress_orderby] ||= []
compression_settings[:compress_segmentby] << setting.attname if setting.segmentby_column_index
if setting.orderby_column_index
if setting.orderby_asc
direction = "ASC"
# For ASC, default is NULLS LAST, so only add if explicitly set to FIRST
direction += " NULLS FIRST" if setting.orderby_nullsfirst == true
else
direction = "DESC"
# For DESC, default is NULLS FIRST, so only add if explicitly set to LAST
direction += " NULLS LAST" if setting.orderby_nullsfirst == false
end
compression_settings[:compress_orderby] << "#{setting.attname} #{direction}"
end
end
hypertable.jobs.compression.each do |job|
compression_settings[:compress_after] = job.config["compress_after"]
end
# Pack the compression setting arrays into a comma-separated string instead.
if compression_settings[:compress_segmentby]
compression_settings[:compress_segmentby] = compression_settings[:compress_segmentby].join(", ")
end
if compression_settings[:compress_orderby]
compression_settings[:compress_orderby] = compression_settings[:compress_orderby].join(", ")
end
compression_settings
end
def timescale_space_partition_for(hypertable)
return {} unless hypertable.dimensions.length > 1
space = hypertable.dimensions.last
{partition_column: space.column_name, number_partitions: space.num_partitions}
end
def timescale_index_options_for(hypertable)
time = hypertable.main_dimension
if @connection.indexes(hypertable.hypertable_name).any? { |i| i.columns == [time.column_name] }
{}
else
{create_default_indexes: false}
end
end
def timescale_continuous_aggregates(stream)
return unless Timescaledb::ContinuousAggregates.table_exists?
Timescaledb::ContinuousAggregates.hierarchical.each do |aggregate|
refresh_policies_opts = if (refresh_policy = aggregate.jobs.refresh_continuous_aggregate.first)
interval = timescale_interval(refresh_policy.schedule_interval)
end_offset = timescale_interval(refresh_policy.config["end_offset"])
start_offset = timescale_interval(refresh_policy.config["start_offset"])
%(refresh_policies: { start_offset: "#{start_offset}", end_offset: "#{end_offset}", schedule_interval: "#{interval}"}, )
else
""
end
with_clause_opts = "materialized_only: #{aggregate[:materialized_only]}, finalized: #{aggregate[:finalized]}"
stream.puts <<~AGG.indent(2)
create_continuous_aggregate("#{aggregate.view_name}", <<-SQL, #{refresh_policies_opts}#{with_clause_opts})
#{aggregate.view_definition.strip.gsub(/;$/, '')}
SQL
AGG
stream.puts
end
end
def timescale_interval(value)
return "NULL" if value.nil? || value.to_s.downcase == "null"
"INTERVAL '#{value}'"
end
def sorted_hypertables
@sorted_hypertables ||= Timescaledb::Hypertable.order(:hypertable_name).to_a
end
end
end
ActiveRecord::ConnectionAdapters::PostgreSQL::SchemaDumper.prepend(Timescaledb::SchemaDumper)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/connection.rb | lib/timescaledb/connection.rb | require 'singleton'
require 'ostruct'
module Timescaledb
# Minimal connection setup for Timescaledb directly with the PG.
# The concept is use a singleton component that can query
# independently of the ActiveRecord::Base connections.
# This is useful for the extension and hypertable metadata.
# It can also #use_connection from active record if needed.
class Connection
include Singleton
attr_writer :config
# @param [String] query The SQL raw query.
# @param [Array] params The SQL query parameters.
# @return [Array<OpenStruct>] The SQL result.
def query(query, params = [])
query = params.empty? ? connection.exec(query) : connection.exec_params(query, params)
query.map(&OpenStruct.method(:new))
end
# @param [String] query The SQL raw query.
# @param [Array] params The SQL query parameters.
# @return [OpenStruct] The first SQL result.
def query_first(query, params = [])
query(query, params).first
end
# @param [String] query The SQL raw query.
# @param [Array] params The SQL query parameters.
# @return [Integr] The count value from SQL result.
def query_count(query, params = [])
query_first(query, params).count.to_i
end
# @param [Boolean] True if the connection singleton was configured, otherwise returns false.
def connected?
!@config.nil?
end
# Override the connection with a raw PG connection.
# @param [PG::Connection] connection The raw PG connection.
def use_connection connection
@connection = connection
end
private
def connection
@connection ||= PG.connect(@config)
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/application_record.rb | lib/timescaledb/application_record.rb | # frozen_string_literal: true
module Timescaledb
class ApplicationRecord < ::ActiveRecord::Base
self.abstract_class = true
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/continuous_aggregates_helper.rb | lib/timescaledb/continuous_aggregates_helper.rb | module Timescaledb
module ContinuousAggregatesHelper
extend ActiveSupport::Concern
included do
class_attribute :rollup_rules, default: {
/count\(\*\)\s+as\s+(\w+)/ => 'sum(\1) as \1',
/sum\((\w+)\)\s+as\s+(\w+)/ => 'sum(\2) as \2',
/min\((\w+)\)\s+as\s+(\w+)/ => 'min(\2) as \2',
/max\((\w+)\)\s+as\s+(\w+)/ => 'max(\2) as \2',
/first\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'first(\3, \2) as \3',
/high\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'max(\1) as \1',
/low\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'min(\1) as \1',
/last\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'last(\3, \2) as \3',
/candlestick_agg\((\w+),\s*(\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'rollup(\4) as \4',
/stats_agg\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'rollup(\3) as \3',
/stats_agg\((\w+)\)\s+as\s+(\w+)/ => 'rollup(\2) as \2',
/state_agg\((\w+)\)\s+as\s+(\w+)/ => 'rollup(\2) as \2',
/percentile_agg\((\w+),\s*(\w+)\)\s+as\s+(\w+)/ => 'rollup(\3) as \3',
/heartbeat_agg\((\w+)\)\s+as\s+(\w+)/ => 'rollup(\2) as \2',
/stats_agg\(([^)]+)\)\s+(as\s+(\w+))/ => 'rollup(\3) \2',
/stats_agg\((.*)\)\s+(as\s+(\w+))/ => 'rollup(\3) \2'
}
scope :rollup, ->(interval) do
select_values = (self.select_values - ["time"]).select{|e|!e.downcase.start_with?("time_bucket")}
if self.select_values.any?{|e|e.downcase.start_with?('time_bucket(')} || self.select_values.include?('time')
select_values = apply_rollup_rules(select_values)
select_values.gsub!(/time_bucket\((.+), (.+)\)/, "time_bucket(#{interval}, \2)")
select_values.gsub!(/\btime\b/, "time_bucket(#{interval}, time) as time")
end
group_values = self.group_values.dup
if self.segment_by_column
if !group_values.include?(self.segment_by_column)
group_values << self.segment_by_column
end
if !select_values.include?(self.segment_by_column.to_s)
select_values.insert(0, self.segment_by_column.to_s)
end
end
where_values = self.where_values_hash
tb = "time_bucket(#{interval}, #{time_column})"
self.unscoped.select("#{tb} as #{time_column}, #{select_values.join(', ')}")
.where(where_values)
.group(tb, *group_values)
end
end
class_methods do
def continuous_aggregates(options = {})
@time_column = options[:time_column] || self.time_column
@timeframes = options[:timeframes] || [:minute, :hour, :day, :week, :month, :year]
scopes = options[:scopes] || []
@aggregates = {}
scopes.each do |scope_name|
@aggregates[scope_name] = {
scope_name: scope_name,
select: nil,
group_by: nil,
refresh_policy: options[:refresh_policy] || {}
}
end
# Allow for custom aggregate definitions to override or add to scope-based ones
@aggregates.merge!(options[:aggregates] || {})
# Add custom rollup rules if provided
self.rollup_rules.merge!(options[:custom_rollup_rules] || {})
define_continuous_aggregate_classes unless options[:skip_definition]
end
def refresh_aggregates(timeframes = nil)
timeframes ||= @timeframes
@aggregates.each do |aggregate_name, _|
timeframes.each do |timeframe|
klass = const_get("#{aggregate_name}_per_#{timeframe}".classify)
klass.refresh!
end
end
end
def create_continuous_aggregates(with_data: false)
@aggregates.each do |aggregate_name, config|
@timeframes.each do |timeframe|
klass = const_get("#{aggregate_name}_per_#{timeframe}".classify)
connection.execute <<~SQL
CREATE MATERIALIZED VIEW IF NOT EXISTS #{klass.table_name}
WITH (timescaledb.continuous) AS
#{klass.base_query}
#{with_data ? 'WITH DATA' : 'WITH NO DATA'};
SQL
if (policy = klass.refresh_policy)
connection.execute <<~SQL
SELECT add_continuous_aggregate_policy('#{klass.table_name}',
start_offset => INTERVAL '#{policy[:start_offset]}',
end_offset => INTERVAL '#{policy[:end_offset]}',
schedule_interval => INTERVAL '#{policy[:schedule_interval]}');
SQL
end
end
end
end
def apply_rollup_rules(select_values)
result = select_values.dup
rollup_rules.each do |pattern, replacement|
result.gsub!(pattern, replacement)
end
# Remove any remaining time_bucket
result.gsub!(/time_bucket\(.+?\)( as \w+)?/, '')
result
end
def drop_continuous_aggregates
@aggregates.each do |aggregate_name, _|
@timeframes.reverse_each do |timeframe|
view_name = "#{aggregate_name}_per_#{timeframe}"
connection.execute("DROP MATERIALIZED VIEW IF EXISTS #{view_name} CASCADE")
end
end
end
private
def define_continuous_aggregate_classes
base_model = self
@aggregates.each do |aggregate_name, config|
previous_timeframe = nil
@timeframes.each do |timeframe|
_table_name = "#{aggregate_name}_per_#{timeframe}"
class_name = "#{aggregate_name}_per_#{timeframe}".classify
const_set(class_name, Class.new(base_model) do
class << self
attr_accessor :config, :timeframe, :base_query, :base_model, :previous_timeframe, :interval, :aggregate_name, :prev_klass
end
self.table_name = _table_name
self.config = config
self.timeframe = timeframe
self.previous_timeframe = previous_timeframe
self.aggregate_name = aggregate_name
self.interval = "'1 #{timeframe.to_s}'"
self.base_model = base_model
def self.prev_klass
base_model.const_get("#{aggregate_name}_per_#{previous_timeframe}".classify)
end
def self.base_query
@base_query ||= begin
tb = "time_bucket(#{interval}, #{time_column})"
if previous_timeframe
select_clause = base_model.apply_rollup_rules("#{config[:select]}")
# Note there's no where clause here, because we're using the previous timeframe's data
"SELECT #{tb} as #{time_column}, #{select_clause} FROM \"#{prev_klass.table_name}\" GROUP BY #{[tb, *config[:group_by]].join(', ')}"
else
scope = base_model.public_send(config[:scope_name])
config[:select] = scope.select_values.select{|e|!e.downcase.start_with?("time_bucket")}.join(', ')
config[:group_by] = scope.group_values
config[:where] =
if scope.where_values_hash.present?
scope.where_values_hash.map { |key, value| "#{key} = '#{value}'" }.join(' AND ')
elsif scope.where_clause.ast.present? && scope.where_clause.ast.to_sql.present?
scope.where_clause.ast.to_sql
end
sql = "SELECT #{tb} as #{time_column}, #{config[:select]}"
sql += " FROM \"#{base_model.table_name}\""
sql += " WHERE #{config[:where]}" if config[:where]
sql += " GROUP BY #{[tb, *config[:group_by]].join(', ')}"
sql
end
end
end
def self.refresh!(start_time = nil, end_time = nil)
if start_time && end_time
connection.execute("CALL refresh_continuous_aggregate('#{table_name}', '#{start_time}', '#{end_time}')")
else
connection.execute("CALL refresh_continuous_aggregate('#{table_name}', null, null)")
end
end
def readonly?
true
end
def self.refresh_policy
config[:refresh_policy]&.dig(timeframe)
end
end)
previous_timeframe = timeframe
end
end
end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/toolkit.rb | lib/timescaledb/toolkit.rb | require_relative "toolkit/helpers"
require_relative "toolkit/time_vector"
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/extension.rb | lib/timescaledb/extension.rb | module Timescaledb
# Provides metadata around the extension in the database
module Extension
module_function
# @return String version of the timescaledb extension
def version
@version ||= Timescaledb.connection.query_first(<<~SQL)&.version
SELECT extversion as version
FROM pg_extension
WHERE extname = 'timescaledb'
SQL
end
def installed?
version.present?
end
def update!
Timescaledb.connection.execute('ALTER EXTENSION timescaledb UPDATE')
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/job.rb | lib/timescaledb/job.rb | module Timescaledb
class Job < ::Timescaledb::ApplicationRecord
self.table_name = "timescaledb_information.jobs"
self.primary_key = "job_id"
scope :compression, -> { where(proc_name: [:tsbs_compress_chunks, :policy_compression]) }
scope :refresh_continuous_aggregate, -> { where(proc_name: :policy_refresh_continuous_aggregate) }
scope :scheduled, -> { where(scheduled: true) }
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/dimensions.rb | lib/timescaledb/dimensions.rb | module Timescaledb
class Dimension < ::Timescaledb::ApplicationRecord
self.table_name = "timescaledb_information.dimensions"
# attribute :time_interval, :interval
end
Dimensions = Dimension
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database.rb | lib/timescaledb/database.rb | require_relative 'database/chunk_statements'
require_relative 'database/hypertable_statements'
require_relative 'database/quoting'
require_relative 'database/schema_statements'
require_relative 'database/types'
module Timescaledb
class Database
extend ChunkStatements
extend HypertableStatements
extend Quoting
extend SchemaStatements
extend Types
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/toolkit/helpers.rb | lib/timescaledb/toolkit/helpers.rb | require 'active_record/connection_adapters/postgresql_adapter'
# Useful methods to run TimescaleDB with Toolkit functions in you Ruby app.
module Timescaledb
# Helpers methods to setup queries that uses the toolkit.
module Toolkit
module Helpers
# Includes toolkit_experimental in the search path to make it easy to have
# access to all the functions
def add_toolkit_to_search_path!
return if schema_search_path.include?("toolkit_experimental")
self.schema_search_path = "#{schema_search_path}, toolkit_experimental"
end
end
end
end
ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.include(Timescaledb::Toolkit::Helpers)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/toolkit/time_vector.rb | lib/timescaledb/toolkit/time_vector.rb | # frozen_string_literal: true
module Timescaledb
module Toolkit
module TimeVector
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
def value_column
@value_column ||= hypertable_options[:value_column] || :val
end
def time_column
respond_to?(:time_column) && super || hypertable_options[:time_column]
end
def segment_by_column
hypertable_options[:segment_by] || hypertable_options[:compress_segment_by]
end
protected
def define_default_vector_scopes
scope :volatility, -> (segment_by: segment_by_column, value: value_column) do
select([*segment_by,
"timevector(#{time_column}, #{value}) -> sort() -> delta() -> abs() -> sum() as volatility"
].join(", ")).group(segment_by)
end
scope :time_weight, -> (segment_by: segment_by_column) do
select([*segment_by,
"timevector(#{time_column}, #{value_column}) -> sort() -> delta() -> abs() -> time_weight() as time_weight"
].join(", "))
.group(segment_by)
end
scope :lttb, -> (threshold:, segment_by: segment_by_column, time: time_column, value: value_column, value_exp: value_column) do
if value =~ /(.*)\bas\b(.*)/
value_exp = $1
value = $2
end
lttb_query = <<~SQL
WITH x AS ( #{select(*segment_by, time_column, value_exp || value).to_sql})
SELECT #{"x.#{segment_by}," if segment_by}
(lttb( x.#{time_column}, x.#{value}, #{threshold}) -> unnest()).*
FROM x
#{"GROUP BY #{segment_by}" if segment_by}
SQL
downsampled = unscoped
.select(*segment_by, "time as #{time_column}, value as #{value_column}")
.from("(#{lttb_query}) as x")
if segment_by
downsampled.inject({}) do |group,e|
key = e.send(segment_by)
(group[key] ||= []) << [e.send(time_column), e.send(value_column)]
group
end
else
downsampled.map{|e|[ e[time_column],e[value_column]]}
end
end
scope :_candlestick, -> (timeframe: '1h',
segment_by: segment_by_column,
time: time_column,
volume: 'volume',
value: value_column) do
select( %|time_bucket('#{timeframe}', "#{time}")|,
*segment_by,
"candlestick_agg(#{time}, #{value}, #{volume}) as candlestick")
.order(1)
.group(*(segment_by ? [1,2] : 1))
end
scope :candlestick, -> (timeframe: '1h',
segment_by: segment_by_column,
time: time_column,
volume: 'volume',
value: value_column) do
raw = _candlestick(timeframe: timeframe, segment_by: segment_by, time: time, value: value, volume: volume)
unscoped
.from("(#{raw.to_sql}) AS candlestick")
.select("time_bucket",*segment_by,
"open(candlestick),
high(candlestick),
low(candlestick),
close(candlestick),
open_time(candlestick),
high_time(candlestick),
low_time(candlestick),
close_time(candlestick),
volume(candlestick),
vwap(candlestick)")
end
end
end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/scenic/adapter.rb | lib/timescaledb/scenic/adapter.rb | require 'scenic/adapters/postgres'
require 'scenic/adapters/postgres/views'
module Timescaledb
module Scenic
class Views < ::Scenic::Adapters::Postgres::Views
# All of the views that this connection has defined, excluding any
# Timescale continuous aggregates. Those should be defined using
# +create_continuous_aggregate+ rather than +create_view+.
#
# @return [Array<Scenic::View>]
def all
ts_views = views_from_timescale.map { |v| to_scenic_view(v) }
pg_views = views_from_postgres.map { |v| to_scenic_view(v) }
ts_view_names = ts_views.map(&:name)
# Skip records with matching names (includes the schema name
# for records not in the public schema)
pg_views.reject { |v| v.name.in?(ts_view_names) }
end
private
def views_from_timescale
connection.execute(<<-SQL.squish)
SELECT
view_name as viewname,
view_definition AS definition,
'm' AS kind,
view_schema AS namespace
FROM timescaledb_information.continuous_aggregates
SQL
end
end
class Adapter < ::Scenic::Adapters::Postgres
# Timescale does some funky stuff under the hood with continuous
# aggregates. A continuous aggregate is made up of:
#
# 1. A hypertable to store the materialized data
# 2. An entry in the jobs table to refresh the data
# 3. A view definition that union's the hypertable and any recent data
# not included in the hypertable
#
# That doesn't dump well, even to structure.sql (we lose the job
# definition, since it's not part of the DDL).
#
# Our schema dumper implementation will handle dumping the continuous
# aggregate definitions, but we need to override Scenic's schema dumping
# to exclude those continuous aggregates.
def views
Views.new(connection).all
end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/scenic/extension.rb | lib/timescaledb/scenic/extension.rb | # Scenic does not include `WITH` option that is used with continuous aggregates.
module Timescaledb
module Scenic
module Extension
# @override Scenic::Adapters::Postgres#create_materialized_view
# Creates a materialized view in the database
#
# @param name The name of the materialized view to create
# @param sql_definition The SQL schema that defines the materialized view.
# @param with [String] Default: nil. Set with: "..." to add "WITH (...)".
# @param no_data [Boolean] Default: false. Set to true to not create data.
# materialized view without running the associated query. You will need
# to perform a non-concurrent refresh to populate with data.
#
# This is typically called in a migration via {Statements#create_view}.
# @return [void]
def create_materialized_view(name, sql_definition, with: nil, no_data: false)
execute <<-SQL
CREATE MATERIALIZED VIEW #{quote_table_name(name)}
#{"WITH (#{with})" if with} AS
#{sql_definition.rstrip.chomp(';')}
#{'WITH NO DATA' if no_data};
SQL
end
# @override Scenic::Adapters::Postgres#create_view
# to add the `with: ` keyword that can be used for such option.
def create_view(name, version: nil, with: nil, sql_definition: nil, materialized: false, no_data: false)
if version.present? && sql_definition.present?
raise(
ArgumentError,
"sql_definition and version cannot both be set",
)
end
if version.blank? && sql_definition.blank?
version = 1
end
sql_definition ||= definition(name, version)
if materialized
::Scenic.database.create_materialized_view(
name,
sql_definition,
no_data: no_data,
with: with
)
else
::Scenic.database.create_view(name, sql_definition, with: with)
end
end
private
def definition(name, version)
::Scenic::Definition.new(name, version).to_sql
end
end
module MigrationHelpers
# Create a timescale continuous aggregate view
def create_scenic_continuous_aggregate(name)
::Scenic.database.create_view(name, materialized: true, no_data: true, with: "timescaledb.continuous")
end
end
end
end
Scenic::Adapters::Postgres.include(Timescaledb::Scenic::Extension)
ActiveRecord::ConnectionAdapters::AbstractAdapter.prepend(Timescaledb::Scenic::MigrationHelpers)
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats/job_stats.rb | lib/timescaledb/stats/job_stats.rb |
module Timescaledb
class Stats
class JobStats
# @param [Timescaledb:Connection] connection The PG connection.
def initialize(connection = Timescaledb.connection)
@connection = connection
end
delegate :query_first, to: :@connection
# @return [Hash] The job_stats stats
def to_h
query_first(job_stats_query).to_h.transform_values(&:to_i)
end
private
def job_stats_query
<<-SQL
SELECT SUM(total_successes)::INT AS success,
SUM(total_runs)::INT AS runs,
SUM(total_failures)::INT AS failures
FROM timescaledb_information.job_stats
SQL
end
end
end
end | ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats/hypertables.rb | lib/timescaledb/stats/hypertables.rb | require_relative './chunks'
module Timescaledb
class Stats
class Hypertables
# @param [Timescaledb:Connection] connection The PG connection.
# @param [Array<String>] hypertables The list of hypertable names.
def initialize(hypertables = [], connection = Timescaledb.connection)
@connection = connection
@hypertables = hypertables.map(&method('hypertable_name_with_schema'))
end
delegate :query, :query_first, :query_count, to: :@connection
# @return [Hash] The hypertables stats
def to_h
{
count: @hypertables.count,
uncompressed_count: uncompressed_count,
approximate_row_count: approximate_row_count,
chunks: Timescaledb::Stats::Chunks.new(@hypertables).to_h,
size: size
}
end
private
def uncompressed_count
@hypertables.count do |hypertable|
query("SELECT * from hypertable_compression_stats('#{hypertable}')").empty?
end
end
def approximate_row_count
@hypertables.each_with_object(Hash.new) do |hypertable, summary|
row_count = query_first("SELECT * FROM approximate_row_count('#{hypertable}')").approximate_row_count.to_i
summary[hypertable] = row_count
end
end
def size
sum = -> (method_name) { (@hypertables.map(&method(method_name)).inject(:+) || 0) }
{
uncompressed: humanize_bytes(sum[:before_total_bytes]),
compressed: humanize_bytes(sum[:after_total_bytes])
}
end
def before_total_bytes(hypertable)
(compression_stats[hypertable]&.before_compression_total_bytes || detailed_size[hypertable]).to_i
end
def after_total_bytes(hypertable)
(compression_stats[hypertable]&.after_compression_total_bytes || 0).to_i
end
def compression_stats
@compression_stats ||=
@hypertables.each_with_object(Hash.new) do |hypertable, stats|
stats[hypertable] = query_first(compression_stats_query, [hypertable])
stats
end
end
def compression_stats_query
'SELECT * FROM hypertable_compression_stats($1)'
end
def detailed_size
@detailed_size ||=
@hypertables.each_with_object(Hash.new) do |hypertable, size|
size[hypertable] = query_first(detailed_size_query, [hypertable]).total_bytes
size
end
end
def detailed_size_query
'SELECT * FROM hypertable_detailed_size($1)'
end
def hypertable_name_with_schema(hypertable)
[hypertable.hypertable_schema, hypertable.hypertable_name].compact.join('.')
end
def humanize_bytes(bytes)
units = %w(B KiB MiB GiB TiB PiB EiB)
return '0 B' if bytes == 0
exp = (Math.log2(bytes) / 10).floor
max_exp = units.size - 1
exp = max_exp if exp > max_exp
value = (bytes.to_f / (1 << (exp * 10))).round(1)
"#{value} #{units[exp]}"
end
end
end
end | ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats/continuous_aggregates.rb | lib/timescaledb/stats/continuous_aggregates.rb |
module Timescaledb
class Stats
class ContinuousAggregates
# @param [Timescaledb:Connection] connection The PG connection.
def initialize(connection = Timescaledb.connection)
@connection = connection
end
delegate :query_count, to: :@connection
# @return [Hash] The continuous_aggregates stats
def to_h
{ total: total }
end
private
def total
query_count('SELECT COUNT(1) FROM timescaledb_information.continuous_aggregates')
end
end
end
end | ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/stats/chunks.rb | lib/timescaledb/stats/chunks.rb | module Timescaledb
class Stats
class Chunks
# @param [Array<String>] hypertables The list of hypertable names.
# @param [Timescaledb:Connection] connection The PG connection.
def initialize(hypertables = [], connection = Timescaledb.connection)
@connection = connection
@hypertables = hypertables
end
delegate :query_count, to: :@connection
# @return [Hash] The chunks stats
def to_h
{ total: total, compressed: compressed, uncompressed: uncompressed }
end
private
def total
query_count(base_query, [@hypertables])
end
def compressed
compressed_query = [base_query, 'is_compressed'].join(' AND ')
query_count(compressed_query, [@hypertables])
end
def uncompressed
uncompressed_query = [base_query, 'NOT is_compressed'].join(' AND ')
query_count(uncompressed_query, [@hypertables])
end
def base_query
"SELECT COUNT(1) FROM timescaledb_information.chunks WHERE hypertable_name IN ($1)"
end
end
end
end | ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/acts_as_hypertable/core.rb | lib/timescaledb/acts_as_hypertable/core.rb | # frozen_string_literal: true
module Timescaledb
module ActsAsHypertable
# Core behavior mixed into models that declare `acts_as_hypertable`:
# exposes the configured time column and installs catalog-association and
# time-window scopes on the including model.
module Core
  def self.included(base)
    base.extend(ClassMethods)
  end

  module ClassMethods
    # The column used as the hypertable's time dimension.
    # Defaults to :created_at when no :time_column option was given.
    def time_column
      @time_column ||= hypertable_options[:time_column] || :created_at
    end

    protected

    # Scopes exposing the TimescaleDB catalog records tied to this model's
    # table (chunks, jobs, job stats, compression settings, caggs).
    # NOTE: :hypertable and :caggs return non-relation values from a scope,
    # which matches the original behavior callers rely on.
    def define_association_scopes
      scope :chunks, -> do
        Chunk.where(hypertable_name: table_name)
      end
      scope :hypertable, -> do
        Hypertable.find_by(hypertable_name: table_name)
      end
      scope :jobs, -> do
        Job.where(hypertable_name: table_name)
      end
      scope :job_stats, -> do
        JobStats.where(hypertable_name: table_name)
      end
      scope :compression_settings, -> do
        CompressionSettings.where(hypertable_name: table_name)
      end
      scope :caggs, -> do
        ContinuousAggregates.where(hypertable_name: table_name)
      end
    end

    # Convenience time-window scopes built on top of the time column.
    def define_default_scopes
      scope :between, ->(start_time, end_time) do
        where("#{time_column} BETWEEN ? AND ?", start_time, end_time)
      end
      scope :previous_month, -> do
        ref = 1.month.ago.in_time_zone
        between(ref.beginning_of_month, ref.end_of_month)
      end
      scope :previous_week, -> do
        ref = 1.week.ago.in_time_zone
        between(ref.beginning_of_week, ref.end_of_week)
      end
      scope :this_month, -> do
        ref = Time.now.in_time_zone
        between(ref.beginning_of_month, ref.end_of_month)
      end
      scope :this_week, -> do
        ref = Time.now.in_time_zone
        between(ref.beginning_of_week, ref.end_of_week)
      end
      scope :yesterday, -> do
        # FIX: previously `between(ref.yesterday, ref.yesterday)`, which
        # compared against two identical instants two days in the past and
        # matched (almost) no rows. Cover the whole previous calendar day,
        # mirroring the :today scope.
        ref = 1.day.ago.in_time_zone
        between(ref.beginning_of_day, ref.end_of_day)
      end
      scope :today, -> do
        ref = Time.now.in_time_zone
        between(ref.beginning_of_day, ref.end_of_day)
      end
      scope :last_hour, -> { where("#{time_column} > ?", 1.hour.ago.in_time_zone) }
    end

    # Coerces the configured time column name to a Symbol.
    # Assumes hypertable_options[:time_column] is present — TODO confirm
    # callers never invoke this with the option unset.
    def normalize_hypertable_options
      hypertable_options[:time_column] = hypertable_options[:time_column].to_sym
    end
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database/schema_statements.rb | lib/timescaledb/database/schema_statements.rb | module Timescaledb
class Database
# Builders for TimescaleDB management SQL (hypertables, compression,
# retention, reorder, continuous aggregates). Every public method only
# RENDERS a SQL string — nothing here talks to a connection.
#
# Expects #quote (Quoting) and #interval_to_sql (Types) to be mixed into
# the same object.
module SchemaStatements
  # @see https://docs.timescale.com/api/latest/hypertable/create_hypertable/#create_hypertable
  #
  # @param [String] relation The identifier of the table to convert to hypertable
  # @param [String] time_column_name The name of the column containing time values as well as the primary column to partition by
  # @param [Hash] options The optional arguments
  # @return [String] The create_hypertable SQL statement
  def create_hypertable_sql(relation, time_column_name, **options)
    options.transform_keys!(&:to_sym)
    partitioning_column = options.delete(:partitioning_column)
    number_partitions = options.delete(:number_partitions)
    arguments = [quote(relation), quote(time_column_name)]
    # Space partitioning requires BOTH the column and the partition count.
    arguments += [quote(partitioning_column), number_partitions] if partitioning_column && number_partitions
    arguments += create_hypertable_options_to_named_notation_sql(options)
    "SELECT create_hypertable(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/compression/alter_table_compression/#alter-table-compression
  #
  # @param [String] hypertable The name of the hypertable to enable compression
  # @param [Hash] options The optional arguments
  # @return [String] The ALTER TABLE SQL to enable compression
  def enable_hypertable_compression_sql(hypertable, **options)
    options.transform_keys!(&:to_sym)
    compress_orderby = options.delete(:compress_orderby)
    compress_segmentby = options.delete(:compress_segmentby)
    arguments = ['timescaledb.compress']
    arguments << "timescaledb.compress_orderby = #{quote(compress_orderby)}" if compress_orderby
    arguments << "timescaledb.compress_segmentby = #{quote(compress_segmentby)}" if compress_segmentby
    "ALTER TABLE #{hypertable} SET (#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/compression/alter_table_compression/#alter-table-compression
  #
  # @param [String] hypertable The name of the hypertable to disable compression
  # @return [String] The ALTER TABLE SQL to disable compression
  def disable_hypertable_compression_sql(hypertable)
    "ALTER TABLE #{hypertable} SET (timescaledb.compress = FALSE);"
  end

  # @see https://docs.timescale.com/api/latest/compression/add_compression_policy/#add_compression_policy
  #
  # @param [String] hypertable The name of the hypertable or continuous aggregate to create the policy for
  # @param [String] compress_after The age after which the policy job compresses chunks
  # @param [Hash] options The optional arguments
  # @return [String] The add_compression_policy SQL statement
  def add_compression_policy_sql(hypertable, compress_after, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable), interval_to_sql(compress_after)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT add_compression_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/compression/remove_compression_policy/#remove_compression_policy
  #
  # @param [String] hypertable The name of the hypertable to remove the policy from
  # @param [Hash] options The optional arguments
  # @return [String] The remove_compression_policy SQL statement
  def remove_compression_policy_sql(hypertable, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT remove_compression_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/data-retention/add_retention_policy/#add_retention_policy
  #
  # @param [String] hypertable The name of the hypertable to create the policy for
  # @param [String] drop_after The age after which the policy job drops chunks
  # @param [Hash] options The optional arguments
  # @return [String] The add_retention_policy SQL statement
  def add_retention_policy_sql(hypertable, drop_after, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable), interval_to_sql(drop_after)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT add_retention_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/data-retention/remove_retention_policy/#remove_retention_policy
  #
  # @param [String] hypertable The name of the hypertable to remove the policy from
  # @param [Hash] options The optional arguments
  # @return [String] The remove_retention_policy SQL statement
  def remove_retention_policy_sql(hypertable, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT remove_retention_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/hypertable/add_reorder_policy/#add_reorder_policy
  #
  # @param [String] hypertable The name of the hypertable to create the policy for
  # @param [String] index_name The existing index by which to order rows on disk
  # @param [Hash] options The optional arguments
  # @return [String] The add_reorder_policy SQL statement
  def add_reorder_policy_sql(hypertable, index_name, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable), quote(index_name)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT add_reorder_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/hypertable/remove_reorder_policy/#remove_reorder_policy
  #
  # @param [String] hypertable The name of the hypertable to remove the policy from
  # @param [Hash] options The optional arguments
  # @return [String] The remove_reorder_policy SQL statement
  def remove_reorder_policy_sql(hypertable, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(hypertable)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT remove_reorder_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/continuous-aggregates/create_materialized_view
  #
  # @param [String] continuous_aggregate The name of the continuous aggregate view to be created
  # @param [String] sql The SELECT statement that defines the aggregate
  # @param [Hash] options The optional arguments
  # @return [String] The create materialized view SQL statement
  def create_continuous_aggregate_sql(continuous_aggregate, sql, **options)
    options.transform_keys!(&:to_sym)
    with_data_opts = %w[WITH DATA]
    # FIX: previously tested `options.key?(:with_no_data)`, so passing
    # `with_no_data: false` still rendered "WITH NO DATA". Only a truthy
    # value should trigger the NO keyword.
    with_data_opts.insert(1, 'NO') if options[:with_no_data]
    <<~SQL
      CREATE MATERIALIZED VIEW #{continuous_aggregate}
      WITH (timescaledb.continuous) AS
      #{sql.strip}
      #{with_data_opts.join(' ')};
    SQL
  end

  # @see https://docs.timescale.com/api/latest/continuous-aggregates/drop_materialized_view
  #
  # @param [String] continuous_aggregate The name of the continuous aggregate view to be dropped
  # @param [Boolean] cascade A boolean to drop objects that depend on the continuous aggregate view
  # @return [String] The drop materialized view SQL statement
  def drop_continuous_aggregate_sql(continuous_aggregate, cascade: false)
    arguments = [continuous_aggregate]
    arguments << 'CASCADE' if cascade
    "DROP MATERIALIZED VIEW #{arguments.join(' ')};"
  end

  # @see https://docs.timescale.com/api/latest/continuous-aggregates/add_continuous_aggregate_policy
  #
  # @param [String] continuous_aggregate The name of the continuous aggregate to add the policy for
  # @param [String] start_offset The start of the refresh window as an interval relative to the time when the policy is executed
  # @param [String] end_offset The end of the refresh window as an interval relative to the time when the policy is executed
  # @param [String] schedule_interval The interval between refresh executions in wall-clock time
  # @param [Hash] options The optional arguments
  # @return [String] The add_continuous_aggregate_policy SQL statement
  def add_continuous_aggregate_policy_sql(continuous_aggregate, start_offset: nil, end_offset: nil, schedule_interval:, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(continuous_aggregate)]
    # nil offsets render as NULL (interval_to_sql), which is the API's
    # "no bound" value, so they are always emitted.
    arguments << named_notation_sql(name: :start_offset, value: interval_to_sql(start_offset))
    arguments << named_notation_sql(name: :end_offset, value: interval_to_sql(end_offset))
    arguments << named_notation_sql(name: :schedule_interval, value: interval_to_sql(schedule_interval))
    arguments += continuous_aggregate_policy_options_to_named_notation_sql(options)
    "SELECT add_continuous_aggregate_policy(#{arguments.join(', ')});"
  end

  # @see https://docs.timescale.com/api/latest/continuous-aggregates/remove_continuous_aggregate_policy
  #
  # @param [String] continuous_aggregate The name of the continuous aggregate the policy should be removed from
  # @param [Hash] options The optional arguments
  # @return [String] The remove_continuous_aggregate_policy SQL statement
  def remove_continuous_aggregate_policy_sql(continuous_aggregate, **options)
    options.transform_keys!(&:to_sym)
    arguments = [quote(continuous_aggregate)]
    arguments += policy_options_to_named_notation_sql(options)
    "SELECT remove_continuous_aggregate_policy(#{arguments.join(', ')});"
  end

  private

  # @param [Array<Hash<Symbol, Object>>] options The policy optional arguments.
  # @return [Array<String>]
  def policy_options_to_named_notation_sql(options)
    options.map do |option, value|
      case option
      when :if_not_exists, :if_exists then named_notation_sql(name: option, value: boolean_to_sql(value))
      when :initial_start, :timezone then named_notation_sql(name: option, value: quote(value))
      end
    end.compact
  end

  # @param [Array<Hash<Symbol, Object>>] options The create_hypertable optional arguments.
  # @return [Array<String>]
  def create_hypertable_options_to_named_notation_sql(options)
    options.map do |option, value|
      case option
      when :chunk_time_interval
        named_notation_sql(name: option, value: interval_to_sql(value))
      when :if_not_exists, :create_default_indexes, :migrate_data, :distributed
        named_notation_sql(name: option, value: boolean_to_sql(value))
      when :partitioning_func, :associated_schema_name,
           :associated_table_prefix, :time_partitioning_func
        named_notation_sql(name: option, value: quote(value))
      end
    end.compact
  end

  # Same mapping as policy_options_to_named_notation_sql; kept separate so
  # the two policy families can diverge without breaking each other.
  #
  # @param [Array<Hash<Symbol, Object>>] options The continuous aggregate policy arguments.
  # @return [Array<String>]
  def continuous_aggregate_policy_options_to_named_notation_sql(options)
    options.map do |option, value|
      case option
      when :if_not_exists then named_notation_sql(name: option, value: boolean_to_sql(value))
      when :initial_start, :timezone then named_notation_sql(name: option, value: quote(value))
      end
    end.compact
  end

  # Renders a PostgreSQL named-notation argument, e.g. `if_not_exists => 'TRUE'`.
  def named_notation_sql(name:, value:)
    "#{name} => #{value}"
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database/types.rb | lib/timescaledb/database/types.rb | module Timescaledb
class Database
# Helpers that render Ruby values as SQL literals for the statement builders.
module Types
  # Renders an interval value for interpolation into generated SQL.
  # nil becomes NULL; integers pass through unchanged (chunk-time intervals
  # may be plain integers); anything else becomes an INTERVAL literal.
  #
  # @param [String, Integer] interval The interval value
  # @return [String, Integer]
  def interval_to_sql(interval)
    case interval
    when nil then 'NULL'
    when Integer then interval
    else "INTERVAL #{quote(interval)}"
    end
  end

  # Renders a boolean as a quoted 'TRUE' / 'FALSE' literal.
  #
  # @param [String] boolean The boolean value
  # @return [String]
  def boolean_to_sql(boolean)
    literal = boolean ? 'TRUE' : 'FALSE'
    quote(literal)
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database/quoting.rb | lib/timescaledb/database/quoting.rb | module Timescaledb
class Database
# Minimal string quoting for the SQL builders.
module Quoting
  # Quotes given value and escapes single quote and backslash characters
  # (backslashes are doubled, single quotes become '').
  #
  # @return [String] The given value between quotes
  def quote(value)
    escaped = value.gsub("\\") { |backslash| backslash * 2 }
    escaped = escaped.gsub("'", "''")
    "'#{escaped}'"
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database/hypertable_statements.rb | lib/timescaledb/database/hypertable_statements.rb | module Timescaledb
class Database
# Builders for hypertable size-inspection SQL. Each method only renders a
# statement string; #quote must be mixed into the same object.
module HypertableStatements
  # @see https://docs.timescale.com/api/latest/hypertable/hypertable_size/
  #
  # @param [String] hypertable The hypertable to show size of
  # @return [String] The hypertable_size SQL statement
  def hypertable_size_sql(hypertable)
    format('SELECT hypertable_size(%s);', quote(hypertable))
  end

  # @see https://docs.timescale.com/api/latest/hypertable/hypertable_detailed_size/
  #
  # @param [String] hypertable The hypertable to show detailed size of
  # @return [String] The hypertable_detailed_size SQL statement
  def hypertable_detailed_size_sql(hypertable)
    format('SELECT * FROM hypertable_detailed_size(%s);', quote(hypertable))
  end

  # @see https://docs.timescale.com/api/latest/hypertable/hypertable_index_size/
  #
  # @param [String] index_name The name of the index on a hypertable
  # @return [String] The hypertable_index_size SQL statement
  def hypertable_index_size_sql(index_name)
    format('SELECT hypertable_index_size(%s);', quote(index_name))
  end

  # @see https://docs.timescale.com/api/latest/hypertable/chunks_detailed_size/
  #
  # @param [String] hypertable The name of the hypertable
  # @return [String] The chunks_detailed_size SQL statement
  def chunks_detailed_size_sql(hypertable)
    format('SELECT * FROM chunks_detailed_size(%s);', quote(hypertable))
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
timescale/timescaledb-ruby | https://github.com/timescale/timescaledb-ruby/blob/e15cabd97d1b0901abedea39c4443386b305f184/lib/timescaledb/database/chunk_statements.rb | lib/timescaledb/database/chunk_statements.rb | module Timescaledb
class Database
# Builders for per-chunk compression SQL; #quote must be mixed in.
module ChunkStatements
  # @see https://docs.timescale.com/api/latest/compression/compress_chunk/
  #
  # @param [String] chunk_name The name of the chunk to be compressed
  # @return [String] The compress_chunk SQL statement
  def compress_chunk_sql(chunk_name)
    format('SELECT compress_chunk(%s);', quote(chunk_name))
  end

  # @see https://docs.timescale.com/api/latest/compression/decompress_chunk/
  #
  # @param [String] chunk_name The name of the chunk to be decompressed
  # @return [String] The decompress_chunk SQL statement
  def decompress_chunk_sql(chunk_name)
    format('SELECT decompress_chunk(%s);', quote(chunk_name))
  end
end
end
end
| ruby | MIT | e15cabd97d1b0901abedea39c4443386b305f184 | 2026-01-04T17:50:22.541182Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_convert_spec.rb | spec/kumogata_convert_spec.rb | describe 'Kumogata::Client#convert' do
# Ruby DSL -> JSON: the canonical round-trip example; expected JSON is the
# exact pretty-printed output, compared with .chomp to drop the trailing newline.
it 'convert Ruby template to JSON template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template)
expect(json_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# Ruby DSL -> YAML: note the expectation heredoc is NOT chomped — YAML output
# keeps its trailing newline.
it 'convert Ruby template to YAML template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template, :options => {:output_format => :yaml})
expect(json_template).to eq((<<-EOS))
---
Resources:
myEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-XXXXXXXX
InstanceType: t1.micro
Outputs:
AZ:
Value:
Fn::GetAtt:
- myEC2Instance
- AvailabilityZone
EOS
end
# YAML -> Ruby DSL: input format is selected via :template_ext => '.yml'.
it 'convert YAML template to Ruby template' do
template = <<-EOS
---
Resources:
myEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-XXXXXXXX
InstanceType: t1.micro
Outputs:
AZ:
Value:
Fn::GetAtt:
- myEC2Instance
- AvailabilityZone
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.yml', :options => {:output_format => :ruby})
expect(ruby_template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
# Ruby DSL -> JavaScript: JS output wraps the JSON object in parentheses.
it 'convert Ruby template to JavaScript template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
js_template = run_client(:convert, :template => template, :options => {:output_format => :js})
expect(js_template).to eq <<-EOS.strip
({
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
})
EOS
end
# JavaScript -> Ruby DSL: the JS template may contain functions and comments;
# the converter evaluates them (fetch_ami() resolves to the literal AMI id).
it 'convert JavaScript template to Ruby template' do
template = <<-EOS
function fetch_ami() {
return "ami-XXXXXXXX";
}
({
Resources: { /* comment */
myEC2Instance: {
Type: "AWS::EC2::Instance",
Properties: {
ImageId: fetch_ami(),
InstanceType: "t1.micro"
}
}
},
Outputs: {
AZ: { /* comment */
Value: {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
})
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.js', :options => {:output_format => :ruby})
expect(ruby_template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
# Ruby DSL -> JSON5: output matches the JSON form (no surrounding parens).
it 'convert Ruby template to JSON5 template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
js_template = run_client(:convert, :template => template, :options => {:output_format => :json5})
expect(js_template).to eq <<-EOS.strip
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# JSON5 -> Ruby DSL: unquoted keys and /* */ comments are accepted.
it 'convert JSON5 template to Ruby template' do
template = <<-EOS
{
Resources: { /* comment */
myEC2Instance: {
Type: "AWS::EC2::Instance",
Properties: {
ImageId: "ami-XXXXXXXX",
InstanceType: "t1.micro"
}
}
},
Outputs: {
AZ: { /* comment */
Value: {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.json5', :options => {:output_format => :ruby})
expect(ruby_template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
# FIX: this example feeds a CoffeeScript template (note the `->` lambda
# syntax and the '.coffee' extension below) but was titled "JavaScript",
# duplicating the name of the earlier JavaScript example.
it 'convert CoffeeScript template to Ruby template' do
template = <<-EOS
fetch_ami = () -> "ami-XXXXXXXX"
# comment
return {
Resources:
myEC2Instance:
Type: "AWS::EC2::Instance",
Properties:
ImageId: fetch_ami(),
InstanceType: "t1.micro"
Outputs:
AZ: # comment
Value:
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.coffee', :options => {:output_format => :ruby})
expect(ruby_template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
# YAML -> JSON conversion of the same canonical template.
it 'convert YAML template to JSON template' do
template = <<-EOS
---
Resources:
myEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-XXXXXXXX
InstanceType: t1.micro
Outputs:
AZ:
Value:
Fn::GetAtt:
- myEC2Instance
- AvailabilityZone
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.yml', :options => {:output_format => :json})
expect(ruby_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# JSON -> Ruby DSL: '.template' ext selects the CloudFormation JSON parser;
# Ruby output is the default format (no :output_format given).
it 'convert JSON template to Ruby template' do
template = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.template')
expect(ruby_template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
# JSON -> YAML: expectation keeps the trailing newline (no .chomp).
it 'convert JSON template to YAML template' do
template = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
ruby_template = run_client(:convert, :template => template, :template_ext => '.template', :options => {:output_format => :yaml})
expect(ruby_template).to eq((<<-EOS))
---
Resources:
myEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-XXXXXXXX
InstanceType: t1.micro
Outputs:
AZ:
Value:
Fn::GetAtt:
- myEC2Instance
- AvailabilityZone
EOS
end
it 'convert Ruby template to JSON template with fn_join()' do
template = <<-TEMPLATE
Parameters do
Password do
NoEcho true
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
UserData do
Fn__Base64 (<<-EOS).fn_join
#!/bin/bash
echo START | logger
/opt/aws/bin/cfn-init -s <%= Ref "AWS::StackName" %> -r myEC2Instance --region <%= Ref "AWS::Region" %>
echo END | logger
EOS
end
end
Metadata do
AWS__CloudFormation__Init do
config do
packages do
yum({"httpd"=>[]})
end
services do
sysvinit do
httpd do
enabled "true"
ensureRunning "true"
end
end
end
commands do
any_name do
command (<<-EOS).fn_join
echo <%= Ref "Password" %> > /tmp/my-password
EOS
end
end
end # config
end # AWS__CloudFormation__Init
end # Metadata
end
end
TEMPLATE
json_template = run_client(:convert, :template => template)
expect(json_template).to eq((<<-'EOS').chomp)
{
"Parameters": {
"Password": {
"NoEcho": "true",
"Type": "String"
}
},
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro",
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash\n",
"echo START | logger\n",
"/opt/aws/bin/cfn-init -s ",
{
"Ref": "AWS::StackName"
},
" -r myEC2Instance --region ",
{
"Ref": "AWS::Region"
},
"\n",
"echo END | logger\n"
]
]
}
}
},
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"httpd": [
]
}
},
"services": {
"sysvinit": {
"httpd": {
"enabled": "true",
"ensureRunning": "true"
}
}
},
"commands": {
"any:name": {
"command": {
"Fn::Join": [
"",
[
"echo ",
{
"Ref": "Password"
},
" > /tmp/my-password\n"
]
]
}
}
}
}
}
}
}
}
}
EOS
end
it 'convert Ruby template to JSON template with converting user_data' do
template = <<-TEMPLATE
Parameters do
Password do
NoEcho true
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
UserData (<<-EOS).undent.encode64
#!/bin/bash
yum install -y httpd
services start httpd
EOS
end
end
end
TEMPLATE
json_template = run_client(:convert, :template => template)
# UserData: IyEvYmluL2Jhc2gKeXVtIGluc3RhbGwgLXkgaHR0cGQKc2VydmljZXMgc3RhcnQgaHR0cGQK
# => #!/bin/bash
# yum install -y httpd
# services start httpd
expect(json_template).to eq((<<-EOS).chomp)
{
"Parameters": {
"Password": {
"NoEcho": "true",
"Type": "String"
}
},
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro",
"UserData": "IyEvYmluL2Jhc2gKeXVtIGluc3RhbGwgLXkgaHR0cGQKc2VydmljZXMgc3RhcnQgaHR0cGQK"
}
}
}
}
EOS
end
# Block args: the resource block yields its own name (resource_name), and the
# Outputs section exercises every intrinsic (GetAtt/Base64/FindInMap/GetAZs/
# If/Select) inside fn_join heredocs. The template heredoc is single-quoted
# (<<-'TEMPLATE') so #{resource_name} reaches the converter unexpanded.
it 'convert Ruby template to JSON template with block args' do
template = <<-'TEMPLATE'
Parameters do
Password do
NoEcho true
Type "String"
end
end
Resources do
myEC2Instance do |resource_name|
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
UserData do
Fn__Base64 (<<-EOS).fn_join
#!/bin/bash
echo START | logger
/opt/aws/bin/cfn-init -s <%= Ref "AWS::StackName" %> -r #{resource_name} --region <%= Ref "AWS::Region" %>
echo END | logger
EOS
end
end
Metadata do
AWS__CloudFormation__Init do
config do
packages do
yum({"httpd"=>[]})
end
services do
sysvinit do
httpd do
enabled "true"
ensureRunning "true"
end
end
end
commands do
any_name do
command (<<-EOS).fn_join
echo <%= Ref "Password" %> > /tmp/my-password
EOS
end
end
end # config
end # AWS__CloudFormation__Init
end # Metadata
end
end
Outputs do
WebsiteURL do
Value (<<-EOS).fn_join
http://<%= Fn__GetAtt "myEC2Instance", "PublicDnsName" %>
EOS
end
Base64Str do
Value (<<-EOS).fn_join
<%= Fn__Base64 "AWS CloudFormation" %>
EOS
end
MappedValue do
Value (<<-EOS).fn_join
<%= Fn__FindInMap "RegionMap", _{ Ref "AWS::Region" }, 32 %>
EOS
end
AZ do
Value (<<-EOS).fn_join
<%= Fn__GetAZs "us-east-1" %>
EOS
end
ConditionalValue do
Value (<<-EOS).fn_join
<%= Fn__If ["Tokyo", "ap-northeast-1", _{ Ref "AWS::Region" }] %>
EOS
end
SelectedValue do
Value (<<-EOS).fn_join
<%= Fn__Select [1, _ { Fn__GetAZs "ap-northeast-1" }] %>
EOS
end
end
TEMPLATE
json_template = run_client(:convert, :template => template)
expect(json_template).to eq((<<-'EOS').chomp)
{
"Parameters": {
"Password": {
"NoEcho": "true",
"Type": "String"
}
},
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro",
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash\n",
"echo START | logger\n",
"/opt/aws/bin/cfn-init -s ",
{
"Ref": "AWS::StackName"
},
" -r myEC2Instance --region ",
{
"Ref": "AWS::Region"
},
"\n",
"echo END | logger\n"
]
]
}
}
},
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"httpd": [
]
}
},
"services": {
"sysvinit": {
"httpd": {
"enabled": "true",
"ensureRunning": "true"
}
}
},
"commands": {
"any:name": {
"command": {
"Fn::Join": [
"",
[
"echo ",
{
"Ref": "Password"
},
" > /tmp/my-password\n"
]
]
}
}
}
}
}
}
}
},
"Outputs": {
"WebsiteURL": {
"Value": {
"Fn::Join": [
"",
[
"http://",
{
"Fn::GetAtt": [
"myEC2Instance",
"PublicDnsName"
]
},
"\n"
]
]
}
},
"Base64Str": {
"Value": {
"Fn::Join": [
"",
[
{
"Fn::Base64": "AWS CloudFormation"
},
"\n"
]
]
}
},
"MappedValue": {
"Value": {
"Fn::Join": [
"",
[
{
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"32"
]
},
"\n"
]
]
}
},
"AZ": {
"Value": {
"Fn::Join": [
"",
[
{
"Fn::GetAZs": "us-east-1"
},
"\n"
]
]
}
},
"ConditionalValue": {
"Value": {
"Fn::Join": [
"",
[
{
"Fn::If": [
"Tokyo",
"ap-northeast-1",
{
"Ref": "AWS::Region"
}
]
},
"\n"
]
]
}
},
"SelectedValue": {
"Value": {
"Fn::Join": [
"",
[
{
"Fn::Select": [
1,
{
"Fn::GetAZs": "ap-northeast-1"
}
]
},
"\n"
]
]
}
}
}
}
EOS
end
# _include: a template fragment written to a tempfile is pulled into the
# Resources section by path.
it 'convert splitted Ruby template to JSON template' do
json_template = nil
part_of_template = <<-EOS
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
EOS
tempfile(part_of_template, '.rb') do |f|
template = <<-EOS
Resources do
_include #{f.path.inspect}
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template)
end
expect(json_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# _include with args: the fragment reads args[:ami_id] supplied by the caller.
it 'convert splitted Ruby template to JSON template with args' do
json_template = nil
part_of_template = <<-EOS
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId args[:ami_id]
InstanceType "t1.micro"
end
end
EOS
tempfile(part_of_template, '.rb') do |f|
template = <<-EOS
Resources do
_include #{f.path.inspect}, {:ami_id => "ami-XXXXXXXX"}
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template)
end
expect(json_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# Templates may use plain Ruby `require` lines without affecting the output.
it 'convert Ruby template to JSON template with require' do
template = <<-EOS
require 'fileutils'
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template)
expect(json_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# NOTE(review): this example is byte-for-byte identical to the earlier
# "convert splitted Ruby template to JSON template" example above; one of
# the two copies can likely be removed.
it 'convert splitted Ruby template to JSON template' do
json_template = nil
part_of_template = <<-EOS
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
EOS
tempfile(part_of_template, '.rb') do |f|
template = <<-EOS
Resources do
_include #{f.path.inspect}
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
json_template = run_client(:convert, :template => template)
end
expect(json_template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
# Fixture: raw JSON body of the Drupal single-instance sample template,
# stored next to this spec file.
let(:drupal_single_instance_template) do
  path = File.expand_path('../Drupal_Single_Instance.template', __FILE__)
  # File.read replaces `open(path) {|f| f.read }`: Kernel#open can spawn a
  # subprocess for "|"-prefixed paths and is deprecated for plain file reads.
  File.read(path)
end
# Fixture: Ruby-DSL version of the Drupal single-instance sample template,
# stored next to this spec file.
let(:drupal_single_instance_template_rb) do
  path = File.expand_path('../Drupal_Single_Instance.template.rb', __FILE__)
  # File.read replaces `open(path) {|f| f.read }`: Kernel#open can spawn a
  # subprocess for "|"-prefixed paths and is deprecated for plain file reads.
  File.read(path)
end
# Round-trip check: converting the Ruby DSL fixture must produce a JSON
# document structurally equal to the hand-written JSON fixture (compared
# as parsed hashes, so key order and whitespace are irrelevant).
it 'Ruby templates and JSON template should be same' do
  expected = JSON.parse(drupal_single_instance_template)
  converted = run_client(:convert, :template => drupal_single_instance_template_rb)
  actual = JSON.parse(converted)
  expect(actual).to eq(expected)
end
# Fixture: raw JSON body of the vpc-knowhow sample template (contains a
# "yum" key, exercised by the conversion spec below), stored next to this
# spec file.
let(:vpc_knowhow_2014_04_template) do
  path = File.expand_path('../vpc-knowhow-2014-04.template', __FILE__)
  # File.read replaces `open(path) {|f| f.read }`: Kernel#open can spawn a
  # subprocess for "|"-prefixed paths and is deprecated for plain file reads.
  File.read(path)
end
it 'convert JSON template to Ruby template (include yum key)' do
ruby_template = run_client(:convert, :template => vpc_knowhow_2014_04_template, :template_ext => '.template')
expect(ruby_template).to eq <<-'EOS'.strip
AWSTemplateFormatVersion "2010-09-09"
Description "VPC knowhow template"
Parameters do
KeyName do
Description "Name of an existing EC2 KeyPair to enable SSH access to the instances"
Type "String"
MinLength 1
MaxLength 64
AllowedPattern "[-_ a-zA-Z0-9]*"
ConstraintDescription "can contain only alphanumeric characters, spaces, dashes and underscores."
end
SSHFrom do
Description "Lockdown SSH access to the bastion host (default can be accessed from anywhere)"
Type "String"
MinLength 9
MaxLength 18
Default "0.0.0.0/0"
AllowedPattern "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})"
ConstraintDescription "must be a valid CIDR range of the form x.x.x.x/x."
end
DBInstanceType do
Description "EC2 instance type for the Blue environment"
Default "db.t1.micro"
Type "String"
end
DBSnapshotName do
Default ""
Description "The name of a DB snapshot (optional)"
Type "String"
end
DBAllocatedStorage do
Default 5
Description "DB instance disk size"
Type "Number"
end
DBUsername do
Default "admin"
Description "The database master account username"
Type "String"
MinLength 1
MaxLength 16
AllowedPattern "[a-zA-Z][a-zA-Z0-9]*"
ConstraintDescription "must begin with a letter and contain only alphanumeric characters."
end
DBPassword do
Description "Password of RDS master password"
Type "String"
NoEcho "true"
MinLength 4
end
DBName do
Default ""
Description "The name of a DB01 database"
Type "String"
end
WebInstanceType do
Description "EC2 instance type for the web server"
Default "t1.micro"
Type "String"
end
WebFleetSize do
Description "Number of EC2 instances to launch for the web server"
Default 2
Type "Number"
MaxValue 100
MinValue 1
end
HostedZone do
Description "The DNS name of an existing Amazon Route 53 hosted zone"
Type "String"
end
end
Conditions do
UseDBSnapshot do
Fn__Not [
_{
Fn__Equals [
_{
Ref "DBSnapshotName"
},
""
]
}
]
end
end
Mappings do
AWSAmazonLinuxAMI(
{"us-east-1"=>
{"name"=>"Virginia",
"201303"=>"ami-3275ee5b",
"201309"=>"ami-35792c5c",
"201403"=>"ami-2f726546"},
"us-west-2"=>
{"name"=>"Oregon",
"201303"=>"ami-ecbe2adc",
"201309"=>"ami-d03ea1e0",
"201403"=>"ami-b8f69f88"},
"us-west-1"=>
{"name"=>"California",
"201303"=>"ami-66d1fc23",
"201309"=>"ami-687b4f2d",
"201403"=>"ami-84f1cfc1"},
"eu-west-1"=>
{"name"=>"Ireland",
"201303"=>"ami-44939930",
"201309"=>"ami-149f7863",
"201403"=>"ami-a921dfde"},
"ap-southeast-1"=>
{"name"=>"Singapole",
"201303"=>"ami-aa9ed2f8",
"201309"=>"ami-14f2b946",
"201403"=>"ami-787c2c2a"},
"ap-southeast-2"=>
{"name"=>"Sydney",
"201303"=>"ami-363eaf0c",
"201309"=>"ami-a148d59b",
"201403"=>"ami-0bc85031"},
"ap-northeast-1"=>
{"name"=>"Tokyo",
"201303"=>"ami-173fbf16",
"201309"=>"ami-3561fe34",
"201403"=>"ami-a1bec3a0"},
"sa-east-1"=>
{"name"=>"SaoPaulo",
"201303"=>"ami-dd6bb0c0",
"201309"=>"ami-9f6ec982",
"201403"=>"ami-89de7c94"}})
ELBLogger(
{"us-east-1"=>{"AccountID"=>"127311923021"},
"us-west-2"=>{"AccountID"=>"797873946194"},
"us-west-1"=>{"AccountID"=>"027434742980"},
"eu-west-1"=>{"AccountID"=>"156460612806"},
"ap-southeast-1"=>{"AccountID"=>"114774131450"},
"ap-southeast-2"=>{"AccountID"=>"783225319266"},
"ap-northeast-1"=>{"AccountID"=>"582318560864"},
"sa-east-1"=>{"AccountID"=>"507241528517"},
"us-gov-west-1"=>{"AccountID"=>"048591011584"}})
StackConfig do
VPC do
CIDR "10.0.0.0/16"
end
FrontendSubnet1 do
CIDR "10.0.0.0/24"
end
FrontendSubnet2 do
CIDR "10.0.1.0/24"
end
ApplicationSubnet1 do
CIDR "10.0.100.0/24"
end
ApplicationSubnet2 do
CIDR "10.0.101.0/24"
end
DatastoreSubnet1 do
CIDR "10.0.200.0/24"
end
DatastoreSubnet2 do
CIDR "10.0.201.0/24"
end
BastionServer do
InstanceType "t1.micro"
end
end
end
Resources do
PowerUserRole do
Type "AWS::IAM::Role"
Properties do
AssumeRolePolicyDocument do
Statement [
_{
Effect "Allow"
Principal do
Service ["ec2.amazonaws.com"]
end
Action ["sts:AssumeRole"]
}
]
end
Path "/"
Policies [
_{
PolicyName "PowerUserPolicy"
PolicyDocument do
Statement [
_{
Sid "PowerUserStmt"
Effect "Allow"
NotAction "iam:*"
Resource "*"
}
]
end
}
]
end
end
PowerUserProfile do
Type "AWS::IAM::InstanceProfile"
Properties do
Path "/"
Roles [
_{
Ref "PowerUserRole"
}
]
end
end
LogBucket do
Type "AWS::S3::Bucket"
DeletionPolicy "Retain"
end
LogBucketPolicy do
Type "AWS::S3::BucketPolicy"
Properties do
Bucket do
Ref "LogBucket"
end
PolicyDocument do
Id "LogBucketPolicy"
Statement [
_{
Sid "WriteAccess"
Action ["s3:PutObject"]
Effect "Allow"
Resource do
Fn__Join [
"",
[
"arn:aws:s3:::",
_{
Ref "LogBucket"
},
"/AWSLogs/",
_{
Ref "AWS::AccountId"
},
"/*"
]
]
end
Principal do
AWS do
Fn__FindInMap [
"ELBLogger",
_{
Ref "AWS::Region"
},
"AccountID"
]
end
end
}
]
end
end
end
VPC do
Type "AWS::EC2::VPC"
Properties do
CidrBlock do
Fn__FindInMap "StackConfig", "VPC", "CIDR"
end
EnableDnsSupport "true"
EnableDnsHostnames "true"
InstanceTenancy "default"
Tags [
_{
Key "Application"
Value do
Ref "AWS::StackId"
end
},
_{
Key "Network"
Value "Public"
}
]
end
end
InternetGateway do
Type "AWS::EC2::InternetGateway"
Properties do
Tags [
_{
Key "Application"
Value do
Ref "AWS::StackId"
end
},
_{
Key "Network"
Value "Public"
}
]
end
end
AttachGateway do
Type "AWS::EC2::VPCGatewayAttachment"
Properties do
VpcId do
Ref "VPC"
end
InternetGatewayId do
Ref "InternetGateway"
end
end
end
PublicRouteTable do
Type "AWS::EC2::RouteTable"
DependsOn "AttachGateway"
Properties do
VpcId do
Ref "VPC"
end
Tags [
_{
Key "Application"
Value do
Ref "AWS::StackId"
end
},
_{
Key "Network"
Value "Public"
}
]
end
end
PrivateRouteTable do
Type "AWS::EC2::RouteTable"
DependsOn "AttachGateway"
Properties do
VpcId do
Ref "VPC"
end
Tags [
_{
Key "Application"
Value do
Ref "AWS::StackId"
end
},
_{
Key "Network"
Value "Private"
}
]
end
end
PublicRoute do
Type "AWS::EC2::Route"
DependsOn "AttachGateway"
Properties do
RouteTableId do
Ref "PublicRouteTable"
end
DestinationCidrBlock "0.0.0.0/0"
GatewayId do
Ref "InternetGateway"
end
end
end
FrontendSubnet1 do
Type "AWS::EC2::Subnet"
DependsOn "AttachGateway"
Properties do
VpcId do
Ref "VPC"
end
AvailabilityZone do
Fn__Select [
"0",
_{
Fn__GetAZs do
Ref "AWS::Region"
end
}
]
end
CidrBlock do
Fn__FindInMap "StackConfig", "FrontendSubnet1", "CIDR"
end
Tags [
_{
Key "Application"
Value do
Ref "AWS::StackId"
end
},
_{
Key "Network"
Value "Public"
}
]
end
end
FrontendSubnet2 do
Type "AWS::EC2::Subnet"
DependsOn "AttachGateway"
Properties do
VpcId do
Ref "VPC"
end
AvailabilityZone do
Fn__Select [
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | true |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_update_spec.rb | spec/kumogata_update_spec.rb | describe 'Kumogata::Client#update' do
# Happy path: `update` evaluates the Ruby DSL template, pretty-prints it to
# JSON, calls Stack#update with that JSON, polls status until completion,
# then prints outputs and resource summaries. All AWS interaction is mocked.
it 'update a stack from Ruby template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
# Reproduce the JSON the client is expected to send to CloudFormation.
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
# Stubbed stack output (the "AZ" entry from the Outputs section).
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
# Stubbed resource summary row; the client reads each field via [].
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
# Stack double: must receive #update with exactly the generated JSON;
# #status is polled three times before the client considers it done.
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
# Same happy path as above, but with :capabilities in the options; the list
# must be forwarded verbatim to Stack#update alongside the template JSON.
it 'update a stack from Ruby template (with capabilities option)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:update, :arguments => ['MyStack'], :template => template, :options => {:capabilities => ['AWS::CloudFormation::Stack']}) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
# Key assertion of this example: :capabilities is passed through to #update.
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json, :capabilities => ['AWS::CloudFormation::Stack'])
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
# With :detach the client fires the update and returns immediately: no event
# log is printed, status is checked only once, outputs/resource summaries are
# never fetched, and the command's return value is nil.
it 'update a stack from Ruby template (detach)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
out = run_client(:update, :arguments => ['MyStack'], :template => template, :options => {:detach => true}) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).not_to receive(:print_event_log)
expect(client).to receive(:create_event_log).once
# Detached mode: single status check, and no result gathering afterwards.
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).once
expect(obj).not_to receive(:outputs)
expect(obj).not_to receive(:resource_summaries)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(out).to be_nil
end
# With :deletion_policy_retain the client injects a Metadata marker into each
# resource so CloudFormation sees the template as changed. The marker value
# is suffixed with the current epoch seconds, so time is frozen with Timecop
# (1388534400 == Time.utc(2014).to_i) to make the expected JSON deterministic.
it 'update a stack from Ruby template with deletion policy retain' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
Timecop.freeze(Time.utc(2014)) do
run_client(:update, :arguments => ['MyStack'], :template => template, :options => {:deletion_policy_retain => true}) do |client, cf|
# Build the expected JSON: DSL evaluation plus the injected Metadata marker.
template = eval_template(template, :update_deletion_policy => true)
template["Resources"]["myEC2Instance"]["Metadata"] = {
"DeletionPolicyUpdateKeyForKumogata" => "DeletionPolicyUpdateValueForKumogata1388534400"
}
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
end
# The template's _post commands declare no `after` timing, so on an :update
# run they must NOT be executed: Open3 is never invoked and no command
# results are printed or saved.
it 'update a stack from Ruby template and run command' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
# Reproduce the JSON the client is expected to send to CloudFormation.
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
# Core assertions: post-processing must not run the shell commands at all.
# (Removed two `double('process_status…')` locals that were never used.)
expect(Open3).not_to receive(:capture3)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:print_command_result)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:save_command_results)
end
end
# A _post ssh_command with `after :update` must run after a successful
# update: the client resolves the <%= Key ... %> placeholders against the
# stack outputs, invokes run_ssh_command, prints the result, and records it
# via save_command_results.
it 'update a stack from Ruby template and run ssh command' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
PublicIp do
Value do
Fn__GetAtt "myEC2Instance", "PublicIp"
end
end
end
_post do
ssh_command do
after :update
ssh do
host { Key "PublicIp" }
user "ec2-user"
end
command <<-EOS
ls
EOS
end
end
TEMPLATE
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
# Stack output consumed by the ssh host placeholder.
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'PublicIp' }
expect(obj).to receive(:value) { '127.0.0.1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
# Post-processing: the ssh spec (host still as ERB placeholder), the command
# body, and the resolved outputs hash are handed to run_ssh_command; its
# [stdout, stderr, exit status] triple is then printed and saved.
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_ssh_command)
.with({"host"=>"<%= Key \"PublicIp\" %>", "user"=>"ec2-user", "request_pty"=>true}, " ls\n", {"PublicIp"=>"127.0.0.1"})
.and_return(["file1\nfile2\n", "", 0])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('ssh:command')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("file1\nfile2\n", "", 0)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'ssh:command' => {'ExitStatus' => 0, 'StdOut' => "file1\nfile2\n", 'StdErr' => ""}}])
end
end
# Like the ssh-command example, but the template defines `_outputs_filter`,
# which mutates the outputs hash before post-processing runs: PublicIp's
# dots become dashes, so run_ssh_command must see "127-0-0-1" even though
# the stubbed stack output is "127.0.0.1".
it 'update a stack from Ruby template and run ssh command (modify outputs)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
PublicIp do
Value do
Fn__GetAtt "myEC2Instance", "PublicIp"
end
end
end
_outputs_filter do |outputs|
outputs['PublicIp'].gsub!('.', '-')
end
_post do
ssh_command do
after :update
ssh do
host { Key "PublicIp" }
user "ec2-user"
end
command <<-EOS
ls
EOS
end
end
TEMPLATE
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'PublicIp' }
expect(obj).to receive(:value) { '127.0.0.1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
# Note the filtered value "127-0-0-1" in the outputs hash argument.
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_ssh_command)
.with({"host"=>"<%= Key \"PublicIp\" %>", "user"=>"ec2-user", "request_pty"=>true}, " ls\n", {"PublicIp"=>"127-0-0-1"})
.and_return(["file1\nfile2\n", "", 0])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('ssh:command')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("file1\nfile2\n", "", 0)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'ssh:command' => {'ExitStatus' => 0, 'StdOut' => "file1\nfile2\n", 'StdErr' => ""}}])
end
end
# _post commands that declare `after :update` (command_a) or include :update
# in their timing list (command_b, `after :create, :update`) must both run
# on an update, each via run_shell_command with the ERB command body and the
# resolved outputs hash; both results are printed and saved in order.
it 'update a stack from Ruby template and run command (specifies timing)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
after :update
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
after :create, :update
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
# Process-status doubles: the client calls #to_i on each to build the
# saved ExitStatus (0 here).
process_status1 = make_double('process_status1') {|obj| expect(obj).to receive(:to_i).and_return(0) }
process_status2 = make_double('process_status2') {|obj| expect(obj).to receive(:to_i).and_return(0) }
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"AZ\" %>\n echo <%= Key \"Region\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1b\nap-northeast-1\n", "", process_status1])
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"Region\" %>\n echo <%= Key \"AZ\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1\nap-northeast-1b\n", "", process_status2])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:a')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:b')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1b\nap-northeast-1\n", "", process_status1)
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1\nap-northeast-1b\n", "", process_status2)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'command:a' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1b\nap-northeast-1\n", 'StdErr' => ""}},
{'command:b' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1\nap-northeast-1b\n", 'StdErr' => ""}}])
end
end
# Counterpart of the timing example: both _post commands declare
# `after :create` only, so on an :update run neither may execute — Open3 is
# never invoked and no command results are printed or saved.
it 'update a stack from Ruby template and run command (create timing)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
after :create
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
after :create
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:update, :arguments => ['MyStack'], :template => template) do |client, cf|
# Reproduce the JSON the client is expected to send to CloudFormation.
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json)
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
# Core assertions: post-processing must not run the shell commands at all.
# (Removed two `double('process_status…')` locals that were never used.)
expect(Open3).not_to receive(:capture3)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:print_command_result)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:save_command_results)
end
end
# Template parameters supplied via :parameters must be forwarded to
# Stack#update alongside the generated template JSON.
it 'update a stack from Ruby template with parameters' do
template = <<-EOS
Parameters do
InstanceType do
Default "t1.micro"
Description "Instance Type"
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType { Ref "InstanceType" }
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:update, :arguments => ['MyStack'], :template => template, :options => {:parameters => {'InstanceType'=>'m1.large'}}) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
# Key assertion of this example: :parameters is passed through to #update.
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json, :parameters=>{"InstanceType"=>"m1.large"})
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
# Stack names must match [a-zA-Z][-a-zA-Z0-9]*; a name starting with a digit
# must raise the CloudFormation-style validation error before any API call.
it 'update a stack from Ruby template with invalid stack name' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
expect {
run_client(:update, :arguments => ['0MyStack'], :template => template)
}.to raise_error("1 validation error detected: Value '0MyStack' at 'stackName' failed to satisfy constraint: Member must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9]*")
end
# With :encrypt_parameters, eval_template(:add_encryption_password => true)
# injects an EncryptionPassword parameter into the template, and the client
# must pass an encoded password value (base64 placeholder here) to
# Stack#update alongside the user-supplied parameters.
it 'update a stack from Ruby template with encrypted parameters' do
template = <<-EOS
Parameters do
InstanceType do
Default "t1.micro"
Description "Instance Type"
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType { Ref "InstanceType" }
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:update, :arguments => ['MyStack'], :template => template, :options => {:parameters => {'InstanceType'=>'m1.large'}, :encrypt_parameters => ['Password']}) do |client, cf|
template = eval_template(template, :add_encryption_password => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'UPDATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
# Key assertion: EncryptionPassword is added to the parameters sent to #update.
stack = make_double('stack') do |obj|
expect(obj).to receive(:update).with(:template => json, :parameters=>{"InstanceType"=>"m1.large", "EncryptionPassword"=>"KioqKioqKioqKioqKioqKg=="})
expect(obj).to receive(:status).and_return(
'UPDATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_list_spec.rb | spec/kumogata_list_spec.rb | describe 'Kumogata::Client#list' do
it 'list stacks' do
json = run_client(:list) do |client, cf|
stack1 = make_double('stack1') do |obj|
expect(obj).to receive(:name) { 'stack1' }
expect(obj).to receive(:creation_time) { '2014-03-02 16:17:18 UTC' }
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:description) { nil }
end
stack2 = make_double('stack2') do |obj|
expect(obj).to receive(:name) { 'stack2' }
expect(obj).to receive(:creation_time) { '2014-03-02 16:17:19 UTC' }
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:description) { nil }
end
expect(cf).to receive(:stacks) { [stack1, stack2] }
end
expect(json).to eq((<<-EOS).chomp)
[
{
"StackName": "stack1",
"CreationTime": "2014-03-02 16:17:18 UTC",
"StackStatus": "CREATE_COMPLETE",
"Description": null
},
{
"StackName": "stack2",
"CreationTime": "2014-03-02 16:17:19 UTC",
"StackStatus": "CREATE_COMPLETE",
"Description": null
}
]
EOS
end
it 'list a specified stack' do
json = run_client(:list, :arguments => ['stack1']) do |client, cf|
stack1 = make_double('stack1') do |obj|
expect(obj).to receive(:name).twice { 'stack1' }
expect(obj).to receive(:creation_time) { '2014-03-02 16:17:18 UTC' }
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:description) { nil }
end
stack2 = make_double('stack2') do |obj|
expect(obj).to receive(:name) { 'stack2' }
end
expect(cf).to receive(:stacks) { [stack1, stack2] }
end
expect(json).to eq((<<-EOS).chomp)
[
{
"StackName": "stack1",
"CreationTime": "2014-03-02 16:17:18 UTC",
"StackStatus": "CREATE_COMPLETE",
"Description": null
}
]
EOS
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_show_events_spec.rb | spec/kumogata_show_events_spec.rb | describe 'Kumogata::Client#show_events' do
it 'show events' do
resources = run_client(:show_events, :arguments => ['MyStack']) do |client, cf|
event = make_double('event') do |obj|
expect(obj).to receive(:event_id) { "f45e6070-a4f7-11e3-9326-5088487c4896" }
expect(obj).to receive(:logical_resource_id) { "kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca" }
expect(obj).to receive(:physical_resource_id) { "arn:aws:cloudformation:ap-northeast-1:822997939312:stack/kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca/f1381a30-a4f7-11e3-a340-506cf9a1c096" }
expect(obj).to receive(:resource_properties) { nil }
expect(obj).to receive(:resource_status) { "CREATE_FAILED" }
expect(obj).to receive(:resource_status_reason) { "The following resource(s) failed to create: [myEC2Instance]. " }
expect(obj).to receive(:resource_type) { "AWS::CloudFormation::Stack" }
expect(obj).to receive(:stack_id) { "arn:aws:cloudformation:ap-northeast-1:822997939312:stack/kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca/f1381a30-a4f7-11e3-a340-506cf9a1c096" }
expect(obj).to receive(:stack_name) { "kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca" }
expect(obj).to receive(:timestamp) { "2014-03-06 06:24:21 UTC" }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:events).and_return([event])
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[]).with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(resources).to eq((<<-EOS).chomp)
[
{
"EventId": "f45e6070-a4f7-11e3-9326-5088487c4896",
"LogicalResourceId": "kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca",
"PhysicalResourceId": "arn:aws:cloudformation:ap-northeast-1:822997939312:stack/kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca/f1381a30-a4f7-11e3-a340-506cf9a1c096",
"ResourceProperties": null,
"ResourceStatus": "CREATE_FAILED",
"ResourceStatusReason": "The following resource(s) failed to create: [myEC2Instance]. ",
"ResourceType": "AWS::CloudFormation::Stack",
"StackId": "arn:aws:cloudformation:ap-northeast-1:822997939312:stack/kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca/f1381a30-a4f7-11e3-a340-506cf9a1c096",
"StackName": "kumogata-f11118a4-a4f7-11e3-8183-98fe943e66ca",
"Timestamp": "2014-03-06 06:24:21 UTC"
}
]
EOS
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/string_stream_spec.rb | spec/string_stream_spec.rb | describe Kumogata::StringStream do
it 'pass the line ("\n")' do
lines = []
sstream = Kumogata::StringStream.new do |line|
lines << line
end
sstream.push("chunk1")
sstream.push("chunk2\n")
sstream.push("chunk3")
sstream.push("chunk4")
sstream.push("chunk5\n")
sstream.push("\n")
sstream.push("\n")
sstream.push("chunk6")
sstream.push("chunk7")
sstream.close
expect(lines).to eq([
"chunk1chunk2",
"chunk3chunk4chunk5",
"",
"",
"chunk6chunk7",
])
end
it 'pass the line ("\r")' do
lines = []
sstream = Kumogata::StringStream.new do |line|
lines << line
end
sstream.push("chunk1")
sstream.push("chunk2\r")
sstream.push("chunk3")
sstream.push("chunk4")
sstream.push("chunk5\r")
sstream.push("\r")
sstream.push("\r")
sstream.push("chunk6")
sstream.push("chunk7")
sstream.close
expect(lines).to eq([
"chunk1chunk2",
"chunk3chunk4chunk5",
"",
"",
"chunk6chunk7",
])
end
it 'pass the line ("\r\n")' do
lines = []
sstream = Kumogata::StringStream.new do |line|
lines << line
end
sstream.push("chunk1")
sstream.push("chunk2\r\n")
sstream.push("chunk3")
sstream.push("chunk4")
sstream.push("chunk5\r\n")
sstream.push("\r\n")
sstream.push("\r\n")
sstream.push("chunk6")
sstream.push("chunk7")
sstream.close
expect(lines).to eq([
"chunk1chunk2",
"chunk3chunk4chunk5",
"",
"",
"chunk6chunk7",
])
end
it 'pass the line ("\n" / "\r" / "\r\n")' do
lines = []
sstream = Kumogata::StringStream.new do |line|
lines << line
end
sstream.push("chunk1")
sstream.push("chunk2\n")
sstream.push("chunk3")
sstream.push("chunk4")
sstream.push("chunk5\r")
sstream.push("\r\n")
sstream.push("\n")
sstream.push("chunk6")
sstream.push("chunk7")
sstream.push("chunk1")
sstream.push("chunk2\r")
sstream.push("chunk3")
sstream.push("chunk4")
sstream.push("chunk5\n\r")
sstream.push("\n")
sstream.push("\r")
sstream.push("chunk6")
sstream.push("chunk7")
sstream.close
expect(lines).to eq([
"chunk1chunk2",
"chunk3chunk4chunk5",
"",
"",
"chunk6chunk7chunk1chunk2",
"chunk3chunk4chunk5",
"",
"",
"",
"chunk6chunk7",
])
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_create_spec.rb | spec/kumogata_create_spec.rb | describe 'Kumogata::Client#create' do
it 'create a stack from Ruby template' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
end
end
it 'create a stack from Ruby template (detach)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
out = run_client(:create, :template => template, :options => {:detach => true}) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).not_to receive(:print_event_log)
expect(client).not_to receive(:create_event_log)
stack = make_double('stack') do |obj|
expect(obj).not_to receive(:status)
expect(obj).not_to receive(:outputs)
expect(obj).not_to receive(:resource_summaries)
expect(obj).not_to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).not_to receive(:[])
end
expect(cf).to receive(:stacks).once { stacks }
end
expect(out).to be_nil
end
it 'create a stack from Ruby template and run command' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
process_status1 = make_double('process_status1') {|obj| expect(obj).to receive(:to_i).and_return(0) }
process_status2 = make_double('process_status2') {|obj| expect(obj).to receive(:to_i).and_return(0) }
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"AZ\" %>\n echo <%= Key \"Region\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1b\nap-northeast-1\n", "", process_status1])
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"Region\" %>\n echo <%= Key \"AZ\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1\nap-northeast-1b\n", "", process_status2])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:a')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:b')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1b\nap-northeast-1\n", "", process_status1)
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1\nap-northeast-1b\n", "", process_status2)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'command:a' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1b\nap-northeast-1\n", 'StdErr' => ""}},
{'command:b' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1\nap-northeast-1b\n", 'StdErr' => ""}}])
end
end
it 'create a stack from Ruby template and run ssh command' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
PublicIp do
Value do
Fn__GetAtt "myEC2Instance", "PublicIp"
end
end
end
_post do
ssh_command do
ssh do
host { Key "PublicIp" }
user "ec2-user"
end
command <<-EOS
ls
EOS
end
end
TEMPLATE
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'PublicIp' }
expect(obj).to receive(:value) { '127.0.0.1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_ssh_command)
.with({"host"=>"<%= Key \"PublicIp\" %>", "user"=>"ec2-user", "request_pty"=>true}, " ls\n", {"PublicIp"=>"127.0.0.1"})
.and_return(["file1\nfile2\n", "", 0])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('ssh:command')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("file1\nfile2\n", "", 0)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'ssh:command' => {'ExitStatus' => 0, 'StdOut' => "file1\nfile2\n", 'StdErr' => ""}}])
end
end
it 'create a stack from Ruby template and run ssh command (modify outputs)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
PublicIp do
Value do
Fn__GetAtt "myEC2Instance", "PublicIp"
end
end
end
_outputs_filter do |outputs|
outputs['MyOutput'] = 100
end
_post do
ssh_command do
ssh do
host { Key "PublicIp" }
user "ec2-user"
end
command <<-EOS
ls
EOS
end
end
TEMPLATE
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'PublicIp' }
expect(obj).to receive(:value) { '127.0.0.1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_ssh_command)
.with({"host"=>"<%= Key \"PublicIp\" %>", "user"=>"ec2-user", "request_pty"=>true}, " ls\n", {"PublicIp"=>"127.0.0.1", "MyOutput"=>100})
.and_return(["file1\nfile2\n", "", 0])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('ssh:command')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("file1\nfile2\n", "", 0)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'ssh:command' => {'ExitStatus' => 0, 'StdOut' => "file1\nfile2\n", 'StdErr' => ""}}])
end
end
it 'create a stack from Ruby template and run command (specifies timing)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
after :create
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
after :create, :update
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
process_status1 = make_double('process_status1') {|obj| expect(obj).to receive(:to_i).and_return(0) }
process_status2 = make_double('process_status2') {|obj| expect(obj).to receive(:to_i).and_return(0) }
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"AZ\" %>\n echo <%= Key \"Region\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1b\nap-northeast-1\n", "", process_status1])
expect(client.instance_variable_get(:@post_processing))
.to receive(:run_shell_command)
.with(" echo <%= Key \"Region\" %>\n echo <%= Key \"AZ\" %>\n", {"AZ"=>"ap-northeast-1b", "Region"=>"ap-northeast-1"})
.and_return(["ap-northeast-1\nap-northeast-1b\n", "", process_status2])
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:a')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command).with('command:b')
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1b\nap-northeast-1\n", "", process_status1)
expect(client.instance_variable_get(:@post_processing))
.to receive(:print_command_result)
.with("ap-northeast-1\nap-northeast-1b\n", "", process_status2)
expect(client.instance_variable_get(:@post_processing))
.to receive(:save_command_results)
.with([{'command:a' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1b\nap-northeast-1\n", 'StdErr' => ""}},
{'command:b' => {'ExitStatus' => 0, 'StdOut' => "ap-northeast-1\nap-northeast-1b\n", 'StdErr' => ""}}])
end
end
it 'create a stack from Ruby template and run command (update timing)' do
template = <<-TEMPLATE
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
Region do
Value do
Ref "AWS::Region"
end
end
end
_post do
command_a do
after :update
command <<-EOS
echo <%= Key "AZ" %>
echo <%= Key "Region" %>
EOS
end
command_b do
after :update
command <<-EOS
echo <%= Key "Region" %>
echo <%= Key "AZ" %>
EOS
end
end
TEMPLATE
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output1 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
output2 = make_double('output') do |obj|
expect(obj).to receive(:key) { 'Region' }
expect(obj).to receive(:value) { 'ap-northeast-1' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output1, output2] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
expect(Open3).not_to receive(:capture3)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:print_command_result)
expect(client.instance_variable_get(:@post_processing))
.not_to receive(:save_command_results)
end
end
it 'create a stack from Ruby template (include DeletionPolicy)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
DeletionPolicy "Delete"
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :template => template) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
expect(template['Resources']['myEC2Instance']['DeletionPolicy']).to eq('Delete')
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
end
end
it 'create a stack from Ruby template with parameters' do
template = <<-EOS
Parameters do
InstanceType do
Default "t1.micro"
Description "Instance Type"
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType { Ref "InstanceType" }
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :template => template, :options => {:parameters => {'InstanceType'=>'m1.large'}}) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
expect(obj).to receive(:delete)
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:create)
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', json, {:parameters=>{"InstanceType"=>"m1.large"}}) { stack }
expect(obj).to receive(:[])
.with('kumogata-user-host-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX') { stack }
end
expect(cf).to receive(:stacks).twice { stacks }
end
end
it 'create a stack from Ruby template with stack name' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :arguments => ['MyStack'], :template => template) do |client, cf|
template = eval_template(template)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE',
'CREATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('status') do |obj|
expect(obj).to receive(:create)
.with('MyStack', json, {}) { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
it 'create a stack from Ruby template with deletion policy retain' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :arguments => ['MyStack'], :template => template, :options => {:deletion_policy_retain => true}) do |client, cf|
template = eval_template(template, :update_deletion_policy => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE',
'CREATE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
expect(obj).to receive(:resource_summaries) { [resource_summary] }
end
stacks = make_double('status') do |obj|
expect(obj).to receive(:create)
.with('MyStack', json, {}) { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
it 'create a stack from Ruby template with invalid stack name' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
expect {
run_client(:create, :arguments => ['0MyStack'], :template => template)
}.to raise_error("1 validation error detected: Value '0MyStack' at 'stackName' failed to satisfy constraint: Member must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9]*")
end
it 'create a stack from Ruby template with encrypted parameters' do
template = <<-EOS
Parameters do
InstanceType do
Default "t1.micro"
Description "Instance Type"
Type "String"
end
end
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType { Ref "InstanceType" }
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:create, :template => template, :options => {:parameters => {'InstanceType'=>'m1.large'}, :encrypt_parameters => ['Password']}) do |client, cf|
template = eval_template(template, :update_deletion_policy => true, :add_encryption_password => true)
json = JSON.pretty_generate(template)
expect(client).to receive(:print_event_log).twice
expect(client).to receive(:create_event_log).once
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1b' }
end
resource_summary = make_double('resource_summary') do |obj|
expect(obj).to receive(:[]).with(:logical_resource_id) { 'myEC2Instance' }
expect(obj).to receive(:[]).with(:physical_resource_id) { 'i-XXXXXXXX' }
expect(obj).to receive(:[]).with(:resource_type) { 'AWS::EC2::Instance' }
expect(obj).to receive(:[]).with(:resource_status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:[]).with(:resource_status_reason) { nil }
expect(obj).to receive(:[]).with(:last_updated_timestamp) { '2014-03-02 04:35:12 UTC' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status).and_return(
'CREATE_COMPLETE', 'CREATE_COMPLETE',
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
expect(obj).to receive(:outputs) { [output] }
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | true |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_validate_spec.rb | spec/kumogata_validate_spec.rb | describe 'Kumogata::Client#validate' do
it 'validate Ruby template (without error)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
run_client(:validate, :template => template) do |client, cf|
json = eval_template(template, :add_encryption_password_for_validation => true).to_json
expect(cf).to receive(:validate_template).with(json) {
{}
}
end
end
it 'validate Ruby template (with error)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
#Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
#end
EOS
expect {
run_client(:validate, :template => template) do |client, cf|
json = eval_template(template, :add_encryption_password_for_validation => true).to_json
expect(cf).to receive(:validate_template).with(json) {
{
:code => 'CODE',
:message => 'MESSAGE'
}
}
end
}.to raise_error('CODE: MESSAGE')
end
it 'validate JSON template (without error)' do
template = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
EOS
run_client(:validate, :template => template, :template_ext => '.template') do |client, cf|
template = JSON.parse(template)
add_encryption_password_for_validation(template)
json = template.to_json
expect(cf).to receive(:validate_template) {
{}
}
end
end
it 'validate JSON template (with error)' do
template = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
EOS
expect {
run_client(:validate, :template => template, :template_ext => '.template') do |client, cf|
template = JSON.parse(template)
add_encryption_password_for_validation(template)
json = template.to_json
expect(cf).to receive(:validate_template).with(json) {
{
:code => 'CODE',
:message => 'MESSAGE'
}
}
end
}.to raise_error('CODE: MESSAGE')
end
it 'validate Ruby template (without verbose option)' do
template = <<-EOS
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
result = {"parameters"=>
[{"no_echo"=>false,
"parameter_key"=>"SSHLocation",
"description"=>
"The IP address range that can be used to SSH to the EC2 instances",
"default_value"=>"0.0.0.0/0"},
{"no_echo"=>false,
"parameter_key"=>"XXXXXXXXXXXXXXXX",
"default_value"=>"(XXXXXXXXXXXXXXXX)"},
{"no_echo"=>false,
"parameter_key"=>"InstanceType",
"description"=>"WebServer EC2 instance type",
"default_value"=>"m1.small"},
{"no_echo"=>false,
"parameter_key"=>"KeyName",
"description"=>
"Name of an existing EC2 KeyPair to enable SSH access to the instance"}],
"capabilities"=>[],
"description"=>"'test CloudFormation Template\n",
"response_metadata"=>{"request_id"=>"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"}}
expect(Kumogata.logger).to receive(:info).with('Template validated successfully')
expect(Kumogata.logger).to receive(:info).with(JSON.pretty_generate(result))
run_client(:validate, :template => template, :options => {:verbose => true}) do |client, cf|
json = eval_template(template, :add_encryption_password_for_validation => true).to_json
expect(cf).to receive(:validate_template).with(json) { result }
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_delete_spec.rb | spec/kumogata_delete_spec.rb | describe 'Kumogata::Client#delete' do
it 'update a stack from Ruby template' do
run_client(:delete, :arguments => ['MyStack'], :options => {:force => true}) do |client, cf|
expect(client).to receive(:print_event_log).once
expect(client).to receive(:create_event_log).once
stack = make_double('stack') do |obj|
expect(obj).to receive(:delete).with(no_args())
expect(obj).to receive(:status).and_return(
'DELETE_COMPLETE', 'DELETE_COMPLETE', 'DELETE_COMPLETE')
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
end
it 'update a stack from Ruby template (detach)' do
out = run_client(:delete, :arguments => ['MyStack'], :options => {:force => true, :detach => true}) do |client, cf|
expect(client).not_to receive(:print_event_log)
expect(client).to receive(:create_event_log).once
stack = make_double('stack') do |obj|
expect(obj).to receive(:delete).with(no_args())
expect(obj).to receive(:status).once
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[])
.with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(out).to be_nil
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_show_resources_spec.rb | spec/kumogata_show_resources_spec.rb | describe 'Kumogata::Client#show_resources' do
it 'show resources' do
resources = run_client(:show_resources, :arguments => ['MyStack']) do |client, cf|
stack = make_double('stack') do |obj|
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:resource_summaries).and_return([
{
:logical_resource_id => 'myEC2Instance',
:physical_resource_id => 'i-XXXXXXXX',
:resource_type => 'AWS::EC2::Instance',
:resource_status => 'CREATE_COMPLETE',
:resource_status_reason => nil,
:last_updated_timestamp => '2014-03-03 04:04:40 UTC',
}
])
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[]).with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(resources).to eq((<<-EOS).chomp)
[
{
"LogicalResourceId": "myEC2Instance",
"PhysicalResourceId": "i-XXXXXXXX",
"ResourceType": "AWS::EC2::Instance",
"ResourceStatus": "CREATE_COMPLETE",
"ResourceStatusReason": null,
"LastUpdatedTimestamp": "2014-03-03 04:04:40 UTC"
}
]
EOS
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_diff_spec.rb | spec/kumogata_diff_spec.rb | describe 'Kumogata::Client#diff' do
let(:drupal_single_instance_template) do
path = File.expand_path('../Drupal_Single_Instance.template', __FILE__)
open(path) {|f| f.read }
end
let(:drupal_single_instance_template_rb) do
path = File.expand_path('../Drupal_Single_Instance.template.rb', __FILE__)
open(path) {|f| f.read }
end
let(:drupal_single_instance_template_yaml) do
path = File.expand_path('../Drupal_Single_Instance.template.yml', __FILE__)
open(path) {|f| f.read }
end
it 'compare templates logically' do
json_template = drupal_single_instance_template
json_template.sub!('localhost', '127.0.0.1')
json_template.sub!('"ToPort" : "80"', '"ToPort" : "8080"')
tempfile(json_template, '.templates') do |js|
tempfile(drupal_single_instance_template_rb, '.rb') do |rb|
diff = ruby_template = run_client(:diff, :arguments => [js.path, rb.path], :options => {:color => false})
diff = diff.split(/\n/).slice(2..-1).join("\n")
expect(diff).to eq((<<-EOS).chomp)
@@ -257,7 +257,7 @@
{
"Ref": "DBUsername"
},
- "'@'127.0.0.1' IDENTIFIED BY '",
+ "'@'localhost' IDENTIFIED BY '",
{
"Ref": "DBPassword"
},
@@ -437,7 +437,7 @@
{
"IpProtocol": "tcp",
"FromPort": "80",
- "ToPort": "8080",
+ "ToPort": "80",
"CidrIp": "0.0.0.0/0"
},
{
EOS
end
end
end
it 'compare yaml templates logically' do
yaml_template = drupal_single_instance_template_yaml
yaml_template.sub!('localhost', '127.0.0.1')
yaml_template.sub!('ToPort: 80', 'ToPort: 8080')
tempfile(yaml_template, '.yml') do |yaml|
tempfile(drupal_single_instance_template_rb, '.rb') do |rb|
diff = ruby_template = run_client(:diff, :arguments => [yaml.path, rb.path], :options => {:color => false})
diff = diff.split(/\n/).slice(2..-1).join("\n")
expect(diff).to eq((<<-EOS).chomp)
@@ -257,7 +257,7 @@
{
"Ref": "DBUsername"
},
- "'@'127.0.0.1' IDENTIFIED BY '",
+ "'@'localhost' IDENTIFIED BY '",
{
"Ref": "DBPassword"
},
@@ -437,7 +437,7 @@
{
"IpProtocol": "tcp",
"FromPort": "80",
- "ToPort": "8080",
+ "ToPort": "80",
"CidrIp": "0.0.0.0/0"
},
{
EOS
end
end
end
it 'compare templates logically with "-w"' do
json_template = drupal_single_instance_template
json_template.sub!('localhost', '127.0.0.1')
json_template.sub!('"ToPort" : "80"', '"ToPort" : "8080"')
tempfile(json_template, '.templates') do |js|
tempfile(drupal_single_instance_template_rb, '.rb') do |rb|
diff = ruby_template = run_client(:diff, :arguments => [js.path, rb.path], :options => {:color => false, :ignore_all_space => true})
diff = diff.split(/\n/).slice(2..-1).join("\n")
expect(diff).to eq((<<-EOS).chomp)
@@ -257,7 +257,7 @@
{
"Ref": "DBUsername"
},
- "'@'127.0.0.1' IDENTIFIED BY '",
+ "'@'localhost' IDENTIFIED BY '",
{
"Ref": "DBPassword"
},
@@ -437,7 +437,7 @@
{
"IpProtocol": "tcp",
"FromPort": "80",
- "ToPort": "8080",
+ "ToPort": "80",
"CidrIp": "0.0.0.0/0"
},
{
EOS
end
end
end
it 'compare templates logically with whitespace' do
json_template = drupal_single_instance_template
json_template.sub!('localhost', 'local host')
tempfile(json_template, '.templates') do |js|
tempfile(drupal_single_instance_template_rb, '.rb') do |rb|
diff = ruby_template = run_client(:diff, :arguments => [js.path, rb.path], :options => {:color => false})
diff = diff.split(/\n/).slice(2..-1).join("\n")
expect(diff).to eq((<<-EOS).chomp)
@@ -257,7 +257,7 @@
{
"Ref": "DBUsername"
},
- "'@'local host' IDENTIFIED BY '",
+ "'@'localhost' IDENTIFIED BY '",
{
"Ref": "DBPassword"
},
EOS
end
end
end
it 'compare templates logically with whitespace and "-w"' do
json_template = drupal_single_instance_template
json_template.sub!('localhost', 'local host')
tempfile(json_template, '.templates') do |js|
tempfile(drupal_single_instance_template_rb, '.rb') do |rb|
diff = ruby_template = run_client(:diff, :arguments => [js.path, rb.path], :options => {:color => false, :ignore_all_space => true})
expect(diff).to be_empty
end
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_utils_spec.rb | spec/kumogata_utils_spec.rb | describe Kumogata::Utils do
it 'should stringify the hash' do
hash = {
:foo => {
'bar' => ['1', 2, 3],
'zoo' => :value,
},
12 => :value2
}
expect(Kumogata::Utils.stringify(hash)).to eq(
{
'foo' => {
'bar' => ['1', '2', '3'],
'zoo' => 'value',
},
'12' => 'value2'
}
)
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_crypt_spec.rb | spec/kumogata_crypt_spec.rb | describe Kumogata::Crypt do
it 'encrypt string' do
encrypted = Kumogata::Crypt.encrypt("my_password", "jugem jugem")
decrypted = Kumogata::Crypt.decrypt("my_password", encrypted)
expect(decrypted).to eq("jugem jugem")
end
it 'encrypt long string' do
len = (RUBY_PLATFORM =~ /darwin/) ? 1 : 10
encrypted = Kumogata::Crypt.encrypt("my_password", "jugem jugem" * 1024 * len)
decrypted = Kumogata::Crypt.decrypt("my_password", encrypted)
expect(decrypted).to eq("jugem jugem" * 1024 * len)
end
it 'make password' do
passwd = Kumogata::Crypt.mkpasswd(16)
expect(passwd).to be_kind_of(String)
expect(passwd.length).to eq(16)
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/Drupal_Single_Instance.template.rb | spec/Drupal_Single_Instance.template.rb | AWSTemplateFormatVersion "2010-09-09"
Description (<<-EOS).undent
AWS CloudFormation Sample Template Drupal_Single_Instance.
Drupal is an open source content management platform powering millions of websites and applications.
This template installs a singe instance deployment with a local MySQL database for storage.
It uses the AWS CloudFormation bootstrap scripts to install packages and files at instance launch time.
**WARNING**
This template creates an Amazon EC2 instance.
You will be billed for the AWS resources used if you create a stack from this template.
EOS
Parameters do
KeyName do
Description "Name of an existing EC2 KeyPair to enable SSH access to the instances"
Type "String"
MinLength 1
MaxLength 255
AllowedPattern "[\\x20-\\x7E]*"
ConstraintDescription "can contain only ASCII characters."
end
InstanceType do
Description "WebServer EC2 instance type"
Type "String"
Default "m1.small"
AllowedValues "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "m3.xlarge", "m3.2xlarge", "c1.medium", "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
ConstraintDescription "must be a valid EC2 instance type."
end
SiteName do
Default "My Site"
Description "The name of the Drupal Site"
Type "String"
end
SiteEMail do
Description "EMail for site adminitrator"
Type "String"
end
SiteAdmin do
Description "The Drupal site admin account username"
Type "String"
MinLength 1
MaxLength 16
AllowedPattern "[a-zA-Z][a-zA-Z0-9]*"
ConstraintDescription "must begin with a letter and contain only alphanumeric characters."
end
SitePassword do
NoEcho "true"
Description "The Drupal site admin account password"
Type "String"
MinLength 1
MaxLength 41
AllowedPattern "[a-zA-Z0-9]*"
ConstraintDescription "must contain only alphanumeric characters."
end
DBName do
Default "drupaldb"
Description "The Drupal database name"
Type "String"
MinLength 1
MaxLength 64
AllowedPattern "[a-zA-Z][a-zA-Z0-9]*"
ConstraintDescription "must begin with a letter and contain only alphanumeric characters."
end
DBUsername do
Default "admin"
NoEcho "true"
Description "The Drupal database admin account username"
Type "String"
MinLength 1
MaxLength 16
AllowedPattern "[a-zA-Z][a-zA-Z0-9]*"
ConstraintDescription "must begin with a letter and contain only alphanumeric characters."
end
DBPassword do
Default "admin"
NoEcho "true"
Description "The Drupal database admin account password"
Type "String"
MinLength 1
MaxLength 41
AllowedPattern "[a-zA-Z0-9]*"
ConstraintDescription "must contain only alphanumeric characters."
end
DBRootPassword do
NoEcho "true"
Description "Root password for MySQL"
Type "String"
MinLength 1
MaxLength 41
AllowedPattern "[a-zA-Z0-9]*"
ConstraintDescription "must contain only alphanumeric characters."
end
SSHLocation do
Description "The IP address range that can be used to SSH to the EC2 instances"
Type "String"
MinLength 9
MaxLength 18
Default "0.0.0.0/0"
AllowedPattern "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})"
ConstraintDescription "must be a valid IP CIDR range of the form x.x.x.x/x."
end
end
Mappings do
AWSInstanceType2Arch(
{"t1.micro"=>{"Arch"=>"64"},
"m1.small"=>{"Arch"=>"64"},
"m1.medium"=>{"Arch"=>"64"},
"m1.large"=>{"Arch"=>"64"},
"m1.xlarge"=>{"Arch"=>"64"},
"m2.xlarge"=>{"Arch"=>"64"},
"m2.2xlarge"=>{"Arch"=>"64"},
"m2.4xlarge"=>{"Arch"=>"64"},
"m3.xlarge"=>{"Arch"=>"64"},
"m3.2xlarge"=>{"Arch"=>"64"},
"c1.medium"=>{"Arch"=>"64"},
"c1.xlarge"=>{"Arch"=>"64"},
"cc1.4xlarge"=>{"Arch"=>"64HVM"},
"cc2.8xlarge"=>{"Arch"=>"64HVM"},
"cg1.4xlarge"=>{"Arch"=>"64HVM"}})
AWSRegionArch2AMI(
{"us-east-1"=>
{"32"=>"ami-a0cd60c9", "64"=>"ami-aecd60c7", "64HVM"=>"ami-a8cd60c1"},
"us-west-2"=>
{"32"=>"ami-46da5576", "64"=>"ami-48da5578", "64HVM"=>"NOT_YET_SUPPORTED"},
"us-west-1"=>
{"32"=>"ami-7d4c6938", "64"=>"ami-734c6936", "64HVM"=>"NOT_YET_SUPPORTED"},
"eu-west-1"=>
{"32"=>"ami-61555115", "64"=>"ami-6d555119", "64HVM"=>"ami-67555113"},
"ap-southeast-1"=>
{"32"=>"ami-220b4a70", "64"=>"ami-3c0b4a6e", "64HVM"=>"NOT_YET_SUPPORTED"},
"ap-southeast-2"=>
{"32"=>"ami-b3990e89", "64"=>"ami-bd990e87", "64HVM"=>"NOT_YET_SUPPORTED"},
"ap-northeast-1"=>
{"32"=>"ami-2a19aa2b", "64"=>"ami-2819aa29", "64HVM"=>"NOT_YET_SUPPORTED"},
"sa-east-1"=>
{"32"=>"ami-f836e8e5", "64"=>"ami-fe36e8e3", "64HVM"=>"NOT_YET_SUPPORTED"}})
end
Resources do
WebServer do
Type "AWS::EC2::Instance"
Metadata do
AWS__CloudFormation__Init do
config do
packages do
yum(
{"httpd"=>[],
"php"=>[],
"php-mysql"=>[],
"php-gd"=>[],
"php-xml"=>[],
"php-mbstring"=>[],
"mysql"=>[],
"mysql-server"=>[],
"mysql-devel"=>[],
"mysql-libs"=>[]})
end
sources do
_path "/var/www/html", "http://ftp.drupal.org/files/projects/drupal-7.8.tar.gz"
_path "/home/ec2-user", "http://ftp.drupal.org/files/projects/drush-7.x-4.5.tar.gz"
end
files do
_path("/tmp/setup.mysql") do
content (<<-EOS).fn_join
CREATE DATABASE <%= Ref "DBName" %>;
CREATE USER '<%= Ref "DBUsername" %>'@'localhost' IDENTIFIED BY '<%= Ref "DBPassword" %>';
GRANT ALL ON <%= Ref "DBName" %>.* TO '<%= Ref "DBUsername" %>'@'localhost';
FLUSH PRIVILEGES;
EOS
mode "000644"
owner "root"
group "root"
end
end
services do
sysvinit do
httpd do
enabled "true"
ensureRunning "true"
end
mysqld do
enabled "true"
ensureRunning "true"
end
sendmail do
enabled "false"
ensureRunning "false"
end
end
end
end
end
end
Properties do
ImageId do
Fn__FindInMap "AWSRegionArch2AMI", _{ Ref "AWS::Region" }, _{
Fn__FindInMap "AWSInstanceType2Arch", _{ Ref "InstanceType" }, "Arch"
}
end
InstanceType do
Ref "InstanceType"
end
SecurityGroups [
_{ Ref "WebServerSecurityGroup" }
]
KeyName do
Ref "KeyName"
end
UserData do
Fn__Base64 (<<-EOS).fn_join
#!/bin/bash -v
yum update -y aws-cfn-bootstrap
# Helper function
function error_exit
{
/opt/aws/bin/cfn-signal -e 0 -r "$1" '<%= Ref "WaitHandle" %>'
exit 1
}
# Install Apache Web Server, MySQL, PHP and Drupal
/opt/aws/bin/cfn-init -s <%= Ref "AWS::StackId" %> -r WebServer --region <%= Ref "AWS::Region" %> || error_exit 'Failed to run cfn-init'
# Setup MySQL root password and create a user
mysqladmin -u root password '<%= Ref "DBRootPassword" %>' || error_exit 'Failed to initialize root password'
mysql -u root --password='<%= Ref "DBRootPassword" %>' < /tmp/setup.mysql || error_exit 'Failed to create database user'
# Make changes to Apache Web Server configuration
mv /var/www/html/drupal-7.8/* /var/www/html
mv /var/www/html/drupal-7.8/.* /var/www/html
rmdir /var/www/html/drupal-7.8
sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf
service httpd restart
# Create the site in Drupal
cd /var/www/html
~ec2-user/drush/drush site-install standard --yes --site-name='<%= Ref "SiteName" %>' --site-mail=<%= Ref "SiteEMail" %> --account-name=<%= Ref "SiteAdmin" %> --account-pass=<%= Ref "SitePassword" %> --db-url=mysql://<%= Ref "DBUsername" %>:<%= Ref "DBPassword" %>@localhost/<%= Ref "DBName" %> --db-prefix=drupal_
chown apache:apache sites/default/files
# All is well so signal success
/opt/aws/bin/cfn-signal -e 0 -r "Drupal setup complete" '<%= Ref "WaitHandle" %>'
EOS
end
end
end
WaitHandle do
Type "AWS::CloudFormation::WaitConditionHandle"
end
WaitCondition do
Type "AWS::CloudFormation::WaitCondition"
DependsOn "WebServer"
Properties do
Handle do
Ref "WaitHandle"
end
Timeout 300
end
end
WebServerSecurityGroup do
Type "AWS::EC2::SecurityGroup"
Properties do
GroupDescription "Enable HTTP access via port 80 and SSH access"
SecurityGroupIngress [
_{
IpProtocol "tcp"
FromPort 80
ToPort 80
CidrIp "0.0.0.0/0"
},
_{
IpProtocol "tcp"
FromPort 22
ToPort 22
CidrIp do
Ref "SSHLocation"
end
}
]
end
end
end
Outputs do
WebsiteURL do
Value 'http://<%= Fn__GetAtt "WebServer", "PublicDnsName" %>'.fn_join
Description "Drupal Website"
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/spec_helper.rb | spec/spec_helper.rb | require 'kumogata'
require 'kumogata/argument_parser'
require 'tempfile'
require 'time'
require 'timecop'
require 'uuidtools'
Kumogata::ENCRYPTION_PASSWORD.replace('EncryptionPassword')
class UUIDTools::UUID
def self.timestamp_create; 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'; end
end
class Kumogata::Utils
def self.get_user_host
'user-host'
end
end
class Kumogata::Crypt
def self.mkpasswd(n)
'*' * n
end
end
def tempfile(content, template_ext = nil)
basename = "#{File.basename __FILE__}.#{$$}"
basename = [basename, template_ext] if template_ext
Tempfile.open(basename) do |f|
f << content
f.flush
f.rewind
yield(f)
end
end
class Proc
def to_s
"<Proc Object>"
end
end
def run_client(command, options = {})
$stdout = open('/dev/null', 'w') unless ENV['DEBUG']
kumogata_template = options[:template]
kumogata_arguments = options[:arguments] || []
kumogata_options = Kumogata::ArgumentParser::DEFAULT_OPTIONS.merge(options[:options] || {})
kumogata_options[:result_log] = '/dev/null'
kumogata_options[:command_result_log] = '/dev/null'
template_ext = options[:template_ext] || '.rb'
client = Kumogata::Client.new(kumogata_options)
cloud_formation = client.instance_variable_get(:@cloud_formation)
yield(client, cloud_formation) if block_given?
if kumogata_template
tempfile(kumogata_template, template_ext) do |f|
kumogata_arguments.unshift(f.path)
client.send(command, *kumogata_arguments)
end
else
client.send(command, *kumogata_arguments)
end
end
def eval_template(template, options = {})
kumogata_options = Kumogata::ArgumentParser::DEFAULT_OPTIONS.merge(options[:options] || {})
template_ext = options[:template_ext] || '.rb'
template = tempfile(template, template_ext) do |f|
Kumogata::Client.new(kumogata_options).send(:evaluate_template, f, f.path)
end
if options[:update_deletion_policy]
update_deletion_policy(template)
end
if options[:add_encryption_password]
add_encryption_password(template)
end
if options[:add_encryption_password_for_validation]
add_encryption_password_for_validation(template)
end
return template
end
def update_deletion_policy(template)
template['Resources'].each do |k, v|
v['DeletionPolicy'] ||= 'Retain'
end
end
def add_encryption_password(template)
template['Parameters'] ||= {}
template['Parameters'][Kumogata::ENCRYPTION_PASSWORD] = {
'Type' => 'String',
'NoEcho' => 'true',
}
end
def add_encryption_password_for_validation(template)
template['Parameters'] ||= {}
template['Parameters'][Kumogata::ENCRYPTION_PASSWORD] = {
'Type' => 'String',
'Default' => "(#{Kumogata::ENCRYPTION_PASSWORD})",
}
end
def make_double(name)
obj = double(name)
yield(obj)
return obj
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_show_outputs_spec.rb | spec/kumogata_show_outputs_spec.rb | describe 'Kumogata::Client#show_outputs' do
it 'show outputs' do
outputs = run_client(:show_outputs, :arguments => ['MyStack']) do |client, cf|
output = make_double('output') do |obj|
expect(obj).to receive(:key) { 'AZ' }
expect(obj).to receive(:value) { 'ap-northeast-1a' }
end
stack = make_double('stack') do |obj|
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:outputs) { [output] }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[]).with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(outputs).to eq((<<-EOS).chomp)
{
"AZ": "ap-northeast-1a"
}
EOS
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/spec/kumogata_export_spec.rb | spec/kumogata_export_spec.rb | describe 'Kumogata::Client#export' do
it 'export a template' do
json = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
template = run_client(:export, :arguments => ['MyStack']) do |client, cf|
stack = make_double('stack') do |obj|
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:template) { json }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[]).with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(template).to eq((<<-EOS).chomp)
Resources do
myEC2Instance do
Type "AWS::EC2::Instance"
Properties do
ImageId "ami-XXXXXXXX"
InstanceType "t1.micro"
end
end
end
Outputs do
AZ do
Value do
Fn__GetAtt "myEC2Instance", "AvailabilityZone"
end
end
end
EOS
end
it 'export a JSON template' do
json = <<-EOS
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
template = run_client(:export, :arguments => ['MyStack'], :options => {:format => :json}) do |client, cf|
stack = make_double('stack') do |obj|
expect(obj).to receive(:status) { 'CREATE_COMPLETE' }
expect(obj).to receive(:template) { json }
end
stacks = make_double('stacks') do |obj|
expect(obj).to receive(:[]).with('MyStack') { stack }
end
expect(cf).to receive(:stacks) { stacks }
end
expect(template).to eq((<<-EOS).chomp)
{
"Resources": {
"myEC2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-XXXXXXXX",
"InstanceType": "t1.micro"
}
}
},
"Outputs": {
"AZ": {
"Value": {
"Fn::GetAtt": [
"myEC2Instance",
"AvailabilityZone"
]
}
}
}
}
EOS
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata.rb | lib/kumogata.rb | module Kumogata; end
require 'kumogata/version'
require 'aws-sdk-v1'
require 'base64'
require 'coderay'
require 'coffee-script'
require 'diffy'
require 'dslh'
require 'hashie'
require 'highline/import'
require 'json'
require 'json5'
require 'logger'
require 'net/ssh'
require 'open-uri'
require 'open3'
require 'optparse'
require 'pathname'
require 'rbconfig'
require 'retryable'
require 'set'
require 'singleton'
require 'strscan'
require 'term/ansicolor'
require 'thread'
require 'uuidtools'
require 'v8'
require 'yaml'
require 'kumogata/client'
require 'kumogata/crypt'
require 'kumogata/ext/coderay_ext'
require 'kumogata/ext/json_ext'
require 'kumogata/ext/string_ext'
require 'kumogata/logger'
require 'kumogata/outputs_filter'
require 'kumogata/post_processing'
require 'kumogata/string_stream'
require 'kumogata/utils'
require 'kumogata/v8_object_ext'
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/v8_object_ext.rb | lib/kumogata/v8_object_ext.rb | class V8::Object
def to_hash
to_hash0(self)
end
def to_hash0(obj)
case obj
when V8::Array
obj.map {|v| to_hash0(v) }
when V8::Object
h = {}
obj.each do |k, v|
h[to_hash0(k)] = to_hash0(v)
end
h
else
obj
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/version.rb | lib/kumogata/version.rb | module Kumogata
VERSION = '0.5.12'
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/argument_parser.rb | lib/kumogata/argument_parser.rb | Version = Kumogata::VERSION
$kumogata = Hashie::Mash.new
class Kumogata::ArgumentParser
DEFAULT_OPTIONS = {
:delete_stack => true,
:result_log => File.join(Dir.pwd, 'result.json'),
:command_result_log => File.join(Dir.pwd, 'command_result.json'),
:color => $stdout.tty?,
:debug => false,
:config_path => File.expand_path('~/.aws/config'),
}
COMMANDS = {
:create => {
:description => 'Create resources as specified in the template',
:arguments => [:path_or_url, :stack_name?],
:output => false,
},
:validate => {
:description => 'Validate a specified template',
:arguments => [:path_or_url],
:output => false,
},
:convert => {
:description => 'Convert a template format',
:arguments => [:path_or_url],
},
:update => {
:description => 'Update a stack as specified in the template',
:arguments => [:path_or_url, :stack_name],
:output => false,
},
:delete => {
:description => 'Delete a specified stack',
:arguments => [:stack_name],
:output => false,
},
:list => {
:description => 'List summary information for stacks',
:arguments => [:stack_name?],
},
:export => {
:description => 'Export a template from a specified stack',
:arguments => [:stack_name],
},
:'show-events' => {
:description => 'Show events for a specified stack',
:arguments => [:stack_name],
},
:'show-outputs' => {
:description => 'Show outputs for a specified stack',
:arguments => [:stack_name],
},
:'show-resources' => {
:description => 'Show resources for a specified stack',
:arguments => [:stack_name],
},
:diff => {
:description => 'Compare templates logically (file, http://..., stack://...)',
:arguments => [:path_or_url1, :path_or_url2],
},
}
class << self
def parse!(&block)
self.new.parse!(&block)
end
end # of class methods
def parse!
command = nil
arguments = nil
options = {}
if ENV['KUMOGATA_OPTIONS']
ARGV.concat(scan_args(ENV['KUMOGATA_OPTIONS']))
end
ARGV.options do |opt|
update_usage(opt)
begin
supported_formats = [:ruby, :json, :yaml, :js, :coffee, :json5]
opt.on('-k', '--access-key ACCESS_KEY') {|v| options[:access_key_id] = v }
opt.on('-s', '--secret-key SECRET_KEY') {|v| options[:secret_access_key] = v }
opt.on('-r', '--region REGION') {|v| options[:region] = v }
opt.on('' , '--profile CONFIG_PROFILE') {|v| options[:config_profile] = v }
opt.on('' , '--credentials-path PATH') {|v| options[:credentials_path] = v }
opt.on('' , '--config-path PATH') {|v| options[:config_path] = v }
opt.on('' , '--stack-policy-body PATH') {|v| options[:stack_policy_body] = File.read(v) }
opt.on('' , '--stack-policy-url URL') {|v| options[:stack_policy_url] = v }
opt.on('' , '--format TMPLATE_FORMAT', supported_formats) {|v| options[:format] = v }
opt.on('' , '--output-format FORMAT', supported_formats) {|v| options[:output_format] = v }
opt.on('' , '--skip-replace-underscore') { options[:skip_replace_underscore] = false }
opt.on('' , '--deletion-policy-retain') { options[:deletion_policy_retain] = true }
opt.on('-p', '--parameters KEY_VALUES', Array) {|v| options[:parameters] = v }
opt.on('-j', '--json-parameters JSON') {|v| options[:json_parameters] = v }
opt.on('-e', '--encrypt-parameters KEYS', Array) {|v| options[:encrypt_parameters] = v }
opt.on('', '--encryption-password PASS') {|v| options[:encryption_password] = v }
opt.on('', '--skip-send-password') { options[:skip_send_password] = true }
opt.on('' , '--capabilities CAPABILITIES', Array) {|v| options[:capabilities] = v }
opt.on('' , '--disable-rollback') { options[:disable_rollback] = true }
opt.on('' , '--notify SNS_TOPICS', Array) {|v| options[:notify] = v }
opt.on('' , '--timeout MINUTES', Integer) {|v| options[:timeout] = v }
opt.on('' , '--result-log PATH') {|v| options[:result_log] = v }
opt.on('' , '--command-result-log PATH') {|v| options[:command] = v }
opt.on('' , '--detach') { options[:detach] = true }
opt.on('' , '--force') { options[:force] = true }
opt.on('-w', '--ignore-all-space') { options[:ignore_all_space] = true }
opt.on('' , '--color') { options[:color] = true }
opt.on('' , '--no-color') { options[:color] = false }
opt.on('' , '--debug') { options[:debug] = true }
opt.on('-v', '--verbose') { options[:verbose] = true }
opt.parse!
unless (command = ARGV.shift)
puts opt.help
exit 1
end
command = command.to_sym
unless COMMANDS.has_key?(command)
raise "Unknown command: #{command}"
end
arguments = ARGV.dup
validate_arguments(command, arguments)
options = DEFAULT_OPTIONS.merge(options)
options = Hashie::Mash.new(options)
if block_given?
yield(opt, command, arguments, options)
end
update_parameters(options)
rescue => e
$stderr.puts("#{e.message}")
raise e if options[:debug]
exit 1
end
end
output = COMMANDS[command].fetch(:output, true)
command = command.to_s.gsub('-', '_').to_sym
$kumogata.command = command
$kumogata.arguments = arguments
$kumogata.options = options
options = $kumogata.options # Copy of the reference
[command, arguments, options, output]
end
private
def update_usage(opt)
opt.banner = "Usage: kumogata <command> [args] [options]"
opt.separator ''
opt.separator 'Commands:'
cmd_max = COMMANDS.keys.map {|i| i.to_s.length }.max
cmd_arg_descs = COMMANDS.map {|command, attributes|
description = attributes[:description]
arguments = attributes[:arguments]
[
'%-*s %s' % [cmd_max, command, arguments_to_message(arguments)],
description,
]
}
cmd_arg_max = cmd_arg_descs.map {|i| i[0].length }.max
opt.separator(cmd_arg_descs.map {|cmd_arg, desc|
' %-*s %-s' % [cmd_arg_max, cmd_arg, desc]
}.join("\n"))
opt.separator ''
opt.separator 'Options:'
end
def validate_arguments(command, arguments)
expected = COMMANDS[command][:arguments] || []
min = expected.count {|i| i.to_s !~ /\?\Z/ }
max = expected.length
if arguments.length < min or max < arguments.length
raise "Usage: kumogata #{command} #{arguments_to_message(expected)} [options]"
end
end
def arguments_to_message(arguments)
arguments.map {|i| i.to_s.sub(/(.+)\?\Z/) { "[#{$1}]" }.upcase }.join(' ')
end
def update_parameters(options)
parameters = {}
(options.parameters || []).each do |i|
key, value = i.split('=', 2)
parameters[key] = value
end
if options.json_parameters
parameters.merge! JSON.parse(options.json_parameters)
end
options.parameters = parameters
end
def scan_args(str)
args = []
ss = StringScanner.new(str)
buf = ''
until ss.eos?
if ss.scan(/\s+/)
unless buf.empty?
args << buf
buf = ''
end
elsif (tok = ss.scan(/'[^']*'/))
buf << tok.gsub(/'([^']*)'/) { $1 }
elsif (tok = ss.scan(/"[^"]*"/))
buf << tok.gsub(/"([^"]*)"/) { $1 }
elsif (tok = ss.scan(/[^\s'"]+/))
buf << tok
else
buf << ss.getch
end
end
args << buf unless buf.empty?
return args
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/logger.rb | lib/kumogata/logger.rb | module Kumogata
def self.logger
Kumogata::Logger.instance
end
class Logger < ::Logger
include Singleton
def initialize
super($stdout)
self.formatter = proc do |severity, datetime, progname, msg|
"#{msg}\n"
end
self.level = Logger::INFO
end
def set_debug(value)
self.level = value ? Logger::DEBUG : Logger::INFO
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/utils.rb | lib/kumogata/utils.rb | class Kumogata::Utils
class << self
def camelize(str)
str.to_s.split(/[-_]/).map {|i|
i[0, 1].upcase + i[1..-1].downcase
}.join
end
def get_user_host
user = `whoami`.strip rescue ''
host = `hostname`.strip rescue ''
user_host = [user, host].select {|i| not i.empty? }.join('-')
user_host.empty? ? nil : user_host
end
def random_param_name(n)
a_zA_Z0_9 = (('a'..'z').to_a + ('A'..'Z').to_a + ('0'..'9').to_a)
a_zA_Z0_9.sample(n).join
end
def filter_backtrace(backtrace)
filter_path = ['(eval)']
if defined?(Gem)
filter_path.concat(Gem.path)
filter_path << Gem.bindir
end
RbConfig::CONFIG.select {|k, v|
k.to_s =~ /libdir/
}.each {|k, v| filter_path << v }
filter_path = filter_path.map {|i| /\A#{Regexp.escape(i)}/ }
backtrace.select do |path|
path = path.split(':', 2).first
not filter_path.any? {|i| i =~ path }
end
end
def stringify(obj)
case obj
when Array
obj.map {|i| stringify(i) }
when Hash
hash = {}
obj.each {|k, v| hash[stringify(k)] = stringify(v) }
hash
else
obj.to_s
end
end
end # of class methods
end
module Kumogata
ENCRYPTION_PASSWORD = Kumogata::Utils.random_param_name(16)
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/string_stream.rb | lib/kumogata/string_stream.rb | class Kumogata::StringStream
def initialize(&block)
@buf = StringScanner.new('')
@block = block
@fiber = Fiber.new do
self.run
end
# Step to `yield`
@fiber.resume
end
def run
loop do
chunk = Fiber.yield
break unless chunk
@buf << chunk.to_s
self.each_line
end
end
def each_line
while (line = @buf.scan_until(/(\r\n|\r|\n)/))
@block.call(line.chomp)
end
end
def push(chunk)
@fiber.resume(chunk)
end
def close
self.each_line
@block.call(@buf.rest) if @buf.rest?
@fiber.resume
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/post_processing.rb | lib/kumogata/post_processing.rb | class Kumogata::PostProcessing
TRIGGER_TIMING = [:create, :update]
def initialize(options)
@options = options
@commands = {}
@command_options = {
:undent => true,
:trim_mode => nil,
}
end
def fetch!(template)
_post = template.delete(:_post)
return unless _post
options = _post[:options] || {}
@command_options.merge(options)
outputs = template['Outputs'] || {}
_post.fetch(:commands).each do |name, attrs|
unless attrs.kind_of?(Hash) and attrs['command']
raise "Invalid post processing: #{name} => #{attrs.inspect}"
end
timing = [(attrs['after'] || [:create])].flatten.map {|i| i.to_sym }
command = attrs['command']
validate_timing(name, timing)
validate_command_template(name, command, outputs)
@commands[name] = {
:after => timing,
:command => command,
}
if (ssh = attrs['ssh'])
validate_ssh(name, ssh, outputs)
@commands[name][:ssh] = ssh
end
end
end
def run(timing, outputs)
results = []
@commands.each do |name, attrs|
next unless attrs[:after].include?(timing)
print_command(name)
out, err, status = run_command(attrs, outputs)
print_command_result(out, err, status)
results << {
name => {
'ExitStatus' => status.to_i,
'StdOut' => out.force_encoding('UTF-8'),
'StdErr' => err.force_encoding('UTF-8'),
}
}
end
if @options.command_result_log? and not results.empty?
save_command_results(results)
end
end
private
def validate_timing(name, timing)
timing.each do |t|
unless TRIGGER_TIMING.include?(t)
raise "Unknown post processing timing: #{name} => #{timing.inspect}"
end
end
end
def validate_ssh(name, ssh, outputs)
host, user, options = ssh.values_at('host', 'user', 'options')
unless host and user
raise "`host` and `user` is required for post processing ssh: #{name}"
end
if host.kind_of?(Hash)
if host.keys != ['Key']
raise "Invalid post processing ssh host: #{name} => #{host.inspect}"
end
host_key, host_value = host.first
ssh['host'] = "<%= #{host_key} #{host_value.to_s.inspect} %>"
else
ssh['host'] = host.to_s
end
validate_command_template(name, ssh['host'], outputs)
if user.kind_of?(Hash)
if user.keys != ['Key']
raise "Invalid post processing ssh user: #{name} => #{user.inspect}"
end
user_key, user_value = user.first
ssh['user'] = "<%= #{user_key} #{user_value.to_s.inspect} %>"
else
ssh['user'] = user.to_s
end
validate_command_template(name, ssh['user'], outputs)
if options and not options.kind_of?(Hash)
raise "Invalid post processing ssh options: #{name} => #{options.inspect}"
end
ssh['request_pty'] = !!((ssh['request_pty'] || true).to_s =~ /\Atrue\Z/)
end
def run_command(attrs, outputs)
command, ssh = attrs.values_at(:command, :ssh)
if ssh
run_ssh_command(ssh, command, outputs)
else
run_shell_command(command, outputs)
end
end
def run_ssh_command(ssh, command, outputs)
host, user, options = ssh.values_at('host', 'user', 'options')
host = evaluate_command_template(host, outputs)
user = evaluate_command_template(user, outputs)
args = [host, user]
args << ssh['options'] if ssh['options']
command = evaluate_command_template(command, outputs)
connect_tries = (ssh['connect_tries'] || 36).to_i
retry_interval = (ssh['retry_interval'] || 5).to_i
stderr_orig = nil
ssh_exec_opts = {:request_pty => ssh['request_pty']}
begin
stderr_orig = STDERR.dup
STDERR.reopen('/dev/null', 'w')
Retryable.retryable(:tries => connect_tries, :sleep => retry_interval) do
begin
Net::SSH.start(*args) {|ssh| ssh.exec!('echo') }
rescue Net::SSH::HostKeyMismatch => e
e.remember_host!
retry
end
end
Net::SSH.start(*args) {|ssh| ssh_exec!(ssh, command, ssh_exec_opts) }
ensure
STDERR.reopen(stderr_orig)
end
end
def ssh_exec!(ssh, command, options)
stdout_data = ''
stderr_data = ''
exit_code = nil
#exit_signal = nil
stdout_stream = create_stdout_stream
stderr_stream = create_stderr_stream
ssh.open_channel do |channel|
if options[:request_pty]
channel.request_pty do |ch, success|
unless success
raise "Couldn't obtain pty (ssh.channel.request_pty)"
end
end
end
channel.exec(command) do |ch, success|
unless success
raise "Couldn't execute command #{command.inspect} (ssh.channel.exec)"
end
channel.on_data do |ch, data|
stdout_stream.push data
stdout_data << data
end
channel.on_extended_data do |ch, type, data|
stderr_stream.push data
stderr_data << data
end
channel.on_request('exit-status') do |ch, data|
exit_code = data.read_long
end
#channel.on_request('exit-signal') do |ch, data|
# exit_signal = data.read_long
#end
end
end
ssh.loop
stdout_stream.close
stderr_stream.close
#[stdout_data, stderr_data, exit_code, exit_signal]
[stdout_data, stderr_data, exit_code]
end
def run_shell_command(command, outputs)
command = evaluate_command_template(command, outputs)
stdout_data = ''
stderr_data = ''
exit_code = nil
Open3.popen3(command) do |stdin, stdout, stderr, wait_thr|
mutex = Mutex.new
th_out = Thread.start do
stdout_stream = create_stdout_stream
stdout.each_line do |line|
mutex.synchronize do
stdout_stream.push line
end
stdout_data << line
end
stdout_stream.close
end
th_err = Thread.start do
stderr_stream = create_stderr_stream
stderr.each_line do |line|
mutex.synchronize do
stderr_stream.push line
end
stderr_data << line
end
stderr_stream.close
end
th_out.join
th_err.join
exit_code = wait_thr.value
end
#[stdout_data, stderr_data, exit_code, exit_signal]
[stdout_data, stderr_data, exit_code]
end
def validate_command_template(name, command, outputs)
command = command.undent if @command_options[:undent]
trim_mode = @command_options[:trim_mode]
expected_outputs = Set.new
scope = Object.new
scope.instance_variable_set(:@__expected_outputs__, expected_outputs)
scope.instance_eval(<<-EOS)
def Ref(name)
$kumogata.options.parameters[name]
end
def Key(name)
@__expected_outputs__ << name
end
ERB.new(#{command.inspect}, nil, #{trim_mode.inspect}).result(binding)
EOS
expected_outputs.each do |key|
unless outputs.keys.include?(key)
$stderr.puts("[WARN] Undefined output: #{name} => #{key.inspect}".yellow)
end
end
end
def evaluate_command_template(command, outputs)
command = command.undent if @command_options[:undent]
trim_mode = @command_options[:trim_mode]
scope = Object.new
scope.instance_variable_set(:@__outputs__, outputs)
scope.instance_eval(<<-EOS)
def Ref(name)
$kumogata.options.parameters[name]
end
def Key(name)
@__outputs__[name]
end
ERB.new(#{command.inspect}, nil, #{trim_mode.inspect}).result(binding)
EOS
end
def print_command(name)
puts <<-EOS
Command: #{name.intense_blue}
EOS
end
def create_stdout_stream
Kumogata::StringStream.new do |line|
puts '1> '.intense_green + line
$stdout.flush
end
end
def create_stderr_stream
Kumogata::StringStream.new do |line|
puts '2> '.intense_red + line
$stdout.flush
end
end
def print_command_result(out, err, status) # XXX:
status = status.to_i
puts <<-EOS
Status: #{status.zero? ? status : status.to_s.red}
EOS
end
def save_command_results(results)
puts <<-EOS
(Save to `#{@options.command_result_log}`)
EOS
open(@options.command_result_log, 'wb') do |f|
f.puts JSON.pretty_generate(results)
end
end
def validate_stack_name(stack_name)
return unless stack_name
unless /\A[a-zA-Z][-a-zA-Z0-9]*\Z/i =~ stack_name
raise "1 validation error detected: Value '#{stack_name}' at 'stackName' failed to satisfy constraint: Member must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9]*"
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/crypt.rb | lib/kumogata/crypt.rb | class Kumogata::Crypt
ALGORITHM = 'aes256'
PASSWORD_CHARS = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ123456789_*;:@{}()[]#$%&=-'
class << self
def encrypt(pass, str)
IO.popen("openssl enc -e -#{ALGORITHM} -pass pass:#{enquote(pass)}", "r+") {|io|
io.print str
io.close_write
io.read
}.encode64
end
def decrypt(pass, str)
IO.popen("openssl enc -d -#{ALGORITHM} -pass pass:#{enquote(pass)}", "r+") {|io|
io.print Base64.decode64(str)
io.close_write
io.read
}
end
def mkpasswd(n)
PASSWORD_CHARS.split(//).sample(n).join
end
private
def enquote(str)
"'" + str.gsub("'", %!'"'"'!) + "'"
end
end # of class methods
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/pre_processing.rb | lib/kumogata/pre_processing.rb | # TODO:
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/client.rb | lib/kumogata/client.rb | class Kumogata::Client
def initialize(options)
@options = options
@options = Hashie::Mash.new(@options) unless @options.kind_of?(Hashie::Mash)
@cloud_formation = AWS::CloudFormation.new
@outputs_filter = Kumogata::OutputsFilter.new(@options)
@post_processing = Kumogata::PostProcessing.new(@options)
end
def create(path_or_url, stack_name = nil)
validate_stack_name(stack_name)
@options.delete_stack = false if stack_name
template = open_template(path_or_url)
update_deletion_policy(template)
add_encryption_password(template)
outputs = create_stack(template, stack_name)
unless @options.detach?
@outputs_filter.filter!(outputs)
@post_processing.run(:create, outputs)
outputs
end
end
def validate(path_or_url)
template = open_template(path_or_url)
add_encryption_password_for_validation(template)
validate_template(template)
true
end
def convert(path_or_url)
template = open_template(path_or_url)
output_format = @options.output_format
unless output_format
output_format = case @options.format || guess_format(path_or_url)
when :ruby then :json
when :json then :ruby
when :yaml then :json
when :js then :json
when :json5 then :json
end
end
case output_format
when :ruby
devaluate_template(template).chomp.colorize_as(:ruby)
when :json, :json5
JSON.pretty_generate(template).colorize_as(:json)
when :yaml
YAML.dump(template).colorize_as(:yaml)
when :js
'(' + JSON.pretty_generate(template).colorize_as(:json) + ')'
when :coffee
raise 'Output to CoffeeScript is not implemented'
end
end
def update(path_or_url, stack_name)
validate_stack_name(stack_name)
@options.delete_stack = false
template = open_template(path_or_url)
update_deletion_policy(template, :update_metadate => true)
add_encryption_password(template)
outputs = update_stack(template, stack_name)
unless @options.detach?
@outputs_filter.filter!(outputs)
@post_processing.run(:update, outputs)
outputs
end
end
def delete(stack_name)
validate_stack_name(stack_name)
if @options.force? or agree("Are you sure you want to delete `#{stack_name}`? ".yellow)
delete_stack(stack_name)
end
unless @options.detach?
true
end
end
def list(stack_name = nil)
validate_stack_name(stack_name)
stacks = describe_stacks(stack_name)
JSON.pretty_generate(stacks).colorize_as(:json)
end
def export(stack_name)
validate_stack_name(stack_name)
template = export_template(stack_name)
format = @options.format || :ruby
case format
when :ruby
devaluate_template(template).chomp.colorize_as(:ruby)
when :json
JSON.pretty_generate(template).colorize_as(:json)
else
raise "Unknown format: #{format}"
end
end
def show_events(stack_name)
validate_stack_name(stack_name)
events = describe_events(stack_name)
JSON.pretty_generate(events).colorize_as(:json)
end
def show_outputs(stack_name)
validate_stack_name(stack_name)
outputs = describe_outputs(stack_name)
JSON.pretty_generate(outputs).colorize_as(:json)
end
def show_resources(stack_name)
validate_stack_name(stack_name)
resources = describe_resources(stack_name)
JSON.pretty_generate(resources).colorize_as(:json)
end
def diff(path_or_url1, path_or_url2)
templates = [path_or_url1, path_or_url2].map do |path_or_url|
template = nil
if path_or_url =~ %r|\Astack://(.*)|
stack_name = $1 || ''
validate_stack_name(stack_name)
template = export_template(stack_name)
else
template = open_template(path_or_url)
end
JSON.pretty_generate(template)
end
diff_opts = @options.ignore_all_space? ? '-uw' : '-u'
opts = {:include_diff_info => true, :diff => diff_opts}
diff = Diffy::Diff.new(*templates, opts).to_s
diff.sub(/^(\e\[\d+m)?\-\-\-(\s+)(\S+)/m) { "#{$1}---#{$2}#{path_or_url1}"}
.sub(/^(\e\[\d+m)?\+\+\+(\s+)(\S+)/m) { "#{$1}+++#{$2}#{path_or_url2}"}
end
private ###########################################################
def open_template(path_or_url)
format = @options.format || guess_format(path_or_url)
block = proc do |f|
case format
when :ruby
evaluate_template(f, path_or_url)
when :json
JSON.parse(f.read)
when :yaml
parsed = YAML.load(f.read)
Kumogata::Utils.stringify(parsed)
when :js
obj = V8::Context.new.eval(f.read)
unless obj.instance_of?(V8::Object)
raise "Invalid JavaScript template. Please return Object: #{path_or_url}"
end
Kumogata::Utils.stringify(obj.to_hash)
when :coffee
completed = CoffeeScript.compile(f.read)
obj = V8::Context.new.eval(completed)
unless obj.instance_of?(V8::Object)
raise "Invalid CoffeeScript template. Please return Object: #{path_or_url}"
end
Kumogata::Utils.stringify(obj.to_hash)
when :json5
parsed = JSON5.parse(f.read)
Kumogata::Utils.stringify(parsed)
else
raise "Unknown format: #{format}"
end
end
if path_or_url == '-'
block.call($stdin)
else
open(path_or_url, &block)
end
end
def guess_format(path_or_url)
case File.extname(path_or_url)
when '.rb'
:ruby
when '.json'
:json
when '.yml', '.yaml'
:yaml
when '.js'
:js
when '.coffee'
:coffee
when '.json5'
:json5
else
:json
end
end
def evaluate_template(template, path_or_url)
key_converter = proc do |key|
key = key.to_s
unless @options.skip_replace_underscore?
key.gsub!('_', ':')
key.gsub!('__', '::')
end
key
end
value_converter = proc do |v|
case v
when Hash, Array
v
else
v.to_s
end
end
template = Dslh.eval(template.read, {
:key_conv => key_converter,
:value_conv => value_converter,
:scope_hook => proc {|scope|
define_template_func(scope, path_or_url)
},
:filename => path_or_url,
})
@outputs_filter.fetch!(template)
@post_processing.fetch!(template)
return template
end
def evaluate_after_trigger(template)
triggers = template.delete('_after')
return {} unless triggers
end
def devaluate_template(template)
exclude_key = proc do |k|
k = k.to_s.gsub(':', '_')
k = k.to_s.gsub('::', '__')
k !~ /\A[_a-z]\w+\Z/i and k !~ %r|\A/\S*\Z|
end
key_conv = proc do |k|
k = k.to_s
if k =~ %r|\A/\S*\Z|
proc do |v, nested|
if nested
"_path(#{k.inspect}) #{v}"
else
"_path #{k.inspect}, #{v}"
end
end
else
k.gsub(':', '_')
k.gsub('::', '__')
end
end
value_conv = proc do |v|
if v.kind_of?(String) and v =~ /\A(?:0|[1-9]\d*)\Z/
v.to_i
else
v
end
end
Dslh.deval(template, :key_conv => key_conv, :value_conv => value_conv, :exclude_key => exclude_key)
end
def define_template_func(scope, path_or_url)
scope.instance_eval(<<-EOS)
def _include(file, args = {})
path = file.dup
unless path =~ %r|\\A/| or path =~ %r|\\A\\w+://|
path = File.expand_path(File.join(File.dirname(#{path_or_url.inspect}), path))
end
open(path) {|f| instance_eval(f.read) }
end
def _path(path, value = nil, &block)
if block
value = Dslh::ScopeBlock.nest(binding, 'block')
end
@__hash__[path] = value
end
def _outputs_filter(&block)
@__hash__[:_outputs_filter] = block
end
def _post(options = {}, &block)
commands = Dslh::ScopeBlock.nest(binding, 'block')
@__hash__[:_post] = {
:options => options,
:commands => commands,
}
end
EOS
end
def create_stack(template, stack_name)
unless stack_name
user_host = Kumogata::Utils.get_user_host
stack_name = ['kumogata']
stack_name << user_host if user_host
stack_name << UUIDTools::UUID.timestamp_create
stack_name = stack_name.join('-')
stack_name.gsub!(/[^-a-zA-Z0-9]+/, '-')
end
Kumogata.logger.info("Creating stack: #{stack_name}".cyan)
stack = @cloud_formation.stacks.create(stack_name,
JSON.pretty_generate(template),
build_create_options)
return if @options.detach?
event_log = {}
unless while_in_progress(stack, 'CREATE_COMPLETE', event_log)
errmsgs = ['Create failed']
errmsgs << stack_name
errmsgs << stack.status_reason if stack.status_reason
raise errmsgs.join(': ')
end
outputs = outputs_for(stack)
summaries = resource_summaries_for(stack)
if @options.delete_stack?
delete_stack(stack_name)
end
output_result(stack_name, outputs, summaries)
return outputs
end
def update_stack(template, stack_name)
stack = @cloud_formation.stacks[stack_name]
stack.status
Kumogata.logger.info("Updating stack: #{stack_name}".green)
event_log = create_event_log(stack)
stack.update(build_update_options(JSON.pretty_generate(template)))
return if @options.detach?
unless while_in_progress(stack, 'UPDATE_COMPLETE', event_log)
errmsgs = ['Update failed']
errmsgs << stack_name
errmsgs << stack.status_reason if stack.status_reason
raise errmsgs.join(': ')
end
outputs = outputs_for(stack)
summaries = resource_summaries_for(stack)
output_result(stack_name, outputs, summaries)
return outputs
end
def delete_stack(stack_name)
stack = @cloud_formation.stacks[stack_name]
stack.status
Kumogata.logger.info("Deleting stack: #{stack_name}".red)
event_log = create_event_log(stack)
stack.delete
return if @options.detach?
completed = false
begin
completed = while_in_progress(stack, 'DELETE_COMPLETE', event_log)
rescue AWS::CloudFormation::Errors::ValidationError
# Handle `Stack does not exist`
completed = true
Kumogata.logger.info('Success')
end
unless completed
errmsgs = ['Delete failed']
errmsgs << stack_name
errmsgs << stack.status_reason if stack.status_reason
raise errmsgs.join(': ')
end
end
def describe_stacks(stack_name)
AWS.memoize do
stacks = @cloud_formation.stacks
stacks = stacks.select {|i| i.name == stack_name } if stack_name
stacks.map do |stack|
{
'StackName' => stack.name,
'CreationTime' => stack.creation_time,
'StackStatus' => stack.status,
'Description' => stack.description,
}
end
end
end
def export_template(stack_name)
stack = @cloud_formation.stacks[stack_name]
stack.status
JSON.parse(stack.template)
end
def describe_events(stack_name)
AWS.memoize do
stack = @cloud_formation.stacks[stack_name]
stack.status
events_for(stack)
end
end
def describe_outputs(stack_name)
AWS.memoize do
stack = @cloud_formation.stacks[stack_name]
stack.status
outputs_for(stack)
end
end
def describe_resources(stack_name)
AWS.memoize do
stack = @cloud_formation.stacks[stack_name]
stack.status
resource_summaries_for(stack)
end
end
def while_in_progress(stack, complete_status, event_log)
# XXX: Status does not change if you have been memoized.
# Should be forcibly disabled memoization?
while stack.status =~ /_IN_PROGRESS\Z/
print_event_log(stack, event_log)
sleep 1
end
print_event_log(stack, event_log)
completed = (stack.status == complete_status)
Kumogata.logger.info(completed ? 'Success' : 'Failure')
return completed
end
def print_event_log(stack, event_log)
events_for(stack).sort_by {|i| i['Timestamp'] }.each do |event|
event_id = event['EventId']
unless event_log[event_id]
event_log[event_id] = event
timestamp = event['Timestamp']
summary = {}
['LogicalResourceId', 'ResourceStatus', 'ResourceStatusReason'].map do |k|
summary[k] = event[k]
end
puts [
timestamp.getlocal.strftime('%Y/%m/%d %H:%M:%S %Z'),
summary.to_json.colorize_as(:json),
].join(': ')
end
end
end
def create_event_log(stack)
event_log = {}
events_for(stack).sort_by {|i| i['Timestamp'] }.each do |event|
event_id = event['EventId']
event_log[event_id] = event
end
return event_log
end
def build_create_options
opts = {}
add_parameters(opts)
[:capabilities, :disable_rollback, :notify, :timeout,
:stack_policy_body, :stack_policy_url].each do |k|
opts[k] = @options[k] if @options[k]
end
return opts
end
def build_update_options(template)
opts = {:template => template}
add_parameters(opts)
[:capabilities, :stack_policy_body, :stack_policy_url].each do |k|
opts[k] = @options[k] if @options[k]
end
return opts
end
def add_parameters(hash)
if @options.parameters? and not @options.parameters.empty?
parameters = {}
enc_params = @options.encrypt_parameters
passwd = @options.encryption_password || Kumogata::Crypt.mkpasswd(16)
@options.parameters.each do |key, value|
if enc_params and (enc_params.include?('*') or enc_params.include?(key))
value = Kumogata::Crypt.encrypt(passwd, value)
end
parameters[key] = value
end
if @options.encrypt_parameters? and not @options.skip_send_password?
parameters[Kumogata::ENCRYPTION_PASSWORD] = passwd.encode64
end
hash[:parameters] = parameters
end
end
def update_deletion_policy(template, options = {})
if @options.delete_stack? or @options.deletion_policy_retain?
template['Resources'].each do |k, v|
next if /\AAWS::CloudFormation::/ =~ v['Type']
v['DeletionPolicy'] ||= 'Retain'
if options[:update_metadate]
v['Metadata'] ||= {}
v['Metadata']['DeletionPolicyUpdateKeyForKumogata'] = "DeletionPolicyUpdateValueForKumogata#{Time.now.to_i}"
end
end
end
end
def add_encryption_password(template)
if @options.encrypt_parameters? and not @options.skip_send_password?
template['Parameters'] ||= {}
template['Parameters'][Kumogata::ENCRYPTION_PASSWORD] = {
'Type' => 'String',
'NoEcho' => 'true',
}
end
end
def add_encryption_password_for_validation(template)
template['Parameters'] ||= {}
template['Parameters'][Kumogata::ENCRYPTION_PASSWORD] ||= {
'Type' => 'String',
'Default' => "(#{Kumogata::ENCRYPTION_PASSWORD})",
}
end
def validate_template(template)
result = @cloud_formation.validate_template(template.to_json)
if result[:code]
raise result.values_at(:code, :message).join(': ')
end
Kumogata.logger.info('Template validated successfully'.green)
if @options.verbose
Kumogata.logger.info(JSON.pretty_generate(JSON.parse(result.to_json)).colorize_as(:json))
end
end
def events_for(stack)
stack.events.map do |event|
event_hash = {}
[
:event_id,
:logical_resource_id,
:physical_resource_id,
:resource_properties,
:resource_status,
:resource_status_reason,
:resource_type,
:stack_id,
:stack_name,
:timestamp,
].each do |k|
event_hash[Kumogata::Utils.camelize(k)] = event.send(k)
end
event_hash
end
end
def outputs_for(stack)
outputs_hash = {}
stack.outputs.each do |output|
outputs_hash[output.key] = output.value
end
return outputs_hash
end
def resource_summaries_for(stack)
stack.resource_summaries.map do |summary|
summary_hash = {}
[
:logical_resource_id,
:physical_resource_id,
:resource_type,
:resource_status,
:resource_status_reason,
:last_updated_timestamp
].each do |k|
summary_hash[Kumogata::Utils.camelize(k)] = summary[k]
end
summary_hash
end
end
def output_result(stack_name, outputs, summaries)
puts <<-EOS
Stack Resource Summaries:
#{JSON.pretty_generate(summaries).colorize_as(:json)}
Outputs:
#{JSON.pretty_generate(outputs).colorize_as(:json)}
EOS
if @options.result_log?
puts <<-EOS
(Save to `#{@options.result_log}`)
EOS
open(@options.result_log, 'wb') do |f|
f.puts JSON.pretty_generate({
'StackName' => stack_name,
'StackResourceSummaries' => summaries,
'Outputs' => outputs,
})
end
end
end
def validate_stack_name(stack_name)
return unless stack_name
unless /\A[a-zA-Z][-a-zA-Z0-9]*\Z/i =~ stack_name
raise "1 validation error detected: Value '#{stack_name}' at 'stackName' failed to satisfy constraint: Member must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9]*"
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/outputs_filter.rb | lib/kumogata/outputs_filter.rb | class Kumogata::OutputsFilter
def initialize(options)
@options = options
end
def fetch!(template)
@filter = template.delete(:_outputs_filter)
end
def filter!(outputs)
@filter.call(outputs) if @filter
return outputs
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/ext/coderay_ext.rb | lib/kumogata/ext/coderay_ext.rb | CodeRay::Encoders::Terminal::TOKEN_COLORS[:constant] = "\e[1;34m"
CodeRay::Encoders::Terminal::TOKEN_COLORS[:float] = "\e[36m"
CodeRay::Encoders::Terminal::TOKEN_COLORS[:integer] = "\e[36m"
CodeRay::Encoders::Terminal::TOKEN_COLORS[:keyword] = "\e[1;31m"
CodeRay::Encoders::Terminal::TOKEN_COLORS[:key] = {
:self => "\e[1;34m",
:char => "\e[1;34m",
:delimiter => "\e[1;34m",
}
CodeRay::Encoders::Terminal::TOKEN_COLORS[:string] = {
:self => "\e[32m",
:modifier => "\e[1;32m",
:char => "\e[1;32m",
:delimiter => "\e[1;32m",
:escape => "\e[1;32m",
}
CodeRay::Encoders::Terminal::TOKEN_COLORS[:error] = "\e[0m"
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/ext/string_ext.rb | lib/kumogata/ext/string_ext.rb | class String
@@colorize = false
class << self
def colorize=(value)
@@colorize = value
end
def colorize
@@colorize
end
end # of class methods
Term::ANSIColor::Attribute.named_attributes.each do |attribute|
class_eval(<<-EOS, __FILE__, __LINE__ + 1)
def #{attribute.name}
if @@colorize
Term::ANSIColor.send(#{attribute.name.inspect}, self)
else
self
end
end
EOS
end
def colorize_as(lang)
if @@colorize
CodeRay.scan(self, lang).terminal
else
self
end
end
def encode64
Base64.encode64(self).delete("\n")
end
def undent
min_space_num = self.split("\n").delete_if {|s| s =~ /^\s*$/ }.map {|s| (s[/^\s+/] || '').length }.min
if min_space_num and min_space_num > 0
gsub(/^[ \t]{,#{min_space_num}}/, '')
else
self
end
end
def fn_join(options = {})
options = {
:undent => true,
:trim_mode => nil,
}.merge(options)
data = self.dup
data = data.undent if options[:undent]
trim_mode = options[:trim_mode]
null = "\0"
data = Object.new.instance_eval(<<-EOS)
@__functions__ = []
@__value_conv__ = proc do |v|
case v
when Array, Hash
v
else
v.to_s
end
end
def Fn__Base64(value)
value = {'Fn::Base64' => @__value_conv__[value]}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Fn__FindInMap(map_name, top_level_key, second_level_key)
value = {'Fn::FindInMap' => [
map_name, top_level_key, second_level_key].map(&@__value_conv__)}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Fn__GetAtt(logical_name, attr_name)
value = {'Fn::GetAtt' => [
logical_name, attr_name].map(&@__value_conv__)}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Fn__GetAZs(region)
value = {'Fn::GetAZs' => @__value_conv__[region]}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Fn__If(value)
value = {'Fn::If' => @__value_conv__[value]}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Fn__Select(value)
value = {'Fn::Select' => @__value_conv__[value]}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def Ref(value)
value = {'Ref' => value}
case @__functions__
when Array
@__functions__ << value
when Hash
@__functions__.update(value)
end
#{null.inspect}
end
def _(&block)
__functions__orig = @__functions__
@__functions__ = {}
block.call if block
value = @__functions__
@__functions__ = __functions__orig
return value
end
ERB.new(#{data.inspect}, nil, #{trim_mode.inspect}).result(binding).split(#{null.inspect}).zip(@__functions__)
EOS
data = data.flatten.select {|i| not i.nil? }.map {|i|
if i.kind_of?(String)
i.lines.to_a
else
i
end
}.flatten
return {
'Fn::Join' => ['', data]
}
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
kumogata/kumogata | https://github.com/kumogata/kumogata/blob/1c4eadd07e9bf2d7bdef6333a993f7096adcec97/lib/kumogata/ext/json_ext.rb | lib/kumogata/ext/json_ext.rb | module JSON
class << self
alias pretty_generate_orig pretty_generate
def pretty_generate(object, options = nil)
begin
pretty_generate_orig(object, options)
rescue JSON::NestingError => e
e.message << ': Undefined function may have been referenced [e.g. _user_data()/_join()]'
raise e
end
end
end
end
| ruby | MIT | 1c4eadd07e9bf2d7bdef6333a993f7096adcec97 | 2026-01-04T17:51:03.061350Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/spec_helper.rb | spec/spec_helper.rb | require 'support/coverage_setup'
require 'rspec'
require 'stringio'
require 'fileutils'
require 'timecop'
require 'tmpdir'
require 'pulsar'
RSpec.configure do |config|
config.mock_with :rspec
config.raise_errors_for_deprecations!
config.add_setting :pulsar_command
config.add_setting :pulsar_conf_path
config.add_setting :pulsar_empty_conf_path
config.add_setting :pulsar_wrong_cap_conf_path
config.add_setting :pulsar_wrong_bundle_conf_path
config.add_setting :pulsar_dotenv_conf_path
config.add_setting :pulsar_local_conf_repo_path
config.add_setting :pulsar_remote_git_conf
config.add_setting :pulsar_remote_github_conf
config.pulsar_command = "ruby -Ilib #{File.expand_path('./exe/pulsar')}"
config.pulsar_conf_path = File.expand_path('./spec/support/dummies/conf/dir')
config.pulsar_empty_conf_path = File.expand_path('./spec/support/dummies/conf/empty')
config.pulsar_wrong_cap_conf_path = File.expand_path('./spec/support/dummies/conf/wrong_cap')
config.pulsar_wrong_bundle_conf_path = File.expand_path('./spec/support/dummies/conf/wrong_bundle')
config.pulsar_dotenv_conf_path = File.expand_path('./spec/support/dummies/conf/dotenv')
config.pulsar_local_conf_repo_path = File.expand_path('./spec/support/tmp/dummy-repo')
config.pulsar_remote_git_conf = 'git@github.com:nebulab/pulsar-conf-demo.git'
config.pulsar_remote_github_conf = 'nebulab/pulsar-conf-demo'
config.before(:suite) do
Dir.chdir('./spec/support/tmp')
end
config.before(:each) do
ENV.delete_if { |name, _| name =~ /^PULSAR_/ }
end
config.after(:each) do
FileUtils.rm_rf(Dir.glob("#{File.dirname(__FILE__)}/support/tmp/*"))
FileUtils.rm_rf(Dir.glob("#{Pulsar::PULSAR_TMP}/*"))
end
end
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/coverage_setup.rb | spec/support/coverage_setup.rb | require 'simplecov'
require 'coveralls'
SimpleCov.formatters = [
SimpleCov::Formatter::HTMLFormatter,
Coveralls::SimpleCov::Formatter
]
# Save to CircleCI's artifacts directory if we're on CircleCI
if ENV['CIRCLE_ARTIFACTS']
dir = File.join(ENV['CIRCLE_ARTIFACTS'], "coverage")
SimpleCov.coverage_dir(dir)
end
SimpleCov.minimum_coverage 99
SimpleCov.minimum_coverage_by_file 90
SimpleCov.refuse_coverage_drop
if ENV['FEATURE_TESTS']
SimpleCov.command_name 'features'
# This is needed because otherwise SimpleCov will output some text at exit
# and it will make most of the feature specs fail (that check the output).
SimpleCov.at_exit do
$stdout.reopen(File::NULL, 'w')
$stdout.sync = true
SimpleCov.result.format!
$stdout = STDOUT
end
end
if ENV['COVERAGE']
SimpleCov.start do
add_group 'Interactors', 'lib/pulsar/interactors'
add_group 'Organizers', 'lib/pulsar/organizers'
add_filter 'spec/*'
end
end
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_cap/apps/deploy.rb | spec/support/dummies/conf/wrong_cap/apps/deploy.rb | # Defaults deployrb
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_cap/apps/blog/production.rb | spec/support/dummies/conf/wrong_cap/apps/blog/production.rb | # Production config
server 'blog.com', user: 'deploy', roles: %w{web app db}, primary: true
set :stage, :production
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_cap/apps/blog/deploy.rb | spec/support/dummies/conf/wrong_cap/apps/blog/deploy.rb | # App Defaults deployrb
set :application, 'blog'
set :repo_url, 'git@example.com:me/blog.git'
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_bundle/apps/deploy.rb | spec/support/dummies/conf/wrong_bundle/apps/deploy.rb | # Defaults deployrb
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_bundle/apps/blog/production.rb | spec/support/dummies/conf/wrong_bundle/apps/blog/production.rb | # Production config
server 'blog.com', user: 'deploy', roles: %w{web app db}, primary: true
set :stage, :production
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/wrong_bundle/apps/blog/deploy.rb | spec/support/dummies/conf/wrong_bundle/apps/blog/deploy.rb | # App Defaults deployrb
set :application, 'blog'
set :repo_url, 'git@example.com:me/blog.git'
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/dir/apps/deploy.rb | spec/support/dummies/conf/dir/apps/deploy.rb | # Defaults deployrb
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/dir/apps/ecommerce/staging.rb | spec/support/dummies/conf/dir/apps/ecommerce/staging.rb | ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false | |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/dir/apps/blog/staging.rb | spec/support/dummies/conf/dir/apps/blog/staging.rb | # Staging config
server 'staging.blog.com', user: 'deploy', roles: %w{web app db}, primary: true
set :stage, :staging
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/dir/apps/blog/production.rb | spec/support/dummies/conf/dir/apps/blog/production.rb | # Production config
server 'blog.com', user: 'deploy', roles: %w{web app db}, primary: true
set :stage, :production
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
nebulab/pulsar | https://github.com/nebulab/pulsar/blob/fd8d081653f70517e0d40e71611aede8814ab1d8/spec/support/dummies/conf/dir/apps/blog/deploy.rb | spec/support/dummies/conf/dir/apps/blog/deploy.rb | # App Defaults deployrb
set :application, 'blog'
set :repo_url, 'git@example.com:me/blog.git'
| ruby | MIT | fd8d081653f70517e0d40e71611aede8814ab1d8 | 2026-01-04T17:51:14.324688Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.